Commit f1244df1 authored by Thomas Gleixner

Merge branch 'linus' into rt/base

parents 9e6d9c5e 326ba501
......@@ -123,6 +123,9 @@ available from the same CVS repository.
There are user and developer mailing lists available through the v9fs project
on sourceforge (http://sourceforge.net/projects/v9fs).
A stand-alone version of the module (which should build for any 2.6 kernel)
is available via (http://github.com/ericvh/9p-sac/tree/master)
News and other information is maintained on SWiK (http://swik.net/v9fs).
Bug reports may be issued through the kernel.org bugzilla
......
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 31
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
......
......@@ -574,10 +574,11 @@ static int a2000_hwclk(int op, struct rtc_time *t)
tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD;
while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--) {
while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
udelay(70);
tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
--cnt;
}
if (!cnt)
......@@ -649,10 +650,11 @@ static int amiga_set_clock_mmss(unsigned long nowtime)
tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--) {
while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
udelay(70);
tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
--cnt;
}
if (!cnt)
......
......@@ -36,12 +36,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
return NULL;
pte = kmap(page);
if (pte) {
__flush_page_to_ram(pte);
flush_tlb_kernel_page(pte);
nocache_page(pte);
}
kunmap(pte);
kunmap(page);
pgtable_page_ctor(page);
return page;
}
......
......@@ -135,8 +135,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#endif
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
/*
* Macro to mark a page protection value as "uncacheable".
*/
......@@ -154,6 +152,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
: (prot)))
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
/*
......
......@@ -334,10 +334,12 @@
#define __NR_inotify_init1 328
#define __NR_preadv 329
#define __NR_pwritev 330
#define __NR_rt_tgsigqueueinfo 331
#define __NR_perf_counter_open 332
#ifdef __KERNEL__
#define NR_syscalls 331
#define NR_syscalls 333
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
......
......@@ -755,4 +755,6 @@ sys_call_table:
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_counter_open
......@@ -349,6 +349,8 @@ ENTRY(sys_call_table)
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_counter_open
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
......
This diff is collapsed.
......@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/firmware.h>
#include <asm/rtc.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
......@@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void)
{
struct platform_device *pdev;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
......
......@@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void)
{
int result = 0;
if (acpi_disabled)
return 0;
memset(&errata, 0, sizeof(errata));
#ifdef CONFIG_SMP
......@@ -1197,6 +1200,9 @@ out_proc:
static void __exit acpi_processor_exit(void)
{
if (acpi_disabled)
return;
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
......
......@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
if (pr->limit.thermal.tx > tx)
tx = pr->limit.thermal.tx;
result = acpi_processor_set_throttling(pr, tx);
result = acpi_processor_set_throttling(pr, tx, false);
if (result)
goto end;
}
......@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
if (state <= max_pstate) {
if (pr->flags.throttling && pr->throttling.state)
result = acpi_processor_set_throttling(pr, 0);
result = acpi_processor_set_throttling(pr, 0, false);
cpufreq_set_cur_state(pr->id, state);
} else {
cpufreq_set_cur_state(pr->id, max_pstate);
result = acpi_processor_set_throttling(pr,
state - max_pstate);
state - max_pstate, false);
}
return result;
}
......
......@@ -62,7 +62,8 @@ struct throttling_tstate {
#define THROTTLING_POSTCHANGE (2)
static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force);
static int acpi_processor_update_tsd_coord(void)
{
......@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
*/
target_state = throttling_limit;
}
return acpi_processor_set_throttling(pr, target_state);
return acpi_processor_set_throttling(pr, target_state, false);
}
/*
......@@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
if (ret >= 0) {
state = acpi_get_throttling_state(pr, value);
if (state == -1) {
ACPI_WARNING((AE_INFO,
"Invalid throttling state, reset"));
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Invalid throttling state, reset\n"));
state = 0;
ret = acpi_processor_set_throttling(pr, state);
ret = acpi_processor_set_throttling(pr, state, true);
if (ret)
return ret;
}
......@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
}
static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
int state)
int state, bool force)
{
u32 value = 0;
u32 duty_mask = 0;
......@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
if (!pr->flags.throttling)
return -ENODEV;
if (state == pr->throttling.state)
if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
......@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
}
static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int state)
int state, bool force)
{
int ret;
acpi_integer value;
......@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
if (!pr->flags.throttling)
return -ENODEV;
if (state == pr->throttling.state)
if (!force && (state == pr->throttling.state))
return 0;
if (state < pr->throttling_platform_limit)
......@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
cpumask_var_t saved_mask;
int ret = 0;
......@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
/* FIXME: use work_on_cpu() */
set_cpus_allowed_ptr(current, cpumask_of(pr->id));
ret = p_throttling->acpi_processor_set_throttling(pr,
t_state.target_state);
t_state.target_state, force);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
......@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
set_cpus_allowed_ptr(current, cpumask_of(i));
ret = match_pr->throttling.
acpi_processor_set_throttling(
match_pr, t_state.target_state);
match_pr, t_state.target_state, force);
}
}
/*
......@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Disabling throttling (was T%d)\n",
pr->throttling.state));
result = acpi_processor_set_throttling(pr, 0);
result = acpi_processor_set_throttling(pr, 0, false);
if (result)
goto end;
}
......@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
if (strcmp(tmpbuf, charp) != 0)
return -EINVAL;
result = acpi_processor_set_throttling(pr, state_val);
result = acpi_processor_set_throttling(pr, state_val, false);
if (result)
return result;
......
......@@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
gpio_data->inverted = !!inverted;
/* After inverting, we need to update the LED. */
schedule_work(&gpio_data->work);
return n;
}
static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
......@@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
return -EINVAL;
}
if (gpio_data->gpio == gpio)
return n;
if (!gpio) {
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = 0;
return n;
}
if (gpio_data->gpio > 0 && gpio_data->gpio != gpio)
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = gpio;
ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
IRQF_SHARED | IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
if (ret)
if (ret) {
dev_err(dev, "request_irq failed with error %d\n", ret);
} else {
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = gpio;
}
return ret ? ret : n;
}
......@@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led)
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
flush_work(&gpio_data->work);
free_irq(gpio_to_irq(gpio_data->gpio),led);
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
}
}
......
......@@ -288,7 +288,7 @@ static void maciisi_sync(struct adb_request *req)
}
/* This could be BAD... when the ADB controller doesn't respond
* for this long, it's probably not coming back :-( */
if(count >= 50) /* Hopefully shouldn't happen */
if (count > 50) /* Hopefully shouldn't happen */
printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
}
......
......@@ -531,7 +531,7 @@ static inline void smc_rcv(struct net_device *dev)
local_irq_restore(flags); \
__ret; \
})
#define smc_special_lock(lock, flags) spin_lock_irq(lock, flags)
#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
#else
#define smc_special_trylock(lock, flags) (1)
......
......@@ -70,6 +70,9 @@ struct virtnet_info
struct sk_buff_head recv;
struct sk_buff_head send;
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
/* Chain pages by the private ptr. */
struct page *pages;
};
......@@ -273,19 +276,22 @@ drop:
dev_kfree_skb(skb);
}
static void try_fill_recv_maxbufs(struct virtnet_info *vi)
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct scatterlist sg[2+MAX_SKB_FRAGS];
int num, err, i;
bool oom = false;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
struct virtio_net_hdr *hdr;
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
if (unlikely(!skb))
if (unlikely(!skb)) {
oom = true;
break;
}
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, MAX_PACKET_LEN);
......@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
f->page = get_a_page(vi, GFP_ATOMIC);
f->page = get_a_page(vi, gfp);
if (!f->page)
break;
......@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
return !oom;
}
static void try_fill_recv(struct virtnet_info *vi)
/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
struct scatterlist sg[1];
int err;
bool oom = false;
if (!vi->mergeable_rx_bufs) {
try_fill_recv_maxbufs(vi);
return;
}
if (!vi->mergeable_rx_bufs)
return try_fill_recv_maxbufs(vi, gfp);
for (;;) {
skb_frag_t *f;
skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
if (unlikely(!skb))
if (unlikely(!skb)) {
oom = true;
break;
}
skb_reserve(skb, NET_IP_ALIGN);
f = &skb_shinfo(skb)->frags[0];
f->page = get_a_page(vi, GFP_ATOMIC);
f->page = get_a_page(vi, gfp);
if (!f->page) {
oom = true;
kfree_skb(skb);
break;
}
......@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
return !oom;
}
static void skb_recv_done(struct virtqueue *rvq)
......@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
}
}
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
bool still_empty;
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
try_fill_recv(vi, GFP_KERNEL);
still_empty = (vi->num == 0);
napi_enable(&vi->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
schedule_delayed_work(&vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
......@@ -400,10 +428,10 @@ again:
received++;
}
/* FIXME: If we oom and completely run out of inbufs, we need
* to start a timer trying to fill more. */
if (vi->num < vi->max / 2)
try_fill_recv(vi);
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
schedule_delayed_work(&vi->refill, 0);
}
/* Out of packets? */
if (received < budget) {
......@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->vdev = vdev;
vdev->priv = vi;
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
/* If they give us a callback when all buffers are done, we don't need
* the timer. */
......@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
}
/* Last of all, set up some receive buffers. */
try_fill_recv(vi);
try_fill_recv(vi, GFP_KERNEL);
/* If we didn't even get one input buffer, we're useless. */
if (vi->num == 0) {
......@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free:
......@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
BUG_ON(vi->num != 0);
unregister_netdev(vi->dev);
cancel_delayed_work_sync(&vi->refill);
vdev->config->del_vqs(vi->vdev);
......
......@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
acpi_status status;
struct acpi_object_list input;
union acpi_object params[3];
char method[4] = "WM";
char method[5] = "WM";
if (!find_guid(guid_string, &wblock))
return AE_ERROR;
......@@ -328,8 +328,8 @@ struct acpi_buffer *out)
acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input, wc_input;
union acpi_object wc_params[1], wq_params[1];
char method[4];
char wc_method[4] = "WC";
char method[5];
char wc_method[5] = "WC";
if (!guid_string || !out)
return AE_BAD_PARAMETER;
......@@ -410,7 +410,7 @@ const struct acpi_buffer *in)
acpi_handle handle;
struct acpi_object_list input;
union acpi_object params[2];
char method[4] = "WS";
char method[5] = "WS";
if (!guid_string || !in)
return AE_BAD_DATA;
......
......@@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps)
}
pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
"pps%d", pps->id);
if (err)
if (IS_ERR(pps->dev))
goto del_cdev;
dev_set_drvdata(pps->dev, pps);
......
......@@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
mutex_lock(&tz->lock);
tz->ops->get_temp(tz, &temp);
if (tz->ops->get_temp(tz, &temp)) {
/* get_temp failed - retry it later */
printk(KERN_WARNING PREFIX "failed to read out thermal zone "
"%d\n", tz->id);
goto leave;
}
for (count = 0; count < tz->trips; count++) {
tz->ops->get_trip_type(tz, count, &trip_type);
......@@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
THERMAL_TRIPS_NONE);
tz->last_temperature = temp;
leave:
if (tz->passive)
thermal_zone_device_set_polling(tz, tz->passive_delay);
else if (tz->polling_delay)
......
......@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
xenfb_init_shared_page(info, fb_info);
ret = xenfb_connect_backend(dev, info);
if (ret < 0)
goto error;
ret = register_framebuffer(fb_info);
if (ret) {
fb_deferred_io_cleanup(fb_info);
......@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
}
info->fb_info = fb_info;
ret = xenfb_connect_backend(dev, info);
if (ret < 0)
goto error;
xenfb_make_preferred_console();
return 0;
......
......@@ -76,7 +76,7 @@ static const match_table_t tokens = {
* Return 0 upon success, -ERRNO upon failure.
*/
static int v9fs_parse_options(struct v9fs_session_info *v9ses)
static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
{
char *options;
substring_t args[MAX_OPT_ARGS];
......@@ -90,10 +90,10 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses)
v9ses->debug = 0;
v9ses->cache = 0;
if (!v9ses->options)
if (!opts)
return 0;
options = kstrdup(v9ses->options, GFP_KERNEL);
options = kstrdup(opts, GFP_KERNEL);
if (!options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
......@@ -206,24 +206,14 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->uid = ~0;
v9ses->dfltuid = V9FS_DEFUID;
v9ses->dfltgid = V9FS_DEFGID;
if (data) {
v9ses->options = kstrdup(data, GFP_KERNEL);
if (!v9ses->options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
retval = -ENOMEM;
goto error;
}
}
rc = v9fs_parse_options(v9ses);
rc = v9fs_parse_options(v9ses, data);
if (rc < 0) {
retval = rc;
goto error;
}
v9ses->clnt = p9_client_create(dev_name, v9ses->options);
v9ses->clnt = p9_client_create(dev_name, data);
if (IS_ERR(v9ses->clnt)) {
retval = PTR_ERR(v9ses->clnt);
v9ses->clnt = NULL;
......@@ -280,7 +270,6 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
__putname(v9ses->uname);
__putname(v9ses->aname);
kfree(v9ses->options);
}
/**
......
......@@ -85,7 +85,6 @@ struct v9fs_session_info {
unsigned int afid;
unsigned int cache;
char *options; /* copy of mount options */
char *uname; /* user name to mount as */
char *aname; /* name of remote hierarchy being mounted */
unsigned int maxdata; /* max data for client interface */
......
......@@ -171,7 +171,6 @@ int v9fs_uflags2omode(int uflags, int extended)
/**
* v9fs_blank_wstat - helper function to setup a 9P stat structure
* @v9ses: 9P session info (for determining extended mode)
* @wstat: structure to initialize
*
*/
......@@ -207,13 +206,18 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
struct inode *v9fs_get_inode(struct super_block *sb, int mode)
{
int err;
struct inode *inode;
struct v9fs_session_info *v9ses = sb->s_fs_info;
P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);
inode = new_inode(sb);
if (inode) {
if (!inode) {
P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
return ERR_PTR(-ENOMEM);
}
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
......@@ -230,10 +234,10 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
if (!v9fs_extended(v9ses)) {
P9_DPRINTK(P9_DEBUG_ERROR,
"special files without extended mode\n");
return ERR_PTR(-EINVAL);
err = -EINVAL;
goto error;
}
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
init_special_inode(inode, inode->i_mode, inode->i_rdev);
break;
case S_IFREG:
inode->i_op = &v9fs_file_inode_operations;
......@@ -243,7 +247,8 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
if (!v9fs_extended(v9ses)) {
P9_DPRINTK(P9_DEBUG_ERROR,
"extended modes used w/o 9P2000.u\n");
return ERR_PTR(-EINVAL);
err = -EINVAL;
goto error;
}
inode->i_op = &v9fs_symlink_inode_operations;
break;
......@@ -256,16 +261,17 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
inode->i_fop = &v9fs_dir_operations;
break;
default:
P9_DPRINTK(P9_DEBUG_ERROR,
"BAD mode 0x%x S_IFMT 0x%x\n",
P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
mode, mode & S_IFMT);
return ERR_PTR(-EINVAL);
}
} else {
P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
return ERR_PTR(-ENOMEM);
err = -EINVAL;
goto error;
}
return inode;
error:
iput(inode);
return ERR_PTR(err);
}
/*
......@@ -338,30 +344,25 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
ret = NULL;
st = p9_client_stat(fid);
if (IS_ERR(st)) {
err = PTR_ERR(st);
st = NULL;
goto error;
}
if (IS_ERR(st))
return ERR_CAST(st);
umode = p9mode2unixmode(v9ses, st->mode);
ret = v9fs_get_inode(sb, umode);
if (IS_ERR(ret)) {
err = PTR_ERR(ret);
ret = NULL;
goto error;
}
v9fs_stat2inode(st, ret, sb);
ret->i_ino = v9fs_qid2ino(&st->qid);
p9stat_free(st);
kfree(st);
return ret;
error:
p9stat_free(st);
kfree(st);
if (ret)
iput(ret);
return ERR_PTR(err);
}
......@@ -403,9 +404,9 @@ v9fs_open_created(struct inode *inode, struct file *file)
* @v9ses: session information
* @dir: directory that dentry is being created in
* @dentry: dentry that is being created
* @extension: 9p2000.u extension string to support devices, etc.
* @perm: create permissions
* @mode: open mode
* @extension: 9p2000.u extension string to support devices, etc.
*
*/
static struct p9_fid *
......@@ -470,7 +471,10 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
dentry->d_op = &v9fs_dentry_operations;
d_instantiate(dentry, inode);
v9fs_fid_add(dentry, fid);
err = v9fs_fid_add(dentry, fid);
if (err < 0)
goto error;
return ofid;
error:
......
......@@ -81,7 +81,7 @@ static int v9fs_set_super(struct super_block *s, void *data)
static void
v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
int flags)
int flags, void *data)
{
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
......@@ -91,6 +91,8 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
MS_NOATIME;
save_mount_options(sb, data);
}
/**
......@@ -113,14 +115,11 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
struct v9fs_session_info *v9ses = NULL;
struct p9_wstat *st = NULL;
int mode = S_IRWXUGO | S_ISVTX;
uid_t uid = current_fsuid();
gid_t gid = current_fsgid();
struct p9_fid *fid;
int retval = 0;
P9_DPRINTK(P9_DEBUG_VFS, " \n");
st = NULL;
v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
return -ENOMEM;
......@@ -142,7 +141,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(sb);
goto free_stat;
}
v9fs_fill_super(sb, v9ses, flags);
v9fs_fill_super(sb, v9ses, flags, data);
inode = v9fs_get_inode(sb, S_IFDIR | mode);
if (IS_ERR(inode)) {
......@@ -150,9 +149,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
goto release_sb;
}
inode->i_uid = uid;
inode->i_gid = gid;
root = d_alloc_root(inode);
if (!root) {
iput(inode);
......@@ -173,10 +169,8 @@ P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
simple_set_mnt(mnt, sb);
return 0;
release_sb:
deactivate_locked_super(sb);
free_stat:
p9stat_free(st);
kfree(st);
clunk_fid:
......@@ -185,7 +179,12 @@ clunk_fid:
close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return retval;
release_sb:
p9stat_free(st);
kfree(st);
deactivate_locked_super(sb);
return retval;
}
......@@ -207,24 +206,10 @@ static void v9fs_kill_super(struct super_block *s)
v9fs_session_close(v9ses);
kfree(v9ses);
s->s_fs_info = NULL;
P9_DPRINTK(P9_DEBUG_VFS, "exiting kill_super\n");
}
/**
* v9fs_show_options - Show mount options in /proc/mounts
* @m: seq_file to write to
* @mnt: mount descriptor
*
*/
static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt)
{
struct v9fs_session_info *v9ses = mnt->mnt_sb->s_fs_info;
seq_printf(m, "%s", v9ses->options);
return 0;
}
static void
v9fs_umount_begin(struct super_block *sb)
{
......@@ -237,7 +222,7 @@ v9fs_umount_begin(struct super_block *sb)
static const struct super_operations v9fs_super_ops = {
.statfs = simple_statfs,
.clear_inode = v9fs_clear_inode,
.show_options = v9fs_show_options,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
};
......
......@@ -134,9 +134,16 @@ static int afs_readpage(struct file *file, struct page *page)
inode = page->mapping->host;
ASSERT(file != NULL);
if (file) {
key = file->private_data;
ASSERT(key != NULL);
} else {
key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
if (IS_ERR(key)) {
ret = PTR_ERR(key);
goto error_nokey;
}
}
_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
......@@ -207,12 +214,17 @@ static int afs_readpage(struct file *file, struct page *page)
unlock_page(page);
}
if (!file)
key_put(key);
_leave(" = 0");
return 0;
error:
SetPageError(page);
unlock_page(page);
if (!file)
key_put(key);
error_nokey:
_leave(" = %d", ret);
return ret;
}
......
......@@ -105,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
return send;
}
/*
* This is NEVER supposed to be called. Inotify marks should either have been
* removed from the idr when the watch was removed or in the
* fsnotify_destroy_mark_by_group() call when the inotify instance was being
* torn down. This is only called if the idr is about to be freed but there
* are still marks in it.
*/
static int idr_callback(int id, void *p, void *data)
{
BUG();
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry;
static bool warned = false;
if (warned)
return 0;
warned = false;
entry = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
"idr. Probably leaking memory\n", id, p, data);
/*
* I'm taking the liberty of assuming that the mark in question is a
* valid address and I'm dereferencing it. This might help to figure
* out why we got here and the panic is no worse than the original
* BUG() that was here.
*/
if (entry)
printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
entry->group, entry->inode, ientry->wd);
return 0;
}
static void inotify_free_group_priv(struct fsnotify_group *group)
{
/* ideally the idr is empty and we won't hit the BUG in the callback */
idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
idr_for_each(&group->inotify_data.idr, idr_callback, group);
idr_remove_all(&group->inotify_data.idr);
idr_destroy(&group->inotify_data.idr);
}
......
......@@ -47,9 +47,6 @@
static struct vfsmount *inotify_mnt __read_mostly;
/* this just sits here and wastes global memory. used to just pad userspace messages with zeros */
static struct inotify_event nul_inotify_event;
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
......@@ -199,8 +196,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
inotify_free_event_priv(fsn_priv);
}
/* round up event->name_len so it is a multiple of event_size */
name_len = roundup(event->name_len, event_size);
/* round up event->name_len so it is a multiple of event_size
* plus an extra byte for the terminating '\0'.
*/
name_len = roundup(event->name_len + 1, event_size);
inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask);
......@@ -224,8 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
return -EFAULT;
buf += event->name_len;
/* fill userspace with 0's from nul_inotify_event */
if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
/* fill userspace with 0's */
if (clear_user(buf, len_to_zero))
return -EFAULT;
buf += len_to_zero;
event_size += name_len;
......@@ -364,20 +363,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark_entry *ientry)
{
struct idr *idr;
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *found_ientry;
int wd;
spin_lock(&group->inotify_data.idr_lock);
idr = &group->inotify_data.idr;
idr_remove(idr, ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
wd = ientry->wd;
if (wd == -1)
goto out;
entry = idr_find(&group->inotify_data.idr, wd);
if (unlikely(!entry))
goto out;
found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
if (unlikely(found_ientry != ientry)) {
/* We found an entry in the idr with the right wd, but it's
* not the entry we were told to remove. eparis seriously
* fucked up somewhere. */
WARN_ON(1);
ientry->wd = -1;
goto out;
}
/* One ref for being in the idr, one ref held by the caller */
BUG_ON(atomic_read(&entry->refcnt) < 2);
idr_remove(idr, wd);
ientry->wd = -1;
/* removed from the idr, drop that ref */
fsnotify_put_mark(entry);
out:
spin_unlock(&group->inotify_data.idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
* internal reference help on the mark because it is in the idr.
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
struct fsnotify_group *group)
......@@ -417,9 +449,6 @@ skip_send_ignore:
/* remove this entry from the idr */
inotify_remove_from_idr(group, ientry);
/* removed from idr, drop that reference */
fsnotify_put_mark(entry);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
......@@ -431,15 +460,76 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct fsnotify_mark_entry *entry = NULL;
struct fsnotify_mark_entry *entry;
struct inotify_inode_mark_entry *ientry;
struct inotify_inode_mark_entry *tmp_ientry;
int ret = 0;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!mask))
return -EINVAL;
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (!entry)
return -ENOENT;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
spin_lock(&entry->lock);
old_mask = entry->mask;
if (add) {
entry->mask |= mask;
new_mask = entry->mask;
} else {
entry->mask = mask;
new_mask = entry->mask;
}
spin_unlock(&entry->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this entry than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* more bits in this entry than the group? */
int do_group = (new_mask & ~group->mask);
/* update the inode with this new entry */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
/* update the group mask with the new mask */
if (dropped || do_group)
fsnotify_recalc_group_mask(group);
}
/* return the wd */
ret = ientry->wd;
/* match the get from fsnotify_find_mark_entry() */
fsnotify_put_mark(entry);
return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct inotify_inode_mark_entry *tmp_ientry;
__u32 mask;
__u32 old_mask, new_mask;
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
......@@ -449,17 +539,11 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!tmp_ientry))
return -ENOMEM;
/* we set the mask at the end after attaching it */
fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
tmp_ientry->fsn_entry.mask = mask;
tmp_ientry->wd = -1;
find_entry:
spin_lock(&inode->i_lock);
entry = fsnotify_find_mark_entry(group, inode);
spin_unlock(&inode->i_lock);
if (entry) {
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
} else {
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
......@@ -474,80 +558,59 @@ retry:
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
/* idr was out of memory allocate and try again */
if (ret == -EAGAIN)
goto retry;
goto out_err;
}
/* we put the mark on the idr, take a reference */
fsnotify_get_mark(&tmp_ientry->fsn_entry);
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_ientry);
if (ret == -EEXIST)
goto find_entry;
goto out_err;
}
/* tmp_ientry has been added to the inode, so we are all set up.
* now we just need to make sure tmp_ientry doesn't get freed and
* we need to set up entry and ientry so the generic code can
* do its thing. */
ientry = tmp_ientry;
entry = &ientry->fsn_entry;
tmp_ientry = NULL;
/* update the idr hint, who cares about races, it's just a hint */
group->inotify_data.last_wd = tmp_ientry->wd;
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
/* update the idr hint */
group->inotify_data.last_wd = ientry->wd;
/* we put the mark on the idr, take a reference */
fsnotify_get_mark(entry);
}
ret = ientry->wd;
spin_lock(&entry->lock);
old_mask = entry->mask;
if (add) {
entry->mask |= mask;
new_mask = entry->mask;
} else {
entry->mask = mask;
new_mask = entry->mask;
}
spin_unlock(&entry->lock);
/* return the watch descriptor for this new entry */
ret = tmp_ientry->wd;
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this entry than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* more bits in this entry than the group? */
int do_group = (new_mask & ~group->mask);
/* match the ref from fsnotify_init_markentry() */
fsnotify_put_mark(&tmp_ientry->fsn_entry);
/* update the inode with this new entry */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
out_err:
if (ret < 0)
kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
/* update the group mask with the new mask */
if (dropped || do_group)
fsnotify_recalc_group_mask(group);
}
return ret;
}
/* this either matches fsnotify_find_mark_entry, or init_mark_entry
* depending on which path we took... */
fsnotify_put_mark(entry);
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
int ret = 0;
out_err:
/* could be an error, could be that we found an existing mark */
if (tmp_ientry) {
/* on the idr but didn't make it on the inode */
if (tmp_ientry->wd != -1)
inotify_remove_from_idr(group, tmp_ientry);
kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
}
retry:
/* try to update an existing watch with the new arg */
ret = inotify_update_existing_watch(group, inode, arg);
/* no mark present, try to add a new one */
if (ret == -ENOENT)
ret = inotify_new_watch(group, inode, arg);
/*
* inotify_new_watch could race with another thread which did an
* inotify_new_watch between the update_existing and the add watch
* here, go back and try to update an existing mark again.
*/
if (ret == -EEXIST)
goto retry;
return ret;
}
......
......@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
cpumask_var_t shared_cpu_map;
int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
int state);
int state, bool force);
u32 address;
u8 duty_offset;
......@@ -321,7 +321,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
/* in processor_throttling.c */
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
int acpi_processor_get_throttling_info(struct acpi_processor *pr);
extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
extern int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force);
extern const struct file_operations acpi_processor_throttling_fops;
extern void acpi_processor_throttling_init(void);
/* in processor_idle.c */
......
......@@ -21,7 +21,7 @@ struct flex_array {
struct {
int element_size;
int total_nr_elements;
struct flex_array_part *parts[0];
struct flex_array_part *parts[];
};
/*
* This little trick makes sure that
......@@ -36,12 +36,14 @@ struct flex_array {
.total_nr_elements = (total), \
} } }
struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags);
int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags);
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int end, gfp_t flags);
void flex_array_free(struct flex_array *fa);
void flex_array_free_parts(struct flex_array *fa);
int flex_array_put(struct flex_array *fa, int element_nr, void *src,
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags);
void *flex_array_get(struct flex_array *fa, int element_nr);
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
#endif /* _FLEX_ARRAY_H */
......@@ -815,11 +815,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
struct signal_struct *sig;
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
atomic_inc(&current->signal->live);
if (clone_flags & CLONE_THREAD)
return 0;
}
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
tsk->signal = sig;
......@@ -877,16 +874,6 @@ void __cleanup_signal(struct signal_struct *sig)
kmem_cache_free(signal_cachep, sig);
}
static void cleanup_signal(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
atomic_dec(&sig->live);
if (atomic_dec_and_test(&sig->count))
__cleanup_signal(sig);
}
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
......@@ -1242,6 +1229,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
if (clone_flags & CLONE_THREAD) {
atomic_inc(&current->signal->count);
atomic_inc(&current->signal->live);
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
}
......@@ -1285,7 +1274,8 @@ bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
cleanup_signal(p);
if (!(clone_flags & CLONE_THREAD))
__cleanup_signal(p->signal);
bad_fork_cleanup_sighand:
__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
......
......@@ -909,16 +909,18 @@ void __symbol_put(const char *symbol)
}
EXPORT_SYMBOL(__symbol_put);
/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
struct module *modaddr;
unsigned long a = (unsigned long)dereference_function_descriptor(addr);
if (core_kernel_text((unsigned long)addr))
if (core_kernel_text(a))
return;
/* module_text_address is safe here: we're supposed to have reference
* to module from symbol_get, so it can't go away. */
modaddr = __module_text_address((unsigned long)addr);
modaddr = __module_text_address(a);
BUG_ON(!modaddr);
module_put(modaddr);
}
......@@ -2353,6 +2355,7 @@ static noinline struct module *load_module(void __user *umod,
if (err < 0)
goto unlink;
add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
if (mod->sect_attrs)
add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
/* Get rid of temporary copy */
......
......@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
* capacity in the base structure. Also note that no effort is made
* to efficiently pack objects across page boundaries.
*/
struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags)
{
struct flex_array *ret;
int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
......@@ -115,16 +116,14 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
return ret;
}
static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
static int fa_element_to_part_nr(struct flex_array *fa,
unsigned int element_nr)
{
return element_nr / __elements_per_part(fa->element_size);
}
/**
* flex_array_free_parts - just free the second-level pages
* @src: address of data to copy into the array
* @element_nr: index of the position in which to insert
* the new element.
*
* This is to be used in cases where the base 'struct flex_array'
* has been statically allocated and should not be free.
......@@ -146,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
kfree(fa);
}
static int fa_index_inside_part(struct flex_array *fa, int element_nr)
static unsigned int index_inside_part(struct flex_array *fa,
unsigned int element_nr)
{
return element_nr % __elements_per_part(fa->element_size);
}
unsigned int part_offset;
static int index_inside_part(struct flex_array *fa, int element_nr)
{
int part_offset = fa_index_inside_part(fa, element_nr);
part_offset = element_nr % __elements_per_part(fa->element_size);
return part_offset * fa->element_size;
}
......@@ -188,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
*
* Locking must be provided by the caller.
*/
int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags)
{
int part_nr = fa_element_to_part_nr(fa, element_nr);
struct flex_array_part *part;
......@@ -198,10 +196,11 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
return -ENOSPC;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else
else {
part = __fa_get_part(fa, part_nr, flags);
if (!part)
return -ENOMEM;
}
dst = &part->elements[index_inside_part(fa, element_nr)];
memcpy(dst, src, fa->element_size);
return 0;
......@@ -219,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
*
* Locking must be provided by the caller.
*/
int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int end, gfp_t flags)
{
int start_part;
int end_part;
......@@ -250,18 +250,19 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
*
* Locking must be provided by the caller.
*/
void *flex_array_get(struct flex_array *fa, int element_nr)
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
int part_nr = fa_element_to_part_nr(fa, element_nr);
struct flex_array_part *part;
if (element_nr >= fa->total_nr_elements)
return NULL;
if (!fa->parts[part_nr])
return NULL;
if (elements_fit_in_base(fa))
part = (struct flex_array_part *)&fa->parts[0];
else
else {
part = fa->parts[part_nr];
if (!part)
return NULL;
}
return &part->elements[index_inside_part(fa, element_nr)];
}
......@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
return lmb.memory.size;
}
u64 __init lmb_end_of_DRAM(void)
u64 lmb_end_of_DRAM(void)
{
int idx = lmb.memory.cnt - 1;
......
......@@ -358,6 +358,7 @@ static int page_referenced_one(struct page *page,
*/
if (vma->vm_flags & VM_LOCKED) {
*mapcount = 1; /* break early from loop */
*vm_flags |= VM_LOCKED;
goto out_unmap;
}
......
......@@ -631,9 +631,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
referenced = page_referenced(page, 1,
sc->mem_cgroup, &vm_flags);
/* In active use or really unfreeable? Activate it. */
/*
* In active use or really unfreeable? Activate it.
* If page which have PG_mlocked lost isoltation race,
* try_to_unmap moves it to unevictable list
*/
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
referenced && page_mapping_inuse(page))
referenced && page_mapping_inuse(page)
&& !(vm_flags & VM_LOCKED))
goto activate_locked;
/*
......
......@@ -60,9 +60,9 @@ static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
/**
* v9fs_parse_options - parse mount options into session structure
* @options: options string passed from mount
* @v9ses: existing v9fs session information
* parse_options - parse mount options into client structure
* @opts: options string passed from mount
* @clnt: existing v9fs client information
*
* Return 0 upon success, -ERRNO upon failure
*/
......@@ -232,7 +232,7 @@ EXPORT_SYMBOL(p9_tag_lookup);
/**
* p9_tag_init - setup tags structure and contents
* @tags: tags structure from the client struct
* @c: v9fs client struct
*
* This initializes the tags structure for each client instance.
*
......@@ -258,7 +258,7 @@ error:
/**
* p9_tag_cleanup - cleans up tags structure and reclaims resources
* @tags: tags structure from the client struct
* @c: v9fs client struct
*
* This frees resources associated with the tags structure
*
......@@ -411,14 +411,9 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (c->dotu)
err = -ecode;
if (!err) {
if (!err || !IS_ERR_VALUE(err))
err = p9_errstr2errno(ename, strlen(ename));
/* string match failed */
if (!err)
err = -ESERVERFAULT;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
kfree(ename);
......@@ -430,8 +425,8 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
/**
* p9_client_flush - flush (cancel) a request
* c: client state
* req: request to cancel
* @c: client state
* @oldreq: request to cancel
*
* This sents a flush for a particular requests and links
* the flush request to the original request. The current
......
......@@ -239,7 +239,7 @@ int p9_errstr2errno(char *errstr, int len)
errstr[len] = 0;
printk(KERN_ERR "%s: server reported unknown error %s\n",
__func__, errstr);
errno = 1;
errno = ESERVERFAULT;
}
return -errno;
......
......@@ -119,8 +119,8 @@ struct p9_poll_wait {
* @wpos: write position for current frame
* @wsize: amount of data to write for current frame
* @wbuf: current write buffer
* @poll_pending_link: pending links to be polled per conn
* @poll_wait: array of wait_q's for various worker threads
* @poll_waddr: ????
* @pt: poll state
* @rq: current read work
* @wq: current write work
......@@ -700,9 +700,9 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
}
/**
* parse_options - parse mount options into session structure
* @options: options string passed from mount
* @opts: transport-specific structure to parse options into
* parse_opts - parse mount options into p9_fd_opts structure
* @params: options string passed from mount
* @opts: fd transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
......
......@@ -67,14 +67,15 @@
* @pd: Protection Domain pointer
* @qp: Queue Pair pointer
* @cq: Completion Queue pointer
* @dm_mr: DMA Memory Region pointer
* @lkey: The local access only memory region key
* @timeout: Number of uSecs to wait for connection management events
* @sq_depth: The depth of the Send Queue
* @sq_sem: Semaphore for the SQ
* @rq_depth: The depth of the Receive Queue.
* @rq_count: Count of requests in the Receive Queue.
* @addr: The remote peer's address
* @req_lock: Protects the active request list
* @send_wait: Wait list when the SQ fills up
* @cm_done: Completion event for connection management tracking
*/
struct p9_trans_rdma {
......@@ -154,9 +155,9 @@ static match_table_t tokens = {
};
/**
* parse_options - parse mount options into session structure
* @options: options string passed from mount
* @opts: transport-specific structure to parse options into
* parse_opts - parse mount options into rdma options structure
* @params: options string passed from mount
* @opts: rdma transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
......
......@@ -57,11 +57,9 @@ static int chan_index;
* @initialized: whether the channel is initialized
* @inuse: whether the channel is in use
* @lock: protects multiple elements within this structure
* @client: client instance
* @vdev: virtio dev associated with this channel
* @vq: virtio queue associated with this channel
* @tagpool: accounting for tag ids (and request slots)
* @reqs: array of request slots
* @max_tag: current number of request_slots allocated
* @sg: scatter gather list which is used to pack a request (protected?)
*
* We keep all per-channel information in a structure.
......@@ -92,7 +90,7 @@ static unsigned int rest_of_page(void *data)
/**
* p9_virtio_close - reclaim resources of a channel
* @trans: transport state
* @client: client instance
*
* This reclaims a channel by freeing its resources and
* reseting its inuse flag.
......@@ -181,9 +179,8 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
/**
* p9_virtio_request - issue a request
* @t: transport state
* @tc: &p9_fcall request to transmit
* @rc: &p9_fcall to put reponse into
* @client: client instance issuing the request
* @req: request to be issued
*
*/
......
......@@ -813,6 +813,8 @@ int ip_append_data(struct sock *sk,
inet->cork.addr = ipc->addr;
}
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
/*
* We steal reference to this route, caller should not release it
*/
......
......@@ -262,6 +262,8 @@ void ima_counts_put(struct path *path, int mask)
else if (mask & (MAY_READ | MAY_EXEC))
iint->readcount--;
mutex_unlock(&iint->mutex);
kref_put(&iint->refcount, iint_free);
}
/*
......@@ -291,6 +293,8 @@ void ima_counts_get(struct file *file)
if (file->f_mode & FMODE_WRITE)
iint->writecount++;
mutex_unlock(&iint->mutex);
kref_put(&iint->refcount, iint_free);
}
EXPORT_SYMBOL_GPL(ima_counts_get);
......