linux / linux-davinci, commit cd69a7d5

Merge branch 'rt/base' into rt/head

Authored Aug 28, 2009 by Thomas Gleixner
Parents: b4acb0b6 f1244df1

Showing 43 changed files with 559 additions and 469 deletions (+559 / -469)

Documentation/filesystems/9p.txt            +3    -0
Makefile                                    +1    -1
arch/m68k/amiga/config.c                    +4    -2
arch/m68k/include/asm/motorola_pgalloc.h    +4    -6
arch/m68k/include/asm/pgtable_mm.h          +1    -2
arch/m68k/include/asm/unistd.h              +3    -1
arch/m68k/kernel/entry.S                    +2    -0
arch/m68knommu/kernel/syscalltable.S        +2    -0
arch/powerpc/configs/ps3_defconfig          +75   -136
arch/powerpc/platforms/ps3/time.c           +4    -0
drivers/acpi/processor_core.c               +6    -0
drivers/acpi/processor_thermal.c            +3    -3
drivers/acpi/processor_throttling.c         +16   -14
drivers/leds/ledtrig-gpio.c                 +17   -7
drivers/macintosh/via-maciisi.c             +1    -1
drivers/net/smc91x.c                        +1    -1
drivers/net/virtio_net.c                    +46   -15
drivers/platform/x86/wmi.c                  +4    -4
drivers/pps/pps.c                           +1    -1
drivers/thermal/thermal_sys.c               +8    -1
drivers/video/xen-fbfront.c                 +4    -4
fs/9p/v9fs.c                                +5    -16
fs/9p/v9fs.h                                +0    -1
fs/9p/vfs_inode.c                           +65   -61
fs/9p/vfs_super.c                           +12   -27
fs/afs/file.c                               +15   -3
fs/notify/inotify/inotify_fsnotify.c        +31   -2
fs/notify/inotify/inotify_user.c            +146  -83
include/acpi/processor.h                    +3    -2
include/linux/flex_array.h                  +7    -5
kernel/fork.c                               +5    -15
kernel/module.c                             +6    -3
lib/flex_array.c                            +21   -20
lib/lmb.c                                   +1    -1
mm/rmap.c                                   +1    -0
mm/vmscan.c                                 +7    -2
net/9p/client.c                             +8    -13
net/9p/error.c                              +1    -1
net/9p/trans_fd.c                           +4    -4
net/9p/trans_rdma.c                         +5    -4
net/9p/trans_virtio.c                       +4    -7
net/ipv4/ip_output.c                        +2    -0
security/integrity/ima/ima_main.c           +4    -0

Documentation/filesystems/9p.txt
@@ -123,6 +123,9 @@ available from the same CVS repository.
 There are user and developer mailing lists available through the v9fs project
 on sourceforge (http://sourceforge.net/projects/v9fs).
+A stand-alone version of the module (which should build for any 2.6 kernel)
+is available via (http://github.com/ericvh/9p-sac/tree/master)
+
 News and other information is maintained on SWiK (http://swik.net/v9fs).
 Bug reports may be issued through the kernel.org bugzilla

Makefile
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 31
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Man-Eating Seals of Antiquity

 # *DOCUMENTATION*

arch/m68k/amiga/config.c
@@ -574,10 +574,11 @@ static int a2000_hwclk(int op, struct rtc_time *t)
     tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD;

-    while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--) {
+    while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
         tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
         udelay(70);
         tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
+        --cnt;
     }

     if (!cnt)
@@ -649,10 +650,11 @@ static int amiga_set_clock_mmss(unsigned long nowtime)
     tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;

-    while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt--) {
+    while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
         tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
         udelay(70);
         tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
+        --cnt;
     }

     if (!cnt)

arch/m68k/include/asm/motorola_pgalloc.h
@@ -36,12 +36,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
         return NULL;

     pte = kmap(page);
-    if (pte) {
-        __flush_page_to_ram(pte);
-        flush_tlb_kernel_page(pte);
-        nocache_page(pte);
-    }
-    kunmap(pte);
+    __flush_page_to_ram(pte);
+    flush_tlb_kernel_page(pte);
+    nocache_page(pte);
+    kunmap(page);
     pgtable_page_ctor(page);
     return page;
 }

arch/m68k/include/asm/pgtable_mm.h
@@ -135,8 +135,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #endif

 #ifndef __ASSEMBLY__
-#include <asm-generic/pgtable.h>
-
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
@@ -154,6 +152,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
      ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
      : (prot)))

+#include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */

 /*

arch/m68k/include/asm/unistd.h
@@ -334,10 +334,12 @@
 #define __NR_inotify_init1    328
 #define __NR_preadv           329
 #define __NR_pwritev          330
+#define __NR_rt_tgsigqueueinfo 331
+#define __NR_perf_counter_open 332

 #ifdef __KERNEL__

-#define NR_syscalls           331
+#define NR_syscalls           333

 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR

arch/m68k/kernel/entry.S
@@ -755,4 +755,6 @@ sys_call_table:
     .long sys_inotify_init1
     .long sys_preadv
     .long sys_pwritev        /* 330 */
+    .long sys_rt_tgsigqueueinfo
+    .long sys_perf_counter_open

arch/m68knommu/kernel/syscalltable.S
@@ -349,6 +349,8 @@ ENTRY(sys_call_table)
     .long sys_inotify_init1
     .long sys_preadv
     .long sys_pwritev        /* 330 */
+    .long sys_rt_tgsigqueueinfo
+    .long sys_perf_counter_open

     .rept NR_syscalls-(.-sys_call_table)/4
         .long sys_ni_syscall

arch/powerpc/configs/ps3_defconfig
(diff collapsed in the original view: +75 -136, not shown)

arch/powerpc/platforms/ps3/time.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>

+#include <asm/firmware.h>
 #include <asm/rtc.h>
 #include <asm/lv1call.h>
 #include <asm/ps3.h>
@@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void)
 {
     struct platform_device *pdev;

+    if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+        return -ENODEV;
+
     pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
     if (IS_ERR(pdev))
         return PTR_ERR(pdev);

drivers/acpi/processor_core.c
@@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void)
 {
     int result = 0;

+    if (acpi_disabled)
+        return 0;
+
     memset(&errata, 0, sizeof(errata));

 #ifdef CONFIG_SMP
@@ -1197,6 +1200,9 @@ out_proc:
 static void __exit acpi_processor_exit(void)
 {
+    if (acpi_disabled)
+        return;
+
     acpi_processor_ppc_exit();

     acpi_thermal_cpufreq_exit();

drivers/acpi/processor_thermal.c
@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
         if (pr->limit.thermal.tx > tx)
             tx = pr->limit.thermal.tx;

-        result = acpi_processor_set_throttling(pr, tx);
+        result = acpi_processor_set_throttling(pr, tx, false);
         if (result)
             goto end;
     }
@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
     if (state <= max_pstate) {
         if (pr->flags.throttling && pr->throttling.state)
-            result = acpi_processor_set_throttling(pr, 0);
+            result = acpi_processor_set_throttling(pr, 0, false);
         cpufreq_set_cur_state(pr->id, state);
     } else {
         cpufreq_set_cur_state(pr->id, max_pstate);
         result = acpi_processor_set_throttling(pr,
-                state - max_pstate);
+                state - max_pstate, false);
     }
     return result;
 }

drivers/acpi/processor_throttling.c
@@ -62,7 +62,8 @@ struct throttling_tstate {
 #define THROTTLING_POSTCHANGE      (2)

 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+                    int state, bool force);

 static int acpi_processor_update_tsd_coord(void)
 {
@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
          */
         target_state = throttling_limit;
     }
-    return acpi_processor_set_throttling(pr, target_state);
+    return acpi_processor_set_throttling(pr, target_state, false);
 }

 /*
@@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
     if (ret >= 0) {
         state = acpi_get_throttling_state(pr, value);
         if (state == -1) {
-            ACPI_WARNING((AE_INFO,
-                "Invalid throttling state, reset"));
+            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                "Invalid throttling state, reset\n"));
             state = 0;
-            ret = acpi_processor_set_throttling(pr, state);
+            ret = acpi_processor_set_throttling(pr, state, true);
             if (ret)
                 return ret;
         }
@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
 }

 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
-                          int state)
+                          int state, bool force)
 {
     u32 value = 0;
     u32 duty_mask = 0;
@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
     if (!pr->flags.throttling)
         return -ENODEV;

-    if (state == pr->throttling.state)
+    if (!force && (state == pr->throttling.state))
         return 0;

     if (state < pr->throttling_platform_limit)
@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 }

 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
-                         int state)
+                         int state, bool force)
 {
     int ret;
     acpi_integer value;
@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
     if (!pr->flags.throttling)
         return -ENODEV;

-    if (state == pr->throttling.state)
+    if (!force && (state == pr->throttling.state))
         return 0;

     if (state < pr->throttling_platform_limit)
@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
     return 0;
 }

-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+                    int state, bool force)
 {
     cpumask_var_t saved_mask;
     int ret = 0;
@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
         /* FIXME: use work_on_cpu() */
         set_cpus_allowed_ptr(current, cpumask_of(pr->id));
         ret = p_throttling->acpi_processor_set_throttling(pr,
-                        t_state.target_state);
+                        t_state.target_state, force);
     } else {
         /*
          * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
             set_cpus_allowed_ptr(current, cpumask_of(i));
             ret = match_pr->throttling.
                 acpi_processor_set_throttling(
-                match_pr, t_state.target_state);
+                match_pr, t_state.target_state, force);
         }
     }
     /*
@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
         ACPI_DEBUG_PRINT((ACPI_DB_INFO,
             "Disabling throttling (was T%d)\n",
             pr->throttling.state));
-        result = acpi_processor_set_throttling(pr, 0);
+        result = acpi_processor_set_throttling(pr, 0, false);
         if (result)
             goto end;
     }
@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
     if (strcmp(tmpbuf, charp) != 0)
         return -EINVAL;

-    result = acpi_processor_set_throttling(pr, state_val);
+    result = acpi_processor_set_throttling(pr, state_val, false);
     if (result)
         return result;

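Note: the recurring change in this file is the new "force" argument: ordinary callers pass false and keep the old "already in this state" short-circuit, while the reset path passes true so the hardware is reprogrammed even when the cached state matches. A minimal user-space sketch of that pattern follows; the names (set_state, cached_state) are illustrative only, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

static int cached_state;

/* Mirrors the "!force && (state == cached)" short-circuit added above. */
static int set_state(int state, bool force)
{
    if (!force && state == cached_state)
        return 0;               /* nothing to do, trust the cached value */
    printf("programming hardware to state %d\n", state);
    cached_state = state;
    return 0;
}

int main(void)
{
    set_state(0, false);        /* skipped: cache already says state 0 */
    set_state(0, true);         /* forced: reprograms despite the cache */
    return 0;
}
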
drivers/leds/ledtrig-gpio.c
@@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
     gpio_data->inverted = !!inverted;

+    /* After inverting, we need to update the LED. */
+    schedule_work(&gpio_data->work);
+
     return n;
 }
 static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
@@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
         return -EINVAL;
     }

+    if (gpio_data->gpio == gpio)
+        return n;
+
     if (!gpio) {
-        free_irq(gpio_to_irq(gpio_data->gpio), led);
+        if (gpio_data->gpio != 0)
+            free_irq(gpio_to_irq(gpio_data->gpio), led);
+        gpio_data->gpio = 0;
         return n;
     }

-    if (gpio_data->gpio > 0 && gpio_data->gpio != gpio)
-        free_irq(gpio_to_irq(gpio_data->gpio), led);
-
-    gpio_data->gpio = gpio;
     ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
             IRQF_SHARED | IRQF_TRIGGER_RISING
             | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
-    if (ret)
+    if (ret) {
         dev_err(dev, "request_irq failed with error %d\n", ret);
+    } else {
+        if (gpio_data->gpio != 0)
+            free_irq(gpio_to_irq(gpio_data->gpio), led);
+        gpio_data->gpio = gpio;
+    }

     return ret ? ret : n;
 }
@@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led)
         device_remove_file(led->dev, &dev_attr_inverted);
         device_remove_file(led->dev, &dev_attr_desired_brightness);
         flush_work(&gpio_data->work);
-        free_irq(gpio_to_irq(gpio_data->gpio), led);
+        if (gpio_data->gpio != 0)
+            free_irq(gpio_to_irq(gpio_data->gpio), led);
         kfree(gpio_data);
     }
 }

drivers/macintosh/via-maciisi.c
@@ -288,7 +288,7 @@ static void maciisi_sync(struct adb_request *req)
     }
     /* This could be BAD... when the ADB controller doesn't respond
      * for this long, it's probably not coming back :-( */
-    if (count >= 50) /* Hopefully shouldn't happen */
+    if (count > 50) /* Hopefully shouldn't happen */
         printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
 }

drivers/net/smc91x.c
@@ -531,7 +531,7 @@ static inline void smc_rcv(struct net_device *dev)
     local_irq_restore(flags);                    \
     __ret;                                       \
 })
-#define smc_special_lock(lock, flags)   spin_lock_irq(lock, flags)
+#define smc_special_lock(lock, flags)   spin_lock_irqsave(lock, flags)
 #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
 #else
 #define smc_special_trylock(lock, flags) (1)

drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
     struct sk_buff_head recv;
     struct sk_buff_head send;

+    /* Work struct for refilling if we run low on memory. */
+    struct delayed_work refill;
+
     /* Chain pages by the private ptr. */
     struct page *pages;
 };
@@ -273,19 +276,22 @@ drop:
     dev_kfree_skb(skb);
 }

-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
     struct sk_buff *skb;
     struct scatterlist sg[2+MAX_SKB_FRAGS];
     int num, err, i;
+    bool oom = false;

     sg_init_table(sg, 2+MAX_SKB_FRAGS);
     for (;;) {
         struct virtio_net_hdr *hdr;

         skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-        if (unlikely(!skb))
+        if (unlikely(!skb)) {
+            oom = true;
             break;
+        }

         skb_reserve(skb, NET_IP_ALIGN);
         skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
         if (vi->big_packets) {
             for (i = 0; i < MAX_SKB_FRAGS; i++) {
                 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-                f->page = get_a_page(vi, GFP_ATOMIC);
+                f->page = get_a_page(vi, gfp);
                 if (!f->page)
                     break;
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
     if (unlikely(vi->num > vi->max))
         vi->max = vi->num;
     vi->rvq->vq_ops->kick(vi->rvq);
+    return !oom;
 }

-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
     struct sk_buff *skb;
     struct scatterlist sg[1];
     int err;
+    bool oom = false;

-    if (!vi->mergeable_rx_bufs) {
-        try_fill_recv_maxbufs(vi);
-        return;
-    }
+    if (!vi->mergeable_rx_bufs)
+        return try_fill_recv_maxbufs(vi, gfp);

     for (;;) {
         skb_frag_t *f;

         skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-        if (unlikely(!skb))
+        if (unlikely(!skb)) {
+            oom = true;
             break;
+        }

         skb_reserve(skb, NET_IP_ALIGN);

         f = &skb_shinfo(skb)->frags[0];
-        f->page = get_a_page(vi, GFP_ATOMIC);
+        f->page = get_a_page(vi, gfp);
         if (!f->page) {
+            oom = true;
             kfree_skb(skb);
             break;
         }
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
     if (unlikely(vi->num > vi->max))
         vi->max = vi->num;
     vi->rvq->vq_ops->kick(vi->rvq);
+    return !oom;
 }

 static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
     }
 }

+static void refill_work(struct work_struct *work)
+{
+    struct virtnet_info *vi;
+    bool still_empty;
+
+    vi = container_of(work, struct virtnet_info, refill.work);
+    napi_disable(&vi->napi);
+    try_fill_recv(vi, GFP_KERNEL);
+    still_empty = (vi->num == 0);
+    napi_enable(&vi->napi);
+
+    /* In theory, this can happen: if we don't get any buffers in
+     * we will *never* try to fill again. */
+    if (still_empty)
+        schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
     struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
         received++;
     }

-    /* FIXME: If we oom and completely run out of inbufs, we need
-     * to start a timer trying to fill more. */
-    if (vi->num < vi->max / 2)
-        try_fill_recv(vi);
+    if (vi->num < vi->max / 2) {
+        if (!try_fill_recv(vi, GFP_ATOMIC))
+            schedule_delayed_work(&vi->refill, 0);
+    }

     /* Out of packets? */
     if (received < budget) {
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
     vi->vdev = vdev;
     vdev->priv = vi;
     vi->pages = NULL;
+    INIT_DELAYED_WORK(&vi->refill, refill_work);

     /* If they give us a callback when all buffers are done, we don't need
      * the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
     }

     /* Last of all, set up some receive buffers. */
-    try_fill_recv(vi);
+    try_fill_recv(vi, GFP_KERNEL);

     /* If we didn't even get one input buffer, we're useless. */
     if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 unregister:
     unregister_netdev(dev);
+    cancel_delayed_work_sync(&vi->refill);
 free_vqs:
     vdev->config->del_vqs(vdev);
 free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
     BUG_ON(vi->num != 0);

     unregister_netdev(vi->dev);
+    cancel_delayed_work_sync(&vi->refill);

     vdev->config->del_vqs(vi->vdev);

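Note: the virtio_net change above implements a common refill-on-OOM shape: the hot path allocates with GFP_ATOMIC and, if that fails, defers to work that may use GFP_KERNEL and reschedules itself while the pool is still empty. A minimal stand-alone sketch of that shape; pool, fake_alloc_failures and try_fill are made-up stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

static int pool;                     /* stand-in for vi->num */
static int fake_alloc_failures = 2;  /* pretend the first non-blocking tries fail */

/* Non-blocking path ("GFP_ATOMIC") may fail; blocking path ("GFP_KERNEL") succeeds. */
static bool try_fill(bool may_block)
{
    if (!may_block && fake_alloc_failures-- > 0)
        return false;            /* report OOM to the caller */
    pool += 8;
    return true;
}

int main(void)
{
    if (!try_fill(false)) {          /* hot path, cannot sleep */
        while (pool == 0)            /* deferred retry, may sleep */
            if (try_fill(true))
                break;
    }
    printf("pool now holds %d buffers\n", pool);
    return 0;
}
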
drivers/platform/x86/wmi.c
@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
     acpi_status status;
     struct acpi_object_list input;
     union acpi_object params[3];
-    char method[4] = "WM";
+    char method[5] = "WM";

     if (!find_guid(guid_string, &wblock))
         return AE_ERROR;
@@ -328,8 +328,8 @@ struct acpi_buffer *out)
     acpi_status status, wc_status = AE_ERROR;
     struct acpi_object_list input, wc_input;
     union acpi_object wc_params[1], wq_params[1];
-    char method[4];
-    char wc_method[4] = "WC";
+    char method[5];
+    char wc_method[5] = "WC";

     if (!guid_string || !out)
         return AE_BAD_PARAMETER;
@@ -410,7 +410,7 @@ const struct acpi_buffer *in)
     acpi_handle handle;
     struct acpi_object_list input;
     union acpi_object params[2];
-    char method[4] = "WS";
+    char method[5] = "WS";

     if (!guid_string || !in)
         return AE_BAD_DATA;

drivers/pps/pps.c
@@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps)
     }
     pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
                     "pps%d", pps->id);
-    if (err)
+    if (IS_ERR(pps->dev))
         goto del_cdev;
     dev_set_drvdata(pps->dev, pps);

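Note: the pps fix above tests the pointer returned by device_create() with IS_ERR() instead of an unrelated err variable. A minimal sketch of the ERR_PTR/IS_ERR convention that makes this work; the macro definitions here mirror the kernel's idea but are simplified for a stand-alone program, and create_device is a hypothetical stand-in.

#include <errno.h>
#include <stdio.h>

/* Failures are encoded as small negative values cast to a pointer, so the
 * returned pointer itself must be tested, not a separate error variable. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *create_device(int fail)
{
    static int dummy;
    return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

int main(void)
{
    void *dev = create_device(1);
    if (IS_ERR(dev))
        printf("device_create failed: %ld\n", PTR_ERR(dev));
    return 0;
}
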
drivers/thermal/thermal_sys.c
@@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
     mutex_lock(&tz->lock);

-    tz->ops->get_temp(tz, &temp);
+    if (tz->ops->get_temp(tz, &temp)) {
+        /* get_temp failed - retry it later */
+        printk(KERN_WARNING PREFIX "failed to read out thermal zone "
+               "%d\n", tz->id);
+        goto leave;
+    }

     for (count = 0; count < tz->trips; count++) {
         tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
                         THERMAL_TRIPS_NONE);

     tz->last_temperature = temp;
+
+leave:
     if (tz->passive)
         thermal_zone_device_set_polling(tz, tz->passive_delay);
     else if (tz->polling_delay)

drivers/video/xen-fbfront.c
@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
     xenfb_init_shared_page(info, fb_info);

+    ret = xenfb_connect_backend(dev, info);
+    if (ret < 0)
+        goto error;
+
     ret = register_framebuffer(fb_info);
     if (ret) {
         fb_deferred_io_cleanup(fb_info);
@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
     }
     info->fb_info = fb_info;

-    ret = xenfb_connect_backend(dev, info);
-    if (ret < 0)
-        goto error;
-
     xenfb_make_preferred_console();
     return 0;

fs/9p/v9fs.c
@@ -76,7 +76,7 @@ static const match_table_t tokens = {
  * Return 0 upon success, -ERRNO upon failure.
  */

-static int v9fs_parse_options(struct v9fs_session_info *v9ses)
+static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
 {
     char *options;
     substring_t args[MAX_OPT_ARGS];
@@ -90,10 +90,10 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses)
     v9ses->debug = 0;
     v9ses->cache = 0;

-    if (!v9ses->options)
+    if (!opts)
         return 0;

-    options = kstrdup(v9ses->options, GFP_KERNEL);
+    options = kstrdup(opts, GFP_KERNEL);
     if (!options) {
         P9_DPRINTK(P9_DEBUG_ERROR,
                "failed to allocate copy of option string\n");
@@ -206,24 +206,14 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
     v9ses->uid = ~0;
     v9ses->dfltuid = V9FS_DEFUID;
     v9ses->dfltgid = V9FS_DEFGID;
-    if (data) {
-        v9ses->options = kstrdup(data, GFP_KERNEL);
-        if (!v9ses->options) {
-            P9_DPRINTK(P9_DEBUG_ERROR,
-               "failed to allocate copy of option string\n");
-            retval = -ENOMEM;
-            goto error;
-        }
-    }

-    rc = v9fs_parse_options(v9ses);
+    rc = v9fs_parse_options(v9ses, data);
     if (rc < 0) {
         retval = rc;
         goto error;
     }

-    v9ses->clnt = p9_client_create(dev_name, v9ses->options);
+    v9ses->clnt = p9_client_create(dev_name, data);

     if (IS_ERR(v9ses->clnt)) {
         retval = PTR_ERR(v9ses->clnt);
         v9ses->clnt = NULL;
@@ -280,7 +270,6 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
     __putname(v9ses->uname);
     __putname(v9ses->aname);
-    kfree(v9ses->options);
 }

 /**

fs/9p/v9fs.h
@@ -85,7 +85,6 @@ struct v9fs_session_info {
     unsigned int afid;
     unsigned int cache;

-    char *options;      /* copy of mount options */
     char *uname;        /* user name to mount as */
     char *aname;        /* name of remote hierarchy being mounted */
     unsigned int maxdata;   /* max data for client interface */

fs/9p/vfs_inode.c
@@ -171,7 +171,6 @@ int v9fs_uflags2omode(int uflags, int extended)
 /**
  * v9fs_blank_wstat - helper function to setup a 9P stat structure
- * @v9ses: 9P session info (for determining extended mode)
  * @wstat: structure to initialize
  *
  */
@@ -207,65 +206,72 @@ v9fs_blank_wstat(struct p9_wstat *wstat)
 struct inode *v9fs_get_inode(struct super_block *sb, int mode)
 {
+    int err;
     struct inode *inode;
     struct v9fs_session_info *v9ses = sb->s_fs_info;

     P9_DPRINTK(P9_DEBUG_VFS, "super block: %p mode: %o\n", sb, mode);

     inode = new_inode(sb);
-    if (inode) {
-        inode->i_mode = mode;
-        inode->i_uid = current_fsuid();
-        inode->i_gid = current_fsgid();
-        inode->i_blocks = 0;
-        inode->i_rdev = 0;
-        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-        inode->i_mapping->a_ops = &v9fs_addr_operations;
-
-        switch (mode & S_IFMT) {
-        case S_IFIFO:
-        case S_IFBLK:
-        case S_IFCHR:
-        case S_IFSOCK:
-            if (!v9fs_extended(v9ses)) {
-                P9_DPRINTK(P9_DEBUG_ERROR,
-                       "special files without extended mode\n");
-                return ERR_PTR(-EINVAL);
-            }
-            init_special_inode(inode, inode->i_mode, inode->i_rdev);
-            break;
-        case S_IFREG:
-            inode->i_op = &v9fs_file_inode_operations;
-            inode->i_fop = &v9fs_file_operations;
-            break;
-        case S_IFLNK:
-            if (!v9fs_extended(v9ses)) {
-                P9_DPRINTK(P9_DEBUG_ERROR,
-                       "extended modes used w/o 9P2000.u\n");
-                return ERR_PTR(-EINVAL);
-            }
-            inode->i_op = &v9fs_symlink_inode_operations;
-            break;
-        case S_IFDIR:
-            inc_nlink(inode);
-            if (v9fs_extended(v9ses))
-                inode->i_op = &v9fs_dir_inode_operations_ext;
-            else
-                inode->i_op = &v9fs_dir_inode_operations;
-            inode->i_fop = &v9fs_dir_operations;
-            break;
-        default:
-            P9_DPRINTK(P9_DEBUG_ERROR,
-                   "BAD mode 0x%x S_IFMT 0x%x\n",
-                   mode, mode & S_IFMT);
-            return ERR_PTR(-EINVAL);
-        }
-    } else {
+    if (!inode) {
         P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
         return ERR_PTR(-ENOMEM);
     }
+
+    inode->i_mode = mode;
+    inode->i_uid = current_fsuid();
+    inode->i_gid = current_fsgid();
+    inode->i_blocks = 0;
+    inode->i_rdev = 0;
+    inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+    inode->i_mapping->a_ops = &v9fs_addr_operations;
+
+    switch (mode & S_IFMT) {
+    case S_IFIFO:
+    case S_IFBLK:
+    case S_IFCHR:
+    case S_IFSOCK:
+        if (!v9fs_extended(v9ses)) {
+            P9_DPRINTK(P9_DEBUG_ERROR,
+                   "special files without extended mode\n");
+            err = -EINVAL;
+            goto error;
+        }
+        init_special_inode(inode, inode->i_mode, inode->i_rdev);
+        break;
+    case S_IFREG:
+        inode->i_op = &v9fs_file_inode_operations;
+        inode->i_fop = &v9fs_file_operations;
+        break;
+    case S_IFLNK:
+        if (!v9fs_extended(v9ses)) {
+            P9_DPRINTK(P9_DEBUG_ERROR,
+                   "extended modes used w/o 9P2000.u\n");
+            err = -EINVAL;
+            goto error;
+        }
+        inode->i_op = &v9fs_symlink_inode_operations;
+        break;
+    case S_IFDIR:
+        inc_nlink(inode);
+        if (v9fs_extended(v9ses))
+            inode->i_op = &v9fs_dir_inode_operations_ext;
+        else
+            inode->i_op = &v9fs_dir_inode_operations;
+        inode->i_fop = &v9fs_dir_operations;
+        break;
+    default:
+        P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
+               mode, mode & S_IFMT);
+        err = -EINVAL;
+        goto error;
+    }
+
     return inode;
+
+error:
+    iput(inode);
+    return ERR_PTR(err);
 }

 /*
@@ -338,30 +344,25 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
     ret = NULL;
     st = p9_client_stat(fid);
-    if (IS_ERR(st)) {
-        err = PTR_ERR(st);
-        st = NULL;
-        goto error;
-    }
+    if (IS_ERR(st))
+        return ERR_CAST(st);

     umode = p9mode2unixmode(v9ses, st->mode);
     ret = v9fs_get_inode(sb, umode);
     if (IS_ERR(ret)) {
         err = PTR_ERR(ret);
-        ret = NULL;
         goto error;
     }

     v9fs_stat2inode(st, ret, sb);
     ret->i_ino = v9fs_qid2ino(&st->qid);
     p9stat_free(st);
     kfree(st);
     return ret;

 error:
     p9stat_free(st);
     kfree(st);
-    if (ret)
-        iput(ret);
     return ERR_PTR(err);
 }
@@ -403,9 +404,9 @@ v9fs_open_created(struct inode *inode, struct file *file)
  * @v9ses: session information
  * @dir: directory that dentry is being created in
  * @dentry: dentry that is being created
- * @extension: 9p2000.u extension string to support devices, etc.
  * @perm: create permissions
  * @mode: open mode
+ * @extension: 9p2000.u extension string to support devices, etc.
  *
  */
 static struct p9_fid *
@@ -470,7 +471,10 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
     dentry->d_op = &v9fs_dentry_operations;

     d_instantiate(dentry, inode);
-    v9fs_fid_add(dentry, fid);
+    err = v9fs_fid_add(dentry, fid);
+    if (err < 0)
+        goto error;
+
     return ofid;

 error:

fs/9p/vfs_super.c
@@ -81,7 +81,7 @@ static int v9fs_set_super(struct super_block *s, void *data)
 static void
 v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
-        int flags)
+        int flags, void *data)
 {
     sb->s_maxbytes = MAX_LFS_FILESIZE;
     sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
@@ -91,6 +91,8 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
     sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
         MS_NOATIME;
+
+    save_mount_options(sb, data);
 }

 /**
@@ -113,14 +115,11 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
     struct v9fs_session_info *v9ses = NULL;
     struct p9_wstat *st = NULL;
     int mode = S_IRWXUGO | S_ISVTX;
-    uid_t uid = current_fsuid();
-    gid_t gid = current_fsgid();
     struct p9_fid *fid;
     int retval = 0;

     P9_DPRINTK(P9_DEBUG_VFS, " \n");

-    st = NULL;
     v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
     if (!v9ses)
         return -ENOMEM;
@@ -142,7 +141,7 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
         retval = PTR_ERR(sb);
         goto free_stat;
     }
-    v9fs_fill_super(sb, v9ses, flags);
+    v9fs_fill_super(sb, v9ses, flags, data);

     inode = v9fs_get_inode(sb, S_IFDIR | mode);
     if (IS_ERR(inode)) {
@@ -150,9 +149,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
         goto release_sb;
     }

-    inode->i_uid = uid;
-    inode->i_gid = gid;
-
     root = d_alloc_root(inode);
     if (!root) {
         iput(inode);
@@ -173,10 +169,8 @@ P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
     simple_set_mnt(mnt, sb);
     return 0;

-release_sb:
-    deactivate_locked_super(sb);
-
 free_stat:
     p9stat_free(st);
     kfree(st);

 clunk_fid:
@@ -185,7 +179,12 @@ clunk_fid:
 close_session:
     v9fs_session_close(v9ses);
     kfree(v9ses);
     return retval;
+
+release_sb:
+    p9stat_free(st);
+    kfree(st);
+    deactivate_locked_super(sb);
+    return retval;
 }
@@ -207,24 +206,10 @@ static void v9fs_kill_super(struct super_block *s)
     v9fs_session_close(v9ses);
     kfree(v9ses);
+    s->s_fs_info = NULL;
     P9_DPRINTK(P9_DEBUG_VFS, "exiting kill_super\n");
 }

-/**
- * v9fs_show_options - Show mount options in /proc/mounts
- * @m: seq_file to write to
- * @mnt: mount descriptor
- *
- */
-
-static int v9fs_show_options(struct seq_file *m, struct vfsmount *mnt)
-{
-    struct v9fs_session_info *v9ses = mnt->mnt_sb->s_fs_info;
-
-    seq_printf(m, "%s", v9ses->options);
-    return 0;
-}
-
 static void
 v9fs_umount_begin(struct super_block *sb)
 {
@@ -237,7 +222,7 @@ v9fs_umount_begin(struct super_block *sb)
 static const struct super_operations v9fs_super_ops = {
     .statfs = simple_statfs,
     .clear_inode = v9fs_clear_inode,
-    .show_options = v9fs_show_options,
+    .show_options = generic_show_options,
     .umount_begin = v9fs_umount_begin,
 };

fs/afs/file.c
@@ -134,9 +134,16 @@ static int afs_readpage(struct file *file, struct page *page)
     inode = page->mapping->host;

-    ASSERT(file != NULL);
-    key = file->private_data;
-    ASSERT(key != NULL);
+    if (file) {
+        key = file->private_data;
+        ASSERT(key != NULL);
+    } else {
+        key = afs_request_key(AFS_FS_S(inode->i_sb)->volume->cell);
+        if (IS_ERR(key)) {
+            ret = PTR_ERR(key);
+            goto error_nokey;
+        }
+    }

     _enter("{%x},{%lu},{%lu}",
            key_serial(key), inode->i_ino, page->index);
@@ -207,12 +214,17 @@ static int afs_readpage(struct file *file, struct page *page)
         unlock_page(page);
     }

+    if (!file)
+        key_put(key);
     _leave(" = 0");
     return 0;

 error:
     SetPageError(page);
     unlock_page(page);
+    if (!file)
+        key_put(key);
+error_nokey:
     _leave(" = %d", ret);
     return ret;
 }

fs/notify/inotify/inotify_fsnotify.c
@@ -105,16 +105,45 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
     return send;
 }

+/*
+ * This is NEVER supposed to be called.  Inotify marks should either have been
+ * removed from the idr when the watch was removed or in the
+ * fsnotify_destroy_mark_by_group() call when the inotify instance was being
+ * torn down.  This is only called if the idr is about to be freed but there
+ * are still marks in it.
+ */
 static int idr_callback(int id, void *p, void *data)
 {
-    BUG();
+    struct fsnotify_mark_entry *entry;
+    struct inotify_inode_mark_entry *ientry;
+    static bool warned = false;
+
+    if (warned)
+        return 0;
+
+    warned = false;
+    entry = p;
+    ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+
+    WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+        "idr.  Probably leaking memory\n", id, p, data);
+
+    /*
+     * I'm taking the liberty of assuming that the mark in question is a
+     * valid address and I'm dereferencing it.  This might help to figure
+     * out why we got here and the panic is no worse than the original
+     * BUG() that was here.
+     */
+    if (entry)
+        printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
+            entry->group, entry->inode, ientry->wd);
     return 0;
 }

 static void inotify_free_group_priv(struct fsnotify_group *group)
 {
     /* ideally the idr is empty and we won't hit the BUG in teh callback */
-    idr_for_each(&group->inotify_data.idr, idr_callback, NULL);
+    idr_for_each(&group->inotify_data.idr, idr_callback, group);
     idr_remove_all(&group->inotify_data.idr);
     idr_destroy(&group->inotify_data.idr);
 }

fs/notify/inotify/inotify_user.c
@@ -47,9 +47,6 @@
 static struct vfsmount *inotify_mnt __read_mostly;

-/* this just sits here and wastes global memory.  used to just pad userspace messages with zeros */
-static struct inotify_event nul_inotify_event;
-
 /* these are configurable via /proc/sys/fs/inotify/ */
 static int inotify_max_user_instances __read_mostly;
 static int inotify_max_queued_events __read_mostly;
@@ -199,8 +196,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
         inotify_free_event_priv(fsn_priv);
     }

-    /* round up event->name_len so it is a multiple of event_size */
-    name_len = roundup(event->name_len, event_size);
+    /* round up event->name_len so it is a multiple of event_size
+     * plus an extra byte for the terminating '\0'.
+     */
+    name_len = roundup(event->name_len + 1, event_size);
     inotify_event.len = name_len;

     inotify_event.mask = inotify_mask_to_arg(event->mask);
@@ -224,8 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
         return -EFAULT;
     buf += event->name_len;

-    /* fill userspace with 0's from nul_inotify_event */
-    if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
+    /* fill userspace with 0's */
+    if (clear_user(buf, len_to_zero))
         return -EFAULT;
     buf += len_to_zero;
     event_size += name_len;
@@ -364,20 +363,53 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
     return error;
 }

+/*
+ * Remove the mark from the idr (if present) and drop the reference
+ * on the mark because it was in the idr.
+ */
 static void inotify_remove_from_idr(struct fsnotify_group *group,
                     struct inotify_inode_mark_entry *ientry)
 {
     struct idr *idr;
+    struct fsnotify_mark_entry *entry;
+    struct inotify_inode_mark_entry *found_ientry;
+    int wd;

     spin_lock(&group->inotify_data.idr_lock);
     idr = &group->inotify_data.idr;
-    idr_remove(idr, ientry->wd);
-    spin_unlock(&group->inotify_data.idr_lock);
-    ientry->wd = -1;
+    wd = ientry->wd;
+
+    if (wd == -1)
+        goto out;
+
+    entry = idr_find(&group->inotify_data.idr, wd);
+    if (unlikely(!entry))
+        goto out;
+
+    found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+    if (unlikely(found_ientry != ientry)) {
+        /* We found an entry in the idr with the right wd, but it's
+         * not the entry we were told to remove.  eparis seriously
+         * fucked up somewhere. */
+        WARN_ON(1);
+        ientry->wd = -1;
+        goto out;
+    }
+
+    /* One ref for being in the idr, one ref held by the caller */
+    BUG_ON(atomic_read(&entry->refcnt) < 2);
+
+    idr_remove(idr, wd);
+    ientry->wd = -1;
+
+    /* removed from the idr, drop that ref */
+    fsnotify_put_mark(entry);
+out:
+    spin_unlock(&group->inotify_data.idr_lock);
 }

 /*
- * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
- * internal reference help on the mark because it is in the idr.
+ * Send IN_IGNORED for this wd, remove this wd from the idr.
  */
 void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
                     struct fsnotify_group *group)
@@ -417,9 +449,6 @@ skip_send_ignore:
     /* remove this entry from the idr */
     inotify_remove_from_idr(group, ientry);

-    /* removed from idr, drop that reference */
-    fsnotify_put_mark(entry);
-
     atomic_dec(&group->inotify_data.user->inotify_watches);
 }
@@ -431,80 +460,29 @@ static void inotify_free_mark(struct fsnotify_mark_entry *entry)
     kmem_cache_free(inotify_inode_mark_cachep, ientry);
 }

-static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+static int inotify_update_existing_watch(struct fsnotify_group *group,
+                     struct inode *inode,
+                     u32 arg)
 {
-    struct fsnotify_mark_entry *entry = NULL;
+    struct fsnotify_mark_entry *entry;
     struct inotify_inode_mark_entry *ientry;
-    struct inotify_inode_mark_entry *tmp_ientry;
-    int ret = 0;
-    int add = (arg & IN_MASK_ADD);
-    __u32 mask;
     __u32 old_mask, new_mask;
+    __u32 mask;
+    int add = (arg & IN_MASK_ADD);
+    int ret;

     /* don't allow invalid bits: we don't want flags set */
     mask = inotify_arg_to_mask(arg);
     if (unlikely(!mask))
         return -EINVAL;

-    tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-    if (unlikely(!tmp_ientry))
-        return -ENOMEM;
-    /* we set the mask at the end after attaching it */
-    fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
-    tmp_ientry->wd = -1;
-
-find_entry:
     spin_lock(&inode->i_lock);
     entry = fsnotify_find_mark_entry(group, inode);
     spin_unlock(&inode->i_lock);
-    if (entry) {
-        ientry = container_of(entry, struct inotify_inode_mark_entry,
-                      fsn_entry);
-    } else {
-        ret = -ENOSPC;
-        if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
-            goto out_err;
-retry:
-        ret = -ENOMEM;
-        if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
-            goto out_err;
-
-        spin_lock(&group->inotify_data.idr_lock);
-        ret = idr_get_new_above(&group->inotify_data.idr,
-                    &tmp_ientry->fsn_entry,
-                    group->inotify_data.last_wd,
-                    &tmp_ientry->wd);
-        spin_unlock(&group->inotify_data.idr_lock);
-        if (ret) {
-            if (ret == -EAGAIN)
-                goto retry;
-            goto out_err;
-        }
-
-        ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
-        if (ret) {
-            inotify_remove_from_idr(group, tmp_ientry);
-            if (ret == -EEXIST)
-                goto find_entry;
-            goto out_err;
-        }
-
-        /* tmp_ientry has been added to the inode, so we are all set up.
-         * now we just need to make sure tmp_ientry doesn't get freed and
-         * we need to set up entry and ientry so the generic code can
-         * do its thing. */
-        ientry = tmp_ientry;
-        entry = &ientry->fsn_entry;
-        tmp_ientry = NULL;
-
-        atomic_inc(&group->inotify_data.user->inotify_watches);
-
-        /* update the idr hint */
-        group->inotify_data.last_wd = ientry->wd;
-
-        /* we put the mark on the idr, take a reference */
-        fsnotify_get_mark(entry);
-    }
+    if (!entry)
+        return -ENOENT;

-    ret = ientry->wd;
+    ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

     spin_lock(&entry->lock);
@@ -536,18 +514,103 @@ retry:
         fsnotify_recalc_group_mask(group);
     }

-    /* this either matches fsnotify_find_mark_entry, or init_mark_entry
-     * depending on which path we took... */
+    /* return the wd */
+    ret = ientry->wd;
+
+    /* match the get from fsnotify_find_mark_entry() */
     fsnotify_put_mark(entry);

+    return ret;
+}
+
+static int inotify_new_watch(struct fsnotify_group *group,
+                 struct inode *inode,
+                 u32 arg)
+{
+    struct inotify_inode_mark_entry *tmp_ientry;
+    __u32 mask;
+    int ret;
+
+    /* don't allow invalid bits: we don't want flags set */
+    mask = inotify_arg_to_mask(arg);
+    if (unlikely(!mask))
+        return -EINVAL;
+
+    tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+    if (unlikely(!tmp_ientry))
+        return -ENOMEM;
+
+    fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+    tmp_ientry->fsn_entry.mask = mask;
+    tmp_ientry->wd = -1;
+
+    ret = -ENOSPC;
+    if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
+        goto out_err;
+retry:
+    ret = -ENOMEM;
+    if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
+        goto out_err;
+
+    spin_lock(&group->inotify_data.idr_lock);
+    ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+                group->inotify_data.last_wd,
+                &tmp_ientry->wd);
+    spin_unlock(&group->inotify_data.idr_lock);
+    if (ret) {
+        /* idr was out of memory allocate and try again */
+        if (ret == -EAGAIN)
+            goto retry;
+        goto out_err;
+    }
+
+    /* we put the mark on the idr, take a reference */
+    fsnotify_get_mark(&tmp_ientry->fsn_entry);
+
+    /* we are on the idr, now get on the inode */
+    ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+    if (ret) {
+        /* we failed to get on the inode, get off the idr */
+        inotify_remove_from_idr(group, tmp_ientry);
+        goto out_err;
+    }
+
+    /* update the idr hint, who cares about races, it's just a hint */
+    group->inotify_data.last_wd = tmp_ientry->wd;
+
+    /* increment the number of watches the user has */
+    atomic_inc(&group->inotify_data.user->inotify_watches);
+
+    /* return the watch descriptor for this new entry */
+    ret = tmp_ientry->wd;
+
+    /* match the ref from fsnotify_init_markentry() */
+    fsnotify_put_mark(&tmp_ientry->fsn_entry);
+
 out_err:
     /* could be an error, could be that we found an existing mark */
     if (tmp_ientry) {
         /* on the idr but didn't make it on the inode */
         if (tmp_ientry->wd != -1)
             inotify_remove_from_idr(group, tmp_ientry);
         if (ret < 0)
             kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
     }

     return ret;
 }
+
+static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
+{
+    int ret = 0;
+
+retry:
+    /* try to update and existing watch with the new arg */
+    ret = inotify_update_existing_watch(group, inode, arg);
+    /* no mark present, try to add a new one */
+    if (ret == -ENOENT)
+        ret = inotify_new_watch(group, inode, arg);
+    /*
+     * inotify_new_watch could race with another thread which did an
+     * inotify_new_watch between the update_existing and the add watch
+     * here, go back and try to update an existing mark again.
+     */
+    if (ret == -EEXIST)
+        goto retry;
+
+    return ret;
+}

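Note: the inotify rework above splits "update a watch" into an update-existing path and a create-new path, and closes the race between them by retrying on EEXIST. A minimal stand-alone sketch of that retry shape; update_existing, create_new and update_or_create are hypothetical stand-ins, not the kernel functions.

#include <errno.h>
#include <stdio.h>

static int update_existing(int key) { (void)key; return -ENOENT; /* no watch yet */ }
static int create_new(int key)      { (void)key; return 17;      /* new wd */ }

/* Try to update, fall back to create, and restart if another thread
 * created the entry in between (EEXIST). */
static int update_or_create(int key)
{
    int ret;
retry:
    ret = update_existing(key);
    if (ret == -ENOENT)
        ret = create_new(key);
    if (ret == -EEXIST)
        goto retry;
    return ret;
}

int main(void)
{
    printf("wd = %d\n", update_or_create(1));
    return 0;
}
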
include/acpi/processor.h
@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
     cpumask_var_t shared_cpu_map;
     int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
     int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
-                          int state);
+                          int state, bool force);

     u32 address;
     u8 duty_offset;
@@ -321,7 +321,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
 /* in processor_throttling.c */
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
-extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+extern int acpi_processor_set_throttling(struct acpi_processor *pr,
+                     int state, bool force);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */

include/linux/flex_array.h
@@ -21,7 +21,7 @@ struct flex_array {
     struct {
         int element_size;
         int total_nr_elements;
-        struct flex_array_part *parts[0];
+        struct flex_array_part *parts[];
     };
     /*
      * This little trick makes sure that
@@ -36,12 +36,14 @@ struct flex_array {
     .total_nr_elements = (total), \
 } } }

-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags);
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags);
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+        gfp_t flags);
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+        unsigned int end, gfp_t flags);
 void flex_array_free(struct flex_array *fa);
 void flex_array_free_parts(struct flex_array *fa);
-int flex_array_put(struct flex_array *fa, int element_nr, void *src,
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
         gfp_t flags);
-void *flex_array_get(struct flex_array *fa, int element_nr);
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr);

 #endif /* _FLEX_ARRAY_H */

kernel/fork.c
@@ -846,11 +846,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
     struct signal_struct *sig;

-    if (clone_flags & CLONE_THREAD) {
-        atomic_inc(&current->signal->count);
-        atomic_inc(&current->signal->live);
+    if (clone_flags & CLONE_THREAD)
         return 0;
-    }

     sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
     tsk->signal = sig;
@@ -908,16 +905,6 @@ void __cleanup_signal(struct signal_struct *sig)
     kmem_cache_free(signal_cachep, sig);
 }

-static void cleanup_signal(struct task_struct *tsk)
-{
-    struct signal_struct *sig = tsk->signal;
-
-    atomic_dec(&sig->live);
-
-    if (atomic_dec_and_test(&sig->count))
-        __cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
     unsigned long new_flags = p->flags;
@@ -1281,6 +1268,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
     }

     if (clone_flags & CLONE_THREAD) {
+        atomic_inc(&current->signal->count);
+        atomic_inc(&current->signal->live);
         p->group_leader = current->group_leader;
         list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
     }
@@ -1326,7 +1315,8 @@ bad_fork_cleanup_mm:
     if (p->mm)
         mmput(p->mm);
 bad_fork_cleanup_signal:
-    cleanup_signal(p);
+    if (!(clone_flags & CLONE_THREAD))
+        __cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
     __cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:

kernel/module.c
@@ -909,16 +909,18 @@ void __symbol_put(const char *symbol)
 }
 EXPORT_SYMBOL(__symbol_put);

+/* Note this assumes addr is a function, which it currently always is. */
 void symbol_put_addr(void *addr)
 {
     struct module *modaddr;
+    unsigned long a = (unsigned long)dereference_function_descriptor(addr);

-    if (core_kernel_text((unsigned long)addr))
+    if (core_kernel_text(a))
         return;

     /* module_text_address is safe here: we're supposed to have reference
      * to module from symbol_get, so it can't go away. */
-    modaddr = __module_text_address((unsigned long)addr);
+    modaddr = __module_text_address(a);
     BUG_ON(!modaddr);
     module_put(modaddr);
 }
@@ -2353,7 +2355,8 @@ static noinline struct module *load_module(void __user *umod,
     if (err < 0)
         goto unlink;
     add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
-    add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
+    if (mod->sect_attrs)
+        add_notes_attrs(mod, hdr->e_shnum, secstrings, sechdrs);

     /* Get rid of temporary copy */
     vfree(hdr);

lib/flex_array.c
@@ -99,7 +99,8 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * capacity in the base structure.  Also note that no effort is made
  * to efficiently pack objects across page boundaries.
  */
-struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+                    gfp_t flags)
 {
     struct flex_array *ret;
     int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
@@ -115,16 +116,14 @@ struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
     return ret;
 }

-static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+static int fa_element_to_part_nr(struct flex_array *fa,
+                    unsigned int element_nr)
 {
     return element_nr / __elements_per_part(fa->element_size);
 }

 /**
  * flex_array_free_parts - just free the second-level pages
- * @src:    address of data to copy into the array
- * @element_nr: index of the position in which to insert
- *      the new element.
  *
  * This is to be used in cases where the base 'struct flex_array'
  * has been statically allocated and should not be free.
@@ -146,14 +145,12 @@ void flex_array_free(struct flex_array *fa)
     kfree(fa);
 }

-static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+static unsigned int index_inside_part(struct flex_array *fa,
+                    unsigned int element_nr)
 {
-    return element_nr % __elements_per_part(fa->element_size);
-}
+    unsigned int part_offset;

-static int index_inside_part(struct flex_array *fa, int element_nr)
-{
-    int part_offset = fa_index_inside_part(fa, element_nr);
+    part_offset = element_nr % __elements_per_part(fa->element_size);
     return part_offset * fa->element_size;
 }
@@ -188,7 +185,8 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-int flex_array_put(struct flex_array *fa, int element_nr, void *src,
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
             gfp_t flags)
 {
     int part_nr = fa_element_to_part_nr(fa, element_nr);
     struct flex_array_part *part;
@@ -198,10 +196,11 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
         return -ENOSPC;
     if (elements_fit_in_base(fa))
         part = (struct flex_array_part *)&fa->parts[0];
-    else
+    else {
         part = __fa_get_part(fa, part_nr, flags);
-    if (!part)
-        return -ENOMEM;
+        if (!part)
+            return -ENOMEM;
+    }
     dst = &part->elements[index_inside_part(fa, element_nr)];
     memcpy(dst, src, fa->element_size);
     return 0;
@@ -219,7 +218,8 @@ int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags
  *
  * Locking must be provided by the caller.
  */
-int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+            unsigned int end, gfp_t flags)
 {
     int start_part;
     int end_part;
@@ -250,18 +250,19 @@ int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
  *
  * Locking must be provided by the caller.
  */
-void *flex_array_get(struct flex_array *fa, int element_nr)
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
     int part_nr = fa_element_to_part_nr(fa, element_nr);
     struct flex_array_part *part;

     if (element_nr >= fa->total_nr_elements)
         return NULL;
-    if (!fa->parts[part_nr])
-        return NULL;
     if (elements_fit_in_base(fa))
         part = (struct flex_array_part *)&fa->parts[0];
-    else
+    else {
         part = fa->parts[part_nr];
+        if (!part)
+            return NULL;
+    }
     return &part->elements[index_inside_part(fa, element_nr)];
 }

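Note: the flex_array helpers above split an element number into a part number and a byte offset inside that part. A minimal stand-alone sketch of that two-level index math; PART_SIZE and element_size are made-up values for illustration, not the kernel's constants.

#include <stdio.h>

#define PART_SIZE 4096   /* hypothetical per-part storage, analogous to a page */

static unsigned int elements_per_part(int element_size)
{
    return PART_SIZE / element_size;
}

int main(void)
{
    int element_size = 24;
    unsigned int element_nr = 1000;
    unsigned int part_nr = element_nr / elements_per_part(element_size);
    unsigned int offset = (element_nr % elements_per_part(element_size))
                  * element_size;
    /* mirrors fa_element_to_part_nr() and index_inside_part() above */
    printf("element %u -> part %u, byte offset %u\n",
           element_nr, part_nr, offset);
    return 0;
}
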
lib/lmb.c
@@ -429,7 +429,7 @@ u64 __init lmb_phys_mem_size(void)
     return lmb.memory.size;
 }

-u64 __init lmb_end_of_DRAM(void)
+u64 lmb_end_of_DRAM(void)
 {
     int idx = lmb.memory.cnt - 1;

mm/rmap.c
@@ -358,6 +358,7 @@ static int page_referenced_one(struct page *page,
      */
     if (vma->vm_flags & VM_LOCKED) {
         *mapcount = 1;  /* break early from loop */
+        *vm_flags |= VM_LOCKED;
         goto out_unmap;
     }

mm/vmscan.c
@@ -631,9 +631,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
         referenced = page_referenced(page, 1,
                         sc->mem_cgroup, &vm_flags);
-        /* In active use or really unfreeable?  Activate it. */
+        /*
+         * In active use or really unfreeable?  Activate it.
+         * If page which have PG_mlocked lost isoltation race,
+         * try_to_unmap moves it to unevictable list
+         */
         if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-                    referenced && page_mapping_inuse(page))
+                    referenced && page_mapping_inuse(page)
+                    && !(vm_flags & VM_LOCKED))
             goto activate_locked;

         /*

net/9p/client.c
@@ -60,9 +60,9 @@ static struct p9_req_t *
 p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);

 /**
- * v9fs_parse_options - parse mount options into session structure
- * @options: options string passed from mount
- * @v9ses: existing v9fs session information
+ * parse_options - parse mount options into client structure
+ * @opts: options string passed from mount
+ * @clnt: existing v9fs client information
  *
  * Return 0 upon success, -ERRNO upon failure
  */
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(p9_tag_lookup);
 /**
  * p9_tag_init - setup tags structure and contents
- * @tags: tags structure from the client struct
+ * @c:  v9fs client struct
  *
  * This initializes the tags structure for each client instance.
 *
@@ -258,7 +258,7 @@ error:
 /**
  * p9_tag_cleanup - cleans up tags structure and reclaims resources
- * @tags: tags structure from the client struct
+ * @c:  v9fs client struct
  *
  * This frees resources associated with the tags structure
 *
@@ -411,14 +411,9 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
     if (c->dotu)
         err = -ecode;

-    if (!err) {
+    if (!err || !IS_ERR_VALUE(err))
         err = p9_errstr2errno(ename, strlen(ename));
-
-        /* string match failed */
-        if (!err)
-            err = -ESERVERFAULT;
-    }

     P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);

     kfree(ename);
@@ -430,8 +425,8 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 /**
  * p9_client_flush - flush (cancel) a request
- * c: client state
- * req: request to cancel
+ * @c: client state
+ * @oldreq: request to cancel
  *
  * This sents a flush for a particular requests and links
  * the flush request to the original request.  The current

net/9p/error.c
@@ -239,7 +239,7 @@ int p9_errstr2errno(char *errstr, int len)
         errstr[len] = 0;
         printk(KERN_ERR "%s: server reported unknown error %s\n",
             __func__, errstr);
-        errno = 1;
+        errno = ESERVERFAULT;
     }

     return -errno;

net/9p/trans_fd.c
@@ -119,8 +119,8 @@ struct p9_poll_wait {
  * @wpos: write position for current frame
  * @wsize: amount of data to write for current frame
  * @wbuf: current write buffer
+ * @poll_pending_link: pending links to be polled per conn
  * @poll_wait: array of wait_q's for various worker threads
- * @poll_waddr: ????
  * @pt: poll state
  * @rq: current read work
  * @wq: current write work
@@ -700,9 +700,9 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 }

 /**
- * parse_options - parse mount options into session structure
- * @options: options string passed from mount
- * @opts: transport-specific structure to parse options into
+ * parse_opts - parse mount options into p9_fd_opts structure
+ * @params: options string passed from mount
+ * @opts: fd transport-specific structure to parse options into
  *
  * Returns 0 upon success, -ERRNO upon failure
  */

net/9p/trans_rdma.c
@@ -67,14 +67,15 @@
  * @pd: Protection Domain pointer
  * @qp: Queue Pair pointer
  * @cq: Completion Queue pointer
+ * @dm_mr: DMA Memory Region pointer
+ * @lkey: The local access only memory region key
  * @timeout: Number of uSecs to wait for connection management events
  * @sq_depth: The depth of the Send Queue
  * @sq_sem: Semaphore for the SQ
  * @rq_depth: The depth of the Receive Queue.
  * @rq_count: Count of requests in the Receive Queue.
  * @addr: The remote peer's address
  * @req_lock: Protects the active request list
- * @send_wait: Wait list when the SQ fills up
  * @cm_done: Completion event for connection management tracking
  */
 struct p9_trans_rdma {
@@ -154,9 +155,9 @@ static match_table_t tokens = {
 };

 /**
- * parse_options - parse mount options into session structure
- * @options: options string passed from mount
- * @opts: transport-specific structure to parse options into
+ * parse_opts - parse mount options into rdma options structure
+ * @params: options string passed from mount
+ * @opts: rdma transport-specific structure to parse options into
  *
  * Returns 0 upon success, -ERRNO upon failure
  */

net/9p/trans_virtio.c
@@ -57,11 +57,9 @@ static int chan_index;
  * @initialized: whether the channel is initialized
  * @inuse: whether the channel is in use
  * @lock: protects multiple elements within this structure
+ * @client: client instance
  * @vdev: virtio dev associated with this channel
  * @vq: virtio queue associated with this channel
- * @tagpool: accounting for tag ids (and request slots)
- * @reqs: array of request slots
- * @max_tag: current number of request_slots allocated
  * @sg: scatter gather list which is used to pack a request (protected?)
  *
  * We keep all per-channel information in a structure.
@@ -92,7 +90,7 @@ static unsigned int rest_of_page(void *data)
 /**
  * p9_virtio_close - reclaim resources of a channel
- * @trans: transport state
+ * @client: client instance
  *
  * This reclaims a channel by freeing its resources and
  * reseting its inuse flag.
@@ -181,9 +179,8 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 /**
  * p9_virtio_request - issue a request
- * @t: transport state
- * @tc: &p9_fcall request to transmit
- * @rc: &p9_fcall to put reponse into
+ * @client: client instance issuing the request
+ * @req: request to be issued
  *
  */

net/ipv4/ip_output.c
@@ -813,6 +813,8 @@ int ip_append_data(struct sock *sk,
             inet->cork.addr = ipc->addr;
         }
         rt = *rtp;
+        if (unlikely(!rt))
+            return -EFAULT;
         /*
          * We steal reference to this route, caller should not release it
          */

security/integrity/ima/ima_main.c
@@ -262,6 +262,8 @@ void ima_counts_put(struct path *path, int mask)
     else if (mask & (MAY_READ | MAY_EXEC))
         iint->readcount--;
     mutex_unlock(&iint->mutex);
+
+    kref_put(&iint->refcount, iint_free);
 }

 /*
@@ -291,6 +293,8 @@ void ima_counts_get(struct file *file)
     if (file->f_mode & FMODE_WRITE)
         iint->writecount++;
     mutex_unlock(&iint->mutex);
+
+    kref_put(&iint->refcount, iint_free);
 }
 EXPORT_SYMBOL_GPL(ima_counts_get);
