linux / linux-davinci / Commits

Commit c2171f66 authored Oct 13, 2009 by Thomas Gleixner

Merge branch 'rt/head' into rt/2.6.31

parents 476ced36 d99f9884
Showing 5 changed files with 55 additions and 86 deletions (+55 -86):

    include/linux/sched.h   +0  -1
    kernel/fork.c           +0  -1
    kernel/futex.c          +22 -62
    kernel/softirq.c        +5  -14
    mm/slab.c               +28 -8
include/linux/sched.h

@@ -1492,7 +1492,6 @@ struct task_struct {
 #endif
 	struct list_head pi_state_list;
 	struct futex_pi_state *pi_state_cache;
-	struct task_struct *futex_wakeup;
 #endif
 #ifdef CONFIG_PERF_COUNTERS
 	struct perf_counter_context *perf_counter_ctxp;
kernel/fork.c

@@ -1190,7 +1190,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 	INIT_LIST_HEAD(&p->pi_state_list);
 	p->pi_state_cache = NULL;
-	p->futex_wakeup = NULL;
 #endif
 	/*
 	 * sigaltstack should be cleared when sharing the same VM
kernel/futex.c

@@ -713,7 +713,7 @@ retry:
  * The hash bucket lock must be held when this is called.
  * Afterwards, the futex_q must not be accessed.
  */
-static void wake_futex(struct task_struct **wake_list, struct futex_q *q)
+static void wake_futex(struct futex_q *q)
 {
 	struct task_struct *p = q->task;

@@ -736,51 +736,8 @@ static void wake_futex(struct task_struct **wake_list, struct futex_q *q)
 	smp_wmb();
 	q->lock_ptr = NULL;
 
-	/*
-	 * Atomically grab the task, if ->futex_wakeup is !0 already it means
-	 * its already queued (either by us or someone else) and will get the
-	 * wakeup due to that.
-	 *
-	 * This cmpxchg() implies a full barrier, which pairs with the write
-	 * barrier implied by the wakeup in wake_futex_list().
-	 */
-	if (cmpxchg(&p->futex_wakeup, 0, p) != 0) {
-		/*
-		 * It was already queued, drop the extra ref and we're done.
-		 */
-		put_task_struct(p);
-		return;
-	}
-
-	/*
-	 * Put the task on our wakeup list by atomically switching it with
-	 * the list head. (XXX its a local list, no possible concurrency,
-	 * this could be written without cmpxchg).
-	 */
-	do {
-		p->futex_wakeup = *wake_list;
-	} while (cmpxchg(wake_list, p->futex_wakeup, p) != p->futex_wakeup);
-}
-
-/*
- * For each task on the list, deliver the pending wakeup and release the
- * task reference obtained in wake_futex().
- */
-static void wake_futex_list(struct task_struct *head)
-{
-	while (head != &init_task) {
-		struct task_struct *next = head->futex_wakeup;
-
-		head->futex_wakeup = NULL;
-		/*
-		 * wake_up_state() implies a wmb() to pair with the queueing
-		 * in wake_futex() so as to not miss wakeups.
-		 */
-		wake_up_state(head, TASK_NORMAL);
-		put_task_struct(head);
-		head = next;
-	}
+	wake_up_state(p, TASK_NORMAL);
+	put_task_struct(p);
 }
 
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
@@ -894,7 +851,6 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	struct futex_q *this, *next;
 	struct plist_head *head;
 	union futex_key key = FUTEX_KEY_INIT;
-	struct task_struct *wake_list = &init_task;
 	int ret;
 
 	if (!bitset)

@@ -919,7 +875,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 			if (!(this->bitset & bitset))
 				continue;
-			wake_futex(&wake_list, this);
+			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
 		}

@@ -927,8 +883,6 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	spin_unlock(&hb->lock);
 	put_futex_key(fshared, &key);
-
-	wake_futex_list(wake_list);
 out:
 	return ret;
 }

@@ -945,7 +899,6 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head;
 	struct futex_q *this, *next;
-	struct task_struct *wake_list = &init_task;
 	int ret, op_ret;
 
 retry:

@@ -996,7 +949,7 @@ retry_private:
 	plist_for_each_entry_safe(this, next, head, list) {
 		if (match_futex (&this->key, &key1)) {
-			wake_futex(&wake_list, this);
+			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
 		}

@@ -1008,7 +961,7 @@ retry_private:
 		op_ret = 0;
 		plist_for_each_entry_safe(this, next, head, list) {
 			if (match_futex (&this->key, &key2)) {
-				wake_futex(&wake_list, this);
+				wake_futex(this);
 				if (++op_ret >= nr_wake2)
 					break;
 			}

@@ -1021,8 +974,6 @@ out_put_keys:
 	put_futex_key(fshared, &key2);
 out_put_key1:
 	put_futex_key(fshared, &key1);
-
-	wake_futex_list(wake_list);
 out:
 	return ret;
 }

@@ -1177,7 +1128,6 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	struct futex_hash_bucket *hb1, *hb2;
 	struct plist_head *head1;
 	struct futex_q *this, *next;
-	struct task_struct *wake_list = &init_task;
 	u32 curval2;
 
 	if (requeue_pi) {

@@ -1322,7 +1272,7 @@ retry_private:
 		 *   woken by futex_unlock_pi().
 		 */
 		if (++task_count <= nr_wake && !requeue_pi) {
-			wake_futex(&wake_list, this);
+			wake_futex(this);
 			continue;
 		}

@@ -1368,8 +1318,6 @@ out_put_keys:
 	put_futex_key(fshared, &key2);
 out_put_key1:
 	put_futex_key(fshared, &key1);
-
-	wake_futex_list(wake_list);
 out:
 	if (pi_state != NULL)
 		free_pi_state(pi_state);

@@ -1805,6 +1753,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 					     current->timer_slack_ns);
 	}
 
+retry:
 	/* Prepare to wait on uaddr. */
 	ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
 	if (ret)

@@ -1822,9 +1771,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		goto out_put_key;
 
 	/*
-	 * We expect signal_pending(current), but another thread may
-	 * have handled it for us already.
+	 * We expect signal_pending(current), but we might be the
+	 * victim of a spurious wakeup as well.
 	 */
+	if (!signal_pending(current)) {
+		put_futex_key(fshared, &q.key);
+		goto retry;
+	}
+
 	ret = -ERESTARTSYS;
 	if (!abs_time)
 		goto out_put_key;
@@ -2131,9 +2085,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
 		 */
 		plist_del(&q->list, &q->list.plist);
 
+		/* Handle spurious wakeups gracefully */
+		ret = -EAGAIN;
 		if (timeout && !timeout->task)
 			ret = -ETIMEDOUT;
-		else
+		else if (signal_pending(current))
 			ret = -ERESTARTNOINTR;
 	}
 	return ret;

@@ -2215,6 +2171,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 	q.bitset = bitset;
 	q.rt_waiter = &rt_waiter;
 
+retry:
 	key2 = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))

@@ -2307,6 +2264,9 @@ out_put_keys:
 out_key2:
 	put_futex_key(fshared, &key2);
 
+	/* Spurious wakeup ? */
+	if (ret == -EAGAIN)
+		goto retry;
 out:
 	if (to) {
 		hrtimer_cancel(&to->timer);
kernel/softirq.c

@@ -1139,9 +1139,7 @@ static const char *softirq_names [] =
   [NET_RX_SOFTIRQ]	= "net-rx",
   [BLOCK_SOFTIRQ]	= "block",
   [TASKLET_SOFTIRQ]	= "tasklet",
-#ifdef CONFIG_HIGH_RES_TIMERS
   [HRTIMER_SOFTIRQ]	= "hrtimer",
-#endif
   [RCU_SOFTIRQ]	= "rcu",
 };
 

@@ -1161,8 +1159,6 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
 	}
 	for (i = 0; i < NR_SOFTIRQS; i++) {
-		if (!softirq_names[i])
-			continue;
 		p = kthread_create(ksoftirqd,
 				   &per_cpu(ksoftirqd, hotcpu)[i],
 				   "sirq-%s/%d", softirq_names[i],

@@ -1179,11 +1175,8 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		for (i = 0; i < NR_SOFTIRQS; i++) {
-			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
-			if (p)
-				wake_up_process(p);
-		}
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			wake_up_process(per_cpu(ksoftirqd, hotcpu)[i].tsk);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:

@@ -1197,11 +1190,9 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 		for (i = 0; i < NR_SOFTIRQS; i++) {
 			param.sched_priority = MAX_RT_PRIO-1;
 			p = per_cpu(ksoftirqd, hotcpu)[i].tsk;
-			if (p) {
-				sched_setscheduler(p, SCHED_FIFO, &param);
-				per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
-				kthread_stop(p);
-			}
+			sched_setscheduler(p, SCHED_FIFO, &param);
+			per_cpu(ksoftirqd, hotcpu)[i].tsk = NULL;
+			kthread_stop(p);
 		}
 		takeover_tasklets(hotcpu);
 		break;
mm/slab.c

@@ -164,6 +164,9 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
 		local_irq_disable();
 }
 
+#define slab_spin_trylock_irq(lock, cpu) \
+	({ int __l = spin_trylock_irq(lock); if (__l) (cpu) = smp_processor_id(); __l; })
+
 # define slab_spin_lock_irq(lock, cpu) \
 	do { spin_lock_irq(lock); (cpu) = smp_processor_id(); } while (0)
 # define slab_spin_unlock_irq(lock, cpu) spin_unlock_irq(lock)
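Note: the added one-line macro relies on a GCC statement expression: `({ ... })` evaluates a block and yields the value of its last expression, so a single macro can both record the CPU number on success and hand the trylock result back to the caller. A standalone sketch of the construct, assuming GCC; try_and_record() and its operands are hypothetical, purely to show the shape of the idiom.

/*
 * GCC statement-expression idiom as used by slab_spin_trylock_irq():
 * run statements, return the last expression's value.
 */
#include <stdio.h>

#define try_and_record(acquired, slot)					\
	({ int __l = (acquired); if (__l) (slot) = 1; __l; })

int main(void)
{
	int slot = 0;

	if (try_and_record(1, slot))
		printf("taken, slot=%d\n", slot);	/* prints "taken, slot=1" */
	return 0;
}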
@@ -241,10 +244,26 @@ static void slab_irq_disable_GFP_WAIT(gfp_t flags, int *cpu)
 		slab_irq_disable(*cpu);
 }
 
+static int _slab_spin_trylock_irq(spinlock_t *lock, int *cpu)
+{
+	int locked;
+
+	slab_irq_disable(*cpu);
+	locked = spin_trylock(lock);
+	if (!locked)
+		slab_irq_enable(*cpu);
+
+	return locked;
+}
+
+# define slab_spin_trylock_irq(lock, cpu) \
+	_slab_spin_trylock_irq((lock), &(cpu))
+
 # define slab_spin_lock_irq(lock, cpu) \
 	do { slab_irq_disable(cpu); spin_lock(lock); } while (0)
 # define slab_spin_unlock_irq(lock, cpu) \
 	do { spin_unlock(lock); slab_irq_enable(cpu); } while (0)
 
 # define slab_spin_lock_irqsave(lock, flags, cpu) \
	do { slab_irq_disable(cpu); spin_lock_irqsave(lock, flags); } while (0)
 # define slab_spin_unlock_irqrestore(lock, flags, cpu) \
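Note: _slab_spin_trylock_irq() follows the usual trylock discipline: perform the side effect first (disabling interrupts via slab_irq_disable()), attempt the lock, and undo the side effect if the lock is not taken, so a failed trylock leaves the caller's state untouched. A userspace analogue with pthread_mutex_trylock() follows; the prepare/undo counter merely stands in for the IRQ state, and the names are illustrative.

/*
 * Trylock-with-rollback in userspace: take the side effect, try the
 * lock, roll the side effect back on failure.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int prepared;	/* stands in for the per-CPU IRQ-disabled state */

static int trylock_prepared(void)
{
	int locked;

	prepared++;				/* "slab_irq_disable()" */
	locked = (pthread_mutex_trylock(&lock) == 0);
	if (!locked)
		prepared--;			/* roll back on failure */

	return locked;
}

int main(void)
{
	if (trylock_prepared()) {
		printf("locked, prepared=%d\n", prepared);	/* prints 1 */
		pthread_mutex_unlock(&lock);
		prepared--;
	}
	return 0;
}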
@@ -1063,7 +1082,7 @@ static int transfer_objects(struct array_cache *to,
 #ifndef CONFIG_NUMA
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3, this_cpu) 0
+#define reap_alien(cachep, l3) 0
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
 {

@@ -1161,16 +1180,17 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
 static int
-reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3, int *this_cpu)
+reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = per_cpu(reap_node, *this_cpu);
+	int node = __get_cpu_var(reap_node);
+	int this_cpu;
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
 
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node, this_cpu);
-			spin_unlock_irq(&ac->lock);
+		if (ac && ac->avail && slab_spin_trylock_irq(&ac->lock, this_cpu)) {
+			__drain_alien_cache(cachep, ac, node, &this_cpu);
+			slab_spin_unlock_irq(&ac->lock, this_cpu);
 			return 1;
 		}
 	}

@@ -4274,7 +4294,7 @@ int drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  */
 static void cache_reap(struct work_struct *w)
 {
-	int this_cpu = raw_smp_processor_id(), node = cpu_to_node(this_cpu);
+	int this_cpu = smp_processor_id(), node = cpu_to_node(this_cpu);
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
 	struct delayed_work *work = to_delayed_work(w);

@@ -4294,7 +4314,7 @@ static void cache_reap(struct work_struct *w)
 		 */
 		l3 = searchp->nodelists[node];
 
-		work_done += reap_alien(searchp, l3, &this_cpu);
+		work_done += reap_alien(searchp, l3);
 
 		node = cpu_to_node(this_cpu);
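Note: __get_cpu_var(reap_node) reads the calling CPU's instance of a per-CPU variable (this kernel predates the this_cpu_* accessors). Thread-local storage is the nearest userspace analogue; a minimal sketch follows, assuming GCC's __thread keyword, with the worker names purely illustrative.

/*
 * Userspace analogue of a per-CPU variable: __thread gives each thread
 * its own instance, much as DEFINE_PER_CPU gives each CPU its own
 * reap_node slot read via __get_cpu_var().
 */
#include <pthread.h>
#include <stdio.h>

static __thread int reap_node;	/* one copy per thread */

static void *worker(void *arg)
{
	reap_node = (int)(long)arg;	/* each thread sees only its own copy */
	printf("worker sees reap_node=%d\n", reap_node);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1L);
	pthread_create(&b, NULL, worker, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("main sees reap_node=%d\n", reap_node);	/* still 0 */
	return 0;
}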