Commit 23a0ee90, authored Aug 12, 2008 by Ingo Molnar
Merge branch 'core/locking' into core/urgent
Parents: cc7a486c 0f2bc27b
Showing 15 changed files with 366 additions and 146 deletions.
fs/jbd/transaction.c              +2   -2
fs/jbd2/transaction.c             +2   -2
include/linux/lockdep.h           +49  -21
include/linux/rcuclassic.h        +1   -1
include/linux/spinlock.h          +6   -0
include/linux/spinlock_api_smp.h  +2   -0
kernel/lockdep.c                  +239 -56
kernel/lockdep_internals.h        +3   -3
kernel/lockdep_proc.c             +6   -31
kernel/sched.c                    +13  -8
kernel/sched_rt.c                 +5   -3
kernel/spinlock.c                 +11  -0
kernel/workqueue.c                +12  -12
lib/debug_locks.c                 +2   -0
mm/mmap.c                         +13  -7
fs/jbd/transaction.c

@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;

@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);
 	jbd_free_handle(handle);
 	return err;
fs/jbd2/transaction.c

@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 		goto out;
 	}
-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
 out:
 	return handle;
 }

@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);
 	jbd2_free_handle(handle);
 	return err;
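These jbd/jbd2 hunks replace the open-coded lockdep annotations on the journal handle's pseudo-lock with the new lock_map_acquire()/lock_map_release() wrappers defined in include/linux/lockdep.h below. A minimal sketch of the pattern for a hypothetical subsystem that tracks an object lifetime with its own lockdep_map (the "foo" names are illustrative and not part of this commit):

/* Hypothetical example -- not part of this commit. */
static struct lock_class_key foo_handle_key;
static struct lockdep_map foo_handle_map;

static void foo_init(void)
{
	lockdep_init_map(&foo_handle_map, "foo_handle", &foo_handle_key, 0);
}

static void foo_start(void)
{
	/* previously open-coded as lock_acquire(&map, 0, 0, 0, 2, _THIS_IP_) */
	lock_map_acquire(&foo_handle_map);
}

static void foo_stop(void)
{
	/* previously open-coded as lock_release(&map, 1, _THIS_IP_) */
	lock_map_release(&foo_handle_map);
}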
include/linux/lockdep.h

@@ -89,6 +89,7 @@ struct lock_class {
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
+	unsigned int			dep_gen_id;

 	/*
 	 * IRQ/softirq usage tracking bits:

@@ -189,6 +190,14 @@ struct lock_chain {
 	u64				chain_key;
 };

+#define MAX_LOCKDEP_KEYS_BITS		13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
 struct held_lock {
 	/*
 	 * One-way hash of the dependency chain up to this point. We

@@ -205,14 +214,14 @@ struct held_lock {
 	 * with zero), here we store the previous hash value:
 	 */
 	u64				prev_chain_key;
-	struct lock_class		*class;
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
+	struct lockdep_map		*nest_lock;
 #ifdef CONFIG_LOCK_STAT
 	u64				waittime_stamp;
 	u64				holdtime_stamp;
 #endif
+	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'

@@ -226,11 +235,11 @@ struct held_lock {
 	 * The following field is used to detect when we cross into an
 	 * interrupt context:
 	 */
-	int				irq_context;
-	int				trylock;
-	int				read;
-	int				check;
-	int				hardirqs_off;
+	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+	unsigned int trylock:1;
+	unsigned int read:2;        /* see lock_acquire() comment */
+	unsigned int check:2;       /* see lock_acquire() comment */
+	unsigned int hardirqs_off:1;
 };

 /*

@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  * 2: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			 int trylock, int read, int check, unsigned long ip);
+			 int trylock, int read, int check,
+			 struct lockdep_map *nest_lock, unsigned long ip);

 extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+			      unsigned long ip);
+
 # define INIT_LOCKDEP				.lockdep_recursion = 0,

 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

@@ -313,8 +326,9 @@ static inline void lockdep_on(void)
 {
 }

-# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
 # define lock_release(l, n, i)			do { } while (0)
+# define lock_set_subclass(l, s, i)		do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)

@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
 # else
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define spin_release(l, n, i)			lock_release(l, n, i)
 #else

@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
 # else
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
 # endif
 # define rwlock_release(l, n, i)		lock_release(l, n, i)
 #else

@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
 # else
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
 # endif
 # define mutex_release(l, n, i)			lock_release(l, n, i)
 #else

@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)			lock_release(l, n, i)
 #else

@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define rwsem_release(l, n, i)			do { } while (0)
 #endif

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# else
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# endif
+# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
+#else
+# define lock_map_acquire(l)			do { } while (0)
+# define lock_map_release(l)			do { } while (0)
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
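The held_lock shrink works by replacing the lock_class pointer with a small class_idx bitfield; index 0 is reserved to mean "no class", which is why MAX_LOCKDEP_KEYS is one less than 1 << MAX_LOCKDEP_KEYS_BITS. The diff for kernel/lockdep.c is collapsed below, but the comment above implies a lookup helper along these lines (a sketch only, assuming a lock_classes[] array as in earlier lockdep code; not copied from this commit):

/* Sketch: how a class_idx offset by one maps back to a lock_class. */
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);	/* 0 means "no class" */
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}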
include/linux/rcuclassic.h

@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern struct lockdep_map rcu_lock_map;
 # define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
 # define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
 #else
 # define rcu_read_acquire()	do { } while (0)
include/linux/spinlock.h

@@ -183,8 +183,14 @@ do { \
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)				\
+	 do {								\
+		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+	 } while (0)
 #else
 # define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
 #endif

 #define write_lock(lock)		_write_lock(lock)
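spin_lock_nest_lock() lets a caller take many locks of the same class while holding a single outer lock that serializes all of them; this is exactly how mm_take_all_locks() in mm/mmap.c further down nests every i_mmap_lock and anon_vma->lock under mm->mmap_sem. A hypothetical usage sketch with illustrative names that are not part of this commit (any lock type with a dep_map, such as a mutex or rw_semaphore, can serve as the nest lock):

struct bucket {
	spinlock_t lock;
	/* ... */
};

static DEFINE_MUTEX(all_buckets_mutex);		/* the outer "nest" lock */

static void lock_all_buckets(struct bucket *b, int nr)
{
	int i;

	mutex_lock(&all_buckets_mutex);
	for (i = 0; i < nr; i++)
		/* each inner lock is annotated as nested under the mutex */
		spin_lock_nest_lock(&b[i].lock, &all_buckets_mutex);
}

static void unlock_all_buckets(struct bucket *b, int nr)
{
	int i;

	for (i = nr - 1; i >= 0; i--)
		spin_unlock(&b[i].lock);	/* plain unlock; no nest-aware variant */
	mutex_unlock(&all_buckets_mutex);
}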
include/linux/spinlock_api_smp.h

@@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr);
 void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+							__acquires(lock);
 void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
 void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)	__acquires(lock);
kernel/lockdep.c

(Diff collapsed in the web view and not reproduced here: +239 -56.)
kernel/lockdep_internals.h

@@ -17,9 +17,6 @@
  */
 #define MAX_LOCKDEP_ENTRIES	8192UL

-#define MAX_LOCKDEP_KEYS_BITS	11
-#define MAX_LOCKDEP_KEYS	(1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS	14
 #define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;

+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Various lockdep statistics:
kernel/lockdep_proc.c

@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
 {
 }

-static unsigned long count_forward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += count_forward_deps(entry->class);
-
-	return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += count_backward_deps(entry->class);
-
-	return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
 	char str[128];

@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = count_forward_deps(class);
+	nr_forward_deps = lockdep_count_forward_deps(class);
 	seq_printf(m, " FD:%5ld", nr_forward_deps);

-	nr_backward_deps = count_backward_deps(class);
+	nr_backward_deps = lockdep_count_backward_deps(class);
 	seq_printf(m, " BD:%5ld", nr_backward_deps);

 	get_usage_chars(class, &c1, &c2, &c3, &c4);

@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v)
 	for (i = 0; i < chain->depth; i++) {
 		class = lock_chain_get_class(chain, i);
+		if (!class->key)
+			continue;
+
 		seq_printf(m, "[%p] ", class->key);
 		print_name(m, class);
 		seq_puts(m, "\n");

@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 			nr_hardirq_read_unsafe++;

-		sum_forward_deps += count_forward_deps(class);
+		sum_forward_deps += lockdep_count_forward_deps(class);
 	}
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
kernel/sched.c

@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };

 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);

@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }

+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only

@@ -3637,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);

 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);

@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }

 #ifdef CONFIG_NO_HZ

@@ -8000,7 +8006,6 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
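The scheduler hunks drop the per-runqueue lock_class_key (all rq->lock instances now share one class) and instead mark the second lock of each double-lock with spin_lock_nested(..., SINGLE_DEPTH_NESTING). Because double_lock_balance() can reacquire this_rq->lock with that nested subclass, the new double_unlock_balance() resets it to subclass 0 with lock_set_subclass(). A generic sketch of the address-ordered double-lock idiom (illustrative only, not copied from this file):

/* Sketch: take two locks of the same class in a stable (address) order. */
static void double_lock(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);
	} else if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}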
kernel/sched_rt.c

@@ -861,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3

 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)

@@ -1022,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				break;

 			/* try again */
-			spin_unlock(&lowest_rq->lock);
+			double_unlock_balance(rq, lowest_rq);
 			lowest_rq = NULL;
 		}

@@ -1091,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
 	resched_task(lowest_rq->curr);

-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);

 	ret = 1;
 out:

@@ -1197,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}

 	return ret;
kernel/spinlock.c

@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
 	unsigned long flags;

@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);

+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+				     struct lockdep_map *nest_lock)
+{
+	preempt_disable();
+	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nest_lock);
+
 #endif

 void __lockfunc _spin_unlock(spinlock_t *lock)
kernel/workqueue.c

@@ -290,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
 		f(work);
-		lock_release(&lockdep_map, 1, _THIS_IP_);
-		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);

 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "

@@ -413,8 +413,8 @@ void flush_workqueue(struct workqueue_struct *wq)
 	int cpu;

 	might_sleep();
-	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }

@@ -441,8 +441,8 @@ int flush_work(struct work_struct *work)
 	if (!cwq)
 		return 0;

-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);

 	prev = NULL;
 	spin_lock_irq(&cwq->lock);

@@ -536,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
 	might_sleep();

-	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);

 	cwq = get_wq_data(work);
 	if (!cwq)

@@ -872,8 +872,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	if (cwq->thread == NULL)
 		return;

-	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);

 	flush_cpu_workqueue(cwq);
 	/*
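All of these workqueue hunks are mechanical conversions to the lock_map_* wrappers; the annotation pattern itself is unchanged. On the flush/wait side the map is acquired and immediately released, which records a dependency on the workqueue's (or work item's) pseudo-lock without actually holding it, so lockdep can flag deadlocks such as flushing a workqueue from a work item that runs on it. A sketch of the two sides, assuming a hypothetical lockdep_map and callback (not taken from this file):

/* Runner side: the pseudo-lock is held for the duration of the callback. */
static void run_one(struct lockdep_map *map, void (*fn)(void))
{
	lock_map_acquire(map);
	fn();
	lock_map_release(map);
}

/* Waiter side: acquire+release only records "waiting here depends on map". */
static void wait_for_runner(struct lockdep_map *map)
{
	lock_map_acquire(map);
	lock_map_release(map);
	/* ... then actually wait for the runner to finish ... */
}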
lib/debug_locks.c

@@ -8,6 +8,7 @@
  *
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
+#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>

@@ -37,6 +38,7 @@ int debug_locks_off(void)
 {
 	if (xchg(&debug_locks, 0)) {
 		if (!debug_locks_silent) {
+			oops_in_progress = 1;
 			console_verbose();
 			return 1;
 		}
mm/mmap.c

@@ -2273,14 +2273,14 @@ int install_special_mapping(struct mm_struct *mm,
 static DEFINE_MUTEX(mm_all_locks_mutex);

-static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
 	if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
 		/*
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock(&anon_vma->lock);
+		spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->lock. If some other vma in this mm shares

@@ -2296,7 +2296,7 @@ static void vm_lock_anon_vma(struct anon_vma *anon_vma)
 	}
 }

-static void vm_lock_mapping(struct address_space *mapping)
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 {
 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
 		/*

@@ -2310,7 +2310,7 @@ static void vm_lock_mapping(struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		spin_lock(&mapping->i_mmap_lock);
+		spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
 	}
 }

@@ -2358,11 +2358,17 @@ int mm_take_all_locks(struct mm_struct *mm)
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (signal_pending(current))
 			goto out_unlock;
-		if (vma->anon_vma)
-			vm_lock_anon_vma(vma->anon_vma);
 		if (vma->vm_file && vma->vm_file->f_mapping)
-			vm_lock_mapping(vma->vm_file->f_mapping);
+			vm_lock_mapping(mm, vma->vm_file->f_mapping);
+	}
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (signal_pending(current))
+			goto out_unlock;
+		if (vma->anon_vma)
+			vm_lock_anon_vma(mm, vma->anon_vma);
 	}
+
 	ret = 0;

 out_unlock: