Commit d7aaba14 authored by Ming Lei, committed by Peter Zijlstra

lockdep: Implement find_usage_*wards by BFS

This patch uses BFS to implement find_usage_*wards(), which was
originally implemented using DFS.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-6-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent db0002a3
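Context for the diff below: the recursive DFS walkers (bounded by RECURSION_LIMIT) are replaced by calls into the generic __bfs_forwards()/__bfs_backwards() helpers introduced earlier in this series, driven by a small match callback. The following is a minimal stand-alone sketch of that pattern; the node type, integer ids, and fixed-size arrays are illustrative assumptions, not the kernel's circular-queue implementation over struct lock_list.

#include <stdint.h>
#include <stdio.h>

#define MAX_NODES  64
#define QUEUE_SIZE 64

struct node {
        int id;
        unsigned long usage_mask;       /* like lock_class::usage_mask */
        int nr_deps;
        struct node *deps[4];           /* forward dependency edges */
};

/* like the patch's usage_match(): does this node carry the wanted bit? */
static int usage_match(struct node *n, void *bit)
{
        return (n->usage_mask & (1UL << (uintptr_t)bit)) != 0;
}

/*
 * Like __bfs_forwards(): return 0 and set *target when a matching node
 * is reachable from src, 1 when none is, <0 when the queue overflows.
 */
static int bfs_forwards(struct node *src, void *data,
                        int (*match)(struct node *, void *),
                        struct node **target)
{
        struct node *queue[QUEUE_SIZE];
        int visited[MAX_NODES] = { 0 };
        int head = 0, tail = 0, i;

        queue[tail++] = src;
        visited[src->id] = 1;

        while (head < tail) {
                struct node *n = queue[head++];

                if (match(n, data)) {
                        *target = n;
                        return 0;               /* match found */
                }
                for (i = 0; i < n->nr_deps; i++) {
                        struct node *next = n->deps[i];

                        if (visited[next->id])
                                continue;       /* already queued */
                        if (tail == QUEUE_SIZE)
                                return -1;      /* out of queue space */
                        visited[next->id] = 1;
                        queue[tail++] = next;
                }
        }
        return 1;                               /* subgraph holds no match */
}

int main(void)
{
        /* a -> b -> c, where only c has usage bit 3 set */
        struct node c = { .id = 2, .usage_mask = 1UL << 3 };
        struct node b = { .id = 1, .nr_deps = 1, .deps = { &c } };
        struct node a = { .id = 0, .nr_deps = 1, .deps = { &b } };
        struct node *target = NULL;

        if (bfs_forwards(&a, (void *)(uintptr_t)3, usage_match, &target) == 0)
                printf("matched node id %d\n", target->id);      /* -> 2 */
        return 0;
}

Unlike the recursive walk, the queue-based walk needs no depth counter and fails gracefully (with an error code rather than stack exhaustion) when the graph is larger than the queue.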
@@ -963,7 +963,7 @@ exit:
         return ret;
 }
 
-static inline int __bfs_forward(struct lock_list *src_entry,
+static inline int __bfs_forwards(struct lock_list *src_entry,
                         void *data,
                         int (*match)(struct lock_list *entry, void *data),
                         struct lock_list **target_entry)
@@ -972,7 +972,7 @@ static inline int __bfs_forward(struct lock_list *src_entry,
 }
 
-static inline int __bfs_backward(struct lock_list *src_entry,
+static inline int __bfs_backwards(struct lock_list *src_entry,
                         void *data,
                         int (*match)(struct lock_list *entry, void *data),
                         struct lock_list **target_entry)
@@ -1085,18 +1085,6 @@ static noinline int print_bfs_bug(int ret)
         return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
-{
-        if (!debug_locks_off_graph_unlock())
-                return 0;
-
-        WARN_ON(1);
-
-        return 0;
-}
-
 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
                         unsigned int depth)
 {
@@ -1170,7 +1158,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
         debug_atomic_inc(&nr_cyclic_checks);
 
-        result = __bfs_forward(root, target, class_equal, target_entry);
+        result = __bfs_forwards(root, target, class_equal, target_entry);
 
         return result;
 }
@@ -1181,101 +1169,70 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
 static struct lock_class *forwards_match, *backwards_match;
 
+#define BFS_PROCESS_RET(ret)    do { \
+                if (ret < 0) \
+                        return print_bfs_bug(ret); \
+                if (ret == 1) \
+                        return 1; \
+        } while (0)
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+        return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+                        struct lock_list **target_entry)
 {
-        struct lock_list *entry;
-        int ret;
-
-        if (lockdep_dependency_visit(source, depth))
-                return 1;
-
-        if (depth > max_recursion_depth)
-                max_recursion_depth = depth;
-        if (depth >= RECURSION_LIMIT)
-                return print_infinite_recursion_bug();
+        int result;
 
         debug_atomic_inc(&nr_find_usage_forwards_checks);
-        if (source->usage_mask & (1 << find_usage_bit)) {
-                forwards_match = source;
-                return 2;
-        }
 
-        /*
-         * Check this lock's dependency list:
-         */
-        list_for_each_entry(entry, &source->locks_after, entry) {
-                debug_atomic_inc(&nr_find_usage_forwards_recursions);
-                ret = find_usage_forwards(entry->class, depth+1);
-                if (ret == 2 || ret == 0)
-                        return ret;
-        }
-        return 1;
+        result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+        return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+                        struct lock_list **target_entry)
 {
-        struct lock_list *entry;
-        int ret;
-
-        if (lockdep_dependency_visit(source, depth))
-                return 1;
-
-        if (!__raw_spin_is_locked(&lockdep_lock))
-                return DEBUG_LOCKS_WARN_ON(1);
-
-        if (depth > max_recursion_depth)
-                max_recursion_depth = depth;
-        if (depth >= RECURSION_LIMIT)
-                return print_infinite_recursion_bug();
+        int result;
 
         debug_atomic_inc(&nr_find_usage_backwards_checks);
-        if (source->usage_mask & (1 << find_usage_bit)) {
-                backwards_match = source;
-                return 2;
-        }
 
-        if (!source && debug_locks_off_graph_unlock()) {
-                WARN_ON(1);
-                return 0;
-        }
-
-        /*
-         * Check this lock's dependency list:
-         */
-        list_for_each_entry(entry, &source->locks_before, entry) {
-                debug_atomic_inc(&nr_find_usage_backwards_recursions);
-                ret = find_usage_backwards(entry->class, depth+1);
-                if (ret == 2 || ret == 0)
-                        return ret;
-        }
-        return 1;
+        result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+
+        return result;
 }
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
                         struct held_lock *prev,
@@ -1343,18 +1300,21 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
             enum lock_usage_bit bit_forwards, const char *irqclass)
 {
         int ret;
+        struct lock_list this;
+        struct lock_list *uninitialized_var(target_entry);
 
-        find_usage_bit = bit_backwards;
-        /* fills in <backwards_match> */
-        ret = find_usage_backwards(hlock_class(prev), 0);
-        if (!ret || ret == 1)
-                return ret;
+        this.parent = NULL;
 
-        find_usage_bit = bit_forwards;
-        ret = find_usage_forwards(hlock_class(next), 0);
-        if (!ret || ret == 1)
-                return ret;
-        /* ret == 2 */
+        this.class = hlock_class(prev);
+        ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+        BFS_PROCESS_RET(ret);
+        backwards_match = target_entry->class;
+
+        this.class = hlock_class(next);
+        ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+        BFS_PROCESS_RET(ret);
+        forwards_match = target_entry->class;
+
         return print_bad_irq_dependency(curr, prev, next,
                         bit_backwards, bit_forwards, irqclass);
 }
@@ -2029,14 +1989,16 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit bit, const char *irqclass)
 {
         int ret;
+        struct lock_list root;
+        struct lock_list *uninitialized_var(target_entry);
 
-        find_usage_bit = bit;
-        /* fills in <forwards_match> */
-        ret = find_usage_forwards(hlock_class(this), 0);
-        if (!ret || ret == 1)
-                return ret;
+        root.parent = NULL;
+        root.class = hlock_class(this);
+        ret = find_usage_forwards(&root, bit, &target_entry);
+        BFS_PROCESS_RET(ret);
 
-        return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+        return print_irq_inversion_bug(curr, target_entry->class,
+                                        this, 1, irqclass);
 }
 
 /*
@@ -2048,14 +2010,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
                       enum lock_usage_bit bit, const char *irqclass)
 {
         int ret;
+        struct lock_list root;
+        struct lock_list *uninitialized_var(target_entry);
 
-        find_usage_bit = bit;
-        /* fills in <backwards_match> */
-        ret = find_usage_backwards(hlock_class(this), 0);
-        if (!ret || ret == 1)
-                return ret;
+        root.parent = NULL;
+        root.class = hlock_class(this);
+        ret = find_usage_backwards(&root, bit, &target_entry);
+        BFS_PROCESS_RET(ret);
 
-        return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+        return print_irq_inversion_bug(curr, target_entry->class,
+                                        this, 1, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
...
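A note on the return-value convention this patch introduces: the finders now return 0 on a match (with *target_entry pointing at the hit), 1 when the subgraph holds no match, and a negative value on internal error, and BFS_PROCESS_RET() folds the last two cases into an early return so callers only code the "found" path. A stand-alone illustration of that shape, with find_something() and report_bug() as hypothetical stand-ins for the find_usage_*wards() helpers and print_bfs_bug():

#include <stdio.h>

static int report_bug(int ret)
{
        fprintf(stderr, "search failed internally: %d\n", ret);
        return 0;
}

/* mirrors the patch's macro: bail out unless the finder reported a match */
#define BFS_PROCESS_RET(ret)    do { \
                if ((ret) < 0) \
                        return report_bug(ret); \
                if ((ret) == 1) \
                        return 1; \
        } while (0)

/* hypothetical finder following the 0 / 1 / <0 convention */
static int find_something(int key, int *target)
{
        if (key < 0)
                return -1;      /* internal error */
        if (key % 2)
                return 1;       /* nothing matched */
        *target = key * key;
        return 0;               /* match: *target is valid */
}

static int check_key(int key)
{
        int target = 0;
        int ret = find_something(key, &target);

        /* returns early unless ret == 0, i.e. unless a match was found */
        BFS_PROCESS_RET(ret);

        printf("key %d matched, target %d\n", key, target);
        return 0;
}

int main(void)
{
        check_key(4);   /* prints the match */
        check_key(3);   /* silently returns 1 */
        check_key(-1);  /* reports an internal error */
        return 0;
}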