Commit 4b32d0a4 authored by Peter Zijlstra, committed by Linus Torvalds

lockdep: various fixes

 - update the copyright notices
 - use the default hash function
 - fix a thinko in a BUILD_BUG_ON
 - add a WARN_ON to spot inconsistent naming
 - fix a termination issue in /proc/lock_stat

[akpm@linux-foundation.org: cleanups]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4fe87745
include/linux/lockdep.h:

 /*
  * Runtime locking correctness validator
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * see Documentation/lockdep-design.txt for more details.
  */
kernel/lockdep.c:

@@ -5,7 +5,8 @@
  *
  * Started by Ingo Molnar:
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:
@@ -37,6 +38,7 @@
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
+#include <linux/hash.h>
 #include <asm/sections.h>
@@ -238,8 +240,7 @@ LIST_HEAD(all_lock_classes);
  */
 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
-#define CLASSHASH_MASK		(CLASSHASH_SIZE - 1)
-#define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
+#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))

 static struct list_head classhash_table[CLASSHASH_SIZE];
@@ -250,9 +251,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
  */
 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
-#define CHAINHASH_MASK		(CHAINHASH_SIZE - 1)
-#define __chainhashfn(chain) \
-		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
+#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

 static struct list_head chainhash_table[CHAINHASH_SIZE];
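Note: both hash tables now use hash_long() from <linux/hash.h> instead of the open-coded shift-and-add fold, so the dedicated *_MASK constants go away. hash_long() is the kernel's generic multiplicative hash; a rough user-space sketch of the idea follows (the constant below is only a stand-in, the real one lives in <linux/hash.h>):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the golden-ratio constant used by the kernel's hash_long(). */
#define GOLDEN_RATIO_SKETCH	0x61C8864680B583EBull

/* Multiplicative hash: multiply by a large odd constant, keep the top bits. */
static unsigned int hash_long_sketch(uint64_t val, unsigned int bits)
{
	return (unsigned int)((val * GOLDEN_RATIO_SKETCH) >> (64 - bits));
}

int main(void)
{
	/* e.g. the address of a lock_class_key, as hashed by __classhashfn() */
	uint64_t key = 0xffffffff81234560ull;

	printf("bucket = %u\n", hash_long_sketch(key, 12));
	return 0;
}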
@@ -676,7 +675,8 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * (or spin_lock_init()) call - which acts as the key. For static
 	 * locks we use the lock object itself as the key.
 	 */
-	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
+	BUILD_BUG_ON(sizeof(struct lock_class_key) >
+			sizeof(struct lockdep_map));

 	key = lock->key->subkeys + subclass;
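The thinko was comparing against sizeof(struct lock_class), which says nothing about the static-lock trick the comment describes: a statically allocated lock uses its own lockdep_map as the key, so what matters is that a lock_class_key fits inside a lockdep_map. A self-contained sketch of the corrected check, using illustrative stand-in structures (real layouts are in <linux/lockdep.h>):

#include <stddef.h>

/* Stand-in structures; sizes are illustrative only. */
struct lock_class_key {
	char subkeys[8];			/* one address per subclass */
};

struct lockdep_map {
	struct lock_class_key	*key;
	void			*class_cache;
	const char		*name;
};

/* Compile-time assertion in the spirit of BUILD_BUG_ON(). */
#define BUILD_BUG_ON_SKETCH(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	/* static locks use the lock object itself as the key, so the key
	 * type must not outgrow the lockdep_map embedded in every lock */
	BUILD_BUG_ON_SKETCH(sizeof(struct lock_class_key) >
			    sizeof(struct lockdep_map));
	return 0;
}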
@@ -686,9 +686,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * We can walk the hash lockfree, because the hash only
 	 * grows, and we are careful when adding entries to the end:
 	 */
-	list_for_each_entry(class, hash_head, hash_entry)
-		if (class->key == key)
+	list_for_each_entry(class, hash_head, hash_entry) {
+		if (class->key == key) {
+			WARN_ON_ONCE(class->name != lock->name);
 			return class;
+		}
+	}

 	return NULL;
 }
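The added WARN_ON_ONCE() flags inconsistent naming: a class is looked up by key, so if two call sites register the same key under different name strings, lockdep reports show whichever name was registered first. A hypothetical offender (names and functions below are invented for illustration) would now warn once:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo {
	spinlock_t lock;
};

static struct lock_class_key foo_lock_key;

static void foo_init_a(struct foo *f)
{
	spin_lock_init(&f->lock);
	lockdep_set_class_and_name(&f->lock, &foo_lock_key, "foo_lock");
}

static void foo_init_b(struct foo *f)
{
	spin_lock_init(&f->lock);
	/* same key, different name string: look_up_lock_class() now warns */
	lockdep_set_class_and_name(&f->lock, &foo_lock_key, "foo_other_lock");
}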
kernel/lockdep_proc.c:

@@ -5,7 +5,8 @@
  *
  * Started by Ingo Molnar:
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * Code for /proc/lockdep and /proc/lockdep_stats:
  *
@@ -498,6 +499,9 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
 	if (data->iter == data->stats)
 		seq_header(m);

+	if (data->iter == data->iter_end)
+		data->iter = NULL;
+
 	return data->iter;
 }
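The /proc/lock_stat fix: ls_start() now returns NULL once data->iter has reached data->iter_end, which is how a seq_file ->start() handler signals end-of-data; without it the read could keep going past the last record. The general convention, sketched with hypothetical data (not the real lock_stat iterator):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static struct my_record {
	int dummy;
} records[16];

/* ->start() must return NULL once the position is past the last record,
 * otherwise seq_read() keeps calling ->show() and the read never ends. */
static void *example_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= (loff_t)ARRAY_SIZE(records))
		return NULL;

	return &records[*pos];
}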