Commit 2c4f6219 authored by David S. Miller

[TCP]: Fix MD5 signature pool locking.

The locking calls assumed that these code paths were only
invoked in software interrupt context, but that isn't true.

Therefore we need to use spin_{lock,unlock}_bh() throughout.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7f62ad5d
...@@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void) ...@@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void)
{ {
struct tcp_md5sig_pool **pool = NULL; struct tcp_md5sig_pool **pool = NULL;
spin_lock(&tcp_md5sig_pool_lock); spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) { if (--tcp_md5sig_users == 0) {
pool = tcp_md5sig_pool; pool = tcp_md5sig_pool;
tcp_md5sig_pool = NULL; tcp_md5sig_pool = NULL;
} }
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
if (pool) if (pool)
__tcp_free_md5sig_pool(pool); __tcp_free_md5sig_pool(pool);
} }
...@@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void) ...@@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
int alloc = 0; int alloc = 0;
retry: retry:
spin_lock(&tcp_md5sig_pool_lock); spin_lock_bh(&tcp_md5sig_pool_lock);
pool = tcp_md5sig_pool; pool = tcp_md5sig_pool;
if (tcp_md5sig_users++ == 0) { if (tcp_md5sig_users++ == 0) {
alloc = 1; alloc = 1;
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
} else if (!pool) { } else if (!pool) {
tcp_md5sig_users--; tcp_md5sig_users--;
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
cpu_relax(); cpu_relax();
goto retry; goto retry;
} else } else
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
if (alloc) { if (alloc) {
/* we cannot hold spinlock here because this may sleep. */ /* we cannot hold spinlock here because this may sleep. */
struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(); struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
spin_lock(&tcp_md5sig_pool_lock); spin_lock_bh(&tcp_md5sig_pool_lock);
if (!p) { if (!p) {
tcp_md5sig_users--; tcp_md5sig_users--;
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
return NULL; return NULL;
} }
pool = tcp_md5sig_pool; pool = tcp_md5sig_pool;
if (pool) { if (pool) {
/* oops, it has already been assigned. */ /* oops, it has already been assigned. */
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
__tcp_free_md5sig_pool(p); __tcp_free_md5sig_pool(p);
} else { } else {
tcp_md5sig_pool = pool = p; tcp_md5sig_pool = pool = p;
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
} }
} }
return pool; return pool;
...@@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool); ...@@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{ {
struct tcp_md5sig_pool **p; struct tcp_md5sig_pool **p;
spin_lock(&tcp_md5sig_pool_lock); spin_lock_bh(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool; p = tcp_md5sig_pool;
if (p) if (p)
tcp_md5sig_users++; tcp_md5sig_users++;
spin_unlock(&tcp_md5sig_pool_lock); spin_unlock_bh(&tcp_md5sig_pool_lock);
return (p ? *per_cpu_ptr(p, cpu) : NULL); return (p ? *per_cpu_ptr(p, cpu) : NULL);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment