Commit 75f82937 authored by Ingo Molnar, committed by Thomas Gleixner

net: Convert netfilter to percpu_locked

Allows that code to be preemptible.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 25b665a7
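The conversion replaces plain per-CPU data, which is only safe to touch while preemption or bottom halves are disabled, with "locked" per-CPU data that carries its own lock, so the formerly implicit critical sections can be preempted. The real primitives live elsewhere in the RT patchset; the following is only a minimal sketch of the idea, assuming a spinlock (a sleeping lock on PREEMPT_RT) paired with each CPU's slot:

    /*
     * Sketch only -- not the RT tree's actual implementation.
     * Pair every CPU's variable with a lock so that holders of the
     * slot may be preempted, or even migrate to another CPU.
     */
    #include <linux/percpu.h>
    #include <linux/spinlock.h>
    #include <linux/smp.h>

    #define DEFINE_PER_CPU_LOCKED(type, name)			\
    	DEFINE_PER_CPU(spinlock_t, name##_lock);		\
    	DEFINE_PER_CPU(type, name)	/* locks need spin_lock_init() at boot */

    /* Plain access for code that already holds the slot's lock. */
    #define per_cpu_var_locked(var, cpu)	per_cpu(var, (cpu))

    /*
     * Lock a CPU's slot and return its value, recording the CPU in
     * *cpuptr. Migrating after the raw_smp_processor_id() read is
     * harmless: we lock, use and unlock the slot we recorded,
     * wherever we end up running.
     */
    #define get_cpu_var_locked(var, cpuptr)			\
    ({								\
    	int __cpu = raw_smp_processor_id();			\
    								\
    	*(cpuptr) = __cpu;					\
    	spin_lock(&per_cpu(var##_lock, __cpu));			\
    	per_cpu(var, __cpu);					\
    })

    #define put_cpu_var_locked(var, cpu)			\
    	spin_unlock(&per_cpu(var##_lock, (cpu)))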
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -39,9 +39,10 @@ atomic_t flow_cache_genid = ATOMIC_INIT(0);
 
 static u32 flow_hash_shift;
 #define flow_hash_size	(1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
+
+static DEFINE_PER_CPU_LOCKED(struct flow_cache_entry **, flow_tables);
 
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
+#define flow_table(cpu) (per_cpu_var_locked(flow_tables, cpu))
 
 static struct kmem_cache *flow_cachep __read_mostly;
 
@@ -168,24 +169,24 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
 void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 			flow_resolve_t resolver)
 {
-	struct flow_cache_entry *fle, **head;
+	struct flow_cache_entry **table, *fle, **head;
 	unsigned int hash;
 	int cpu;
 
 	local_bh_disable();
-	cpu = smp_processor_id();
+	table = get_cpu_var_locked(flow_tables, &cpu);
 
 	fle = NULL;
 	/* Packet really early in init?  Making flow_cache_init a
 	 * pre-smp initcall would solve this.  --RR */
-	if (!flow_table(cpu))
+	if (!table)
 		goto nocache;
 
 	if (flow_hash_rnd_recalc(cpu))
 		flow_new_hash_rnd(cpu);
 	hash = flow_hash_code(key, cpu);
 
-	head = &flow_table(cpu)[hash];
+	head = &table[hash];
 	for (fle = *head; fle; fle = fle->next) {
 		if (fle->family == family &&
 		    fle->dir == dir &&
@@ -195,6 +196,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 
 				if (ret)
 					atomic_inc(fle->object_ref);
+				put_cpu_var_locked(flow_tables, cpu);
 				local_bh_enable();
 
 				return ret;
@@ -220,6 +222,8 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
 	}
 
 nocache:
+	put_cpu_var_locked(flow_tables, cpu);
+
 	{
 		int err;
 		void *obj;
@@ -249,14 +253,15 @@ nocache:
 
 static void flow_cache_flush_tasklet(unsigned long data)
 {
 	struct flow_flush_info *info = (void *)data;
+	struct flow_cache_entry **table;
 	int i;
 	int cpu;
 
-	cpu = smp_processor_id();
+	table = get_cpu_var_locked(flow_tables, &cpu);
 	for (i = 0; i < flow_hash_size; i++) {
 		struct flow_cache_entry *fle;
 
-		fle = flow_table(cpu)[i];
+		fle = table[i];
 		for (; fle; fle = fle->next) {
 			unsigned genid = atomic_read(&flow_cache_genid);
@@ -267,6 +272,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
 			atomic_dec(fle->object_ref);
 		}
 	}
+	put_cpu_var_locked(flow_tables, cpu);
 
 	if (atomic_dec_and_test(&info->cpuleft))
 		complete(&info->completion);
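Both flow.c call sites follow the same bracket: get_cpu_var_locked() pins and locks the current CPU's table, the body walks it, and put_cpu_var_locked() releases it. A condensed illustration of that pattern, using the hypothetical helpers sketched above together with flow_tables and flow_hash_size from the hunks:

    /* Sketch of the access pattern used by the flow.c hunks above. */
    static void flow_table_scan(void)
    {
    	struct flow_cache_entry **table;
    	int cpu, i;

    	/* Lock this CPU's table. The section may now be preempted:
    	 * the per-CPU lock, not disabled preemption, protects it. */
    	table = get_cpu_var_locked(flow_tables, &cpu);
    	for (i = 0; i < flow_hash_size; i++) {
    		struct flow_cache_entry *fle;

    		for (fle = table[i]; fle; fle = fle->next)
    			;	/* inspect the entry */
    	}
    	put_cpu_var_locked(flow_tables, cpu);
    }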
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -261,7 +261,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
 	xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[smp_processor_id()];
+	table_base = private->entries[raw_smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -1183,7 +1183,7 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
 
 	i = 0;
 	/* Choose the copy that is on our node */
-	curcpu = smp_processor_id();
+	curcpu = raw_smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
 	xt_info_wrlock(curcpu);
 	ARPT_ENTRY_ITERATE(loc_cpu_entry,
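The arp_tables hunks swap smp_processor_id() for raw_smp_processor_id(). Once these paths can run with preemption enabled, smp_processor_id() would trip the CONFIG_DEBUG_PREEMPT sanity check; the raw variant skips it, which is safe here because xt_info_rdlock_bh()/xt_info_wrlock(), not disabled preemption, serialize access to the per-CPU entries copy. A hypothetical sketch of the distinction (not from the patch):

    /* raw_smp_processor_id() merely picks a copy; the caller's lock,
     * not disabled preemption, keeps entries[cpu] stable. */
    static void *pick_cpu_copy(void **entries, int *cpup)
    {
    	int cpu = raw_smp_processor_id();	/* no debug check */

    	/* smp_processor_id() here would WARN under
    	 * CONFIG_DEBUG_PREEMPT when called preemptibly. */
    	*cpup = cpu;
    	return entries[cpu];	/* caller must hold the lock for cpu */
    }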