Commit 00ef66eb authored by Thomas Gleixner

net: Fix netfilter percpu assumptions for real

commit 21ece08c (net: fix the xtables smp_processor_id assumptions for
-rt) fixed only half of the problem. The filter functions might run in
thread context and can be preempted and migrated on -RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 82c07cbb
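
For context, a minimal user-space model of the hazard this patch closes. On -RT the table-walk functions run in preemptible thread context, so a task can be migrated between taking one CPU's lock and indexing the percpu table with a freshly sampled processor id. The sketch below models only the fix's shape; all *_sketch names are invented for illustration and are not kernel APIs:

	/*
	 * Minimal model (illustrative only): sample the CPU id exactly
	 * once, take that CPU's reader lock, and thread the same id
	 * through to both the table lookup and the unlock.
	 */
	#include <stdio.h>

	#define NR_CPUS 4

	struct xt_info_lock_sketch {
		int readers;			/* recursive reader count */
	};

	static struct xt_info_lock_sketch xt_locks_sketch[NR_CPUS];
	static const char *entries_sketch[NR_CPUS] = {
		"cpu0 rules", "cpu1 rules", "cpu2 rules", "cpu3 rules",
	};

	/* Stand-in for smp_processor_id(); on -RT its result can change
	 * across any preemption point unless migration is prevented. */
	static int current_cpu_sketch;

	/* Post-patch shape: pick the CPU once, hand it back to the caller. */
	static int xt_info_rdlock_sketch(void)
	{
		int cpu = current_cpu_sketch;	/* sampled exactly once */

		xt_locks_sketch[cpu].readers++;	/* spinlock elided in model */
		return cpu;
	}

	static void xt_info_rdunlock_sketch(int cpu)
	{
		xt_locks_sketch[cpu].readers--;	/* matches the locked CPU */
	}

	int main(void)
	{
		int cpu = xt_info_rdlock_sketch();

		current_cpu_sketch = 2;		/* simulated migration: harmless,
						 * since 'cpu' is reused below */
		printf("walking %s (readers=%d)\n",
		       entries_sketch[cpu], xt_locks_sketch[cpu].readers);
		xt_info_rdunlock_sketch(cpu);
		return 0;
	}

The pre-patch code instead called xt_info_rdlock_bh() (which locked the current CPU's lock) and then re-sampled the CPU with raw_smp_processor_id() for the table lookup; after a migration those two CPUs can differ.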
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -468,22 +468,35 @@ DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
  * _Only_ that special combination of being per-cpu and never getting
  * re-entered asynchronously means that the count is safe.
  */
-static inline void xt_info_rdlock_bh(void)
+static inline int xt_info_rdlock_bh(void)
 {
 	struct xt_info_lock *lock;
+	int cpu;
 
 	local_bh_disable();
-	lock = &__raw_get_cpu_var(xt_info_locks);
-	if (likely(!lock->readers++))
+	preempt_disable_rt();
+	cpu = smp_processor_id();
+	lock = &per_cpu(xt_info_locks, cpu);
+	if (likely(!lock->readers++)) {
+		preempt_enable_rt();
 		spin_lock(&lock->lock);
+	} else
+		preempt_enable_rt();
+	return cpu;
 }
 
-static inline void xt_info_rdunlock_bh(void)
+static inline void xt_info_rdunlock_bh(int cpu)
 {
-	struct xt_info_lock *lock = &__raw_get_cpu_var(xt_info_locks);
+	struct xt_info_lock *lock = &per_cpu(xt_info_locks, cpu);
 
-	if (likely(!--lock->readers))
+	preempt_disable_rt();
+	if (likely(!--lock->readers)) {
+		preempt_enable_rt();
		spin_unlock(&lock->lock);
+	} else
+		preempt_enable_rt();
 	local_bh_enable();
 }
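
The spinlock is only taken when readers goes 0 to 1, which keeps the lock recursive on the same CPU; the preempt_disable_rt()/preempt_enable_rt() bracket guarantees that the CPU choice and the readers manipulation happen on one CPU even on a fully preemptible kernel. A sketch of the assumed shape of that helper pair, following the usual -RT convention that it compiles away on non-RT kernels (this is an assumption, not a verbatim copy from the -RT tree):

	#ifdef CONFIG_PREEMPT_RT
	# define preempt_disable_rt()	preempt_disable()	/* pin the task */
	# define preempt_enable_rt()	preempt_enable()
	#else
	# define preempt_disable_rt()	do { } while (0)	/* no-op on mainline */
	# define preempt_enable_rt()	do { } while (0)
	#endif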
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -252,6 +252,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	void *table_base;
 	const struct xt_table_info *private;
 	struct xt_target_param tgpar;
+	int cpu;
 
 	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
 		return NF_DROP;
@@ -259,9 +260,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	indev = in ? in->name : nulldevname;
 	outdev = out ? out->name : nulldevname;
 
-	xt_info_rdlock_bh();
+	cpu = xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[raw_smp_processor_id()];
+	table_base = private->entries[cpu];
 	e = get_entry(table_base, private->hook_entry[hook]);
 	back = get_entry(table_base, private->underflow[hook]);
@@ -332,7 +333,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 				/* Verdict */
 				break;
 	} while (!hotdrop);
-	xt_info_rdunlock_bh();
+	xt_info_rdunlock_bh(cpu);
 
 	if (hotdrop)
 		return NF_DROP;
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -325,6 +325,7 @@ ipt_do_table(struct sk_buff *skb,
 	struct xt_table_info *private;
 	struct xt_match_param mtpar;
 	struct xt_target_param tgpar;
+	int cpu;
 
 	/* Initialization */
 	ip = ip_hdr(skb);
@@ -346,9 +347,9 @@ ipt_do_table(struct sk_buff *skb,
 	mtpar.hooknum = tgpar.hooknum = hook;
 
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-	xt_info_rdlock_bh();
+	cpu = xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[raw_smp_processor_id()];
+	table_base = private->entries[cpu];
 	e = get_entry(table_base, private->hook_entry[hook]);
@@ -435,7 +436,7 @@ ipt_do_table(struct sk_buff *skb,
 				/* Verdict */
 				break;
 	} while (!hotdrop);
-	xt_info_rdunlock_bh();
+	xt_info_rdunlock_bh(cpu);
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -355,6 +355,7 @@ ip6t_do_table(struct sk_buff *skb,
 	struct xt_table_info *private;
 	struct xt_match_param mtpar;
 	struct xt_target_param tgpar;
+	int cpu;
 
 	/* Initialization */
 	indev = in ? in->name : nulldevname;
@@ -373,9 +374,9 @@ ip6t_do_table(struct sk_buff *skb,
 
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-	xt_info_rdlock_bh();
+	cpu = xt_info_rdlock_bh();
 	private = table->private;
-	table_base = private->entries[raw_smp_processor_id()];
+	table_base = private->entries[cpu];
 	e = get_entry(table_base, private->hook_entry[hook]);
@@ -464,7 +465,7 @@ ip6t_do_table(struct sk_buff *skb,
 #ifdef CONFIG_NETFILTER_DEBUG
 	tb_comefrom = NETFILTER_LINK_POISON;
 #endif
-	xt_info_rdunlock_bh();
+	xt_info_rdunlock_bh(cpu);
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;