Commit 0d55af87 authored by Alexey Dobriyan, committed by Patrick McHardy

netfilter: netns nf_conntrack: per-netns statistics

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent 6058fa6b
...@@ -290,12 +290,12 @@ extern unsigned int nf_conntrack_htable_size; ...@@ -290,12 +290,12 @@ extern unsigned int nf_conntrack_htable_size;
extern int nf_conntrack_checksum; extern int nf_conntrack_checksum;
extern int nf_conntrack_max; extern int nf_conntrack_max;
DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); #define NF_CT_STAT_INC(net, count) \
#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++) (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
#define NF_CT_STAT_INC_ATOMIC(count) \ #define NF_CT_STAT_INC_ATOMIC(net, count) \
do { \ do { \
local_bh_disable(); \ local_bh_disable(); \
__get_cpu_var(nf_conntrack_stat).count++; \ per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \
local_bh_enable(); \ local_bh_enable(); \
} while (0) } while (0)
......
...@@ -12,6 +12,7 @@ struct netns_ct { ...@@ -12,6 +12,7 @@ struct netns_ct {
struct hlist_head *hash; struct hlist_head *hash;
struct hlist_head *expect_hash; struct hlist_head *expect_hash;
struct hlist_head unconfirmed; struct hlist_head unconfirmed;
struct ip_conntrack_stat *stat;
#ifdef CONFIG_NF_CONNTRACK_EVENTS #ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_ecache *ecache; struct nf_conntrack_ecache *ecache;
#endif #endif
......
...@@ -294,7 +294,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) ...@@ -294,7 +294,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
if (!cpu_possible(cpu)) if (!cpu_possible(cpu))
continue; continue;
*pos = cpu+1; *pos = cpu+1;
return &per_cpu(nf_conntrack_stat, cpu); return per_cpu_ptr(init_net.ct.stat, cpu);
} }
return NULL; return NULL;
...@@ -308,7 +308,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) ...@@ -308,7 +308,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (!cpu_possible(cpu)) if (!cpu_possible(cpu))
continue; continue;
*pos = cpu+1; *pos = cpu+1;
return &per_cpu(nf_conntrack_stat, cpu); return per_cpu_ptr(init_net.ct.stat, cpu);
} }
return NULL; return NULL;
......
...@@ -56,9 +56,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_untracked); ...@@ -56,9 +56,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
unsigned int nf_ct_log_invalid __read_mostly; unsigned int nf_ct_log_invalid __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly; static struct kmem_cache *nf_conntrack_cachep __read_mostly;
DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
static int nf_conntrack_hash_rnd_initted; static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd; static unsigned int nf_conntrack_hash_rnd;
...@@ -171,6 +168,7 @@ static void ...@@ -171,6 +168,7 @@ static void
destroy_conntrack(struct nf_conntrack *nfct) destroy_conntrack(struct nf_conntrack *nfct)
{ {
struct nf_conn *ct = (struct nf_conn *)nfct; struct nf_conn *ct = (struct nf_conn *)nfct;
struct net *net = nf_ct_net(ct);
struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_l4proto *l4proto;
pr_debug("destroy_conntrack(%p)\n", ct); pr_debug("destroy_conntrack(%p)\n", ct);
...@@ -203,7 +201,7 @@ destroy_conntrack(struct nf_conntrack *nfct) ...@@ -203,7 +201,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode); hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
} }
NF_CT_STAT_INC(delete); NF_CT_STAT_INC(net, delete);
spin_unlock_bh(&nf_conntrack_lock); spin_unlock_bh(&nf_conntrack_lock);
if (ct->master) if (ct->master)
...@@ -216,6 +214,7 @@ destroy_conntrack(struct nf_conntrack *nfct) ...@@ -216,6 +214,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
static void death_by_timeout(unsigned long ul_conntrack) static void death_by_timeout(unsigned long ul_conntrack)
{ {
struct nf_conn *ct = (void *)ul_conntrack; struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct);
struct nf_conn_help *help = nfct_help(ct); struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_helper *helper; struct nf_conntrack_helper *helper;
...@@ -230,7 +229,7 @@ static void death_by_timeout(unsigned long ul_conntrack) ...@@ -230,7 +229,7 @@ static void death_by_timeout(unsigned long ul_conntrack)
spin_lock_bh(&nf_conntrack_lock); spin_lock_bh(&nf_conntrack_lock);
/* Inside lock so preempt is disabled on module removal path. /* Inside lock so preempt is disabled on module removal path.
* Otherwise we can get spurious warnings. */ * Otherwise we can get spurious warnings. */
NF_CT_STAT_INC(delete_list); NF_CT_STAT_INC(net, delete_list);
clean_from_lists(ct); clean_from_lists(ct);
spin_unlock_bh(&nf_conntrack_lock); spin_unlock_bh(&nf_conntrack_lock);
nf_ct_put(ct); nf_ct_put(ct);
...@@ -249,11 +248,11 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) ...@@ -249,11 +248,11 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
local_bh_disable(); local_bh_disable();
hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) { hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
if (nf_ct_tuple_equal(tuple, &h->tuple)) { if (nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(found); NF_CT_STAT_INC(net, found);
local_bh_enable(); local_bh_enable();
return h; return h;
} }
NF_CT_STAT_INC(searched); NF_CT_STAT_INC(net, searched);
} }
local_bh_enable(); local_bh_enable();
...@@ -366,7 +365,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) ...@@ -366,7 +365,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
add_timer(&ct->timeout); add_timer(&ct->timeout);
atomic_inc(&ct->ct_general.use); atomic_inc(&ct->ct_general.use);
set_bit(IPS_CONFIRMED_BIT, &ct->status); set_bit(IPS_CONFIRMED_BIT, &ct->status);
NF_CT_STAT_INC(insert); NF_CT_STAT_INC(net, insert);
spin_unlock_bh(&nf_conntrack_lock); spin_unlock_bh(&nf_conntrack_lock);
help = nfct_help(ct); help = nfct_help(ct);
if (help && help->helper) if (help && help->helper)
...@@ -381,7 +380,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) ...@@ -381,7 +380,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT; return NF_ACCEPT;
out: out:
NF_CT_STAT_INC(insert_failed); NF_CT_STAT_INC(net, insert_failed);
spin_unlock_bh(&nf_conntrack_lock); spin_unlock_bh(&nf_conntrack_lock);
return NF_DROP; return NF_DROP;
} }
...@@ -405,11 +404,11 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, ...@@ -405,11 +404,11 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) { hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
nf_ct_tuple_equal(tuple, &h->tuple)) { nf_ct_tuple_equal(tuple, &h->tuple)) {
NF_CT_STAT_INC(found); NF_CT_STAT_INC(net, found);
rcu_read_unlock_bh(); rcu_read_unlock_bh();
return 1; return 1;
} }
NF_CT_STAT_INC(searched); NF_CT_STAT_INC(net, searched);
} }
rcu_read_unlock_bh(); rcu_read_unlock_bh();
...@@ -454,7 +453,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) ...@@ -454,7 +453,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
if (del_timer(&ct->timeout)) { if (del_timer(&ct->timeout)) {
death_by_timeout((unsigned long)ct); death_by_timeout((unsigned long)ct);
dropped = 1; dropped = 1;
NF_CT_STAT_INC_ATOMIC(early_drop); NF_CT_STAT_INC_ATOMIC(net, early_drop);
} }
nf_ct_put(ct); nf_ct_put(ct);
return dropped; return dropped;
...@@ -581,7 +580,7 @@ init_conntrack(struct net *net, ...@@ -581,7 +580,7 @@ init_conntrack(struct net *net,
ct->secmark = exp->master->secmark; ct->secmark = exp->master->secmark;
#endif #endif
nf_conntrack_get(&ct->master->ct_general); nf_conntrack_get(&ct->master->ct_general);
NF_CT_STAT_INC(expect_new); NF_CT_STAT_INC(net, expect_new);
} else { } else {
struct nf_conntrack_helper *helper; struct nf_conntrack_helper *helper;
...@@ -591,7 +590,7 @@ init_conntrack(struct net *net, ...@@ -591,7 +590,7 @@ init_conntrack(struct net *net,
if (help) if (help)
rcu_assign_pointer(help->helper, helper); rcu_assign_pointer(help->helper, helper);
} }
NF_CT_STAT_INC(new); NF_CT_STAT_INC(net, new);
} }
/* Overload tuple linked list to put us in unconfirmed list. */ /* Overload tuple linked list to put us in unconfirmed list. */
...@@ -683,7 +682,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, ...@@ -683,7 +682,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
/* Previously seen (loopback or untracked)? Ignore. */ /* Previously seen (loopback or untracked)? Ignore. */
if (skb->nfct) { if (skb->nfct) {
NF_CT_STAT_INC_ATOMIC(ignore); NF_CT_STAT_INC_ATOMIC(net, ignore);
return NF_ACCEPT; return NF_ACCEPT;
} }
...@@ -693,8 +692,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, ...@@ -693,8 +692,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
&dataoff, &protonum); &dataoff, &protonum);
if (ret <= 0) { if (ret <= 0) {
pr_debug("not prepared to track yet or error occured\n"); pr_debug("not prepared to track yet or error occured\n");
NF_CT_STAT_INC_ATOMIC(error); NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(invalid); NF_CT_STAT_INC_ATOMIC(net, invalid);
return -ret; return -ret;
} }
...@@ -706,8 +705,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, ...@@ -706,8 +705,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
if (l4proto->error != NULL) { if (l4proto->error != NULL) {
ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum); ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
if (ret <= 0) { if (ret <= 0) {
NF_CT_STAT_INC_ATOMIC(error); NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(invalid); NF_CT_STAT_INC_ATOMIC(net, invalid);
return -ret; return -ret;
} }
} }
...@@ -716,13 +715,13 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, ...@@ -716,13 +715,13 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
l3proto, l4proto, &set_reply, &ctinfo); l3proto, l4proto, &set_reply, &ctinfo);
if (!ct) { if (!ct) {
/* Not valid part of a connection */ /* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(invalid); NF_CT_STAT_INC_ATOMIC(net, invalid);
return NF_ACCEPT; return NF_ACCEPT;
} }
if (IS_ERR(ct)) { if (IS_ERR(ct)) {
/* Too stressed to deal. */ /* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(drop); NF_CT_STAT_INC_ATOMIC(net, drop);
return NF_DROP; return NF_DROP;
} }
...@@ -735,7 +734,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, ...@@ -735,7 +734,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
pr_debug("nf_conntrack_in: Can't track with proto module\n"); pr_debug("nf_conntrack_in: Can't track with proto module\n");
nf_conntrack_put(skb->nfct); nf_conntrack_put(skb->nfct);
skb->nfct = NULL; skb->nfct = NULL;
NF_CT_STAT_INC_ATOMIC(invalid); NF_CT_STAT_INC_ATOMIC(net, invalid);
return -ret; return -ret;
} }
...@@ -1043,6 +1042,7 @@ void nf_conntrack_cleanup(struct net *net) ...@@ -1043,6 +1042,7 @@ void nf_conntrack_cleanup(struct net *net)
nf_conntrack_acct_fini(); nf_conntrack_acct_fini();
nf_conntrack_expect_fini(net); nf_conntrack_expect_fini(net);
free_percpu(net->ct.stat);
nf_conntrack_helper_fini(); nf_conntrack_helper_fini();
nf_conntrack_proto_fini(); nf_conntrack_proto_fini();
} }
...@@ -1152,6 +1152,9 @@ int nf_conntrack_init(struct net *net) ...@@ -1152,6 +1152,9 @@ int nf_conntrack_init(struct net *net)
max_factor = 4; max_factor = 4;
} }
atomic_set(&net->ct.count, 0); atomic_set(&net->ct.count, 0);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat)
goto err_stat;
ret = nf_conntrack_ecache_init(net); ret = nf_conntrack_ecache_init(net);
if (ret < 0) if (ret < 0)
goto err_ecache; goto err_ecache;
...@@ -1222,5 +1225,7 @@ err_free_hash: ...@@ -1222,5 +1225,7 @@ err_free_hash:
err_hash: err_hash:
nf_conntrack_ecache_fini(net); nf_conntrack_ecache_fini(net);
err_ecache: err_ecache:
free_percpu(net->ct.stat);
err_stat:
return -ENOMEM; return -ENOMEM;
} }
...@@ -53,7 +53,7 @@ void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) ...@@ -53,7 +53,7 @@ void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
master_help->expecting[exp->class]--; master_help->expecting[exp->class]--;
nf_ct_expect_put(exp); nf_ct_expect_put(exp);
NF_CT_STAT_INC(expect_delete); NF_CT_STAT_INC(net, expect_delete);
} }
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect); EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);
...@@ -326,7 +326,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) ...@@ -326,7 +326,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
add_timer(&exp->timeout); add_timer(&exp->timeout);
atomic_inc(&exp->use); atomic_inc(&exp->use);
NF_CT_STAT_INC(expect_create); NF_CT_STAT_INC(net, expect_create);
} }
/* Race with expectations being used means we could have none to find; OK. */ /* Race with expectations being used means we could have none to find; OK. */
......
...@@ -203,7 +203,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) ...@@ -203,7 +203,7 @@ static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
if (!cpu_possible(cpu)) if (!cpu_possible(cpu))
continue; continue;
*pos = cpu + 1; *pos = cpu + 1;
return &per_cpu(nf_conntrack_stat, cpu); return per_cpu_ptr(init_net.ct.stat, cpu);
} }
return NULL; return NULL;
...@@ -217,7 +217,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) ...@@ -217,7 +217,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (!cpu_possible(cpu)) if (!cpu_possible(cpu))
continue; continue;
*pos = cpu + 1; *pos = cpu + 1;
return &per_cpu(nf_conntrack_stat, cpu); return per_cpu_ptr(init_net.ct.stat, cpu);
} }
return NULL; return NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment