Commit 7a9546ee authored by Eric Dumazet, committed by David S. Miller

net: ib_net pointer should depend on CONFIG_NET_NS

We can shrink size of "struct inet_bind_bucket" by 50%, using
read_pnet() and write_pnet()
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8f424b5f
...@@ -77,13 +77,20 @@ struct inet_ehash_bucket { ...@@ -77,13 +77,20 @@ struct inet_ehash_bucket {
* ports are created in O(1) time? I thought so. ;-) -DaveM * ports are created in O(1) time? I thought so. ;-) -DaveM
*/ */
/*
 * An inet_bind_bucket holds the state for one bound local port.
 * The owning network-namespace pointer is stored only when
 * CONFIG_NET_NS is enabled; readers must use the ib_net() accessor
 * (which wraps read_pnet()) rather than touching the field directly,
 * so the field can be compiled out — per the commit message this
 * shrinks the structure by 50% on !CONFIG_NET_NS kernels.
 */
struct inet_bind_bucket {
#ifdef CONFIG_NET_NS
	struct net		*ib_net;	/* owning namespace; ref held via hold_net() at create */
#endif
	unsigned short		port;		/* bound local port number */
	signed short		fastreuse;	/* initialized to 0 at create; presumably SO_REUSEADDR bookkeeping — see inet_csk_get_port */
	struct hlist_node	node;		/* link in the bhash chain */
	struct hlist_head	owners;		/* sockets bound to this port; bucket freed when empty */
};
/*
 * Return the network namespace owning @bucket.  Always go through
 * this accessor instead of dereferencing the ib_net member, since
 * the member only exists under CONFIG_NET_NS (read_pnet() hides
 * that difference).
 */
static inline struct net *ib_net(struct inet_bind_bucket *bucket)
{
	struct net *owner = read_pnet(&bucket->ib_net);

	return owner;
}
/* Iterate over every inet_bind_bucket (@tb) chained on @head, using @node as cursor. */
#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)
......
...@@ -109,7 +109,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) ...@@ -109,7 +109,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
hashinfo->bhash_size)]; hashinfo->bhash_size)];
spin_lock(&head->lock); spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain) inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->ib_net == net && tb->port == rover) if (ib_net(tb) == net && tb->port == rover)
goto next; goto next;
break; break;
next: next:
...@@ -137,7 +137,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) ...@@ -137,7 +137,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
hashinfo->bhash_size)]; hashinfo->bhash_size)];
spin_lock(&head->lock); spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain) inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->ib_net == net && tb->port == snum) if (ib_net(tb) == net && tb->port == snum)
goto tb_found; goto tb_found;
} }
tb = NULL; tb = NULL;
......
...@@ -35,7 +35,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, ...@@ -35,7 +35,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
if (tb != NULL) { if (tb != NULL) {
tb->ib_net = hold_net(net); write_pnet(&tb->ib_net, hold_net(net));
tb->port = snum; tb->port = snum;
tb->fastreuse = 0; tb->fastreuse = 0;
INIT_HLIST_HEAD(&tb->owners); INIT_HLIST_HEAD(&tb->owners);
...@@ -51,7 +51,7 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket ...@@ -51,7 +51,7 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
{ {
if (hlist_empty(&tb->owners)) { if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node); __hlist_del(&tb->node);
release_net(tb->ib_net); release_net(ib_net(tb));
kmem_cache_free(cachep, tb); kmem_cache_free(cachep, tb);
} }
} }
...@@ -449,7 +449,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, ...@@ -449,7 +449,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
* unique enough. * unique enough.
*/ */
inet_bind_bucket_for_each(tb, node, &head->chain) { inet_bind_bucket_for_each(tb, node, &head->chain) {
if (tb->ib_net == net && tb->port == port) { if (ib_net(tb) == net && tb->port == port) {
WARN_ON(hlist_empty(&tb->owners)); WARN_ON(hlist_empty(&tb->owners));
if (tb->fastreuse >= 0) if (tb->fastreuse >= 0)
goto next_port; goto next_port;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment