Commit 8feaf0c0 authored by Arnaldo Carvalho de Melo's avatar Arnaldo Carvalho de Melo Committed by David S. Miller

[INET]: Generalise tcp_tw_bucket, aka TIME_WAIT sockets

This paves the way to generalise the rest of the sock ID lookup
routines and saves some bytes in TCPv4 TIME_WAIT sockets on distro
kernels (where IPv6 is always built as a module):

[root@qemu ~]# grep tw_sock /proc/slabinfo
tw_sock_TCPv6  0  0  128  31  1
tw_sock_TCP    0  0   96  41  1
[root@qemu ~]#

Now if a protocol wants to use the TIME_WAIT generic infrastructure it
only has to set the sk_prot->twsk_obj_size field with the size of its
inet_timewait_sock derived sock and proto_register will create
sk_prot->twsk_slab, for now its only for INET sockets, but we can
introduce timewait_sock later if some non INET transport protocol
wants to use this stuff.

Next changesets will take advantage of this new infrastructure to
generalise even more TCP code.

[acme@toy net-2.6.14]$ grep built-in /tmp/before.size /tmp/after.size
/tmp/before.size: 188646   11764    5068  205478   322a6 net/ipv4/built-in.o
/tmp/after.size:  188144   11764    5068  204976   320b0 net/ipv4/built-in.o
[acme@toy net-2.6.14]$

Tested with both IPv4 & IPv6 (::1 (localhost) & ::ffff:172.20.0.1
(qemu host)).
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 33b62231
...@@ -308,6 +308,41 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, ...@@ -308,6 +308,41 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only) #define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk)) #define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
#include <linux/tcp.h>
/*
 * IPv6 TCP TIME_WAIT socket: the generic TCP TIME_WAIT sock plus the
 * IPv6 addresses needed for socket demux.  tw_v6_sk must stay the
 * first member so a struct sock pointer can be cast to this type.
 */
struct tcp6_timewait_sock {
	struct tcp_timewait_sock tw_v6_sk;	/* must be first */
	struct in6_addr		 tw_v6_daddr;	/* peer (destination) address */
	struct in6_addr		 tw_v6_rcv_saddr; /* bound local address */
};
/* Cast a TIME_WAIT struct sock to its IPv6 TCP TIME_WAIT representation. */
static inline struct tcp6_timewait_sock *tcp6_twsk(const struct sock *sk)
{
	return (struct tcp6_timewait_sock *)sk;
}
/*
 * Bound local IPv6 address of @sk.  Works for both full sockets
 * (inet6_sk rcv_saddr) and TIME_WAIT minisockets (tw_v6_rcv_saddr).
 */
static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
{
	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		return &tcp6_twsk(sk)->tw_v6_rcv_saddr;
	return &inet6_sk(sk)->rcv_saddr;
}
/* As __tcp_v6_rcv_saddr(), but returns NULL for non-AF_INET6 sockets. */
static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
{
	if (sk->sk_family != AF_INET6)
		return NULL;
	return __tcp_v6_rcv_saddr(sk);
}
/* ipv6only flag of a TIME_WAIT socket (kept in the generic twsk). */
static inline int tcp_twsk_ipv6only(const struct sock *sk)
{
	return inet_twsk(sk)->tw_ipv6only;
}
/* ipv6only flag of @sk, whether it is a full or a TIME_WAIT socket. */
static inline int tcp_v6_ipv6only(const struct sock *sk)
{
	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		return tcp_twsk_ipv6only(sk);
	return ipv6_only_sock(sk);
}
#else #else
#define __ipv6_only_sock(sk) 0 #define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0 #define ipv6_only_sock(sk) 0
...@@ -322,8 +357,19 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) ...@@ -322,8 +357,19 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
return NULL; return NULL;
} }
#endif #define __tcp_v6_rcv_saddr(__sk) NULL
#define tcp_v6_rcv_saddr(__sk) NULL
#define tcp_twsk_ipv6only(__sk) 0
#define tcp_v6_ipv6only(__sk) 0
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
#endif #define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
(((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif #endif /* __KERNEL__ */
#endif /* _IPV6_H */
...@@ -179,6 +179,7 @@ struct tcp_info ...@@ -179,6 +179,7 @@ struct tcp_info
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/ip.h> #include <linux/ip.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/inet_timewait_sock.h>
/* This defines a selective acknowledgement block. */ /* This defines a selective acknowledgement block. */
struct tcp_sack_block { struct tcp_sack_block {
...@@ -387,6 +388,20 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) ...@@ -387,6 +388,20 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk)
return (struct tcp_sock *)sk; return (struct tcp_sock *)sk;
} }
/*
 * TCP TIME_WAIT socket: the generic inet TIME_WAIT minisock plus the
 * TCP sequence and timestamp state kept while in this state.
 * tw_sk must stay the first member so a struct sock pointer can be
 * cast to this type.
 */
struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;	/* must be first */
	__u32			  tw_rcv_nxt;
	__u32			  tw_snd_nxt;
	__u32			  tw_rcv_wnd;
	__u32			  tw_ts_recent;		/* last timestamp seen; used for PAWS checks — see tcp_ipv4 TW match path */
	long			  tw_ts_recent_stamp;
};
/* Cast a TIME_WAIT struct sock to its TCP TIME_WAIT representation. */
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}
static inline void *tcp_ca(const struct tcp_sock *tp) static inline void *tcp_ca(const struct tcp_sock *tp)
{ {
return (void *) tp->ca_priv; return (void *) tp->ca_priv;
......
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#ifndef _INET_HASHTABLES_H #ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H #define _INET_HASHTABLES_H
#include <linux/config.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/ip.h> #include <linux/ip.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
...@@ -310,4 +312,43 @@ sherry_cache: ...@@ -310,4 +312,43 @@ sherry_cache:
read_unlock(&hashinfo->lhash_lock); read_unlock(&hashinfo->lhash_lock);
return sk; return sk;
} }
/* Socket demux engine toys. */

/* Pack source + destination port into one __u32 so lookup can compare
 * both ports with a single load, in either endianness. */
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport) << 16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport) << 16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
/*
 * On 64-bit, fold saddr and daddr into one __u64 cookie so the
 * established-hash walk compares both addresses with a single 64-bit
 * load.  Relies on daddr/rcv_saddr (and tw_daddr/tw_rcv_saddr) being
 * adjacent and 8-byte aligned in their structs.
 */
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
/* Same match, reading the fields from a TIME_WAIT minisock. */
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
/* No 64-bit cookie on 32-bit: compare the two addresses separately. */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \
	((inet_sk(__sk)->daddr == (__saddr)) && \
	 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \
	((inet_twsk(__sk)->tw_daddr == (__saddr)) && \
	 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
#endif /* _INET_HASHTABLES_H */ #endif /* _INET_HASHTABLES_H */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for a generic INET TIMEWAIT sock
*
* From code originally in net/tcp.h
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _INET_TIMEWAIT_SOCK_
#define _INET_TIMEWAIT_SOCK_
#include <linux/config.h>
#include <linux/list.h>
#include <linux/types.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/atomic.h>
#if (BITS_PER_LONG == 64)
#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
#else
#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
#endif
struct inet_bind_bucket;
/*
 * This is a TIME_WAIT sock. It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
struct inet_timewait_sock {
	/*
	 * Now struct sock also uses sock_common, so please just
	 * don't add nothing before this first member (__tw_common) --acme
	 */
	struct sock_common	__tw_common;
#define tw_family		__tw_common.skc_family
#define tw_state		__tw_common.skc_state
#define tw_reuse		__tw_common.skc_reuse
#define tw_bound_dev_if		__tw_common.skc_bound_dev_if
#define tw_node			__tw_common.skc_node
#define tw_bind_node		__tw_common.skc_bind_node
#define tw_refcnt		__tw_common.skc_refcnt
#define tw_prot			__tw_common.skc_prot
	volatile unsigned char	tw_substate;
	/* 3 bits hole, try to pack */
	unsigned char		tw_rcv_wscale;
	/* Socket demultiplex comparisons on incoming packets. */
	/* these five are in inet_sock */
	__u16			tw_sport;
	/* aligned so the 64-bit INET_TW_MATCH cookie compare can read
	 * tw_daddr + tw_rcv_saddr as a single __u64 */
	__u32			tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
	__u32			tw_rcv_saddr;
	__u16			tw_dport;
	__u16			tw_num;
	/* And these are ours. */
	__u8			tw_ipv6only:1;
	/* 31 bits hole, try to pack */
	int			tw_hashent;
	int			tw_timeout;
	unsigned long		tw_ttd;		/* time-to-die, compared against jiffies */
	struct inet_bind_bucket	*tw_tb;
	struct hlist_node	tw_death_node;	/* death-row linkage */
};
/* Link @tw at the head of an established-hash chain. */
static inline void inet_twsk_add_node(struct inet_timewait_sock *tw,
				      struct hlist_head *list)
{
	hlist_add_head(&tw->tw_node, list);
}
/* Link @tw at the head of a bind-hash chain. */
static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
					   struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind_node, list);
}
/* Nonzero iff @tw is currently linked on a death-row list. */
static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
{
	return tw->tw_death_node.pprev != NULL;
}
/* Mark @tw as not on any death-row list (inet_twsk_dead_hashed() -> 0). */
static inline void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
{
	tw->tw_death_node.pprev = NULL;
}
/* Unlink @tw from death row; caller must know it is hashed there. */
static inline void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
{
	__hlist_del(&tw->tw_death_node);
	inet_twsk_dead_node_init(tw);	/* so a later dead_hashed() sees 0 */
}
/*
 * Remove @tw from death row if it is linked there.
 * Returns 1 when the node was unlinked, 0 when it was not hashed.
 */
static inline int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
{
	const int hashed = inet_twsk_dead_hashed(tw);

	if (hashed)
		__inet_twsk_del_dead_node(tw);
	return hashed;
}
/* Walk a TIME_WAIT hash chain via tw_node. */
#define inet_twsk_for_each(tw, node, head) \
	hlist_for_each_entry(tw, node, head, tw_node)

/* Walk a death-row list via tw_death_node; _safe allows removal. */
#define inet_twsk_for_each_inmate(tw, node, jail) \
	hlist_for_each_entry(tw, node, jail, tw_death_node)

#define inet_twsk_for_each_inmate_safe(tw, node, safe, jail) \
	hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
/* Cast a TIME_WAIT struct sock to the generic TIME_WAIT representation. */
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
	return (struct inet_timewait_sock *)sk;
}
/*
 * Bound local IPv4 address of @sk.  Works for both full sockets
 * (inet_sock rcv_saddr) and TIME_WAIT minisockets (tw_rcv_saddr).
 */
static inline u32 inet_rcv_saddr(const struct sock *sk)
{
	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		return inet_twsk(sk)->tw_rcv_saddr;
	return inet_sk(sk)->rcv_saddr;
}
/*
 * Drop a reference on @tw; when the last reference goes away, free it
 * back to its protocol's TIME_WAIT slab cache (tw_prot->twsk_slab).
 */
static inline void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
#ifdef SOCK_REFCNT_DEBUG
		printk(KERN_DEBUG "%s timewait_sock %p released\n",
		       tw->tw_prot->name, tw);
#endif
		kmem_cache_free(tw->tw_prot->twsk_slab, tw);
	}
}
#endif /* _INET_TIMEWAIT_SOCK_ */
...@@ -88,6 +88,7 @@ do { spin_lock_init(&((__sk)->sk_lock.slock)); \ ...@@ -88,6 +88,7 @@ do { spin_lock_init(&((__sk)->sk_lock.slock)); \
} while(0) } while(0)
struct sock; struct sock;
struct proto;
/** /**
* struct sock_common - minimal network layer representation of sockets * struct sock_common - minimal network layer representation of sockets
...@@ -98,10 +99,11 @@ struct sock; ...@@ -98,10 +99,11 @@ struct sock;
* @skc_node: main hash linkage for various protocol lookup tables * @skc_node: main hash linkage for various protocol lookup tables
* @skc_bind_node: bind hash linkage for various protocol lookup tables * @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_refcnt: reference count * @skc_refcnt: reference count
* @skc_prot: protocol handlers inside a network family
* *
* This is the minimal network layer representation of sockets, the header * This is the minimal network layer representation of sockets, the header
* for struct sock and struct tcp_tw_bucket. * for struct sock and struct inet_timewait_sock.
*/ */
struct sock_common { struct sock_common {
unsigned short skc_family; unsigned short skc_family;
volatile unsigned char skc_state; volatile unsigned char skc_state;
...@@ -110,11 +112,12 @@ struct sock_common { ...@@ -110,11 +112,12 @@ struct sock_common {
struct hlist_node skc_node; struct hlist_node skc_node;
struct hlist_node skc_bind_node; struct hlist_node skc_bind_node;
atomic_t skc_refcnt; atomic_t skc_refcnt;
struct proto *skc_prot;
}; };
/** /**
* struct sock - network layer representation of sockets * struct sock - network layer representation of sockets
* @__sk_common: shared layout with tcp_tw_bucket * @__sk_common: shared layout with inet_timewait_sock
* @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer * @sk_lock: synchronizer
...@@ -140,7 +143,6 @@ struct sock_common { ...@@ -140,7 +143,6 @@ struct sock_common {
* @sk_backlog: always used with the per-socket spinlock held * @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct * @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used * @sk_error_queue: rarely used
* @sk_prot: protocol handlers inside a network family
* @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance) * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
* @sk_err: last error * @sk_err: last error
* @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out' * @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
...@@ -173,7 +175,7 @@ struct sock_common { ...@@ -173,7 +175,7 @@ struct sock_common {
*/ */
struct sock { struct sock {
/* /*
* Now struct tcp_tw_bucket also uses sock_common, so please just * Now struct inet_timewait_sock also uses sock_common, so please just
* don't add nothing before this first member (__sk_common) --acme * don't add nothing before this first member (__sk_common) --acme
*/ */
struct sock_common __sk_common; struct sock_common __sk_common;
...@@ -184,6 +186,7 @@ struct sock { ...@@ -184,6 +186,7 @@ struct sock {
#define sk_node __sk_common.skc_node #define sk_node __sk_common.skc_node
#define sk_bind_node __sk_common.skc_bind_node #define sk_bind_node __sk_common.skc_bind_node
#define sk_refcnt __sk_common.skc_refcnt #define sk_refcnt __sk_common.skc_refcnt
#define sk_prot __sk_common.skc_prot
unsigned char sk_shutdown : 2, unsigned char sk_shutdown : 2,
sk_no_check : 2, sk_no_check : 2,
sk_userlocks : 4; sk_userlocks : 4;
...@@ -218,7 +221,6 @@ struct sock { ...@@ -218,7 +221,6 @@ struct sock {
struct sk_buff *tail; struct sk_buff *tail;
} sk_backlog; } sk_backlog;
struct sk_buff_head sk_error_queue; struct sk_buff_head sk_error_queue;
struct proto *sk_prot;
struct proto *sk_prot_creator; struct proto *sk_prot_creator;
rwlock_t sk_callback_lock; rwlock_t sk_callback_lock;
int sk_err, int sk_err,
...@@ -557,6 +559,9 @@ struct proto { ...@@ -557,6 +559,9 @@ struct proto {
kmem_cache_t *slab; kmem_cache_t *slab;
unsigned int obj_size; unsigned int obj_size;
kmem_cache_t *twsk_slab;
unsigned int twsk_obj_size;
struct request_sock_ops *rsk_prot; struct request_sock_ops *rsk_prot;
struct module *owner; struct module *owner;
......
...@@ -38,207 +38,14 @@ ...@@ -38,207 +38,14 @@
#include <net/ip.h> #include <net/ip.h>
#include <net/tcp_states.h> #include <net/tcp_states.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
#include <linux/seq_file.h> #include <linux/seq_file.h>
extern struct inet_hashinfo tcp_hashinfo; extern struct inet_hashinfo tcp_hashinfo;
#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
#else
#define TCP_ADDRCMP_ALIGN_BYTES 4
#endif
/* This is a TIME_WAIT bucket. It works around the memory consumption
* problems of sockets in such a state on heavily loaded servers, but
* without violating the protocol specification.
*/
struct tcp_tw_bucket {
/*
* Now struct sock also uses sock_common, so please just
* don't add nothing before this first member (__tw_common) --acme
*/
struct sock_common __tw_common;
#define tw_family __tw_common.skc_family
#define tw_state __tw_common.skc_state
#define tw_reuse __tw_common.skc_reuse
#define tw_bound_dev_if __tw_common.skc_bound_dev_if
#define tw_node __tw_common.skc_node
#define tw_bind_node __tw_common.skc_bind_node
#define tw_refcnt __tw_common.skc_refcnt
volatile unsigned char tw_substate;
unsigned char tw_rcv_wscale;
__u16 tw_sport;
/* Socket demultiplex comparisons on incoming packets. */
/* these five are in inet_sock */
__u32 tw_daddr
__attribute__((aligned(TCP_ADDRCMP_ALIGN_BYTES)));
__u32 tw_rcv_saddr;
__u16 tw_dport;
__u16 tw_num;
/* And these are ours. */
int tw_hashent;
int tw_timeout;
__u32 tw_rcv_nxt;
__u32 tw_snd_nxt;
__u32 tw_rcv_wnd;
__u32 tw_ts_recent;
long tw_ts_recent_stamp;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr tw_v6_daddr;
struct in6_addr tw_v6_rcv_saddr;
int tw_v6_ipv6only;
#endif
};
static __inline__ void tw_add_node(struct tcp_tw_bucket *tw,
struct hlist_head *list)
{
hlist_add_head(&tw->tw_node, list);
}
static __inline__ void tw_add_bind_node(struct tcp_tw_bucket *tw,
struct hlist_head *list)
{
hlist_add_head(&tw->tw_bind_node, list);
}
static inline int tw_dead_hashed(struct tcp_tw_bucket *tw)
{
return tw->tw_death_node.pprev != NULL;
}
static __inline__ void tw_dead_node_init(struct tcp_tw_bucket *tw)
{
tw->tw_death_node.pprev = NULL;
}
static __inline__ void __tw_del_dead_node(struct tcp_tw_bucket *tw)
{
__hlist_del(&tw->tw_death_node);
tw_dead_node_init(tw);
}
static __inline__ int tw_del_dead_node(struct tcp_tw_bucket *tw)
{
if (tw_dead_hashed(tw)) {
__tw_del_dead_node(tw);
return 1;
}
return 0;
}
#define tw_for_each(tw, node, head) \
hlist_for_each_entry(tw, node, head, tw_node)
#define tw_for_each_inmate(tw, node, jail) \
hlist_for_each_entry(tw, node, jail, tw_death_node)
#define tw_for_each_inmate_safe(tw, node, safe, jail) \
hlist_for_each_entry_safe(tw, node, safe, jail, tw_death_node)
#define tcptw_sk(__sk) ((struct tcp_tw_bucket *)(__sk))
static inline u32 tcp_v4_rcv_saddr(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
inet_sk(sk)->rcv_saddr : tcptw_sk(sk)->tw_rcv_saddr;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static inline struct in6_addr *__tcp_v6_rcv_saddr(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
&inet6_sk(sk)->rcv_saddr : &tcptw_sk(sk)->tw_v6_rcv_saddr;
}
static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
{
return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
}
#define tcptw_sk_ipv6only(__sk) (tcptw_sk(__sk)->tw_v6_ipv6only)
static inline int tcp_v6_ipv6only(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
ipv6_only_sock(sk) : tcptw_sk_ipv6only(sk);
}
#else
# define __tcp_v6_rcv_saddr(__sk) NULL
# define tcp_v6_rcv_saddr(__sk) NULL
# define tcptw_sk_ipv6only(__sk) 0
# define tcp_v6_ipv6only(__sk) 0
#endif
extern kmem_cache_t *tcp_timewait_cachep;
static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
if (atomic_dec_and_test(&tw->tw_refcnt)) {
#ifdef SOCK_REFCNT_DEBUG
printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
kmem_cache_free(tcp_timewait_cachep, tw);
}
}
extern atomic_t tcp_orphan_count; extern atomic_t tcp_orphan_count;
extern int tcp_tw_count; extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo); extern void tcp_time_wait(struct sock *sk, int state, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw); extern void tcp_tw_deschedule(struct inet_timewait_sock *tw);
/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define TCP_COMBINED_PORTS(__sport, __dport) \
(((__u32)(__sport)<<16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define TCP_COMBINED_PORTS(__sport, __dport) \
(((__u32)(__dport)<<16) | (__u32)(__sport))
#endif
#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&(inet_sk(__sk)->daddr)))== (__cookie)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
(((*((__u64 *)&(tcptw_sk(__sk)->tw_daddr))) == (__cookie)) && \
((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
((inet_sk(__sk)->daddr == (__saddr)) && \
(inet_sk(__sk)->rcv_saddr == (__daddr)) && \
((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define TCP_IPV4_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
((tcptw_sk(__sk)->tw_daddr == (__saddr)) && \
(tcptw_sk(__sk)->tw_rcv_saddr == (__daddr)) && \
((*((__u32 *)&(tcptw_sk(__sk)->tw_dport))) == (__ports)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
(((*((__u32 *)&(inet_sk(__sk)->dport)))== (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_HEADER (128 + MAX_HEADER)
...@@ -543,7 +350,7 @@ extern int tcp_v4_rcv(struct sk_buff *skb); ...@@ -543,7 +350,7 @@ extern int tcp_v4_rcv(struct sk_buff *skb);
extern int tcp_v4_remember_stamp(struct sock *sk); extern int tcp_v4_remember_stamp(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw); extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t size); struct msghdr *msg, size_t size);
...@@ -616,10 +423,9 @@ enum tcp_tw_status ...@@ -616,10 +423,9 @@ enum tcp_tw_status
}; };
extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw, extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb, struct sk_buff *skb,
struct tcphdr *th, const struct tcphdr *th);
unsigned len);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
struct request_sock *req, struct request_sock *req,
......
...@@ -1378,7 +1378,8 @@ static LIST_HEAD(proto_list); ...@@ -1378,7 +1378,8 @@ static LIST_HEAD(proto_list);
int proto_register(struct proto *prot, int alloc_slab) int proto_register(struct proto *prot, int alloc_slab)
{ {
char *request_sock_slab_name; char *request_sock_slab_name = NULL;
char *timewait_sock_slab_name;
int rc = -ENOBUFS; int rc = -ENOBUFS;
if (alloc_slab) { if (alloc_slab) {
...@@ -1409,6 +1410,23 @@ int proto_register(struct proto *prot, int alloc_slab) ...@@ -1409,6 +1410,23 @@ int proto_register(struct proto *prot, int alloc_slab)
goto out_free_request_sock_slab_name; goto out_free_request_sock_slab_name;
} }
} }
if (prot->twsk_obj_size) {
static const char mask[] = "tw_sock_%s";
timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
if (timewait_sock_slab_name == NULL)
goto out_free_request_sock_slab;
sprintf(timewait_sock_slab_name, mask, prot->name);
prot->twsk_slab = kmem_cache_create(timewait_sock_slab_name,
prot->twsk_obj_size,
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (prot->twsk_slab == NULL)
goto out_free_timewait_sock_slab_name;
}
} }
write_lock(&proto_list_lock); write_lock(&proto_list_lock);
...@@ -1417,6 +1435,13 @@ int proto_register(struct proto *prot, int alloc_slab) ...@@ -1417,6 +1435,13 @@ int proto_register(struct proto *prot, int alloc_slab)
rc = 0; rc = 0;
out: out:
return rc; return rc;
out_free_timewait_sock_slab_name:
kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
if (prot->rsk_prot && prot->rsk_prot->slab) {
kmem_cache_destroy(prot->rsk_prot->slab);
prot->rsk_prot->slab = NULL;
}
out_free_request_sock_slab_name: out_free_request_sock_slab_name:
kfree(request_sock_slab_name); kfree(request_sock_slab_name);
out_free_sock_slab: out_free_sock_slab:
...@@ -1444,6 +1469,14 @@ void proto_unregister(struct proto *prot) ...@@ -1444,6 +1469,14 @@ void proto_unregister(struct proto *prot)
prot->rsk_prot->slab = NULL; prot->rsk_prot->slab = NULL;
} }
if (prot->twsk_slab != NULL) {
const char *name = kmem_cache_name(prot->twsk_slab);
kmem_cache_destroy(prot->twsk_slab);
kfree(name);
prot->twsk_slab = NULL;
}
list_del(&prot->node); list_del(&prot->node);
write_unlock(&proto_list_lock); write_unlock(&proto_list_lock);
} }
......
...@@ -271,8 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; ...@@ -271,8 +271,6 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
kmem_cache_t *tcp_timewait_cachep;
atomic_t tcp_orphan_count = ATOMIC_INIT(0); atomic_t tcp_orphan_count = ATOMIC_INIT(0);
int sysctl_tcp_mem[3]; int sysctl_tcp_mem[3];
...@@ -2264,13 +2262,6 @@ void __init tcp_init(void) ...@@ -2264,13 +2262,6 @@ void __init tcp_init(void)
if (!tcp_hashinfo.bind_bucket_cachep) if (!tcp_hashinfo.bind_bucket_cachep)
panic("tcp_init: Cannot alloc tcp_bind_bucket cache."); panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
sizeof(struct tcp_tw_bucket),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!tcp_timewait_cachep)
panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
/* Size and allocate the main established and bind bucket /* Size and allocate the main established and bind bucket
* hash tables. * hash tables.
* *
...@@ -2363,4 +2354,3 @@ EXPORT_SYMBOL(tcp_sendpage); ...@@ -2363,4 +2354,3 @@ EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt); EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown); EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics); EXPORT_SYMBOL(tcp_statistics);
EXPORT_SYMBOL(tcp_timewait_cachep);
...@@ -81,7 +81,7 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, ...@@ -81,7 +81,7 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); r->id.tcpdiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
if (r->tcpdiag_state == TCP_TIME_WAIT) { if (r->tcpdiag_state == TCP_TIME_WAIT) {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket*)sk; const struct inet_timewait_sock *tw = inet_twsk(sk);
long tmo = tw->tw_ttd - jiffies; long tmo = tw->tw_ttd - jiffies;
if (tmo < 0) if (tmo < 0)
tmo = 0; tmo = 0;
...@@ -99,10 +99,12 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, ...@@ -99,10 +99,12 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
r->tcpdiag_inode = 0; r->tcpdiag_inode = 0;
#ifdef CONFIG_IP_TCPDIAG_IPV6 #ifdef CONFIG_IP_TCPDIAG_IPV6
if (r->tcpdiag_family == AF_INET6) { if (r->tcpdiag_family == AF_INET6) {
const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src, ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
&tw->tw_v6_rcv_saddr); &tcp6tw->tw_v6_rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst, ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
&tw->tw_v6_daddr); &tcp6tw->tw_v6_daddr);
} }
#endif #endif
nlh->nlmsg_len = skb->tail - b; nlh->nlmsg_len = skb->tail - b;
...@@ -239,7 +241,7 @@ static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh) ...@@ -239,7 +241,7 @@ static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
out: out:
if (sk) { if (sk) {
if (sk->sk_state == TCP_TIME_WAIT) if (sk->sk_state == TCP_TIME_WAIT)
tcp_tw_put((struct tcp_tw_bucket*)sk); inet_twsk_put((struct inet_timewait_sock *)sk);
else else
sock_put(sk); sock_put(sk);
} }
......
...@@ -106,7 +106,7 @@ int sysctl_local_port_range[2] = { 1024, 4999 }; ...@@ -106,7 +106,7 @@ int sysctl_local_port_range[2] = { 1024, 4999 };
static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
{ {
const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk); const u32 sk_rcv_saddr = inet_rcv_saddr(sk);
struct sock *sk2; struct sock *sk2;
struct hlist_node *node; struct hlist_node *node;
int reuse = sk->sk_reuse; int reuse = sk->sk_reuse;
...@@ -119,7 +119,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb ...@@ -119,7 +119,7 @@ static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse || if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) { sk2->sk_state == TCP_LISTEN) {
const u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2); const u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr || if (!sk2_rcv_saddr || !sk_rcv_saddr ||
sk2_rcv_saddr == sk_rcv_saddr) sk2_rcv_saddr == sk_rcv_saddr)
break; break;
...@@ -251,10 +251,10 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr, ...@@ -251,10 +251,10 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
const int dif) const int dif)
{ {
struct inet_ehash_bucket *head; struct inet_ehash_bucket *head;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) INET_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(sport, hnum); const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
struct sock *sk; struct sock *sk;
struct hlist_node *node; const struct hlist_node *node;
/* Optimize here for direct hit, only listening connections can /* Optimize here for direct hit, only listening connections can
* have wildcards anyways. * have wildcards anyways.
*/ */
...@@ -262,13 +262,13 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr, ...@@ -262,13 +262,13 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
head = &tcp_hashinfo.ehash[hash]; head = &tcp_hashinfo.ehash[hash];
read_lock(&head->lock); read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) { sk_for_each(sk, node, &head->chain) {
if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */ goto hit; /* You sunk my battleship! */
} }
/* Must check for a TIME_WAIT'er before going to listener hash. */ /* Must check for a TIME_WAIT'er before going to listener hash. */
sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) { sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; goto hit;
} }
sk = NULL; sk = NULL;
...@@ -313,27 +313,28 @@ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) ...@@ -313,27 +313,28 @@ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
/* called with local bh disabled */ /* called with local bh disabled */
static int __tcp_v4_check_established(struct sock *sk, __u16 lport, static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
struct tcp_tw_bucket **twp) struct inet_timewait_sock **twp)
{ {
struct inet_sock *inet = inet_sk(sk); struct inet_sock *inet = inet_sk(sk);
u32 daddr = inet->rcv_saddr; u32 daddr = inet->rcv_saddr;
u32 saddr = inet->daddr; u32 saddr = inet->daddr;
int dif = sk->sk_bound_dev_if; int dif = sk->sk_bound_dev_if;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) INET_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size); const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
struct sock *sk2; struct sock *sk2;
struct hlist_node *node; const struct hlist_node *node;
struct tcp_tw_bucket *tw; struct inet_timewait_sock *tw;
write_lock(&head->lock); write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */ /* Check TIME-WAIT sockets first. */
sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
tw = (struct tcp_tw_bucket *)sk2; tw = inet_twsk(sk2);
if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
/* With PAWS, it is safe from the viewpoint /* With PAWS, it is safe from the viewpoint
...@@ -350,15 +351,15 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -350,15 +351,15 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
fall back to VJ's scheme and use initial fall back to VJ's scheme and use initial
timestamp retrieved from peer table. timestamp retrieved from peer table.
*/ */
if (tw->tw_ts_recent_stamp && if (tcptw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse && (!twp || (sysctl_tcp_tw_reuse &&
xtime.tv_sec - xtime.tv_sec -
tw->tw_ts_recent_stamp > 1))) { tcptw->tw_ts_recent_stamp > 1))) {
if ((tp->write_seq = tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
tw->tw_snd_nxt + 65535 + 2) == 0) if (tp->write_seq == 0)
tp->write_seq = 1; tp->write_seq = 1;
tp->rx_opt.ts_recent = tw->tw_ts_recent; tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
sock_hold(sk2); sock_hold(sk2);
goto unique; goto unique;
} else } else
...@@ -369,7 +370,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -369,7 +370,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
/* And established part... */ /* And established part... */
sk_for_each(sk2, node, &head->chain) { sk_for_each(sk2, node, &head->chain) {
if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
goto not_unique; goto not_unique;
} }
...@@ -392,7 +393,7 @@ unique: ...@@ -392,7 +393,7 @@ unique:
tcp_tw_deschedule(tw); tcp_tw_deschedule(tw);
NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
tcp_tw_put(tw); inet_twsk_put(tw);
} }
return 0; return 0;
...@@ -429,7 +430,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk) ...@@ -429,7 +430,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
static u32 hint; static u32 hint;
u32 offset = hint + connect_port_offset(sk); u32 offset = hint + connect_port_offset(sk);
struct hlist_node *node; struct hlist_node *node;
struct tcp_tw_bucket *tw = NULL; struct inet_timewait_sock *tw = NULL;
local_bh_disable(); local_bh_disable();
for (i = 1; i <= range; i++) { for (i = 1; i <= range; i++) {
...@@ -482,7 +483,7 @@ ok: ...@@ -482,7 +483,7 @@ ok:
if (tw) { if (tw) {
tcp_tw_deschedule(tw); tcp_tw_deschedule(tw);
tcp_tw_put(tw); inet_twsk_put(tw);
} }
ret = 0; ret = 0;
...@@ -757,7 +758,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -757,7 +758,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
return; return;
} }
if (sk->sk_state == TCP_TIME_WAIT) { if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket *)sk); inet_twsk_put((struct inet_timewait_sock *)sk);
return; return;
} }
...@@ -1002,12 +1003,13 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, ...@@ -1002,12 +1003,13 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{ {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; struct inet_timewait_sock *tw = inet_twsk(sk);
const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
tcp_v4_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt, tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent); tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
tcp_tw_put(tw); inet_twsk_put(tw);
} }
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
...@@ -1368,7 +1370,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) ...@@ -1368,7 +1370,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
bh_lock_sock(nsk); bh_lock_sock(nsk);
return nsk; return nsk;
} }
tcp_tw_put((struct tcp_tw_bucket *)nsk); inet_twsk_put((struct inet_timewait_sock *)nsk);
return NULL; return NULL;
} }
...@@ -1557,25 +1559,25 @@ discard_and_relse: ...@@ -1557,25 +1559,25 @@ discard_and_relse:
do_time_wait: do_time_wait:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
tcp_tw_put((struct tcp_tw_bucket *) sk); inet_twsk_put((struct inet_timewait_sock *) sk);
goto discard_it; goto discard_it;
} }
if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TCP_MIB_INERRS); TCP_INC_STATS_BH(TCP_MIB_INERRS);
tcp_tw_put((struct tcp_tw_bucket *) sk); inet_twsk_put((struct inet_timewait_sock *) sk);
goto discard_it; goto discard_it;
} }
switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk, switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
skb, th, skb->len)) { skb, th)) {
case TCP_TW_SYN: { case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
skb->nh.iph->daddr, skb->nh.iph->daddr,
ntohs(th->dest), ntohs(th->dest),
tcp_v4_iif(skb)); tcp_v4_iif(skb));
if (sk2) { if (sk2) {
tcp_tw_deschedule((struct tcp_tw_bucket *)sk); tcp_tw_deschedule((struct inet_timewait_sock *)sk);
tcp_tw_put((struct tcp_tw_bucket *)sk); inet_twsk_put((struct inet_timewait_sock *)sk);
sk = sk2; sk = sk2;
goto process; goto process;
} }
...@@ -1639,18 +1641,18 @@ int tcp_v4_remember_stamp(struct sock *sk) ...@@ -1639,18 +1641,18 @@ int tcp_v4_remember_stamp(struct sock *sk)
return 0; return 0;
} }
int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw) int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{ {
struct inet_peer *peer = NULL; struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
peer = inet_getpeer(tw->tw_daddr, 1);
if (peer) { if (peer) {
if ((s32)(peer->tcp_ts - tw->tw_ts_recent) <= 0 || const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
(peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec && (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
peer->tcp_ts_stamp <= tw->tw_ts_recent_stamp)) { peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
peer->tcp_ts_stamp = tw->tw_ts_recent_stamp; peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
peer->tcp_ts = tw->tw_ts_recent; peer->tcp_ts = tcptw->tw_ts_recent;
} }
inet_putpeer(peer); inet_putpeer(peer);
return 1; return 1;
...@@ -1758,13 +1760,13 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); ...@@ -1758,13 +1760,13 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */ /* Proc filesystem TCP sock list dumping. */
static inline struct tcp_tw_bucket *tw_head(struct hlist_head *head) static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{ {
return hlist_empty(head) ? NULL : return hlist_empty(head) ? NULL :
list_entry(head->first, struct tcp_tw_bucket, tw_node); list_entry(head->first, struct inet_timewait_sock, tw_node);
} }
static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw) static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{ {
return tw->tw_node.next ? return tw->tw_node.next ?
hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
...@@ -1860,7 +1862,7 @@ static void *established_get_first(struct seq_file *seq) ...@@ -1860,7 +1862,7 @@ static void *established_get_first(struct seq_file *seq)
for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) { for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
struct sock *sk; struct sock *sk;
struct hlist_node *node; struct hlist_node *node;
struct tcp_tw_bucket *tw; struct inet_timewait_sock *tw;
/* We can reschedule _before_ having picked the target: */ /* We can reschedule _before_ having picked the target: */
cond_resched_softirq(); cond_resched_softirq();
...@@ -1874,8 +1876,8 @@ static void *established_get_first(struct seq_file *seq) ...@@ -1874,8 +1876,8 @@ static void *established_get_first(struct seq_file *seq)
goto out; goto out;
} }
st->state = TCP_SEQ_STATE_TIME_WAIT; st->state = TCP_SEQ_STATE_TIME_WAIT;
tw_for_each(tw, node, inet_twsk_for_each(tw, node,
&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) { &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
if (tw->tw_family != st->family) { if (tw->tw_family != st->family) {
continue; continue;
} }
...@@ -1892,7 +1894,7 @@ out: ...@@ -1892,7 +1894,7 @@ out:
static void *established_get_next(struct seq_file *seq, void *cur) static void *established_get_next(struct seq_file *seq, void *cur)
{ {
struct sock *sk = cur; struct sock *sk = cur;
struct tcp_tw_bucket *tw; struct inet_timewait_sock *tw;
struct hlist_node *node; struct hlist_node *node;
struct tcp_iter_state* st = seq->private; struct tcp_iter_state* st = seq->private;
...@@ -2159,7 +2161,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) ...@@ -2159,7 +2161,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
} }
static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i) static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{ {
unsigned int dest, src; unsigned int dest, src;
__u16 destp, srcp; __u16 destp, srcp;
...@@ -2261,6 +2263,7 @@ struct proto tcp_prot = { ...@@ -2261,6 +2263,7 @@ struct proto tcp_prot = {
.sysctl_rmem = sysctl_tcp_rmem, .sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER, .max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp_sock), .obj_size = sizeof(struct tcp_sock),
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.rsk_prot = &tcp_request_sock_ops, .rsk_prot = &tcp_request_sock_ops,
}; };
......
...@@ -41,7 +41,7 @@ int sysctl_tcp_max_tw_buckets = NR_FILE*2; ...@@ -41,7 +41,7 @@ int sysctl_tcp_max_tw_buckets = NR_FILE*2;
int sysctl_tcp_syncookies = SYNC_INIT; int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow; int sysctl_tcp_abort_on_overflow;
static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo); static void tcp_tw_schedule(struct inet_timewait_sock *tw, int timeo);
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{ {
...@@ -58,7 +58,7 @@ int tcp_tw_count; ...@@ -58,7 +58,7 @@ int tcp_tw_count;
/* Must be called with locally disabled BHs. */ /* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw) static void tcp_timewait_kill(struct inet_timewait_sock *tw)
{ {
struct inet_bind_hashbucket *bhead; struct inet_bind_hashbucket *bhead;
struct inet_bind_bucket *tb; struct inet_bind_bucket *tb;
...@@ -85,11 +85,11 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw) ...@@ -85,11 +85,11 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
#ifdef SOCK_REFCNT_DEBUG #ifdef SOCK_REFCNT_DEBUG
if (atomic_read(&tw->tw_refcnt) != 1) { if (atomic_read(&tw->tw_refcnt) != 1) {
printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw, printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
atomic_read(&tw->tw_refcnt)); tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
} }
#endif #endif
tcp_tw_put(tw); inet_twsk_put(tw);
} }
/* /*
...@@ -121,19 +121,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw) ...@@ -121,19 +121,20 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
* to avoid misread sequence numbers, states etc. --ANK * to avoid misread sequence numbers, states etc. --ANK
*/ */
enum tcp_tw_status enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcphdr *th, unsigned len) const struct tcphdr *th)
{ {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
struct tcp_options_received tmp_opt; struct tcp_options_received tmp_opt;
int paws_reject = 0; int paws_reject = 0;
tmp_opt.saw_tstamp = 0; tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) { if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
tcp_parse_options(skb, &tmp_opt, 0); tcp_parse_options(skb, &tmp_opt, 0);
if (tmp_opt.saw_tstamp) { if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tw->tw_ts_recent; tmp_opt.ts_recent = tcptw->tw_ts_recent;
tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
paws_reject = tcp_paws_check(&tmp_opt, th->rst); paws_reject = tcp_paws_check(&tmp_opt, th->rst);
} }
} }
...@@ -144,20 +145,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, ...@@ -144,20 +145,20 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
/* Out of window, send ACK */ /* Out of window, send ACK */
if (paws_reject || if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tw->tw_rcv_nxt, tcptw->tw_rcv_nxt,
tw->tw_rcv_nxt + tw->tw_rcv_wnd)) tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
return TCP_TW_ACK; return TCP_TW_ACK;
if (th->rst) if (th->rst)
goto kill; goto kill;
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt)) if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
goto kill_with_rst; goto kill_with_rst;
/* Dup ACK? */ /* Dup ACK? */
if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) || if (!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
tcp_tw_put(tw); inet_twsk_put(tw);
return TCP_TW_SUCCESS; return TCP_TW_SUCCESS;
} }
...@@ -165,19 +166,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, ...@@ -165,19 +166,19 @@ tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
* reset. * reset.
*/ */
if (!th->fin || if (!th->fin ||
TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) { TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst: kill_with_rst:
tcp_tw_deschedule(tw); tcp_tw_deschedule(tw);
tcp_tw_put(tw); inet_twsk_put(tw);
return TCP_TW_RST; return TCP_TW_RST;
} }
/* FIN arrived, enter true time-wait state. */ /* FIN arrived, enter true time-wait state. */
tw->tw_substate = TCP_TIME_WAIT; tw->tw_substate = TCP_TIME_WAIT;
tw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq; tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tmp_opt.saw_tstamp) { if (tmp_opt.saw_tstamp) {
tw->tw_ts_recent_stamp = xtime.tv_sec; tcptw->tw_ts_recent_stamp = xtime.tv_sec;
tw->tw_ts_recent = tmp_opt.rcv_tsval; tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
} }
/* I am shamed, but failed to make it more elegant. /* I am shamed, but failed to make it more elegant.
...@@ -186,7 +187,7 @@ kill_with_rst: ...@@ -186,7 +187,7 @@ kill_with_rst:
* do not undertsnad recycling in any case, it not * do not undertsnad recycling in any case, it not
* a big problem in practice. --ANK */ * a big problem in practice. --ANK */
if (tw->tw_family == AF_INET && if (tw->tw_family == AF_INET &&
sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp && sysctl_tcp_tw_recycle && tcptw->tw_ts_recent_stamp &&
tcp_v4_tw_remember_stamp(tw)) tcp_v4_tw_remember_stamp(tw))
tcp_tw_schedule(tw, tw->tw_timeout); tcp_tw_schedule(tw, tw->tw_timeout);
else else
...@@ -212,7 +213,7 @@ kill_with_rst: ...@@ -212,7 +213,7 @@ kill_with_rst:
*/ */
if (!paws_reject && if (!paws_reject &&
(TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt && (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */ /* In window segment, it may be only reset or bare ack. */
...@@ -224,18 +225,18 @@ kill_with_rst: ...@@ -224,18 +225,18 @@ kill_with_rst:
if (sysctl_tcp_rfc1337 == 0) { if (sysctl_tcp_rfc1337 == 0) {
kill: kill:
tcp_tw_deschedule(tw); tcp_tw_deschedule(tw);
tcp_tw_put(tw); inet_twsk_put(tw);
return TCP_TW_SUCCESS; return TCP_TW_SUCCESS;
} }
} }
tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN); tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) { if (tmp_opt.saw_tstamp) {
tw->tw_ts_recent = tmp_opt.rcv_tsval; tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
tw->tw_ts_recent_stamp = xtime.tv_sec; tcptw->tw_ts_recent_stamp = xtime.tv_sec;
} }
tcp_tw_put(tw); inet_twsk_put(tw);
return TCP_TW_SUCCESS; return TCP_TW_SUCCESS;
} }
...@@ -257,9 +258,10 @@ kill: ...@@ -257,9 +258,10 @@ kill:
*/ */
if (th->syn && !th->rst && !th->ack && !paws_reject && if (th->syn && !th->rst && !th->ack && !paws_reject &&
(after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) || (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
(tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) { (tmp_opt.saw_tstamp &&
u32 isn = tw->tw_snd_nxt + 65535 + 2; (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
if (isn == 0) if (isn == 0)
isn++; isn++;
TCP_SKB_CB(skb)->when = isn; TCP_SKB_CB(skb)->when = isn;
...@@ -284,7 +286,7 @@ kill: ...@@ -284,7 +286,7 @@ kill:
*/ */
return TCP_TW_ACK; return TCP_TW_ACK;
} }
tcp_tw_put(tw); inet_twsk_put(tw);
return TCP_TW_SUCCESS; return TCP_TW_SUCCESS;
} }
...@@ -293,7 +295,7 @@ kill: ...@@ -293,7 +295,7 @@ kill:
* relevant info into it from the SK, and mess with hash chains * relevant info into it from the SK, and mess with hash chains
* and list linkage. * and list linkage.
*/ */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) static void __tcp_tw_hashdance(struct sock *sk, struct inet_timewait_sock *tw)
{ {
const struct inet_sock *inet = inet_sk(sk); const struct inet_sock *inet = inet_sk(sk);
struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent]; struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
...@@ -306,7 +308,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) ...@@ -306,7 +308,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
spin_lock(&bhead->lock); spin_lock(&bhead->lock);
tw->tw_tb = inet->bind_hash; tw->tw_tb = inet->bind_hash;
BUG_TRAP(inet->bind_hash); BUG_TRAP(inet->bind_hash);
tw_add_bind_node(tw, &tw->tw_tb->owners); inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
spin_unlock(&bhead->lock); spin_unlock(&bhead->lock);
write_lock(&ehead->lock); write_lock(&ehead->lock);
...@@ -316,7 +318,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) ...@@ -316,7 +318,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
sock_prot_dec_use(sk->sk_prot); sock_prot_dec_use(sk->sk_prot);
/* Step 3: Hash TW into TIMEWAIT half of established hash table. */ /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain); inet_twsk_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
atomic_inc(&tw->tw_refcnt); atomic_inc(&tw->tw_refcnt);
write_unlock(&ehead->lock); write_unlock(&ehead->lock);
...@@ -327,19 +329,23 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) ...@@ -327,19 +329,23 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
*/ */
void tcp_time_wait(struct sock *sk, int state, int timeo) void tcp_time_wait(struct sock *sk, int state, int timeo)
{ {
struct tcp_tw_bucket *tw = NULL; struct inet_timewait_sock *tw = NULL;
struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
int recycle_ok = 0; int recycle_ok = 0;
if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp) if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tp->af_specific->remember_stamp(sk); recycle_ok = tp->af_specific->remember_stamp(sk);
if (tcp_tw_count < sysctl_tcp_max_tw_buckets) if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC); tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab, SLAB_ATOMIC);
if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const struct inet_sock *inet = inet_sk(sk);
const int rto = (tp->rto << 2) - (tp->rto >> 1);
if(tw != NULL) { /* Remember our protocol */
struct inet_sock *inet = inet_sk(sk); tw->tw_prot = sk->sk_prot_creator;
int rto = (tp->rto<<2) - (tp->rto>>1);
/* Give us an identity. */ /* Give us an identity. */
tw->tw_daddr = inet->daddr; tw->tw_daddr = inet->daddr;
...@@ -356,25 +362,23 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -356,25 +362,23 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
atomic_set(&tw->tw_refcnt, 1); atomic_set(&tw->tw_refcnt, 1);
tw->tw_hashent = sk->sk_hashent; tw->tw_hashent = sk->sk_hashent;
tw->tw_rcv_nxt = tp->rcv_nxt; tcptw->tw_rcv_nxt = tp->rcv_nxt;
tw->tw_snd_nxt = tp->snd_nxt; tcptw->tw_snd_nxt = tp->snd_nxt;
tw->tw_rcv_wnd = tcp_receive_window(tp); tcptw->tw_rcv_wnd = tcp_receive_window(tp);
tw->tw_ts_recent = tp->rx_opt.ts_recent; tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
tw_dead_node_init(tw); inet_twsk_dead_node_init(tw);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (tw->tw_family == PF_INET6) { if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr); ipv6_addr_copy(&tcp6tw->tw_v6_daddr, &np->daddr);
ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr); ipv6_addr_copy(&tcp6tw->tw_v6_rcv_saddr, &np->rcv_saddr);
tw->tw_v6_ipv6only = np->ipv6only; tw->tw_ipv6only = np->ipv6only;
} else { } else
memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr)); tw->tw_ipv6only = 0;
memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
tw->tw_v6_ipv6only = 0;
}
#endif #endif
/* Linkage updates. */ /* Linkage updates. */
__tcp_tw_hashdance(sk, tw); __tcp_tw_hashdance(sk, tw);
...@@ -392,7 +396,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) ...@@ -392,7 +396,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
} }
tcp_tw_schedule(tw, timeo); tcp_tw_schedule(tw, timeo);
tcp_tw_put(tw); inet_twsk_put(tw);
} else { } else {
/* Sorry, if we're out of memory, just CLOSE this /* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than * socket up. We've got bigger problems than
...@@ -427,7 +431,7 @@ static u32 twkill_thread_slots; ...@@ -427,7 +431,7 @@ static u32 twkill_thread_slots;
/* Returns non-zero if quota exceeded. */ /* Returns non-zero if quota exceeded. */
static int tcp_do_twkill_work(int slot, unsigned int quota) static int tcp_do_twkill_work(int slot, unsigned int quota)
{ {
struct tcp_tw_bucket *tw; struct inet_timewait_sock *tw;
struct hlist_node *node; struct hlist_node *node;
unsigned int killed; unsigned int killed;
int ret; int ret;
...@@ -441,11 +445,11 @@ static int tcp_do_twkill_work(int slot, unsigned int quota) ...@@ -441,11 +445,11 @@ static int tcp_do_twkill_work(int slot, unsigned int quota)
killed = 0; killed = 0;
ret = 0; ret = 0;
rescan: rescan:
tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) { inet_twsk_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
__tw_del_dead_node(tw); __inet_twsk_del_dead_node(tw);
spin_unlock(&tw_death_lock); spin_unlock(&tw_death_lock);
tcp_timewait_kill(tw); tcp_timewait_kill(tw);
tcp_tw_put(tw); inet_twsk_put(tw);
killed++; killed++;
spin_lock(&tw_death_lock); spin_lock(&tw_death_lock);
if (killed > quota) { if (killed > quota) {
...@@ -531,11 +535,11 @@ static void twkill_work(void *dummy) ...@@ -531,11 +535,11 @@ static void twkill_work(void *dummy)
*/ */
/* This is for handling early-kills of TIME_WAIT sockets. */ /* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw) void tcp_tw_deschedule(struct inet_timewait_sock *tw)
{ {
spin_lock(&tw_death_lock); spin_lock(&tw_death_lock);
if (tw_del_dead_node(tw)) { if (inet_twsk_del_dead_node(tw)) {
tcp_tw_put(tw); inet_twsk_put(tw);
if (--tcp_tw_count == 0) if (--tcp_tw_count == 0)
del_timer(&tcp_tw_timer); del_timer(&tcp_tw_timer);
} }
...@@ -552,7 +556,7 @@ static struct timer_list tcp_twcal_timer = ...@@ -552,7 +556,7 @@ static struct timer_list tcp_twcal_timer =
TIMER_INITIALIZER(tcp_twcal_tick, 0, 0); TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS]; static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) static void tcp_tw_schedule(struct inet_timewait_sock *tw, const int timeo)
{ {
struct hlist_head *list; struct hlist_head *list;
int slot; int slot;
...@@ -586,7 +590,7 @@ static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo) ...@@ -586,7 +590,7 @@ static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
spin_lock(&tw_death_lock); spin_lock(&tw_death_lock);
/* Unlink it, if it was scheduled */ /* Unlink it, if it was scheduled */
if (tw_del_dead_node(tw)) if (inet_twsk_del_dead_node(tw))
tcp_tw_count--; tcp_tw_count--;
else else
atomic_inc(&tw->tw_refcnt); atomic_inc(&tw->tw_refcnt);
...@@ -644,13 +648,13 @@ void tcp_twcal_tick(unsigned long dummy) ...@@ -644,13 +648,13 @@ void tcp_twcal_tick(unsigned long dummy)
for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) { for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
if (time_before_eq(j, now)) { if (time_before_eq(j, now)) {
struct hlist_node *node, *safe; struct hlist_node *node, *safe;
struct tcp_tw_bucket *tw; struct inet_timewait_sock *tw;
tw_for_each_inmate_safe(tw, node, safe, inet_twsk_for_each_inmate_safe(tw, node, safe,
&tcp_twcal_row[slot]) { &tcp_twcal_row[slot]) {
__tw_del_dead_node(tw); __inet_twsk_del_dead_node(tw);
tcp_timewait_kill(tw); tcp_timewait_kill(tw);
tcp_tw_put(tw); inet_twsk_put(tw);
killed++; killed++;
} }
} else { } else {
......
...@@ -1041,7 +1041,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) ...@@ -1041,7 +1041,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
const struct in6_addr *sk2_rcv_saddr6 = tcp_v6_rcv_saddr(sk2); const struct in6_addr *sk2_rcv_saddr6 = tcp_v6_rcv_saddr(sk2);
u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
u32 sk2_rcv_saddr = tcp_v4_rcv_saddr(sk2); u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk); int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = tcp_v6_ipv6only(sk2); int sk2_ipv6only = tcp_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(sk_rcv_saddr6); int addr_type = ipv6_addr_type(sk_rcv_saddr6);
......
...@@ -308,33 +308,32 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u ...@@ -308,33 +308,32 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
struct in6_addr *daddr, u16 hnum, struct in6_addr *daddr, u16 hnum,
int dif) int dif)
{ {
struct inet_ehash_bucket *head;
struct sock *sk; struct sock *sk;
struct hlist_node *node; const struct hlist_node *node;
__u32 ports = TCP_COMBINED_PORTS(sport, hnum); const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
int hash;
/* Optimize here for direct hit, only listening connections can /* Optimize here for direct hit, only listening connections can
* have wildcards anyways. * have wildcards anyways.
*/ */
hash = tcp_v6_hashfn(daddr, hnum, saddr, sport); const int hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
head = &tcp_hashinfo.ehash[hash]; struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
read_lock(&head->lock); read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) { sk_for_each(sk, node, &head->chain) {
/* For IPV6 do the cheaper port and family tests first. */ /* For IPV6 do the cheaper port and family tests first. */
if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif)) if (INET6_MATCH(sk, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */ goto hit; /* You sunk my battleship! */
} }
/* Must check for a TIME_WAIT'er before going to listener hash. */ /* Must check for a TIME_WAIT'er before going to listener hash. */
sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) { sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
/* FIXME: acme: check this... */ const struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
if(*((__u32 *)&(tw->tw_dport)) == ports && if(*((__u32 *)&(tw->tw_dport)) == ports &&
sk->sk_family == PF_INET6) { sk->sk_family == PF_INET6) {
if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr) && const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);
ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
(!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)) if (ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
(!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
goto hit; goto hit;
} }
} }
...@@ -455,43 +454,46 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb) ...@@ -455,43 +454,46 @@ static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
} }
static int __tcp_v6_check_established(struct sock *sk, __u16 lport, static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
struct tcp_tw_bucket **twp) struct inet_timewait_sock **twp)
{ {
struct inet_sock *inet = inet_sk(sk); struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *daddr = &np->rcv_saddr; struct in6_addr *daddr = &np->rcv_saddr;
struct in6_addr *saddr = &np->daddr; struct in6_addr *saddr = &np->daddr;
int dif = sk->sk_bound_dev_if; int dif = sk->sk_bound_dev_if;
u32 ports = TCP_COMBINED_PORTS(inet->dport, lport); const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport); const int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
struct sock *sk2; struct sock *sk2;
struct hlist_node *node; const struct hlist_node *node;
struct tcp_tw_bucket *tw; struct inet_timewait_sock *tw;
write_lock(&head->lock); write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */ /* Check TIME-WAIT sockets first. */
sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
tw = (struct tcp_tw_bucket*)sk2; const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk2);
tw = inet_twsk(sk2);
if(*((__u32 *)&(tw->tw_dport)) == ports && if(*((__u32 *)&(tw->tw_dport)) == ports &&
sk2->sk_family == PF_INET6 && sk2->sk_family == PF_INET6 &&
ipv6_addr_equal(&tw->tw_v6_daddr, saddr) && ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr) &&
ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) && ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) { sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
if (tw->tw_ts_recent_stamp && if (tcptw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse && (!twp ||
xtime.tv_sec - (sysctl_tcp_tw_reuse &&
tw->tw_ts_recent_stamp > 1))) { xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
/* See comment in tcp_ipv4.c */ /* See comment in tcp_ipv4.c */
tp->write_seq = tw->tw_snd_nxt + 65535 + 2; tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
if (!tp->write_seq) if (!tp->write_seq)
tp->write_seq = 1; tp->write_seq = 1;
tp->rx_opt.ts_recent = tw->tw_ts_recent; tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tw->tw_ts_recent_stamp; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
sock_hold(sk2); sock_hold(sk2);
goto unique; goto unique;
} else } else
...@@ -502,7 +504,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport, ...@@ -502,7 +504,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
/* And established part... */ /* And established part... */
sk_for_each(sk2, node, &head->chain) { sk_for_each(sk2, node, &head->chain) {
if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif)) if (INET6_MATCH(sk2, saddr, daddr, ports, dif))
goto not_unique; goto not_unique;
} }
...@@ -521,7 +523,7 @@ unique: ...@@ -521,7 +523,7 @@ unique:
tcp_tw_deschedule(tw); tcp_tw_deschedule(tw);
NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
tcp_tw_put(tw); inet_twsk_put(tw);
} }
return 0; return 0;
...@@ -556,7 +558,7 @@ static int tcp_v6_hash_connect(struct sock *sk) ...@@ -556,7 +558,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
static u32 hint; static u32 hint;
u32 offset = hint + tcpv6_port_offset(sk); u32 offset = hint + tcpv6_port_offset(sk);
struct hlist_node *node; struct hlist_node *node;
struct tcp_tw_bucket *tw = NULL; struct inet_timewait_sock *tw = NULL;
local_bh_disable(); local_bh_disable();
for (i = 1; i <= range; i++) { for (i = 1; i <= range; i++) {
...@@ -609,7 +611,7 @@ ok: ...@@ -609,7 +611,7 @@ ok:
if (tw) { if (tw) {
tcp_tw_deschedule(tw); tcp_tw_deschedule(tw);
tcp_tw_put(tw); inet_twsk_put(tw);
} }
ret = 0; ret = 0;
...@@ -845,7 +847,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ...@@ -845,7 +847,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} }
if (sk->sk_state == TCP_TIME_WAIT) { if (sk->sk_state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket*)sk); inet_twsk_put((struct inet_timewait_sock *)sk);
return; return;
} }
...@@ -1223,12 +1225,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ...@@ -1223,12 +1225,14 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{ {
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; struct inet_timewait_sock *tw = inet_twsk(sk);
const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt, tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent); tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcptw->tw_ts_recent);
tcp_tw_put(tw); inet_twsk_put(tw);
} }
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
...@@ -1261,7 +1265,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) ...@@ -1261,7 +1265,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
bh_lock_sock(nsk); bh_lock_sock(nsk);
return nsk; return nsk;
} }
tcp_tw_put((struct tcp_tw_bucket*)nsk); inet_twsk_put((struct inet_timewait_sock *)nsk);
return NULL; return NULL;
} }
...@@ -1798,26 +1802,26 @@ discard_and_relse: ...@@ -1798,26 +1802,26 @@ discard_and_relse:
do_time_wait: do_time_wait:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
tcp_tw_put((struct tcp_tw_bucket *) sk); inet_twsk_put((struct inet_timewait_sock *)sk);
goto discard_it; goto discard_it;
} }
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TCP_MIB_INERRS); TCP_INC_STATS_BH(TCP_MIB_INERRS);
tcp_tw_put((struct tcp_tw_bucket *) sk); inet_twsk_put((struct inet_timewait_sock *)sk);
goto discard_it; goto discard_it;
} }
switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk, switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
skb, th, skb->len)) { skb, th)) {
case TCP_TW_SYN: case TCP_TW_SYN:
{ {
struct sock *sk2; struct sock *sk2;
sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb)); sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
if (sk2 != NULL) { if (sk2 != NULL) {
tcp_tw_deschedule((struct tcp_tw_bucket *)sk); tcp_tw_deschedule((struct inet_timewait_sock *)sk);
tcp_tw_put((struct tcp_tw_bucket *)sk); inet_twsk_put((struct inet_timewait_sock *)sk);
sk = sk2; sk = sk2;
goto process; goto process;
} }
...@@ -2137,17 +2141,18 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) ...@@ -2137,17 +2141,18 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
} }
static void get_timewait6_sock(struct seq_file *seq, static void get_timewait6_sock(struct seq_file *seq,
struct tcp_tw_bucket *tw, int i) struct inet_timewait_sock *tw, int i)
{ {
struct in6_addr *dest, *src; struct in6_addr *dest, *src;
__u16 destp, srcp; __u16 destp, srcp;
struct tcp6_timewait_sock *tcp6tw = tcp6_twsk((struct sock *)tw);
int ttd = tw->tw_ttd - jiffies; int ttd = tw->tw_ttd - jiffies;
if (ttd < 0) if (ttd < 0)
ttd = 0; ttd = 0;
dest = &tw->tw_v6_daddr; dest = &tcp6tw->tw_v6_daddr;
src = &tw->tw_v6_rcv_saddr; src = &tcp6tw->tw_v6_rcv_saddr;
destp = ntohs(tw->tw_dport); destp = ntohs(tw->tw_dport);
srcp = ntohs(tw->tw_sport); srcp = ntohs(tw->tw_sport);
...@@ -2244,6 +2249,7 @@ struct proto tcpv6_prot = { ...@@ -2244,6 +2249,7 @@ struct proto tcpv6_prot = {
.sysctl_rmem = sysctl_tcp_rmem, .sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER, .max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp6_sock), .obj_size = sizeof(struct tcp6_sock),
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
.rsk_prot = &tcp6_request_sock_ops, .rsk_prot = &tcp6_request_sock_ops,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment