Commit 40efc6fa authored by Stephen Hemminger, committed by David S. Miller

[TCP]: less inline's

TCP inline usage cleanup:
 * get rid of inline in several places
 * replace __inline__ with inline where possible
 * move functions used in one file out of tcp.h
 * let compiler decide on used once cases

On x86_64: 
   text	   data	    bss	    dec	    hex	filename
3594701	 648348	 567400	4810449	 4966d1	vmlinux.orig
3593133	 648580	 567400	4809113	 496199	vmlinux

On sparc64:
   text	   data	    bss	    dec	    hex	filename
2538278	 406152	 530392	3474822	 350586	vmlinux.ORIG
2536382	 406384	 530392	3473158	 34ff06	vmlinux
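
Net effect: text shrinks by 1568 bytes on x86_64 and by 1896 bytes on sparc64, while data grows by 232 bytes on each, presumably the symbol-table cost of the newly exported functions.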
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3c19065a
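
The pattern repeated throughout the diff below is mechanical: helpers that were forced inline with "static inline" (or the pre-ANSI "__inline__" spelling) become plain "static" functions in the single .c file that uses them, so the compiler's own heuristics decide whether inlining pays for itself. A minimal before/after sketch of the idea, using a hypothetical helper and struct (foo_window_ok and foo_sock are illustrations, not kernel code; only one variant would exist at a time):

    /* Hypothetical example type, standing in for a real socket struct. */
    struct foo_sock {
            unsigned int rcv_wnd;
            unsigned int rcv_ssthresh;
    };

    /* Before: forced inline, typically in a widely included header. */
    static inline int foo_window_ok(const struct foo_sock *fp)
    {
            return fp->rcv_wnd > fp->rcv_ssthresh;
    }

    /* After: plain static in the one .c file that calls it.  The
     * compiler may still inline it, but only when its heuristics
     * judge the code-size cost worthwhile. */
    static int foo_window_ok(const struct foo_sock *fp)
    {
            return fp->rcv_wnd > fp->rcv_ssthresh;
    }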
@@ -174,6 +174,34 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 	return err;
 }
 
+/*
+ * Linear increase during slow start
+ */
+void tcp_slow_start(struct tcp_sock *tp)
+{
+	if (sysctl_tcp_abc) {
+		/* RFC3465: Slow Start
+		 * TCP sender SHOULD increase cwnd by the number of
+		 * previously unacknowledged bytes ACKed by each incoming
+		 * acknowledgment, provided the increase is not more than L
+		 */
+		if (tp->bytes_acked < tp->mss_cache)
+			return;
+
+		/* We MAY increase by 2 if discovered delayed ack */
+		if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
+			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+				tp->snd_cwnd++;
+		}
+	}
+	tp->bytes_acked = 0;
+
+	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+		tp->snd_cwnd++;
+}
+EXPORT_SYMBOL_GPL(tcp_slow_start);
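
For readers unfamiliar with RFC3465 Appropriate Byte Counting, the added function's growth rule is easy to model outside the kernel. A standalone sketch, where mini_tcp_sock and mini_slow_start are simplified stand-ins for the kernel structures above, not kernel API:

    #include <stdio.h>

    struct mini_tcp_sock {                  /* hypothetical user-space model  */
            unsigned int snd_cwnd;          /* congestion window, in packets  */
            unsigned int snd_cwnd_clamp;    /* upper bound on snd_cwnd        */
            unsigned int bytes_acked;       /* ABC byte counter               */
            unsigned int mss_cache;         /* sender MSS estimate            */
    };

    static int sysctl_tcp_abc = 1;  /* 0: per-ACK growth, 1: ABC, 2: ABC with L=2*MSS */

    /* Same control flow as tcp_slow_start() above, minus the kernel types. */
    static void mini_slow_start(struct mini_tcp_sock *tp)
    {
            if (sysctl_tcp_abc) {
                    if (tp->bytes_acked < tp->mss_cache)
                            return;         /* under one MSS ACKed: no growth yet */
                    if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2 * tp->mss_cache &&
                        tp->snd_cwnd < tp->snd_cwnd_clamp)
                            tp->snd_cwnd++; /* stretch/delayed ACK: MAY grow by 2 */
            }
            tp->bytes_acked = 0;
            if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                    tp->snd_cwnd++;
    }

    int main(void)
    {
            struct mini_tcp_sock tp = { 10, 100, 0, 1460 };

            tp.bytes_acked = 700;           /* sub-MSS ACK: early return, no growth */
            mini_slow_start(&tp);
            printf("cwnd after  700-byte ACK: %u\n", tp.snd_cwnd);  /* 10 */

            tp.bytes_acked = 1460;          /* a full MSS ACKed grows cwnd by 1 */
            mini_slow_start(&tp);
            printf("cwnd after 1460-byte ACK: %u\n", tp.snd_cwnd);  /* 11 */
            return 0;
    }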
+
 /*
  * TCP Reno congestion control
  * This is special case used for fallback as well.
@@ -115,7 +115,7 @@ int sysctl_tcp_abc = 1;
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
-static inline void tcp_measure_rcv_mss(struct sock *sk,
-				       const struct sk_buff *skb)
+static void tcp_measure_rcv_mss(struct sock *sk,
+				const struct sk_buff *skb)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -246,7 +246,7 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
 	return 0;
 }
 
-static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
-				   struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+			    struct sk_buff *skb)
 {
 	/* Check #1 */
@@ -341,6 +341,26 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
 		tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
 }
 
+/* Initialize RCV_MSS value.
+ * RCV_MSS is our guess about the MSS used by the peer.
+ * We don't have any direct information about the MSS.
+ * It's better to underestimate the RCV_MSS rather than overestimate it.
+ * Overestimating makes us ACK less frequently than needed.
+ * Underestimating is easier to detect and fix by tcp_measure_rcv_mss().
+ */
+void tcp_initialize_rcv_mss(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
+
+	hint = min(hint, tp->rcv_wnd/2);
+	hint = min(hint, TCP_MIN_RCVMSS);
+	hint = max(hint, TCP_MIN_MSS);
+
+	inet_csk(sk)->icsk_ack.rcv_mss = hint;
+}
+
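
The clamping chain above is pure arithmetic, so a worked example may help. TCP_MIN_RCVMSS (536) and TCP_MIN_MSS (88) are the values defined in include/net/tcp.h of this era; the rest is a standalone re-run of the same min/max steps:

    #include <stdio.h>

    #define TCP_MIN_MSS     88U     /* values from include/net/tcp.h of this era */
    #define TCP_MIN_RCVMSS 536U

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
    static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

    int main(void)
    {
            unsigned int advmss = 1460, mss_cache = 1460, rcv_wnd = 5840;

            unsigned int hint = min_u(advmss, mss_cache);   /* 1460 */
            hint = min_u(hint, rcv_wnd / 2);                /* min(1460, 2920) = 1460 */
            hint = min_u(hint, TCP_MIN_RCVMSS);             /* min(1460, 536)  = 536  */
            hint = max_u(hint, TCP_MIN_MSS);                /* max(536, 88)    = 536  */

            /* 536: a deliberate underestimate, per the comment above. */
            printf("initial rcv_mss guess: %u\n", hint);
            return 0;
    }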
 /* Receiver "autotuning" code.
  *
  * The algorithm for RTT estimation w/o timestamps is based on
 
@@ -735,6 +755,27 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
+/* Set slow start threshold and cwnd not falling to slow start */
+void tcp_enter_cwr(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tp->prior_ssthresh = 0;
+	tp->bytes_acked = 0;
+	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+		tp->undo_marker = 0;
+		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+		tp->snd_cwnd = min(tp->snd_cwnd,
+				   tcp_packets_in_flight(tp) + 1U);
+		tp->snd_cwnd_cnt = 0;
+		tp->high_seq = tp->snd_nxt;
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+		TCP_ECN_queue_cwr(tp);
+
+		tcp_set_ca_state(sk, TCP_CA_CWR);
+	}
+}
+
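
tcp_enter_cwr() is mostly bookkeeping around one idea: on entering CWR, drop ssthresh via the congestion-control ops and pull cwnd down to roughly what is in flight. A toy user-space model (mini_sock is hypothetical; reno_ssthresh stands in for icsk_ca_ops->ssthresh, using Reno's halve-the-window rule):

    #include <stdio.h>

    enum mini_ca_state { MINI_CA_OPEN = 0, MINI_CA_CWR = 2 };  /* subset of TCP_CA_* */

    struct mini_sock {
            enum mini_ca_state ca_state;
            unsigned int snd_cwnd, snd_ssthresh, in_flight;
    };

    /* Reno's rule, max(cwnd/2, 2): stands in for icsk_ca_ops->ssthresh(sk). */
    static unsigned int reno_ssthresh(const struct mini_sock *sk)
    {
            return sk->snd_cwnd / 2 > 2 ? sk->snd_cwnd / 2 : 2;
    }

    /* Enter CWR at most once: lower ssthresh, clamp cwnd to in_flight + 1. */
    static void mini_enter_cwr(struct mini_sock *sk)
    {
            if (sk->ca_state < MINI_CA_CWR) {
                    sk->snd_ssthresh = reno_ssthresh(sk);
                    if (sk->snd_cwnd > sk->in_flight + 1)
                            sk->snd_cwnd = sk->in_flight + 1;
                    sk->ca_state = MINI_CA_CWR;
            }
    }

    int main(void)
    {
            struct mini_sock sk = { MINI_CA_OPEN, 40, 64, 12 };

            mini_enter_cwr(&sk);
            /* prints: cwnd=13 ssthresh=20 state=2 */
            printf("cwnd=%u ssthresh=%u state=%d\n",
                   sk.snd_cwnd, sk.snd_ssthresh, sk.ca_state);
            return 0;
    }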
 /* Initialize metrics on socket. */
 static void tcp_init_metrics(struct sock *sk)
 
@@ -2070,7 +2111,7 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
 		tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
-static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
-				  u32 in_flight, int good)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+			   u32 in_flight, int good)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -2082,7 +2123,7 @@ static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  * RFC2988 recommends to restart timer to now+rto.
  */
-static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
 {
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 
@@ -2147,7 +2188,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
 	return acked;
 }
 
-static inline u32 tcp_usrtt(const struct sk_buff *skb)
+static u32 tcp_usrtt(const struct sk_buff *skb)
 {
 	struct timeval tv, now;
@@ -2583,7 +2624,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
-					 struct tcp_sock *tp)
+static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
+				  struct tcp_sock *tp)
 {
 	if (th->doff == sizeof(struct tcphdr)>>2) {
 
@@ -2804,8 +2845,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 	}
 }
 
-static __inline__ int
-tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
+static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
 {
 	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
 		if (before(seq, sp->start_seq))
 
@@ -2817,7 +2857,7 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
 	return 0;
 }
 
-static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
 		if (before(seq, tp->rcv_nxt))
@@ -2832,7 +2872,7 @@ static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 	}
 }
 
-static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	if (!tp->rx_opt.dsack)
 		tcp_dsack_set(tp, seq, end_seq);
 
@@ -2890,7 +2930,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
 	}
 }
 
-static __inline__ void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
+static inline void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
 {
 	__u32 tmp;
 
@@ -3455,7 +3495,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static inline int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
 {
 	/* If the user specified a specific send buffer setting, do
 	 * not modify it.
 
@@ -3502,7 +3542,7 @@ static void tcp_new_space(struct sock *sk)
 	sk->sk_write_space(sk);
 }
 
-static inline void tcp_check_space(struct sock *sk)
+static void tcp_check_space(struct sock *sk)
 {
 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
@@ -3512,7 +3552,7 @@ static inline void tcp_check_space(struct sock *sk)
 	}
 }
 
-static __inline__ void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
 {
 	tcp_push_pending_frames(sk, tp);
 	tcp_check_space(sk);
 
@@ -3544,7 +3584,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
 	}
 }
 
-static __inline__ void tcp_ack_snd_check(struct sock *sk)
+static inline void tcp_ack_snd_check(struct sock *sk)
 {
 	if (!inet_csk_ack_scheduled(sk)) {
 		/* We sent a data segment already. */
 
@@ -3692,8 +3732,7 @@ static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 	return result;
 }
 
-static __inline__ int
-tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
 {
 	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
 		__tcp_checksum_complete_user(sk, skb);
 
@@ -4474,3 +4513,4 @@ EXPORT_SYMBOL(sysctl_tcp_abc);
 EXPORT_SYMBOL(tcp_parse_options);
 EXPORT_SYMBOL(tcp_rcv_established);
 EXPORT_SYMBOL(tcp_rcv_state_process);
+EXPORT_SYMBOL(tcp_initialize_rcv_mss);
@@ -270,8 +270,7 @@ failure:
 /*
  * This routine does path mtu discovery as defined in RFC1191.
  */
-static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
-				     u32 mtu)
+static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
 {
 	struct dst_entry *dst;
 	struct inet_sock *inet = inet_sk(sk);
 
@@ -662,7 +661,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 	kfree(inet_rsk(req)->opt);
 }
 
-static inline void syn_flood_warning(struct sk_buff *skb)
+static void syn_flood_warning(struct sk_buff *skb)
 {
 	static unsigned long warntime;
 
@@ -677,7 +676,7 @@ static inline void syn_flood_warning(struct sk_buff *skb)
 /*
  * Save and compile IPv4 options into the request_sock if needed.
  */
-static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
-						     struct sk_buff *skb)
+static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
 {
 	struct ip_options *opt = &(IPCB(skb)->opt);
@@ -51,7 +51,7 @@ int sysctl_tcp_retrans_collapse = 1;
  */
 int sysctl_tcp_tso_win_divisor = 3;
 
-static inline void update_send_head(struct sock *sk, struct tcp_sock *tp,
-				    struct sk_buff *skb)
+static void update_send_head(struct sock *sk, struct tcp_sock *tp,
+			     struct sk_buff *skb)
 {
 	sk->sk_send_head = skb->next;
 
@@ -124,7 +124,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 	tp->snd_cwnd_used = 0;
 }
 
-static inline void tcp_event_data_sent(struct tcp_sock *tp,
-				       struct sk_buff *skb, struct sock *sk)
+static void tcp_event_data_sent(struct tcp_sock *tp,
+				struct sk_buff *skb, struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -142,7 +142,7 @@ static inline void tcp_event_data_sent(struct tcp_sock *tp,
 		icsk->icsk_ack.pingpong = 1;
 }
 
-static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 {
 	tcp_dec_quickack_mode(sk, pkts);
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 
@@ -212,7 +212,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
  * value can be stuffed directly into th->window for an outgoing
  * frame.
  */
-static __inline__ u16 tcp_select_window(struct sock *sk)
+static u16 tcp_select_window(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 cur_win = tcp_receive_window(tp);
@@ -250,6 +250,75 @@ static __inline__ u16 tcp_select_window(struct sock *sk)
 	return new_win;
 }
 
+static void tcp_build_and_update_options(__u32 *ptr, struct tcp_sock *tp,
+					 __u32 tstamp)
+{
+	if (tp->rx_opt.tstamp_ok) {
+		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
+					  (TCPOPT_NOP << 16) |
+					  (TCPOPT_TIMESTAMP << 8) |
+					  TCPOLEN_TIMESTAMP);
+		*ptr++ = htonl(tstamp);
+		*ptr++ = htonl(tp->rx_opt.ts_recent);
+	}
+	if (tp->rx_opt.eff_sacks) {
+		struct tcp_sack_block *sp = tp->rx_opt.dsack ? tp->duplicate_sack : tp->selective_acks;
+		int this_sack;
+
+		*ptr++ = htonl((TCPOPT_NOP << 24) |
+			       (TCPOPT_NOP << 16) |
+			       (TCPOPT_SACK << 8) |
+			       (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
+						     TCPOLEN_SACK_PERBLOCK)));
+		for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
+			*ptr++ = htonl(sp[this_sack].start_seq);
+			*ptr++ = htonl(sp[this_sack].end_seq);
+		}
+		if (tp->rx_opt.dsack) {
+			tp->rx_opt.dsack = 0;
+			tp->rx_opt.eff_sacks--;
+		}
+	}
+}
+
+/* Construct a tcp options header for a SYN or SYN_ACK packet.
+ * If this is ever changed make sure to change the definition of
+ * MAX_SYN_SIZE to match the new maximum number of options that you
+ * can generate.
+ */
+static void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
+				  int offer_wscale, int wscale, __u32 tstamp,
+				  __u32 ts_recent)
+{
+	/* We always get an MSS option.
+	 * The option bytes which will be seen in normal data
+	 * packets should timestamps be used, must be in the MSS
+	 * advertised. But we subtract them from tp->mss_cache so
+	 * that calculations in tcp_sendmsg are simpler etc.
+	 * So account for this fact here if necessary. If we
+	 * don't do this correctly, as a receiver we won't
+	 * recognize data packets as being full sized when we
+	 * should, and thus we won't abide by the delayed ACK
+	 * rules correctly.
+	 * SACKs don't matter, we never delay an ACK when we
+	 * have any of those going out.
+	 */
+	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
+	if (ts) {
+		if (sack)
+			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
+						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+		else
+			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+		*ptr++ = htonl(tstamp);		/* TSVAL */
+		*ptr++ = htonl(ts_recent);	/* TSECR */
+	} else if (sack)
+		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
+	if (offer_wscale)
+		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
+}
+
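
Both option builders emit 32-bit words in network byte order, with NOP (kind 1) bytes as padding so each option starts where the parser expects it. A standalone sketch of two of the words built above, using the standard wire values (NOP=1, MSS=2, WINDOW=3; lengths 4 and 3) rather than the kernel macros:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl */

    #define TCPOPT_NOP      1       /* standard wire values (RFC 793/1323) */
    #define TCPOPT_MSS      2
    #define TCPOPT_WINDOW   3
    #define TCPOLEN_MSS     4
    #define TCPOLEN_WINDOW  3

    int main(void)
    {
            int mss = 1460, wscale = 7;

            /* First SYN option word: kind=2, len=4, 16-bit MSS value. */
            uint32_t mss_word = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);

            /* Window scale word: NOP pad, kind=3, len=3, 8-bit shift count. */
            uint32_t ws_word = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) |
                                     (TCPOLEN_WINDOW << 8) | wscale);

            const uint8_t *p = (const uint8_t *)&mss_word;
            const uint8_t *q = (const uint8_t *)&ws_word;
            /* On the wire: 02 04 05 b4 and 01 03 03 07. */
            printf("MSS option:    %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
            printf("wscale option: %02x %02x %02x %02x\n", q[0], q[1], q[2], q[3]);
            return 0;
    }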
 /* This routine actually transmits TCP packets queued in by
  * tcp_do_sendmsg().  This is used by both the initial
 
@@ -724,7 +793,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 /* Congestion window validation. (RFC2861) */
 
-static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
+static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
 {
 	__u32 packets_out = tp->packets_out;
 
@@ -773,7 +842,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
 /* This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
+static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
@@ -1794,7 +1863,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 /*
  * Do all connect socket setups that can be done AF independent.
  */
-static inline void tcp_connect_init(struct sock *sk)
+static void tcp_connect_init(struct sock *sk)
 {
 	struct dst_entry *dst = __sk_dst_get(sk);
 	struct tcp_sock *tp = tcp_sk(sk);