Commit 50b1a782 authored by Zhu Yi, committed by David S. Miller

sctp: use limited socket backlog

Make sctp adapt to the limited socket backlog change.

Cc: Vlad Yasevich <vladislav.yasevich@hp.com>
Cc: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 79545b68
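
The diff below routes backlog enqueueing through sk_add_backlog_limited() and seeds
sk->sk_backlog.limit from sysctl_sctp_rmem[1] in sctp_init_sock(). For context, here is a
minimal sketch of what that helper does, assuming the semantics of the companion
"net: add limit for socket backlog" change in the same series; the __sk_add_backlog()
name, the sk_backlog.len accounting and the exact overflow threshold are recalled from
that series, not quoted from this commit:

	/* Sketch only: refuse the skb once the bytes already queued on the
	 * backlog exceed the per-socket limit, otherwise enqueue it and
	 * account its true size.
	 */
	static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
	{
		if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
			return -ENOBUFS;

		__sk_add_backlog(sk, skb);		/* the old unconditional enqueue */
		sk->sk_backlog.len += skb->truesize;	/* account queued bytes */
		return 0;
	}

A non-zero return means the packet was not queued, which is why every caller changed in
this patch frees the chunk and drops the skb on failure.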
@@ -75,7 +75,7 @@ static struct sctp_association *__sctp_lookup_association(
 					const union sctp_addr *peer,
 					struct sctp_transport **pt);
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
 
 
 /* Calculate the SCTP checksum of an SCTP packet. */
@@ -265,8 +265,13 @@ int sctp_rcv(struct sk_buff *skb)
 	}
 
 	if (sock_owned_by_user(sk)) {
+		if (sctp_add_backlog(sk, skb)) {
+			sctp_bh_unlock_sock(sk);
+			sctp_chunk_free(chunk);
+			skb = NULL; /* sctp_chunk_free already freed the skb */
+			goto discard_release;
+		}
 		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG);
-		sctp_add_backlog(sk, skb);
 	} else {
 		SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
@@ -336,8 +341,10 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		sctp_bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
-			sk_add_backlog(sk, skb);
-			backloged = 1;
+			if (sk_add_backlog_limited(sk, skb))
+				sctp_chunk_free(chunk);
+			else
+				backloged = 1;
 		} else
 			sctp_inq_push(inqueue, chunk);
@@ -362,22 +369,27 @@ done:
 	return 0;
 }
 
-static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
 	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 	struct sctp_ep_common *rcvr = chunk->rcvr;
+	int ret;
 
-	/* Hold the assoc/ep while hanging on the backlog queue.
-	 * This way, we know structures we need will not disappear from us
-	 */
-	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
-		sctp_association_hold(sctp_assoc(rcvr));
-	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
-		sctp_endpoint_hold(sctp_ep(rcvr));
-	else
-		BUG();
+	ret = sk_add_backlog_limited(sk, skb);
+	if (!ret) {
+		/* Hold the assoc/ep while hanging on the backlog queue.
+		 * This way, we know structures we need will not disappear
+		 * from us
+		 */
+		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+			sctp_association_hold(sctp_assoc(rcvr));
+		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+			sctp_endpoint_hold(sctp_ep(rcvr));
+		else
+			BUG();
+	}
+	return ret;
 
-	sk_add_backlog(sk, skb);
 }
 
 /* Handle icmp frag needed error. */
@@ -3720,6 +3720,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	SCTP_DBG_OBJCNT_INC(sock);
 	percpu_counter_inc(&sctp_sockets_allocated);
 
+	/* Set socket backlog limit. */
+	sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+
 	local_bh_disable();
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	local_bh_enable();