Commit 049b3ff5 authored by Neil Horman's avatar Neil Horman Committed by David S. Miller

[SCTP]: Include ulpevents in socket receive buffer accounting.

Also introduces a sysctl option to configure the receive buffer
accounting policy to be either at socket or association level.
Default is all the associations on the same socket share the
receive buffer.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 19c7e9ee
...@@ -715,6 +715,7 @@ enum { ...@@ -715,6 +715,7 @@ enum {
NET_SCTP_PRSCTP_ENABLE = 14, NET_SCTP_PRSCTP_ENABLE = 14,
NET_SCTP_SNDBUF_POLICY = 15, NET_SCTP_SNDBUF_POLICY = 15,
NET_SCTP_SACK_TIMEOUT = 16, NET_SCTP_SACK_TIMEOUT = 16,
NET_SCTP_RCVBUF_POLICY = 17,
}; };
/* /proc/sys/net/bridge */ /* /proc/sys/net/bridge */
......
...@@ -161,6 +161,13 @@ extern struct sctp_globals { ...@@ -161,6 +161,13 @@ extern struct sctp_globals {
*/ */
int sndbuf_policy; int sndbuf_policy;
/*
* Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_rcvbuf
* 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
*/
int rcvbuf_policy;
/* Delayed SACK timeout 200ms default*/ /* Delayed SACK timeout 200ms default*/
int sack_timeout; int sack_timeout;
...@@ -218,6 +225,7 @@ extern struct sctp_globals { ...@@ -218,6 +225,7 @@ extern struct sctp_globals {
#define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable) #define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable)
#define sctp_max_retrans_association (sctp_globals.max_retrans_association) #define sctp_max_retrans_association (sctp_globals.max_retrans_association)
#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy) #define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy)
#define sctp_max_retrans_path (sctp_globals.max_retrans_path) #define sctp_max_retrans_path (sctp_globals.max_retrans_path)
#define sctp_max_retrans_init (sctp_globals.max_retrans_init) #define sctp_max_retrans_init (sctp_globals.max_retrans_init)
#define sctp_sack_timeout (sctp_globals.sack_timeout) #define sctp_sack_timeout (sctp_globals.sack_timeout)
...@@ -1224,6 +1232,9 @@ struct sctp_endpoint { ...@@ -1224,6 +1232,9 @@ struct sctp_endpoint {
/* sendbuf acct. policy. */ /* sendbuf acct. policy. */
__u32 sndbuf_policy; __u32 sndbuf_policy;
/* rcvbuf acct. policy. */
__u32 rcvbuf_policy;
}; };
/* Recover the outer endpoint structure. */ /* Recover the outer endpoint structure. */
...@@ -1550,6 +1561,11 @@ struct sctp_association { ...@@ -1550,6 +1561,11 @@ struct sctp_association {
*/ */
int sndbuf_used; int sndbuf_used;
/* This is the amount of memory that this association has allocated
* in the receive path at any given time.
*/
atomic_t rmem_alloc;
/* This is the wait queue head for send requests waiting on /* This is the wait queue head for send requests waiting on
* the association sndbuf space. * the association sndbuf space.
*/ */
......
...@@ -177,10 +177,10 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a ...@@ -177,10 +177,10 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* RFC 6 - A SCTP receiver MUST be able to receive a minimum of * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
* 1500 bytes in one SCTP packet. * 1500 bytes in one SCTP packet.
*/ */
if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW) if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
asoc->rwnd = SCTP_DEFAULT_MINWINDOW; asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
else else
asoc->rwnd = sk->sk_rcvbuf; asoc->rwnd = sk->sk_rcvbuf/2;
asoc->a_rwnd = asoc->rwnd; asoc->a_rwnd = asoc->rwnd;
...@@ -192,6 +192,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a ...@@ -192,6 +192,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Set the sndbuf size for transmit. */ /* Set the sndbuf size for transmit. */
asoc->sndbuf_used = 0; asoc->sndbuf_used = 0;
/* Initialize the receive memory counter */
atomic_set(&asoc->rmem_alloc, 0);
init_waitqueue_head(&asoc->wait); init_waitqueue_head(&asoc->wait);
asoc->c.my_vtag = sctp_generate_tag(ep); asoc->c.my_vtag = sctp_generate_tag(ep);
...@@ -400,6 +403,8 @@ static void sctp_association_destroy(struct sctp_association *asoc) ...@@ -400,6 +403,8 @@ static void sctp_association_destroy(struct sctp_association *asoc)
spin_unlock_bh(&sctp_assocs_id_lock); spin_unlock_bh(&sctp_assocs_id_lock);
} }
BUG_TRAP(!atomic_read(&asoc->rmem_alloc));
if (asoc->base.malloced) { if (asoc->base.malloced) {
kfree(asoc); kfree(asoc);
SCTP_DBG_OBJCNT_DEC(assoc); SCTP_DBG_OBJCNT_DEC(assoc);
......
...@@ -104,6 +104,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, ...@@ -104,6 +104,9 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
sk->sk_write_space = sctp_write_space; sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
/* Get the receive buffer policy for this endpoint */
ep->rcvbuf_policy = sctp_rcvbuf_policy;
/* Initialize the secret key used with cookie. */ /* Initialize the secret key used with cookie. */
get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
ep->last_key = ep->current_key = 0; ep->last_key = ep->current_key = 0;
......
...@@ -100,21 +100,6 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb) ...@@ -100,21 +100,6 @@ static inline int sctp_rcv_checksum(struct sk_buff *skb)
return 0; return 0;
} }
/* The free routine for skbuffs that sctp receives */
static void sctp_rfree(struct sk_buff *skb)
{
atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc);
sock_rfree(skb);
}
/* Charge a received skb to @sk for receive-buffer accounting.
 *
 * skb_set_owner_r() ties the skb to the socket and charges skb->truesize;
 * we then install sctp_rfree as the destructor (replacing sock_rfree) and
 * additionally account for the sctp_chunk metadata that rides along with
 * this skb, so the charge is undone symmetrically on free.
 */
static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_set_owner_r(skb, sk);
	skb->destructor = sctp_rfree;
	atomic_add(sizeof(struct sctp_chunk), &sk->sk_rmem_alloc);
}
struct sctp_input_cb { struct sctp_input_cb {
union { union {
struct inet_skb_parm h4; struct inet_skb_parm h4;
...@@ -217,9 +202,6 @@ int sctp_rcv(struct sk_buff *skb) ...@@ -217,9 +202,6 @@ int sctp_rcv(struct sk_buff *skb)
rcvr = &ep->base; rcvr = &ep->base;
} }
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto discard_release;
/* /*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets. * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
* An SCTP packet is called an "out of the blue" (OOTB) * An SCTP packet is called an "out of the blue" (OOTB)
...@@ -256,8 +238,6 @@ int sctp_rcv(struct sk_buff *skb) ...@@ -256,8 +238,6 @@ int sctp_rcv(struct sk_buff *skb)
} }
SCTP_INPUT_CB(skb)->chunk = chunk; SCTP_INPUT_CB(skb)->chunk = chunk;
sctp_rcv_set_owner_r(skb,sk);
/* Remember what endpoint is to handle this packet. */ /* Remember what endpoint is to handle this packet. */
chunk->rcvr = rcvr; chunk->rcvr = rcvr;
......
...@@ -1050,6 +1050,9 @@ SCTP_STATIC __init int sctp_init(void) ...@@ -1050,6 +1050,9 @@ SCTP_STATIC __init int sctp_init(void)
/* Sendbuffer growth - do per-socket accounting */ /* Sendbuffer growth - do per-socket accounting */
sctp_sndbuf_policy = 0; sctp_sndbuf_policy = 0;
/* Rcvbuffer growth - do per-socket accounting */
sctp_rcvbuf_policy = 0;
/* HB.interval - 30 seconds */ /* HB.interval - 30 seconds */
sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
......
...@@ -5160,6 +5160,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, ...@@ -5160,6 +5160,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
sctp_verb_t deliver; sctp_verb_t deliver;
int tmp; int tmp;
__u32 tsn; __u32 tsn;
int account_value;
struct sock *sk = asoc->base.sk;
data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
...@@ -5169,6 +5171,26 @@ static int sctp_eat_data(const struct sctp_association *asoc, ...@@ -5169,6 +5171,26 @@ static int sctp_eat_data(const struct sctp_association *asoc,
/* ASSERT: Now skb->data is really the user data. */ /* ASSERT: Now skb->data is really the user data. */
/*
* if we are established, and we have used up our receive
* buffer memory, drop the frame
*/
if (asoc->state == SCTP_STATE_ESTABLISHED) {
/*
* If the receive buffer policy is 1, then each
* association can allocate up to sk_rcvbuf bytes
* otherwise, all the associations in aggregate
* may allocate up to sk_rcvbuf bytes
*/
if (asoc->ep->rcvbuf_policy)
account_value = atomic_read(&asoc->rmem_alloc);
else
account_value = atomic_read(&sk->sk_rmem_alloc);
if (account_value > sk->sk_rcvbuf)
return SCTP_IERROR_IGNORE_TSN;
}
/* Process ECN based congestion. /* Process ECN based congestion.
* *
* Since the chunk structure is reused for all chunks within * Since the chunk structure is reused for all chunks within
......
...@@ -5114,8 +5114,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, ...@@ -5114,8 +5114,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb); event = sctp_skb2event(skb);
if (event->asoc == assoc) { if (event->asoc == assoc) {
sock_rfree(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue); __skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb); __skb_queue_tail(&newsk->sk_receive_queue, skb);
skb_set_owner_r(skb, newsk);
} }
} }
...@@ -5143,8 +5145,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, ...@@ -5143,8 +5145,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb); event = sctp_skb2event(skb);
if (event->asoc == assoc) { if (event->asoc == assoc) {
sock_rfree(skb);
__skb_unlink(skb, &oldsp->pd_lobby); __skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb); __skb_queue_tail(queue, skb);
skb_set_owner_r(skb, newsk);
} }
} }
......
...@@ -120,6 +120,14 @@ static ctl_table sctp_table[] = { ...@@ -120,6 +120,14 @@ static ctl_table sctp_table[] = {
.mode = 0644, .mode = 0644,
.proc_handler = &proc_dointvec .proc_handler = &proc_dointvec
}, },
{
.ctl_name = NET_SCTP_RCVBUF_POLICY,
.procname = "rcvbuf_policy",
.data = &sctp_rcvbuf_policy,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
},
{ {
.ctl_name = NET_SCTP_PATH_MAX_RETRANS, .ctl_name = NET_SCTP_PATH_MAX_RETRANS,
.procname = "path_max_retrans", .procname = "path_max_retrans",
......
...@@ -52,19 +52,6 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, ...@@ -52,19 +52,6 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
struct sctp_association *asoc); struct sctp_association *asoc);
static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); static void sctp_ulpevent_release_data(struct sctp_ulpevent *event);
/* Stub skb destructor — intentionally empty. */
static void sctp_stub_rfree(struct sk_buff *skb)
{
/* WARNING: This function is just a warning not to use the
 * skb destructor. If the skb is shared, we may get the destructor
 * callback on some processor that does not own the sock_lock. This
 * was occurring with PACKET socket applications that were monitoring
 * our skbs. We can't take the sock_lock, because we can't risk
 * recursing if we do really own the sock lock. Instead, do all
 * of our rwnd manipulation while we own the sock_lock outright.
 */
}
/* Initialize an ULP event from an given skb. */ /* Initialize an ULP event from an given skb. */
SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
{ {
...@@ -111,15 +98,19 @@ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, ...@@ -111,15 +98,19 @@ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
*/ */
sctp_association_hold((struct sctp_association *)asoc); sctp_association_hold((struct sctp_association *)asoc);
skb = sctp_event2skb(event); skb = sctp_event2skb(event);
skb->sk = asoc->base.sk;
event->asoc = (struct sctp_association *)asoc; event->asoc = (struct sctp_association *)asoc;
skb->destructor = sctp_stub_rfree; atomic_add(skb->truesize, &event->asoc->rmem_alloc);
skb_set_owner_r(skb, asoc->base.sk);
} }
/* A simple destructor to give up the reference to the association. */ /* A simple destructor to give up the reference to the association. */
static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
{ {
sctp_association_put(event->asoc); struct sctp_association *asoc = event->asoc;
struct sk_buff *skb = sctp_event2skb(event);
atomic_sub(skb->truesize, &asoc->rmem_alloc);
sctp_association_put(asoc);
} }
/* Create and initialize an SCTP_ASSOC_CHANGE event. /* Create and initialize an SCTP_ASSOC_CHANGE event.
...@@ -922,7 +913,6 @@ done: ...@@ -922,7 +913,6 @@ done:
/* Free a ulpevent that has an owner. It includes releasing the reference /* Free a ulpevent that has an owner. It includes releasing the reference
* to the owner, updating the rwnd in case of a DATA event and freeing the * to the owner, updating the rwnd in case of a DATA event and freeing the
* skb. * skb.
* See comments in sctp_stub_rfree().
*/ */
void sctp_ulpevent_free(struct sctp_ulpevent *event) void sctp_ulpevent_free(struct sctp_ulpevent *event)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment