Commit 0db1d6fc authored by Matt Mackall, committed by David S. Miller

[NETPOLL]: add retry timeout

Add limited retry logic to netpoll_send_skb

Each time we attempt to send, decrement our per-device retry counter.
On every successful send, we reset the counter. 

We delay 50us between attempts with up to 20000 retries for a total of
1 second. After we've exhausted our retries, subsequent failed
attempts will try only once until reset by success.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f0d3459d
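
The counter behaviour described above can be seen in isolation with a minimal userspace sketch. This is an illustration, not the kernel code: MAX_RETRIES and tries mirror the patch, while send_skb(), xmit() and the busy flag are hypothetical stand-ins for netpoll_send_skb() and the driver transmit, with the locking, netpoll_poll() and udelay(50) calls elided.

#include <stdio.h>

#define MAX_RETRIES 20000

static int tries = MAX_RETRIES;	/* stands in for npinfo->tries */

/* Stand-in for the driver transmit: returns 0 on success,
 * nonzero while the transmitter is busy. */
static int xmit(int busy)
{
	return busy;
}

/* Mirrors the patched loop: one decrement per attempt, a reset on
 * success, and at most one attempt per call once the budget is gone. */
static int send_skb(int busy)
{
	int attempts = 0;

	do {
		tries--;
		attempts++;
		if (!xmit(busy)) {
			tries = MAX_RETRIES;	/* success: reset budget */
			break;
		}
		/* kernel would do: netpoll_poll(np); udelay(50); */
	} while (tries > 0);

	return attempts;
}

int main(void)
{
	printf("busy: %d attempts\n", send_skb(1));	/* 20000 */
	printf("busy: %d attempts\n", send_skb(1));	/* 1: budget exhausted */
	printf("ok:   %d attempts\n", send_skb(0));	/* 1: resets budget */
	printf("busy: %d attempts\n", send_skb(1));	/* 20000 again */
	return 0;
}

With a permanently busy transmitter the first call makes 20000 attempts (about 1 second in the kernel at 50us per retry), every later failing call makes exactly one, and a single success refills the budget.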
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -26,6 +26,7 @@ struct netpoll {
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000
 
 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -265,7 +266,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		return;
 	}
 
-	while (1) {
+	do {
+		npinfo->tries--;
 		spin_lock(&np->dev->xmit_lock);
 		np->dev->xmit_lock_owner = smp_processor_id();
 
@@ -277,6 +279,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 			np->dev->xmit_lock_owner = -1;
 			spin_unlock(&np->dev->xmit_lock);
 			netpoll_poll(np);
+			udelay(50);
 			continue;
 		}
 
@@ -285,12 +288,15 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		spin_unlock(&np->dev->xmit_lock);
 
 		/* success */
-		if(!status)
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
 			return;
+		}
 
 		/* transmit busy */
 		netpoll_poll(np);
-	}
+		udelay(50);
+	} while (npinfo->tries > 0);
 }
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -642,6 +648,7 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;