Commit c7b6ea24 authored by Stephen Hemminger, committed by David S. Miller

[NETPOLL]: Don't need rx_flags.

The rx_flags variable is redundant. Turning rx on/off is done
by setting the rx_np pointer.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 33f807ba
@@ -25,7 +25,6 @@ struct netpoll {
 
 struct netpoll_info {
 	atomic_t refcnt;
-	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
@@ -51,12 +50,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	int ret = 0;
 
-	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
+	if (!npinfo || !npinfo->rx_np)
 		return 0;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
-	/* check rx_flags again with the lock held */
-	if (npinfo->rx_flags && __netpoll_rx(skb))
+	/* check rx_np again with the lock held */
+	if (npinfo->rx_np && __netpoll_rx(skb))
 		ret = 1;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
...
@@ -39,7 +39,6 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL	50
-#define NETPOLL_RX_ENABLED	1
 
 #define MAX_SKB_SIZE \
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -675,7 +674,6 @@ int netpoll_setup(struct netpoll *np)
 			goto release;
 		}
 
-		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 
 		spin_lock_init(&npinfo->rx_lock);
@@ -757,7 +755,6 @@ int netpoll_setup(struct netpoll *np)
 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -799,7 +796,6 @@ void netpoll_cleanup(struct netpoll *np)
 			if (npinfo->rx_np == np) {
 				spin_lock_irqsave(&npinfo->rx_lock, flags);
 				npinfo->rx_np = NULL;
-				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 			}
...
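
For illustration, here is a minimal userspace C sketch of the idiom the patch relies on: the rx_np pointer itself serves as the "rx enabled" flag, checked once without the lock as a fast path and re-checked under rx_lock before acting on it, so a separate rx_flags word adds nothing. This is not kernel code; the names (fake_netpoll_info, fake_rx, and so on) are made up for the example, and a pthread mutex stands in for the spinlock.

#include <pthread.h>
#include <stdio.h>

struct fake_netpoll {
	const char *name;
};

struct fake_netpoll_info {
	pthread_mutex_t rx_lock;	/* stands in for the kernel rx_lock spinlock */
	struct fake_netpoll *rx_np;	/* non-NULL means rx is enabled */
};

/* Rough analogue of netpoll_rx(): returns 1 when an rx consumer is registered. */
static int fake_rx(struct fake_netpoll_info *npinfo)
{
	int ret = 0;

	/* Fast path: a NULL pointer means rx is off, no lock needed. */
	if (!npinfo || !npinfo->rx_np)
		return 0;

	pthread_mutex_lock(&npinfo->rx_lock);
	/* Re-check rx_np with the lock held; it may have been cleared
	 * between the unlocked check above and taking the lock. */
	if (npinfo->rx_np)
		ret = 1;
	pthread_mutex_unlock(&npinfo->rx_lock);

	return ret;
}

int main(void)
{
	struct fake_netpoll np = { .name = "eth0" };
	struct fake_netpoll_info info = {
		.rx_lock = PTHREAD_MUTEX_INITIALIZER,
		.rx_np = NULL,
	};

	printf("rx disabled: %d\n", fake_rx(&info));	/* prints 0 */

	/* "Enable" rx by publishing the pointer, as netpoll_setup() does. */
	pthread_mutex_lock(&info.rx_lock);
	info.rx_np = &np;
	pthread_mutex_unlock(&info.rx_lock);

	printf("rx enabled: %d\n", fake_rx(&info));	/* prints 1 */
	return 0;
}

Build with -pthread. The point is only that setting or clearing one pointer under the lock is enough to toggle the receive path, which is why the patch can drop rx_flags and the NETPOLL_RX_ENABLED define.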