Commit 7b3d3e4f authored by Krishna Kumar, committed by David S. Miller

netdevice: Consolidate to use existing macros where available.

Patch compiled and 32 simultaneous netperf testing ran fine.
Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6ca8b990
...@@ -1257,7 +1257,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) ...@@ -1257,7 +1257,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{ {
#ifdef CONFIG_NETPOLL_TRAP #ifdef CONFIG_NETPOLL_TRAP
if (netpoll_trap()) { if (netpoll_trap()) {
clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); netif_tx_start_queue(dev_queue);
return; return;
} }
#endif #endif
...@@ -1363,7 +1363,8 @@ static inline int netif_running(const struct net_device *dev) ...@@ -1363,7 +1363,8 @@ static inline int netif_running(const struct net_device *dev)
/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Looks up the indexed tx queue of @dev and marks it started by
 *	delegating to netif_tx_start_queue() (post-patch form: the open-coded
 *	clear_bit(__QUEUE_STATE_XOFF, ...) was consolidated into the macro).
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}
/** /**
...@@ -1380,7 +1381,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) ...@@ -1380,7 +1381,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
if (netpoll_trap()) if (netpoll_trap())
return; return;
#endif #endif
set_bit(__QUEUE_STATE_XOFF, &txq->state); netif_tx_stop_queue(txq);
} }
/** /**
...@@ -1394,7 +1395,8 @@ static inline int __netif_subqueue_stopped(const struct net_device *dev, ...@@ -1394,7 +1395,8 @@ static inline int __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index) u16 queue_index)
{ {
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
return test_bit(__QUEUE_STATE_XOFF, &txq->state);
return netif_tx_queue_stopped(txq);
} }
static inline int netif_subqueue_stopped(const struct net_device *dev, static inline int netif_subqueue_stopped(const struct net_device *dev,
...@@ -1746,8 +1748,7 @@ static inline void netif_tx_unlock(struct net_device *dev) ...@@ -1746,8 +1748,7 @@ static inline void netif_tx_unlock(struct net_device *dev)
* force a schedule. * force a schedule.
*/ */
clear_bit(__QUEUE_STATE_FROZEN, &txq->state); clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) netif_schedule_queue(txq);
__netif_schedule(txq->qdisc);
} }
spin_unlock(&dev->tx_global_lock); spin_unlock(&dev->tx_global_lock);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment