Commit 79d16385 authored by David S. Miller

netdev: Move atomic queue state bits into netdev_queue.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent b19fa1fa
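
Note on the change (not part of the original commit message): the device-wide __LINK_STATE_XOFF and __LINK_STATE_QDISC_RUNNING bits in net_device::state are replaced by per-queue __QUEUE_STATE_XOFF and __QUEUE_STATE_QDISC_RUNNING bits in the new netdev_queue::state field, and the existing netif_start_queue()/netif_stop_queue()/netif_wake_queue()/netif_queue_stopped() helpers become thin wrappers around new netif_tx_* variants that take a struct netdev_queue directly. As a rough illustration, here is a userspace sketch (not kernel code; set_bit()/clear_bit()/test_bit() are modelled with GCC atomic builtins) of the per-queue XOFF flow-control bit:

/*
 * Userspace sketch of the per-queue XOFF bit introduced by this patch.
 * Helper names mirror the patch; the bitops are stand-ins for the
 * kernel's set_bit()/clear_bit()/test_bit().
 */
#include <stdio.h>

enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_QDISC_RUNNING,
};

struct netdev_queue {
	unsigned long state;	/* atomic bit flags, one word per queue */
};

static void set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
}

static void clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_SEQ_CST);
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_SEQ_CST) >> nr) & 1;
}

/* After the patch, flow control is per transmit queue, not per device. */
static void netif_tx_stop_queue(struct netdev_queue *q)
{
	set_bit(__QUEUE_STATE_XOFF, &q->state);
}

static void netif_tx_start_queue(struct netdev_queue *q)
{
	clear_bit(__QUEUE_STATE_XOFF, &q->state);
}

static int netif_tx_queue_stopped(const struct netdev_queue *q)
{
	return test_bit(__QUEUE_STATE_XOFF, &q->state);
}

int main(void)
{
	struct netdev_queue txq = { .state = 0 };

	netif_tx_stop_queue(&txq);	/* driver ran out of TX descriptors */
	printf("stopped: %d\n", netif_tx_queue_stopped(&txq));	/* prints 1 */

	netif_tx_start_queue(&txq);	/* TX completion freed descriptors */
	printf("stopped: %d\n", netif_tx_queue_stopped(&txq));	/* prints 0 */
	return 0;
}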
@@ -281,14 +281,12 @@ struct header_ops {
 enum netdev_state_t
 {
-	__LINK_STATE_XOFF=0,
 	__LINK_STATE_START,
 	__LINK_STATE_PRESENT,
 	__LINK_STATE_SCHED,
 	__LINK_STATE_NOCARRIER,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
-	__LINK_STATE_QDISC_RUNNING,
 };
@@ -448,10 +446,17 @@ static inline void napi_synchronize(const struct napi_struct *n)
 # define napi_synchronize(n)	barrier()
 #endif
 
+enum netdev_queue_state_t
+{
+	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_QDISC_RUNNING,
+};
+
 struct netdev_queue {
 	spinlock_t		lock;
 	struct net_device	*dev;
 	struct Qdisc		*qdisc;
+	unsigned long		state;
 	struct sk_buff		*gso_skb;
 	spinlock_t		_xmit_lock;
 	int			xmit_lock_owner;
@@ -952,9 +957,7 @@ extern void __netif_schedule(struct netdev_queue *txq);
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	struct net_device *dev = txq->dev;
-
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
+	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
 		__netif_schedule(txq);
 }
@@ -969,9 +972,14 @@ static inline void netif_schedule(struct net_device *dev)
  *
  *	Allow upper layers to call the device hard_start_xmit routine.
  */
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_start_queue(struct net_device *dev)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_start_queue(&dev->tx_queue);
 }
 
 /**
@@ -981,16 +989,21 @@ static inline void netif_start_queue(struct net_device *dev)
  *	Allow upper layers to call the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are available.
  */
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
 #ifdef CONFIG_NETPOLL_TRAP
 	if (netpoll_trap()) {
-		clear_bit(__LINK_STATE_XOFF, &dev->state);
+		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(&dev->tx_queue);
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+		__netif_schedule(dev_queue);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+	netif_tx_wake_queue(&dev->tx_queue);
 }
 
 /**
@@ -1000,9 +1013,14 @@ static inline void netif_wake_queue(struct net_device *dev)
  *	Stop upper layers calling the device hard_start_xmit routine.
  *	Used for flow control when transmit resources are unavailable.
  */
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline void netif_stop_queue(struct net_device *dev)
 {
-	set_bit(__LINK_STATE_XOFF, &dev->state);
+	netif_tx_stop_queue(&dev->tx_queue);
 }
 
 /**
@@ -1011,9 +1029,14 @@ static inline void netif_stop_queue(struct net_device *dev)
  *
  *	Test if transmit queue on device is currently unable to send.
  */
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
 static inline int netif_queue_stopped(const struct net_device *dev)
 {
-	return test_bit(__LINK_STATE_XOFF, &dev->state);
+	return netif_tx_queue_stopped(&dev->tx_queue);
 }
 
 /**
@@ -1043,7 +1066,7 @@ static inline int netif_running(const struct net_device *dev)
  */
 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 {
-	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1059,7 +1082,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
+	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
 }
 
 /**
@@ -1072,7 +1095,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 static inline int __netif_subqueue_stopped(const struct net_device *dev,
 					   u16 queue_index)
 {
-	return test_bit(__LINK_STATE_XOFF,
+	return test_bit(__QUEUE_STATE_XOFF,
 			&dev->egress_subqueue[queue_index].state);
 }
@@ -1095,7 +1118,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__LINK_STATE_XOFF,
+	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
 		__netif_schedule(&dev->tx_queue);
 }
...
@@ -91,7 +91,7 @@ static inline void qdisc_run(struct netdev_queue *txq)
 	struct net_device *dev = txq->dev;
 
 	if (!netif_queue_stopped(dev) &&
-	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+	    !test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state))
 		__qdisc_run(txq);
 }
...
@@ -121,9 +121,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 /*
  * NOTE: Called under queue->lock with locally disabled BH.
  *
- * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. queue->lock serializes queue accesses for
- * this device AND txq->qdisc pointer itself.
+ * __QUEUE_STATE_QDISC_RUNNING guarantees only one CPU can process
+ * this queue at a time. queue->lock serializes queue accesses for
+ * this queue AND txq->qdisc pointer itself.
  *
  *  netif_tx_lock serializes accesses to device driver.
  *
@@ -206,7 +206,7 @@ void __qdisc_run(struct netdev_queue *txq)
 		}
 	}
 
-	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+	clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
 }
 
 static void dev_watchdog(unsigned long arg)
@@ -605,9 +605,10 @@ static void dev_deactivate_queue(struct netdev_queue *dev_queue,
 void dev_deactivate(struct net_device *dev)
 {
+	struct netdev_queue *dev_queue = &dev->tx_queue;
 	int running;
 
-	dev_deactivate_queue(&dev->tx_queue, &noop_qdisc);
+	dev_deactivate_queue(dev_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
@@ -616,16 +617,17 @@ void dev_deactivate(struct net_device *dev)
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+		while (test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev->tx_queue.lock);
-		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-		spin_unlock_bh(&dev->tx_queue.lock);
+		spin_lock_bh(&dev_queue->lock);
+		running = test_bit(__QUEUE_STATE_QDISC_RUNNING,
+				   &dev_queue->state);
+		spin_unlock_bh(&dev_queue->lock);
 
 		/*
 		 * The running flag should never be set at this point because
...
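
For readers following the qdisc_run()/__qdisc_run()/dev_deactivate() hunks above: __QUEUE_STATE_QDISC_RUNNING acts as a per-queue "one runner at a time" latch. Whoever wins test_and_set_bit() drains the queue and clears the bit when done; concurrent callers back off and rely on the current runner. A minimal userspace sketch of that protocol (again not kernel code; the bitops below stand in for the kernel's test_and_set_bit()/clear_bit()):

#include <stdio.h>

#define __QUEUE_STATE_QDISC_RUNNING	1	/* bit index, as in the patch */

/* Stand-in for the kernel's atomic test_and_set_bit(): returns old bit. */
static int test_and_set_bit(int nr, unsigned long *addr)
{
	unsigned long old = __atomic_fetch_or(addr, 1UL << nr, __ATOMIC_SEQ_CST);
	return (old >> nr) & 1;
}

/* Stand-in for the kernel's clear_bit(). */
static void clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned long queue_state = 0;

	/* First caller wins the bit and becomes the queue runner. */
	if (!test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &queue_state))
		printf("CPU A: entering __qdisc_run()\n");

	/* A concurrent caller sees the bit already set and backs off,
	 * relying on the current runner to drain the queue. */
	if (!test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING, &queue_state))
		printf("CPU B: entering __qdisc_run()\n");	/* not reached */

	/* The runner clears the bit once the queue is drained. */
	clear_bit(__QUEUE_STATE_QDISC_RUNNING, &queue_state);
	return 0;
}

dev_deactivate() then simply spins on test_bit() until the bit drops, which is one reason the bit has to live in netdev_queue rather than net_device once a device can have more than one transmit queue.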