Commit 543a589a authored by Ingo Molnar, committed by Thomas Gleixner

net: xmit lock owner cleanup

- Every caller of __netif_tx_lock() passes 'current' as the lock owner,
  so eliminate the parameter and let the function record it itself.

- Likewise for HARD_TX_LOCK().
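
For illustration, here is a rough user-space analogue of the simplified locking helper. This is a sketch only: pthreads stand in for kernel tasks and the xmit spinlock, and every name in it is invented for the example; only the idea that the lock helper records 'current' itself is taken from the patch below.

/*
 * User-space sketch of the cleanup: every caller of the lock helper is, by
 * definition, the current thread of execution, so the owner argument is
 * dropped and the helper records "self" on its own.  Illustrative analogue
 * only; not kernel code, and all names are invented for the example.
 */
#include <pthread.h>
#include <stdio.h>

struct tx_queue {
	pthread_mutex_t	xmit_lock;
	pthread_t	xmit_lock_owner;	/* plays the role of txq->xmit_lock_owner */
	int		owner_valid;
};

/* Before the cleanup: tx_lock(txq, owner), where every call site passed "self".
 * After the cleanup:  tx_lock(txq), and the helper records the owner itself. */
static void tx_lock(struct tx_queue *txq)
{
	pthread_mutex_lock(&txq->xmit_lock);
	txq->xmit_lock_owner = pthread_self();
	txq->owner_valid = 1;
}

static void tx_unlock(struct tx_queue *txq)
{
	txq->owner_valid = 0;
	pthread_mutex_unlock(&txq->xmit_lock);
}

int main(void)
{
	static struct tx_queue txq = { .xmit_lock = PTHREAD_MUTEX_INITIALIZER };

	tx_lock(&txq);			/* no owner argument needed any more */
	printf("owner recorded: %d\n", txq.owner_valid);
	tx_unlock(&txq);
	return 0;
}

Callers that used to write __netif_tx_lock(txq, (void *)current) now simply call __netif_tx_lock(txq), which is exactly the mechanical change the driver hunks below make.
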
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ea481113
@@ -2836,7 +2836,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	if (unlikely(netif_tx_queue_stopped(txq)) &&
 	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		__netif_tx_lock(txq, (void *)current);
+		__netif_tx_lock(txq);
 		if ((netif_tx_queue_stopped(txq)) &&
 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
 			netif_tx_wake_queue(txq);
@@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (netif_tx_queue_stopped(nq)) {
-		__netif_tx_lock(nq, (void *)current);
+		__netif_tx_lock(nq);
 		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 			netif_tx_wake_queue(nq);
 		__netif_tx_unlock(nq);
@@ -899,7 +899,7 @@ static void txq_kick(struct tx_queue *txq)
 	u32 hw_desc_ptr;
 	u32 expected_ptr;
 
-	__netif_tx_lock(nq, (void *)current);
+	__netif_tx_lock(nq);
 	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
 		goto out;
@@ -923,7 +923,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	int reclaimed;
 
-	__netif_tx_lock(nq, (void *)current);
+	__netif_tx_lock(nq);
 
 	reclaimed = 0;
 	while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -1408,7 +1408,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	smp_mb();
 
 	if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-		__netif_tx_lock(tx_ring->txq, smp_processor_id());
+		__netif_tx_lock(tx_ring->txq);
 		if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
 			netif_wake_queue(netdev);
 		__netif_tx_unlock(tx_ring->txq);
@@ -3681,7 +3681,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-		__netif_tx_lock(txq, (void *)current);
+		__netif_tx_lock(txq);
 		if (netif_tx_queue_stopped(txq) &&
 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 			netif_tx_wake_queue(txq);
@@ -1665,10 +1665,18 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
+static inline void __netif_tx_lock(struct netdev_queue *txq)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = curr;
+	txq->xmit_lock_owner = (void *)current;
 }
+
+/*
+ * Do we hold the xmit_lock already?
+ */
+static inline int netif_tx_lock_recursion(struct netdev_queue *txq)
+{
+	return txq->xmit_lock_owner == (void *)current;
+}
 
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
@@ -1712,10 +1720,8 @@ static inline void txq_trans_update(struct netdev_queue *txq)
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	unsigned int i;
-	void *curr;
 
 	spin_lock(&dev->tx_global_lock);
-	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -1725,7 +1731,7 @@ static inline void netif_tx_lock(struct net_device *dev)
 		 * the ->hard_start_xmit() handler and already
 		 * checked the frozen bit.
 		 */
-		__netif_tx_lock(txq, curr);
+		__netif_tx_lock(txq);
 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 		__netif_tx_unlock(txq);
 	}
@@ -1761,9 +1767,9 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	local_bh_enable();
 }
 
-#define HARD_TX_LOCK(dev, txq, curr) {			\
+#define HARD_TX_LOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(txq, curr);		\
+		__netif_tx_lock(txq);			\
 	}						\
 }
@@ -1776,14 +1782,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
-	void *curr;
 
 	local_bh_disable();
-	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
-		__netif_tx_lock(txq, curr);
+		__netif_tx_lock(txq);
 		netif_tx_stop_queue(txq);
 		__netif_tx_unlock(txq);
 	}
@@ -1893,9 +1893,9 @@ gso:
 		/*
 		 * No need to check for recursion with threaded interrupts:
 		 */
-		if (txq->xmit_lock_owner != (void *)current) {
+		if (!netif_tx_lock_recursion(txq)) {
 
-			HARD_TX_LOCK(dev, txq, (void *)current);
+			HARD_TX_LOCK(dev, txq);
 
 			if (!netif_tx_queue_stopped(txq)) {
 				rc = 0;
@@ -70,7 +70,7 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save_nort(flags);
-		__netif_tx_lock(txq, (void *)current);
+		__netif_tx_lock(txq);
 		if (netif_tx_queue_stopped(txq) ||
 		    netif_tx_queue_frozen(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
@@ -80,7 +80,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 {
 	int ret;
 
-	if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
+	if (unlikely(netif_tx_lock_recursion(dev_queue))) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -143,7 +143,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	dev = qdisc_dev(q);
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-	HARD_TX_LOCK(dev, txq, (void *)current);
+	HARD_TX_LOCK(dev, txq);
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
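
The last few hunks above replace the open-coded owner comparison (xmit_lock_owner against 'current') with the new netif_tx_lock_recursion() helper. A rough user-space analogue of that re-entry guard follows; it is a sketch with invented names, pthreads standing in for kernel tasks and the xmit spinlock.

/*
 * User-space sketch of the re-entry guard: if the current thread of
 * execution already owns the xmit lock, the transmit path must not take it
 * again (that would be lock recursion), so it bails out instead of
 * deadlocking.  Illustrative analogue only; all names are invented.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t xmit_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t xmit_lock_owner;
static int xmit_lock_held;

/* Analogue of netif_tx_lock_recursion(): do we hold the lock already? */
static int xmit_lock_recursion(void)
{
	return xmit_lock_held && pthread_equal(xmit_lock_owner, pthread_self());
}

/* Transmit one "packet": returns 0 on success, -1 when recursion is detected. */
static int xmit_one(const char *pkt)
{
	if (xmit_lock_recursion())
		return -1;	/* same thread already holds the lock: drop, do not deadlock */

	pthread_mutex_lock(&xmit_lock);
	xmit_lock_owner = pthread_self();
	xmit_lock_held = 1;

	printf("xmit: %s\n", pkt);

	xmit_lock_held = 0;
	pthread_mutex_unlock(&xmit_lock);
	return 0;
}

int main(void)
{
	return xmit_one("hello world") ? 1 : 0;
}
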