Commit ea481113 authored by mbeauch, committed by Thomas Gleixner

net: detect recursive calls to dev_queue_xmit() on RT

Changed the real-time patch code to detect recursive calls
to dev_queue_xmit() and drop the packet when recursion is detected.
Signed-off-by: Mark Beauchemin <mark.beauchemin@sycamorenet.com>
[ ported to latest upstream ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent b6ecb933
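The core of the change: xmit_lock_owner previously held the CPU number of the context that took the transmit lock, and dev_queue_xmit() detected recursion by comparing it against the local CPU id. That comparison only identifies the caller as long as the transmit path cannot be preempted and migrated; on PREEMPT_RT it runs in preemptible threads, so the check had simply been compiled out (the removed #ifdef CONFIG_PREEMPT_RT / if (1) block in the dev_queue_xmit hunk below). Storing a pointer to the owning task instead keeps the comparison valid on both RT and non-RT kernels. Below is a minimal userspace sketch of the idea, not kernel code; all names in it are made up for illustration.

/*
 * Minimal userspace sketch of the recursion check used in this patch
 * (illustrative only; names and types are not the kernel's).  The lock
 * owner is recorded as an address that uniquely identifies the running
 * task, so a transmit path that re-enters itself is recognized even if
 * the task has been preempted and migrated to another CPU in between.
 */
#include <pthread.h>
#include <stdio.h>

static _Thread_local char task_marker;	/* &task_marker stands in for 'current' */

struct tx_queue {
	pthread_mutex_t lock;
	void *owner;		/* task holding the lock, or NULL */
};

static struct tx_queue txq = {
	.lock  = PTHREAD_MUTEX_INITIALIZER,
	.owner = NULL,
};

/* Returns 0 if the packet was transmitted, -1 if it was dropped. */
static int tx_path(const char *pkt, int recurse)
{
	void *curr = &task_marker;

	if (txq.owner == curr) {
		/* Recursive call from our own transmit path: drop the packet. */
		fprintf(stderr, "dropping packet: recursive transmit\n");
		return -1;
	}

	pthread_mutex_lock(&txq.lock);
	txq.owner = curr;

	printf("transmitting: %s\n", pkt);
	if (recurse)
		tx_path(pkt, 0);	/* e.g. a virtual device queueing to itself */

	txq.owner = NULL;
	pthread_mutex_unlock(&txq.lock);
	return 0;
}

int main(void)
{
	tx_path("hello", 1);	/* the inner call is detected and dropped */
	return 0;
}

Without the owner check, the inner call in the sketch would deadlock on its own non-recursive lock; dropping the packet when the owner is already the current task is the same escape hatch the patch applies to dev_queue_xmit().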
@@ -2836,7 +2836,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	if (unlikely(netif_tx_queue_stopped(txq)) &&
 	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		__netif_tx_lock(txq, smp_processor_id());
+		__netif_tx_lock(txq, (void *)current);
 		if ((netif_tx_queue_stopped(txq)) &&
 		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
 			netif_tx_wake_queue(txq);
@@ -508,7 +508,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (netif_tx_queue_stopped(nq)) {
-		__netif_tx_lock(nq, smp_processor_id());
+		__netif_tx_lock(nq, (void *)current);
 		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 			netif_tx_wake_queue(nq);
 		__netif_tx_unlock(nq);
@@ -899,7 +899,7 @@ static void txq_kick(struct tx_queue *txq)
 	u32 hw_desc_ptr;
 	u32 expected_ptr;
 
-	__netif_tx_lock(nq, smp_processor_id());
+	__netif_tx_lock(nq, (void *)current);
 
 	if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
 		goto out;
@@ -923,7 +923,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 	int reclaimed;
 
-	__netif_tx_lock(nq, smp_processor_id());
+	__netif_tx_lock(nq, (void *)current);
 
 	reclaimed = 0;
 	while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -3681,7 +3681,7 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 out:
 	if (unlikely(netif_tx_queue_stopped(txq) &&
 	    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-		__netif_tx_lock(txq, smp_processor_id());
+		__netif_tx_lock(txq, (void *)current);
 		if (netif_tx_queue_stopped(txq) &&
 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
 			netif_tx_wake_queue(txq);
@@ -477,7 +477,7 @@ struct netdev_queue {
 	 * write mostly part
 	 */
 	spinlock_t	_xmit_lock ____cacheline_aligned_in_smp;
-	int		xmit_lock_owner;
+	void		*xmit_lock_owner;
 	/*
 	 * please use this field instead of dev->trans_start
 	 */
@@ -1665,41 +1665,41 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1 << debug_value) - 1;
 }
 
-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, void *curr)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	txq->xmit_lock_owner = curr;
 }
 
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = raw_smp_processor_id();
+	txq->xmit_lock_owner = (void *)current;
 }
 
 static inline int __netif_tx_trylock(struct netdev_queue *txq)
 {
 	int ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		txq->xmit_lock_owner = raw_smp_processor_id();
+		txq->xmit_lock_owner = (void *)current;
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	txq->xmit_lock_owner = (void *)-1;
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	txq->xmit_lock_owner = (void *)-1;
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (txq->xmit_lock_owner != (void *)-1)
 		txq->trans_start = jiffies;
 }
@@ -1712,10 +1712,10 @@ static inline void txq_trans_update(struct netdev_queue *txq)
 static inline void netif_tx_lock(struct net_device *dev)
 {
 	unsigned int i;
-	int cpu;
+	void *curr;
 
 	spin_lock(&dev->tx_global_lock);
-	cpu = raw_smp_processor_id();
+	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -1725,7 +1725,7 @@ static inline void netif_tx_lock(struct net_device *dev)
 		 * the ->hard_start_xmit() handler and already
 		 * checked the frozen bit.
 		 */
-		__netif_tx_lock(txq, cpu);
+		__netif_tx_lock(txq, curr);
 		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
 		__netif_tx_unlock(txq);
 	}
@@ -1761,9 +1761,9 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	local_bh_enable();
 }
 
-#define HARD_TX_LOCK(dev, txq, cpu) {			\
+#define HARD_TX_LOCK(dev, txq, curr) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
-		__netif_tx_lock(txq, cpu);		\
+		__netif_tx_lock(txq, curr);		\
 	}						\
 }
@@ -1776,14 +1776,14 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 static inline void netif_tx_disable(struct net_device *dev)
 {
 	unsigned int i;
-	int cpu;
+	void *curr;
 
 	local_bh_disable();
-	cpu = raw_smp_processor_id();
+	curr = (void *)current;
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 
-		__netif_tx_lock(txq, cpu);
+		__netif_tx_lock(txq, curr);
 		netif_tx_stop_queue(txq);
 		__netif_tx_unlock(txq);
 	}
@@ -1893,13 +1893,9 @@ gso:
 		/*
 		 * No need to check for recursion with threaded interrupts:
 		 */
-#ifdef CONFIG_PREEMPT_RT
-		if (1) {
-#else
-		if (txq->xmit_lock_owner != cpu) {
-#endif
+		if (txq->xmit_lock_owner != (void *)current) {
 
-			HARD_TX_LOCK(dev, txq, cpu);
+			HARD_TX_LOCK(dev, txq, (void *)current);
 
 			if (!netif_tx_queue_stopped(txq)) {
 				rc = 0;
@@ -4670,7 +4666,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
 {
 	spin_lock_init(&dev_queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
-	dev_queue->xmit_lock_owner = -1;
+	dev_queue->xmit_lock_owner = (void *)-1;
 }
 
 static void netdev_init_queue_locks(struct net_device *dev)
@@ -70,7 +70,7 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save_nort(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		__netif_tx_lock(txq, (void *)current);
 		if (netif_tx_queue_stopped(txq) ||
 		    netif_tx_queue_frozen(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
@@ -80,7 +80,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 {
 	int ret;
 
-	if (unlikely(dev_queue->xmit_lock_owner == raw_smp_processor_id())) {
+	if (unlikely(dev_queue->xmit_lock_owner == (void *)current)) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -143,7 +143,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	dev = qdisc_dev(q);
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
-	HARD_TX_LOCK(dev, txq, raw_smp_processor_id());
+	HARD_TX_LOCK(dev, txq, (void *)current);
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
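With the owner stored as a task pointer, handle_dev_cpu_collision() above effectively distinguishes the two cases by task rather than by CPU: if the lock owner is the current task, the transmit path has recursed into itself and the packet is dropped as a dead loop; if another task holds the lock, the packet is requeued and retried later. A rough standalone sketch of that decision, with made-up names (this is not the kernel function):

/*
 * Sketch of the drop-vs-requeue decision described above.
 * Names are illustrative only.
 */
#include <stdio.h>

enum tx_collision_action {
	TX_DROP_DEAD_LOOP,	/* transmit path re-entered itself: drop the skb */
	TX_REQUEUE_LATER,	/* another task holds the lock: requeue and retry */
};

static enum tx_collision_action classify_collision(void *lock_owner, void *curr)
{
	if (lock_owner == curr)
		return TX_DROP_DEAD_LOOP;
	return TX_REQUEUE_LATER;
}

int main(void)
{
	int me, other;	/* stack addresses stand in for two distinct tasks */

	printf("%d\n", classify_collision(&me, &me));		/* 0: drop */
	printf("%d\n", classify_collision(&other, &me));	/* 1: requeue */
	return 0;
}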