Commit 637dae3f authored by Michael Buesch, committed by John W. Linville

b43: Remove DMA/PIO queue locks

This removes the DMA/PIO queue locks. Locking is handled by
wl->mutex now.
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent f5d40eed
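
The locking rule this implies (a minimal sketch; the surrounding call chain is assumed, not shown in this patch): every path that touches a DMA ring or PIO queue must already run under wl->mutex, since the per-queue spinlocks are gone.

	/* Sketch of the assumed calling convention after this patch.
	 * b43_dma_tx() no longer takes a per-ring lock internally, so the
	 * caller is expected to hold wl->mutex around it. */
	mutex_lock(&dev->wl->mutex);
	err = b43_dma_tx(dev, skb);
	mutex_unlock(&dev->wl->mutex);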
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -856,7 +856,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 		} else
 			B43_WARN_ON(1);
 	}
-	spin_lock_init(&ring->lock);
 #ifdef CONFIG_B43_DEBUG
 	ring->last_injected_overflow = jiffies;
 #endif
@@ -1315,7 +1314,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 	struct b43_dmaring *ring;
 	struct ieee80211_hdr *hdr;
 	int err = 0;
-	unsigned long flags;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
 	hdr = (struct ieee80211_hdr *)skb->data;
@@ -1331,8 +1329,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 			dev, skb_get_queue_mapping(skb));
 	}
 
-	spin_lock_irqsave(&ring->lock, flags);
-
 	B43_WARN_ON(!ring->tx);
 
 	if (unlikely(ring->stopped)) {
@@ -1343,7 +1339,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
 			b43err(dev->wl, "Packet after queue stopped\n");
 		err = -ENOSPC;
-		goto out_unlock;
+		goto out;
 	}
 
 	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
@@ -1351,7 +1347,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		 * full, but queues not stopped. */
 		b43err(dev->wl, "DMA queue overflow\n");
 		err = -ENOSPC;
-		goto out_unlock;
+		goto out;
 	}
 
 	/* Assign the queue number to the ring (if not already done before)
@@ -1365,11 +1361,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 		 * anymore and must not transmit it unencrypted. */
 		dev_kfree_skb_any(skb);
 		err = 0;
-		goto out_unlock;
+		goto out;
 	}
 	if (unlikely(err)) {
 		b43err(dev->wl, "DMA tx mapping failure\n");
-		goto out_unlock;
+		goto out;
 	}
 	ring->nr_tx_packets++;
 	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
@@ -1381,8 +1377,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
 		}
 	}
-out_unlock:
-	spin_unlock_irqrestore(&ring->lock, flags);
+out:
 	return err;
 }
 
@@ -1401,8 +1396,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 	if (unlikely(!ring))
 		return;
 
-	spin_lock_irq(&ring->lock);
-
 	B43_WARN_ON(!ring->tx);
 	ops = ring->ops;
 	while (1) {
@@ -1461,8 +1454,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
 		}
 	}
-
-	spin_unlock_irq(&ring->lock);
 }
 
 void b43_dma_get_tx_stats(struct b43_wldev *dev,
@@ -1470,17 +1461,14 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
 {
 	const int nr_queues = dev->wl->hw->queues;
 	struct b43_dmaring *ring;
-	unsigned long flags;
 	int i;
 
 	for (i = 0; i < nr_queues; i++) {
 		ring = select_ring_by_priority(dev, i);
 
-		spin_lock_irqsave(&ring->lock, flags);
 		stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
 		stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
 		stats[i].count = ring->nr_tx_packets;
-		spin_unlock_irqrestore(&ring->lock, flags);
 	}
 }
 
@@ -1591,22 +1579,14 @@ void b43_dma_rx(struct b43_dmaring *ring)
 
 static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->lock, flags);
 	B43_WARN_ON(!ring->tx);
 	ring->ops->tx_suspend(ring);
-	spin_unlock_irqrestore(&ring->lock, flags);
 }
 
 static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ring->lock, flags);
 	B43_WARN_ON(!ring->tx);
 	ring->ops->tx_resume(ring);
-	spin_unlock_irqrestore(&ring->lock, flags);
 }
 
 void b43_dma_tx_suspend(struct b43_wldev *dev)
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -2,7 +2,6 @@
 #define B43_DMA_H_
 
 #include <linux/ieee80211.h>
-#include <linux/spinlock.h>
 
 #include "b43.h"
 
@@ -244,8 +243,6 @@ struct b43_dmaring {
 	/* The QOS priority assigned to this ring. Only used for TX rings.
 	 * This is the mac80211 "queue" value. */
 	u8 queue_prio;
-	/* Lock, only used for TX. */
-	spinlock_t lock;
 	struct b43_wldev *dev;
 #ifdef CONFIG_B43_DEBUG
 	/* Maximum number of used slots. */
...@@ -144,7 +144,6 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev, ...@@ -144,7 +144,6 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
q = kzalloc(sizeof(*q), GFP_KERNEL); q = kzalloc(sizeof(*q), GFP_KERNEL);
if (!q) if (!q)
return NULL; return NULL;
spin_lock_init(&q->lock);
q->dev = dev; q->dev = dev;
q->rev = dev->dev->id.revision; q->rev = dev->dev->id.revision;
q->mmio_base = index_to_pioqueue_base(dev, index) + q->mmio_base = index_to_pioqueue_base(dev, index) +
...@@ -179,7 +178,6 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev, ...@@ -179,7 +178,6 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
q = kzalloc(sizeof(*q), GFP_KERNEL); q = kzalloc(sizeof(*q), GFP_KERNEL);
if (!q) if (!q)
return NULL; return NULL;
spin_lock_init(&q->lock);
q->dev = dev; q->dev = dev;
q->rev = dev->dev->id.revision; q->rev = dev->dev->id.revision;
q->mmio_base = index_to_pioqueue_base(dev, index) + q->mmio_base = index_to_pioqueue_base(dev, index) +
...@@ -494,7 +492,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -494,7 +492,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{ {
struct b43_pio_txqueue *q; struct b43_pio_txqueue *q;
struct ieee80211_hdr *hdr; struct ieee80211_hdr *hdr;
unsigned long flags;
unsigned int hdrlen, total_len; unsigned int hdrlen, total_len;
int err = 0; int err = 0;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
...@@ -512,20 +509,18 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -512,20 +509,18 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
q = select_queue_by_priority(dev, skb_get_queue_mapping(skb)); q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
} }
spin_lock_irqsave(&q->lock, flags);
hdrlen = b43_txhdr_size(dev); hdrlen = b43_txhdr_size(dev);
total_len = roundup(skb->len + hdrlen, 4); total_len = roundup(skb->len + hdrlen, 4);
if (unlikely(total_len > q->buffer_size)) { if (unlikely(total_len > q->buffer_size)) {
err = -ENOBUFS; err = -ENOBUFS;
b43dbg(dev->wl, "PIO: TX packet longer than queue.\n"); b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
goto out_unlock; goto out;
} }
if (unlikely(q->free_packet_slots == 0)) { if (unlikely(q->free_packet_slots == 0)) {
err = -ENOBUFS; err = -ENOBUFS;
b43warn(dev->wl, "PIO: TX packet overflow.\n"); b43warn(dev->wl, "PIO: TX packet overflow.\n");
goto out_unlock; goto out;
} }
B43_WARN_ON(q->buffer_used > q->buffer_size); B43_WARN_ON(q->buffer_used > q->buffer_size);
...@@ -534,7 +529,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -534,7 +529,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
err = -EBUSY; err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
q->stopped = 1; q->stopped = 1;
goto out_unlock; goto out;
} }
/* Assign the queue number to the ring (if not already done before) /* Assign the queue number to the ring (if not already done before)
...@@ -548,11 +543,11 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -548,11 +543,11 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
* anymore and must not transmit it unencrypted. */ * anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
err = 0; err = 0;
goto out_unlock; goto out;
} }
if (unlikely(err)) { if (unlikely(err)) {
b43err(dev->wl, "PIO transmission failure\n"); b43err(dev->wl, "PIO transmission failure\n");
goto out_unlock; goto out;
} }
q->nr_tx_packets++; q->nr_tx_packets++;
...@@ -564,9 +559,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb) ...@@ -564,9 +559,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
q->stopped = 1; q->stopped = 1;
} }
out_unlock: out:
spin_unlock_irqrestore(&q->lock, flags);
return err; return err;
} }
...@@ -583,8 +576,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, ...@@ -583,8 +576,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
return; return;
B43_WARN_ON(!pack); B43_WARN_ON(!pack);
spin_lock_irq(&q->lock);
info = IEEE80211_SKB_CB(pack->skb); info = IEEE80211_SKB_CB(pack->skb);
b43_fill_txstatus_report(dev, info, status); b43_fill_txstatus_report(dev, info, status);
...@@ -602,8 +593,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev, ...@@ -602,8 +593,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
ieee80211_wake_queue(dev->wl->hw, q->queue_prio); ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
q->stopped = 0; q->stopped = 0;
} }
spin_unlock_irq(&q->lock);
} }
void b43_pio_get_tx_stats(struct b43_wldev *dev, void b43_pio_get_tx_stats(struct b43_wldev *dev,
...@@ -611,17 +600,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev, ...@@ -611,17 +600,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
{ {
const int nr_queues = dev->wl->hw->queues; const int nr_queues = dev->wl->hw->queues;
struct b43_pio_txqueue *q; struct b43_pio_txqueue *q;
unsigned long flags;
int i; int i;
for (i = 0; i < nr_queues; i++) { for (i = 0; i < nr_queues; i++) {
q = select_queue_by_priority(dev, i); q = select_queue_by_priority(dev, i);
spin_lock_irqsave(&q->lock, flags);
stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots; stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
stats[i].limit = B43_PIO_MAX_NR_TXPACKETS; stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
stats[i].count = q->nr_tx_packets; stats[i].count = q->nr_tx_packets;
spin_unlock_irqrestore(&q->lock, flags);
} }
} }
...@@ -768,9 +754,9 @@ static void b43_pio_rx_work(struct work_struct *work) ...@@ -768,9 +754,9 @@ static void b43_pio_rx_work(struct work_struct *work)
bool stop; bool stop;
do { do {
spin_lock_irq(&q->lock); mutex_lock(&q->dev->wl->mutex);
stop = (pio_rx_frame(q) == 0); stop = (pio_rx_frame(q) == 0);
spin_unlock_irq(&q->lock); mutex_unlock(&q->dev->wl->mutex);
cond_resched(); cond_resched();
if (stop) if (stop)
break; break;
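
This hunk is the one place a lock is replaced rather than deleted: b43_pio_rx_work() runs from a workqueue in process context, so it may sleep and can take wl->mutex, which the old IRQ-disabling spinlock pattern could not. A minimal sketch of the resulting work handler (the rx_work field name, the container_of() wiring, and the loop tail are assumed for illustration; only the locking lines come from this patch):

	static void b43_pio_rx_work(struct work_struct *work)
	{
		struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue,
							 rx_work);
		bool stop;

		do {
			/* Process context: a sleeping lock is fine here. */
			mutex_lock(&q->dev->wl->mutex);
			stop = (pio_rx_frame(q) == 0);
			mutex_unlock(&q->dev->wl->mutex);
			cond_resched();
			if (stop)
				break;
		} while (1);
	}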
@@ -787,9 +773,6 @@ void b43_pio_rx(struct b43_pio_rxqueue *q)
 
 static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->rev >= 8) {
 		b43_piotx_write32(q, B43_PIO8_TXCTL,
 				  b43_piotx_read32(q, B43_PIO8_TXCTL)
@@ -799,14 +782,10 @@ static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
 				 b43_piotx_read16(q, B43_PIO_TXCTL)
 				 | B43_PIO_TXCTL_SUSPREQ);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->rev >= 8) {
 		b43_piotx_write32(q, B43_PIO8_TXCTL,
 				  b43_piotx_read32(q, B43_PIO8_TXCTL)
@@ -816,7 +795,6 @@ static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
 				 b43_piotx_read16(q, B43_PIO_TXCTL)
 				 & ~B43_PIO_TXCTL_SUSPREQ);
 	}
-	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 void b43_pio_tx_suspend(struct b43_wldev *dev)
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -70,7 +70,6 @@ struct b43_pio_txpacket {
 
 struct b43_pio_txqueue {
 	struct b43_wldev *dev;
-	spinlock_t lock;
 	u16 mmio_base;
 
 	/* The device queue buffer size in bytes. */
@@ -103,7 +102,6 @@ struct b43_pio_txqueue {
 
 struct b43_pio_rxqueue {
 	struct b43_wldev *dev;
-	spinlock_t lock;
 	u16 mmio_base;
 
 	/* Work to reduce latency issues on RX. */