Commit 9218e02b authored by Michael Buesch's avatar Michael Buesch Committed by John W. Linville

[PATCH] bcm43xx: >1G and 64bit DMA support

This is a rewrite of the bcm43xx DMA engine. It adds support
for >1G of memory (for chips that support the extension bits)
and 64-bit DMA (for chips that support it).
Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 3b4c7d64
...@@ -33,14 +33,18 @@ ...@@ -33,14 +33,18 @@
#define BCM43xx_PCICFG_ICR 0x94 #define BCM43xx_PCICFG_ICR 0x94
/* MMIO offsets */ /* MMIO offsets */
#define BCM43xx_MMIO_DMA1_REASON 0x20 #define BCM43xx_MMIO_DMA0_REASON 0x20
#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x24 #define BCM43xx_MMIO_DMA0_IRQ_MASK 0x24
#define BCM43xx_MMIO_DMA2_REASON 0x28 #define BCM43xx_MMIO_DMA1_REASON 0x28
#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x2C #define BCM43xx_MMIO_DMA1_IRQ_MASK 0x2C
#define BCM43xx_MMIO_DMA3_REASON 0x30 #define BCM43xx_MMIO_DMA2_REASON 0x30
#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x34 #define BCM43xx_MMIO_DMA2_IRQ_MASK 0x34
#define BCM43xx_MMIO_DMA4_REASON 0x38 #define BCM43xx_MMIO_DMA3_REASON 0x38
#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x3C #define BCM43xx_MMIO_DMA3_IRQ_MASK 0x3C
#define BCM43xx_MMIO_DMA4_REASON 0x40
#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x44
#define BCM43xx_MMIO_DMA5_REASON 0x48
#define BCM43xx_MMIO_DMA5_IRQ_MASK 0x4C
#define BCM43xx_MMIO_STATUS_BITFIELD 0x120 #define BCM43xx_MMIO_STATUS_BITFIELD 0x120
#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124 #define BCM43xx_MMIO_STATUS2_BITFIELD 0x124
#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128 #define BCM43xx_MMIO_GEN_IRQ_REASON 0x128
...@@ -56,14 +60,27 @@ ...@@ -56,14 +60,27 @@
#define BCM43xx_MMIO_XMITSTAT_1 0x174 #define BCM43xx_MMIO_XMITSTAT_1 0x174
#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ #define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */
#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ #define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
#define BCM43xx_MMIO_DMA1_BASE 0x200
#define BCM43xx_MMIO_DMA2_BASE 0x220 /* 32-bit DMA */
#define BCM43xx_MMIO_DMA3_BASE 0x240 #define BCM43xx_MMIO_DMA32_BASE0 0x200
#define BCM43xx_MMIO_DMA4_BASE 0x260 #define BCM43xx_MMIO_DMA32_BASE1 0x220
#define BCM43xx_MMIO_DMA32_BASE2 0x240
#define BCM43xx_MMIO_DMA32_BASE3 0x260
#define BCM43xx_MMIO_DMA32_BASE4 0x280
#define BCM43xx_MMIO_DMA32_BASE5 0x2A0
/* 64-bit DMA */
#define BCM43xx_MMIO_DMA64_BASE0 0x200
#define BCM43xx_MMIO_DMA64_BASE1 0x240
#define BCM43xx_MMIO_DMA64_BASE2 0x280
#define BCM43xx_MMIO_DMA64_BASE3 0x2C0
#define BCM43xx_MMIO_DMA64_BASE4 0x300
#define BCM43xx_MMIO_DMA64_BASE5 0x340
/* PIO */
#define BCM43xx_MMIO_PIO1_BASE 0x300 #define BCM43xx_MMIO_PIO1_BASE 0x300
#define BCM43xx_MMIO_PIO2_BASE 0x310 #define BCM43xx_MMIO_PIO2_BASE 0x310
#define BCM43xx_MMIO_PIO3_BASE 0x320 #define BCM43xx_MMIO_PIO3_BASE 0x320
#define BCM43xx_MMIO_PIO4_BASE 0x330 #define BCM43xx_MMIO_PIO4_BASE 0x330
#define BCM43xx_MMIO_PHY_VER 0x3E0 #define BCM43xx_MMIO_PHY_VER 0x3E0
#define BCM43xx_MMIO_PHY_RADIO 0x3E2 #define BCM43xx_MMIO_PHY_RADIO 0x3E2
#define BCM43xx_MMIO_ANTENNA 0x3E8 #define BCM43xx_MMIO_ANTENNA 0x3E8
...@@ -233,8 +250,14 @@ ...@@ -233,8 +250,14 @@
#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000 #define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000
/* sbtmstatehigh state flags */ /* sbtmstatehigh state flags */
#define BCM43xx_SBTMSTATEHIGH_SERROR 0x1 #define BCM43xx_SBTMSTATEHIGH_SERROR 0x00000001
#define BCM43xx_SBTMSTATEHIGH_BUSY 0x4 #define BCM43xx_SBTMSTATEHIGH_BUSY 0x00000004
#define BCM43xx_SBTMSTATEHIGH_TIMEOUT 0x00000020
#define BCM43xx_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000
#define BCM43xx_SBTMSTATEHIGH_DMA64BIT 0x10000000
#define BCM43xx_SBTMSTATEHIGH_GATEDCLK 0x20000000
#define BCM43xx_SBTMSTATEHIGH_BISTFAILED 0x40000000
#define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000
/* sbimstate flags */ /* sbimstate flags */
#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000 #define BCM43xx_SBIMSTATE_IB_ERROR 0x20000
...@@ -574,8 +597,11 @@ struct bcm43xx_dma { ...@@ -574,8 +597,11 @@ struct bcm43xx_dma {
struct bcm43xx_dmaring *tx_ring1; struct bcm43xx_dmaring *tx_ring1;
struct bcm43xx_dmaring *tx_ring2; struct bcm43xx_dmaring *tx_ring2;
struct bcm43xx_dmaring *tx_ring3; struct bcm43xx_dmaring *tx_ring3;
struct bcm43xx_dmaring *tx_ring4;
struct bcm43xx_dmaring *tx_ring5;
struct bcm43xx_dmaring *rx_ring0; struct bcm43xx_dmaring *rx_ring0;
struct bcm43xx_dmaring *rx_ring1; /* only available on core.rev < 5 */ struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */
}; };
/* Data structures for PIO transmission, per 80211 core. */ /* Data structures for PIO transmission, per 80211 core. */
...@@ -739,7 +765,7 @@ struct bcm43xx_private { ...@@ -739,7 +765,7 @@ struct bcm43xx_private {
/* Reason code of the last interrupt. */ /* Reason code of the last interrupt. */
u32 irq_reason; u32 irq_reason;
u32 dma_reason[4]; u32 dma_reason[6];
/* saved irq enable/disable state bitfield. */ /* saved irq enable/disable state bitfield. */
u32 irq_savedstate; u32 irq_savedstate;
/* Link Quality calculation context. */ /* Link Quality calculation context. */
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
DMA ringbuffer and descriptor allocation/management DMA ringbuffer and descriptor allocation/management
Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de> Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
Some code in this file is derived from the b44.c driver Some code in this file is derived from the b44.c driver
Copyright (C) 2002 David S. Miller Copyright (C) 2002 David S. Miller
...@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot) ...@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
} }
} }
/* Map a DMA controller index to its MMIO register base address.
 * @dma64bit: nonzero selects the 64-bit DMA engine register layout,
 *            zero selects the 32-bit layout.
 * @controller_idx: controller number, must be in range 0..5.
 * Returns the MMIO offset of the selected controller's register block.
 */
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
	/* Row 0: 32-bit engine bases, row 1: 64-bit engine bases. */
	static const u16 bases[2][6] = {
		{
			BCM43xx_MMIO_DMA32_BASE0,
			BCM43xx_MMIO_DMA32_BASE1,
			BCM43xx_MMIO_DMA32_BASE2,
			BCM43xx_MMIO_DMA32_BASE3,
			BCM43xx_MMIO_DMA32_BASE4,
			BCM43xx_MMIO_DMA32_BASE5,
		},
		{
			BCM43xx_MMIO_DMA64_BASE0,
			BCM43xx_MMIO_DMA64_BASE1,
			BCM43xx_MMIO_DMA64_BASE2,
			BCM43xx_MMIO_DMA64_BASE3,
			BCM43xx_MMIO_DMA64_BASE4,
			BCM43xx_MMIO_DMA64_BASE5,
		},
	};
	const u16 *map = bases[dma64bit ? 1 : 0];

	assert(controller_idx >= 0 &&
	       controller_idx < ARRAY_SIZE(bases[0]));
	return map[controller_idx];
}
static inline static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring, dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
unsigned char *buf, unsigned char *buf,
...@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring, ...@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
/* Unmap and free a descriptor buffer. */ /* Unmap and free a descriptor buffer. */
static inline static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring, void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc *desc,
struct bcm43xx_dmadesc_meta *meta, struct bcm43xx_dmadesc_meta *meta,
int irq_context) int irq_context)
{ {
...@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring) ...@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{ {
struct device *dev = &(ring->bcm->pci_dev->dev); struct device *dev = &(ring->bcm->pci_dev->dev);
ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
&(ring->dmabase), GFP_KERNEL); &(ring->dmabase), GFP_KERNEL);
if (!ring->vbase) { if (!ring->descbase) {
printk(KERN_ERR PFX "DMA ringmemory allocation failed\n"); printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
return -ENOMEM; return -ENOMEM;
} }
if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) { memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
"(0x%llx, len: %lu)\n",
(unsigned long long)ring->dmabase,
BCM43xx_DMA_RINGMEMSIZE);
dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
ring->vbase, ring->dmabase);
return -ENOMEM;
}
assert(!(ring->dmabase & 0x000003FF));
memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
return 0; return 0;
} }
...@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring) ...@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
struct device *dev = &(ring->bcm->pci_dev->dev); struct device *dev = &(ring->bcm->pci_dev->dev);
dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
ring->vbase, ring->dmabase); ring->descbase, ring->dmabase);
} }
/* Reset the RX DMA channel */ /* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
u16 mmio_base) u16 mmio_base, int dma64)
{ {
int i; int i;
u32 value; u32 value;
u16 offset;
bcm43xx_write32(bcm, offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
mmio_base + BCM43xx_DMA_RX_CONTROL, bcm43xx_write32(bcm, mmio_base + offset, 0);
0x00000000);
for (i = 0; i < 1000; i++) { for (i = 0; i < 1000; i++) {
value = bcm43xx_read32(bcm, offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
mmio_base + BCM43xx_DMA_RX_STATUS); value = bcm43xx_read32(bcm, mmio_base + offset);
value &= BCM43xx_DMA_RXSTAT_STAT_MASK; if (dma64) {
if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) { value &= BCM43xx_DMA64_RXSTAT;
i = -1; if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
break; i = -1;
break;
}
} else {
value &= BCM43xx_DMA32_RXSTATE;
if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
i = -1;
break;
}
} }
udelay(10); udelay(10);
} }
...@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, ...@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
/* Reset the TX DMA channel */ /* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
u16 mmio_base) u16 mmio_base, int dma64)
{ {
int i; int i;
u32 value; u32 value;
u16 offset;
for (i = 0; i < 1000; i++) { for (i = 0; i < 1000; i++) {
value = bcm43xx_read32(bcm, offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
mmio_base + BCM43xx_DMA_TX_STATUS); value = bcm43xx_read32(bcm, mmio_base + offset);
value &= BCM43xx_DMA_TXSTAT_STAT_MASK; if (dma64) {
if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED || value &= BCM43xx_DMA64_TXSTAT;
value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT || if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
value == BCM43xx_DMA_TXSTAT_STAT_STOPPED) value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
break; value == BCM43xx_DMA64_TXSTAT_STOPPED)
break;
} else {
value &= BCM43xx_DMA32_TXSTATE;
if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
value == BCM43xx_DMA32_TXSTAT_STOPPED)
break;
}
udelay(10); udelay(10);
} }
bcm43xx_write32(bcm, offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
mmio_base + BCM43xx_DMA_TX_CONTROL, bcm43xx_write32(bcm, mmio_base + offset, 0);
0x00000000);
for (i = 0; i < 1000; i++) { for (i = 0; i < 1000; i++) {
value = bcm43xx_read32(bcm, offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
mmio_base + BCM43xx_DMA_TX_STATUS); value = bcm43xx_read32(bcm, mmio_base + offset);
value &= BCM43xx_DMA_TXSTAT_STAT_MASK; if (dma64) {
if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) { value &= BCM43xx_DMA64_TXSTAT;
i = -1; if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
break; i = -1;
break;
}
} else {
value &= BCM43xx_DMA32_TXSTATE;
if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
i = -1;
break;
}
} }
udelay(10); udelay(10);
} }
...@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, ...@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
return 0; return 0;
} }
/* Fill one hardware DMA descriptor with a buffer's bus address and
 * control flags. Writes the descriptor in the format selected by
 * ring->dma64 (64-bit engine: two control words + split address;
 * 32-bit engine: one control word + one address word), byte-swapped
 * to little endian for the hardware.
 * @dmaaddr: bus address of the buffer to attach to the descriptor.
 * @bufsize: buffer size; the byte count programmed into the
 *           descriptor is (bufsize - ring->frameoffset).
 * @start/@end: mark this descriptor as frame start / frame end.
 * @irq: request an interrupt when this descriptor completes.
 */
static void fill_descriptor(struct bcm43xx_dmaring *ring,
			    struct bcm43xx_dmadesc_generic *desc,
			    dma_addr_t dmaaddr,
			    u16 bufsize,
			    int start, int end, int irq)
{
	int slot;

	/* Recover this descriptor's ring slot; the last slot must
	 * carry the descriptor-table-end flag. */
	slot = bcm43xx_dma_desc2idx(ring, desc);
	assert(slot >= 0 && slot < ring->nr_slots);

	if (ring->dma64) {
		u32 ctl0 = 0, ctl1 = 0;
		u32 addrlo, addrhi;
		u32 addrext;

		/* Split the bus address: low 32 bits go in address_low;
		 * the high bits go in address_high minus the routing
		 * field, whose bits become the address-extension value
		 * stored in control word 1. */
		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
		/* Overlay the ring's routing (address translation) bits. */
		addrhi |= ring->routing;
		if (slot == ring->nr_slots - 1)
			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
		if (start)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
		if (end)
			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
		if (irq)
			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
		/* Byte count excludes the header frameoffset region. */
		ctl1 |= (bufsize - ring->frameoffset)
			& BCM43xx_DMA64_DCTL1_BYTECNT;
		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
		desc->dma64.control0 = cpu_to_le32(ctl0);
		desc->dma64.control1 = cpu_to_le32(ctl1);
		desc->dma64.address_low = cpu_to_le32(addrlo);
		desc->dma64.address_high = cpu_to_le32(addrhi);
	} else {
		u32 ctl;
		u32 addr;
		u32 addrext;

		/* 32-bit engine: the routing/address-extension bits live
		 * in the top of the single address word; extract them for
		 * the control word and substitute the ring's routing. */
		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
			  >> BCM43xx_DMA32_ROUTING_SHIFT;
		addr |= ring->routing;
		/* Byte count excludes the header frameoffset region. */
		ctl = (bufsize - ring->frameoffset)
		      & BCM43xx_DMA32_DCTL_BYTECNT;
		if (slot == ring->nr_slots - 1)
			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
		if (start)
			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
		if (end)
			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
		if (irq)
			ctl |= BCM43xx_DMA32_DCTL_IRQ;
		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
		desc->dma32.control = cpu_to_le32(ctl);
		desc->dma32.address = cpu_to_le32(addr);
	}
}
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring, static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc *desc, struct bcm43xx_dmadesc_generic *desc,
struct bcm43xx_dmadesc_meta *meta, struct bcm43xx_dmadesc_meta *meta,
gfp_t gfp_flags) gfp_t gfp_flags)
{ {
struct bcm43xx_rxhdr *rxhdr; struct bcm43xx_rxhdr *rxhdr;
struct bcm43xx_hwxmitstatus *xmitstat;
dma_addr_t dmaaddr; dma_addr_t dmaaddr;
u32 desc_addr;
u32 desc_ctl;
const int slot = (int)(desc - ring->vbase);
struct sk_buff *skb; struct sk_buff *skb;
assert(slot >= 0 && slot < ring->nr_slots);
assert(!ring->tx); assert(!ring->tx);
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb)) if (unlikely(!skb))
return -ENOMEM; return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
dev_kfree_skb_any(skb);
printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
"(0x%llx, len: %u)\n",
(unsigned long long)dmaaddr, ring->rx_buffersize);
return -ENOMEM;
}
meta->skb = skb; meta->skb = skb;
meta->dmaaddr = dmaaddr; meta->dmaaddr = dmaaddr;
skb->dev = ring->bcm->net_dev; skb->dev = ring->bcm->net_dev;
desc_addr = (u32)(dmaaddr + ring->memoffset);
desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK & fill_descriptor(ring, desc, dmaaddr,
(u32)(ring->rx_buffersize - ring->frameoffset)); ring->rx_buffersize, 0, 0, 0);
if (slot == ring->nr_slots - 1)
desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
set_desc_addr(desc, desc_addr);
set_desc_ctl(desc, desc_ctl);
rxhdr = (struct bcm43xx_rxhdr *)(skb->data); rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
rxhdr->frame_length = 0; rxhdr->frame_length = 0;
rxhdr->flags1 = 0; rxhdr->flags1 = 0;
xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
xmitstat->cookie = 0;
return 0; return 0;
} }
...@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring, ...@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring) static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{ {
int i, err = -ENOMEM; int i, err = -ENOMEM;
struct bcm43xx_dmadesc *desc; struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta; struct bcm43xx_dmadesc_meta *meta;
for (i = 0; i < ring->nr_slots; i++) { for (i = 0; i < ring->nr_slots; i++) {
desc = ring->vbase + i; desc = bcm43xx_dma_idx2desc(ring, i, &meta);
meta = ring->meta + i;
err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
if (err) if (err)
goto err_unwind; goto err_unwind;
} }
mb();
ring->used_slots = ring->nr_slots; ring->used_slots = ring->nr_slots;
err = 0; err = 0;
out: out:
...@@ -354,8 +447,7 @@ out: ...@@ -354,8 +447,7 @@ out:
err_unwind: err_unwind:
for (i--; i >= 0; i--) { for (i--; i >= 0; i--) {
desc = ring->vbase + i; desc = bcm43xx_dma_idx2desc(ring, i, &meta);
meta = ring->meta + i;
unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
dev_kfree_skb(meta->skb); dev_kfree_skb(meta->skb);
...@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring) ...@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{ {
int err = 0; int err = 0;
u32 value; u32 value;
u32 addrext;
if (ring->tx) { if (ring->tx) {
/* Set Transmit Control register to "transmit enable" */ if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, u64 ringbase = (u64)(ring->dmabase);
BCM43xx_DMA_TXCTRL_ENABLE);
/* Set Transmit Descriptor ring address. */ addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, value = BCM43xx_DMA64_TXENABLE;
ring->dmabase + ring->memoffset); value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
& BCM43xx_DMA64_TXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
(ringbase & 0xFFFFFFFF));
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
| ring->routing);
} else {
u32 ringbase = (u32)(ring->dmabase);
addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
value = BCM43xx_DMA32_TXENABLE;
value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
& BCM43xx_DMA32_TXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
(ringbase & ~BCM43xx_DMA32_ROUTING)
| ring->routing);
}
} else { } else {
err = alloc_initial_descbuffers(ring); err = alloc_initial_descbuffers(ring);
if (err) if (err)
goto out; goto out;
/* Set Receive Control "receive enable" and frame offset */ if (ring->dma64) {
value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT); u64 ringbase = (u64)(ring->dmabase);
value |= BCM43xx_DMA_RXCTRL_ENABLE;
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value); addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
/* Set Receive Descriptor ring address. */ value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, value |= BCM43xx_DMA64_RXENABLE;
ring->dmabase + ring->memoffset); value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
/* Init the descriptor pointer. */ & BCM43xx_DMA64_RXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200); bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
(ringbase & 0xFFFFFFFF));
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
| ring->routing);
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
} else {
u32 ringbase = (u32)(ring->dmabase);
addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
value |= BCM43xx_DMA32_RXENABLE;
value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
& BCM43xx_DMA32_RXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
(ringbase & ~BCM43xx_DMA32_ROUTING)
| ring->routing);
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
}
} }
out: out:
...@@ -402,27 +534,32 @@ out: ...@@ -402,27 +534,32 @@ out:
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring) static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{ {
if (ring->tx) { if (ring->tx) {
bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base); bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
/* Zero out Transmit Descriptor ring address. */ if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0); bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
} else
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
} else { } else {
bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base); bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
/* Zero out Receive Descriptor ring address. */ if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0); bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
} else
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
} }
} }
static void free_all_descbuffers(struct bcm43xx_dmaring *ring) static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{ {
struct bcm43xx_dmadesc *desc; struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta; struct bcm43xx_dmadesc_meta *meta;
int i; int i;
if (!ring->used_slots) if (!ring->used_slots)
return; return;
for (i = 0; i < ring->nr_slots; i++) { for (i = 0; i < ring->nr_slots; i++) {
desc = ring->vbase + i; desc = bcm43xx_dma_idx2desc(ring, i, &meta);
meta = ring->meta + i;
if (!meta->skb) { if (!meta->skb) {
assert(ring->tx); assert(ring->tx);
...@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring) ...@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
} }
if (ring->tx) { if (ring->tx) {
unmap_descbuffer(ring, meta->dmaaddr, unmap_descbuffer(ring, meta->dmaaddr,
meta->skb->len, 1); meta->skb->len, 1);
} else { } else {
unmap_descbuffer(ring, meta->dmaaddr, unmap_descbuffer(ring, meta->dmaaddr,
ring->rx_buffersize, 0); ring->rx_buffersize, 0);
} }
free_descriptor_buffer(ring, desc, meta, 0); free_descriptor_buffer(ring, meta, 0);
} }
} }
/* Main initialization function. */ /* Main initialization function. */
static static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm, struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
u16 dma_controller_base, int controller_index,
int nr_descriptor_slots, int for_tx,
int tx) int dma64)
{ {
struct bcm43xx_dmaring *ring; struct bcm43xx_dmaring *ring;
int err; int err;
int nr_slots;
ring = kzalloc(sizeof(*ring), GFP_KERNEL); ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) if (!ring)
goto out; goto out;
ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots, nr_slots = BCM43xx_RXRING_SLOTS;
if (for_tx)
nr_slots = BCM43xx_TXRING_SLOTS;
ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
GFP_KERNEL); GFP_KERNEL);
if (!ring->meta) if (!ring->meta)
goto err_kfree_ring; goto err_kfree_ring;
ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET; ring->routing = BCM43xx_DMA32_CLIENTTRANS;
if (dma64)
ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX #ifdef CONFIG_BCM947XX
if (bcm->pci_dev->bus->number == 0) if (bcm->pci_dev->bus->number == 0)
ring->memoffset = 0; ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif #endif
ring->bcm = bcm; ring->bcm = bcm;
ring->nr_slots = nr_descriptor_slots; ring->nr_slots = nr_slots;
ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100; ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100; ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
assert(ring->suspend_mark < ring->resume_mark); assert(ring->suspend_mark < ring->resume_mark);
ring->mmio_base = dma_controller_base; ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
if (tx) { ring->index = controller_index;
ring->dma64 = !!dma64;
if (for_tx) {
ring->tx = 1; ring->tx = 1;
ring->current_slot = -1; ring->current_slot = -1;
} else { } else {
switch (dma_controller_base) { if (ring->index == 0) {
case BCM43xx_MMIO_DMA1_BASE: ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE; ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET; } else if (ring->index == 3) {
break; ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
case BCM43xx_MMIO_DMA4_BASE: ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE; } else
ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
break;
default:
assert(0); assert(0);
}
} }
err = alloc_ringmemory(ring); err = alloc_ringmemory(ring);
...@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring) ...@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
if (!ring) if (!ring)
return; return;
dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n", dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
(ring->dma64) ? "64" : "32",
ring->mmio_base, ring->mmio_base,
(ring->tx) ? "TX" : "RX", (ring->tx) ? "TX" : "RX",
ring->max_used_slots, ring->nr_slots); ring->max_used_slots, ring->nr_slots);
...@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm) ...@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
return; return;
dma = bcm43xx_current_dma(bcm); dma = bcm43xx_current_dma(bcm);
bcm43xx_destroy_dmaring(dma->rx_ring1); bcm43xx_destroy_dmaring(dma->rx_ring3);
dma->rx_ring1 = NULL; dma->rx_ring3 = NULL;
bcm43xx_destroy_dmaring(dma->rx_ring0); bcm43xx_destroy_dmaring(dma->rx_ring0);
dma->rx_ring0 = NULL; dma->rx_ring0 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring5);
dma->tx_ring5 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring4);
dma->tx_ring4 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring3); bcm43xx_destroy_dmaring(dma->tx_ring3);
dma->tx_ring3 = NULL; dma->tx_ring3 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring2); bcm43xx_destroy_dmaring(dma->tx_ring2);
...@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm) ...@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm); struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
struct bcm43xx_dmaring *ring; struct bcm43xx_dmaring *ring;
int err = -ENOMEM; int err = -ENOMEM;
int dma64 = 0;
u32 sbtmstatehi;
sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
dma64 = 1;
/* setup TX DMA channels. */ /* setup TX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE, ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
BCM43xx_TXRING_SLOTS, 1);
if (!ring) if (!ring)
goto out; goto out;
dma->tx_ring0 = ring; dma->tx_ring0 = ring;
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE, ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
BCM43xx_TXRING_SLOTS, 1);
if (!ring) if (!ring)
goto err_destroy_tx0; goto err_destroy_tx0;
dma->tx_ring1 = ring; dma->tx_ring1 = ring;
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE, ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
BCM43xx_TXRING_SLOTS, 1);
if (!ring) if (!ring)
goto err_destroy_tx1; goto err_destroy_tx1;
dma->tx_ring2 = ring; dma->tx_ring2 = ring;
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE, ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
BCM43xx_TXRING_SLOTS, 1);
if (!ring) if (!ring)
goto err_destroy_tx2; goto err_destroy_tx2;
dma->tx_ring3 = ring; dma->tx_ring3 = ring;
/* setup RX DMA channels. */ ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
BCM43xx_RXRING_SLOTS, 0);
if (!ring) if (!ring)
goto err_destroy_tx3; goto err_destroy_tx3;
dma->tx_ring4 = ring;
ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
if (!ring)
goto err_destroy_tx4;
dma->tx_ring5 = ring;
/* setup RX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
if (!ring)
goto err_destroy_tx5;
dma->rx_ring0 = ring; dma->rx_ring0 = ring;
if (bcm->current_core->rev < 5) { if (bcm->current_core->rev < 5) {
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE, ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
BCM43xx_RXRING_SLOTS, 0);
if (!ring) if (!ring)
goto err_destroy_rx0; goto err_destroy_rx0;
dma->rx_ring1 = ring; dma->rx_ring3 = ring;
} }
dprintk(KERN_INFO PFX "DMA initialized\n"); dprintk(KERN_INFO PFX "%s DMA initialized\n",
dma64 ? "64-bit" : "32-bit");
err = 0; err = 0;
out: out:
return err; return err;
...@@ -605,6 +764,12 @@ out: ...@@ -605,6 +764,12 @@ out:
err_destroy_rx0: err_destroy_rx0:
bcm43xx_destroy_dmaring(dma->rx_ring0); bcm43xx_destroy_dmaring(dma->rx_ring0);
dma->rx_ring0 = NULL; dma->rx_ring0 = NULL;
err_destroy_tx5:
bcm43xx_destroy_dmaring(dma->tx_ring5);
dma->tx_ring5 = NULL;
err_destroy_tx4:
bcm43xx_destroy_dmaring(dma->tx_ring4);
dma->tx_ring4 = NULL;
err_destroy_tx3: err_destroy_tx3:
bcm43xx_destroy_dmaring(dma->tx_ring3); bcm43xx_destroy_dmaring(dma->tx_ring3);
dma->tx_ring3 = NULL; dma->tx_ring3 = NULL;
...@@ -624,7 +789,7 @@ err_destroy_tx0: ...@@ -624,7 +789,7 @@ err_destroy_tx0:
static u16 generate_cookie(struct bcm43xx_dmaring *ring, static u16 generate_cookie(struct bcm43xx_dmaring *ring,
int slot) int slot)
{ {
u16 cookie = 0xF000; u16 cookie = 0x1000;
/* Use the upper 4 bits of the cookie as /* Use the upper 4 bits of the cookie as
* DMA controller ID and store the slot number * DMA controller ID and store the slot number
...@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring, ...@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
* Note that the cookie must never be 0, as this * Note that the cookie must never be 0, as this
* is a special value used in RX path. * is a special value used in RX path.
*/ */
switch (ring->mmio_base) { switch (ring->index) {
default: case 0:
assert(0);
case BCM43xx_MMIO_DMA1_BASE:
cookie = 0xA000; cookie = 0xA000;
break; break;
case BCM43xx_MMIO_DMA2_BASE: case 1:
cookie = 0xB000; cookie = 0xB000;
break; break;
case BCM43xx_MMIO_DMA3_BASE: case 2:
cookie = 0xC000; cookie = 0xC000;
break; break;
case BCM43xx_MMIO_DMA4_BASE: case 3:
cookie = 0xD000; cookie = 0xD000;
break; break;
case 4:
cookie = 0xE000;
break;
case 5:
cookie = 0xF000;
break;
} }
assert(((u16)slot & 0xF000) == 0x0000); assert(((u16)slot & 0xF000) == 0x0000);
cookie |= (u16)slot; cookie |= (u16)slot;
...@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm, ...@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
case 0xD000: case 0xD000:
ring = dma->tx_ring3; ring = dma->tx_ring3;
break; break;
case 0xE000:
ring = dma->tx_ring4;
break;
case 0xF000:
ring = dma->tx_ring5;
break;
default: default:
assert(0); assert(0);
} }
...@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm, ...@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring, static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
int slot) int slot)
{ {
u16 offset;
int descsize;
/* Everything is ready to start. Buffers are DMA mapped and /* Everything is ready to start. Buffers are DMA mapped and
* associated with slots. * associated with slots.
* "slot" is the last slot of the new frame we want to transmit. * "slot" is the last slot of the new frame we want to transmit.
...@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring, ...@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
*/ */
wmb(); wmb();
slot = next_slot(ring, slot); slot = next_slot(ring, slot);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX, offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
(u32)(slot * sizeof(struct bcm43xx_dmadesc))); descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
: sizeof(struct bcm43xx_dmadesc32);
bcm43xx_dma_write(ring, offset,
(u32)(slot * descsize));
} }
static int dma_tx_fragment(struct bcm43xx_dmaring *ring, static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
struct sk_buff *skb, struct sk_buff *skb,
u8 cur_frag) u8 cur_frag)
{ {
int slot; int slot;
struct bcm43xx_dmadesc *desc; struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta; struct bcm43xx_dmadesc_meta *meta;
u32 desc_ctl; dma_addr_t dmaaddr;
u32 desc_addr;
assert(skb_shinfo(skb)->nr_frags == 0); assert(skb_shinfo(skb)->nr_frags == 0);
slot = request_slot(ring); slot = request_slot(ring);
desc = ring->vbase + slot; desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
meta = ring->meta + slot;
/* Add a device specific TX header. */ /* Add a device specific TX header. */
assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr)); assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
...@@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring, ...@@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
generate_cookie(ring, slot)); generate_cookie(ring, slot));
meta->skb = skb; meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) { meta->dmaaddr = dmaaddr;
return_slot(ring, slot);
printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
"(0x%llx, len: %u)\n",
(unsigned long long)meta->dmaaddr, skb->len);
return -ENOMEM;
}
desc_addr = (u32)(meta->dmaaddr + ring->memoffset); fill_descriptor(ring, desc, dmaaddr,
desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND; skb->len, 1, 1, 1);
desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
(u32)(meta->skb->len - ring->frameoffset));
if (slot == ring->nr_slots - 1)
desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
set_desc_ctl(desc, desc_ctl);
set_desc_addr(desc, desc_addr);
/* Now transfer the whole frame. */ /* Now transfer the whole frame. */
dmacontroller_poke_tx(ring, slot); dmacontroller_poke_tx(ring, slot);
return 0;
} }
int bcm43xx_dma_tx(struct bcm43xx_private *bcm, int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
...@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm, ...@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
/* Take skb from ieee80211_txb_free */ /* Take skb from ieee80211_txb_free */
txb->fragments[i] = NULL; txb->fragments[i] = NULL;
dma_tx_fragment(ring, skb, i); dma_tx_fragment(ring, skb, i);
//TODO: handle failure of dma_tx_fragment
} }
ieee80211_txb_free(txb); ieee80211_txb_free(txb);
...@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm, ...@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
struct bcm43xx_xmitstatus *status) struct bcm43xx_xmitstatus *status)
{ {
struct bcm43xx_dmaring *ring; struct bcm43xx_dmaring *ring;
struct bcm43xx_dmadesc *desc; struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta; struct bcm43xx_dmadesc_meta *meta;
int is_last_fragment; int is_last_fragment;
int slot; int slot;
u32 tmp;
ring = parse_cookie(bcm, status->cookie, &slot); ring = parse_cookie(bcm, status->cookie, &slot);
assert(ring); assert(ring);
assert(ring->tx); assert(ring->tx);
assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
while (1) { while (1) {
assert(slot >= 0 && slot < ring->nr_slots); assert(slot >= 0 && slot < ring->nr_slots);
desc = ring->vbase + slot; desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
meta = ring->meta + slot;
is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND); if (ring->dma64) {
tmp = le32_to_cpu(desc->dma64.control0);
is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
} else {
tmp = le32_to_cpu(desc->dma32.control);
is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
}
unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
free_descriptor_buffer(ring, desc, meta, 1); free_descriptor_buffer(ring, meta, 1);
/* Everything belonging to the slot is unmapped /* Everything belonging to the slot is unmapped
* and freed, so we can return it. * and freed, so we can return it.
*/ */
...@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm, ...@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
static void dma_rx(struct bcm43xx_dmaring *ring, static void dma_rx(struct bcm43xx_dmaring *ring,
int *slot) int *slot)
{ {
struct bcm43xx_dmadesc *desc; struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta; struct bcm43xx_dmadesc_meta *meta;
struct bcm43xx_rxhdr *rxhdr; struct bcm43xx_rxhdr *rxhdr;
struct sk_buff *skb; struct sk_buff *skb;
...@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring, ...@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
int err; int err;
dma_addr_t dmaaddr; dma_addr_t dmaaddr;
desc = ring->vbase + *slot; desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
meta = ring->meta + *slot;
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb; skb = meta->skb;
if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) { if (ring->index == 3) {
/* We received an xmit status. */ /* We received an xmit status. */
struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
struct bcm43xx_xmitstatus stat; struct bcm43xx_xmitstatus stat;
...@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring, ...@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
s32 tmp = len; s32 tmp = len;
while (1) { while (1) {
desc = ring->vbase + *slot; desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
meta = ring->meta + *slot;
/* recycle the descriptor buffer. */ /* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr, sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize); ring->rx_buffersize);
...@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring, ...@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
break; break;
} }
printkl(KERN_ERR PFX "DMA RX buffer too small " printkl(KERN_ERR PFX "DMA RX buffer too small "
"(len: %u, buffer: %u, nr-dropped: %d)\n", "(len: %u, buffer: %u, nr-dropped: %d)\n",
len, ring->rx_buffersize, cnt); len, ring->rx_buffersize, cnt);
goto drop; goto drop;
} }
len -= IEEE80211_FCS_LEN; len -= IEEE80211_FCS_LEN;
...@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring) ...@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
#endif #endif
assert(!ring->tx); assert(!ring->tx);
status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS); if (ring->dma64) {
descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK); status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
current_slot = descptr / sizeof(struct bcm43xx_dmadesc); descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
} else {
status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
descptr = (status & BCM43xx_DMA32_RXDPTR);
current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
}
assert(current_slot >= 0 && current_slot < ring->nr_slots); assert(current_slot >= 0 && current_slot < ring->nr_slots);
slot = ring->current_slot; slot = ring->current_slot;
...@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring) ...@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
ring->max_used_slots = used_slots; ring->max_used_slots = used_slots;
#endif #endif
} }
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, if (ring->dma64) {
(u32)(slot * sizeof(struct bcm43xx_dmadesc))); bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
(u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
} else {
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
(u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}
ring->current_slot = slot; ring->current_slot = slot;
} }
...@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring) ...@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{ {
assert(ring->tx); assert(ring->tx);
bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1); bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, if (ring->dma64) {
bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL) bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
| BCM43xx_DMA_TXCTRL_SUSPEND); bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
| BCM43xx_DMA64_TXSUSPEND);
} else {
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
| BCM43xx_DMA32_TXSUSPEND);
}
} }
void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring) void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{ {
assert(ring->tx); assert(ring->tx);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, if (ring->dma64) {
bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL) bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
& ~BCM43xx_DMA_TXCTRL_SUSPEND); bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
& ~BCM43xx_DMA64_TXSUSPEND);
} else {
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
& ~BCM43xx_DMA32_TXSUSPEND);
}
bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1); bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
} }
...@@ -14,63 +14,179 @@ ...@@ -14,63 +14,179 @@
#define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13) #define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13)
#define BCM43xx_DMAIRQ_RX_DONE (1 << 16) #define BCM43xx_DMAIRQ_RX_DONE (1 << 16)
/* DMA controller register offsets. (relative to BCM43xx_DMA#_BASE) */
#define BCM43xx_DMA_TX_CONTROL 0x00 /*** 32-bit DMA Engine. ***/
#define BCM43xx_DMA_TX_DESC_RING 0x04
#define BCM43xx_DMA_TX_DESC_INDEX 0x08 /* 32-bit DMA controller registers. */
#define BCM43xx_DMA_TX_STATUS 0x0c #define BCM43xx_DMA32_TXCTL 0x00
#define BCM43xx_DMA_RX_CONTROL 0x10 #define BCM43xx_DMA32_TXENABLE 0x00000001
#define BCM43xx_DMA_RX_DESC_RING 0x14 #define BCM43xx_DMA32_TXSUSPEND 0x00000002
#define BCM43xx_DMA_RX_DESC_INDEX 0x18 #define BCM43xx_DMA32_TXLOOPBACK 0x00000004
#define BCM43xx_DMA_RX_STATUS 0x1c #define BCM43xx_DMA32_TXFLUSH 0x00000010
#define BCM43xx_DMA32_TXADDREXT_MASK 0x00030000
/* DMA controller channel control word values. */ #define BCM43xx_DMA32_TXADDREXT_SHIFT 16
#define BCM43xx_DMA_TXCTRL_ENABLE (1 << 0) #define BCM43xx_DMA32_TXRING 0x04
#define BCM43xx_DMA_TXCTRL_SUSPEND (1 << 1) #define BCM43xx_DMA32_TXINDEX 0x08
#define BCM43xx_DMA_TXCTRL_LOOPBACK (1 << 2) #define BCM43xx_DMA32_TXSTATUS 0x0C
#define BCM43xx_DMA_TXCTRL_FLUSH (1 << 4) #define BCM43xx_DMA32_TXDPTR 0x00000FFF
#define BCM43xx_DMA_RXCTRL_ENABLE (1 << 0) #define BCM43xx_DMA32_TXSTATE 0x0000F000
#define BCM43xx_DMA_RXCTRL_FRAMEOFF_MASK 0x000000fe #define BCM43xx_DMA32_TXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT 1 #define BCM43xx_DMA32_TXSTAT_ACTIVE 0x00001000
#define BCM43xx_DMA_RXCTRL_PIO (1 << 8) #define BCM43xx_DMA32_TXSTAT_IDLEWAIT 0x00002000
/* DMA controller channel status word values. */ #define BCM43xx_DMA32_TXSTAT_STOPPED 0x00003000
#define BCM43xx_DMA_TXSTAT_DPTR_MASK 0x00000fff #define BCM43xx_DMA32_TXSTAT_SUSP 0x00004000
#define BCM43xx_DMA_TXSTAT_STAT_MASK 0x0000f000 #define BCM43xx_DMA32_TXERROR 0x000F0000
#define BCM43xx_DMA_TXSTAT_STAT_DISABLED 0x00000000 #define BCM43xx_DMA32_TXERR_NOERR 0x00000000
#define BCM43xx_DMA_TXSTAT_STAT_ACTIVE 0x00001000 #define BCM43xx_DMA32_TXERR_PROT 0x00010000
#define BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT 0x00002000 #define BCM43xx_DMA32_TXERR_UNDERRUN 0x00020000
#define BCM43xx_DMA_TXSTAT_STAT_STOPPED 0x00003000 #define BCM43xx_DMA32_TXERR_BUFREAD 0x00030000
#define BCM43xx_DMA_TXSTAT_STAT_SUSP 0x00004000 #define BCM43xx_DMA32_TXERR_DESCREAD 0x00040000
#define BCM43xx_DMA_TXSTAT_ERROR_MASK 0x000f0000 #define BCM43xx_DMA32_TXACTIVE 0xFFF00000
#define BCM43xx_DMA_TXSTAT_FLUSHED (1 << 20) #define BCM43xx_DMA32_RXCTL 0x10
#define BCM43xx_DMA_RXSTAT_DPTR_MASK 0x00000fff #define BCM43xx_DMA32_RXENABLE 0x00000001
#define BCM43xx_DMA_RXSTAT_STAT_MASK 0x0000f000 #define BCM43xx_DMA32_RXFROFF_MASK 0x000000FE
#define BCM43xx_DMA_RXSTAT_STAT_DISABLED 0x00000000 #define BCM43xx_DMA32_RXFROFF_SHIFT 1
#define BCM43xx_DMA_RXSTAT_STAT_ACTIVE 0x00001000 #define BCM43xx_DMA32_RXDIRECTFIFO 0x00000100
#define BCM43xx_DMA_RXSTAT_STAT_IDLEWAIT 0x00002000 #define BCM43xx_DMA32_RXADDREXT_MASK 0x00030000
#define BCM43xx_DMA_RXSTAT_STAT_RESERVED 0x00003000 #define BCM43xx_DMA32_RXADDREXT_SHIFT 16
#define BCM43xx_DMA_RXSTAT_STAT_ERRORS 0x00004000 #define BCM43xx_DMA32_RXRING 0x14
#define BCM43xx_DMA_RXSTAT_ERROR_MASK 0x000f0000 #define BCM43xx_DMA32_RXINDEX 0x18
#define BCM43xx_DMA32_RXSTATUS 0x1C
/* DMA descriptor control field values. */ #define BCM43xx_DMA32_RXDPTR 0x00000FFF
#define BCM43xx_DMADTOR_BYTECNT_MASK 0x00001fff #define BCM43xx_DMA32_RXSTATE 0x0000F000
#define BCM43xx_DMADTOR_DTABLEEND (1 << 28) /* End of descriptor table */ #define BCM43xx_DMA32_RXSTAT_DISABLED 0x00000000
#define BCM43xx_DMADTOR_COMPIRQ (1 << 29) /* IRQ on completion request */ #define BCM43xx_DMA32_RXSTAT_ACTIVE 0x00001000
#define BCM43xx_DMADTOR_FRAMEEND (1 << 30) #define BCM43xx_DMA32_RXSTAT_IDLEWAIT 0x00002000
#define BCM43xx_DMADTOR_FRAMESTART (1 << 31) #define BCM43xx_DMA32_RXSTAT_STOPPED 0x00003000
#define BCM43xx_DMA32_RXERROR 0x000F0000
#define BCM43xx_DMA32_RXERR_NOERR 0x00000000
#define BCM43xx_DMA32_RXERR_PROT 0x00010000
#define BCM43xx_DMA32_RXERR_OVERFLOW 0x00020000
#define BCM43xx_DMA32_RXERR_BUFWRITE 0x00030000
#define BCM43xx_DMA32_RXERR_DESCREAD 0x00040000
#define BCM43xx_DMA32_RXACTIVE 0xFFF00000
/* 32-bit DMA descriptor. */
struct bcm43xx_dmadesc32 {
__le32 control;
__le32 address;
} __attribute__((__packed__));
#define BCM43xx_DMA32_DCTL_BYTECNT 0x00001FFF
#define BCM43xx_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT 16
#define BCM43xx_DMA32_DCTL_DTABLEEND 0x10000000
#define BCM43xx_DMA32_DCTL_IRQ 0x20000000
#define BCM43xx_DMA32_DCTL_FRAMEEND 0x40000000
#define BCM43xx_DMA32_DCTL_FRAMESTART 0x80000000
/* Address field Routing value. */
#define BCM43xx_DMA32_ROUTING 0xC0000000
#define BCM43xx_DMA32_ROUTING_SHIFT 30
#define BCM43xx_DMA32_NOTRANS 0x00000000
#define BCM43xx_DMA32_CLIENTTRANS 0x40000000
/*** 64-bit DMA Engine. ***/
/* 64-bit DMA controller registers. */
#define BCM43xx_DMA64_TXCTL 0x00
#define BCM43xx_DMA64_TXENABLE 0x00000001
#define BCM43xx_DMA64_TXSUSPEND 0x00000002
#define BCM43xx_DMA64_TXLOOPBACK 0x00000004
#define BCM43xx_DMA64_TXFLUSH 0x00000010
#define BCM43xx_DMA64_TXADDREXT_MASK 0x00030000
#define BCM43xx_DMA64_TXADDREXT_SHIFT 16
#define BCM43xx_DMA64_TXINDEX 0x04
#define BCM43xx_DMA64_TXRINGLO 0x08
#define BCM43xx_DMA64_TXRINGHI 0x0C
#define BCM43xx_DMA64_TXSTATUS 0x10
#define BCM43xx_DMA64_TXSTATDPTR 0x00001FFF
#define BCM43xx_DMA64_TXSTAT 0xF0000000
#define BCM43xx_DMA64_TXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA64_TXSTAT_ACTIVE 0x10000000
#define BCM43xx_DMA64_TXSTAT_IDLEWAIT 0x20000000
#define BCM43xx_DMA64_TXSTAT_STOPPED 0x30000000
#define BCM43xx_DMA64_TXSTAT_SUSP 0x40000000
#define BCM43xx_DMA64_TXERROR 0x14
#define BCM43xx_DMA64_TXERRDPTR 0x0001FFFF
#define BCM43xx_DMA64_TXERR 0xF0000000
#define BCM43xx_DMA64_TXERR_NOERR 0x00000000
#define BCM43xx_DMA64_TXERR_PROT 0x10000000
#define BCM43xx_DMA64_TXERR_UNDERRUN 0x20000000
#define BCM43xx_DMA64_TXERR_TRANSFER 0x30000000
#define BCM43xx_DMA64_TXERR_DESCREAD 0x40000000
#define BCM43xx_DMA64_TXERR_CORE 0x50000000
#define BCM43xx_DMA64_RXCTL 0x20
#define BCM43xx_DMA64_RXENABLE 0x00000001
#define BCM43xx_DMA64_RXFROFF_MASK 0x000000FE
#define BCM43xx_DMA64_RXFROFF_SHIFT 1
#define BCM43xx_DMA64_RXDIRECTFIFO 0x00000100
#define BCM43xx_DMA64_RXADDREXT_MASK 0x00030000
#define BCM43xx_DMA64_RXADDREXT_SHIFT 16
#define BCM43xx_DMA64_RXINDEX 0x24
#define BCM43xx_DMA64_RXRINGLO 0x28
#define BCM43xx_DMA64_RXRINGHI 0x2C
#define BCM43xx_DMA64_RXSTATUS 0x30
#define BCM43xx_DMA64_RXSTATDPTR 0x00001FFF
#define BCM43xx_DMA64_RXSTAT 0xF0000000
#define BCM43xx_DMA64_RXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA64_RXSTAT_ACTIVE 0x10000000
#define BCM43xx_DMA64_RXSTAT_IDLEWAIT 0x20000000
#define BCM43xx_DMA64_RXSTAT_STOPPED 0x30000000
#define BCM43xx_DMA64_RXSTAT_SUSP 0x40000000
#define BCM43xx_DMA64_RXERROR 0x34
#define BCM43xx_DMA64_RXERRDPTR 0x0001FFFF
#define BCM43xx_DMA64_RXERR 0xF0000000
#define BCM43xx_DMA64_RXERR_NOERR 0x00000000
#define BCM43xx_DMA64_RXERR_PROT 0x10000000
#define BCM43xx_DMA64_RXERR_UNDERRUN 0x20000000
#define BCM43xx_DMA64_RXERR_TRANSFER 0x30000000
#define BCM43xx_DMA64_RXERR_DESCREAD 0x40000000
#define BCM43xx_DMA64_RXERR_CORE 0x50000000
/* 64-bit DMA descriptor. */
struct bcm43xx_dmadesc64 {
__le32 control0;
__le32 control1;
__le32 address_low;
__le32 address_high;
} __attribute__((__packed__));
#define BCM43xx_DMA64_DCTL0_DTABLEEND 0x10000000
#define BCM43xx_DMA64_DCTL0_IRQ 0x20000000
#define BCM43xx_DMA64_DCTL0_FRAMEEND 0x40000000
#define BCM43xx_DMA64_DCTL0_FRAMESTART 0x80000000
#define BCM43xx_DMA64_DCTL1_BYTECNT 0x00001FFF
#define BCM43xx_DMA64_DCTL1_ADDREXT_MASK 0x00030000
#define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT 16
/* Address field Routing value. */
#define BCM43xx_DMA64_ROUTING 0xC0000000
#define BCM43xx_DMA64_ROUTING_SHIFT 30
#define BCM43xx_DMA64_NOTRANS 0x00000000
#define BCM43xx_DMA64_CLIENTTRANS 0x80000000
struct bcm43xx_dmadesc_generic {
union {
struct bcm43xx_dmadesc32 dma32;
struct bcm43xx_dmadesc64 dma64;
} __attribute__((__packed__));
} __attribute__((__packed__));
/* Misc DMA constants */ /* Misc DMA constants */
#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE #define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE
#define BCM43xx_DMA_BUSADDRMAX 0x3FFFFFFF #define BCM43xx_DMA0_RX_FRAMEOFFSET 30
#define BCM43xx_DMA_DMABUSADDROFFSET (1 << 30) #define BCM43xx_DMA3_RX_FRAMEOFFSET 0
#define BCM43xx_DMA1_RX_FRAMEOFFSET 30
#define BCM43xx_DMA4_RX_FRAMEOFFSET 0
/* DMA engine tuning knobs */ /* DMA engine tuning knobs */
#define BCM43xx_TXRING_SLOTS 512 #define BCM43xx_TXRING_SLOTS 512
#define BCM43xx_RXRING_SLOTS 64 #define BCM43xx_RXRING_SLOTS 64
#define BCM43xx_DMA1_RXBUFFERSIZE (2304 + 100) #define BCM43xx_DMA0_RX_BUFFERSIZE (2304 + 100)
#define BCM43xx_DMA4_RXBUFFERSIZE 16 #define BCM43xx_DMA3_RX_BUFFERSIZE 16
/* Suspend the tx queue, if less than this percent slots are free. */ /* Suspend the tx queue, if less than this percent slots are free. */
#define BCM43xx_TXSUSPEND_PERCENT 20 #define BCM43xx_TXSUSPEND_PERCENT 20
/* Resume the tx queue, if more than this percent slots are free. */ /* Resume the tx queue, if more than this percent slots are free. */
...@@ -86,17 +202,6 @@ struct bcm43xx_private; ...@@ -86,17 +202,6 @@ struct bcm43xx_private;
struct bcm43xx_xmitstatus; struct bcm43xx_xmitstatus;
struct bcm43xx_dmadesc {
__le32 _control;
__le32 _address;
} __attribute__((__packed__));
/* Macros to access the bcm43xx_dmadesc struct */
#define get_desc_ctl(desc) le32_to_cpu((desc)->_control)
#define set_desc_ctl(desc, ctl) do { (desc)->_control = cpu_to_le32(ctl); } while (0)
#define get_desc_addr(desc) le32_to_cpu((desc)->_address)
#define set_desc_addr(desc, addr) do { (desc)->_address = cpu_to_le32(addr); } while (0)
struct bcm43xx_dmadesc_meta { struct bcm43xx_dmadesc_meta {
/* The kernel DMA-able buffer. */ /* The kernel DMA-able buffer. */
struct sk_buff *skb; struct sk_buff *skb;
...@@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta { ...@@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta {
}; };
struct bcm43xx_dmaring { struct bcm43xx_dmaring {
struct bcm43xx_private *bcm;
/* Kernel virtual base address of the ring memory. */ /* Kernel virtual base address of the ring memory. */
struct bcm43xx_dmadesc *vbase; void *descbase;
/* DMA memory offset */
dma_addr_t memoffset;
/* (Unadjusted) DMA base bus-address of the ring memory. */
dma_addr_t dmabase;
/* Meta data about all descriptors. */ /* Meta data about all descriptors. */
struct bcm43xx_dmadesc_meta *meta; struct bcm43xx_dmadesc_meta *meta;
/* DMA Routing value. */
u32 routing;
/* (Unadjusted) DMA base bus-address of the ring memory. */
dma_addr_t dmabase;
/* Number of descriptor slots in the ring. */ /* Number of descriptor slots in the ring. */
int nr_slots; int nr_slots;
/* Number of used descriptor slots. */ /* Number of used descriptor slots. */
...@@ -127,12 +231,14 @@ struct bcm43xx_dmaring { ...@@ -127,12 +231,14 @@ struct bcm43xx_dmaring {
u32 frameoffset; u32 frameoffset;
/* Descriptor buffer size. */ /* Descriptor buffer size. */
u16 rx_buffersize; u16 rx_buffersize;
/* The MMIO base register of the DMA controller, this /* The MMIO base register of the DMA controller. */
* ring is posted to.
*/
u16 mmio_base; u16 mmio_base;
/* DMA controller index number (0-5). */
int index;
u8 tx:1, /* TRUE, if this is a TX ring. */ u8 tx:1, /* TRUE, if this is a TX ring. */
dma64:1, /* TRUE, if 64-bit DMA is enabled (FALSE if 32bit). */
suspended:1; /* TRUE, if transfers are suspended on this ring. */ suspended:1; /* TRUE, if transfers are suspended on this ring. */
struct bcm43xx_private *bcm;
#ifdef CONFIG_BCM43XX_DEBUG #ifdef CONFIG_BCM43XX_DEBUG
/* Maximum number of used slots. */ /* Maximum number of used slots. */
int max_used_slots; int max_used_slots;
...@@ -140,6 +246,34 @@ struct bcm43xx_dmaring { ...@@ -140,6 +246,34 @@ struct bcm43xx_dmaring {
}; };
static inline
int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc_generic *desc)
{
if (ring->dma64) {
struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
return (int)(&(desc->dma64) - dd64);
} else {
struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
return (int)(&(desc->dma32) - dd32);
}
}
static inline
struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring,
int slot,
struct bcm43xx_dmadesc_meta **meta)
{
*meta = &(ring->meta[slot]);
if (ring->dma64) {
struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot]));
} else {
struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot]));
}
}
static inline static inline
u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring, u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring,
u16 offset) u16 offset)
...@@ -159,9 +293,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm); ...@@ -159,9 +293,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm);
void bcm43xx_dma_free(struct bcm43xx_private *bcm); void bcm43xx_dma_free(struct bcm43xx_private *bcm);
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base); u16 dmacontroller_mmio_base,
int dma64);
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base); u16 dmacontroller_mmio_base,
int dma64);
u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx);
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring); void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring);
void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring); void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring);
...@@ -173,7 +311,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm, ...@@ -173,7 +311,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
struct ieee80211_txb *txb); struct ieee80211_txb *txb);
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring); void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
#else /* CONFIG_BCM43XX_DMA */ #else /* CONFIG_BCM43XX_DMA */
...@@ -188,13 +325,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm) ...@@ -188,13 +325,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
} }
static inline static inline
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base) u16 dmacontroller_mmio_base,
int dma64)
{ {
return 0; return 0;
} }
static inline static inline
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base) u16 dmacontroller_mmio_base,
int dma64)
{ {
return 0; return 0;
} }
......
...@@ -1371,6 +1371,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy) ...@@ -1371,6 +1371,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
if ((bcm43xx_core_enabled(bcm)) && if ((bcm43xx_core_enabled(bcm)) &&
!bcm43xx_using_pio(bcm)) { !bcm43xx_using_pio(bcm)) {
//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here? //FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
#if 0
#ifndef CONFIG_BCM947XX #ifndef CONFIG_BCM947XX
/* reset all used DMA controllers. */ /* reset all used DMA controllers. */
bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE); bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
...@@ -1380,6 +1381,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy) ...@@ -1380,6 +1381,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE); bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
if (bcm->current_core->rev < 5) if (bcm->current_core->rev < 5)
bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE); bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
#endif
#endif #endif
} }
if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) { if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
...@@ -1671,8 +1673,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm) ...@@ -1671,8 +1673,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm)
static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
{ {
u32 reason; u32 reason;
u32 dma_reason[4]; u32 dma_reason[6];
int activity = 0; u32 merged_dma_reason = 0;
int i, activity = 0;
unsigned long flags; unsigned long flags;
#ifdef CONFIG_BCM43XX_DEBUG #ifdef CONFIG_BCM43XX_DEBUG
...@@ -1684,10 +1687,10 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) ...@@ -1684,10 +1687,10 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
spin_lock_irqsave(&bcm->irq_lock, flags); spin_lock_irqsave(&bcm->irq_lock, flags);
reason = bcm->irq_reason; reason = bcm->irq_reason;
dma_reason[0] = bcm->dma_reason[0]; for (i = 5; i >= 0; i--) {
dma_reason[1] = bcm->dma_reason[1]; dma_reason[i] = bcm->dma_reason[i];
dma_reason[2] = bcm->dma_reason[2]; merged_dma_reason |= dma_reason[i];
dma_reason[3] = bcm->dma_reason[3]; }
if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) { if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) {
/* TX error. We get this when Template Ram is written in wrong endianess /* TX error. We get this when Template Ram is written in wrong endianess
...@@ -1698,27 +1701,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) ...@@ -1698,27 +1701,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n"); printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n");
bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR); bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR);
} }
if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_FATALMASK) | if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) {
(dma_reason[1] & BCM43xx_DMAIRQ_FATALMASK) |
(dma_reason[2] & BCM43xx_DMAIRQ_FATALMASK) |
(dma_reason[3] & BCM43xx_DMAIRQ_FATALMASK))) {
printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: " printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: "
"0x%08X, 0x%08X, 0x%08X, 0x%08X\n", "0x%08X, 0x%08X, 0x%08X, "
"0x%08X, 0x%08X, 0x%08X\n",
dma_reason[0], dma_reason[1], dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3]); dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
bcm43xx_controller_restart(bcm, "DMA error"); bcm43xx_controller_restart(bcm, "DMA error");
mmiowb(); mmiowb();
spin_unlock_irqrestore(&bcm->irq_lock, flags); spin_unlock_irqrestore(&bcm->irq_lock, flags);
return; return;
} }
if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) | if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) {
(dma_reason[1] & BCM43xx_DMAIRQ_NONFATALMASK) |
(dma_reason[2] & BCM43xx_DMAIRQ_NONFATALMASK) |
(dma_reason[3] & BCM43xx_DMAIRQ_NONFATALMASK))) {
printkl(KERN_ERR PFX "DMA error: " printkl(KERN_ERR PFX "DMA error: "
"0x%08X, 0x%08X, 0x%08X, 0x%08X\n", "0x%08X, 0x%08X, 0x%08X, "
"0x%08X, 0x%08X, 0x%08X\n",
dma_reason[0], dma_reason[1], dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3]); dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
} }
if (reason & BCM43xx_IRQ_PS) { if (reason & BCM43xx_IRQ_PS) {
...@@ -1753,8 +1754,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) ...@@ -1753,8 +1754,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
} }
/* Check the DMA reason registers for received data. */ /* Check the DMA reason registers for received data. */
assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) { if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) {
if (bcm43xx_using_pio(bcm)) if (bcm43xx_using_pio(bcm))
bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0); bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0);
...@@ -1762,13 +1761,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) ...@@ -1762,13 +1761,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0); bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0);
/* We intentionally don't set "activity" to 1, here. */ /* We intentionally don't set "activity" to 1, here. */
} }
assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) { if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) {
if (bcm43xx_using_pio(bcm)) if (bcm43xx_using_pio(bcm))
bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3); bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3);
else else
bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring1); bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3);
activity = 1; activity = 1;
} }
assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE));
assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE));
bcmirq_handled(BCM43xx_IRQ_RX); bcmirq_handled(BCM43xx_IRQ_RX);
if (reason & BCM43xx_IRQ_XMIT_STATUS) { if (reason & BCM43xx_IRQ_XMIT_STATUS) {
...@@ -1825,14 +1828,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason) ...@@ -1825,14 +1828,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason)
bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason); bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON, bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON,
bcm->dma_reason[0]); bcm->dma_reason[0]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON, bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
bcm->dma_reason[1]); bcm->dma_reason[1]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON, bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
bcm->dma_reason[2]); bcm->dma_reason[2]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON, bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
bcm->dma_reason[3]); bcm->dma_reason[3]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
bcm->dma_reason[4]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON,
bcm->dma_reason[5]);
} }
/* Interrupt handler top-half */ /* Interrupt handler top-half */
...@@ -1860,14 +1867,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re ...@@ -1860,14 +1867,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
if (!reason) if (!reason)
goto out; goto out;
bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON) bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
& 0x0001dc00; & 0x0001DC00;
bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON) bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
& 0x0000dc00; & 0x0000DC00;
bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON) bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
& 0x0000dc00; & 0x0000DC00;
bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON) bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
& 0x0001dc00; & 0x0001DC00;
bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
& 0x0000DC00;
bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON)
& 0x0000DC00;
bcm43xx_interrupt_ack(bcm, reason); bcm43xx_interrupt_ack(bcm, reason);
...@@ -2448,10 +2459,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm) ...@@ -2448,10 +2459,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
bcm43xx_write32(bcm, 0x018C, 0x02000000); bcm43xx_write32(bcm, 0x018C, 0x02000000);
} }
bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000); bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0001DC00); bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00); bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0000DC00); bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0001DC00); bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
value32 |= 0x00100000; value32 |= 0x00100000;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment