Commit a6a9a1a3 authored by David Brownell, committed by Tony Lindgren

musb_hdrc, CamelCase begone (cppi struct, locals ...)

Even more CamelCase removal from CPPI:
 - members of cppi_channel struct
 - parameters to cppi_channel_program()
 - "buffSz" local variables
 - "chNum" local variables
 - "regVal" local variables

Also
 - remove the FIXME about using container_of(), that's resolved now
 - stop using the now-pointless private_data fields
 - remove the redundant actualLen field (use dma_channel.actual_len)
 - remove some redundant local variables
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
parent 2c63a07e
...@@ -54,10 +54,10 @@ static inline void cpu_drain_writebuffer(void) ...@@ -54,10 +54,10 @@ static inline void cpu_drain_writebuffer(void)
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c) static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{ {
struct cppi_descriptor *bd = c->bdPoolHead; struct cppi_descriptor *bd = c->freelist;
if (bd) if (bd)
c->bdPoolHead = bd->next; c->freelist = bd->next;
return bd; return bd;
} }
...@@ -66,8 +66,8 @@ cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd) ...@@ -66,8 +66,8 @@ cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{ {
if (!bd) if (!bd)
return; return;
bd->next = c->bdPoolHead; bd->next = c->freelist;
c->bdPoolHead = bd; c->freelist = bd;
} }
/* /*
...@@ -106,14 +106,13 @@ static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c) ...@@ -106,14 +106,13 @@ static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
int j; int j;
/* initialize channel fields */ /* initialize channel fields */
c->activeQueueHead = NULL; c->head = NULL;
c->activeQueueTail = NULL; c->tail = NULL;
c->lastHwBDProcessed = NULL; c->last_processed = NULL;
c->Channel.status = MUSB_DMA_STATUS_UNKNOWN; c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = cppi; c->controller = cppi;
c->bLastModeRndis = 0; c->is_rndis = 0;
c->Channel.private_data = c; c->freelist = NULL;
c->bdPoolHead = NULL;
/* build the BD Free list for the channel */ /* build the BD Free list for the channel */
for (j = 0; j < NUM_TXCHAN_BD + 1; j++) { for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
...@@ -133,18 +132,18 @@ static void cppi_pool_free(struct cppi_channel *c) ...@@ -133,18 +132,18 @@ static void cppi_pool_free(struct cppi_channel *c)
struct cppi *cppi = c->controller; struct cppi *cppi = c->controller;
struct cppi_descriptor *bd; struct cppi_descriptor *bd;
(void) cppi_channel_abort(&c->Channel); (void) cppi_channel_abort(&c->channel);
c->Channel.status = MUSB_DMA_STATUS_UNKNOWN; c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
c->controller = NULL; c->controller = NULL;
/* free all its bds */ /* free all its bds */
bd = c->lastHwBDProcessed; bd = c->last_processed;
do { do {
if (bd) if (bd)
dma_pool_free(cppi->pool, bd, bd->dma); dma_pool_free(cppi->pool, bd, bd->dma);
bd = cppi_bd_alloc(c); bd = cppi_bd_alloc(c);
} while (bd); } while (bd);
c->lastHwBDProcessed = NULL; c->last_processed = NULL;
} }
static int __init cppi_controller_start(struct dma_controller *c) static int __init cppi_controller_start(struct dma_controller *c)
...@@ -158,11 +157,11 @@ static int __init cppi_controller_start(struct dma_controller *c) ...@@ -158,11 +157,11 @@ static int __init cppi_controller_start(struct dma_controller *c)
/* do whatever is necessary to start controller */ /* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
controller->tx[i].transmit = true; controller->tx[i].transmit = true;
controller->tx[i].chNo = i; controller->tx[i].index = i;
} }
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
controller->rx[i].transmit = false; controller->rx[i].transmit = false;
controller->rx[i].chNo = i; controller->rx[i].index = i;
} }
/* setup BD list on a per channel basis */ /* setup BD list on a per channel basis */
...@@ -176,23 +175,23 @@ static int __init cppi_controller_start(struct dma_controller *c) ...@@ -176,23 +175,23 @@ static int __init cppi_controller_start(struct dma_controller *c)
/* initialise tx/rx channel head pointers to zero */ /* initialise tx/rx channel head pointers to zero */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
struct cppi_channel *txChannel = controller->tx + i; struct cppi_channel *tx_ch = controller->tx + i;
struct cppi_tx_stateram __iomem *tx; struct cppi_tx_stateram __iomem *tx;
INIT_LIST_HEAD(&txChannel->tx_complete); INIT_LIST_HEAD(&tx_ch->tx_complete);
tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
txChannel->state_ram = tx; tx_ch->state_ram = tx;
cppi_reset_tx(tx, 0); cppi_reset_tx(tx, 0);
} }
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) { for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
struct cppi_channel *rxChannel = controller->rx + i; struct cppi_channel *rx_ch = controller->rx + i;
struct cppi_rx_stateram __iomem *rx; struct cppi_rx_stateram __iomem *rx;
INIT_LIST_HEAD(&rxChannel->tx_complete); INIT_LIST_HEAD(&rx_ch->tx_complete);
rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i); rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
rxChannel->state_ram = rx; rx_ch->state_ram = rx;
cppi_reset_rx(rx); cppi_reset_rx(rx);
} }
...@@ -237,7 +236,7 @@ static int cppi_controller_stop(struct dma_controller *c) ...@@ -237,7 +236,7 @@ static int cppi_controller_stop(struct dma_controller *c)
DBG(1, "Tearing down RX and TX Channels\n"); DBG(1, "Tearing down RX and TX Channels\n");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */ /* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].lastHwBDProcessed = NULL; controller->tx[i].last_processed = NULL;
cppi_pool_free(controller->tx + i); cppi_pool_free(controller->tx + i);
} }
for (i = 0; i < ARRAY_SIZE(controller->rx); i++) for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
...@@ -285,47 +284,45 @@ cppi_channel_allocate(struct dma_controller *c, ...@@ -285,47 +284,45 @@ cppi_channel_allocate(struct dma_controller *c,
u8 transmit) u8 transmit)
{ {
struct cppi *controller; struct cppi *controller;
u8 chNum; u8 index;
struct cppi_channel *cppi_ch; struct cppi_channel *cppi_ch;
void __iomem *tibase; void __iomem *tibase;
int local_end = ep->epnum;
controller = container_of(c, struct cppi, controller); controller = container_of(c, struct cppi, controller);
tibase = controller->tibase; tibase = controller->tibase;
/* remember local_end: 1..Max_EndPt, and cppi ChNum:0..Max_EndPt-1 */ /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
chNum = local_end - 1; index = ep->epnum - 1;
/* return the corresponding CPPI Channel Handle, and /* return the corresponding CPPI Channel Handle, and
* probably disable the non-CPPI irq until we need it. * probably disable the non-CPPI irq until we need it.
*/ */
if (transmit) { if (transmit) {
if (local_end > ARRAY_SIZE(controller->tx)) { if (index >= ARRAY_SIZE(controller->tx)) {
DBG(1, "no %cX DMA channel for ep%d\n", 'T', local_end); DBG(1, "no %cX%d CPPI channel\n", 'T', index);
return NULL; return NULL;
} }
cppi_ch = controller->tx + chNum; cppi_ch = controller->tx + index;
} else { } else {
if (local_end > ARRAY_SIZE(controller->rx)) { if (index >= ARRAY_SIZE(controller->rx)) {
DBG(1, "no %cX DMA channel for ep%d\n", 'R', local_end); DBG(1, "no %cX%d CPPI channel\n", 'R', index);
return NULL; return NULL;
} }
cppi_ch = controller->rx + chNum; cppi_ch = controller->rx + index;
core_rxirq_disable(tibase, local_end); core_rxirq_disable(tibase, ep->epnum);
} }
/* REVISIT make this an error later once the same driver code works /* REVISIT make this an error later once the same driver code works
* with the Mentor DMA engine too * with the other DMA engine too
*/ */
if (cppi_ch->hw_ep) if (cppi_ch->hw_ep)
DBG(1, "re-allocating DMA%d %cX channel %p\n", DBG(1, "re-allocating DMA%d %cX channel %p\n",
chNum, transmit ? 'T' : 'R', cppi_ch); index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep; cppi_ch->hw_ep = ep;
cppi_ch->Channel.status = MUSB_DMA_STATUS_FREE; cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
DBG(4, "Allocate CPPI%d %cX\n", chNum, transmit ? 'T' : 'R'); DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
cppi_ch->Channel.private_data = cppi_ch; return &cppi_ch->channel;
return &cppi_ch->Channel;
} }
/* Release a CPPI Channel. */ /* Release a CPPI Channel. */
...@@ -333,17 +330,15 @@ static void cppi_channel_release(struct dma_channel *channel) ...@@ -333,17 +330,15 @@ static void cppi_channel_release(struct dma_channel *channel)
{ {
struct cppi_channel *c; struct cppi_channel *c;
void __iomem *tibase; void __iomem *tibase;
unsigned epnum;
/* REVISIT: for paranoia, check state and abort if needed... */ /* REVISIT: for paranoia, check state and abort if needed... */
c = container_of(channel, struct cppi_channel, Channel); c = container_of(channel, struct cppi_channel, channel);
epnum = c->chNo + 1;
tibase = c->controller->tibase; tibase = c->controller->tibase;
if (!c->hw_ep) if (!c->hw_ep)
DBG(1, "releasing idle DMA channel %p\n", c); DBG(1, "releasing idle DMA channel %p\n", c);
else if (!c->transmit) else if (!c->transmit)
core_rxirq_enable(tibase, epnum); core_rxirq_enable(tibase, c->index + 1);
/* for now, leave its cppi IRQ enabled (we won't trigger it) */ /* for now, leave its cppi IRQ enabled (we won't trigger it) */
c->hw_ep = NULL; c->hw_ep = NULL;
...@@ -357,15 +352,15 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag) ...@@ -357,15 +352,15 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
void __iomem *base = c->controller->mregs; void __iomem *base = c->controller->mregs;
struct cppi_rx_stateram __iomem *rx = c->state_ram; struct cppi_rx_stateram __iomem *rx = c->state_ram;
musb_ep_select(base, c->chNo + 1); musb_ep_select(base, c->index + 1);
DBG(level, "RX DMA%d%s: %d left, csr %04x, " DBG(level, "RX DMA%d%s: %d left, csr %04x, "
"%08x H%08x S%08x C%08x, " "%08x H%08x S%08x C%08x, "
"B%08x L%08x %08x .. %08x" "B%08x L%08x %08x .. %08x"
"\n", "\n",
c->chNo, tag, c->index, tag,
musb_readl(c->controller->tibase, musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 *c->chNo), DAVINCI_RXCPPI_BUFCNT0_REG + 4 *c->index),
musb_readw(c->hw_ep->regs, MUSB_RXCSR), musb_readw(c->hw_ep->regs, MUSB_RXCSR),
musb_readl(&rx->rx_skipbytes, 0), musb_readl(&rx->rx_skipbytes, 0),
...@@ -387,13 +382,13 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag) ...@@ -387,13 +382,13 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
void __iomem *base = c->controller->mregs; void __iomem *base = c->controller->mregs;
struct cppi_tx_stateram __iomem *tx = c->state_ram; struct cppi_tx_stateram __iomem *tx = c->state_ram;
musb_ep_select(base, c->chNo + 1); musb_ep_select(base, c->index + 1);
DBG(level, "TX DMA%d%s: csr %04x, " DBG(level, "TX DMA%d%s: csr %04x, "
"H%08x S%08x C%08x %08x, " "H%08x S%08x C%08x %08x, "
"F%08x L%08x .. %08x" "F%08x L%08x .. %08x"
"\n", "\n",
c->chNo, tag, c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR), musb_readw(c->hw_ep->regs, MUSB_TXCSR),
musb_readl(&tx->tx_head, 0), musb_readl(&tx->tx_head, 0),
...@@ -414,18 +409,18 @@ cppi_rndis_update(struct cppi_channel *c, int is_rx, ...@@ -414,18 +409,18 @@ cppi_rndis_update(struct cppi_channel *c, int is_rx,
void __iomem *tibase, int is_rndis) void __iomem *tibase, int is_rndis)
{ {
/* we may need to change the rndis flag for this cppi channel */ /* we may need to change the rndis flag for this cppi channel */
if (c->bLastModeRndis != is_rndis) { if (c->is_rndis != is_rndis) {
u32 regVal = musb_readl(tibase, DAVINCI_RNDIS_REG); u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
u32 temp = 1 << (c->chNo); u32 temp = 1 << (c->index);
if (is_rx) if (is_rx)
temp <<= 16; temp <<= 16;
if (is_rndis) if (is_rndis)
regVal |= temp; value |= temp;
else else
regVal &= ~temp; value &= ~temp;
musb_writel(tibase, DAVINCI_RNDIS_REG, regVal); musb_writel(tibase, DAVINCI_RNDIS_REG, value);
c->bLastModeRndis = is_rndis; c->is_rndis = is_rndis;
} }
} }
...@@ -446,9 +441,9 @@ static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx) ...@@ -446,9 +441,9 @@ static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
if (!_dbg_level(level)) if (!_dbg_level(level))
return; return;
cppi_dump_rx(level, rx, tag); cppi_dump_rx(level, rx, tag);
if (rx->lastHwBDProcessed) if (rx->last_processed)
cppi_dump_rxbd("last", rx->lastHwBDProcessed); cppi_dump_rxbd("last", rx->last_processed);
for (bd = rx->activeQueueHead; bd; bd = bd->next) for (bd = rx->head; bd; bd = bd->next)
cppi_dump_rxbd("active", bd); cppi_dump_rxbd("active", bd);
#endif #endif
} }
...@@ -468,7 +463,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx, ...@@ -468,7 +463,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
/* start from "AutoReq never" */ /* start from "AutoReq never" */
tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG); tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
val = tmp & ~((0x3) << (rx->chNo * 2)); val = tmp & ~((0x3) << (rx->index * 2));
/* HCD arranged reqpkt for packet #1. we arrange int /* HCD arranged reqpkt for packet #1. we arrange int
* for all but the last one, maybe in two segments. * for all but the last one, maybe in two segments.
...@@ -476,11 +471,11 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx, ...@@ -476,11 +471,11 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
if (!onepacket) { if (!onepacket) {
#if 0 #if 0
/* use two segments, autoreq "all" then the last "never" */ /* use two segments, autoreq "all" then the last "never" */
val |= ((0x3) << (rx->chNo * 2)); val |= ((0x3) << (rx->index * 2));
n_bds--; n_bds--;
#else #else
/* one segment, autoreq "all-but-last" */ /* one segment, autoreq "all-but-last" */
val |= ((0x1) << (rx->chNo * 2)); val |= ((0x1) << (rx->index * 2));
#endif #endif
} }
...@@ -499,7 +494,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx, ...@@ -499,7 +494,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
#endif #endif
/* REQPKT is turned off after each segment */ /* REQPKT is turned off after each segment */
if (n_bds && rx->actualLen) { if (n_bds && rx->channel.actual_len) {
void __iomem *regs = rx->hw_ep->regs; void __iomem *regs = rx->hw_ep->regs;
val = musb_readw(regs, MUSB_RXCSR); val = musb_readw(regs, MUSB_RXCSR);
...@@ -567,9 +562,9 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx, ...@@ -567,9 +562,9 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
static void static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{ {
unsigned maxpacket = tx->pktSize; unsigned maxpacket = tx->maxpacket;
dma_addr_t addr = tx->startAddr + tx->currOffset; dma_addr_t addr = tx->buf_dma + tx->offset;
size_t length = tx->transferSize - tx->currOffset; size_t length = tx->buf_len - tx->offset;
struct cppi_descriptor *bd; struct cppi_descriptor *bd;
unsigned n_bds; unsigned n_bds;
unsigned i; unsigned i;
...@@ -597,7 +592,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) ...@@ -597,7 +592,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
} }
DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
tx->chNo, tx->index,
maxpacket, maxpacket,
rndis ? "rndis" : "transparent", rndis ? "rndis" : "transparent",
n_bds, n_bds,
...@@ -611,10 +606,13 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) ...@@ -611,10 +606,13 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
* the implicit ones of an iso urb). * the implicit ones of an iso urb).
*/ */
bd = tx->bdPoolHead; bd = tx->freelist;
tx->activeQueueHead = tx->bdPoolHead; tx->head = bd;
tx->lastHwBDProcessed = NULL; tx->last_processed = NULL;
/* FIXME use BD pool like RX side does, and just queue
* the minimum number for this request.
*/
/* Prepare queue of BDs first, then hand it to hardware. /* Prepare queue of BDs first, then hand it to hardware.
* All BDs except maybe the last should be of full packet * All BDs except maybe the last should be of full packet
...@@ -626,28 +624,27 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) ...@@ -626,28 +624,27 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
else else
bd->hw_next = 0; bd->hw_next = 0;
bd->hw_bufp = tx->startAddr + tx->currOffset; bd->hw_bufp = tx->buf_dma + tx->offset;
/* FIXME set EOP only on the last packet, /* FIXME set EOP only on the last packet,
* SOP only on the first ... avoid IRQs * SOP only on the first ... avoid IRQs
*/ */
if ((tx->currOffset + maxpacket) if ((tx->offset + maxpacket) <= tx->buf_len) {
<= tx->transferSize) { tx->offset += maxpacket;
tx->currOffset += maxpacket;
bd->hw_off_len = maxpacket; bd->hw_off_len = maxpacket;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | maxpacket; | CPPI_OWN_SET | maxpacket;
} else { } else {
/* only this one may be a partial USB Packet */ /* only this one may be a partial USB Packet */
u32 buffSz; u32 partial_len;
buffSz = tx->transferSize - tx->currOffset; partial_len = tx->buf_len - tx->offset;
tx->currOffset = tx->transferSize; tx->offset = tx->buf_len;
bd->hw_off_len = buffSz; bd->hw_off_len = partial_len;
bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
| CPPI_OWN_SET | buffSz; | CPPI_OWN_SET | partial_len;
if (buffSz == 0) if (partial_len == 0)
bd->hw_options |= CPPI_ZERO_SET; bd->hw_options |= CPPI_ZERO_SET;
} }
...@@ -656,7 +653,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) ...@@ -656,7 +653,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
bd->hw_off_len, bd->hw_options); bd->hw_off_len, bd->hw_options);
/* update the last BD enqueued to the list */ /* update the last BD enqueued to the list */
tx->activeQueueTail = bd; tx->tail = bd;
bd = bd->next; bd = bd->next;
} }
...@@ -664,7 +661,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) ...@@ -664,7 +661,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
cpu_drain_writebuffer(); cpu_drain_writebuffer();
/* Write to the HeadPtr in state RAM to trigger */ /* Write to the HeadPtr in state RAM to trigger */
musb_writel(&tx_ram->tx_head, 0, (u32)tx->bdPoolHead->dma); musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
cppi_dump_tx(5, tx, "/S"); cppi_dump_tx(5, tx, "/S");
} }
...@@ -764,9 +761,9 @@ MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic"); ...@@ -764,9 +761,9 @@ MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
static void static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{ {
unsigned maxpacket = rx->pktSize; unsigned maxpacket = rx->maxpacket;
dma_addr_t addr = rx->startAddr + rx->currOffset; dma_addr_t addr = rx->buf_dma + rx->offset;
size_t length = rx->transferSize - rx->currOffset; size_t length = rx->buf_len - rx->offset;
struct cppi_descriptor *bd, *tail; struct cppi_descriptor *bd, *tail;
unsigned n_bds; unsigned n_bds;
unsigned i; unsigned i;
...@@ -818,25 +815,25 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) ...@@ -818,25 +815,25 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
"dma 0x%x len %u %u/%u\n", "dma 0x%x len %u %u/%u\n",
rx->chNo, maxpacket, rx->index, maxpacket,
onepacket onepacket
? (is_rndis ? "rndis" : "onepacket") ? (is_rndis ? "rndis" : "onepacket")
: "multipacket", : "multipacket",
n_bds, n_bds,
musb_readl(tibase, musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4)) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff, & 0xffff,
addr, length, rx->actualLen, rx->transferSize); addr, length, rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents /* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets * correct queue shutdown after unexpected short packets
*/ */
bd = cppi_bd_alloc(rx); bd = cppi_bd_alloc(rx);
rx->activeQueueHead = bd; rx->head = bd;
/* Build BDs for all packets in this segment */ /* Build BDs for all packets in this segment */
for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
u32 buffSz; u32 bd_len;
if (i) { if (i) {
bd = cppi_bd_alloc(rx); bd = cppi_bd_alloc(rx);
...@@ -849,33 +846,33 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) ...@@ -849,33 +846,33 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
/* all but the last packet will be maxpacket size */ /* all but the last packet will be maxpacket size */
if (maxpacket < length) if (maxpacket < length)
buffSz = maxpacket; bd_len = maxpacket;
else else
buffSz = length; bd_len = length;
bd->hw_bufp = addr; bd->hw_bufp = addr;
addr += buffSz; addr += bd_len;
rx->currOffset += buffSz; rx->offset += bd_len;
bd->hw_off_len = (0 /*offset*/ << 16) + buffSz; bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
bd->buflen = buffSz; bd->buflen = bd_len;
bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0); bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
length -= buffSz; length -= bd_len;
} }
/* we always expect at least one reusable BD! */ /* we always expect at least one reusable BD! */
if (!tail) { if (!tail) {
WARN("rx dma%d -- no BDs? need %d\n", rx->chNo, n_bds); WARN("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
return; return;
} else if (i < n_bds) } else if (i < n_bds)
WARN("rx dma%d -- only %d of %d BDs\n", rx->chNo, i, n_bds); WARN("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
tail->next = NULL; tail->next = NULL;
tail->hw_next = 0; tail->hw_next = 0;
bd = rx->activeQueueHead; bd = rx->head;
rx->activeQueueTail = tail; rx->tail = tail;
/* short reads and other faults should terminate this entire /* short reads and other faults should terminate this entire
* dma segment. we want one "dma packet" per dma segment, not * dma segment. we want one "dma packet" per dma segment, not
...@@ -888,18 +885,18 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) ...@@ -888,18 +885,18 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
if (debug >= 5) { if (debug >= 5) {
struct cppi_descriptor *d; struct cppi_descriptor *d;
for (d = rx->activeQueueHead; d; d = d->next) for (d = rx->head; d; d = d->next)
cppi_dump_rxbd("S", d); cppi_dump_rxbd("S", d);
} }
/* in case the preceding transfer left some state... */ /* in case the preceding transfer left some state... */
tail = rx->lastHwBDProcessed; tail = rx->last_processed;
if (tail) { if (tail) {
tail->next = bd; tail->next = bd;
tail->hw_next = bd->dma; tail->hw_next = bd->dma;
} }
core_rxirq_enable(tibase, rx->chNo + 1); core_rxirq_enable(tibase, rx->index + 1);
/* BDs live in DMA-coherent memory, but writes might be pending */ /* BDs live in DMA-coherent memory, but writes might be pending */
cpu_drain_writebuffer(); cpu_drain_writebuffer();
...@@ -914,26 +911,26 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) ...@@ -914,26 +911,26 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
* growing ... grr. * growing ... grr.
*/ */
i = musb_readl(tibase, i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4)) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff; & 0xffff;
if (!i) if (!i)
musb_writel(tibase, musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4), DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2); n_bds + 2);
else if (n_bds > (i - 3)) else if (n_bds > (i - 3))
musb_writel(tibase, musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4), DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds - (i - 3)); n_bds - (i - 3));
i = musb_readl(tibase, i = musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4)) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff; & 0xffff;
if (i < (2 + n_bds)) { if (i < (2 + n_bds)) {
DBG(2, "bufcnt%d underrun - %d (for %d)\n", DBG(2, "bufcnt%d underrun - %d (for %d)\n",
rx->chNo, i, n_bds); rx->index, i, n_bds);
musb_writel(tibase, musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->chNo * 4), DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
n_bds + 2); n_bds + 2);
} }
...@@ -942,8 +939,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) ...@@ -942,8 +939,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
/** /**
* cppi_channel_program - program channel for data transfer * cppi_channel_program - program channel for data transfer
* @pChannel: the channel * @ch: the channel
* @wPacketSz: max packet size * @maxpacket: max packet size
* @mode: For RX, 1 unless the usb protocol driver promised to treat * @mode: For RX, 1 unless the usb protocol driver promised to treat
* all short reads as errors and kick in high level fault recovery. * all short reads as errors and kick in high level fault recovery.
* For TX, ignored because of RNDIS mode races/glitches. * For TX, ignored because of RNDIS mode races/glitches.
...@@ -951,46 +948,49 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket) ...@@ -951,46 +948,49 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
* @len: length of buffer * @len: length of buffer
* Context: controller irqlocked * Context: controller irqlocked
*/ */
static int cppi_channel_program(struct dma_channel *pChannel, static int cppi_channel_program(struct dma_channel *ch,
u16 wPacketSz, u8 mode, u16 maxpacket, u8 mode,
dma_addr_t dma_addr, u32 len) dma_addr_t dma_addr, u32 len)
{ {
struct cppi_channel *cppi_ch = pChannel->private_data; struct cppi_channel *cppi_ch;
struct cppi *controller = cppi_ch->controller; struct cppi *controller;
struct musb *musb = controller->musb; struct musb *musb;
switch (pChannel->status) { cppi_ch = container_of(ch, struct cppi_channel, channel);
controller = cppi_ch->controller;
musb = controller->musb;
switch (ch->status) {
case MUSB_DMA_STATUS_BUS_ABORT: case MUSB_DMA_STATUS_BUS_ABORT:
case MUSB_DMA_STATUS_CORE_ABORT: case MUSB_DMA_STATUS_CORE_ABORT:
/* fault irq handler should have handled cleanup */ /* fault irq handler should have handled cleanup */
WARN("%cX DMA%d not cleaned up after abort!\n", WARN("%cX DMA%d not cleaned up after abort!\n",
cppi_ch->transmit ? 'T' : 'R', cppi_ch->transmit ? 'T' : 'R',
cppi_ch->chNo); cppi_ch->index);
/* WARN_ON(1); */ /* WARN_ON(1); */
break; break;
case MUSB_DMA_STATUS_BUSY: case MUSB_DMA_STATUS_BUSY:
WARN("program active channel? %cX DMA%d\n", WARN("program active channel? %cX DMA%d\n",
cppi_ch->transmit ? 'T' : 'R', cppi_ch->transmit ? 'T' : 'R',
cppi_ch->chNo); cppi_ch->index);
/* WARN_ON(1); */ /* WARN_ON(1); */
break; break;
case MUSB_DMA_STATUS_UNKNOWN: case MUSB_DMA_STATUS_UNKNOWN:
DBG(1, "%cX DMA%d not allocated!\n", DBG(1, "%cX DMA%d not allocated!\n",
cppi_ch->transmit ? 'T' : 'R', cppi_ch->transmit ? 'T' : 'R',
cppi_ch->chNo); cppi_ch->index);
/* FALLTHROUGH */ /* FALLTHROUGH */
case MUSB_DMA_STATUS_FREE: case MUSB_DMA_STATUS_FREE:
break; break;
} }
pChannel->status = MUSB_DMA_STATUS_BUSY; ch->status = MUSB_DMA_STATUS_BUSY;
/* set transfer parameters, then queue up its first segment */ /* set transfer parameters, then queue up its first segment */
cppi_ch->startAddr = dma_addr; cppi_ch->buf_dma = dma_addr;
cppi_ch->currOffset = 0; cppi_ch->offset = 0;
cppi_ch->pktSize = wPacketSz; cppi_ch->maxpacket = maxpacket;
cppi_ch->actualLen = 0; cppi_ch->buf_len = len;
cppi_ch->transferSize = len;
/* TX channel? or RX? */ /* TX channel? or RX? */
if (cppi_ch->transmit) if (cppi_ch->transmit)
...@@ -1006,7 +1006,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1006,7 +1006,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
struct cppi_channel *rx = &cppi->rx[ch]; struct cppi_channel *rx = &cppi->rx[ch];
struct cppi_rx_stateram __iomem *state = rx->state_ram; struct cppi_rx_stateram __iomem *state = rx->state_ram;
struct cppi_descriptor *bd; struct cppi_descriptor *bd;
struct cppi_descriptor *last = rx->lastHwBDProcessed; struct cppi_descriptor *last = rx->last_processed;
int completed = 0, acked = 0; int completed = 0, acked = 0;
int i; int i;
dma_addr_t safe2ack; dma_addr_t safe2ack;
...@@ -1014,7 +1014,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1014,7 +1014,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
cppi_dump_rx(6, rx, "/K"); cppi_dump_rx(6, rx, "/K");
bd = last ? last->next : rx->activeQueueHead; bd = last ? last->next : rx->head;
if (!bd) if (!bd)
return 0; return 0;
...@@ -1032,7 +1032,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1032,7 +1032,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
"off.len %08x opt.len %08x (%d)\n", "off.len %08x opt.len %08x (%d)\n",
bd->dma, bd->hw_next, bd->hw_bufp, bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options, bd->hw_off_len, bd->hw_options,
rx->actualLen); rx->channel.actual_len);
/* actual packet received length */ /* actual packet received length */
if ((bd->hw_options & CPPI_SOP_SET) && !completed) if ((bd->hw_options & CPPI_SOP_SET) && !completed)
...@@ -1051,7 +1051,8 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1051,7 +1051,8 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
*/ */
completed = 1; completed = 1;
DBG(3, "rx short %d/%d (%d)\n", DBG(3, "rx short %d/%d (%d)\n",
len, bd->buflen, rx->actualLen); len, bd->buflen,
rx->channel.actual_len);
} }
/* If we got here, we expect to ack at least one BD; meanwhile /* If we got here, we expect to ack at least one BD; meanwhile
...@@ -1070,7 +1071,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1070,7 +1071,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
safe2ack = 0; safe2ack = 0;
} }
rx->actualLen += len; rx->channel.actual_len += len;
cppi_bd_free(rx, last); cppi_bd_free(rx, last);
last = bd; last = bd;
...@@ -1079,31 +1080,31 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1079,31 +1080,31 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (bd->hw_next == 0) if (bd->hw_next == 0)
completed = 1; completed = 1;
} }
rx->lastHwBDProcessed = last; rx->last_processed = last;
/* dma abort, lost ack, or ... */ /* dma abort, lost ack, or ... */
if (!acked && last) { if (!acked && last) {
int csr; int csr;
if (safe2ack == 0 || safe2ack == rx->lastHwBDProcessed->dma) if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
musb_writel(&state->rx_complete, 0, safe2ack); musb_writel(&state->rx_complete, 0, safe2ack);
if (safe2ack == 0) { if (safe2ack == 0) {
cppi_bd_free(rx, last); cppi_bd_free(rx, last);
rx->lastHwBDProcessed = NULL; rx->last_processed = NULL;
/* if we land here on the host side, H_REQPKT will /* if we land here on the host side, H_REQPKT will
* be clear and we need to restart the queue... * be clear and we need to restart the queue...
*/ */
WARN_ON(rx->activeQueueHead); WARN_ON(rx->head);
} }
musb_ep_select(cppi->mregs, rx->chNo + 1); musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR); csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) { if (csr & MUSB_RXCSR_DMAENAB) {
DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
rx->chNo, rx->index,
rx->activeQueueHead, rx->activeQueueTail, rx->head, rx->tail,
rx->lastHwBDProcessed rx->last_processed
? rx->lastHwBDProcessed->dma ? rx->last_processed->dma
: 0, : 0,
completed ? ", completed" : "", completed ? ", completed" : "",
csr); csr);
...@@ -1113,7 +1114,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1113,7 +1114,7 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (!completed) { if (!completed) {
int csr; int csr;
rx->activeQueueHead = bd; rx->head = bd;
/* REVISIT seems like "autoreq all but EOP" doesn't... /* REVISIT seems like "autoreq all but EOP" doesn't...
* setting it here "should" be racey, but seems to work * setting it here "should" be racey, but seems to work
...@@ -1128,8 +1129,8 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch) ...@@ -1128,8 +1129,8 @@ static int cppi_rx_scan(struct cppi *cppi, unsigned ch)
csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR); csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
} }
} else { } else {
rx->activeQueueHead = NULL; rx->head = NULL;
rx->activeQueueTail = NULL; rx->tail = NULL;
} }
cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned"); cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
...@@ -1153,19 +1154,19 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) ...@@ -1153,19 +1154,19 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
/* process TX channels */ /* process TX channels */
for (chanNum = 0; tx; tx = tx >> 1, chanNum++) { for (chanNum = 0; tx; tx = tx >> 1, chanNum++) {
if (tx & 1) { if (tx & 1) {
struct cppi_channel *txChannel; struct cppi_channel *tx_ch;
struct cppi_tx_stateram __iomem *txState; struct cppi_tx_stateram __iomem *txState;
txChannel = cppi->tx + chanNum; tx_ch = cppi->tx + chanNum;
txState = txChannel->state_ram; txState = tx_ch->state_ram;
/* FIXME need a cppi_tx_scan() routine, which /* FIXME need a cppi_tx_scan() routine, which
* can also be called from abort code * can also be called from abort code
*/ */
cppi_dump_tx(5, txChannel, "/E"); cppi_dump_tx(5, tx_ch, "/E");
bdPtr = txChannel->activeQueueHead; bdPtr = tx_ch->head;
if (NULL == bdPtr) { if (NULL == bdPtr) {
DBG(1, "null BD\n"); DBG(1, "null BD\n");
...@@ -1196,10 +1197,10 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) ...@@ -1196,10 +1197,10 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
bdPtr->hw_options); bdPtr->hw_options);
len = bdPtr->hw_off_len & CPPI_BUFFER_LEN_MASK; len = bdPtr->hw_off_len & CPPI_BUFFER_LEN_MASK;
txChannel->actualLen += len; tx_ch->channel.actual_len += len;
numCompleted++; numCompleted++;
txChannel->lastHwBDProcessed = bdPtr; tx_ch->last_processed = bdPtr;
/* write completion register to acknowledge /* write completion register to acknowledge
* processing of completed BDs, and possibly * processing of completed BDs, and possibly
...@@ -1220,20 +1221,16 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) ...@@ -1220,20 +1221,16 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
/* on end of segment, maybe go to next one */ /* on end of segment, maybe go to next one */
if (bReqComplete) { if (bReqComplete) {
/* cppi_dump_tx(4, txChannel, "/complete"); */ /* cppi_dump_tx(4, tx_ch, "/complete"); */
/* transfer more, or report completion */ /* transfer more, or report completion */
if (txChannel->currOffset if (tx_ch->offset >= tx_ch->buf_len) {
>= txChannel->transferSize) { tx_ch->head = NULL;
txChannel->activeQueueHead = NULL; tx_ch->tail = NULL;
txChannel->activeQueueTail = NULL; tx_ch->channel.status =
txChannel->Channel.status =
MUSB_DMA_STATUS_FREE; MUSB_DMA_STATUS_FREE;
hw_ep = txChannel->hw_ep; hw_ep = tx_ch->hw_ep;
txChannel->Channel.actual_len =
txChannel->actualLen;
/* Peripheral role never repurposes the /* Peripheral role never repurposes the
* endpoint, so immediate completion is * endpoint, so immediate completion is
...@@ -1262,10 +1259,10 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) ...@@ -1262,10 +1259,10 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
/* Bigger transfer than we could fit in /* Bigger transfer than we could fit in
* that first batch of descriptors... * that first batch of descriptors...
*/ */
cppi_next_tx_segment(musb, txChannel); cppi_next_tx_segment(musb, tx_ch);
} }
} else } else
txChannel->activeQueueHead = bdPtr; tx_ch->head = bdPtr;
} }
} }
...@@ -1273,9 +1270,9 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) ...@@ -1273,9 +1270,9 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
for (chanNum = 0; rx; rx = rx >> 1, chanNum++) { for (chanNum = 0; rx; rx = rx >> 1, chanNum++) {
if (rx & 1) { if (rx & 1) {
struct cppi_channel *rxChannel; struct cppi_channel *rx_ch;
rxChannel = cppi->rx + chanNum; rx_ch = cppi->rx + chanNum;
bReqComplete = cppi_rx_scan(cppi, chanNum); bReqComplete = cppi_rx_scan(cppi, chanNum);
/* let incomplete dma segments finish */ /* let incomplete dma segments finish */
...@@ -1283,20 +1280,18 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx) ...@@ -1283,20 +1280,18 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
continue; continue;
/* start another dma segment if needed */ /* start another dma segment if needed */
if (rxChannel->actualLen != rxChannel->transferSize if (rx_ch->channel.actual_len != rx_ch->buf_len
&& rxChannel->actualLen && rx_ch->channel.actual_len
== rxChannel->currOffset) { == rx_ch->offset) {
cppi_next_rx_segment(musb, rxChannel, 1); cppi_next_rx_segment(musb, rx_ch, 1);
continue; continue;
} }
/* all segments completed! */ /* all segments completed! */
rxChannel->Channel.status = MUSB_DMA_STATUS_FREE; rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
hw_ep = rxChannel->hw_ep; hw_ep = rx_ch->hw_ep;
rxChannel->Channel.actual_len =
rxChannel->actualLen;
core_rxirq_disable(tibase, chanNum + 1); core_rxirq_disable(tibase, chanNum + 1);
musb_dma_completion(musb, chanNum + 1, 0); musb_dma_completion(musb, chanNum + 1, 0);
} }
...@@ -1320,7 +1315,6 @@ dma_controller_create(struct musb *musb, void __iomem *mregs) ...@@ -1320,7 +1315,6 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
controller->tibase = mregs - DAVINCI_BASE_OFFSET; controller->tibase = mregs - DAVINCI_BASE_OFFSET;
controller->musb = musb; controller->musb = musb;
controller->controller.private_data = controller;
controller->controller.start = cppi_controller_start; controller->controller.start = cppi_controller_start;
controller->controller.stop = cppi_controller_stop; controller->controller.stop = cppi_controller_stop;
controller->controller.channel_alloc = cppi_channel_allocate; controller->controller.channel_alloc = cppi_channel_allocate;
...@@ -1367,17 +1361,15 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1367,17 +1361,15 @@ static int cppi_channel_abort(struct dma_channel *channel)
{ {
struct cppi_channel *cppi_ch; struct cppi_channel *cppi_ch;
struct cppi *controller; struct cppi *controller;
int chNum;
void __iomem *mbase; void __iomem *mbase;
void __iomem *tibase; void __iomem *tibase;
void __iomem *regs; void __iomem *regs;
u32 regVal; u32 value;
struct cppi_descriptor *queue; struct cppi_descriptor *queue;
cppi_ch = container_of(channel, struct cppi_channel, Channel); cppi_ch = container_of(channel, struct cppi_channel, channel);
controller = cppi_ch->controller; controller = cppi_ch->controller;
chNum = cppi_ch->chNo;
switch (channel->status) { switch (channel->status) {
case MUSB_DMA_STATUS_BUS_ABORT: case MUSB_DMA_STATUS_BUS_ABORT:
...@@ -1394,21 +1386,21 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1394,21 +1386,21 @@ static int cppi_channel_abort(struct dma_channel *channel)
return -EINVAL; return -EINVAL;
} }
if (!cppi_ch->transmit && cppi_ch->activeQueueHead) if (!cppi_ch->transmit && cppi_ch->head)
cppi_dump_rxq(3, "/abort", cppi_ch); cppi_dump_rxq(3, "/abort", cppi_ch);
mbase = controller->mregs; mbase = controller->mregs;
tibase = controller->tibase; tibase = controller->tibase;
queue = cppi_ch->activeQueueHead; queue = cppi_ch->head;
cppi_ch->activeQueueHead = NULL; cppi_ch->head = NULL;
cppi_ch->activeQueueTail = NULL; cppi_ch->tail = NULL;
/* REVISIT should rely on caller having done this, /* REVISIT should rely on caller having done this,
* and caller should rely on us not changing it. * and caller should rely on us not changing it.
* peripheral code is safe ... check host too. * peripheral code is safe ... check host too.
*/ */
musb_ep_select(mbase, chNum + 1); musb_ep_select(mbase, cppi_ch->index + 1);
if (cppi_ch->transmit) { if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *txState; struct cppi_tx_stateram __iomem *txState;
...@@ -1416,10 +1408,10 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1416,10 +1408,10 @@ static int cppi_channel_abort(struct dma_channel *channel)
/* mask interrupts raised to signal teardown complete. */ /* mask interrupts raised to signal teardown complete. */
enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
& (1 << cppi_ch->chNo); & (1 << cppi_ch->index);
if (enabled) if (enabled)
musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
(1 << cppi_ch->chNo)); (1 << cppi_ch->index));
/* REVISIT put timeouts on these controller handshakes */ /* REVISIT put timeouts on these controller handshakes */
...@@ -1427,14 +1419,14 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1427,14 +1419,14 @@ static int cppi_channel_abort(struct dma_channel *channel)
/* teardown DMA engine then usb core */ /* teardown DMA engine then usb core */
do { do {
regVal = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG); value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
} while (!(regVal & CPPI_TEAR_READY)); } while (!(value & CPPI_TEAR_READY));
musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, chNum); musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
txState = cppi_ch->state_ram; txState = cppi_ch->state_ram;
do { do {
regVal = musb_readl(&txState->tx_complete, 0); value = musb_readl(&txState->tx_complete, 0);
} while (0xFFFFFFFC != regVal); } while (0xFFFFFFFC != value);
musb_writel(&txState->tx_complete, 0, 0xFFFFFFFC); musb_writel(&txState->tx_complete, 0, 0xFFFFFFFC);
/* FIXME clean up the transfer state ... here? /* FIXME clean up the transfer state ... here?
...@@ -1442,16 +1434,16 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1442,16 +1434,16 @@ static int cppi_channel_abort(struct dma_channel *channel)
* an appropriate status code. * an appropriate status code.
*/ */
regVal = musb_readw(regs, MUSB_TXCSR); value = musb_readw(regs, MUSB_TXCSR);
regVal &= ~MUSB_TXCSR_DMAENAB; value &= ~MUSB_TXCSR_DMAENAB;
regVal |= MUSB_TXCSR_FLUSHFIFO; value |= MUSB_TXCSR_FLUSHFIFO;
musb_writew(regs, MUSB_TXCSR, regVal); musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, regVal); musb_writew(regs, MUSB_TXCSR, value);
/* re-enable interrupt */ /* re-enable interrupt */
if (enabled) if (enabled)
musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
(1 << cppi_ch->chNo)); (1 << cppi_ch->index));
/* While we scrub the TX state RAM, ensure that we clean /* While we scrub the TX state RAM, ensure that we clean
* up any interrupt that's currently asserted: * up any interrupt that's currently asserted:
...@@ -1480,13 +1472,13 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1480,13 +1472,13 @@ static int cppi_channel_abort(struct dma_channel *channel)
* current RX DMA state iff any pending fifo transfer is done. * current RX DMA state iff any pending fifo transfer is done.
*/ */
core_rxirq_disable(tibase, cppi_ch->chNo + 1); core_rxirq_disable(tibase, cppi_ch->index + 1);
/* for host, ensure ReqPkt is never set again */ /* for host, ensure ReqPkt is never set again */
if (is_host_active(cppi_ch->controller->musb)) { if (is_host_active(cppi_ch->controller->musb)) {
regVal = musb_readl(tibase, DAVINCI_AUTOREQ_REG); value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
regVal &= ~((0x3) << (cppi_ch->chNo * 2)); value &= ~((0x3) << (cppi_ch->index * 2));
musb_writel(tibase, DAVINCI_AUTOREQ_REG, regVal); musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
} }
csr = musb_readw(regs, MUSB_RXCSR); csr = musb_readw(regs, MUSB_RXCSR);
...@@ -1514,7 +1506,7 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1514,7 +1506,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
/* scan the current list, reporting any data that was /* scan the current list, reporting any data that was
* transferred and acking any IRQ * transferred and acking any IRQ
*/ */
cppi_rx_scan(controller, chNum); cppi_rx_scan(controller, cppi_ch->index);
/* clobber the existing state once it's idle /* clobber the existing state once it's idle
* *
...@@ -1535,8 +1527,8 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1535,8 +1527,8 @@ static int cppi_channel_abort(struct dma_channel *channel)
cppi_dump_rx(5, cppi_ch, " (done abort)"); cppi_dump_rx(5, cppi_ch, " (done abort)");
/* clean up previously pending bds */ /* clean up previously pending bds */
cppi_bd_free(cppi_ch, cppi_ch->lastHwBDProcessed); cppi_bd_free(cppi_ch, cppi_ch->last_processed);
cppi_ch->lastHwBDProcessed = NULL; cppi_ch->last_processed = NULL;
while (queue) { while (queue) {
struct cppi_descriptor *tmp = queue->next; struct cppi_descriptor *tmp = queue->next;
...@@ -1546,10 +1538,10 @@ static int cppi_channel_abort(struct dma_channel *channel) ...@@ -1546,10 +1538,10 @@ static int cppi_channel_abort(struct dma_channel *channel)
} }
channel->status = MUSB_DMA_STATUS_FREE; channel->status = MUSB_DMA_STATUS_FREE;
cppi_ch->startAddr = 0; cppi_ch->buf_dma = 0;
cppi_ch->currOffset = 0; cppi_ch->offset = 0;
cppi_ch->transferSize = 0; cppi_ch->buf_len = 0;
cppi_ch->pktSize = 0; cppi_ch->maxpacket = 0;
return 0; return 0;
} }
......
...@@ -78,10 +78,7 @@ struct cppi; ...@@ -78,10 +78,7 @@ struct cppi;
/* CPPI Channel Control structure */ /* CPPI Channel Control structure */
struct cppi_channel { struct cppi_channel {
/* First field must be dma_channel for easy type casting struct dma_channel channel;
* FIXME just use container_of() and be typesafe instead!
*/
struct dma_channel Channel;
/* back pointer to the DMA controller structure */ /* back pointer to the DMA controller structure */
struct cppi *controller; struct cppi *controller;
...@@ -89,25 +86,25 @@ struct cppi_channel { ...@@ -89,25 +86,25 @@ struct cppi_channel {
/* which direction of which endpoint? */ /* which direction of which endpoint? */
struct musb_hw_ep *hw_ep; struct musb_hw_ep *hw_ep;
bool transmit; bool transmit;
u8 chNo; u8 index;
/* DMA modes: RNDIS or "transparent" */ /* DMA modes: RNDIS or "transparent" */
u8 bLastModeRndis; u8 is_rndis;
/* book keeping for current transfer request */ /* book keeping for current transfer request */
dma_addr_t startAddr; dma_addr_t buf_dma;
u32 transferSize; u32 buf_len;
u32 pktSize; u32 maxpacket;
u32 currOffset; /* requested segments */ u32 offset; /* dma requested */
u32 actualLen; /* completed (Channel.actual) */
void __iomem *state_ram; /* CPPI state */ void __iomem *state_ram; /* CPPI state */
struct cppi_descriptor *freelist;
/* BD management fields */ /* BD management fields */
struct cppi_descriptor *bdPoolHead; struct cppi_descriptor *head;
struct cppi_descriptor *activeQueueHead; struct cppi_descriptor *tail;
struct cppi_descriptor *activeQueueTail; struct cppi_descriptor *last_processed;
struct cppi_descriptor *lastHwBDProcessed;
/* use tx_complete in host role to track endpoints waiting for /* use tx_complete in host role to track endpoints waiting for
* FIFONOTEMPTY to clear. * FIFONOTEMPTY to clear.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment