Commit 4368bafd authored by Felipe Balbi

usb: musb: cleanup most of dma ifdefery

Most of the ifdefs for DMA engines are gone and changed to
normal 'if' statements. No functional changes, just improvement
of code readability.
Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
parent cd66af4a
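
For context on the pattern applied throughout this diff: each DMA-engine macro now expands to a compile-time boolean constant, so an ordinary `if` can be folded away by the compiler much like the old `#ifdef`, while both branches remain visible for parsing and type checking. Below is a minimal standalone sketch of that pattern, using made-up names (`CONFIG_FOO_DMA`, `foo_dma_enabled()`, `foo_dma_start()`), not the musb code itself:

/* Sketch of the ifdef-to-if conversion pattern (illustrative names only).
 * The macro collapses to a constant, so the compiler can discard the dead
 * branch, but unlike #ifdef both branches still get compiled and checked.
 */
#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_FOO_DMA
#define foo_dma_enabled()	true
#else
#define foo_dma_enabled()	false
#endif

static void foo_dma_start(void)
{
	/* a real driver would touch DMA hardware here */
	printf("starting DMA\n");
}

static void foo_start_transfer(void)
{
	if (foo_dma_enabled())	/* old style: #ifdef CONFIG_FOO_DMA ... #endif */
		foo_dma_start();
	else
		printf("falling back to PIO\n");
}

int main(void)
{
	foo_start_transfer();
	return 0;
}

Building with -DCONFIG_FOO_DMA takes the DMA branch; without it, the call is dropped, yet the DMA code still had to compile.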
@@ -265,7 +265,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
 	/* CPPI interrupts share the same IRQ line, but have their own
 	 * mask, state, "vector", and EOI registers.
	 */
-	if (is_cppi_enabled()) {
+	if (cppi_ti_dma()) {
		u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
		u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
...
@@ -1596,7 +1596,7 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
	if (!epnum) {
 #ifndef CONFIG_USB_TUSB_OMAP_DMA
-		if (!is_cppi_enabled()) {
+		if (!cppi_ti_dma()) {
			/* endpoint 0 */
			if (devctl & MUSB_DEVCTL_HM)
				musb_h_ep0_irq(musb);
...
@@ -69,15 +69,21 @@ struct musb_hw_ep;
 #endif

 #ifdef CONFIG_USB_TI_CPPI_DMA
-#define is_cppi_enabled()	1
+#define cppi_ti_dma()		true
 #else
-#define is_cppi_enabled()	0
+#define cppi_ti_dma()		false
 #endif

 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-#define tusb_dma_omap()		1
+#define tusb_dma_omap()		true
 #else
-#define tusb_dma_omap()		0
+#define tusb_dma_omap()		false
 #endif

+#ifdef CONFIG_USB_INVENTRA_DMA
+#define musb_inventra_dma()	true
+#else
+#define musb_inventra_dma()	false
+#endif
+
 /*
...
@@ -297,8 +297,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
		/* MUSB_TXCSR_P_ISO is still set correctly */

-#ifdef CONFIG_USB_INVENTRA_DMA
-	{
+	if (musb_inventra_dma()) {
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
@@ -332,49 +331,51 @@ static void txstate(struct musb *musb, struct musb_request *req)
			}
		}

-#elif defined(CONFIG_USB_TI_CPPI_DMA)
+	if (cppi_ti_dma()) {
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_P_UNDERRUN
				| MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start. Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma,
				request->length);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			/* ASSERT: DMAENAB clear */
			csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
			/* invariant: prequest->buf is non-null */
		}
-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
-	use_dma = use_dma && c->channel_program(
-			musb_ep->dma, musb_ep->packet_sz,
-			request->zero,
-			request->dma,
-			request->length);
-#endif
+	}
+
+	if (tusb_dma_omap()) {
+		use_dma = use_dma && c->channel_program(
+				musb_ep->dma, musb_ep->packet_sz,
+				request->zero,
+				request->dma,
+				request->length);
+	}
	}
 #endif
@@ -580,7 +581,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
	csr = musb_readw(epio, MUSB_RXCSR);

-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (cppi_ti_dma() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;
@@ -610,8 +611,28 @@ static void rxstate(struct musb *musb, struct musb_request *req)
	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);
		if (request->actual < request->length) {
-#ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+	 * mode 0 only. So we do not get endpoint interrupts due to DMA
+	 * completion. We only get interrupts from DMA controller.
+	 *
+	 * We could operate in DMA mode 1 if we knew the size of the tranfer
+	 * in advance. For mass storage class, request->length = what the host
+	 * sends, so that'd work. But for pretty much everything else,
+	 * request->length is routinely more than what the host sends. For
+	 * most these gadgets, end of is signified either by a short packet,
+	 * or filling the last byte of the buffer. (Sending extra data in
+	 * that last pckate should trigger an overflow fault.) But in mode 1,
+	 * we don't get DMA completion interrrupt for short packets.
+	 *
+	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+	 * to get endpoint interrupt on every DMA req, but that didn't seem
+	 * to work reliably.
+	 *
+	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
+	 * then becomes usable as a runtime "use mode 1" hint...
+	 */
+			if (musb_inventra_dma() && musb_ep->dma) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
@@ -619,27 +640,6 @@ static void rxstate(struct musb *musb, struct musb_request *req)
				c = musb->dma_controller;
				channel = musb_ep->dma;

-	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
-	 * mode 0 only. So we do not get endpoint interrupts due to DMA
-	 * completion. We only get interrupts from DMA controller.
-	 *
-	 * We could operate in DMA mode 1 if we knew the size of the tranfer
-	 * in advance. For mass storage class, request->length = what the host
-	 * sends, so that'd work. But for pretty much everything else,
-	 * request->length is routinely more than what the host sends. For
-	 * most these gadgets, end of is signified either by a short packet,
-	 * or filling the last byte of the buffer. (Sending extra data in
-	 * that last pckate should trigger an overflow fault.) But in mode 1,
-	 * we don't get DMA completion interrrupt for short packets.
-	 *
-	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
-	 * to get endpoint interrupt on every DMA req, but that didn't seem
-	 * to work reliably.
-	 *
-	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
-	 * then becomes usable as a runtime "use mode 1" hint...
-	 */
				csr |= MUSB_RXCSR_DMAENAB;
 #ifdef USE_MODE1
				csr |= MUSB_RXCSR_AUTOCLEAR;
@@ -650,7 +650,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
				 * to get DMAReq to activate
				 */
				musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
 #endif
				musb_writew(epio, MUSB_RXCSR, csr);
@@ -679,17 +679,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
				if (use_dma)
					return;
			}
-#endif /* Mentor's DMA */
-
-		fifo_count = request->length - request->actual;
-		DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
-				musb_ep->end_point.name,
-				len, fifo_count,
-				musb_ep->packet_sz);
-
-		fifo_count = min(len, fifo_count);

-#ifdef CONFIG_USB_TUSB_OMAP_DMA
		if (tusb_dma_omap() && musb_ep->dma) {
			struct dma_controller *c = musb->dma_controller;
			struct dma_channel *channel = musb_ep->dma;
@@ -704,7 +694,14 @@ static void rxstate(struct musb *musb, struct musb_request *req)
			if (ret)
				return;
		}
-#endif
+
+		fifo_count = request->length - request->actual;
+		DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+				musb_ep->end_point.name,
+				len, fifo_count,
+				musb_ep->packet_sz);
+
+		fifo_count = min(len, fifo_count);

		musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
				(request->buf + request->actual));
@@ -800,11 +797,11 @@ void musb_g_rx(struct musb *musb, u8 epnum)
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
-		if ((dma->desired_mode == 0)
+		if ((tusb_dma_omap() || musb_inventra_dma())
+				&& ((dma->desired_mode == 0)
				|| (dma->actual_len
-					& (musb_ep->packet_sz - 1))) {
+					& (musb_ep->packet_sz - 1)))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
@@ -815,7 +812,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz))
			goto done;
-#endif
+
		musb_g_giveback(musb_ep, request, 0);
		request = next_request(musb_ep);
...
@@ -260,7 +260,7 @@ start:
		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
-		else if (is_cppi_enabled() || tusb_dma_omap())
+		else if (cppi_ti_dma() || tusb_dma_omap())
			cppi_host_txdma_start(hw_ep);
	}
 }
@@ -745,8 +745,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
		else
			load_count = min((u32) packet_sz, len);

-#ifdef CONFIG_USB_INVENTRA_DMA
-		if (dma_channel) {
+		if (musb_inventra_dma() && dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
@@ -793,10 +792,9 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
				dma_channel = NULL;
			}
		}
-#endif

		/* candidate for DMA */
-		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+		if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
@@ -888,7 +886,7 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
		/* kick things off */

-		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+		if ((cppi_ti_dma() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
@@ -1473,8 +1471,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

-#ifndef CONFIG_USB_INVENTRA_DMA
-	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
+	if (!musb_inventra_dma() && (rx_csr & MUSB_RXCSR_H_REQPKT)) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
@@ -1495,7 +1492,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
		}
-#endif
		if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
			xfer_len = dma->actual_len;
@@ -1561,8 +1558,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
	}

	/* we are expecting IN packets */
-#ifdef CONFIG_USB_INVENTRA_DMA
-	if (dma) {
+	if (musb_inventra_dma() && dma) {
		struct dma_controller	*c;
		u16			rx_count;
		int			ret, length;
@@ -1671,7 +1667,6 @@ void musb_host_rx(struct musb *musb, u8 epnum)
			/* REVISIT reset CSR */
		}
	}
-#endif /* Mentor DMA */

	if (!dma) {
		done = musb_host_packet_rx(musb, urb,
...