/******************************************************************
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file is part of the Inventra Controller Driver for Linux.
 *
 * The Inventra Controller Driver for Linux is free software; you
 * can redistribute it and/or modify it under the terms of the GNU
 * General Public License version 2 as published by the Free Software
 * Foundation.
 *
 * The Inventra Controller Driver for Linux is distributed in
 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with The Inventra Controller Driver for Linux ; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place,
 * Suite 330, Boston, MA  02111-1307  USA
 *
 * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
 * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
 * OF THOSE TERMS.  THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
 * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
 * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
 * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT.  MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
 * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
 * GRAPHICS SUPPORT CUSTOMER.
 ******************************************************************/

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>

#include "musbdefs.h"


/* MUSB PERIPHERAL status 3-mar:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but saw USBCV failures
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave well with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave well with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround:  they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/**************************************************************************
Handling completion
**************************************************************************/

/*
 * Immediately complete a request.
 *
 * @param pRequest the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*pRequest,
	int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(pRequest);

	list_del(&pRequest->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
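	/* with ep->busy set, a completion callback that queues another
	 * request can't recursively restart I/O on this endpoint
	 * (musb_gadget_queue only calls musb_ep_restart when !busy)
	 */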
	spin_unlock(&musb->lock);
	if (is_dma_capable()) {
		if (req->mapped) {
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
	} else if (req->request.dma != DMA_ADDR_INVALID)
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (pRequest->status == 0)
		DBG(5, "%s done request %p,  %d/%d\n",
				ep->end_point.name, pRequest,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, pRequest,
				req->request.actual, req->request.length,
				pRequest->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, completing them with the given
 * status. Synchronous. Caller has locked the controller, blocked IRQs,
 * and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;
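		/* write FLUSHFIFO twice, in case of double buffering */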
		if (ep->is_in) {
			musb_writew(epio, MGC_O_HDRC_TXCSR,
					0 | MGC_M_TXCSR_FLUSHFIFO);
			musb_writew(epio, MGC_O_HDRC_TXCSR,
					0 | MGC_M_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MGC_O_HDRC_RXCSR,
					0 | MGC_M_RXCSR_FLUSHFIFO);
			musb_writew(epio, MGC_O_HDRC_RXCSR,
					0 | MGC_M_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&(ep->req_list))) {
		req = container_of(ep->req_list.next, struct musb_request,
				request.list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/**************************************************************************
 * TX/IN and RX/OUT Data transfers
 **************************************************************************/

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->wPacketSize;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |     (data is transferred to the FIFO, then sent out when
	  |	IN token(s) are received from the Host.
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
	  |		      -> stop DMA, ~DmaEnab,
		  |		      -> set TxPktRdy for last short pkt or zlp
		  |		      -> Complete Request
		  |		      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*pRequest;
	u16			fifo_count = 0, wCsrVal;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MGC_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);

	pRequest = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(pRequest->length - pRequest->actual));

	if (wCsrVal & MGC_M_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, wCsrVal);
		return;
	}

	if (wCsrVal & MGC_M_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, wCsrVal);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->wPacketSize, fifo_count,
			wCsrVal);

#ifndef	CONFIG_USB_INVENTRA_FIFO
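	/* DMA paths; in a PIO-only build (CONFIG_USB_INVENTRA_FIFO defined)
	 * this whole block compiles out and use_dma stays zero
	 */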
	if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		use_dma = (pRequest->dma != DMA_ADDR_INVALID);

		/* MGC_M_TXCSR_P_ISO is still set correctly */

#ifdef CONFIG_USB_INVENTRA_DMA
		{
			size_t request_size;

			/* setup DMA, then program endpoint CSR */
			request_size = min(pRequest->length,
						musb_ep->dma->dwMaxLength);
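			/* per the mode notes above: mode 0 for transfers
			 * that fit in one packet, mode 1 for larger ones
			 */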
			if (request_size <= musb_ep->wPacketSize)
				musb_ep->dma->bDesiredMode = 0;
			else
				musb_ep->dma->bDesiredMode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->wPacketSize,
					musb_ep->dma->bDesiredMode,
					pRequest->dma, request_size);
			if (use_dma) {
				if (musb_ep->dma->bDesiredMode == 0) {
					/* ASSERT: DMAENAB is clear */
					wCsrVal &= ~(MGC_M_TXCSR_AUTOSET |
							MGC_M_TXCSR_DMAMODE);
					wCsrVal |= (MGC_M_TXCSR_DMAENAB |
							MGC_M_TXCSR_MODE);
					// against programming guide
				} else
					wCsrVal |= (MGC_M_TXCSR_AUTOSET
							| MGC_M_TXCSR_DMAENAB
							| MGC_M_TXCSR_DMAMODE
							| MGC_M_TXCSR_MODE);

				wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
				musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		wCsrVal &= ~(MGC_M_TXCSR_AUTOSET
				| MGC_M_TXCSR_DMAMODE
				| MGC_M_TXCSR_P_UNDERRUN
				| MGC_M_TXCSR_TXPKTRDY);
		wCsrVal |= MGC_M_TXCSR_MODE | MGC_M_TXCSR_DMAENAB;
		musb_writew(epio, MGC_O_HDRC_TXCSR,
			(MGC_M_TXCSR_P_WZC_BITS & ~MGC_M_TXCSR_P_UNDERRUN)
				| wCsrVal);

		/* ensure writebuffer is empty */
		wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->wPacketSize,
				0,
				pRequest->dma,
				pRequest->length);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			/* ASSERT: DMAENAB clear */
			wCsrVal &= ~(MGC_M_TXCSR_DMAMODE | MGC_M_TXCSR_MODE);
			/* invariant: pRequest->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->wPacketSize,
				pRequest->zero,
				pRequest->dma,
				pRequest->length);
#endif
	}
#endif

	if (!use_dma) {
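		/* PIO fallback: copy this packet into the fifo by hand
		 * and mark it ready to transmit
		 */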
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (pRequest->buf + pRequest->actual));
		pRequest->actual += fifo_count;
		wCsrVal |= MGC_M_TXCSR_TXPKTRDY;
		wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
		musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			pRequest->actual, pRequest->length,
			musb_readw(epio, MGC_O_HDRC_TXCSR),
			fifo_count,
			musb_readw(epio, MGC_O_HDRC_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ,  with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			wCsrVal;
	struct usb_request	*pRequest;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	pRequest = next_request(musb_ep);

	wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, wCsrVal);

	dma = is_dma_capable() ? musb_ep->dma : NULL;
	do {
		/* REVISIT for high bandwidth, MGC_M_TXCSR_P_INCOMPTX
		 * probably rates reporting as a host error
		 */
		if (wCsrVal & MGC_M_TXCSR_P_SENTSTALL) {
			wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
			wCsrVal &= ~MGC_M_TXCSR_P_SENTSTALL;
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
				dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
				musb->dma_controller->channel_abort(dma);
			}

			if (pRequest)
				musb_g_giveback(musb_ep, pRequest, -EPIPE);

			break;
		}

		if (wCsrVal & MGC_M_TXCSR_P_UNDERRUN) {
			/* we NAKed, no big deal ... little reason to care */
			wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
			wCsrVal &= ~(MGC_M_TXCSR_P_UNDERRUN
					| MGC_M_TXCSR_TXPKTRDY);
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			DBG(20, "underrun on ep%d, req %p\n", epnum, pRequest);
		}

		if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
			/* SHOULD NOT HAPPEN ... has with cppi though, after
			 * changing SENDSTALL (and other cases); harmless?
			 */
			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
			break;
		}

		if (pRequest) {
			u8	is_dma = 0;

			if (dma && (wCsrVal & MGC_M_TXCSR_DMAENAB)) {
				is_dma = 1;
				wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
				wCsrVal &= ~(MGC_M_TXCSR_DMAENAB
						| MGC_M_TXCSR_P_UNDERRUN
						| MGC_M_TXCSR_TXPKTRDY);
				musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
				/* ensure writebuffer is empty */
				wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
				pRequest->actual += musb_ep->dma->dwActualLength;
				DBG(4, "TXCSR%d %04x, dma off, "
						"len %Zd, req %p\n",
					epnum, wCsrVal,
					musb_ep->dma->dwActualLength,
					pRequest);
			}

			if (is_dma || pRequest->actual == pRequest->length) {

				/* First, maybe a terminating short packet.
				 * Some DMA engines might handle this by
				 * themselves.
				 */
				if ((pRequest->zero
						&& pRequest->length
						&& (pRequest->length
							% musb_ep->wPacketSize)
							== 0)
#ifdef CONFIG_USB_INVENTRA_DMA
					|| (is_dma &&
						((!dma->bDesiredMode) ||
						    (pRequest->actual &
						    (musb_ep->wPacketSize - 1))))
#endif
				) {
					/* on dma completion, fifo may not
					 * be available yet ...
					 */
					if (wCsrVal & MGC_M_TXCSR_TXPKTRDY)
						break;

					DBG(4, "sending zero pkt\n");
					musb_writew(epio, MGC_O_HDRC_TXCSR,
							MGC_M_TXCSR_MODE
							| MGC_M_TXCSR_TXPKTRDY);
					pRequest->zero = 0;
				}

				/* ... or if not, then complete it */
				musb_g_giveback(musb_ep, pRequest, 0);

				/* kickstart next transfer if appropriate;
				 * the packet that just completed might not
				 * be transmitted for hours or days.
				 * REVISIT for double buffering...
				 * FIXME revisit for stalls too...
				 */
				musb_ep_select(mbase, epnum);
				wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
				if (wCsrVal & MGC_M_TXCSR_FIFONOTEMPTY)
					break;
				pRequest = musb_ep->desc
						? next_request(musb_ep)
						: NULL;
				if (!pRequest) {
					DBG(4, "%s idle now\n",
							musb_ep->end_point.name);
					break;
				}
			}

			txstate(musb, to_musb_request(pRequest));
		}

	} while (0);
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\      -> RxReady
	  |	      -> if request queued, call rxstate
	  |		/\	-> setup DMA
	  |		|	     -> DMA interrupt on completion
	  |		|		-> RxReady
	  |		|		      -> stop DMA
	  |		|		      -> ack the read
	  |		|		      -> if data recd = max expected
	  |		|				by the request, or host
	  |		|				sent a short packet,
	  |		|				complete the request,
	  |		|				and start the next one.
	  |		|_____________________________________|
	  |					 else just wait for the host
	  |					    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	u16			wCsrVal = 0;
	const u8		epnum = req->epnum;
	struct usb_request	*pRequest = &req->request;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	u16			fifo_count = 0;
	u16			len = musb_ep->wPacketSize;

	wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);

	if (is_cppi_enabled() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->wPacketSize,
				!pRequest->short_not_ok,
				pRequest->dma + pRequest->actual,
				pRequest->length - pRequest->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
					| MGC_M_RXCSR_DMAMODE);
			wCsrVal |= MGC_M_RXCSR_DMAENAB | MGC_M_RXCSR_P_WZC_BITS;
			musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
			return;
		}
	}

	if (wCsrVal & MGC_M_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
		if (pRequest->actual < pRequest->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_dma_capable() && musb_ep->dma) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in RxCsr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of transfer is signified either by a
	 * short packet, or filling the last byte of the buffer.  (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get a DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq interrupt (RxCsr_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				wCsrVal |= MGC_M_RXCSR_DMAENAB;
#ifdef USE_MODE1
				wCsrVal |= MGC_M_RXCSR_AUTOCLEAR;
//				wCsrVal |= MGC_M_RXCSR_DMAMODE;

				/* this special sequence (enabling and then
				   disabling MGC_M_RXCSR_DMAMODE) is required
				   to get DMAReq to activate
				 */
				musb_writew(epio, MGC_O_HDRC_RXCSR,
					wCsrVal | MGC_M_RXCSR_DMAMODE);
#endif
				musb_writew(epio, MGC_O_HDRC_RXCSR,
						wCsrVal);

				if (pRequest->actual < pRequest->length) {
					int transfer_size = 0;
#ifdef USE_MODE1
					transfer_size = min(pRequest->length,
							channel->dwMaxLength);
#else
					transfer_size = len;
#endif
					if (transfer_size <= musb_ep->wPacketSize)
						musb_ep->dma->bDesiredMode = 0;
					else
						musb_ep->dma->bDesiredMode = 1;

					use_dma = c->channel_program(
							channel,
							musb_ep->wPacketSize,
							channel->bDesiredMode,
							pRequest->dma
							+ pRequest->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#endif	/* Mentor's USB */

			fifo_count = pRequest->length - pRequest->actual;
			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->wPacketSize);

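			/* cap the read at one packet; len holds RXCOUNT,
			 * the byte count actually sitting in the fifo
			 */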
			fifo_count = min(len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && musb_ep->dma) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = pRequest->dma + pRequest->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->wPacketSize,
						channel->bDesiredMode,
						dma_addr,
						fifo_count);
				if (ret == TRUE)
					return;
			}
#endif

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(pRequest->buf + pRequest->actual));
			pRequest->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
			wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
			musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
		}
	}

	/* reached the end, or a short packet was detected */
	if (pRequest->actual == pRequest->length || len < musb_ep->wPacketSize)
		musb_g_giveback(musb_ep, pRequest, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			wCsrVal;
	struct usb_request	*pRequest;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	pRequest = next_request(musb_ep);

	wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			wCsrVal, dma ? " (dma)" : "", pRequest);

	if (wCsrVal & MGC_M_RXCSR_P_SENTSTALL) {
		if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
			dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			pRequest->actual += musb_ep->dma->dwActualLength;
		}

		wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
		wCsrVal &= ~MGC_M_RXCSR_P_SENTSTALL;
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);

		if (pRequest)
			musb_g_giveback(musb_ep, pRequest, -EPIPE);
		goto done;
	}

	if (wCsrVal & MGC_M_RXCSR_P_OVERRUN) {
		// wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
		wCsrVal &= ~MGC_M_RXCSR_P_OVERRUN;
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);

		DBG(3, "%s iso overrun on %p\n", musb_ep->name, pRequest);
		if (pRequest && pRequest->status == -EINPROGRESS)
			pRequest->status = -EOVERFLOW;
	}
	if (wCsrVal & MGC_M_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG((wCsrVal & MGC_M_RXCSR_DMAENAB) ? 4 : 1,
			"%s busy, csr %04x\n",
			musb_ep->end_point.name, wCsrVal);
		goto done;
	}

	if (dma && (wCsrVal & MGC_M_RXCSR_DMAENAB)) {
		wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
				| MGC_M_RXCSR_DMAENAB
				| MGC_M_RXCSR_DMAMODE);
		musb_writew(epio, MGC_O_HDRC_RXCSR,
			MGC_M_RXCSR_P_WZC_BITS | wCsrVal);

		pRequest->actual += musb_ep->dma->dwActualLength;

		DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
			epnum, wCsrVal,
			musb_readw(epio, MGC_O_HDRC_RXCSR),
			musb_ep->dma->dwActualLength, pRequest);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->bDesiredMode == 0)
				|| (dma->dwActualLength
					& (musb_ep->wPacketSize - 1))) {
			/* ack the read! */
			wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
			musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((pRequest->actual < pRequest->length)
				&& (musb_ep->dma->dwActualLength
					== musb_ep->wPacketSize))
			goto done;
#endif
		musb_g_giveback(musb_ep, pRequest, 0);

		pRequest = next_request(musb_ep);
		if (!pRequest)
			goto done;

		/* don't start more i/o till the stall clears */
		musb_ep_select(mbase, epnum);
		wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
		if (wCsrVal & MGC_M_RXCSR_P_SENDSTALL)
			goto done;
	}


	/* analyze request if the ep is hot */
	if (pRequest)
		rxstate(musb, to_musb_request(pRequest));
	else
		DBG(3, "packet waiting for %s%s request\n",
				musb_ep->desc ? "" : "inactive ",
				musb_ep->end_point.name);

done:
	return;
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	/* check direction and (later) maxpacket size against endpoint */
	if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	if (tmp & ~0x07ff)
		goto fail;
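	/* bits 11..12 of wMaxPacketSize encode the high bandwidth
	 * multiplier, which this driver doesn't support (see REVISIT above)
	 */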
	musb_ep->wPacketSize = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (desc->bEndpointAddress & USB_DIR_IN) {
		u16 wIntrTxE = musb_readw(mbase, MGC_O_HDRC_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_tx)
			goto fail;

		wIntrTxE |= (1 << epnum);
		musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		musb_writew(regs, MGC_O_HDRC_TXMAXP, tmp);

		csr = MGC_M_TXCSR_MODE | MGC_M_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MGC_O_HDRC_TXCSR)
				& MGC_M_TXCSR_FIFONOTEMPTY)
			csr |= MGC_M_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MGC_M_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MGC_O_HDRC_TXCSR, csr);

	} else {
		u16 wIntrRxE = musb_readw(mbase, MGC_O_HDRC_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_rx)
			goto fail;

		wIntrRxE |= (1 << epnum);
		musb_writew(mbase, MGC_O_HDRC_INTRRXE, wIntrRxE);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		musb_writew(regs, MGC_O_HDRC_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MGC_O_HDRC_TXCSR);
			csr &= ~(MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY);
			musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
		}

		csr = MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MGC_M_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MGC_M_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
		musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->wPacketSize);

	schedule_work(&musb->irq_work);


fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 wIntrTxE = musb_readw(musb->mregs, MGC_O_HDRC_INTRTXE);
		wIntrTxE &= ~(1 << epnum);
		musb_writew(musb->mregs, MGC_O_HDRC_INTRTXE, wIntrTxE);
		musb_writew(epio, MGC_O_HDRC_TXMAXP, 0);
	} else {
		u16 wIntrRxE = musb_readw(musb->mregs, MGC_O_HDRC_INTRRXE);
		wIntrRxE &= ~(1 << epnum);
		musb_writew(musb->mregs, MGC_O_HDRC_INTRRXE, wIntrRxE);
		musb_writew(epio, MGC_O_HDRC_RXMAXP, 0);
	}

	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	DBG(2, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*pRequest = NULL;

	pRequest = kzalloc(sizeof *pRequest, gfp_flags);
	if (pRequest) {
		INIT_LIST_HEAD(&pRequest->request.list);
		pRequest->request.dma = DMA_ADDR_INVALID;
		pRequest->epnum = musb_ep->current_epnum;
		pRequest->ep = musb_ep;
	}

	return &pRequest->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->bTx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->bTx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*pRequest;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	pRequest = to_musb_request(req);
	pRequest->musb = musb;

	if (pRequest->ep != musb_ep)
		return -EINVAL;

	DBG(4, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	pRequest->request.actual = 0;
	pRequest->request.status = -EINPROGRESS;
	pRequest->epnum = musb_ep->current_epnum;
	pRequest->bTx = musb_ep->is_in;

	if (is_dma_capable() && musb_ep->dma) {
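		/* map (or re-sync) the buffer for DMA; musb_g_giveback()
		 * undoes this mapping when the request completes
		 */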
		if (pRequest->request.dma == DMA_ADDR_INVALID) {
			pRequest->request.dma = dma_map_single(
					musb->controller,
					pRequest->request.buf,
					pRequest->request.length,
					pRequest->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			pRequest->mapped = 1;
		} else {
			dma_sync_single_for_device(musb->controller,
					pRequest->request.dma,
					pRequest->request.length,
					pRequest->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			pRequest->mapped = 0;
		}
	} else if (!req->buf) {
		return -ENODATA;
	} else
		pRequest->mapped = 0;

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		DBG(4, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add pRequest to the list */
	list_add_tail(&(pRequest->request.list), &(musb_ep->req_list));

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &pRequest->request.list == musb_ep->req_list.next)
		musb_ep_restart(musb, pRequest);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *pRequest)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct usb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !pRequest || to_musb_request(pRequest)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == pRequest)
			break;
	}
	if (r != pRequest) {
		DBG(3, "request %p not queued to %s\n", pRequest, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &pRequest->list || musb_ep->busy)
		musb_g_giveback(musb_ep, pRequest, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, pRequest, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, pRequest, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
 * data but will queue requests.
 *
 * exported to ep0 code
 */
int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			wCsr;
	struct musb_request	*pRequest = NULL;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	/* cannot portably stall with non-empty FIFO */
	pRequest = to_musb_request(next_request(musb_ep));
	if (value && musb_ep->is_in) {
		wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
			DBG(3, "%s fifo busy, cannot halt\n", ep->name);
			spin_unlock_irqrestore(&musb->lock, flags);
			return -EAGAIN;
		}

	}

	/* set/clear the stall and toggle bits */
	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY)
			wCsr |= MGC_M_TXCSR_FLUSHFIFO;
		wCsr |= MGC_M_TXCSR_P_WZC_BITS
			| MGC_M_TXCSR_CLRDATATOG;
		if (value)
			wCsr |= MGC_M_TXCSR_P_SENDSTALL;
		else
			wCsr &= ~(MGC_M_TXCSR_P_SENDSTALL
				| MGC_M_TXCSR_P_SENTSTALL);
		wCsr &= ~MGC_M_TXCSR_TXPKTRDY;
		musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
	} else {
		wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
		wCsr |= MGC_M_RXCSR_P_WZC_BITS
			| MGC_M_RXCSR_FLUSHFIFO
			| MGC_M_RXCSR_CLRDATATOG;
		if (value)
			wCsr |= MGC_M_RXCSR_P_SENDSTALL;
		else
			wCsr &= ~(MGC_M_RXCSR_P_SENDSTALL
				| MGC_M_RXCSR_P_SENTSTALL);
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
	}

done:

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && pRequest) {
		DBG(3, "restarting the request\n");
		musb_ep_restart(musb, pRequest);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MGC_O_HDRC_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		nEnd = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[nEnd].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		wCsr, wIntrTxE;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) nEnd);

	/* disable interrupts */
	wIntrTxE = musb_readw(mbase, MGC_O_HDRC_INTRTXE);
	musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << nEnd));

	if (musb_ep->is_in) {
		wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
			wCsr |= MGC_M_TXCSR_FLUSHFIFO | MGC_M_TXCSR_P_WZC_BITS;
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
		}
	} else {
		wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
		wCsr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_P_WZC_BITS;
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/***********************************************************************/

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MGC_O_HDRC_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv.state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MGC_M_DEVCTL_SESSION;
		musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl);
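		/* wait for the session bit to latch, then for the core to
		 * end the SRP session (inferred from the two retry loops)
		 */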
		devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
		retries = 100;
		while (!(devctl & MGC_M_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MGC_M_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MGC_O_HDRC_POWER);
	power |= MGC_M_POWER_RESUME;
	musb_writeb(mregs, MGC_O_HDRC_POWER, power);
	DBG(2, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MGC_O_HDRC_POWER);
	power &= ~MGC_M_POWER_RESUME;
	musb_writeb(mregs, MGC_O_HDRC_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MGC_O_HDRC_POWER);
	if (is_on)
		power |= MGC_M_POWER_SOFTCONN;
	else
		power &= ~MGC_M_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	DBG(3, "gadget %s D+ pullup %s\n",
		musb->gadget_driver->function, is_on ? "on" : "off");
	musb_writeb(musb->mregs, MGC_O_HDRC_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	DBG(2, "<= %s =>\n", __FUNCTION__);

	// FIXME iff driver's softconnect flag is set (as it is during probe,
	// though that can clear it), just musb_pullup().

	return -EINVAL;
}

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	/* FIXME -- delegate to otg_transciever logic */

	DBG(2, "<= vbus_draw %u =>\n", mA);
	return 0;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv.set_power)
		return -EOPNOTSUPP;
	return otg_set_power(&musb->xceiv, mA);
}

static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	//.vbus_session		= musb_gadget_vbus_session,
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
};

/****************************************************************
 * Registration operations
 ****************************************************************/

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */
static struct musb *the_gadget;

static void musb_gadget_release(struct device *dev)
{
	// kref_put(WHAT)
	dev_dbg(dev, "%s\n", __FUNCTION__);
}


static void __init
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
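		/* ep0 is the control endpoint; high speed devices always
		 * use a 64 byte control maxpacket
		 */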
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __init musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int __init musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */
	if (the_gadget)
		return -EBUSY;
	the_gadget = musb;

	musb->g.ops = &musb_gadget_operations;
	musb->g.is_dualspeed = 1;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	strcpy(musb->g.dev.bus_id, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = device_register(&musb->g.dev);
	if (status != 0)
		the_gadget = NULL;
	return status;
}

void musb_gadget_cleanup(struct musb *musb)
{
	if (musb != the_gadget)
		return;

	device_unregister(&musb->g.dev);
	the_gadget = NULL;
}

/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int retval;
	unsigned long flags;
	struct musb *musb = the_gadget;

	if (!driver
			|| driver->speed != USB_SPEED_HIGH
			|| !driver->bind
			|| !driver->setup)
		return -EINVAL;

	/* driver must be initialized to support peripheral mode */
	if (!musb || !(musb->board_mode == MUSB_OTG
				|| musb->board_mode == MUSB_PERIPHERAL)) {
		DBG(1, "%s, no dev??\n", __FUNCTION__);
		return -ENODEV;
	}

	DBG(3, "registering driver %s\n", driver->function);
	spin_lock_irqsave(&musb->lock, flags);

	if (musb->gadget_driver) {
		DBG(1, "%s is already bound to %s\n",
				musb_driver_name,
				musb->gadget_driver->driver.name);
		retval = -EBUSY;
	} else {
		musb->gadget_driver = driver;
		musb->g.dev.driver = &driver->driver;
		driver->driver.bus = NULL;
		musb->softconnect = 1;
		retval = 0;
	}

	spin_unlock_irqrestore(&musb->lock, flags);

	if (retval == 0)
		retval = driver->bind(&musb->g);
	if (retval != 0) {
		DBG(3, "bind to driver %s failed --> %d\n",
			driver->driver.name, retval);
		musb->gadget_driver = NULL;
		musb->g.dev.driver = NULL;
	}

	/* start peripheral and/or OTG engines */
	if (retval == 0) {
		spin_lock_irqsave(&musb->lock, flags);

		/* REVISIT always use otg_set_peripheral(), handling
		 * issues including the root hub one below ...
		 */
		musb->xceiv.gadget = &musb->g;
		musb->xceiv.state = OTG_STATE_B_IDLE;
		musb->is_active = 1;

		/* FIXME this ignores the softconnect flag.  Drivers are
		 * allowed to hold the peripheral inactive until, for example,
		 * userspace hooks up printer hardware or DSP codecs, so
		 * hosts only see fully functional devices.
		 */

		if (!is_otg_enabled(musb))
			musb_start(musb);

		spin_unlock_irqrestore(&musb->lock, flags);

		if (is_otg_enabled(musb)) {
			DBG(3, "OTG startup...\n");

			/* REVISIT:  funcall to other code, which also
			 * handles power budgeting ... this way also
			 * ensures HdrcStart is indirectly called.
			 */
			retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
			if (retval < 0) {
				DBG(1, "add_hcd failed, %d\n", retval);
				spin_lock_irqsave(&musb->lock, flags);
				musb->xceiv.gadget = NULL;
				musb->xceiv.state = OTG_STATE_UNDEFINED;
				musb->gadget_driver = NULL;
				musb->g.dev.driver = NULL;
				spin_unlock_irqrestore(&musb->lock, flags);
			}
		}
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
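
/* Usage sketch, not part of this driver: a gadget driver registers
 * itself from its module init code.  The g_example_* names here are
 * hypothetical; a real driver also fills in unbind, disconnect, etc.
 *
 *	static struct usb_gadget_driver g_example_driver = {
 *		.function	= "g_example",
 *		.speed		= USB_SPEED_HIGH,
 *		.bind		= g_example_bind,
 *		.setup		= g_example_setup,
 *		.driver		= { .name = "g_example" },
 *	};
 *
 *	static int __init g_example_init(void)
 *	{
 *		return usb_gadget_register_driver(&g_example_driver);
 *	}
 */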

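/* caller must hold musb->lock; the lock is dropped and re-acquired
 * here only around the driver->disconnect() callback
 */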
static void
stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}

		spin_unlock(&musb->lock);
		driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 * @return <0 if error, 0 if everything is fine
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	unsigned long	flags;
	int		retval = 0;
	struct musb	*musb = the_gadget;

	if (!driver || !driver->unbind || !musb)
		return -EINVAL;

	/* REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

#ifdef	CONFIG_USB_MUSB_OTG
	musb_hnp_stop(musb);
#endif

	if (musb->gadget_driver == driver) {
		musb->xceiv.state = OTG_STATE_UNDEFINED;
		stop_activity(musb, driver);

		DBG(3, "unregistering driver %s\n", driver->function);
		spin_unlock_irqrestore(&musb->lock, flags);
		driver->unbind(&musb->g);
		spin_lock_irqsave(&musb->lock, flags);

		musb->gadget_driver = NULL;
		musb->g.dev.driver = NULL;

		musb->is_active = 0;
		musb_platform_try_idle(musb, 0);
	} else
		retval = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb) && retval == 0) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	return retval;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
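
/* Usage sketch, matching the hypothetical g_example driver shown after
 * usb_gadget_register_driver() above: module exit unregisters, which
 * runs stop_activity() and then the driver's unbind() callback.
 *
 *	static void __exit g_example_exit(void)
 *	{
 *		usb_gadget_unregister_driver(&g_example_driver);
 *	}
 */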


/***********************************************************************/

/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv.state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARN("unhandled RESUME transition (%s)\n",
				otg_state_string(musb));
	}
}

/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MGC_O_HDRC_DEVCTL);
	DBG(3, "devctl %02x\n", devctl);

	switch (musb->xceiv.state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MGC_M_DEVCTL_VBUS) == MGC_M_DEVCTL_VBUS)
			musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARN("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb));
	}
}
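
/* Driver-side sketch, again using the hypothetical g_example driver:
 * suspend() and resume() callbacks run with musb->lock dropped, so they
 * may call back into the controller, but they are still invoked from
 * IRQ context and must not sleep.  A bus-powered function might cut its
 * VBUS budget to the 2.5 mA suspend limit:
 *
 *	static void g_example_suspend(struct usb_gadget *gadget)
 *	{
 *		usb_gadget_vbus_draw(gadget, 2);
 *	}
 */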

/* Called during SRP. Caller must hold lock */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}

/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);

	DBG(3, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl & MGC_M_DEVCTL_SESSION);

	/* don't draw VBUS until a new b-default session begins */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv.state) {
	default:
#ifdef	CONFIG_USB_MUSB_OTG
		musb->xceiv.state = OTG_STATE_A_IDLE;
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
#endif
	case OTG_STATE_B_PERIPHERAL:
		musb->xceiv.state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}

void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MGC_O_HDRC_DEVCTL);
	u8		power;

	DBG(3, "<== %s addr=%x driver '%s'\n",
			(devctl & MGC_M_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MGC_O_HDRC_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MGC_M_DEVCTL_HR)
		musb_writeb(mbase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MGC_O_HDRC_POWER);
	musb->g.speed = (power & MGC_M_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MGC_END0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MGC_M_DEVCTL_BDEVICE) {
		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
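	/* (per the OTG supplement an unconfigured B-peripheral may draw
	 * at most 8 mA, while standard USB allows up to 100 mA before
	 * the device is configured)
	 */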
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
}