Commit 23da5183 authored by David Brownell, committed by Tony Lindgren

musb_hdrc: fix some bugs in the updated host side code

 - Remove an oops in the queue fault cleanup path, caused by using the
   wrong giveback routine (heavy weight vs lighter-weight).  Move a few
   key bits of code into that lighter weight version.

 - Trigger that lighter-weight cleanup during URB completion, when the
   URB in question can't yet be handed off to the hardware even if
   it's at the queue head.

 - Reduce syslog noise when those fault cleanup paths are triggered from
   scatterlist calls by usb-storage.

 - Paranoia: when scheduling, insist that the endpoint fifo isn't smaller
   than required.

Oops observed courtesy of a slightly flakey usb-storage device, which
once again proves itself handy for testing.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
parent e9b7cce8
...@@ -243,14 +243,12 @@ start: ...@@ -243,14 +243,12 @@ start:
} }
} }
/* caller owns no controller locks, irqs are blocked */ /* caller owns controller lock, irqs are blocked */
static inline void static void
__musb_giveback(struct musb_hw_ep *hw_ep, struct urb *urb, int status) __musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(urb->lock) __releases(musb->Lock)
__acquires(urb->lock) __acquires(musb->Lock)
{ {
struct musb *musb = hw_ep->musb;
if ((urb->transfer_flags & URB_SHORT_NOT_OK) if ((urb->transfer_flags & URB_SHORT_NOT_OK)
&& (urb->actual_length < urb->transfer_buffer_length) && (urb->actual_length < urb->transfer_buffer_length)
&& status == 0 && status == 0
...@@ -270,6 +268,7 @@ __acquires(urb->lock) ...@@ -270,6 +268,7 @@ __acquires(urb->lock)
/* common/boring faults */ /* common/boring faults */
case -EREMOTEIO: case -EREMOTEIO:
case -ESHUTDOWN: case -ESHUTDOWN:
case -ECONNRESET:
case -EPIPE: case -EPIPE:
level = 3; level = 3;
break; break;
...@@ -285,7 +284,9 @@ __acquires(urb->lock) ...@@ -285,7 +284,9 @@ __acquires(urb->lock)
urb->actual_length, urb->transfer_buffer_length urb->actual_length, urb->transfer_buffer_length
); );
spin_unlock(&musb->Lock);
usb_hcd_giveback_urb(musb_to_hcd(musb), urb, musb->int_regs); usb_hcd_giveback_urb(musb_to_hcd(musb), urb, musb->int_regs);
spin_lock(&musb->Lock);
} }
/* for bulk/interrupt endpoints only */ /* for bulk/interrupt endpoints only */
...@@ -323,8 +324,6 @@ static inline void musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb ...@@ -323,8 +324,6 @@ static inline void musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb
/* caller owns controller lock, irqs are blocked */ /* caller owns controller lock, irqs are blocked */
static struct musb_qh * static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status) musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
__releases(qh->hw_ep->musb->Lock)
__acquires(qh->hw_ep->musb->Lock)
{ {
int is_in; int is_in;
struct musb_hw_ep *ep = qh->hw_ep; struct musb_hw_ep *ep = qh->hw_ep;
...@@ -349,9 +348,7 @@ __acquires(qh->hw_ep->musb->Lock) ...@@ -349,9 +348,7 @@ __acquires(qh->hw_ep->musb->Lock)
} }
qh->is_ready = 0; qh->is_ready = 0;
spin_unlock(&musb->Lock); __musb_giveback(musb, urb, status);
__musb_giveback(ep, urb, status);
spin_lock(&musb->Lock);
qh->is_ready = ready; qh->is_ready = ready;
/* reclaim resources (and bandwidth) ASAP; deschedule it, and /* reclaim resources (and bandwidth) ASAP; deschedule it, and
...@@ -1744,7 +1741,7 @@ static int musb_schedule( ...@@ -1744,7 +1741,7 @@ static int musb_schedule(
else else
diff = hw_ep->wMaxPacketSizeTx - qh->maxpacket; diff = hw_ep->wMaxPacketSizeTx - qh->maxpacket;
if (wBestDiff > diff) { if (diff > 0 && wBestDiff > diff) {
wBestDiff = diff; wBestDiff = diff;
nBestEnd = nEnd; nBestEnd = nEnd;
} }
...@@ -2018,7 +2015,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) ...@@ -2018,7 +2015,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
* has already been updated. This is a synchronous abort; it'd be * has already been updated. This is a synchronous abort; it'd be
* OK to hold off until after some IRQ, though. * OK to hold off until after some IRQ, though.
*/ */
if (urb->urb_list.prev != &qh->hep->urb_list) if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
status = -EINPROGRESS; status = -EINPROGRESS;
else { else {
switch (qh->type) { switch (qh->type) {
...@@ -2043,8 +2040,12 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) ...@@ -2043,8 +2040,12 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
if (status < 0 || (sched && qh != first_qh(sched))) { if (status < 0 || (sched && qh != first_qh(sched))) {
status = -EINPROGRESS; int ready = qh->is_ready;
musb_giveback(qh, urb, 0);
status = 0;
qh->is_ready = 0;
__musb_giveback(musb, urb, 0);
qh->is_ready = ready;
} else } else
status = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); status = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done: done:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment