Commit de07c29a authored by Stephen Rothwell

Merge branch 'quilt/usb.current'

parents 3f06326e b80dedba
@@ -59,6 +59,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>

@@ -609,6 +610,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
	acm->throttle = 0;
	tasklet_schedule(&acm->urb_task);
	set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
	rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
	mutex_unlock(&acm->mutex);
...
@@ -313,8 +313,13 @@ static ssize_t wdm_write
	r = usb_autopm_get_interface(desc->intf);
	if (r < 0)
		goto outnp;
-	r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
-							   &desc->flags));
	if (!file->f_flags && O_NONBLOCK)
		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
							   &desc->flags));
	else
		if (test_bit(WDM_IN_USE, &desc->flags))
			r = -EAGAIN;
	if (r < 0)
		goto out;
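One thing worth flagging in the hunk above: as written, "if (!file->f_flags && O_NONBLOCK)" negates the whole f_flags word and then logically ANDs it with the (nonzero) O_NONBLOCK constant, so the branch is taken only when f_flags happens to be 0; the intent is presumably the bitwise test "!(file->f_flags & O_NONBLOCK)". A minimal standalone sketch of the difference (plain userspace C, not the driver code):

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* A few representative flag words an open file might carry. */
	int samples[] = { 0, O_RDWR, O_RDWR | O_NONBLOCK };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int flags = samples[i];
		int as_committed = (!flags && O_NONBLOCK);	/* true only when flags == 0 */
		int as_intended = !(flags & O_NONBLOCK);	/* true when O_NONBLOCK is clear */

		printf("flags=%#x  committed=%d  intended=%d\n",
		       (unsigned)flags, as_committed, as_intended);
	}
	return 0;
}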
@@ -377,7 +382,7 @@ outnl:
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
-	int rv, cntr;
	int rv, cntr = 0;
	int i = 0;
	struct wdm_device *desc = file->private_data;

@@ -389,10 +394,23 @@ static ssize_t wdm_read
	if (desc->length == 0) {
		desc->read = 0;
retry:
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		i++;
-		rv = wait_event_interruptible(desc->wait,
-				test_bit(WDM_READ, &desc->flags));
		if (file->f_flags & O_NONBLOCK) {
			if (!test_bit(WDM_READ, &desc->flags)) {
				rv = cntr ? cntr : -EAGAIN;
				goto err;
			}
			rv = 0;
		} else {
			rv = wait_event_interruptible(desc->wait,
					test_bit(WDM_READ, &desc->flags));
		}
		/* may have happened while we slept */
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;

@@ -448,7 +466,7 @@ retry:
err:
	mutex_unlock(&desc->rlock);
-	if (rv < 0)
	if (rv < 0 && rv != -EAGAIN)
		dev_err(&desc->intf->dev, "wdm_read: exit error\n");
	return rv;
}
...
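The read path above now honors O_NONBLOCK: with the flag set and no data buffered it returns whatever has already been copied (cntr) or -EAGAIN, and the exit path no longer logs -EAGAIN as an error. A small userspace sketch of how a caller is expected to treat that contract (generic descriptor and a stand-in device path, not the wdm device itself):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read from a non-blocking descriptor, treating EAGAIN as "no data yet"
 * instead of a hard error.
 */
static ssize_t read_nonblock(int fd, void *buf, size_t len)
{
	ssize_t n = read(fd, buf, len);

	if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		return 0;	/* nothing available right now */
	return n;		/* > 0 data, 0 EOF, < 0 real error */
}

int main(void)
{
	char buf[64];
	int fd = open("/dev/null", O_RDONLY | O_NONBLOCK);	/* stand-in for a char device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("read returned %zd\n", read_nonblock(fd, buf, sizeof(buf)));
	close(fd);
	return 0;
}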
@@ -105,7 +105,7 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
	ep->ss_ep_comp->extralen = i;
	buffer += i;
	size -= i;
-	retval = buffer - buffer_start + i;
	retval = buffer - buffer_start;
	if (num_skipped > 0)
		dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
			num_skipped, plural(num_skipped),
...
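The one-line change above removes a double count: buffer has already been advanced by i two lines earlier, so buffer - buffer_start already includes those skipped bytes. A tiny standalone illustration of the pointer arithmetic (made-up byte counts):

#include <stdio.h>

int main(void)
{
	char data[32];
	char *buffer_start = data;
	char *buffer = data;
	int parsed = 10;	/* bytes consumed before this point */
	int i = 5;		/* extra descriptor bytes just skipped */

	buffer += parsed;
	buffer += i;		/* as in the driver: buffer already advances by i */

	printf("buffer - buffer_start     = %td (correct)\n", buffer - buffer_start);
	printf("buffer - buffer_start + i = %td (counts i twice)\n",
	       buffer - buffer_start + i);
	return 0;
}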
@@ -719,8 +719,12 @@ retry:
		/* port status seems weird until after reset, so
		 * force the reset and make khubd clean up later.
		 */
-		sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
-			| (1 << USB_PORT_FEAT_CONNECTION);
		if (sl811->stat_insrmv & 1)
			sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
		else
			sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
		sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
	} else if (irqstat & SL11H_INTMASK_RD) {
		if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) {
...
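The sl811 hunk stops reporting a connected device on every insert/remove interrupt: it now sets the connect-status bit only when bit 0 of stat_insrmv says a device is actually present, clears it otherwise, and always raises the connect-change bit. A standalone sketch of that bit handling, using the standard hub feature selector values (PORT_CONNECTION is 0, C_PORT_CONNECTION is 16):

#include <stdio.h>

#define USB_PORT_FEAT_CONNECTION	0
#define USB_PORT_FEAT_C_CONNECTION	16

/* Mirror of the fixed logic: report a connection only when the insert/remove
 * status says a device is attached, but always flag a connect change.
 */
static unsigned int update_port1(unsigned int port1, unsigned int stat_insrmv)
{
	if (stat_insrmv & 1)
		port1 |= 1 << USB_PORT_FEAT_CONNECTION;
	else
		port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
	port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
	return port1;
}

int main(void)
{
	printf("insert: port1 = %#x\n", update_port1(0, 1));
	printf("remove: port1 = %#x\n",
	       update_port1(1 << USB_PORT_FEAT_CONNECTION, 0));
	return 0;
}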
@@ -413,7 +413,8 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
	int i;
	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
-	dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
	dma_addr_t dma = ctx->dma +
		((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
	xhci_dbg(xhci, "Slot Context:\n");

@@ -459,7 +460,7 @@ void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
	for (i = 0; i < last_ep_ctx; ++i) {
		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
		dma_addr_t dma = ctx->dma +
-			((unsigned long)ep_ctx - (unsigned long)ctx);
			((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
...
@@ -22,12 +22,18 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails

@@ -214,6 +220,12 @@ int xhci_init(struct usb_hcd *hcd)
	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");
@@ -555,13 +567,22 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
-static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
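For reference, the two helpers above map between endpoint context indexes and the add/drop flag bits of the input control context: the slot context owns bit 0, endpoint context index N owns bit N + 1, and fls() walks back from the highest set flag. A standalone sketch of both directions (a GCC-style builtin stands in for the kernel's fls()):

#include <stdio.h>

/* Slot context is bit 0; endpoint context index N is bit N + 1. */
static unsigned int ep_flag_from_index(unsigned int ep_index)
{
	return 1u << (ep_index + 1);
}

/* Find-last-set, matching the kernel's fls(): fls(0b1000) == 4. */
static int fls_u32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Highest valid endpoint context index, i.e. position of the top flag minus one. */
static unsigned int last_valid_endpoint(unsigned int added_ctxs)
{
	return fls_u32(added_ctxs) - 1;
}

int main(void)
{
	/* ep 1 IN has endpoint context index 2 in this driver's numbering,
	 * so its add flag is bit 3 (0b1000), as in the comment above.
	 */
	unsigned int flags = ep_flag_from_index(2);

	printf("add flags = %#x\n", flags);
	printf("last valid endpoint context index = %u\n", last_valid_endpoint(flags));
	return 0;
}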
@@ -589,6 +610,70 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_virt_device *virt_dev,
bool ctx_change);
/*
* Full speed devices may have a max packet size greater than 8 bytes, but the
* USB core doesn't know that until it reads the first 8 bytes of the
* descriptor. If the usb_device's max packet size changes after that point,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, struct urb *urb)
{
struct xhci_container_ctx *in_ctx;
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
int max_packet_size;
int hw_max_packet_size;
int ret = 0;
out_ctx = xhci->devs[slot_id]->out_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
if (hw_max_packet_size != max_packet_size) {
xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
max_packet_size);
xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
hw_max_packet_size);
xhci_dbg(xhci, "Issuing evaluate context command.\n");
/* Set up the modified control endpoint 0 */
xhci_endpoint_copy(xhci, xhci->devs[slot_id], ep_index);
in_ctx = xhci->devs[slot_id]->in_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
/* Set up the input context flags for the command */
/* FIXME: This won't work if a non-default control endpoint
* changes max packet sizes.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
ctrl_ctx->add_flags = EP0_FLAG;
ctrl_ctx->drop_flags = 0;
xhci_dbg(xhci, "Slot %d input context\n", slot_id);
xhci_dbg_ctx(xhci, in_ctx, ep_index);
xhci_dbg(xhci, "Slot %d output context\n", slot_id);
xhci_dbg_ctx(xhci, out_ctx, ep_index);
ret = xhci_configure_endpoint(xhci, urb->dev,
xhci->devs[slot_id], true);
/* Clean up the input context for later use by bandwidth
* functions.
*/
ctrl_ctx->add_flags = SLOT_FLAG;
}
return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it

@@ -600,13 +685,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

-	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");

@@ -619,19 +704,38 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
		ret = -ESHUTDOWN;
		goto exit;
	}
-	if (usb_endpoint_xfer_control(&urb->ep->desc))
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
-	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
-	else
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		ret = -EINVAL;
	}
exit:
-	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
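xhci_check_maxpacket(), called from the control-transfer branch above for full-speed devices, compares the max packet size the USB core discovered against what the hardware's ep0 context currently encodes in bits 31:16 of ep_info2, and issues an Evaluate Context command when they differ. A standalone sketch of just the field manipulation, reusing the MAX_PACKET* macros added to xhci.h later in this diff (the mask gets a u suffix here to keep the arithmetic unsigned):

#include <stdio.h>

#define MAX_PACKET(p)		(((p) & 0xffff) << 16)
#define MAX_PACKET_MASK		(0xffffu << 16)
#define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)

int main(void)
{
	unsigned int ep_info2 = MAX_PACKET(8);	/* hardware still assumes 8 bytes */
	unsigned int max_packet_size = 64;	/* what the device's descriptor reported */

	if (MAX_PACKET_DECODED(ep_info2) != max_packet_size) {
		/* Same rewrite the driver does before the evaluate context command. */
		ep_info2 &= ~MAX_PACKET_MASK;
		ep_info2 |= MAX_PACKET(max_packet_size);
	}
	printf("ep_info2 = %#x, decoded max packet = %u\n",
	       ep_info2, MAX_PACKET_DECODED(ep_info2));
	return 0;
}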
@@ -930,6 +1034,122 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_virt_device *virt_dev)
{
int ret;
switch (virt_dev->cmd_status) {
case COMP_ENOMEM:
dev_warn(&udev->dev, "Not enough host controller resources "
"for new device state.\n");
ret = -ENOMEM;
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
/* FIXME: can we go back to the old state? */
break;
case COMP_TRB_ERR:
/* the HCD set up something wrong */
dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
"add flag = 1, "
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
ret = 0;
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", virt_dev->cmd_status);
ret = -EINVAL;
break;
}
return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_virt_device *virt_dev)
{
int ret;
switch (virt_dev->cmd_status) {
case COMP_EINVAL:
dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
"context command.\n");
ret = -EINVAL;
break;
case COMP_EBADSLT:
dev_warn(&udev->dev, "WARN: slot not enabled for"
"evaluate context command.\n");
case COMP_CTX_STATE:
dev_warn(&udev->dev, "WARN: invalid context state for "
"evaluate context command.\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful evaluate context command\n");
ret = 0;
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", virt_dev->cmd_status);
ret = -EINVAL;
break;
}
return ret;
}
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_virt_device *virt_dev,
bool ctx_change)
{
int ret;
int timeleft;
unsigned long flags;
spin_lock_irqsave(&xhci->lock, flags);
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
else
ret = xhci_queue_evaluate_context(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
if (ret < 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the configure endpoint command to complete */
timeleft = wait_for_completion_interruptible_timeout(
&virt_dev->cmd_completion,
USB_CTRL_SET_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for %s command\n",
timeleft == 0 ? "Timeout" : "Signal",
ctx_change == 0 ?
"configure endpoint" :
"evaluate context");
/* FIXME cancel the configure endpoint command */
return -ETIME;
}
if (!ctx_change)
return xhci_configure_endpoint_result(xhci, udev, virt_dev);
return xhci_evaluate_context_result(xhci, udev, virt_dev);
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().

@@ -944,8 +1164,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
-	int timeleft;
-	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;

@@ -975,56 +1193,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
-	spin_lock_irqsave(&xhci->lock, flags);
-	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
-			udev->slot_id);
-	if (ret < 0) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
-		return -ENOMEM;
-	}
-	xhci_ring_cmd_db(xhci);
-	spin_unlock_irqrestore(&xhci->lock, flags);
-	/* Wait for the configure endpoint command to complete */
-	timeleft = wait_for_completion_interruptible_timeout(
-			&virt_dev->cmd_completion,
-			USB_CTRL_SET_TIMEOUT);
-	if (timeleft <= 0) {
-		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
-				timeleft == 0 ? "Timeout" : "Signal");
-		/* FIXME cancel the configure endpoint command */
-		return -ETIME;
-	}
-	switch (virt_dev->cmd_status) {
-	case COMP_ENOMEM:
-		dev_warn(&udev->dev, "Not enough host controller resources "
-				"for new device state.\n");
-		ret = -ENOMEM;
-		/* FIXME: can we allocate more resources for the HC? */
-		break;
-	case COMP_BW_ERR:
-		dev_warn(&udev->dev, "Not enough bandwidth "
-				"for new device state.\n");
-		ret = -ENOSPC;
-		/* FIXME: can we go back to the old state? */
-		break;
-	case COMP_TRB_ERR:
-		/* the HCD set up something wrong */
-		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
-				"and endpoint is not disabled.\n");
-		ret = -EINVAL;
-		break;
-	case COMP_SUCCESS:
-		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
-		break;
-	default:
-		xhci_err(xhci, "ERROR: unexpected command completion "
-				"code 0x%x.\n", virt_dev->cmd_status);
-		ret = -EINVAL;
-		break;
-	}
	ret = xhci_configure_endpoint(xhci, udev, virt_dev, false);
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;

@@ -1075,6 +1244,75 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	xhci_zero_in_ctx(xhci, virt_dev);
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
struct xhci_container_ctx *in_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
u32 added_ctxs;
dma_addr_t addr;
xhci_endpoint_copy(xhci, xhci->devs[slot_id], ep_index);
in_ctx = xhci->devs[slot_id]->in_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
deq_state->new_deq_ptr);
if (addr == 0) {
xhci_warn(xhci, "WARN Cannot submit config ep after "
"reset ep command\n");
xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
deq_state->new_deq_seg,
deq_state->new_deq_ptr);
return;
}
ep_ctx->deq = addr | deq_state->new_cycle_state;
xhci_slot_copy(xhci, xhci->devs[slot_id]);
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
ctrl_ctx->add_flags = added_ctxs | SLOT_FLAG;
ctrl_ctx->drop_flags = added_ctxs;
xhci_dbg(xhci, "Slot ID %d Input Context:\n", slot_id);
xhci_dbg_ctx(xhci, in_ctx, ep_index);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev,
unsigned int ep_index, struct xhci_ring *ep_ring)
{
struct xhci_dequeue_state deq_state;
xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
*/
xhci_find_new_dequeue_state(xhci, udev->slot_id,
ep_index, ep_ring->stopped_td,
&deq_state);
/* HW with the reset endpoint quirk will use the saved dequeue state to
* issue a configure endpoint command later.
*/
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
xhci_dbg(xhci, "Queueing new dequeue state\n");
xhci_queue_new_dequeue_state(xhci, ep_ring,
udev->slot_id,
ep_index, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
* reset endpoint completion!
*/
xhci_dbg(xhci, "Setting up input context for "
"configure endpoint command\n");
xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
ep_index, &deq_state);
}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of

@@ -1089,7 +1327,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
	unsigned int ep_index;
	unsigned long flags;
	int ret;
-	struct xhci_dequeue_state deq_state;
	struct xhci_ring *ep_ring;

	xhci = hcd_to_xhci(hcd);

@@ -1106,6 +1343,10 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);

@@ -1116,16 +1357,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
-		xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
-		/* We need to move the HW's dequeue pointer past this TD,
-		 * or it will attempt to resend it on the next doorbell ring.
-		 */
-		xhci_find_new_dequeue_state(xhci, udev->slot_id,
-				ep_index, ep_ring->stopped_td, &deq_state);
-		xhci_dbg(xhci, "Queueing new dequeue state\n");
-		xhci_queue_new_dequeue_state(xhci, ep_ring,
-				udev->slot_id,
-				ep_index, &deq_state);
		xhci_cleanup_stalled_ring(xhci, udev, ep_index, ep_ring);
		kfree(ep_ring->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
...
@@ -94,6 +94,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
@@ -398,15 +401,28 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * See section 4.3 bullet 6:
	 * The default Max Packet size for ep0 is "8 bytes for a USB2
	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
	 * XXX: Not sure about wireless USB devices.
	 */
-	if (udev->speed == USB_SPEED_SUPER)
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
-	else
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		break;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
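The switch above picks the initial ep0 max packet size per speed: 512 for SuperSpeed, 64 for high speed, 64 as the USB core's first guess for full speed (revised later through the evaluate-context path), and 8 for low speed. A minimal standalone version of that mapping (local enum, not the kernel's usb_device_speed):

#include <stdio.h>

enum usb_speed { SPEED_LOW, SPEED_FULL, SPEED_HIGH, SPEED_SUPER };

/* Initial ep0 max packet size programmed into the endpoint context,
 * mirroring the switch added in xhci_setup_addressable_virt_dev().
 */
static unsigned int ep0_initial_max_packet(enum usb_speed speed)
{
	switch (speed) {
	case SPEED_SUPER:
		return 512;
	case SPEED_HIGH:
	case SPEED_FULL:	/* first guess; fixed up later if the device says otherwise */
		return 64;
	case SPEED_LOW:
		return 8;
	}
	return 0;
}

int main(void)
{
	printf("LS=%u FS=%u HS=%u SS=%u\n",
	       ep0_initial_max_packet(SPEED_LOW),
	       ep0_initial_max_packet(SPEED_FULL),
	       ep0_initial_max_packet(SPEED_HIGH),
	       ep0_initial_max_packet(SPEED_SUPER));
	return 0;
}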
@@ -598,6 +614,44 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
	 */
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
*/
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_virt_device *vdev, unsigned int ep_index)
{
struct xhci_ep_ctx *out_ep_ctx;
struct xhci_ep_ctx *in_ep_ctx;
out_ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
in_ep_ctx = xhci_get_ep_ctx(xhci, vdev->in_ctx, ep_index);
in_ep_ctx->ep_info = out_ep_ctx->ep_info;
in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
in_ep_ctx->deq = out_ep_ctx->deq;
in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command. Only the context entries field matters,
* but we'll copy the whole thing anyway.
*/
void xhci_slot_copy(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
struct xhci_slot_ctx *in_slot_ctx;
struct xhci_slot_ctx *out_slot_ctx;
in_slot_ctx = xhci_get_slot_ctx(xhci, vdev->in_ctx);
out_slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
in_slot_ctx->dev_info = out_slot_ctx->dev_info;
in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
in_slot_ctx->tt_info = out_slot_ctx->tt_info;
in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
...
@@ -24,6 +24,10 @@

#include "xhci.h"

/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000

static const char hcd_name[] = "xhci_hcd";

/* called after powerup, by probe or system-pm "wakeup" */

@@ -62,6 +66,15 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	/* Look for vendor-specific quirks */
	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
			pdev->revision == 0x0) {
		xhci->quirks |= XHCI_RESET_EP_QUIRK;
		xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
				" endpoint cmd after reset endpoint\n");
	}
	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
...
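The probe-time check above keys the reset-endpoint quirk off a specific vendor/device/revision triple. A small standalone sketch of the same matching, with a plain struct standing in for struct pci_dev:

#include <stdio.h>

#define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
#define XHCI_RESET_EP_QUIRK		(1 << 1)	/* as in the xhci.h hunk below */

struct pci_id {
	unsigned short vendor;
	unsigned short device;
	unsigned char revision;
};

static unsigned int quirks_for(const struct pci_id *id)
{
	unsigned int quirks = 0;

	if (id->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
	    id->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
	    id->revision == 0x0)
		quirks |= XHCI_RESET_EP_QUIRK;
	return quirks;
}

int main(void)
{
	struct pci_id pdk = { PCI_VENDOR_ID_FRESCO_LOGIC, PCI_DEVICE_ID_FRESCO_LOGIC_PDK, 0 };
	struct pci_id other = { 0x1234, 0x5678, 1 };	/* arbitrary non-matching device */

	printf("PDK quirks   = %#x\n", quirks_for(&pdk));
	printf("other quirks = %#x\n", quirks_for(&other));
	return 0;
}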
@@ -172,8 +172,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set.
- * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{

@@ -191,8 +192,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
-				next->link.control &= ~TRB_CHAIN;
-				next->link.control |= chain;
				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &= ~TRB_CHAIN;
					next->link.control |= chain;
				}
				/* Give this link TRB to the hardware */
				wmb();
				if (next->link.control & TRB_CYCLE)
@@ -462,7 +469,6 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
	 * ring running.
	 */
	ep_ring->state |= SET_DEQ_PENDING;
-	xhci_ring_cmd_db(xhci);
}

/*

@@ -531,6 +537,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci, ep_ring,
				slot_id, ep_index, &deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise just ring the doorbell to restart the ring */
		ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -644,18 +651,31 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
{
	int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			(unsigned int) GET_COMP_CODE(event->status));

-	/* Clear our internal halted state and restart the ring */
-	xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
-	ring_ep_doorbell(xhci, slot_id, ep_index);
	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring */
		ep_ring->state &= ~EP_HALTED;
		ring_ep_doorbell(xhci, slot_id, ep_index);
	}
}
static void handle_cmd_completion(struct xhci_hcd *xhci,

@@ -664,6 +684,10 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,

@@ -691,6 +715,41 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
		xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
/*
* Configure endpoint commands can come from the USB core
* configuration or alt setting changes, or because the HW
* needed an extra configure endpoint command after a reset
* endpoint command. In the latter case, the xHCI driver is
* not waiting on the configure endpoint command.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci,
xhci->devs[slot_id]->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
if (!ep_ring) {
/* This must have been an initial configure endpoint */
xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);
break;
}
ep_state = ep_ring->state;
xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
"state = %d\n", ep_index, ep_state);
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
ep_state & EP_HALTED) {
/* Clear our internal halted state and restart ring */
xhci->devs[slot_id]->ep_rings[ep_index]->state &=
~EP_HALTED;
ring_ep_doorbell(xhci, slot_id, ep_index);
} else {
xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);
}
break;
case TRB_TYPE(TRB_EVAL_CONTEXT):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
@@ -806,6 +865,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = 0;
	dma_addr_t event_dma;

@@ -814,9 +874,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
	struct urb *urb = 0;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	xhci_dbg(xhci, "In %s\n", __func__);
-	xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
	slot_id = TRB_TO_SLOT_ID(event->flags);
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;

@@ -870,7 +932,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			(unsigned int) event->flags);
	/* Look for common error cases */
-	switch (GET_COMP_CODE(event->transfer_len)) {
	trb_comp_code = GET_COMP_CODE(event->transfer_len);
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */

@@ -913,7 +976,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
	/* Was this a control transfer? */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
-		switch (GET_COMP_CODE(event->transfer_len)) {
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			if (event_trb == ep_ring->dequeue) {
				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");

@@ -928,8 +991,39 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			break;
		case COMP_SHORT_TX:
			xhci_warn(xhci, "WARN: short transfer on control ep\n");
-			status = -EREMOTEIO;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
case COMP_BABBLE:
/* The 0.96 spec says a babbling control endpoint
* is not halted. The 0.96 spec says it is. Some HW
* claims to be 0.95 compliant, but it halts the control
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
if (ep_ctx->ep_info != EP_STATE_HALTED)
break;
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
if (event_trb != ep_ring->dequeue &&
event_trb != td->last_trb)
td->urb->actual_length =
td->urb->transfer_buffer_length
- TRB_LEN(event->transfer_len);
else
td->urb->actual_length = 0;
ep_ring->stopped_td = td;
ep_ring->stopped_trb = event_trb;
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci,
td->urb->dev,
ep_index, ep_ring);
xhci_ring_cmd_db(xhci);
goto td_cleanup;
		default:
			/* Others already handled above */
			break;

@@ -943,7 +1037,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code */
-				if (status == -EINPROGRESS || status == 0)
				if ((status == -EINPROGRESS ||
							status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data stage? */
					status = -EREMOTEIO;
			} else {

@@ -952,7 +1049,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			}
		} else {
			/* Maybe the event was for the data stage? */
-			if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
			if (trb_comp_code != COMP_STOP_INVAL) {
				/* We didn't stop on a link TRB in the middle */
				td->urb->actual_length =
					td->urb->transfer_buffer_length -

@@ -964,7 +1061,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			}
		}
	} else {
-		switch (GET_COMP_CODE(event->transfer_len)) {
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			/* Double check that the HW transferred everything. */
			if (event_trb != td->last_trb) {

@@ -975,7 +1072,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
				else
					status = 0;
			} else {
-				xhci_dbg(xhci, "Successful bulk transfer!\n");
				if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
					xhci_dbg(xhci, "Successful bulk "
							"transfer!\n");
				else
					xhci_dbg(xhci, "Successful interrupt "
							"transfer!\n");
				status = 0;
			}
			break;

@@ -1001,11 +1103,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
				td->urb->actual_length =
					td->urb->transfer_buffer_length -
					TRB_LEN(event->transfer_len);
-				if (td->urb->actual_length < 0) {
				if (td->urb->transfer_buffer_length <
						td->urb->actual_length) {
					xhci_warn(xhci, "HC gave bad length "
							"of %d bytes left\n",
							TRB_LEN(event->transfer_len));
					td->urb->actual_length = 0;
					if (td->urb->transfer_flags &
							URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
				/* Don't overwrite a previously set error code */
				if (status == -EINPROGRESS) {

@@ -1041,14 +1149,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			/* If the ring didn't stop on a Link or No-op TRB, add
			 * in the actual bytes transferred from the Normal TRB
			 */
-			if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
			if (trb_comp_code != COMP_STOP_INVAL)
				td->urb->actual_length +=
					TRB_LEN(cur_trb->generic.field[2]) -
					TRB_LEN(event->transfer_len);
		}
	}
-	if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
-			GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.

@@ -1056,7 +1164,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		ep_ring->stopped_td = td;
		ep_ring->stopped_trb = event_trb;
	} else {
-		if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
		if (trb_comp_code == COMP_STALL ||
				trb_comp_code == COMP_BABBLE) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue

@@ -1072,16 +1181,41 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
/* Do one last check of the actual transfer length.
* If the host controller said we transferred more data than
* the buffer length, urb->actual_length will be a very big
* number (since it's unsigned). Play it safe and say we didn't
* transfer anything.
*/
if (urb->actual_length > urb->transfer_buffer_length) {
xhci_warn(xhci, "URB transfer length is wrong, "
"xHC issue? req. len = %u, "
"act. len = %u\n",
urb->transfer_buffer_length,
urb->actual_length);
urb->actual_length = 0;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
status = -EREMOTEIO;
else
status = 0;
}
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list)) {
			list_del(&td->cancelled_td_list);
			ep_ring->cancels_pending--;
		}
-		/* Leave the TD around for the reset endpoint function to use */
-		if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
		/* Leave the TD around for the reset endpoint function to use
		 * (but only if it's not a control endpoint, since we already
		 * queued the Set TR dequeue pointer command for stalled
		 * control endpoints).
		 */
		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
				 trb_comp_code != COMP_BABBLE)) {
			kfree(td);
		}
		urb->hcpriv = NULL;

@@ -1094,7 +1228,7 @@ cleanup:
	if (urb) {
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
-				urb, td->urb->actual_length, status);
				urb, urb->actual_length, status);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
		spin_lock(&xhci->lock);
@@ -1335,6 +1469,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
	ring_ep_doorbell(xhci, slot_id, ep_index);
}
/*
* xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
* endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
* (comprised of sg list entries) can take several service intervals to
* transmit.
*/
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
xhci->devs[slot_id]->out_ctx, ep_index);
int xhci_interval;
int ep_interval;
xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
ep_interval *= 8;
/* FIXME change this to a warning and a suggestion to use the new API
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
if (!printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
ep_interval,
ep_interval == 1 ? "" : "s",
xhci_interval,
xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
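xhci_queue_intr_tx() above reconciles the polling interval a driver asked for (urb->interval, in frames for LS/FS devices and microframes otherwise) with the 2^n microframe period encoded in the endpoint context, and silently adopts the hardware value when they disagree. A standalone sketch of the conversion, reusing the EP_INTERVAL_TO_UFRAMES definition added to xhci.h below (made-up interval values):

#include <stdio.h>

/* ep_info bits 23:16 hold the interval exponent; the period in 125 us
 * microframes is 2^exponent.
 */
#define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))

int main(void)
{
	unsigned int ep_info = 6 << 16;		/* exponent 6 -> 64 uframes = 8 ms */
	int urb_interval_frames = 10;		/* what a full-speed driver requested */

	int xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_info);
	int ep_interval = urb_interval_frames * 8;	/* FS/LS frames -> microframes */

	if (xhci_interval != ep_interval) {
		printf("driver wanted %d microframes, hardware uses %d microframes\n",
		       ep_interval, xhci_interval);
		/* adopt the hardware value, converted back to frames for FS/LS */
		urb_interval_frames = xhci_interval / 8;
	}
	printf("effective interval: %d frames\n", urb_interval_frames);
	return 0;
}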
@@ -1733,6 +1908,15 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id));
}

int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
...
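Two of the hunks above replace checks of the form "if (urb->actual_length < 0)": the URB length fields are unsigned, so a bogus residue reported by the controller wraps around to a huge positive value instead of going negative, and the code now compares the two lengths directly. A minimal demonstration of the wraparound:

#include <stdio.h>

int main(void)
{
	unsigned int transfer_buffer_length = 512;
	unsigned int trb_residue = 600;		/* bogus "bytes left" from the controller */

	unsigned int actual_length = transfer_buffer_length - trb_residue;

	/* (actual_length < 0) is always false for an unsigned value ... */
	printf("actual_length = %u\n", actual_length);

	/* ... so the sanity check has to compare the two lengths directly. */
	if (transfer_buffer_length < actual_length) {
		printf("controller reported a bad length, clamping to 0\n");
		actual_length = 0;
	}
	printf("actual_length = %u\n", actual_length);
	return 0;
}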
...@@ -581,6 +581,7 @@ struct xhci_ep_ctx { ...@@ -581,6 +581,7 @@ struct xhci_ep_ctx {
/* bit 15 is Linear Stream Array */ /* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */ /* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) ((p & 0xff) << 16) #define EP_INTERVAL(p) ((p & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
/* ep_info2 bitmasks */ /* ep_info2 bitmasks */
/* /*
...@@ -589,6 +590,7 @@ struct xhci_ep_ctx { ...@@ -589,6 +590,7 @@ struct xhci_ep_ctx {
*/ */
#define FORCE_EVENT (0x1) #define FORCE_EVENT (0x1)
#define ERROR_COUNT(p) (((p) & 0x3) << 1) #define ERROR_COUNT(p) (((p) & 0x3) << 1)
#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
#define EP_TYPE(p) ((p) << 3) #define EP_TYPE(p) ((p) << 3)
#define ISOC_OUT_EP 1 #define ISOC_OUT_EP 1
#define BULK_OUT_EP 2 #define BULK_OUT_EP 2
...@@ -601,6 +603,8 @@ struct xhci_ep_ctx { ...@@ -601,6 +603,8 @@ struct xhci_ep_ctx {
/* bit 7 is Host Initiate Disable - for disabling stream selection */ /* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p) (((p)&0xff) << 8) #define MAX_BURST(p) (((p)&0xff) << 8)
#define MAX_PACKET(p) (((p)&0xffff) << 16) #define MAX_PACKET(p) (((p)&0xffff) << 16)
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
/** /**
...@@ -926,6 +930,12 @@ struct xhci_td { ...@@ -926,6 +930,12 @@ struct xhci_td {
union xhci_trb *last_trb; union xhci_trb *last_trb;
}; };
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
int new_cycle_state;
};
struct xhci_ring { struct xhci_ring {
struct xhci_segment *first_seg; struct xhci_segment *first_seg;
union xhci_trb *enqueue; union xhci_trb *enqueue;
...@@ -952,12 +962,6 @@ struct xhci_ring { ...@@ -952,12 +962,6 @@ struct xhci_ring {
u32 cycle_state; u32 cycle_state;
}; };
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
int new_cycle_state;
};
struct xhci_erst_entry { struct xhci_erst_entry {
/* 64-bit event ring segment address */ /* 64-bit event ring segment address */
u64 seg_addr; u64 seg_addr;
...@@ -1058,6 +1062,9 @@ struct xhci_hcd { ...@@ -1058,6 +1062,9 @@ struct xhci_hcd {
int noops_submitted; int noops_submitted;
int noops_handled; int noops_handled;
int error_bitmask; int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
}; };
/* For testing purposes */ /* For testing purposes */
...@@ -1136,6 +1143,13 @@ static inline void xhci_write_64(struct xhci_hcd *xhci, ...@@ -1136,6 +1143,13 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
writel(val_hi, ptr + 1); writel(val_hi, ptr + 1);
} }
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
return ((HC_VERSION(temp) == 0x95) &&
(xhci->quirks & XHCI_LINK_TRB_QUIRK));
}
/* xHCI debugging */ /* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci); void xhci_print_registers(struct xhci_hcd *xhci);
...@@ -1158,7 +1172,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device ...@@ -1158,7 +1172,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc); unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep); void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_virt_device *vdev, unsigned int ep_index);
void xhci_slot_copy(struct xhci_hcd *xhci, struct xhci_virt_device *vdev);
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep, struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags); gfp_t mem_flags);
...@@ -1205,8 +1224,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, ...@@ -1205,8 +1224,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index); int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index); int slot_id, unsigned int ep_index);
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id); u32 slot_id);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index); unsigned int ep_index);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
...@@ -1215,6 +1238,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, ...@@ -1215,6 +1238,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring, unsigned int slot_id, struct xhci_ring *ep_ring, unsigned int slot_id,
unsigned int ep_index, struct xhci_dequeue_state *deq_state); unsigned int ep_index, struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev,
unsigned int ep_index, struct xhci_ring *ep_ring);
void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
...
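For readers following the new xHCI memory-management prototypes above: the spec addresses endpoints through a Device Context Index (DCI), where the bidirectional control endpoint 0 is DCI 1, OUT endpoint N is DCI 2N and IN endpoint N is DCI 2N+1; helpers such as xhci_get_endpoint_index() are expected to derive a zero-based form of this from the endpoint descriptor. A minimal standalone sketch of that mapping (an illustration of the spec's formula, not the driver's code):

#include <stdio.h>

/* Model of the xHCI Device Context Index computed from the two
 * descriptor fields that matter: bEndpointAddress and bmAttributes. */
static unsigned int xhci_dci(unsigned char bEndpointAddress,
			     unsigned char bmAttributes)
{
	unsigned int num = bEndpointAddress & 0x0f;	/* endpoint number   */
	unsigned int in  = (bEndpointAddress & 0x80) ? 1 : 0;

	if ((bmAttributes & 0x03) == 0)		/* control: bidirectional */
		return num * 2 + 1;
	return num * 2 + in;			/* OUT -> 2N, IN -> 2N+1  */
}

int main(void)
{
	/* prints 1 2 3: EP0 control, EP1 OUT bulk, EP1 IN bulk */
	printf("%u %u %u\n",
	       xhci_dci(0x00, 0x00), xhci_dci(0x01, 0x02), xhci_dci(0x81, 0x02));
	return 0;
}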
@@ -175,6 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
/* The console is special in terms of closing the device so
* indicate this port is now acting as a system console. */
port->console = 1;
port->console_init_baud = baud;
retval = 0;
out:
...
@@ -176,6 +176,9 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -694,6 +697,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(DE_VID, WHT_PID) },
{ USB_DEVICE(ADI_VID, ADI_GNICE_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -702,6 +707,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
...
@@ -81,6 +81,9 @@
/* OpenDCC (www.opendcc.de) product id */
#define FTDI_OPENDCC_PID 0xBFD8
#define FTDI_OPENDCC_SNIFFER_PID 0xBFD9
#define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
#define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
@@ -930,6 +933,7 @@
*/
#define ADI_VID 0x0456
#define ADI_GNICE_PID 0xF000
#define ADI_GNICEPLUS_PID 0xF001
/*
* JETI SPECTROMETER SPECBOS 1201
@@ -967,6 +971,12 @@
*/
#define MARVELL_OPENRD_PID 0x9e90
/*
* Hameg HO820 and HO870 interface (using VID 0x0403)
*/
#define HAMEG_HO820_PID 0xed74
#define HAMEG_HO870_PID 0xed71
/*
* BmRequestType: 1100 0000b
* bRequest: FTDI_E2_READ
...
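The ftdi_sio additions above all follow the same two-step recipe for supporting a new FTDI-based device: give the product ID a name in ftdi_sio.h, then add a matching entry to id_table_combined[] in ftdi_sio.c, attaching ftdi_jtag_quirk via .driver_info when the first interface is a JTAG/debug port the serial driver should leave alone (as the gnICE, SheevaPlug and OpenRD entries do). A sketch of the pattern with a deliberately fictitious FTDI_EXAMPLE_PID, not one of the IDs added here:

/* ftdi_sio.h: name the product ID (placeholder value for illustration) */
#define FTDI_EXAMPLE_PID	0xFFFF

/* ftdi_sio.c: add the match to id_table_combined[], before the
 * terminating { } entry */
{ USB_DEVICE(FTDI_VID, FTDI_EXAMPLE_PID) },

/* ...or, for a device whose first interface must be skipped: */
{ USB_DEVICE(FTDI_VID, FTDI_EXAMPLE_PID),
	.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },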
@@ -292,6 +292,7 @@ static int option_resume(struct usb_serial *serial);
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
#define TELIT_PRODUCT_UC864G 0x1004
/* ZTE PRODUCTS */
#define ZTE_VENDOR_ID 0x19d2
@@ -300,6 +301,7 @@ static int option_resume(struct usb_serial *serial);
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710 0xfff1
#define ZTE_PRODUCT_AC2726 0xfff5
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -503,6 +505,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
@@ -572,6 +575,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
...
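Note the two match styles in option_ids[] above: USB_DEVICE() binds the driver to every interface of the device, while USB_DEVICE_AND_INTERFACE_INFO() (used for the ZTE entries, with class/subclass/protocol all 0xff) restricts the match to vendor-specific interfaces, which keeps the serial driver off other interfaces of a composite modem. A hedged sketch with made-up IDs (0x1234/0x5678 are placeholders, not real assignments):

/* Placeholder IDs for illustration only */
#define EXAMPLE_VID	0x1234
#define EXAMPLE_PID	0x5678

static struct usb_device_id example_option_ids[] = {
	/* match the whole device, every interface */
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID) },
	/* match only vendor-specific (0xff/0xff/0xff) interfaces */
	{ USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VID, EXAMPLE_PID,
					0xff, 0xff, 0xff) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_option_ids);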
@@ -96,6 +96,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ } /* Terminating entry */
};
...
@@ -130,3 +130,7 @@
/* Sony, USB data cable for CMD-Jxx mobile phones */
#define SONY_VENDOR_ID 0x054c
#define SONY_QN3USB_PRODUCT_ID 0x0437
/* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
#define SANWA_VENDOR_ID 0x11ad
#define SANWA_PRODUCT_ID 0x0001
@@ -223,8 +223,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
tty->driver_data = port;
tty_port_tty_set(&port->port, tty);
/* If the console is attached, the device is already open */
if (port->port.count == 1 && !port->console) {
if (port->port.count == 1) {
first = 1;
/* lock this module before we call it
* this may fail, which means we must bail out,
@@ -242,11 +241,17 @@ static int serial_open (struct tty_struct *tty, struct file *filp)
if (retval)
goto bailout_module_put;
/* only call the device specific open if this
* is the first time the port is opened */
retval = serial->type->open(tty, port, filp);
if (retval)
goto bailout_interface_put;
/* only call the device specific open if this is the
* first time the port is opened and it is not a
* console port where the HW has already been
* initialized */
if (port->console) {
tty_encode_baud_rate(tty, port->console_init_baud, port->console_init_baud);
} else {
retval = serial->type->open(tty, port, filp);
if (retval)
goto bailout_interface_put;
}
mutex_unlock(&serial->disc_mutex);
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
}
...
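The console-related hunks work as a set: usb_console_setup() records the baud rate it programmed in the new console_init_baud field (see the console.c hunk above and the struct usb_serial_port hunk at the end), and serial_open() skips the subdriver's open() for a port the console already initialized, only re-advertising the known line speed to the tty layer. Roughly, the first tty open of a console port now reduces to the following (a simplified excerpt-style sketch; error paths and locking omitted):

/* usb_console_setup(): remember what the console configured */
port->console = 1;
port->console_init_baud = baud;

/* serial_open(): first open of that same port */
if (port->console) {
	/* hardware is already set up; just tell the tty its speed */
	tty_encode_baud_rate(tty, port->console_init_baud,
			     port->console_init_baud);
} else {
	retval = serial->type->open(tty, port, filp);
	if (retval)
		goto bailout_interface_put;
}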
...@@ -163,7 +163,7 @@ static void usb_onetouch_pm_hook(struct us_data *us, int action) ...@@ -163,7 +163,7 @@ static void usb_onetouch_pm_hook(struct us_data *us, int action)
usb_kill_urb(onetouch->irq); usb_kill_urb(onetouch->irq);
break; break;
case US_RESUME: case US_RESUME:
if (usb_submit_urb(onetouch->irq, GFP_KERNEL) != 0) if (usb_submit_urb(onetouch->irq, GFP_NOIO) != 0)
dev_err(&onetouch->irq->dev->dev, dev_err(&onetouch->irq->dev->dev,
"usb_submit_urb failed\n"); "usb_submit_urb failed\n");
break; break;
......
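The GFP_KERNEL to GFP_NOIO change matters because this hook runs while the storage device behind US_RESUME is being resumed: a GFP_KERNEL allocation may trigger memory reclaim that issues I/O to the very disk that is not yet back, deadlocking the resume. The general shape of a resume-path allocation for a device that block I/O depends on looks roughly like this (a generic sketch; example_dev and irq_urb are placeholder names, not the onetouch driver's):

#include <linux/usb.h>

struct example_dev {
	struct urb *irq_urb;
};

static int example_resume(struct usb_interface *intf)
{
	struct example_dev *dev = usb_get_intfdata(intf);

	/* GFP_NOIO: the allocator must not start new I/O to reclaim
	 * memory, so it cannot end up waiting on the suspended disk. */
	if (usb_submit_urb(dev->irq_urb, GFP_NOIO) != 0)
		dev_err(&intf->dev, "failed to resubmit interrupt URB\n");

	return 0;
}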
@@ -107,6 +107,7 @@ struct usb_serial_port {
char throttled;
char throttle_req;
char console;
int console_init_baud;
unsigned long sysrq; /* sysrq timeout */
struct device dev;
enum port_dev_state dev_state;
...