Commit 82af8ce8 authored by Michael S. Tsirkin, committed by Rusty Russell

virtio_pci: optional MSI-X support

This implements optional MSI-X support in virtio_pci.
MSI-X is used whenever the host supports at least 2 MSI-X
vectors: 1 for configuration changes and 1 for virtqueues.
Per-virtqueue vectors are allocated if enough vectors are available.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (+ whitespace, style)
parent 77cf5246
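
As a rough illustration of the policy described in the message above, the following minimal C sketch (not part of this patch) captures the fallback order; choose_irq_mode, try_enable_msix and the vp_irq_mode enum are hypothetical names standing in for the pci_enable_msix()-based logic that vp_request_vectors()/vp_enable_msix() implement in the diff below.

/* Hypothetical sketch of the interrupt fallback policy; not part of the patch. */
enum vp_irq_mode {
	VP_IRQ_MSIX_PER_VQ,	/* one vector per virtqueue + one for config changes */
	VP_IRQ_MSIX_SHARED,	/* one shared virtqueue vector + one for config changes */
	VP_IRQ_INTX,		/* legacy shared INTx interrupt */
};

static enum vp_irq_mode choose_irq_mode(unsigned max_vqs,
					int (*try_enable_msix)(unsigned nvec))
{
	/* Prefer one MSI-X vector per virtqueue plus one for config changes */
	if (try_enable_msix(max_vqs + 1) == 0)
		return VP_IRQ_MSIX_PER_VQ;
	/* Otherwise share a single vector among all virtqueues */
	if (try_enable_msix(2) == 0)
		return VP_IRQ_MSIX_SHARED;
	/* Finally fall back to the regular (INTx) interrupt */
	return VP_IRQ_INTX;
}
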
@@ -42,6 +42,26 @@ struct virtio_pci_device
/* a list of queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
/* MSI-X support */
int msix_enabled;
int intx_enabled;
struct msix_entry *msix_entries;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
char (*msix_names)[256];
/* Number of available vectors */
unsigned msix_vectors;
/* Vectors allocated */
unsigned msix_used_vectors;
};
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
* virtqueues. Thus, we need at least 2 vectors for MSI-X. */
enum {
VP_MSIX_CONFIG_VECTOR = 0,
VP_MSIX_VQ_VECTOR = 1,
};
struct virtio_pci_vq_info
@@ -60,6 +80,9 @@ struct virtio_pci_vq_info
/* the list node for the virtqueues list */
struct list_head node;
/* MSI-X vector (or none) */
unsigned vector;
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -109,7 +132,8 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+void __iomem *ioaddr = vp_dev->ioaddr +
+	VIRTIO_PCI_CONFIG(vp_dev) + offset;
u8 *ptr = buf;
int i;
@@ -123,7 +147,8 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
+void __iomem *ioaddr = vp_dev->ioaddr +
+	VIRTIO_PCI_CONFIG(vp_dev) + offset;
const u8 *ptr = buf;
int i;
@@ -221,7 +246,122 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
return vp_vring_interrupt(irq, opaque);
}
-/* the config->find_vq() implementation */
+static void vp_free_vectors(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
if (vp_dev->intx_enabled) {
free_irq(vp_dev->pci_dev->irq, vp_dev);
vp_dev->intx_enabled = 0;
}
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(vp_dev->msix_entries[i].vector, vp_dev);
vp_dev->msix_used_vectors = 0;
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
iowrite16(VIRTIO_MSI_NO_VECTOR,
vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
/* Flush the write out to device */
ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
vp_dev->msix_enabled = 0;
pci_disable_msix(vp_dev->pci_dev);
}
}
static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
int *options, int noptions)
{
int i;
for (i = 0; i < noptions; ++i)
if (!pci_enable_msix(dev, entries, options[i]))
return options[i];
return -EBUSY;
}
static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
const char *name = dev_name(&vp_dev->vdev.dev);
unsigned i, v;
int err = -ENOMEM;
/* We want at most one vector per queue and one for config changes.
* Fall back to separate vectors for config and a shared one for queues.
* Finally fall back to regular interrupts. */
int options[] = { max_vqs + 1, 2 };
int nvectors = max(options[0], options[1]);
vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
GFP_KERNEL);
if (!vp_dev->msix_entries)
goto error_entries;
vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
GFP_KERNEL);
if (!vp_dev->msix_names)
goto error_names;
for (i = 0; i < nvectors; ++i)
vp_dev->msix_entries[i].entry = i;
err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
options, ARRAY_SIZE(options));
if (err < 0) {
/* Can't allocate enough MSI-X vectors, use regular interrupt */
vp_dev->msix_vectors = 0;
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
IRQF_SHARED, name, vp_dev);
if (err)
goto error_irq;
vp_dev->intx_enabled = 1;
} else {
vp_dev->msix_vectors = err;
vp_dev->msix_enabled = 1;
/* Set the vector used for configuration */
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
err = request_irq(vp_dev->msix_entries[v].vector,
vp_config_changed, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
goto error_irq;
++vp_dev->msix_used_vectors;
iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
/* Verify we had enough resources to assign the vector */
v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
if (v == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto error_irq;
}
}
if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
/* Shared vector for all VQs */
v = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
err = request_irq(vp_dev->msix_entries[v].vector,
vp_vring_interrupt, 0, vp_dev->msix_names[v],
vp_dev);
if (err)
goto error_irq;
++vp_dev->msix_used_vectors;
}
return 0;
error_irq:
vp_free_vectors(vdev);
kfree(vp_dev->msix_names);
error_names:
kfree(vp_dev->msix_entries);
error_entries:
return err;
}
static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name)
@@ -230,7 +370,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
struct virtio_pci_vq_info *info;
struct virtqueue *vq;
unsigned long flags, size;
-u16 num;
+u16 num, vector;
int err;
/* Select the queue we're interested in */
@@ -249,6 +389,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
info->queue_index = index;
info->num = num;
info->vector = VIRTIO_MSI_NO_VECTOR;
size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -272,12 +413,43 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
vq->priv = info;
info->vq = vq;
/* allocate per-vq vector if available and necessary */
if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
vector = vp_dev->msix_used_vectors;
snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
"%s-%s", dev_name(&vp_dev->vdev.dev), name);
err = request_irq(vp_dev->msix_entries[vector].vector,
vring_interrupt, 0,
vp_dev->msix_names[vector], vq);
if (err)
goto out_request_irq;
info->vector = vector;
++vp_dev->msix_used_vectors;
} else
vector = VP_MSIX_VQ_VECTOR;
if (callback && vp_dev->msix_enabled) {
iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
if (vector == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto out_assign;
}
}
spin_lock_irqsave(&vp_dev->lock, flags);
list_add(&info->node, &vp_dev->virtqueues);
spin_unlock_irqrestore(&vp_dev->lock, flags);
return vq;
out_assign:
if (info->vector != VIRTIO_MSI_NO_VECTOR) {
free_irq(vp_dev->msix_entries[info->vector].vector, vq);
--vp_dev->msix_used_vectors;
}
out_request_irq:
vring_del_virtqueue(vq);
out_activate_queue:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
free_pages_exact(info->queue, size);
@@ -286,17 +458,27 @@ out_info:
return ERR_PTR(err);
}
/* the config->del_vq() implementation */
static void vp_del_vq(struct virtqueue *vq)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_vq_info *info = vq->priv;
unsigned long size;
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
if (info->vector != VIRTIO_MSI_NO_VECTOR)
free_irq(vp_dev->msix_entries[info->vector].vector, vq);
if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
/* Flush the write out to device */
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
}
vring_del_virtqueue(vq);
/* Select and deactivate the queue */
iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
@@ -304,30 +486,46 @@ static void vp_del_vq(struct virtqueue *vq)
kfree(info);
}
/* the config->del_vqs() implementation */
static void vp_del_vqs(struct virtio_device *vdev)
{
struct virtqueue *vq, *n;
list_for_each_entry_safe(vq, n, &vdev->vqs, list)
vp_del_vq(vq);
vp_free_vectors(vdev);
}
/* the config->find_vqs() implementation */
static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[])
{
-int i;
+int vectors = 0;
int i, err;
/* How many vectors would we like? */
for (i = 0; i < nvqs; ++i)
if (callbacks[i])
++vectors;
err = vp_request_vectors(vdev, vectors);
if (err)
goto error_request;
for (i = 0; i < nvqs; ++i) {
vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]);
if (IS_ERR(vqs[i]))
-goto error;
+goto error_find;
}
return 0;
-error:
+error_find:
vp_del_vqs(vdev);
error_request:
return PTR_ERR(vqs[i]);
}
@@ -349,7 +547,7 @@ static void virtio_pci_release_dev(struct device *_d)
struct virtio_pci_device *vp_dev = to_vp_device(dev);
struct pci_dev *pci_dev = vp_dev->pci_dev;
-free_irq(pci_dev->irq, vp_dev);
+vp_del_vqs(dev);
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
@@ -408,21 +606,13 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
vp_dev->vdev.id.device = pci_dev->subsystem_device;
/* register a handler for the queue with the PCI device's interrupt */
err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
dev_name(&vp_dev->vdev.dev), vp_dev);
if (err)
goto out_set_drvdata;
/* finally register the virtio device */
err = register_virtio_device(&vp_dev->vdev);
if (err)
-goto out_req_irq;
+goto out_set_drvdata;
return 0;
out_req_irq:
free_irq(pci_dev->irq, vp_dev);
out_set_drvdata:
pci_set_drvdata(pci_dev, NULL);
pci_iounmap(pci_dev, vp_dev->ioaddr);
...
@@ -47,9 +47,17 @@
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
/* MSI-X registers: only enabled if MSI-X is enabled. */
/* A 16-bit vector for configuration changes. */
#define VIRTIO_MSI_CONFIG_VECTOR 20
/* A 16-bit vector for selected queue notifications. */
#define VIRTIO_MSI_QUEUE_VECTOR 22
/* Vector value used to disable MSI for queue */
#define VIRTIO_MSI_NO_VECTOR 0xffff
/* The remaining space is defined by each driver as the per-driver
* configuration space */
-#define VIRTIO_PCI_CONFIG 20
+#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
...
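
For reference, the header change above implies the following layout for the legacy virtio PCI I/O region: with MSI-X enabled, the two 16-bit vector registers occupy offsets 20 and 22, pushing the device-specific configuration area from offset 20 to offset 24. A minimal, hypothetical helper (vp_config_off is a made-up name, not part of the patch) that mirrors VIRTIO_PCI_CONFIG(dev):

/* Hypothetical helper, not part of the patch: mirrors VIRTIO_PCI_CONFIG(dev).
 * When MSI-X is enabled, VIRTIO_MSI_CONFIG_VECTOR (20) and
 * VIRTIO_MSI_QUEUE_VECTOR (22) occupy what used to be the start of the
 * device-specific area, so that area moves from offset 20 to offset 24. */
static inline unsigned vp_config_off(int msix_enabled, unsigned field)
{
	return (msix_enabled ? 24 : 20) + field;
}
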