Commit 547de29e authored by Marcelo Tosatti, committed by Avi Kivity

KVM: protect assigned dev workqueue, int handler and irq acker

kvm_assigned_dev_ack_irq is vulnerable to a race condition with the
interrupt handler function. It does:

        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }

If an interrupt triggers after enable_irq() but before the
dev->host_irq_disabled = false assignment, the interrupt handler will
disable the host interrupt and set dev->host_irq_disabled to true.

When control returns to kvm_assigned_dev_ack_irq, dev->host_irq_disabled
is overwritten with false, so the next kvm_assigned_dev_ack_irq call will
fail to re-enable it.
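
For illustration only (this sketch is not part of the patch and is not
kernel code), the lost re-enable can be modeled in userspace: the pthread
mutex stands in for the per-device spinlock added below, and
fake_enable_irq()/fake_disable_irq() stand in for enable_irq() and
disable_irq_nosync():

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static bool host_irq_disabled;   /* mirrors dev->host_irq_disabled */
        static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

        static void fake_enable_irq(void)  { puts("host irq enabled"); }
        static void fake_disable_irq(void) { puts("host irq disabled"); }

        /* pre-patch ack path: the handler can run between fake_enable_irq()
         * and the store below, disable the irq and set the flag to true;
         * the store then wipes that out and the irq is never re-enabled */
        static void ack_irq_unlocked(void)
        {
                if (host_irq_disabled) {
                        fake_enable_irq();
                        /* <-- race window: irq_handler() may run here */
                        host_irq_disabled = false;
                }
        }

        /* patched pattern: test, enable and clear form one critical section,
         * so the handler's disable+set cannot interleave with them */
        static void ack_irq_locked(void)
        {
                pthread_mutex_lock(&dev_lock);
                if (host_irq_disabled) {
                        fake_enable_irq();
                        host_irq_disabled = false;
                }
                pthread_mutex_unlock(&dev_lock);
        }

        static void irq_handler(void)
        {
                pthread_mutex_lock(&dev_lock);
                fake_disable_irq();
                host_irq_disabled = true;
                pthread_mutex_unlock(&dev_lock);
        }

        int main(void)
        {
                host_irq_disabled = true;
                ack_irq_unlocked();     /* contains the window described above */
                irq_handler();
                ack_irq_locked();       /* enable + flag update are atomic */
                printf("host_irq_disabled = %d\n", host_irq_disabled);
                return 0;
        }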

Other than that, having the interrupt handler and the work handler run
in parallel is asking for trouble (no obvious problem was spotted, but
better not to have to prove there is none; it's fragile).

CC: sheng.yang@intel.com
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 32f88400
@@ -345,6 +345,7 @@ struct kvm_assigned_dev_kernel {
         int flags;
         struct pci_dev *dev;
         struct kvm *kvm;
+        spinlock_t assigned_dev_lock;
 };
 
 struct kvm_irq_mask_notifier {
@@ -42,6 +42,7 @@
 #include <linux/mman.h>
 #include <linux/swap.h>
 #include <linux/bitops.h>
+#include <linux/spinlock.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -130,6 +131,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
          * finer-grained lock, update this
          */
         mutex_lock(&kvm->lock);
+        spin_lock_irq(&assigned_dev->assigned_dev_lock);
         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                 struct kvm_guest_msix_entry *guest_entries =
                         assigned_dev->guest_msix_entries;
@@ -156,18 +158,21 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
                 }
         }
 
+        spin_unlock_irq(&assigned_dev->assigned_dev_lock);
         mutex_unlock(&assigned_dev->kvm->lock);
 }
 
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
 {
+        unsigned long flags;
         struct kvm_assigned_dev_kernel *assigned_dev =
                 (struct kvm_assigned_dev_kernel *) dev_id;
 
+        spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                 int index = find_index_from_host_irq(assigned_dev, irq);
                 if (index < 0)
-                        return IRQ_HANDLED;
+                        goto out;
                 assigned_dev->guest_msix_entries[index].flags |=
                         KVM_ASSIGNED_MSIX_PENDING;
         }
@@ -177,6 +182,8 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
         disable_irq_nosync(irq);
         assigned_dev->host_irq_disabled = true;
 
+out:
+        spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
         return IRQ_HANDLED;
 }
 
@@ -184,6 +191,7 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
         struct kvm_assigned_dev_kernel *dev;
+        unsigned long flags;
 
         if (kian->gsi == -1)
                 return;
@@ -196,10 +204,12 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
         /* The guest irq may be shared so this ack may be
          * from another device.
          */
+        spin_lock_irqsave(&dev->assigned_dev_lock, flags);
         if (dev->host_irq_disabled) {
                 enable_irq(dev->host_irq);
                 dev->host_irq_disabled = false;
         }
+        spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
@@ -615,6 +625,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
         match->host_devfn = assigned_dev->devfn;
         match->flags = assigned_dev->flags;
         match->dev = dev;
+        spin_lock_init(&match->assigned_dev_lock);
         match->irq_source_id = -1;
         match->kvm = kvm;
         match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
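
A note on the locking pattern the diff introduces (the sketch below is a
userspace model, not kernel code; the names are stand-ins): the workqueue
handler runs in process context with interrupts enabled, so it can use the
plain spin_lock_irq()/spin_unlock_irq() pair, and it takes the per-device
lock only while already holding kvm->lock; the hard-irq handler and the ack
notifier use spin_lock_irqsave(), which is safe regardless of whether
interrupts are already disabled at the call site, and they never take
kvm->lock; spin_lock_init() runs in the device-assignment ioctl, before an
interrupt handler can be requested for the device.

        #include <pthread.h>
        #include <stdio.h>

        static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;          /* models kvm->lock */
        static pthread_mutex_t assigned_dev_lock = PTHREAD_MUTEX_INITIALIZER; /* models the new lock */

        /* workqueue handler: coarse lock first, then the per-device lock
         * (spin_lock_irq() in the patch) */
        static void interrupt_work_handler(void)
        {
                pthread_mutex_lock(&kvm_lock);
                pthread_mutex_lock(&assigned_dev_lock);
                puts("work: read pending state, inject guest irq");
                pthread_mutex_unlock(&assigned_dev_lock);
                pthread_mutex_unlock(&kvm_lock);
        }

        /* hard-irq handler and ack notifier: per-device lock only
         * (spin_lock_irqsave() in the patch), never kvm->lock */
        static void dev_intr(void)
        {
                pthread_mutex_lock(&assigned_dev_lock);
                puts("irq: mark pending, disable host irq");
                pthread_mutex_unlock(&assigned_dev_lock);
        }

        static void ack_irq(void)
        {
                pthread_mutex_lock(&assigned_dev_lock);
                puts("ack: re-enable host irq if it was disabled");
                pthread_mutex_unlock(&assigned_dev_lock);
        }

        int main(void)
        {
                dev_intr();
                interrupt_work_handler();
                ack_irq();
                return 0;
        }

Keeping the nesting one-way (kvm->lock outside, assigned_dev_lock inside)
and keeping the irq-context paths off kvm->lock avoids deadlock while still
serializing all three paths on the per-device interrupt state.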