Commit c11ac05e authored by Stephen Rothwell

Merge commit 'kvm/linux-next'

parents 15235fa9 cba2be0e
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_PIC_MASTER   0
 #define KVM_IRQCHIP_PIC_SLAVE    1
 #define KVM_IRQCHIP_IOAPIC       2
+#define KVM_NR_IRQCHIPS          3

 #define KVM_CONTEXT_SIZE 8*1024
@@ -475,7 +475,6 @@ struct kvm_arch {
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	int iommu_flags;
-	struct hlist_head irq_ack_notifier_list;

 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/

 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o irq_comm.o)
+		coalesced_mmio.o irq_comm.o assigned-dev.o)

 ifeq ($(CONFIG_IOMMU_API),y)
 common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
@@ -851,8 +851,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;

@@ -868,9 +867,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(ioapic_irqchip(kvm),
-				&chip->chip.ioapic,
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;

@@ -944,7 +941,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	int r = -EINVAL;
+	int r = -ENOTTY;

 	switch (ioctl) {
 	case KVM_SET_MEMORY_REGION: {

@@ -985,10 +982,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 					irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
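The last hunk above drops kvm->irq_lock around kvm_set_irq(); the locking now lives inside the irq routing code, and the KVM_IRQ_LINE/KVM_IRQ_LINE_STATUS userspace contract is unchanged. As a hedged illustration (not part of this commit), a minimal userspace caller of that path looks like this, assuming a VM fd already obtained via KVM_CREATE_VM:

```c
/* Illustration only; not from this commit. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pulse a GSI: assert the line, then deassert it. Returns 0 on success. */
static int pulse_gsi(int vm_fd, unsigned int gsi)
{
	struct kvm_irq_level irq_event = { .irq = gsi, .level = 1 };

	if (ioctl(vm_fd, KVM_IRQ_LINE, &irq_event) < 0)
		return -1;
	irq_event.level = 0;
	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_event);
}
```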
@@ -421,7 +421,7 @@ long kvm_arch_vm_ioctl(struct file *filp,

 	switch (ioctl) {
 	default:
-		r = -EINVAL;
+		r = -ENOTTY;
 	}

 	return r;
@@ -150,7 +150,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		break;
 	}
 	default:
-		r = -EINVAL;
+		r = -ENOTTY;
 	}

 	return r;
@@ -79,6 +79,7 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_PIC_MASTER   0
 #define KVM_IRQCHIP_PIC_SLAVE    1
 #define KVM_IRQCHIP_IOAPIC       2
+#define KVM_NR_IRQCHIPS          3

 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
@@ -397,7 +397,6 @@ struct kvm_arch{
 	struct kvm_pic *vpic;
 	struct kvm_ioapic *vioapic;
 	struct kvm_pit *vpit;
-	struct hlist_head irq_ack_notifier_list;
 	int vapics_in_nmi_mode;

 	unsigned int tss_addr;

@@ -410,7 +409,6 @@ struct kvm_arch{
 	gpa_t ept_identity_map_addr;

 	unsigned long irq_sources_bitmap;
-	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
 	u64 vm_init_tsc;
 };

@@ -506,8 +504,8 @@ struct kvm_x86_ops {
 	void (*tlb_flush)(struct kvm_vcpu *vcpu);

-	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
-	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	void (*run)(struct kvm_vcpu *vcpu);
+	int (*handle_exit)(struct kvm_vcpu *vcpu);
 	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);

@@ -568,7 +566,7 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	(1 << 0)
 #define EMULTYPE_TRAP_UD	(1 << 1)
 #define EMULTYPE_SKIP		(1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
+int emulate_instruction(struct kvm_vcpu *vcpu,
 			unsigned long cr2, u16 error_code, int emulation_type);
 void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);

@@ -585,9 +583,9 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

 struct x86_emulate_ctxt;

-int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in,
 		    int size, unsigned port);
-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
 		    int size, unsigned long count, int down,
 		    gva_t address, int rep, unsigned port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
@@ -6,7 +6,8 @@ CFLAGS_svm.o := -I.
 CFLAGS_vmx.o := -I.

 kvm-y			+= $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-				coalesced_mmio.o irq_comm.o eventfd.o)
+				coalesced_mmio.o irq_comm.o eventfd.o \
+				assigned-dev.o)
 kvm-$(CONFIG_IOMMU_API)	+= $(addprefix ../../../virt/kvm/, iommu.o)

 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
@@ -75,6 +75,8 @@
 #define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
 #define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
 #define GroupMask   0xff        /* Group number stored in bits 0:7 */
+/* Misc flags */
+#define No64        (1<<28)
 /* Source 2 operand type */
 #define Src2None    (0<<29)
 #define Src2CL      (1<<29)
@@ -92,19 +94,23 @@ static u32 opcode_table[256] = {
 	/* 0x00 - 0x07 */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
 	/* 0x08 - 0x0F */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	0, 0, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack | No64, 0,
 	/* 0x10 - 0x17 */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
 	/* 0x18 - 0x1F */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
+	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
+	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
 	/* 0x20 - 0x27 */
 	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
 	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
@@ -133,7 +139,8 @@ static u32 opcode_table[256] = {
 	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
 	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
 	/* 0x60 - 0x67 */
-	0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
+	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
+	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
 	0, 0, 0, 0,
 	/* 0x68 - 0x6F */
 	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
@@ -158,7 +165,7 @@ static u32 opcode_table[256] = {
 	/* 0x90 - 0x97 */
 	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
 	/* 0x98 - 0x9F */
-	0, 0, SrcImm | Src2Imm16, 0,
+	0, 0, SrcImm | Src2Imm16 | No64, 0,
 	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
 	/* 0xA0 - 0xA7 */
 	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
@@ -185,7 +192,7 @@ static u32 opcode_table[256] = {
 	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
 	/* 0xC8 - 0xCF */
 	0, 0, 0, ImplicitOps | Stack,
-	ImplicitOps, SrcImmByte, ImplicitOps, ImplicitOps,
+	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
 	/* 0xD0 - 0xD7 */
 	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
 	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
@@ -198,7 +205,7 @@ static u32 opcode_table[256] = {
 	ByteOp | SrcImmUByte, SrcImmUByte,
 	/* 0xE8 - 0xEF */
 	SrcImm | Stack, SrcImm | ImplicitOps,
-	SrcImmU | Src2Imm16, SrcImmByte | ImplicitOps,
+	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
 	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
 	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
 	/* 0xF0 - 0xF7 */
@@ -244,11 +251,13 @@ static u32 twobyte_table[256] = {
 	/* 0x90 - 0x9F */
 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 	/* 0xA0 - 0xA7 */
-	0, 0, 0, DstMem | SrcReg | ModRM | BitOp,
+	ImplicitOps | Stack, ImplicitOps | Stack,
+	0, DstMem | SrcReg | ModRM | BitOp,
 	DstMem | SrcReg | Src2ImmByte | ModRM,
 	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
 	/* 0xA8 - 0xAF */
-	0, 0, 0, DstMem | SrcReg | ModRM | BitOp,
+	ImplicitOps | Stack, ImplicitOps | Stack,
+	0, DstMem | SrcReg | ModRM | BitOp,
 	DstMem | SrcReg | Src2ImmByte | ModRM,
 	DstMem | SrcReg | Src2CL | ModRM,
 	ModRM, 0,
...@@ -962,6 +971,11 @@ done_prefixes: ...@@ -962,6 +971,11 @@ done_prefixes:
} }
} }
if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");;
return -1;
}
if (c->d & Group) { if (c->d & Group) {
group = c->d & GroupMask; group = c->d & GroupMask;
c->modrm = insn_fetch(u8, 1, c->eip); c->modrm = insn_fetch(u8, 1, c->eip);
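For context: No64 tags opcodes that are architecturally invalid in 64-bit mode (the segment push/pop opcodes, pusha/popa, and so on), and the check above rejects them at decode time. A minimal sketch of the same predicate, with illustrative constant values rather than the kernel's headers:

```c
/* Sketch only; constants inlined for illustration. */
#define No64                 (1u << 28)  /* decode flag added by this patch */
#define X86EMUL_MODE_PROT64  8           /* assumed value for this sketch */

static int insn_invalid_in_long_mode(int mode, unsigned int decode_flags)
{
	return mode == X86EMUL_MODE_PROT64 && (decode_flags & No64);
}
```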
@@ -1186,6 +1200,69 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }

+static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
+{
+	struct decode_cache *c = &ctxt->decode;
+	struct kvm_segment segment;
+
+	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
+
+	c->src.val = segment.selector;
+	emulate_push(ctxt);
+}
+
+static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
+			    struct x86_emulate_ops *ops, int seg)
+{
+	struct decode_cache *c = &ctxt->decode;
+	unsigned long selector;
+	int rc;
+
+	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
+	if (rc != 0)
+		return rc;
+
+	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg);
+	return rc;
+}
+
+static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
+{
+	struct decode_cache *c = &ctxt->decode;
+	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
+	int reg = VCPU_REGS_RAX;
+
+	while (reg <= VCPU_REGS_RDI) {
+		(reg == VCPU_REGS_RSP) ?
+		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);
+
+		emulate_push(ctxt);
+		++reg;
+	}
+}
+
+static int emulate_popa(struct x86_emulate_ctxt *ctxt,
+			struct x86_emulate_ops *ops)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int rc = 0;
+	int reg = VCPU_REGS_RDI;
+
+	while (reg >= VCPU_REGS_RAX) {
+		if (reg == VCPU_REGS_RSP) {
+			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
+							c->op_bytes);
+			--reg;
+		}
+
+		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
+		if (rc != 0)
+			break;
+		--reg;
+	}
+	return rc;
+}
+
 static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
 				struct x86_emulate_ops *ops)
 {
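A note on emulate_pusha()/emulate_popa() above: PUSHA pushes the eight GPRs in the fixed order AX, CX, DX, BX, SP, BP, SI, DI, with the SP slot holding the value SP had before the first push, and POPA pops them in reverse while skipping the SP slot (hence the register_address_increment() instead of a pop). A self-contained model of that ordering, hypothetical demo code rather than anything from the patch:

```c
#include <stdio.h>

/* Stand-alone model of the PUSHA ordering the emulator implements above. */
int main(void)
{
	const char *name[8] = { "AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI" };
	unsigned int reg[8] = { 1, 2, 3, 4, 0x1000, 6, 7, 8 };
	unsigned int sp = reg[4], old_sp = reg[4];

	for (int i = 0; i < 8; i++) {
		sp -= 4;	/* 32-bit operand size */
		printf("%#07x <- %s = %#x\n", sp, name[i],
		       i == 4 ? old_sp : reg[i]);	/* SP slot gets old SP */
	}
	return 0;
}
```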
@@ -1707,18 +1784,45 @@ special_insn:
 	add:		/* add */
 		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x06:		/* push es */
+		emulate_push_sreg(ctxt, VCPU_SREG_ES);
+		break;
+	case 0x07:		/* pop es */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x08 ... 0x0d:
 	or:		/* or */
 		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x0e:		/* push cs */
+		emulate_push_sreg(ctxt, VCPU_SREG_CS);
+		break;
 	case 0x10 ... 0x15:
 	adc:		/* adc */
 		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x16:		/* push ss */
+		emulate_push_sreg(ctxt, VCPU_SREG_SS);
+		break;
+	case 0x17:		/* pop ss */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x18 ... 0x1d:
 	sbb:		/* sbb */
 		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
 		break;
+	case 0x1e:		/* push ds */
+		emulate_push_sreg(ctxt, VCPU_SREG_DS);
+		break;
+	case 0x1f:		/* pop ds */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x20 ... 0x25:
 	and:		/* and */
 		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);

@@ -1750,6 +1854,14 @@ special_insn:
 		if (rc != 0)
 			goto done;
 		break;
+	case 0x60:	/* pusha */
+		emulate_pusha(ctxt);
+		break;
+	case 0x61:	/* popa */
+		rc = emulate_popa(ctxt, ops);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0x63:		/* movsxd */
 		if (ctxt->mode != X86EMUL_MODE_PROT64)
 			goto cannot_emulate;
@@ -1761,7 +1873,7 @@ special_insn:
 		break;
 	case 0x6c:		/* insb */
 	case 0x6d:		/* insw/insd */
-		if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+		if (kvm_emulate_pio_string(ctxt->vcpu,
 				1,
 				(c->d & ByteOp) ? 1 : c->op_bytes,
 				c->rep_prefix ?

@@ -1777,7 +1889,7 @@ special_insn:
 		return 0;
 	case 0x6e:		/* outsb */
 	case 0x6f:		/* outsw/outsd */
-		if (kvm_emulate_pio_string(ctxt->vcpu, NULL,
+		if (kvm_emulate_pio_string(ctxt->vcpu,
 				0,
 				(c->d & ByteOp) ? 1 : c->op_bytes,
 				c->rep_prefix ?
@@ -2070,7 +2182,7 @@ special_insn:
 	case 0xef: /* out (e/r)ax,dx */
 		port = c->regs[VCPU_REGS_RDX];
 		io_dir_in = 0;
-	do_io:	if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in,
+	do_io:	if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
 				   (c->d & ByteOp) ? 1 : c->op_bytes,
 				   port) != 0) {
 			c->eip = saved_eip;
@@ -2297,6 +2409,14 @@ twobyte_insn:
 			jmp_rel(c, c->src.val);
 		c->dst.type = OP_NONE;
 		break;
+	case 0xa0:	  /* push fs */
+		emulate_push_sreg(ctxt, VCPU_SREG_FS);
+		break;
+	case 0xa1:	 /* pop fs */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0xa3:
 	      bt:		/* bt */
 		c->dst.type = OP_NONE;

@@ -2308,6 +2428,14 @@ twobyte_insn:
 	case 0xa5: /* shld cl, r, r/m */
 		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
 		break;
+	case 0xa8:	/* push gs */
+		emulate_push_sreg(ctxt, VCPU_SREG_GS);
+		break;
+	case 0xa9:	/* pop gs */
+		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
+		if (rc != 0)
+			goto done;
+		break;
 	case 0xab:
 	      bts:		/* bts */
 		/* only subword offset */
@@ -688,10 +688,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int i;

-	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
-	mutex_unlock(&kvm->irq_lock);

 	/*
 	 * Provides NMI watchdog support via Virtual Wire mode.
@@ -38,7 +38,15 @@ static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
 	s->isr_ack |= (1 << irq);
 	if (s != &s->pics_state->pics[0])
 		irq += 8;
+	/*
+	 * We are dropping lock while calling ack notifiers since ack
+	 * notifier callbacks for assigned devices call into PIC recursively.
+	 * Other interrupt may be delivered to PIC while lock is dropped but
+	 * it should be safe since PIC state is already updated at this stage.
+	 */
+	spin_unlock(&s->pics_state->lock);
 	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
+	spin_lock(&s->pics_state->lock);
 }

 void kvm_pic_clear_isr_ack(struct kvm *kvm)
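The unlock/notify/relock sequence above is the central pattern of this series: bring the chip's state fully up to date, drop the lock so the ack notifier may re-enter the PIC (assigned-device notifiers do exactly that), then retake it. A hedged userspace sketch of the pattern with hypothetical names, using a pthread mutex in place of the kernel spinlock:

```c
#include <pthread.h>

/* Hypothetical sketch; the names are illustrative, not kernel API. */
struct pic_state {
	pthread_mutex_t lock;
	unsigned int isr;
	void (*ack_notifier)(struct pic_state *s, int irq); /* may relock */
};

/* Called with s->lock held, like pic_clear_isr() above. */
static void clear_isr_and_notify(struct pic_state *s, int irq)
{
	s->isr &= ~(1u << irq);		/* state consistent before unlock */
	pthread_mutex_unlock(&s->lock);
	if (s->ack_notifier)
		s->ack_notifier(s, irq);	/* free to re-enter the chip */
	pthread_mutex_lock(&s->lock);
}
```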
@@ -176,16 +184,18 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
 static inline void pic_intack(struct kvm_kpic_state *s, int irq)
 {
 	s->isr |= 1 << irq;
-	if (s->auto_eoi) {
-		if (s->rotate_on_auto_eoi)
-			s->priority_add = (irq + 1) & 7;
-		pic_clear_isr(s, irq);
-	}
 	/*
 	 * We don't clear a level sensitive interrupt here
 	 */
 	if (!(s->elcr & (1 << irq)))
 		s->irr &= ~(1 << irq);
+
+	if (s->auto_eoi) {
+		if (s->rotate_on_auto_eoi)
+			s->priority_add = (irq + 1) & 7;
+		pic_clear_isr(s, irq);
+	}
 }

 int kvm_pic_read_irq(struct kvm *kvm)
@@ -225,22 +235,11 @@ int kvm_pic_read_irq(struct kvm *kvm)

 void kvm_pic_reset(struct kvm_kpic_state *s)
 {
-	int irq, irqbase, n;
+	int irq;
 	struct kvm *kvm = s->pics_state->irq_request_opaque;
 	struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
+	u8 irr = s->irr, isr = s->imr;

-	if (s == &s->pics_state->pics[0])
-		irqbase = 0;
-	else
-		irqbase = 8;
-
-	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
-		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
-			if (s->irr & (1 << irq) || s->isr & (1 << irq)) {
-				n = irq + irqbase;
-				kvm_notify_acked_irq(kvm, SELECT_PIC(n), n);
-			}
-	}
 	s->last_irr = 0;
 	s->irr = 0;
 	s->imr = 0;

@@ -256,6 +255,13 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
 	s->rotate_on_auto_eoi = 0;
 	s->special_fully_nested_mode = 0;
 	s->init4 = 0;
+
+	for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
+		if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
+			if (irr & (1 << irq) || isr & (1 << irq)) {
+				pic_clear_isr(s, irq);
+			}
+	}
 }

 static void pic_ioport_write(void *opaque, u32 addr, u32 val)
@@ -298,9 +304,9 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
 			priority = get_priority(s, s->isr);
 			if (priority != 8) {
 				irq = (priority + s->priority_add) & 7;
-				pic_clear_isr(s, irq);
 				if (cmd == 5)
 					s->priority_add = (irq + 1) & 7;
+				pic_clear_isr(s, irq);
 				pic_update_irq(s->pics_state);
 			}
 			break;
@@ -71,6 +71,7 @@ struct kvm_pic {
 	int output;		/* intr from master PIC */
 	struct kvm_io_device dev;
 	void (*ack_notifier)(void *opaque, int irq);
+	unsigned long irq_states[16];
 };

 struct kvm_pic *kvm_create_pic(struct kvm *kvm);
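The new irq_states[16] in struct kvm_pic, like the irq_states[IOAPIC_NUM_PINS] added to struct kvm_ioapic further down, moves shared-line bookkeeping from kvm_arch into the chip itself: each line keeps one bit per irq source, and the wire level is the logical OR of all sources. A minimal sketch of that folding, assuming the same bit-per-source convention:

```c
/* Sketch of per-line source folding; the concept, not the exact kernel code. */
static int set_line_level(unsigned long *irq_state, int irq_source_id, int level)
{
	if (level)
		*irq_state |= 1UL << irq_source_id;
	else
		*irq_state &= ~(1UL << irq_source_id);

	return *irq_state != 0;	/* line is high while any source asserts it */
}
```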
@@ -471,11 +471,8 @@ static void apic_set_eoi(struct kvm_lapic *apic)
 		trigger_mode = IOAPIC_LEVEL_TRIG;
 	else
 		trigger_mode = IOAPIC_EDGE_TRIG;
-	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) {
-		mutex_lock(&apic->vcpu->kvm->irq_lock);
+	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
 		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
-		mutex_unlock(&apic->vcpu->kvm->irq_lock);
-	}
 }

 static void apic_send_ipi(struct kvm_lapic *apic)

@@ -504,9 +501,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
 		   irq.vector);

-	mutex_lock(&apic->vcpu->kvm->irq_lock);
 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
-	mutex_unlock(&apic->vcpu->kvm->irq_lock);
 }

 static u32 apic_get_tmcct(struct kvm_lapic *apic)
@@ -2737,7 +2737,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	if (r)
 		goto out;

-	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
+	er = emulate_instruction(vcpu, cr2, error_code, 0);

 	switch (er) {
 	case EMULATE_DONE:
@@ -2036,9 +2036,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 			sizeof(struct kvm_pic_state));
 		break;
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic,
-			ioapic_irqchip(kvm),
-			sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;

@@ -2068,11 +2066,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 		spin_unlock(&pic_irqchip(kvm)->lock);
 		break;
 	case KVM_IRQCHIP_IOAPIC:
-		mutex_lock(&kvm->irq_lock);
-		memcpy(ioapic_irqchip(kvm),
-			&chip->chip.ioapic,
-			sizeof(struct kvm_ioapic_state));
-		mutex_unlock(&kvm->irq_lock);
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;

@@ -2180,7 +2174,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	int r = -EINVAL;
+	int r = -ENOTTY;
 	/*
 	 * This union makes it completely explicit to gcc-3.x
 	 * that these two variables' stack usage should be

@@ -2290,10 +2284,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 					irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,

@@ -2755,13 +2747,13 @@ static void cache_all_regs(struct kvm_vcpu *vcpu)
 }

 int emulate_instruction(struct kvm_vcpu *vcpu,
-			struct kvm_run *run,
 			unsigned long cr2,
 			u16 error_code,
 			int emulation_type)
 {
 	int r, shadow_mask;
 	struct decode_cache *c;
+	struct kvm_run *run = vcpu->run;

 	kvm_clear_exception_queue(vcpu);
 	vcpu->arch.mmio_fault_cr2 = cr2;

@@ -2967,8 +2959,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
 	return r;
 }

-int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
-		  int size, unsigned port)
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
 {
 	unsigned long val;

@@ -2997,7 +2988,7 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio);

-int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
 		  int size, unsigned long count, int down,
 		  gva_t address, int rep, unsigned port)
 {

@@ -3451,17 +3442,17 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
  *
  * No need to exit to userspace if we already have an interrupt queued.
  */
-static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
-					struct kvm_run *kvm_run)
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
 	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
-		kvm_run->request_interrupt_window &&
+		vcpu->run->request_interrupt_window &&
 		kvm_arch_interrupt_allowed(vcpu));
 }

-static void post_kvm_run_save(struct kvm_vcpu *vcpu,
-			      struct kvm_run *kvm_run)
+static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *kvm_run = vcpu->run;
+
 	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);

@@ -3523,7 +3514,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }

-static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.pending) {

@@ -3559,11 +3550,11 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
 	}
 }

-static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
-		kvm_run->request_interrupt_window;
+		vcpu->run->request_interrupt_window;

 	if (vcpu->requests)
 		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))

@@ -3584,12 +3575,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			kvm_x86_ops->tlb_flush(vcpu);
 		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
 				       &vcpu->requests)) {
-			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
+			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
 		}
 		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
-			kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
 		}

@@ -3613,7 +3604,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto out;
 	}

-	inject_pending_event(vcpu, kvm_run);
+	inject_pending_event(vcpu);

 	/* enable NMI/IRQ window open exits if needed */
 	if (vcpu->arch.nmi_pending)

@@ -3639,7 +3630,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	}

 	trace_kvm_entry(vcpu->vcpu_id);
-	kvm_x86_ops->run(vcpu, kvm_run);
+	kvm_x86_ops->run(vcpu);

 	if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
 		set_debugreg(current->thread.debugreg0, 0);

@@ -3680,13 +3671,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

 	kvm_lapic_sync_from_vapic(vcpu);

-	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+	r = kvm_x86_ops->handle_exit(vcpu);
 out:
 	return r;
 }

-static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;

@@ -3706,7 +3697,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	r = 1;
 	while (r > 0) {
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			r = vcpu_enter_guest(vcpu, kvm_run);
+			r = vcpu_enter_guest(vcpu);
 		else {
 			up_read(&vcpu->kvm->slots_lock);
 			kvm_vcpu_block(vcpu);

@@ -3734,14 +3725,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);

-		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+		if (dm_request_for_irq_injection(vcpu)) {
 			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
 		if (signal_pending(current)) {
 			r = -EINTR;
-			kvm_run->exit_reason = KVM_EXIT_INTR;
+			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.signal_exits;
 		}
 		if (need_resched()) {

@@ -3752,7 +3743,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 	}

 	up_read(&vcpu->kvm->slots_lock);
-	post_kvm_run_save(vcpu, kvm_run);
+	post_kvm_run_save(vcpu);

 	vapic_exit(vcpu);

@@ -3792,8 +3783,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_needed = 0;

 		down_read(&vcpu->kvm->slots_lock);
-		r = emulate_instruction(vcpu, kvm_run,
-					vcpu->arch.mmio_fault_cr2, 0,
+		r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
 					EMULTYPE_NO_DECODE);
 		up_read(&vcpu->kvm->slots_lock);
 		if (r == EMULATE_DO_MMIO) {

@@ -3809,7 +3799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_register_write(vcpu, VCPU_REGS_RAX,
 				   kvm_run->hypercall.ret);

-	r = __vcpu_run(vcpu, kvm_run);
+	r = __vcpu_run(vcpu);

 out:
 	if (vcpu->sigset_active)
@@ -121,7 +121,7 @@ struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
-		   struct kvm *kvm, int level);
+		   struct kvm *kvm, int irq_source_id, int level);
 	union {
 		struct {
 			unsigned irqchip;

@@ -129,9 +129,28 @@ struct kvm_kernel_irq_routing_entry {
 		} irqchip;
 		struct msi_msg msi;
 	};
-	struct list_head link;
+	struct hlist_node link;
+};
+
+#ifdef __KVM_HAVE_IOAPIC
+
+struct kvm_irq_routing_table {
+	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
+	struct kvm_kernel_irq_routing_entry *rt_entries;
+	u32 nr_rt_entries;
+	/*
+	 * Array indexed by gsi. Each entry contains list of irq chips
+	 * the gsi is connected to.
+	 */
+	struct hlist_head map[0];
 };
+
+#else
+
+struct kvm_irq_routing_table {};
+
+#endif

 struct kvm {
 	spinlock_t mmu_lock;
 	spinlock_t requests_lock;

@@ -167,8 +186,9 @@ struct kvm {
 	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
-	struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
+	struct kvm_irq_routing_table *irq_routing;
 	struct hlist_head mask_notifier_list;
+	struct hlist_head irq_ack_notifier_list;
 #endif

 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER

@@ -391,7 +411,12 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn);
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);

-int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+#ifdef __KVM_HAVE_IOAPIC
+void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+				   union kvm_ioapic_redirect_entry *entry,
+				   unsigned long *deliver_bitmask);
+#endif
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
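With the gsi-indexed table above, raising an interrupt means walking map[gsi] and invoking the set() callback of every chip the line is routed to, with irq_source_id now threaded through. A simplified sketch of that fan-out (result accumulation and locking details elided; the era's four-argument hlist_for_each_entry is assumed):

```c
/* Simplified sketch of the kvm_set_irq() fan-out over the routing table. */
static int deliver_gsi(struct kvm *kvm, struct kvm_irq_routing_table *irq_rt,
		       int irq_source_id, u32 gsi, int level)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct hlist_node *n;
	int ret = -1;

	if (gsi < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, n, &irq_rt->map[gsi], link)
			ret = e->set(e, kvm, irq_source_id, level);

	return ret;
}
```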
@@ -553,4 +578,21 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
 }
 #endif
+
+#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+
+long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+				  unsigned long arg);
+
+#else
+
+static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
+						unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+#endif
+
 #endif
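The arch handlers earlier in this commit switch their default ioctl return from -EINVAL to -ENOTTY for the same reason this stub returns -ENOTTY: "not recognized here" becomes distinguishable from "bad argument", so the generic VM ioctl path can fall through to the assigned-device handler. A hedged sketch of that dispatch, with a hypothetical wrapper name:

```c
/* Sketch of the dispatch this enables (wrapper name is illustrative). */
static long vm_ioctl_dispatch(struct kvm *kvm, struct file *filp,
			      unsigned int ioctl, unsigned long arg)
{
	long r = kvm_arch_vm_ioctl(filp, ioctl, arg);

	if (r == -ENOTTY)	/* arch didn't recognize it; try the next layer */
		r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	return r;
}
```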
@@ -61,10 +61,8 @@ irqfd_inject(struct work_struct *work)
 	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
 	struct kvm *kvm = irqfd->kvm;

-	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
 	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
-	mutex_unlock(&kvm->irq_lock);
 }

 /*
@@ -182,6 +182,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 	union kvm_ioapic_redirect_entry entry;
 	int ret = 1;

+	mutex_lock(&ioapic->lock);
 	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
 		entry = ioapic->redirtbl[irq];
 		level ^= entry.fields.polarity;

@@ -198,34 +199,51 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
 		}
 		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	}
+	mutex_unlock(&ioapic->lock);
+
 	return ret;
 }

-static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int pin,
+static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
 				    int trigger_mode)
 {
-	union kvm_ioapic_redirect_entry *ent;
+	int i;
+
+	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

-	ent = &ioapic->redirtbl[pin];
+		if (ent->fields.vector != vector)
+			continue;

-	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
+		/*
+		 * We are dropping lock while calling ack notifiers because ack
+		 * notifier callbacks for assigned devices call into IOAPIC
+		 * recursively. Since remote_irr is cleared only after call
+		 * to notifiers if the same vector will be delivered while lock
+		 * is dropped it will be put into irr and will be delivered
+		 * after ack notifier returns.
+		 */
+		mutex_unlock(&ioapic->lock);
+		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
+		mutex_lock(&ioapic->lock);
+
+		if (trigger_mode != IOAPIC_LEVEL_TRIG)
+			continue;

-	if (trigger_mode == IOAPIC_LEVEL_TRIG) {
 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
 		ent->fields.remote_irr = 0;
-		if (!ent->fields.mask && (ioapic->irr & (1 << pin)))
-			ioapic_service(ioapic, pin);
+		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
+			ioapic_service(ioapic, i);
 	}
 }

 void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
 {
 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-	int i;

-	for (i = 0; i < IOAPIC_NUM_PINS; i++)
-		if (ioapic->redirtbl[i].fields.vector == vector)
-			__kvm_ioapic_update_eoi(ioapic, i, trigger_mode);
+	mutex_lock(&ioapic->lock);
+	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
+	mutex_unlock(&ioapic->lock);
 }

 static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)

@@ -250,8 +268,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	ioapic_debug("addr %lx\n", (unsigned long)addr);
 	ASSERT(!(addr & 0xf));	/* check alignment */

-	mutex_lock(&ioapic->kvm->irq_lock);
 	addr &= 0xff;
+	mutex_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		result = ioapic->ioregsel;

@@ -265,6 +283,8 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 		result = 0;
 		break;
 	}
+	mutex_unlock(&ioapic->lock);
+
 	switch (len) {
 	case 8:
 		*(u64 *) val = result;

@@ -277,7 +297,6 @@ static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
 	default:
 		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
 	}
-	mutex_unlock(&ioapic->kvm->irq_lock);
 	return 0;
 }

@@ -293,15 +312,15 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		     (void*)addr, len, val);
 	ASSERT(!(addr & 0xf));	/* check alignment */

-	mutex_lock(&ioapic->kvm->irq_lock);
 	if (len == 4 || len == 8)
 		data = *(u32 *) val;
 	else {
 		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
-		goto unlock;
+		return 0;
 	}

 	addr &= 0xff;
+	mutex_lock(&ioapic->lock);
 	switch (addr) {
 	case IOAPIC_REG_SELECT:
 		ioapic->ioregsel = data;

@@ -312,15 +331,14 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 		break;
 #ifdef	CONFIG_IA64
 	case IOAPIC_REG_EOI:
-		kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG);
+		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
 		break;
 #endif

 	default:
 		break;
 	}
-unlock:
-	mutex_unlock(&ioapic->kvm->irq_lock);
+	mutex_unlock(&ioapic->lock);
 	return 0;
 }

@@ -349,6 +367,7 @@ int kvm_ioapic_init(struct kvm *kvm)
 	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
 	if (!ioapic)
 		return -ENOMEM;
+	mutex_init(&ioapic->lock);
 	kvm->arch.vioapic = ioapic;
 	kvm_ioapic_reset(ioapic);
 	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);

@@ -360,3 +379,26 @@ int kvm_ioapic_init(struct kvm *kvm)

 	return ret;
 }
+
+int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+{
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	if (!ioapic)
+		return -EINVAL;
+
+	mutex_lock(&ioapic->lock);
+	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
+	mutex_unlock(&ioapic->lock);
+	return 0;
+}
+
+int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
+{
+	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
+	if (!ioapic)
+		return -EINVAL;
+
+	mutex_lock(&ioapic->lock);
+	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
+	mutex_unlock(&ioapic->lock);
+	return 0;
+}
@@ -41,9 +41,11 @@ struct kvm_ioapic {
 	u32 irr;
 	u32 pad;
 	union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS];
+	unsigned long irq_states[IOAPIC_NUM_PINS];
 	struct kvm_io_device dev;
 	struct kvm *kvm;
 	void (*ack_notifier)(void *opaque, int irq);
+	struct mutex lock;
 };

 #ifdef DEBUG

@@ -73,4 +75,7 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq);
+int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
+int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 #endif