Commit cd0a0d23 authored by Avi Kivity, committed by Greg Kroah-Hartman

KVM: Avoid instruction emulation when event delivery is pending

(cherry-picked from commit 577bdc49)

When an event (such as an interrupt) is injected, and the stack is
shadowed (and therefore write protected), the guest will exit.  The
current code will see that the stack is shadowed and emulate a few
instructions, each time postponing the injection.  Eventually the
injection may succeed, but at that time the guest may be unwilling
to accept the interrupt (for example, the TPR may have changed).

This occurs every once in a while during a Windows 2008 boot.

Fix by unshadowing the fault address if the fault was due to an event
injection.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 3756d6be
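The diff below applies this to both the SVM and VMX page-fault exit paths: when the fault was taken while an event was being delivered, the shadow page covering the faulting address is unprotected so the guest's stack push can complete natively instead of being emulated. A minimal sketch of the shared idea, assuming a hypothetical fault_during_event_delivery() helper in place of the vendor-specific checks (exit_int_info on SVM, the IDT-vectoring info on VMX):

/*
 * Illustrative sketch only; the real code is pf_interception() (SVM)
 * and handle_exception() (VMX) in the diff below.
 */
static int guest_page_fault(struct kvm_vcpu *vcpu, gva_t fault_address,
			    u32 error_code)
{
	/* The fault hit a shadowed (write-protected) page while an event
	 * was being injected: unshadow it so the guest can retry the
	 * access directly and accept the event without emulation. */
	if (fault_during_event_delivery(vcpu))	/* hypothetical helper */
		kvm_mmu_unprotect_page_virt(vcpu, fault_address);

	return kvm_mmu_page_fault(vcpu, fault_address, error_code);
}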
@@ -1792,6 +1792,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
@@ -1007,13 +1007,18 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
+	bool event_injection = false;
 
 	if (!irqchip_in_kernel(kvm) &&
-		is_external_interrupt(exit_int_info))
+	    is_external_interrupt(exit_int_info)) {
+		event_injection = true;
 		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
+	if (event_injection)
+		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
@@ -2258,6 +2258,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
 			    (u32)((u64)cr2 >> 32), handler);
+		if (vect_info & VECTORING_INFO_VALID_MASK)
+			kvm_mmu_unprotect_page_virt(vcpu, cr2);
 		return kvm_mmu_page_fault(vcpu, cr2, error_code);
 	}