Commit 8986ecc0 authored by Marcelo Tosatti, committed by Avi Kivity

KVM: x86: check for cr3 validity in mmu_alloc_roots

Verify that the cr3 address stored in vcpu->arch.cr3 points to an existent
memslot. If not, inject a triple fault.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent b43b1901
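The new helper leans on kvm_is_visible_gfn() to decide whether the guest frame number taken from cr3 is covered by any user-visible memslot. The standalone sketch below models that visibility check with a simplified memslot array, just to make the intent of the patch easy to see in isolation; the struct layout, helper names, and slot sizes are illustrative assumptions, not the kernel's actual definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

/* Simplified stand-in for a KVM memory slot: just a guest frame range. */
struct demo_memslot {
        uint64_t base_gfn;
        uint64_t npages;
};

/*
 * Rough model of the question kvm_is_visible_gfn() answers: is this guest
 * frame number covered by any registered slot?
 */
static bool demo_gfn_is_visible(const struct demo_memslot *slots,
                                size_t nslots, uint64_t gfn)
{
        for (size_t i = 0; i < nslots; i++) {
                if (gfn >= slots[i].base_gfn &&
                    gfn < slots[i].base_gfn + slots[i].npages)
                        return true;
        }
        return false;
}

int main(void)
{
        /* One 128 MB slot starting at guest physical address 0. */
        struct demo_memslot slots[] = { { .base_gfn = 0, .npages = 32768 } };
        uint64_t good_cr3 = 0x3000;        /* page table inside the slot  */
        uint64_t bad_cr3 = 0x10000000ULL;  /* beyond any registered slot  */

        printf("cr3 %#llx visible: %d\n", (unsigned long long)good_cr3,
               demo_gfn_is_visible(slots, 1, good_cr3 >> DEMO_PAGE_SHIFT));
        printf("cr3 %#llx visible: %d\n", (unsigned long long)bad_cr3,
               demo_gfn_is_visible(slots, 1, bad_cr3 >> DEMO_PAGE_SHIFT));
        return 0;
}

Where that check fails, the patch below sets KVM_REQ_TRIPLE_FAULT instead of building a root for an unbacked cr3, and kvm_mmu_load() bails out before loading root_hpa into the hardware.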
@@ -1912,7 +1912,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 
-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
+{
+	int ret = 0;
+
+	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
+		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 	gfn_t root_gfn;
@@ -1927,13 +1939,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		if (tdp_enabled)
 			direct = 1;
+		if (mmu_check_root(vcpu, root_gfn))
+			return 1;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
-		return;
+		return 0;
 	}
 	direct = !is_paging(vcpu);
 	if (tdp_enabled)
@@ -1950,6 +1964,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
+		if (mmu_check_root(vcpu, root_gfn))
+			return 1;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
@@ -1958,6 +1974,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+	return 0;
 }
 
 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -1976,7 +1993,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-		if (root) {
+		if (root && VALID_PAGE(root)) {
 			root &= PT64_BASE_ADDR_MASK;
 			sp = page_header(root);
 			mmu_sync_children(vcpu, sp);
@@ -2311,9 +2328,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 		goto out;
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
-	mmu_alloc_roots(vcpu);
+	r = mmu_alloc_roots(vcpu);
 	mmu_sync_roots(vcpu);
 	spin_unlock(&vcpu->kvm->mmu_lock);
+	if (r)
+		goto out;
 	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 	kvm_mmu_flush_tlb(vcpu);
 out:
...
@@ -4568,6 +4568,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 	kvm_mmu_zap_all(kvm);
+	kvm_reload_remote_mmus(kvm);
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)