Commit 1b0973bd authored by Avi Kivity, committed by Linus Torvalds

[PATCH] KVM: MMU: Use the guest pdptrs instead of mapping cr3 in pae mode

This lets us not write protect a partial page, and is anyway what a real
processor does.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 17ac10ad
@@ -1491,6 +1491,8 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
 	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
+	if (!is_long_mode(vcpu) && is_pae(vcpu))
+		load_pdptrs(vcpu, vcpu->cr3);
 	if (mmu_reset_needed)
 		kvm_mmu_reset_context(vcpu);
......
@@ -67,18 +67,28 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 	hpa_t hpa;
 	struct kvm_memory_slot *slot;
 	pt_element_t *ptep;
+	pt_element_t root;

 	walker->level = vcpu->mmu.root_level;
-	walker->table_gfn = (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+	walker->table = NULL;
+	root = vcpu->cr3;
+#if PTTYPE == 64
+	if (!is_long_mode(vcpu)) {
+		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
+		root = *walker->ptep;
+		if (!(root & PT_PRESENT_MASK))
+			return;
+		--walker->level;
+	}
+#endif
+	walker->table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 	slot = gfn_to_memslot(vcpu->kvm, walker->table_gfn);
-	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
+	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
 	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
 	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
-	walker->table = (pt_element_t *)( (unsigned long)walker->table |
-		(unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );

 	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

 	for (;;) {
@@ -89,11 +99,8 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
 		       ((unsigned long)ptep & PAGE_MASK));

-		/* Don't set accessed bit on PAE PDPTRs */
-		if (vcpu->mmu.root_level != 3 || walker->level != 3)
-			if ((*ptep & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
-			    == PT_PRESENT_MASK)
-				*ptep |= PT_ACCESSED_MASK;
+		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
+			*ptep |= PT_ACCESSED_MASK;

 		if (!is_present_pte(*ptep) ||
 		    walker->level == PT_PAGE_TABLE_LEVEL ||
@@ -116,7 +123,8 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 static void FNAME(release_walker)(struct guest_walker *walker)
 {
-	kunmap_atomic(walker->table, KM_USER0);
+	if (walker->table)
+		kunmap_atomic(walker->table, KM_USER0);
 }

 static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment