Commit 4db35314 authored by Avi Kivity

KVM: MMU: Rename variables of type 'struct kvm_mmu_page *'

These are traditionally named 'page', but even more traditionally, that name
is reserved for variables that point to a 'struct page'.  Rename them to 'sp'
(for "shadow page").
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 1d28f5f4
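
For orientation before the diff, a minimal sketch of the naming convention the rename follows. This is illustrative only and not taken from the commit; 'spte' is an assumed u64 * pointing at a shadow pte, and both helper calls shown here appear in the patch below.

	/* Illustrative sketch: 'page' stays reserved for struct page pointers,
	 * while 'sp' names a struct kvm_mmu_page (a shadow page descriptor). */
	struct kvm_mmu_page *sp = page_header(__pa(spte));  /* descriptor owning this spte      */
	struct page *page = virt_to_page(sp->spt);          /* the struct page backing sp->spt  */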
@@ -367,7 +367,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
  */
 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct kvm_rmap_desc *desc;
 	unsigned long *rmapp;
 	int i;
@@ -375,8 +375,8 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	if (!is_rmap_pte(*spte))
 		return;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
-	page = page_header(__pa(spte));
-	page->gfns[spte - page->spt] = gfn;
+	sp = page_header(__pa(spte));
+	sp->gfns[spte - sp->spt] = gfn;
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
 	if (!*rmapp) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
@@ -429,20 +429,20 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_rmap_desc *desc;
 	struct kvm_rmap_desc *prev_desc;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct page *release_page;
 	unsigned long *rmapp;
 	int i;
 
 	if (!is_rmap_pte(*spte))
 		return;
-	page = page_header(__pa(spte));
+	sp = page_header(__pa(spte));
 	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
 	if (is_writeble_pte(*spte))
 		kvm_release_page_dirty(release_page);
 	else
 		kvm_release_page_clean(release_page);
-	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
+	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
@@ -537,14 +537,13 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm *kvm,
-			      struct kvm_mmu_page *page_head)
+static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	ASSERT(is_empty_shadow_page(page_head->spt));
-	list_del(&page_head->link);
-	__free_page(virt_to_page(page_head->spt));
-	__free_page(virt_to_page(page_head->gfns));
-	kfree(page_head);
+	ASSERT(is_empty_shadow_page(sp->spt));
+	list_del(&sp->link);
+	__free_page(virt_to_page(sp->spt));
+	__free_page(virt_to_page(sp->gfns));
+	kfree(sp);
 	++kvm->n_free_mmu_pages;
 }
 
@@ -556,27 +555,26 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 					       u64 *parent_pte)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	if (!vcpu->kvm->n_free_mmu_pages)
 		return NULL;
 
-	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
-				      sizeof *page);
-	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	set_page_private(virt_to_page(page->spt), (unsigned long)page);
-	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-	ASSERT(is_empty_shadow_page(page->spt));
-	page->slot_bitmap = 0;
-	page->multimapped = 0;
-	page->parent_pte = parent_pte;
+	sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
+	ASSERT(is_empty_shadow_page(sp->spt));
+	sp->slot_bitmap = 0;
+	sp->multimapped = 0;
+	sp->parent_pte = parent_pte;
 	--vcpu->kvm->n_free_mmu_pages;
-	return page;
+	return sp;
 }
 
 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *page, u64 *parent_pte)
+				    struct kvm_mmu_page *sp, u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
 	struct hlist_node *node;
@@ -584,20 +582,20 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 
 	if (!parent_pte)
 		return;
-	if (!page->multimapped) {
-		u64 *old = page->parent_pte;
+	if (!sp->multimapped) {
+		u64 *old = sp->parent_pte;
 
 		if (!old) {
-			page->parent_pte = parent_pte;
+			sp->parent_pte = parent_pte;
 			return;
 		}
-		page->multimapped = 1;
+		sp->multimapped = 1;
 		pte_chain = mmu_alloc_pte_chain(vcpu);
-		INIT_HLIST_HEAD(&page->parent_ptes);
-		hlist_add_head(&pte_chain->link, &page->parent_ptes);
+		INIT_HLIST_HEAD(&sp->parent_ptes);
+		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
 		pte_chain->parent_ptes[0] = old;
 	}
-	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
+	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
 		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
 			continue;
 		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
@@ -608,23 +606,23 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 	}
 	pte_chain = mmu_alloc_pte_chain(vcpu);
 	BUG_ON(!pte_chain);
-	hlist_add_head(&pte_chain->link, &page->parent_ptes);
+	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
 	pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 				       u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
 	struct hlist_node *node;
 	int i;
 
-	if (!page->multimapped) {
-		BUG_ON(page->parent_pte != parent_pte);
-		page->parent_pte = NULL;
+	if (!sp->multimapped) {
+		BUG_ON(sp->parent_pte != parent_pte);
+		sp->parent_pte = NULL;
 		return;
 	}
-	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
+	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
 		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
 			if (!pte_chain->parent_ptes[i])
 				break;
@@ -640,9 +638,9 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 			if (i == 0) {
 				hlist_del(&pte_chain->link);
 				mmu_free_pte_chain(pte_chain);
-				if (hlist_empty(&page->parent_ptes)) {
-					page->multimapped = 0;
-					page->parent_pte = NULL;
+				if (hlist_empty(&sp->parent_ptes)) {
+					sp->multimapped = 0;
+					sp->parent_pte = NULL;
 				}
 			}
 			return;
@@ -650,22 +648,21 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 	BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
-						gfn_t gfn)
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &kvm->mmu_page_hash[index];
-	hlist_for_each_entry(page, node, bucket, hash_link)
-		if (page->gfn == gfn && !page->role.metaphysical) {
+	hlist_for_each_entry(sp, node, bucket, hash_link)
+		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
-				 __FUNCTION__, page->role.word);
-			return page;
+				 __FUNCTION__, sp->role.word);
+			return sp;
 		}
 	return NULL;
 }
@@ -682,7 +679,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 
 	role.word = 0;
@@ -699,35 +696,35 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
-	hlist_for_each_entry(page, node, bucket, hash_link)
-		if (page->gfn == gfn && page->role.word == role.word) {
-			mmu_page_add_parent_pte(vcpu, page, parent_pte);
+	hlist_for_each_entry(sp, node, bucket, hash_link)
+		if (sp->gfn == gfn && sp->role.word == role.word) {
+			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			pgprintk("%s: found\n", __FUNCTION__);
-			return page;
+			return sp;
 		}
-	page = kvm_mmu_alloc_page(vcpu, parent_pte);
-	if (!page)
-		return page;
+	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
+	if (!sp)
+		return sp;
 	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
-	page->gfn = gfn;
-	page->role = role;
-	hlist_add_head(&page->hash_link, bucket);
-	vcpu->mmu.prefetch_page(vcpu, page);
+	sp->gfn = gfn;
+	sp->role = role;
+	hlist_add_head(&sp->hash_link, bucket);
+	vcpu->mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	return page;
+	return sp;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
-					 struct kvm_mmu_page *page)
+					 struct kvm_mmu_page *sp)
 {
 	unsigned i;
 	u64 *pt;
 	u64 ent;
 
-	pt = page->spt;
+	pt = sp->spt;
 
-	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 			if (is_shadow_present_pte(pt[i]))
 				rmap_remove(kvm, &pt[i]);
@@ -749,10 +746,9 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_mmu_page *page,
-			     u64 *parent_pte)
+static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(page, parent_pte);
+	mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
@@ -764,32 +760,31 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 			kvm->vcpus[i]->last_pte_updated = NULL;
 }
 
-static void kvm_mmu_zap_page(struct kvm *kvm,
-			     struct kvm_mmu_page *page)
+static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
 
 	++kvm->stat.mmu_shadow_zapped;
-	while (page->multimapped || page->parent_pte) {
-		if (!page->multimapped)
-			parent_pte = page->parent_pte;
+	while (sp->multimapped || sp->parent_pte) {
+		if (!sp->multimapped)
+			parent_pte = sp->parent_pte;
 		else {
 			struct kvm_pte_chain *chain;
 
-			chain = container_of(page->parent_ptes.first,
+			chain = container_of(sp->parent_ptes.first,
 					     struct kvm_pte_chain, link);
 			parent_pte = chain->parent_ptes[0];
 		}
 		BUG_ON(!parent_pte);
-		kvm_mmu_put_page(page, parent_pte);
+		kvm_mmu_put_page(sp, parent_pte);
 		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
 	}
-	kvm_mmu_page_unlink_children(kvm, page);
-	if (!page->root_count) {
-		hlist_del(&page->hash_link);
-		kvm_mmu_free_page(kvm, page);
+	kvm_mmu_page_unlink_children(kvm, sp);
+	if (!sp->root_count) {
+		hlist_del(&sp->hash_link);
+		kvm_mmu_free_page(kvm, sp);
 	} else
-		list_move(&page->link, &kvm->active_mmu_pages);
+		list_move(&sp->link, &kvm->active_mmu_pages);
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -831,7 +826,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
 	int r;
 
@@ -839,11 +834,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &kvm->mmu_page_hash[index];
-	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
-		if (page->gfn == gfn && !page->role.metaphysical) {
+	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
+		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
-				 page->role.word);
-			kvm_mmu_zap_page(kvm, page);
+				 sp->role.word);
+			kvm_mmu_zap_page(kvm, sp);
 			r = 1;
 		}
 	return r;
@@ -851,21 +846,20 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
-	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-		pgprintk("%s: zap %lx %x\n",
-			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(kvm, page);
+	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
+		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+		kvm_mmu_zap_page(kvm, sp);
 	}
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
 	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
-	struct kvm_mmu_page *page_head = page_header(__pa(pte));
+	struct kvm_mmu_page *sp = page_header(__pa(pte));
 
-	__set_bit(slot, &page_head->slot_bitmap);
+	__set_bit(slot, &sp->slot_bitmap);
 }
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
@@ -951,7 +945,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	if (!VALID_PAGE(vcpu->mmu.root_hpa))
 		return;
@@ -959,8 +953,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->mmu.root_hpa;
 
-		page = page_header(root);
-		--page->root_count;
+		sp = page_header(root);
+		--sp->root_count;
 		vcpu->mmu.root_hpa = INVALID_PAGE;
 		return;
 	}
@@ -970,8 +964,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 		if (root) {
 			root &= PT64_BASE_ADDR_MASK;
-			page = page_header(root);
-			--page->root_count;
+			sp = page_header(root);
+			--sp->root_count;
 		}
 		vcpu->mmu.pae_root[i] = INVALID_PAGE;
 	}
@@ -982,7 +976,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 	gfn_t root_gfn;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
@@ -991,10 +985,10 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		hpa_t root = vcpu->mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
-		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
+		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 					PT64_ROOT_LEVEL, 0, 0, NULL);
-		root = __pa(page->spt);
-		++page->root_count;
+		root = __pa(sp->spt);
+		++sp->root_count;
 		vcpu->mmu.root_hpa = root;
 		return;
 	}
@@ -1011,11 +1005,11 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
 		} else if (vcpu->mmu.root_level == 0)
 			root_gfn = 0;
-		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 				      PT32_ROOT_LEVEL, !is_paging(vcpu),
 				      0, NULL);
-		root = __pa(page->spt);
-		++page->root_count;
+		root = __pa(sp->spt);
+		++sp->root_count;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
@@ -1196,7 +1190,7 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 }
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *page,
+				  struct kvm_mmu_page *sp,
 				  u64 *spte)
 {
 	u64 pte;
@@ -1204,7 +1198,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (page->role.level == PT_PAGE_TABLE_LEVEL)
+		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
 			rmap_remove(vcpu->kvm, spte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1215,23 +1209,21 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *page,
+				  struct kvm_mmu_page *sp,
 				  u64 *spte,
 				  const void *new, int bytes,
 				  int offset_in_pte)
 {
-	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
+	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
 		return;
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
-	if (page->role.glevels == PT32_ROOT_LEVEL)
-		paging32_update_pte(vcpu, page, spte, new, bytes,
-				    offset_in_pte);
+	if (sp->role.glevels == PT32_ROOT_LEVEL)
+		paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
 	else
-		paging64_update_pte(vcpu, page, spte, new, bytes,
-				    offset_in_pte);
+		paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -1266,7 +1258,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
@@ -1296,10 +1288,10 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
-	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
-		if (page->gfn != gfn || page->role.metaphysical)
+	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
+		if (sp->gfn != gfn || sp->role.metaphysical)
 			continue;
-		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
 		misaligned |= bytes < 4;
 		if (misaligned || flooded) {
@@ -1314,15 +1306,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 * page.
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
-				 gpa, bytes, page->role.word);
-			kvm_mmu_zap_page(vcpu->kvm, page);
+				 gpa, bytes, sp->role.word);
+			kvm_mmu_zap_page(vcpu->kvm, sp);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
 		page_offset = offset;
-		level = page->role.level;
+		level = sp->role.level;
 		npte = 1;
-		if (page->role.glevels == PT32_ROOT_LEVEL) {
+		if (sp->role.glevels == PT32_ROOT_LEVEL) {
 			page_offset <<= 1;	/* 32->64 */
 			/*
 			 * A 32-bit pde maps 4MB while the shadow pdes map
@@ -1336,14 +1328,14 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			}
 			quadrant = page_offset >> PAGE_SHIFT;
 			page_offset &= ~PAGE_MASK;
-			if (quadrant != page->role.quadrant)
+			if (quadrant != sp->role.quadrant)
 				continue;
 		}
-		spte = &page->spt[page_offset / sizeof(*spte)];
+		spte = &sp->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
 			entry = *spte;
-			mmu_pte_write_zap_pte(vcpu, page, spte);
-			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
+			mmu_pte_write_zap_pte(vcpu, sp, spte);
+			mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
 					      page_offset & (pte_size - 1));
 			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
 			++spte;
@@ -1362,11 +1354,11 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
-		struct kvm_mmu_page *page;
+		struct kvm_mmu_page *sp;
 
-		page = container_of(vcpu->kvm->active_mmu_pages.prev,
+		sp = container_of(vcpu->kvm->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 }
@@ -1413,12 +1405,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		page = container_of(vcpu->kvm->active_mmu_pages.next,
+		sp = container_of(vcpu->kvm->active_mmu_pages.next,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1480,16 +1472,16 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
-	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
 		int i;
 		u64 *pt;
 
-		if (!test_bit(slot, &page->slot_bitmap))
+		if (!test_bit(slot, &sp->slot_bitmap))
 			continue;
 
-		pt = page->spt;
+		pt = sp->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK)
@@ -1499,10 +1491,10 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
-	struct kvm_mmu_page *page, *node;
+	struct kvm_mmu_page *sp, *node;
 
-	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
-		kvm_mmu_zap_page(kvm, page);
+	list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
+		kvm_mmu_zap_page(kvm, sp);
 
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -1668,13 +1660,13 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 static int count_writable_mappings(struct kvm_vcpu *vcpu)
 {
 	int nmaps = 0;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	int i;
 
-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		u64 *pt = page->spt;
+	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+		u64 *pt = sp->spt;
 
-		if (page->role.level != PT_PAGE_TABLE_LEVEL)
+		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
 			continue;
 
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -1702,23 +1694,23 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 static void audit_write_protection(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	gfn_t gfn;
 
-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		if (page->role.metaphysical)
+	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+		if (sp->role.metaphysical)
 			continue;
 
-		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
-		gfn = unalias_gfn(vcpu->kvm, page->gfn);
+		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
+		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 		if (*rmapp)
 			printk(KERN_ERR "%s: (%s) shadow page has writable"
 			       " mappings: gfn %lx role %x\n",
-			       __FUNCTION__, audit_msg, page->gfn,
-			       page->role.word);
+			       __FUNCTION__, audit_msg, sp->gfn,
+			       sp->role.word);
 	}
 }