Commit 8dae4445 authored by Takuya Yoshikawa, committed by Marcelo Tosatti

KVM: rename is_writeble_pte() to is_writable_pte()

There are two spellings of "writable" in
arch/x86/kvm/mmu.c and paging_tmpl.h.

This patch renames is_writeble_pte() to is_writable_pte()
to make grepping easier.

  The new name is consistent with the function's own definition:
  return pte & PT_WRITABLE_MASK;
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent c25bc163
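
As a side note, the predicate quoted in the commit message can be illustrated with a minimal, self-contained sketch. The PT_WRITABLE_SHIFT/PT_WRITABLE_MASK values below are assumptions restating the usual x86 page-table R/W bit (bit 1) for illustration, not lines copied from the kernel tree:

#include <stdio.h>

/* Assumed values mirroring the usual x86 PTE layout: bit 1 is the R/W bit. */
#define PT_WRITABLE_SHIFT 1
#define PT_WRITABLE_MASK  (1ULL << PT_WRITABLE_SHIFT)

/* The renamed helper: non-zero iff the PTE has the writable bit set. */
static int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

int main(void)
{
	unsigned long ro_pte = 0x1;   /* present, read-only */
	unsigned long rw_pte = 0x3;   /* present, writable  */

	printf("%d %d\n", is_writable_pte(ro_pte) != 0, is_writable_pte(rw_pte) != 0);
	return 0;
}
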
arch/x86/kvm/mmu.c

@@ -250,7 +250,7 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_writeble_pte(unsigned long pte)
+static int is_writable_pte(unsigned long pte)
 {
 	return pte & PT_WRITABLE_MASK;
 }
@@ -632,7 +632,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	pfn = spte_to_pfn(*spte);
 	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writeble_pte(*spte))
+	if (is_writable_pte(*spte))
 		kvm_set_pfn_dirty(pfn);
 	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
 	if (!*rmapp) {
@@ -708,7 +708,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!spte);
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		if (is_writeble_pte(*spte)) {
+		if (is_writable_pte(*spte)) {
 			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
@@ -732,7 +732,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
 		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-		if (is_writeble_pte(*spte)) {
+		if (is_writable_pte(*spte)) {
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
 			__set_spte(spte, shadow_trap_nonpresent_pte);
@@ -787,7 +787,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			new_spte &= ~PT_WRITABLE_MASK;
 			new_spte &= ~SPTE_HOST_WRITEABLE;
-			if (is_writeble_pte(*spte))
+			if (is_writable_pte(*spte))
 				kvm_set_pfn_dirty(spte_to_pfn(*spte));
 			__set_spte(spte, new_spte);
 			spte = rmap_next(kvm, rmapp, spte);
@@ -1847,7 +1847,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		 * is responsibility of mmu_get_page / kvm_sync_page.
 		 * Same reasoning can be applied to dirty page accounting.
 		 */
-		if (!can_unsync && is_writeble_pte(*sptep))
+		if (!can_unsync && is_writable_pte(*sptep))
 			goto set_pte;
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1855,7 +1855,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				 __func__, gfn);
 			ret = 1;
 			pte_access &= ~ACC_WRITE_MASK;
-			if (is_writeble_pte(spte))
+			if (is_writable_pte(spte))
 				spte &= ~PT_WRITABLE_MASK;
 		}
 	}
@@ -1876,7 +1876,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    bool reset_host_protection)
 {
 	int was_rmapped = 0;
-	int was_writeble = is_writeble_pte(*sptep);
+	int was_writable = is_writable_pte(*sptep);
 	int rmap_count;
 	pgprintk("%s: spte %llx access %x write_fault %d"
@@ -1927,7 +1927,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
 	} else {
-		if (was_writeble)
+		if (was_writable)
 			kvm_release_pfn_dirty(pfn);
 		else
 			kvm_release_pfn_clean(pfn);
arch/x86/kvm/paging_tmpl.h

@@ -162,7 +162,7 @@ walk:
 		if (rsvd_fault)
 			goto access_error;
-		if (write_fault && !is_writeble_pte(pte))
+		if (write_fault && !is_writable_pte(pte))
 			if (user_fault || is_write_protection(vcpu))
 				goto access_error;