Commit 954bbbc2 authored by Avi Kivity

KVM: Simplify gfn_to_page()

Mapping a guest page to a host page is a common operation.  Currently,
one first has to find the memory slot the page belongs to (gfn_to_memslot()),
and then look up the page itself within that slot (gfn_to_page()).

This is clumsy, and it also won't work well with memory aliases.  So simplify
gfn_to_page() so that it no longer requires the memory slot translation to be
done first; instead, it performs the translation internally.
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent e0fa826f
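
For readers skimming the diff below, here is a minimal caller-side sketch (not part of the patch; map_guest_frame() and the BEFORE_THIS_PATCH switch are hypothetical, used only to contrast the two interfaces):

/*
 * Illustrative sketch only -- a hypothetical helper, not code from this
 * patch.  It contrasts the old two-step lookup with the new single call.
 */
static struct page *map_guest_frame(struct kvm *kvm, gfn_t gfn)
{
#ifdef BEFORE_THIS_PATCH
	/* Old interface: callers had to resolve the memory slot themselves. */
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

	if (!slot)
		return NULL;			/* gfn not covered by any slot */
	return gfn_to_page(slot, gfn);		/* old gfn_to_page() took a slot */
#else
	/* New interface: the slot lookup happens inside gfn_to_page(). */
	return gfn_to_page(kvm, gfn);		/* returns NULL for an unmapped gfn */
#endif
}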
@@ -443,11 +443,7 @@ void kvm_emulator_want_group7_invlpg(void);
 extern hpa_t bad_page_address;
 
-static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
-{
-	return slot->phys_mem[gfn - slot->base_gfn];
-}
-
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
@@ -523,12 +519,6 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	return vcpu->mmu.page_fault(vcpu, gva, error_code);
 }
 
-static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
-{
-	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
-	return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
-}
-
 static inline int is_long_mode(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
@@ -420,12 +420,12 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	u64 pdpte;
 	u64 *pdpt;
 	int ret;
-	struct kvm_memory_slot *memslot;
+	struct page *page;
 
 	spin_lock(&vcpu->kvm->lock);
-	memslot = gfn_to_memslot(vcpu->kvm, pdpt_gfn);
-	/* FIXME: !memslot - emulate? 0xff? */
-	pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
+	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
+	/* FIXME: !page - emulate? 0xff? */
+	pdpt = kmap_atomic(page, KM_USER0);
 
 	ret = 1;
 	for (i = 0; i < 4; ++i) {
@@ -861,6 +861,17 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot)
+		return NULL;
+	return slot->phys_mem[gfn - slot->base_gfn];
+}
+EXPORT_SYMBOL_GPL(gfn_to_page);
+
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
@@ -899,20 +910,20 @@ static int emulator_read_std(unsigned long addr,
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
 		unsigned long pfn;
-		struct kvm_memory_slot *memslot;
-		void *page;
+		struct page *page;
+		void *page_virt;
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
 		pfn = gpa >> PAGE_SHIFT;
-		memslot = gfn_to_memslot(vcpu->kvm, pfn);
-		if (!memslot)
+		page = gfn_to_page(vcpu->kvm, pfn);
+		if (!page)
 			return X86EMUL_UNHANDLEABLE;
-		page = kmap_atomic(gfn_to_page(memslot, pfn), KM_USER0);
-		memcpy(data, page + offset, tocopy);
-		kunmap_atomic(page, KM_USER0);
+		page_virt = kmap_atomic(page, KM_USER0);
+		memcpy(data, page_virt + offset, tocopy);
+		kunmap_atomic(page_virt, KM_USER0);
 
 		bytes -= tocopy;
 		data += tocopy;
@@ -963,16 +974,14 @@ static int emulator_read_emulated(unsigned long addr,
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			       unsigned long val, int bytes)
 {
-	struct kvm_memory_slot *m;
 	struct page *page;
 	void *virt;
 
 	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
 		return 0;
-	m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (!m)
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (!page)
 		return 0;
-	page = gfn_to_page(m, gpa >> PAGE_SHIFT);
 	kvm_mmu_pre_write(vcpu, gpa, bytes);
 	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
 	virt = kmap_atomic(page, KM_USER0);
@@ -2516,15 +2525,11 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 {
 	struct kvm *kvm = vma->vm_file->private_data;
 	unsigned long pgoff;
-	struct kvm_memory_slot *slot;
 	struct page *page;
 
 	*type = VM_FAULT_MINOR;
 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	slot = gfn_to_memslot(kvm, pgoff);
-	if (!slot)
-		return NOPAGE_SIGBUS;
-	page = gfn_to_page(slot, pgoff);
+	page = gfn_to_page(kvm, pgoff);
 	if (!page)
 		return NOPAGE_SIGBUS;
 	get_page(page);
@@ -390,13 +390,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm *kvm = vcpu->kvm;
 	struct page *page;
-	struct kvm_memory_slot *slot;
 	struct kvm_rmap_desc *desc;
 	u64 *spte;
 
-	slot = gfn_to_memslot(kvm, gfn);
-	BUG_ON(!slot);
-	page = gfn_to_page(slot, gfn);
+	page = gfn_to_page(kvm, gfn);
+	BUG_ON(!page);
 
 	while (page_private(page)) {
 		if (!(page_private(page) & 1))
@@ -711,14 +709,12 @@ hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-	struct kvm_memory_slot *slot;
 	struct page *page;
 
 	ASSERT((gpa & HPA_ERR_MASK) == 0);
-	slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-	if (!slot)
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (!page)
 		return gpa | HPA_ERR_MASK;
-	page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
 	return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
 		| (gpa & (PAGE_SIZE-1));
 }
@@ -926,9 +926,9 @@ static int init_rmode_tss(struct kvm* kvm)
 	gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
 	char *page;
 
-	p1 = _gfn_to_page(kvm, fn++);
-	p2 = _gfn_to_page(kvm, fn++);
-	p3 = _gfn_to_page(kvm, fn);
+	p1 = gfn_to_page(kvm, fn++);
+	p2 = gfn_to_page(kvm, fn++);
+	p3 = gfn_to_page(kvm, fn);
 	if (!p1 || !p2 || !p3) {
 		kvm_printf(kvm,"%s: gfn_to_page failed\n", __FUNCTION__);