Commit 3200f405 authored by Marcelo Tosatti, committed by Avi Kivity

KVM: MMU: unify slots_lock usage

Unify slots_lock acquisition around vcpu_run(). This is simpler and less
error-prone.

Also fix some callsites that were not grabbing the lock properly.

[avi: drop slots_lock while in guest mode to avoid holding the lock
      for indefinite periods]
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 25c5f225
@@ -1204,8 +1204,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	struct page *page;
 
-	down_read(&vcpu->kvm->slots_lock);
-
 	down_read(&current->mm->mmap_sem);
 	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
@@ -1218,7 +1216,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	/* mmio */
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
@@ -1228,7 +1225,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 			 PT32E_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
@@ -1376,9 +1372,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 		largepage = 1;
 	}
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&current->mm->mmap_sem);
 		return 1;
 	}
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -1386,7 +1382,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
 			 largepage, gfn, page, TDP_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
 	return r;
 }
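With slots_lock now held by the caller, the fault handlers only manage mmap_sem themselves, and the tdp_page_fault hunk above also narrows that window: mmap_sem is released immediately after gfn_to_page() instead of separately on each exit path. A hedged sketch of the resulting shape (map_the_page() is a placeholder for __direct_map() and friends):

        static int fault_path_sketch(struct kvm_vcpu *vcpu, gfn_t gfn)
        {
                struct page *page;
                int r;

                /* caller (the vcpu_run path) already holds kvm->slots_lock for read */
                down_read(&current->mm->mmap_sem);      /* only for the host-page lookup */
                page = gfn_to_page(vcpu->kvm, gfn);
                up_read(&current->mm->mmap_sem);        /* dropped before any early return */

                if (is_error_page(page)) {
                        kvm_release_page_clean(page);
                        return 1;                       /* no unlock left to forget */
                }

                spin_lock(&vcpu->kvm->mmu_lock);
                r = map_the_page(vcpu, gfn, page);      /* placeholder for the mapping step */
                spin_unlock(&vcpu->kvm->mmu_lock);
                return r;
        }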
@@ -1808,9 +1803,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&vcpu->kvm->slots_lock);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -2063,7 +2056,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 	if (r)
 		return r;
-	if (!__emulator_write_phys(vcpu, addr, &value, bytes))
+	if (!emulator_write_phys(vcpu, addr, &value, bytes))
 		return -EFAULT;
 	return 1;
@@ -2127,7 +2120,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 	int r;
 	struct kvm_pv_mmu_op_buffer buffer;
 
-	down_read(&vcpu->kvm->slots_lock);
 	down_read(&current->mm->mmap_sem);
 
 	buffer.ptr = buffer.buf;
@@ -2150,7 +2142,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 out:
 	*ret = buffer.processed;
 	up_read(&current->mm->mmap_sem);
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
...
@@ -388,7 +388,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;
 
-	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
@@ -402,7 +401,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __func__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}
@@ -422,7 +420,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (is_error_page(page)) {
 		pgprintk("gfn %x is mmio\n", walker.gfn);
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
@@ -440,7 +437,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&vcpu->kvm->slots_lock);
 	return write_pt;
 }
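For reference, FNAME() here is a token-pasting macro: paging_tmpl.h is compiled once per guest paging mode, so the hunks above change both the 32-bit and 64-bit instantiations of the page fault handler at once. Roughly:

        /* Sketch of the mechanism in paging_tmpl.h (simplified). */
        #if PTTYPE == 64
                #define FNAME(name) paging##64_##name
        #elif PTTYPE == 32
                #define FNAME(name) paging##32_##name
        #endif

        /* FNAME(page_fault) therefore defines paging64_page_fault or
         * paging32_page_fault, depending on which inclusion is compiling. */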
...
@@ -1505,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
 	int ret = 0;
 	int r;
 
-	down_read(&kvm->slots_lock);
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -1528,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)
 	ret = 1;
 out:
-	up_read(&kvm->slots_lock);
 	return ret;
 }
@@ -1730,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;
 
+	down_read(&vcpu->kvm->slots_lock);
 	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
	}
@@ -1833,9 +1832,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	vpid_sync_vcpu_all(vmx);
 
-	return 0;
+	ret = 0;
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	return ret;
 }
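Note the small refactor in vmx_vcpu_reset(): because the function now takes slots_lock itself, the early "return 0" becomes "ret = 0" falling through to the out: label, so every path releases the lock exactly once. The general shape, as a sketch (setup_step() is a placeholder for init_rmode_tss() and the rest of the reset work):

        static int locked_init_sketch(struct kvm *kvm)
        {
                int ret;

                down_read(&kvm->slots_lock);
                if (!setup_step(kvm)) {         /* placeholder for the real init steps */
                        ret = -ENOMEM;
                        goto out;               /* error paths share the unlock */
                }
                /* ... more initialization ... */
                ret = 0;                        /* a bare "return 0;" would leak the lock */
        out:
                up_read(&kvm->slots_lock);
                return ret;
        }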
...
@@ -201,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	int ret;
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-	down_read(&vcpu->kvm->slots_lock);
 	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
 				  offset * sizeof(u64), sizeof(pdpte));
 	if (ret < 0) {
@@ -218,7 +217,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-	up_read(&vcpu->kvm->slots_lock);
 
 	return ret;
 }
@@ -233,13 +231,11 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
-	down_read(&vcpu->kvm->slots_lock);
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;
 	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-	up_read(&vcpu->kvm->slots_lock);
 
 	return changed;
 }
@@ -377,7 +373,6 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		 */
 	}
 
-	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Does the new cr3 value map to physical memory? (Note, we
 	 * catch an invalid cr3 even in real-mode, because it would
@@ -393,7 +388,6 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		vcpu->arch.cr3 = cr3;
 		vcpu->arch.mmu.new_cr3(vcpu);
 	}
-	up_read(&vcpu->kvm->slots_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
@@ -503,7 +497,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
 	version++;
 
-	down_read(&kvm->slots_lock);
 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 
 	wc_ts = current_kernel_time();
@@ -515,7 +508,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
 	version++;
 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
-	up_read(&kvm->slots_lock);
 }
 
 static void kvm_write_guest_time(struct kvm_vcpu *v)
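The wall-clock code around this hunk (unchanged here apart from the lock) uses an even/odd version protocol: the version is bumped to an odd value before the time fields are written and to an even value afterwards, so the guest can detect a torn read. Schematically, with the guest-side reader shown as a hypothetical illustration of why both bumps matter:

        /* host writer, matching the two version++ writes above */
        version++;                              /* odd: update in progress */
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
        /* ... write the wall-clock time fields ... */
        version++;                              /* even: update complete */
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /* guest reader (hypothetical sketch): retry across in-flight updates */
        do {
                v1 = wc->version;
                rmb();
                read_wall_clock_fields(wc);     /* placeholder for the field reads */
                rmb();
                v2 = wc->version;
        } while ((v1 & 1) || v1 != v2);         /* odd or changed: writer was active */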
@@ -609,10 +601,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		vcpu->arch.hv_clock.tsc_shift = 22;
 
 		down_read(&current->mm->mmap_sem);
-		down_read(&vcpu->kvm->slots_lock);
 		vcpu->arch.time_page =
 				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-		up_read(&vcpu->kvm->slots_lock);
 		up_read(&current->mm->mmap_sem);
 
 		if (is_error_page(vcpu->arch.time_page)) {
@@ -715,9 +705,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 
 	vcpu_load(vcpu);
 
+	down_read(&vcpu->kvm->slots_lock);
 	for (i = 0; i < msrs->nmsrs; ++i)
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
+	up_read(&vcpu->kvm->slots_lock);
 
 	vcpu_put(vcpu);
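The flip side of unifying acquisition around vcpu_run() is that paths entered directly from ioctls, like __msr_io() above or the MMIO-completion emulate_instruction() call further down, must now take slots_lock themselves, since no caller holds it for them. It is also worth noting what the old code got wrong here: kvm_set_msr_common() took mmap_sem before slots_lock, while nonpaging_map() took them in the opposite order, exactly the kind of inconsistency the commit message calls error-prone. The ioctl-side rule, as a sketch (do_one_op() is a placeholder):

        static int vcpu_ioctl_op_sketch(struct kvm_vcpu *vcpu)
        {
                int r;

                vcpu_load(vcpu);
                down_read(&vcpu->kvm->slots_lock);      /* nobody upstream holds it */
                r = do_one_op(vcpu);                    /* may resolve gfns / memslots */
                up_read(&vcpu->kvm->slots_lock);
                vcpu_put(vcpu);
                return r;
        }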
@@ -1768,7 +1760,6 @@ int emulator_read_std(unsigned long addr,
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
-	down_read(&vcpu->kvm->slots_lock);
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 		unsigned offset = addr & (PAGE_SIZE-1);
@@ -1790,7 +1781,6 @@ int emulator_read_std(unsigned long addr,
 		addr += tocopy;
 	}
 out:
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1809,9 +1799,7 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-	up_read(&vcpu->kvm->slots_lock);
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1844,7 +1832,7 @@ mmio:
 	return X86EMUL_UNHANDLEABLE;
 }
 
-int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes)
 {
 	int ret;
@@ -1856,17 +1844,6 @@ int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	return 1;
 }
 
-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			       const void *val, int bytes)
-{
-	int ret;
-
-	down_read(&vcpu->kvm->slots_lock);
-	ret = __emulator_write_phys(vcpu, gpa, val, bytes);
-	up_read(&vcpu->kvm->slots_lock);
-	return ret;
-}
-
 static int emulator_write_emulated_onepage(unsigned long addr,
 					   const void *val,
 					   unsigned int bytes,
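With every emulator entry point now called under slots_lock, the lock-taking emulator_write_phys() wrapper is redundant and __emulator_write_phys() simply takes over the shorter name. A hedged reconstruction of the surviving function; the body is inferred from the context lines here ("int ret; ... return 1;"), not quoted from the patch:

        int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                                const void *val, int bytes)
        {
                int ret;

                /* caller holds vcpu->kvm->slots_lock for read */
                ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
                if (ret < 0)
                        return 0;
                kvm_mmu_pte_write(vcpu, gpa, val, bytes);
                return 1;
        }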
@@ -1875,9 +1852,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	struct kvm_io_device *mmio_dev;
 	gpa_t gpa;
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-	up_read(&vcpu->kvm->slots_lock);
 
 	if (gpa == UNMAPPED_GVA) {
 		kvm_inject_page_fault(vcpu, addr, 2);
@@ -1954,7 +1929,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		char *kaddr;
 		u64 val;
 
-		down_read(&vcpu->kvm->slots_lock);
 		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 		if (gpa == UNMAPPED_GVA ||
@@ -1974,9 +1948,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
 		kunmap_atomic(kaddr, KM_USER0);
 		kvm_release_page_dirty(page);
-	emul_write:
-		up_read(&vcpu->kvm->slots_lock);
 	}
+emul_write:
 #endif
 
 	return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -2368,10 +2341,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		kvm_x86_ops->skip_emulated_instruction(vcpu);
 
 	for (i = 0; i < nr_pages; ++i) {
-		down_read(&vcpu->kvm->slots_lock);
 		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
 		vcpu->arch.pio.guest_pages[i] = page;
-		up_read(&vcpu->kvm->slots_lock);
 		if (!page) {
 			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
@@ -2445,7 +2416,9 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	++vcpu->stat.halt_exits;
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+		up_read(&vcpu->kvm->slots_lock);
 		kvm_vcpu_block(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
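kvm_emulate_halt() illustrates the one hard rule of the new scheme: slots_lock must not be held across anything that can sleep for long, here kvm_vcpu_block(), which may not return until the next interrupt. Expressed as a hypothetical helper (not part of the patch, just the pattern):

        /* Hypothetical helper expressing the rule. */
        static void vcpu_block_dropping_slots_lock(struct kvm_vcpu *vcpu)
        {
                up_read(&vcpu->kvm->slots_lock);        /* let memslot writers make
                                                         * progress while we sleep */
                kvm_vcpu_block(vcpu);                   /* may sleep indefinitely */
                down_read(&vcpu->kvm->slots_lock);      /* restore the caller's
                                                         * locking invariant */
        }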
@@ -2738,6 +2711,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
 	}
 
+	down_read(&vcpu->kvm->slots_lock);
 	vapic_enter(vcpu);
 
 preempted:
@@ -2811,6 +2785,8 @@ again:
 
 	kvm_lapic_sync_to_vapic(vcpu);
 
+	up_read(&vcpu->kvm->slots_lock);
+
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
@@ -2837,6 +2813,8 @@ again:
 
 	preempt_enable();
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	/*
 	 * Profile KVM exit RIPs:
 	 */
@@ -2864,14 +2842,18 @@ again:
 	}
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	if (r > 0) {
 		kvm_resched(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		goto preempted;
 	}
 
 	post_kvm_run_save(vcpu, kvm_run);
 
+	down_read(&vcpu->kvm->slots_lock);
 	vapic_exit(vcpu);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
@@ -2906,9 +2888,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
+
+		down_read(&vcpu->kvm->slots_lock);
 		r = emulate_instruction(vcpu, kvm_run,
 					vcpu->arch.mmio_fault_cr2, 0,
 					EMULTYPE_NO_DECODE);
+		up_read(&vcpu->kvm->slots_lock);
 		if (r == EMULATE_DO_MMIO) {
 			/*
 			 * Read-modify-write.  Back to userspace.
@@ -3817,7 +3802,9 @@ fail:
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 	kvm_free_lapic(vcpu);
+	down_read(&vcpu->kvm->slots_lock);
 	kvm_mmu_destroy(vcpu);
+	up_read(&vcpu->kvm->slots_lock);
 	free_page((unsigned long)vcpu->arch.pio_data);
 }
...
@@ -446,7 +446,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 
-int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			  const void *val, int bytes);
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+			const void *val, int bytes);
 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 		  gpa_t addr, unsigned long *ret);
...