Commit decc9016 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: Fix halt emulation logic

The common KVM halt logic was changed on the x86 side without a matching
update for ia64.  This patch brings the ia64 halt emulation in line with
the new common code.

Fixes a regression causing guests to hang with more than 2 vcpus.
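
For context, the contract with the common code works like this: generic
kvm_vcpu_block() puts the vcpu to sleep, polls kvm_cpu_has_interrupt() and
kvm_cpu_has_pending_timer(), and sets KVM_REQ_UNHALT once the vcpu should
resume; the architecture code must then clear that bit and move the vcpu
back to KVM_MP_STATE_RUNNABLE.  The sketch below illustrates that handshake
only; arch_halt() is a hypothetical name, not a function from this patch or
the KVM tree:

	/*
	 * Illustrative sketch of the common-code halt handshake
	 * (2008-era API).  arch_halt() is a made-up name for the
	 * arch halt path; on ia64 the real caller is
	 * kvm_emulate_halt() below.
	 */
	static void arch_halt(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);	/* common code sleeps the vcpu */

		/*
		 * kvm_vcpu_block() sets KVM_REQ_UNHALT when the vcpu is
		 * runnable again, e.g. when kvm_cpu_has_pending_timer()
		 * returns nonzero; the arch must clear the bit and make
		 * the vcpu runnable, otherwise the guest hangs.
		 */
		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

ia64 missed both halves of this: kvm_cpu_has_pending_timer() always
returned 0 and kvm_apic_set_irq() open-coded its own wakeup, so a halted
vcpu could sleep through its timer.  The timer_fired flag and the
KVM_REQ_UNHALT handling added below close that hole.
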
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 5550af4d
@@ -365,7 +365,8 @@ struct kvm_vcpu_arch {
 	long itc_offset;
 	unsigned long itc_check;
 	unsigned long timer_check;
-	unsigned long timer_pending;
+	unsigned int timer_pending;
+	unsigned int timer_fired;
 	unsigned long vrr[8];
 	unsigned long ibr[8];
......
@@ -385,6 +385,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	struct kvm *kvm = vcpu->kvm;
 	struct call_data call_data;
 	int i;
+
 	call_data.ptc_g_data = p->u.ptc_g_data;
 	for (i = 0; i < KVM_MAX_VCPUS; i++) {
@@ -418,33 +419,41 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	ktime_t kt;
 	long itc_diff;
 	unsigned long vcpu_now_itc;
 	unsigned long expires;
 	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
 	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
 
-	vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
-
-	if (time_after(vcpu_now_itc, vpd->itm)) {
-		vcpu->arch.timer_check = 1;
-		return 1;
-	}
-	itc_diff = vpd->itm - vcpu_now_itc;
-	if (itc_diff < 0)
-		itc_diff = -itc_diff;
-
-	expires = div64_u64(itc_diff, cyc_per_usec);
-	kt = ktime_set(0, 1000 * expires);
-	vcpu->arch.ht_active = 1;
-	hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
-
 	if (irqchip_in_kernel(vcpu->kvm)) {
+
+		vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+		if (time_after(vcpu_now_itc, vpd->itm)) {
+			vcpu->arch.timer_check = 1;
+			return 1;
+		}
+		itc_diff = vpd->itm - vcpu_now_itc;
+		if (itc_diff < 0)
+			itc_diff = -itc_diff;
+
+		expires = div64_u64(itc_diff, cyc_per_usec);
+		kt = ktime_set(0, 1000 * expires);
+		down_read(&vcpu->kvm->slots_lock);
+		vcpu->arch.ht_active = 1;
+		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		kvm_vcpu_block(vcpu);
 		hrtimer_cancel(p_ht);
 		vcpu->arch.ht_active = 0;
+
+		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
+			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+				vcpu->arch.mp_state =
+					KVM_MP_STATE_RUNNABLE;
+		up_read(&vcpu->kvm->slots_lock);
 
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
+
 		return 1;
@@ -484,10 +493,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 static const int kvm_vti_max_exit_handlers =
 		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
 
-static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
-{
-}
-
 static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
 {
 	struct exit_ctl_data *p_exit_data;
@@ -600,8 +605,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 again:
 	preempt_disable();
-
-	kvm_prepare_guest_switch(vcpu);
 	local_irq_disable();
 
 	if (signal_pending(current)) {
@@ -614,7 +617,7 @@ again:
 
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
-	down_read(&vcpu->kvm->slots_lock);
+
 	r = vti_vcpu_run(vcpu, kvm_run);
 	if (r < 0) {
 		local_irq_enable();
@@ -634,9 +637,8 @@ again:
 	 * But we need to prevent reordering, hence this barrier():
 	 */
 	barrier();
-
 	kvm_guest_exit();
-	up_read(&vcpu->kvm->slots_lock);
+
 	preempt_enable();
 
 	r = kvm_handle_exit(kvm_run, vcpu);
@@ -673,6 +675,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		kvm_vcpu_block(vcpu);
+		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
 		vcpu_put(vcpu);
 		return -EAGAIN;
 	}
@@ -1125,15 +1128,16 @@ static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
 	wait_queue_head_t *q;
 
 	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+	q = &vcpu->wq;
+
 	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
 		goto out;
 
-	q = &vcpu->wq;
-	if (waitqueue_active(q)) {
-		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	if (waitqueue_active(q))
 		wake_up_interruptible(q);
-	}
+
 out:
+	vcpu->arch.timer_fired = 1;
 	vcpu->arch.timer_check = 1;
 	return HRTIMER_NORESTART;
 }
@@ -1702,12 +1706,14 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
 	int ipi_pcpu = vcpu->cpu;
+	int cpu = get_cpu();
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
 
-	if (vcpu->guest_mode)
+	if (vcpu->guest_mode && cpu != ipi_pcpu)
 		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
+	put_cpu();
 }
 
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
@@ -1717,13 +1723,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 
 	if (!test_and_set_bit(vec, &vpd->irr[0])) {
 		vcpu->arch.irq_new_pending = 1;
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
-			kvm_vcpu_kick(vcpu);
-		else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
-			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-			if (waitqueue_active(&vcpu->wq))
-				wake_up_interruptible(&vcpu->wq);
-		}
+		kvm_vcpu_kick(vcpu);
 		return 1;
 	}
 	return 0;
@@ -1793,7 +1793,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return 0;
+	return vcpu->arch.timer_fired;
 }
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
......
@@ -286,6 +286,12 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
 	return index;
 }
 
+static void prepare_for_halt(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.timer_pending = 1;
+	vcpu->arch.timer_fired = 0;
+}
+
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
@@ -304,11 +310,10 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		break;
 	case PAL_HALT_LIGHT:
 	{
-		vcpu->arch.timer_pending = 1;
 		INIT_PAL_STATUS_SUCCESS(result);
+		prepare_for_halt(vcpu);
 		if (kvm_highest_pending_irq(vcpu) == -1)
 			ret = kvm_emulate_halt(vcpu);
-
 	}
 	break;
......
@@ -713,7 +713,7 @@ void leave_hypervisor_tail(void)
 		if (!(VCPU(v, itv) & (1 << 16))) {
 			vcpu_pend_interrupt(v, VCPU(v, itv)
 				& 0xff);
-		VMX(v, itc_check) = 0;
+			VMX(v, itc_check) = 0;
 		} else {
 			v->arch.timer_pending = 1;
 		}
......