Commit f0242478 authored by Rusty Russell, committed by Avi Kivity

KVM: Add and use pr_unimpl for standard formatting of unimplemented features

All guest-invokable printks should be ratelimited to prevent malicious
guests from flooding logs.  This is a start.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 33830b4f
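
The pattern applied throughout the diff below is sketched here first: any printk a guest can trigger at will (for example by writing an unhandled MSR in a tight loop) is gated by printk_ratelimit(), which returns nonzero only while the caller is under the global printk rate limit. The helper below is purely illustrative; its name is made up and does not appear in the patch.

	/* Illustrative sketch only; hypothetical helper, not part of this patch. */
	static void report_unhandled_wrmsr(u32 msr)
	{
		/*
		 * Unguarded, a guest looping on this MSR emits one log line
		 * per VM exit and can flood the host log; printk_ratelimit()
		 * suppresses the message once the rate limit is exceeded.
		 */
		if (printk_ratelimit())
			printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
	}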
@@ -474,6 +474,14 @@ struct kvm_arch_ops {
 
 extern struct kvm_arch_ops *kvm_arch_ops;
 
+/* The guest did something we don't support. */
+#define pr_unimpl(vcpu, fmt, ...)					\
+ do {									\
+	if (printk_ratelimit())						\
+		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
+		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
+ } while(0)
+
 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
...
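
A converted call site passes the vcpu first, then a printk-style format string; the example below mirrors the wrmsr case later in this diff, with its approximate expansion shown as a comment for reference.

	/* Example call through the new macro (mirrors the wrmsr case below): */
	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
	/*
	 * When printk_ratelimit() allows it, this expands to roughly:
	 *   printk(KERN_ERR "kvm: %i: cpu%i unhandled wrmsr: 0x%x\n",
	 *          current->tgid, vcpu->vcpu_id, msr);
	 */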
@@ -962,8 +962,7 @@ static int emulator_write_std(unsigned long addr,
 			      unsigned int bytes,
 			      struct kvm_vcpu *vcpu)
 {
-	printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
-	       addr, bytes);
+	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
 	return X86EMUL_UNHANDLEABLE;
 }
@@ -1138,8 +1137,7 @@ int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
 		*dest = kvm_arch_ops->get_dr(vcpu, dr);
 		return X86EMUL_CONTINUE;
 	default:
-		printk(KERN_DEBUG "%s: unexpected dr %u\n",
-		       __FUNCTION__, dr);
+		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
 		return X86EMUL_UNHANDLEABLE;
 	}
 }
@@ -1488,7 +1486,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		break;
 #endif
 	default:
-		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
+		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
 		return 1;
 	}
 	*pdata = data;
@@ -1543,11 +1541,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		break;
 #endif
 	case MSR_IA32_MC0_STATUS:
-		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
+		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
 		       __FUNCTION__, data);
 		break;
 	case MSR_IA32_MCG_STATUS:
-		printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
+		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
 		       __FUNCTION__, data);
 		break;
 	case MSR_IA32_UCODE_REV:
@@ -1567,7 +1565,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		return vcpu_register_para(vcpu, data);
 	default:
-		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
+		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 		return 1;
 	}
 	return 0;
@@ -1798,7 +1796,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		/*
 		 * String I/O in reverse. Yuck. Kill the guest, fix later.
 		 */
-		printk(KERN_ERR "kvm: guest string pio down\n");
+		pr_unimpl(vcpu, "guest string pio down\n");
 		inject_gp(vcpu);
 		return 1;
 	}
@@ -1829,7 +1827,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 			ret = 1;
 		}
 	} else if (pio_dev)
-		printk(KERN_ERR "no string pio read support yet, "
+		pr_unimpl(vcpu, "no string pio read support yet, "
 		       "port %x size %d count %ld\n",
 		       port, size, count);
...
@@ -1167,7 +1167,7 @@ static int invalid_op_interception(struct vcpu_svm *svm,
 static int task_switch_interception(struct vcpu_svm *svm,
 				    struct kvm_run *kvm_run)
 {
-	printk(KERN_DEBUG "%s: task swiche is unsupported\n", __FUNCTION__);
+	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
 	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
 	return 0;
 }
@@ -1183,7 +1183,7 @@ static int emulate_on_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
 	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
-		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
+		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
 	return 1;
 }
...
@@ -1920,7 +1920,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		break;
 	}
 	kvm_run->exit_reason = 0;
-	printk(KERN_ERR "kvm: unhandled control register: op %d cr %d\n",
+	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
 	       (int)(exit_qualification >> 4) & 3, cr);
 	return 0;
 }
...