     uint64_t *val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_read_intercept(reg, val);
+    int rc = hvm_msr_read_intercept(reg, val);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }

 static int hvmemul_write_msr(
     uint64_t val,
     struct x86_emulate_ctxt *ctxt)
 {
-    return hvm_msr_write_intercept(reg, val, 1);
+    int rc = hvm_msr_write_intercept(reg, val, 1);
+
+    if ( rc == X86EMUL_EXCEPTION )
+        x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
+
+    return rc;
 }

 static int hvmemul_wbinvd(
         if ( w->do_write.msr )
         {
-            hvm_msr_write_intercept(w->msr, w->value, 0);
+            if ( hvm_msr_write_intercept(w->msr, w->value, 0) ==
+                 X86EMUL_EXCEPTION )
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+
             w->do_write.msr = 0;
         }
     return ret;

 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     ret = X86EMUL_EXCEPTION;
     *msr_content = -1ull;
     goto out;
     return ret;

 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
     return X86EMUL_OKAY;

 gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
     return result;

 gpf:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
     if ( rc == X86EMUL_OKAY )
         __update_guest_eip(regs, inst_len);
+    else if ( rc == X86EMUL_EXCEPTION )
+        hvm_inject_hw_exception(TRAP_gp_fault, 0);
 }

 static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
     return X86EMUL_OKAY;

 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
     return X86EMUL_OKAY;

 gp_fault:
-    hvm_inject_hw_exception(TRAP_gp_fault, 0);
     return X86EMUL_EXCEPTION;
 }
         break;

     case EXIT_REASON_MSR_READ:
     {
-        uint64_t msr_content;
-        if ( hvm_msr_read_intercept(regs->_ecx, &msr_content) == X86EMUL_OKAY )
+        uint64_t msr_content = 0;
+
+        switch ( hvm_msr_read_intercept(regs->_ecx, &msr_content) )
         {
+        case X86EMUL_OKAY:
             msr_split(regs, msr_content);
             update_guest_eip(); /* Safe: RDMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
         }
         break;
     }

     case EXIT_REASON_MSR_WRITE:
-        if ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) == X86EMUL_OKAY )
+        switch ( hvm_msr_write_intercept(regs->_ecx, msr_fold(regs), 1) )
+        {
+        case X86EMUL_OKAY:
             update_guest_eip(); /* Safe: WRMSR */
+            break;
+
+        case X86EMUL_EXCEPTION:
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
+        }
         break;

     case EXIT_REASON_VMXOFF:
     struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
     u32 control;
     u64 cr_gh_mask, cr_read_shadow;
+    int rc;

     static const u16 vmentry_fields[] = {
         VM_ENTRY_INTR_INFO,
     if ( control & VM_ENTRY_LOAD_GUEST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, GUEST_PAT));
     if ( control & VM_ENTRY_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, GUEST_PERF_GLOBAL_CTRL), 0);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }

     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 static void load_vvmcs_host_state(struct vcpu *v)
 {
-    int i;
+    int i, rc;
     u64 r;
     u32 control;
     if ( control & VM_EXIT_LOAD_HOST_PAT )
         hvm_set_guest_pat(v, get_vvmcs(v, HOST_PAT));
     if ( control & VM_EXIT_LOAD_PERF_GLOBAL_CTRL )
-        hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
-                                get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+    {
+        rc = hvm_msr_write_intercept(MSR_CORE_PERF_GLOBAL_CTRL,
+                                     get_vvmcs(v, HOST_PERF_GLOBAL_CTRL), 1);
+        if ( rc == X86EMUL_EXCEPTION )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+    }

     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset, 0);
 int hvm_set_cr0(unsigned long value, bool_t may_defer);
 int hvm_set_cr3(unsigned long value, bool_t may_defer);
 int hvm_set_cr4(unsigned long value, bool_t may_defer);
-int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
-int hvm_msr_write_intercept(
-    unsigned int msr, uint64_t msr_content, bool_t may_defer);
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
 void hvm_ud_intercept(struct cpu_user_regs *);

+/*
+ * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
+ * injecting a #GP fault. Used to support speculative reads.
+ */
+int __must_check hvm_msr_read_intercept(
+    unsigned int msr, uint64_t *msr_content);
+int __must_check hvm_msr_write_intercept(
+    unsigned int msr, uint64_t msr_content, bool_t may_defer);
+
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */

 /*