static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
{
-    if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR )
+    if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR &&
+         errno != VMX_INSN_FAIL_INVALID )
        vmfail_valid(regs, errno);
    else
        vmfail_invalid(regs);
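The extra condition matters because vmwrite_safe(), introduced later in this patch, reports a hardware VMfailInvalid as the out-of-band value VMX_INSN_FAIL_INVALID (~0); without the check, vmfail() would emulate VMfailValid for it whenever a vVMCS happens to be mapped. A hypothetical call site in the nested-VMX emulation code (sketch only, not part of the patch; vmsucceed() is the existing success-path counterpart of vmfail()):

/* Sketch: forward a vmwrite_safe() result to the nested guest with
 * the matching VMfail flavour. */
static void nvmx_vmwrite_example(struct cpu_user_regs *regs,
                                 unsigned long field, unsigned long value)
{
    enum vmx_insn_errno err = vmwrite_safe(field, value);

    if ( err )
        vmfail(regs, err);   /* ~0 now routes to vmfail_invalid() */
    else
        vmsucceed(regs);
}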
#define REX64_PREFIX "rex64/"
#endif
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define ASM_FLAG_OUT(yes, no) yes
+#else
+# define ASM_FLAG_OUT(yes, no) no
+#endif
+
#endif /* __X86_ASM_DEFNS_H__ */
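ASM_FLAG_OUT() abstracts GCC 6's flag-output constraints (advertised via __GCC_ASM_FLAG_OUTPUTS__): where available, the compiler reads a condition flag straight out of an asm block; otherwise a SETcc instruction materialises the flag into an ordinary output operand. A minimal sketch of the pattern, independent of this patch (sub_borrows() is an invented example, not Xen code):

/* Invented example: report whether a - b borrows, i.e. whether SUB
 * sets the carry flag.  With flag outputs the "=@ccc" constraint hands
 * CF to the compiler directly; without them, SETC stores it first. */
static inline bool sub_borrows(unsigned long a, unsigned long b)
{
    bool borrow;

    asm ( "sub %[b], %[a]\n\t"
          ASM_FLAG_OUT(, "setc %[c]\n\t")
          : [a] "+r" (a),
            ASM_FLAG_OUT("=@ccc", [c] "=rm") (borrow)
          : [b] "rm" (b) );

    return borrow;
}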
    VMX_INSN_VMPTRLD_INVALID_PHYADDR       = 9,
    VMX_INSN_UNSUPPORTED_VMCS_COMPONENT    = 12,
    VMX_INSN_VMXON_IN_VMX_ROOT             = 15,
+    VMX_INSN_FAIL_INVALID                  = ~0,
};
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
#define INVVPID_ALL_CONTEXT 2
#define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3
+#ifdef HAVE_GAS_VMX
+# define GAS_VMX_OP(yes, no) yes
+#else
+# define GAS_VMX_OP(yes, no) no
+#endif
+
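GAS_VMX_OP() plays the same role for assembler capability: with HAVE_GAS_VMX the toolchain understands the VMX mnemonics, so the friendly form with compiler-chosen operands can be used; otherwise the instruction is emitted as pre-encoded opcode bytes whose fixed ModRM byte hard-codes the registers. As an illustration, the two expansions of the vmwrite in vmwrite_safe() below come out roughly as follows (a sketch; the byte values are an assumption based on the existing VMWRITE_OPCODE/MODRM_EAX_ECX definitions, i.e. 0f 79 with ModRM 0xc1 = reg EAX, r/m ECX):

/* HAVE_GAS_VMX expansion: mnemonic form, operands picked freely. */
static inline void vmwrite_mnemonic(unsigned long field, unsigned long value)
{
    asm volatile ( "vmwrite %[value], %[field]"
                   :: [field] "r" (field), [value] "rm" (value) );
}

/* Fallback expansion: raw bytes, so field must sit in EAX and value
 * in ECX to match the fixed ModRM byte. */
static inline void vmwrite_rawbytes(unsigned long field, unsigned long value)
{
    asm volatile ( ".byte 0x0f, 0x79, 0xc1"
                   :: "a" (field), "c" (value) );
}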
static always_inline void __vmptrld(u64 addr)
{
    asm volatile (
    return okay;
}
+static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
+                                               unsigned long value)
+{
+    unsigned long ret = 0;
+    bool fail_invalid, fail_valid;
+
+    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
+                              VMWRITE_OPCODE MODRM_EAX_ECX)
+                   ASM_FLAG_OUT(, "setc %[invalid]\n\t")
+                   ASM_FLAG_OUT(, "setz %[valid]\n\t")
+                   : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
+                     ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
+                   : [field] GAS_VMX_OP("r", "a") (field),
+                     [value] GAS_VMX_OP("rm", "c") (value) );
+
+    if ( unlikely(fail_invalid) )
+        ret = VMX_INSN_FAIL_INVALID;
+    else if ( unlikely(fail_valid) )
+        __vmread(VM_INSTRUCTION_ERROR, &ret);
+
+    return ret;
+}
+
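Unlike __vmwrite(), which crashes on failure, the new helper hands both failure flavours back to the caller. A usage sketch (hypothetical call site, not part of the patch; GUEST_RIP, gdprintk() and struct cpu_user_regs are existing Xen names):

/* Sketch: tolerate a failed VMCS write instead of bringing Xen down. */
static void set_guest_rip_example(const struct cpu_user_regs *regs)
{
    enum vmx_insn_errno err = vmwrite_safe(GUEST_RIP, regs->rip);

    if ( err == VMX_INSN_FAIL_INVALID )
        gdprintk(XENLOG_ERR, "VMWRITE: no current VMCS\n");
    else if ( err )
        gdprintk(XENLOG_ERR, "VMWRITE: VM-instruction error %d\n", err);
}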
static always_inline void __invept(unsigned long type, u64 eptp, u64 gpa)
{
    struct {