static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
-static u32 msr_index[VMX_MSR_COUNT] =
+static u32 msr_index[] =
{
MSR_LSTAR, MSR_STAR, MSR_SYSCALL_MASK
};
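+/* Number of MSRs saved/restored via the msr_index[] loops (no TSC_AUX). */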
+#define MSR_INDEX_SIZE (ARRAY_SIZE(msr_index))
+
static void vmx_save_host_msrs(void)
{
struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
int i;
- for ( i = 0; i < VMX_MSR_COUNT; i++ )
+ /*
+ * If a new MSR needs to be added to msr_index[] and the VMX_INDEX_MSR_*
+ * enum, note that msr_index[] and the enum do not have the same number of
+ * elements: only three MSRs (MSR_LSTAR, MSR_STAR and MSR_SYSCALL_MASK) are
+ * saved into host state by the loop below, while MSR_TSC_AUX is handled
+ * separately.
+ */
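+ /*
+ * For illustration only, the layout the BUILD_BUG_ON below assumes (the
+ * real enum is defined in the VMX headers):
+ * enum { VMX_INDEX_MSR_LSTAR, VMX_INDEX_MSR_STAR,
+ * VMX_INDEX_MSR_SYSCALL_MASK, VMX_INDEX_MSR_TSC_AUX, VMX_MSR_COUNT };
+ */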
+ BUILD_BUG_ON(MSR_INDEX_SIZE != VMX_INDEX_MSR_TSC_AUX ||
+ VMX_INDEX_MSR_TSC_AUX != VMX_MSR_COUNT - 1);
+ for ( i = 0; i < MSR_INDEX_SIZE; i++ )
rdmsrl(msr_index[i], host_msr_state->msrs[i]);
+
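+ /* MSR_TSC_AUX is not in msr_index[]; save it only if RDTSCP exists. */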
+ if ( cpu_has_rdtscp )
+ rdmsrl(MSR_TSC_AUX, host_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
}
#define WRITE_MSR(address) \
msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
break;
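+ /*
+ * Reads of MSR_TSC_AUX return the cached guest value; if the CPU has no
+ * RDTSCP the MSR does not exist, so raise #GP as hardware would.
+ */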
+ case MSR_TSC_AUX:
+ if ( cpu_has_rdtscp )
+ {
+ msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX];
+ break;
+ }
+ else
+ {
+ HVM_DBG_LOG(DBG_LEVEL_0, "Reading from nonexistent MSR 0x%x", ecx);
+ vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ return HNDL_exception_raised;
+ }
+
default:
return HNDL_unhandled;
}
case MSR_SYSCALL_MASK:
WRITE_MSR(SYSCALL_MASK);
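+ /*
+ * Writes to MSR_TSC_AUX update both the cached guest value and the
+ * hardware MSR, keeping the two in sync for a subsequent RDTSCP.
+ */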
+ case MSR_TSC_AUX:
+ if ( cpu_has_rdtscp )
+ {
+ struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
+ guest_state->msrs[VMX_INDEX_MSR_TSC_AUX] = msr_content;
+ wrmsrl(MSR_TSC_AUX, msr_content);
+ break;
+ }
+ else
+ {
+ HVM_DBG_LOG(DBG_LEVEL_0, "Writing to nonexistent MSR 0x%x", ecx);
+ vmx_inject_hw_exception(TRAP_gp_fault, 0);
+ return HNDL_exception_raised;
+ }
+
default:
return HNDL_unhandled;
}
wrmsrl(msr_index[i], host_msr_state->msrs[i]);
clear_bit(i, &host_msr_state->flags);
}
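+ /* Put back the host's TSC_AUX, saved by vmx_save_host_msrs(). */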
+
+ if ( cpu_has_rdtscp )
+ wrmsrl(MSR_TSC_AUX, host_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
}
static void vmx_save_guest_msrs(struct vcpu *v)
{
+ struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
/*
* We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
* be updated at any time via SWAPGS, which we cannot trap.
*/
rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.shadow_gs);
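+ /* TSC_AUX holds the guest's value while the vCPU runs; read it back. */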
+ if ( cpu_has_rdtscp )
+ rdmsrl(MSR_TSC_AUX, guest_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
}
static void vmx_restore_guest_msrs(struct vcpu *v)
write_efer((read_efer() & ~EFER_SCE) |
(v->arch.hvm_vcpu.guest_efer & EFER_SCE));
}
+
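+ /* Load the guest's TSC_AUX so RDTSCP executed by the guest returns it. */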
+ if ( cpu_has_rdtscp )
+ wrmsrl(MSR_TSC_AUX, guest_msr_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
}
#else /* __i386__ */
data->msr_lstar = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
data->msr_star = guest_state->msrs[VMX_INDEX_MSR_STAR];
data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
+ if ( cpu_has_rdtscp )
+ data->msr_tsc_aux = guest_state->msrs[VMX_INDEX_MSR_TSC_AUX];
#endif
data->tsc = hvm_get_guest_tsc(v);
v->arch.hvm_vmx.cstar = data->msr_cstar;
v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
+ if ( cpu_has_rdtscp )
+ guest_state->msrs[VMX_INDEX_MSR_TSC_AUX] = data->msr_tsc_aux;
#endif
hvm_set_guest_tsc(v, data->tsc);
*edx |= bitmaskof(X86_FEATURE_SYSCALL);
else
*edx &= ~(bitmaskof(X86_FEATURE_SYSCALL));
+
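+ /* TSC_AUX save/restore exists only on x86_64, so expose RDTSCP there only. */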
+#ifdef __x86_64__
+ if ( cpu_has_rdtscp )
+ *edx |= bitmaskof(X86_FEATURE_RDTSCP);
+ else
+ *edx &= ~(bitmaskof(X86_FEATURE_RDTSCP));
+#endif
+
break;
}
__update_guest_eip(inst_len);
hvm_rdtsc_intercept(regs);
break;
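+ /*
+ * RDTSCP returns the TSC in EDX:EAX, like RDTSC, plus TSC_AUX in ECX:
+ * reuse the RDTSC intercept and then supply the guest's TSC_AUX.
+ */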
+ case EXIT_REASON_RDTSCP:
+ {
+ struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
+ inst_len = __get_instruction_length();
+ __update_guest_eip(inst_len);
+ hvm_rdtsc_intercept(regs);
+ regs->ecx = (uint32_t)(guest_state->msrs[VMX_INDEX_MSR_TSC_AUX]);
+ break;
+ }
case EXIT_REASON_VMCALL:
{
int rc;