/* Architecture-specific vmcs/vmcb bits */
hvm_funcs.save_cpu_ctxt(v, &ctxt);
- ctxt.msr_tsc_aux = v->arch.hvm_vcpu.msr_tsc_aux;
+ ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
hvm_get_segment_register(v, x86_seg_idtr, &seg);
ctxt.idtr_limit = seg.limit;
}
}
break;
+ case 0x80000001:
+ /* Don't expose RDTSCP feature when in PVRDTSCP mode. */
+ if ( v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP )
+ *edx &= ~bitmaskof(X86_FEATURE_RDTSCP);
+ break;
}
}
break;
case MSR_TSC_AUX:
- msr_content = v->arch.hvm_vcpu.msr_tsc_aux;
+ msr_content = hvm_msr_tsc_aux(v);
break;
case MSR_IA32_APICBASE:
case MSR_TSC_AUX:
v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
- if ( cpu_has_rdtscp )
+ if ( cpu_has_rdtscp
+ && (v->domain->arch.tsc_mode != TSC_MODE_PVRDTSCP) )
wrmsrl(MSR_TSC_AUX, (uint32_t)msr_content);
break;
svm_vmsave(root_vmcb[cpu]);
svm_vmload(v->arch.hvm_svm.vmcb);
+
+ if ( cpu_has_rdtscp )
+ wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
}
static void svm_do_resume(struct vcpu *v)
hvm_triple_fault();
break;
+ case VMEXIT_RDTSCP:
+ regs->ecx = hvm_msr_tsc_aux(v);
+ /* fall through */
case VMEXIT_RDTSC:
svm_vmexit_do_rdtsc(regs);
break;
- case VMEXIT_RDTSCP:
case VMEXIT_MONITOR:
case VMEXIT_MWAIT:
case VMEXIT_VMRUN:
GENERAL2_INTERCEPT_VMRUN | GENERAL2_INTERCEPT_VMMCALL |
GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE |
GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI |
- GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP |
- GENERAL2_INTERCEPT_WBINVD | GENERAL2_INTERCEPT_MONITOR |
- GENERAL2_INTERCEPT_MWAIT;
+ GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_MWAIT |
+ GENERAL2_INTERCEPT_WBINVD | GENERAL2_INTERCEPT_MONITOR;
/* Intercept all debug-register writes. */
vmcb->dr_intercepts = ~0u;
/* TSC. */
vmcb->tsc_offset = 0;
if ( v->domain->arch.vtsc )
+ {
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
+ vmcb->general2_intercepts |= GENERAL2_INTERCEPT_RDTSCP;
+ }
/* Guest EFER. */
v->arch.hvm_vcpu.guest_efer = 0;
}
if ( cpu_has_rdtscp )
- wrmsrl(MSR_TSC_AUX, v->arch.hvm_vcpu.msr_tsc_aux);
+ wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
}
#else /* __i386__ */
vmx_invlpg_intercept(exit_qualification);
break;
case EXIT_REASON_RDTSCP:
- regs->ecx = v->arch.hvm_vcpu.msr_tsc_aux;
+ regs->ecx = hvm_msr_tsc_aux(v);
/* fall through */
case EXIT_REASON_RDTSC:
inst_len = __get_instruction_length();
bool_t hvm_hap_nested_page_fault(unsigned long gfn);
+/*
+ * Guest-visible TSC_AUX value for vCPU @v: in PVRDTSCP mode, report the
+ * domain's incarnation value (so the guest can detect save/restore events
+ * via RDTSCP); otherwise report the vCPU's saved MSR_TSC_AUX contents.
+ * Evaluates @v more than once — pass side-effect-free arguments only.
+ * NOTE(review): "__d" is in the implementation-reserved identifier
+ * namespace; kept as-is to match surrounding Xen convention.
+ */
+#define hvm_msr_tsc_aux(v) ({ \
+    struct domain *__d = (v)->domain; \
+    (__d->arch.tsc_mode == TSC_MODE_PVRDTSCP) \
+    ? (u32)__d->arch.incarnation : (u32)(v)->arch.hvm_vcpu.msr_tsc_aux; \
+})
+
#endif /* __ASM_X86_HVM_HVM_H__ */