unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);
+/*
+ * "softtsc" boot parameter: when non-zero, RDTSC is intercepted so the
+ * guest sees emulated TSC values rather than the raw host TSC.
+ */
+int opt_softtsc;
+boolean_param("softtsc", opt_softtsc);
+
struct hvm_function_table hvm_funcs __read_mostly;
/* I/O permission bitmap is globally shared by all HVM guests. */
{
u64 host_tsc;
- rdtscll(host_tsc);
+ if ( opt_softtsc )
+ host_tsc = hvm_get_guest_time(v);
+ else
+ rdtscll(host_tsc);
+
return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
}
}
+/*
+ * Common handler for an intercepted RDTSC instruction: fetch the guest's
+ * (possibly emulated) TSC and return it in EDX:EAX, as RDTSC does.
+ */
+void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
+{
+    uint64_t tsc;
+    struct vcpu *v = current;
+
+    tsc = hvm_get_guest_tsc(v);
+    regs->eax = (uint32_t)tsc;          /* low 32 bits */
+    regs->edx = (uint32_t)(tsc >> 32);  /* high 32 bits */
+}
+
int hvm_msr_read_intercept(struct cpu_user_regs *regs)
{
uint32_t ecx = regs->ecx;
hvm_triple_fault();
break;
+ case VMEXIT_RDTSC:
+ hvm_rdtsc_intercept(regs);
+ break;
+
case VMEXIT_RDTSCP:
case VMEXIT_MONITOR:
case VMEXIT_MWAIT:
/* TSC. */
vmcb->tsc_offset = 0;
-
+ if ( opt_softtsc )
+ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_RDTSC;
+
/* Guest EFER: *must* contain SVME or VMRUN will fail. */
vmcb->efer = EFER_SVME;
CPU_BASED_MWAIT_EXITING |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_ACTIVATE_IO_BITMAP |
- CPU_BASED_USE_TSC_OFFSETING);
+ CPU_BASED_USE_TSC_OFFSETING |
+ (opt_softtsc ? CPU_BASED_RDTSC_EXITING : 0));
opt = (CPU_BASED_ACTIVATE_MSR_BITMAP |
CPU_BASED_TPR_SHADOW |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
vmx_invlpg_intercept(exit_qualification);
break;
}
+ case EXIT_REASON_RDTSC:
+ inst_len = __get_instruction_length();
+ __update_guest_eip(inst_len);
+ hvm_rdtsc_intercept(regs);
+ break;
case EXIT_REASON_VMCALL:
{
int rc;
void hvm_hlt(unsigned long rflags);
void hvm_triple_fault(void);
+/* Non-zero when the "softtsc" boot option is set (RDTSC interception). */
+extern int opt_softtsc;
+/* RDTSC intercept handler: writes the guest TSC into regs->eax/edx. */
+void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
+
/* These functions all return X86EMUL return codes. */
int hvm_set_efer(uint64_t value);
int hvm_set_cr0(unsigned long value);