#define S_TO_NS 1000000000ULL /* 1s = 10^9 ns */
#define S_TO_FS 1000000000000000ULL /* 1s = 10^15 fs */
-/* Frequency_of_TSC / frequency_of_HPET = 32 */
-#define TSC_PER_HPET_TICK 32
-#define guest_time_hpet(v) (hvm_get_guest_time(v) / TSC_PER_HPET_TICK)
+/* Frequency_of_Xen_system_time / frequency_of_HPET = 16 */
+#define STIME_PER_HPET_TICK 16
+#define guest_time_hpet(v) (hvm_get_guest_time(v) / STIME_PER_HPET_TICK)
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
/* the number of HPET tick that stands for
* 1/(2^10) second, namely, 0.9765625 milliseconds */
-#define HPET_TINY_TIME_SPAN ((h->tsc_freq >> 10) / TSC_PER_HPET_TICK)
+#define HPET_TINY_TIME_SPAN ((h->stime_freq >> 10) / STIME_PER_HPET_TICK)
static void hpet_set_timer(HPETState *h, unsigned int tn)
{
spin_lock_init(&h->lock);
h->vcpu = v;
- h->tsc_freq = ticks_per_sec(v);
+ h->stime_freq = S_TO_NS;
- h->hpet_to_ns_scale = ((S_TO_NS * TSC_PER_HPET_TICK) << 10) / h->tsc_freq;
+ h->hpet_to_ns_scale = ((S_TO_NS * STIME_PER_HPET_TICK) << 10) / h->stime_freq;
h->hpet_to_ns_limit = ~0ULL / h->hpet_to_ns_scale;
/* 64-bit main counter; 3 timers supported; LegacyReplacementRoute. */
h->hpet.capability = 0x8086A201ULL;
/* This is the number of femptoseconds per HPET tick. */
- /* Here we define HPET's frequency to be 1/32 of the TSC's */
- h->hpet.capability |= ((S_TO_FS*TSC_PER_HPET_TICK/h->tsc_freq) << 32);
+ /* Here we define HPET's frequency to be 1/16 of Xen system time */
+ h->hpet.capability |= ((S_TO_FS*STIME_PER_HPET_TICK/h->stime_freq) << 32);
for ( i = 0; i < HPET_TIMER_NUM; i++ )
{
spin_lock_init(&d->arch.hvm_domain.irq_lock);
spin_lock_init(&d->arch.hvm_domain.uc_lock);
+ hvm_init_guest_time(d);
+
d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
hvm_init_cacheattr_region_list(d);
hpet_init(v);
/* Init guest TSC to start from zero. */
- hvm_set_guest_time(v, 0);
+ hvm_set_guest_tsc(v, 0);
/* Can start up without SIPI-SIPI or setvcpucontext domctl. */
v->is_initialised = 1;
switch ( ecx )
{
case MSR_IA32_TSC:
- msr_content = hvm_get_guest_time(v);
+ msr_content = hvm_get_guest_tsc(v);
break;
case MSR_IA32_APICBASE:
switch ( ecx )
{
case MSR_IA32_TSC:
- hvm_set_guest_time(v, msr_content);
+ hvm_set_guest_tsc(v, msr_content);
pt_reset(v);
break;
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
+#include <asm/time.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
ASSERT(spin_is_locked(&pit->lock));
d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
- PIT_FREQ, ticks_per_sec(v));
+ PIT_FREQ, SYSTEM_TIME_HZ);
switch ( c->mode )
{
ASSERT(spin_is_locked(&pit->lock));
d = muldiv64(hvm_get_guest_time(v) - pit->count_load_time[channel],
- PIT_FREQ, ticks_per_sec(v));
+ PIT_FREQ, SYSTEM_TIME_HZ);
switch ( s->mode )
{
val = 0x10000;
if ( v == NULL )
- rdtscll(pit->count_load_time[channel]);
+ pit->count_load_time[channel] = 0;
else
pit->count_load_time[channel] = hvm_get_guest_time(v);
s->count = val;
- period = DIV_ROUND((val * 1000000000ULL), PIT_FREQ);
+ period = DIV_ROUND(val * SYSTEM_TIME_HZ, PIT_FREQ);
if ( (v == NULL) || !is_hvm_vcpu(v) || (channel != 0) )
return;
spin_lock_init(&s->lock);
- s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / ticks_per_sec(v);
+ s->scale = ((uint64_t)FREQUENCE_PMTIMER << 32) / SYSTEM_TIME_HZ;
s->vcpu = v;
/* Intercept port I/O (need two handlers because PM1a_CNT is between
data->msr_efer = v->arch.hvm_vcpu.guest_efer;
data->msr_flags = -1ULL;
- data->tsc = hvm_get_guest_time(v);
+ data->tsc = hvm_get_guest_tsc(v);
}
v->arch.hvm_vcpu.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
- hvm_set_guest_time(v, data->tsc);
+ hvm_set_guest_tsc(v, data->tsc);
}
static void svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *ctxt)
uint64_t counter_passed;
counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
- * 1000000000ULL / ticks_per_sec(v)
/ APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
tmcct = tmict - counter_passed;
data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
#endif
- data->tsc = hvm_get_guest_time(v);
+ data->tsc = hvm_get_guest_tsc(v);
}
static void vmx_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
v->arch.hvm_vmx.shadow_gs = data->shadow_gs;
#endif
- hvm_set_guest_time(v, data->tsc);
+ hvm_set_guest_tsc(v, data->tsc);
}
#define mode_is(d, name) \
((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
+/* Initialise per-domain guest-time state (pl_time) for an HVM domain. */
+void hvm_init_guest_time(struct domain *d)
+{
+ struct pl_time *pl = &d->arch.hvm_domain.pl_time;
+
+ spin_lock_init(&pl->pl_time_lock);
+ /* Offset cancels current Xen system time so guest time starts at zero. */
+ pl->stime_offset = -(u64)get_s_time();
+ pl->last_guest_time = 0;
+}
+
+/*
+ * Return this VCPU's guest time, derived from Xen system time:
+ * guest_time = get_s_time() + domain stime_offset + per-VCPU stime_offset.
+ * Domain-wide time is clamped to be monotonically non-decreasing.
+ */
+u64 hvm_get_guest_time(struct vcpu *v)
+{
+ struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
+ u64 now;
+
+ /* Lock serialises readers so last_guest_time advances consistently. */
+ spin_lock(&pl->pl_time_lock);
+ now = get_s_time() + pl->stime_offset;
+ /* Never let domain-wide guest time appear to run backwards. */
+ if ( (int64_t)(now - pl->last_guest_time) >= 0 )
+ pl->last_guest_time = now;
+ else
+ now = pl->last_guest_time;
+ spin_unlock(&pl->pl_time_lock);
+
+ /* Per-VCPU offset: VCPUs may have differing guest times (see
+ * delay_for_missed_ticks note on struct hvm_vcpu's stime_offset). */
+ return now + v->arch.hvm_vcpu.stime_offset;
+}
+
+/* Set this VCPU's guest time by adjusting its private stime offset so
+ * that hvm_get_guest_time(v) reads guest_time at this instant. */
+void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
+{
+ v->arch.hvm_vcpu.stime_offset += guest_time - hvm_get_guest_time(v);
+}
+
static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
{
struct vcpu *v = pt->vcpu;
pt->vcpu = v;
pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
pt->irq = irq;
- pt->period_cycles = (u64)period * cpu_khz / 1000000L;
+ pt->period_cycles = (u64)period;
pt->one_shot = one_shot;
pt->scheduled = NOW() + period;
/*
void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
u64 hvm_get_guest_tsc(struct vcpu *v);
-#define hvm_set_guest_time(vcpu, gtime) hvm_set_guest_tsc(vcpu, gtime)
-#define hvm_get_guest_time(vcpu) hvm_get_guest_tsc(vcpu)
+
+void hvm_init_guest_time(struct domain *d);
+void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
+u64 hvm_get_guest_time(struct vcpu *v);
#define hvm_paging_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
struct mtrr_state mtrr;
u64 pat_cr;
+ /* In mode delay_for_missed_ticks, VCPUs have differing guest times. */
+ int64_t stime_offset;
+
/* Which cache mode is this VCPU in (CR0:CD/NW)? */
u8 cache_mode;
void vmx_asm_do_vmentry(void);
void vmx_intr_assist(void);
void vmx_do_resume(struct vcpu *);
-void set_guest_time(struct vcpu *v, u64 gtime);
void vmx_vlapic_msr_changed(struct vcpu *v);
void vmx_realmode(struct cpu_user_regs *regs);
typedef struct HPETState {
struct hpet_registers hpet;
struct vcpu *vcpu;
- uint64_t tsc_freq;
+ uint64_t stime_freq;
uint64_t hpet_to_ns_scale; /* hpet ticks to ns (multiplied by 2^10) */
uint64_t hpet_to_ns_limit; /* max hpet ticks convertable to ns */
uint64_t mc_offset;
struct RTCState vrtc;
struct HPETState vhpet;
struct PMTState vpmt;
+ /* guest_time = Xen sys time + stime_offset */
+ int64_t stime_offset;
+ /* Ensures monotonicity in appropriate timer modes. */
+ uint64_t last_guest_time;
+ spinlock_t pl_time_lock;
};
#define ticks_per_sec(v) (v->domain->arch.hvm_domain.tsc_frequency)
};
struct tm gmtime(unsigned long t);
+#define SYSTEM_TIME_HZ 1000000000ULL
#define NOW() ((s_time_t)get_s_time())
#define SECONDS(_s) ((s_time_t)((_s) * 1000000000ULL))
#define MILLISECS(_ms) ((s_time_t)((_ms) * 1000000ULL))