'fda', 'fdb', 'keymap', 'isa', 'localtime', 'monitor',
'nographic', 'pae', 'rtc_timeoffset', 'serial', 'sdl',
'soundhw','stdvga', 'usb', 'usbdevice', 'vnc',
- 'vncconsole', 'vncdisplay', 'vnclisten',
+ 'vncconsole', 'vncdisplay', 'vnclisten', 'timer_mode',
'vncpasswd', 'vncunused', 'xauthority', 'pci', 'vhpt']
# Xen API console 'other_config' keys.
HVM_PARAM_NVRAM_FD = 7
HVM_PARAM_VHPT_SIZE = 8
HVM_PARAM_BUFPIOREQ_PFN = 9
+HVM_PARAM_TIMER_MODE = 10
restart_modes = [
"restart",
self._recreateDom()
+        # Set timer configuration of domain
+ if hvm:
+ xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
+ long(self.info["platform"].get("timer_mode")))
+
# Set maximum number of vcpus in domain
xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
fn=set_int, default=1,
use="Disable or enable PAE of HVM domain.")
+gopts.var('timer_mode', val='TIMER_MODE',
+ fn=set_int, default=0,
+          use="""Timer mode (0=delay virtual time when ticks are missed;
+          1=virtual time is always wallclock time).""")
+
gopts.var('acpi', val='ACPI',
fn=set_int, default=1,
use="Disable or enable ACPI of HVM domain.")
def configure_hvm(config_image, vals):
"""Create the config for HVM devices.
"""
- args = [ 'device_model', 'pae', 'vcpus', 'boot', 'fda', 'fdb',
+ args = [ 'device_model', 'pae', 'vcpus', 'boot', 'fda', 'fdb', 'timer_mode',
'localtime', 'serial', 'stdvga', 'isa', 'nographic', 'soundhw',
'vnc', 'vncdisplay', 'vncunused', 'vncconsole', 'vnclisten',
'sdl', 'display', 'xauthority', 'rtc_timeoffset', 'monitor',
def extract_platform(self, image, document):
- platform_keys = ['acpi', 'apic', 'pae', 'vhpt']
+ platform_keys = ['acpi', 'apic', 'pae', 'vhpt', 'timer_mode']
def extract_platform_key(key):
platform = document.createElement("platform")
local_irq_disable();
if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
- pt_freeze_time(prev);
+ pt_save_timer(prev);
set_current(next);
}
}
-void hvm_set_guest_time(struct vcpu *v, u64 gtime)
+void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
{
u64 host_tsc;
rdtscll(host_tsc);
- v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
+ v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - host_tsc;
hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}
-u64 hvm_get_guest_time(struct vcpu *v)
+u64 hvm_get_guest_tsc(struct vcpu *v)
{
u64 host_tsc;
if ( !v->fpu_dirtied )
hvm_funcs.stts(v);
- pt_thaw_time(v);
+ pt_restore_timer(v);
/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
p = &get_ioreq(v)->vp_ioreq;
hvm_set_callback_via(d, a.value);
hvm_latch_shinfo_size(d);
break;
+ case HVM_PARAM_TIMER_MODE:
+ rc = -EINVAL;
+ if ( (a.value != HVMPTM_delay_for_missed_ticks) &&
+ (a.value != HVMPTM_no_delay_for_missed_ticks) )
+ goto param_fail;
+ break;
}
d->arch.hvm_domain.params[a.index] = a.value;
rc = 0;
uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
uint64_t counter_passed;
- counter_passed = (hvm_get_guest_time(v) - vlapic->pt.last_plt_gtime) // TSC
+ counter_passed = (hvm_get_guest_time(v) - vlapic->timer_last_update) // TSC
* 1000000000ULL / ticks_per_sec(v) // NS
/ APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor;
tmcct = tmict - counter_passed;
return 0;
}
+void vlapic_pt_cb(struct vcpu *v, void *data)
+{
+ *(s_time_t *)data = hvm_get_guest_time(v);
+}
+
static void vlapic_write(struct vcpu *v, unsigned long address,
unsigned long len, unsigned long val)
{
vlapic_set_reg(vlapic, APIC_TMICT, val);
create_periodic_time(current, &vlapic->pt, period, vlapic->pt.irq,
- !vlapic_lvtt_period(vlapic), NULL, vlapic);
+ !vlapic_lvtt_period(vlapic), vlapic_pt_cb,
+ &vlapic->timer_last_update);
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
"bus cycle is %uns, "
s->pt.irq = lvtt & APIC_VECTOR_MASK;
create_periodic_time(vlapic_vcpu(s), &s->pt, period, s->pt.irq,
- !vlapic_lvtt_period(s), NULL, s);
+ !vlapic_lvtt_period(s), vlapic_pt_cb,
+ &s->timer_last_update);
printk("lapic_load to rearm the actimer:"
"bus cycle is %uns, "
#include <asm/hvm/vpt.h>
#include <asm/event.h>
+static int pt_support_time_frozen(struct domain *d)
+{
+ return (d->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] ==
+ HVMPTM_delay_for_missed_ticks);
+}
+
static void pt_lock(struct periodic_time *pt)
{
struct vcpu *v;
pt->scheduled += missed_ticks * pt->period;
}
-void pt_freeze_time(struct vcpu *v)
+static __inline__ void pt_freeze_time(struct vcpu *v)
+{
+ v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
+}
+
+void pt_save_timer(struct vcpu *v)
{
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
spin_lock(&v->arch.hvm_vcpu.tm_lock);
- v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
-
list_for_each_entry ( pt, head, list )
stop_timer(&pt->timer);
+ if ( pt_support_time_frozen(v->domain) )
+ pt_freeze_time(v);
+
spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
-void pt_thaw_time(struct vcpu *v)
+static __inline__ void pt_thaw_time(struct vcpu *v)
+{
+ if ( v->arch.hvm_vcpu.guest_time )
+ {
+ hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
+ v->arch.hvm_vcpu.guest_time = 0;
+ }
+}
+
+void pt_restore_timer(struct vcpu *v)
{
struct list_head *head = &v->arch.hvm_vcpu.tm_list;
struct periodic_time *pt;
spin_lock(&v->arch.hvm_vcpu.tm_lock);
- if ( v->arch.hvm_vcpu.guest_time )
+ list_for_each_entry ( pt, head, list )
{
- hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
- v->arch.hvm_vcpu.guest_time = 0;
-
- list_for_each_entry ( pt, head, list )
- {
- missed_ticks(pt);
- set_timer(&pt->timer, pt->scheduled);
- }
+ missed_ticks(pt);
+ set_timer(&pt->timer, pt->scheduled);
}
+ if ( pt_support_time_frozen(v->domain) )
+ pt_thaw_time(v);
+
spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
pt->last_plt_gtime += pt->period_cycles;
}
- if ( hvm_get_guest_time(v) < pt->last_plt_gtime )
+ if ( pt_support_time_frozen(v->domain) &&
+ hvm_get_guest_time(v) < pt->last_plt_gtime )
hvm_set_guest_time(v, pt->last_plt_gtime);
cb = pt->cb;
void hvm_send_assist_req(struct vcpu *v);
-void hvm_set_guest_time(struct vcpu *v, u64 gtime);
-u64 hvm_get_guest_time(struct vcpu *v);
+void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc);
+u64 hvm_get_guest_tsc(struct vcpu *v);
+#define hvm_set_guest_time(vcpu, gtime) hvm_set_guest_tsc(vcpu, gtime)
+#define hvm_get_guest_time(vcpu) hvm_get_guest_tsc(vcpu)
#define hvm_paging_enabled(v) \
(!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
#define ticks_per_sec(v) (v->domain->arch.hvm_domain.tsc_frequency)
-void pt_freeze_time(struct vcpu *v);
-void pt_thaw_time(struct vcpu *v);
+void pt_save_timer(struct vcpu *v);
+void pt_restore_timer(struct vcpu *v);
void pt_update_irq(struct vcpu *v);
void pt_intr_post(struct vcpu *v, struct hvm_intack intack);
void pt_reset(struct vcpu *v);
#define HVM_PARAM_NVRAM_FD 7
#define HVM_PARAM_VHPT_SIZE 8
#define HVM_PARAM_BUFPIOREQ_PFN 9
-#define HVM_NR_PARAMS 10
-#else
-#define HVM_NR_PARAMS 7
#endif
+/*
+ * Set mode for virtual timers (currently x86 only):
+ * delay_for_missed_ticks (default):
+ * Do not advance a vcpu's time beyond the correct delivery time for
+ * interrupts that have been missed due to preemption. Deliver missed
+ * interrupts when the vcpu is rescheduled and advance the vcpu's virtual
+ * time stepwise for each one.
+ * no_delay_for_missed_ticks:
+ * As above, missed interrupts are delivered, but guest time always tracks
+ * wallclock (i.e., real) time while doing so.
+ */
+#define HVM_PARAM_TIMER_MODE 10
+#define HVMPTM_delay_for_missed_ticks 0
+#define HVMPTM_no_delay_for_missed_ticks 1
+
+#define HVM_NR_PARAMS 11
+
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */