can wake it up now.
Signed-off-by: Xin Li <xin.b.li@intel.com>
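
For context, the wake-up path that makes the dedicated hlt_timer redundant
looks roughly like this (a sketch assembled from the hunks below, not literal
code; all function and flag names are those used in this patch):

    /* hvm_hlt(): the vcpu goes to sleep, setting _VCPUF_blocked. */
    do_sched_op_compat(SCHEDOP_block, 0);

    /* svm/vmx_freeze_time(): pt->timer is left armed for a blocked vcpu,
     * so the next periodic tick still fires pt_timer_fn(), which now ends
     * with: */
    vcpu_kick(v);  /* wakes the vcpu blocked in HLT */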
get_vio(v->domain, v->vcpu_id)->vp_eport =
v->arch.hvm_vcpu.xen_port;
- init_timer(&v->arch.hvm_vcpu.hlt_timer, hlt_timer_fn, v, v->processor);
-
if ( v->vcpu_id != 0 )
return 0;
void hvm_vcpu_destroy(struct vcpu *v)
{
- kill_timer(&v->arch.hvm_vcpu.hlt_timer);
vlapic_destroy(v);
hvm_funcs.vcpu_destroy(v);
void hvm_hlt(unsigned long rflags)
{
- struct vcpu *v = current;
- struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
- s_time_t next_pt = -1, next_wakeup;
-
/*
* If we halt with interrupts disabled, that's a pretty sure sign that we
* want to shut down. In a real processor, NMIs are the only way to break
if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
return hvm_vcpu_down();
- if ( !v->vcpu_id )
- next_pt = get_scheduled(v, pt->irq, pt);
- next_wakeup = get_apictime_scheduled(v);
- if ( (next_pt != -1 && next_pt < next_wakeup) || next_wakeup == -1 )
- next_wakeup = next_pt;
- if ( next_wakeup != - 1 )
- set_timer(&current->arch.hvm_vcpu.hlt_timer, next_wakeup);
do_sched_op_compat(SCHEDOP_block, 0);
}
return 1;
}
-/* Hook function for the HLT instruction emulation wakeup. */
-void hlt_timer_fn(void *data)
-{
- struct vcpu *v = data;
- vcpu_kick(v);
-}
-
static __inline__ void missed_ticks(struct periodic_time *pt)
{
s_time_t missed_ticks;
void pt_timer_fn(void *data)
{
struct vcpu *v = data;
- struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
+ struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
pt->pending_intr_nr++;
pt->scheduled += pt->period;
- /* pick up missed timer tick */
+ /* Pick up missed timer ticks. */
missed_ticks(pt);
- if ( test_bit(_VCPUF_running, &v->vcpu_flags) ) {
+
+ /* No need to run the timer while a VCPU is descheduled. */
+ if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
set_timer(&pt->timer, pt->scheduled);
- }
+
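+ /* Wake the vcpu: it may be blocked in HLT waiting for this tick. */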
+ vcpu_kick(v);
}
-/* pick up missed timer ticks at deactive time */
+/* Pick up missed timer ticks at deactivation time. */
static void svm_freeze_time(struct vcpu *v)
{
struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
-
- if ( pt->enabled && pt->first_injected && v->vcpu_id == pt->bind_vcpu
+
+ if ( pt->enabled && pt->first_injected
+ && (v->vcpu_id == pt->bind_vcpu)
&& !v->arch.hvm_vcpu.guest_time ) {
v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
- stop_timer(&(pt->timer));
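+ /* Keep the timer running for a blocked vcpu: its next tick must kick it awake. */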
+ if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
+ stop_timer(&pt->timer);
}
}
if ( pt->enabled )
{
migrate_timer(&pt->timer, v->processor);
- migrate_timer(&v->arch.hvm_vcpu.hlt_timer, v->processor);
}
migrate_timer(&vcpu_vlapic(v)->vlapic_timer, v->processor);
migrate_timer(&vrtc->second_timer, v->processor);
else
vlapic_set_reg(vlapic, APIC_TMCCT, 0);
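+ /* Kick the vcpu: it may be blocked in HLT awaiting this timer interrupt. */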
+ vcpu_kick(vlapic_vcpu(vlapic));
+
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
"now 0x%016"PRIx64", expire @ 0x%016"PRIx64", "
"timer initial count 0x%x, timer current count 0x%x.",
static void vmx_freeze_time(struct vcpu *v)
{
struct periodic_time *pt=&v->domain->arch.hvm_domain.pl_time.periodic_tm;
-
- if ( pt->enabled && pt->first_injected && v->vcpu_id == pt->bind_vcpu
+
+ if ( pt->enabled && pt->first_injected
+ && (v->vcpu_id == pt->bind_vcpu)
&& !v->arch.hvm_vcpu.guest_time ) {
v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
- stop_timer(&(pt->timer));
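+ /* Keep the timer running for a blocked vcpu: its next tick must kick it awake. */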
+ if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
+ stop_timer(&pt->timer);
}
}
if ( pt->enabled )
{
migrate_timer(&pt->timer, v->processor);
- migrate_timer(&v->arch.hvm_vcpu.hlt_timer, v->processor);
}
migrate_timer(&vcpu_vlapic(v)->vlapic_timer, v->processor);
migrate_timer(&vrtc->second_timer, v->processor);
/* Flags */
int flag_dr_dirty;
- /* hlt ins emulation wakeup timer */
- struct timer hlt_timer;
-
unsigned long hvm_trace_values[5];
union {