int interrupt_request;
CPU_COMMON
-
- int send_event;
} CPUX86State;
CPUX86State *cpu_x86_init(void);
/* No state change if state = STATE_IORESP_HOOK */
if (req->state == STATE_IOREQ_INPROCESS) {
- mb();
+ mb(); /* Update ioreq contents /then/ update state. */
req->state = STATE_IORESP_READY;
- }
- env->send_event = 1;
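+ /* Notify Xen so the vcpu waiting in hvm_do_resume() can continue. */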
+ xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
+ } else
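+ /* Unexpected request state: give up on this domain. */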
+ destroy_hvm_domain();
}
}
qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, env);
- env->send_event = 0;
-
while (1) {
if (vm_running) {
if (shutdown_requested)
/* Wait up to 10 msec. */
main_loop_wait(10);
-
- if (env->send_event) {
- env->send_event = 0;
- xc_evtchn_notify(xce_handle, ioreq_local_port[send_vcpu]);
- }
}
destroy_hvm_domain();
return 0;
p = &vio->vp_ioreq;
if (p->state == STATE_IORESP_READY) {
- p->state = STATE_INVALID;
+ p->state = STATE_IOREQ_NONE;
}
else {
/* Can't block here, for the same reason as other places to
ioreq_t *p;
p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- if (unlikely(p->state != STATE_INVALID)) {
+ if (unlikely(p->state != STATE_IOREQ_NONE)) {
/* This indicates a bug in the device model. Crash the
domain. */
printk("Device model set bad IO state %d.\n", p->state);
void hvm_stts(struct vcpu *v)
{
/* FPU state already dirty? Then no need to setup_fpu() lazily. */
- if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
- return;
-
- hvm_funcs.stts(v);
+ if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+ hvm_funcs.stts(v);
}
void hvm_set_guest_time(struct vcpu *v, u64 gtime)
void hvm_do_resume(struct vcpu *v)
{
ioreq_t *p;
- struct periodic_time *pt =
- &v->domain->arch.hvm_domain.pl_time.periodic_tm;
+ struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
hvm_stts(v);
- /* pick up the elapsed PIT ticks and re-enable pit_timer */
- if ( pt->enabled && v->vcpu_id == pt->bind_vcpu && pt->first_injected ) {
- if ( v->arch.hvm_vcpu.guest_time ) {
+ /* Pick up the elapsed PIT ticks and re-enable pit_timer. */
+ if ( pt->enabled && (v->vcpu_id == pt->bind_vcpu) && pt->first_injected )
+ {
+ if ( v->arch.hvm_vcpu.guest_time )
+ {
hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
v->arch.hvm_vcpu.guest_time = 0;
}
pickup_deactive_ticks(pt);
}
+ /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
- p->state != STATE_IOREQ_READY &&
- p->state != STATE_IOREQ_INPROCESS);
- switch ( p->state )
+ while ( p->state != STATE_IOREQ_NONE )
{
- case STATE_IORESP_READY:
- hvm_io_assist(v);
- break;
- case STATE_INVALID:
- break;
- default:
- printk("Weird HVM iorequest state %d.\n", p->state);
- domain_crash(v->domain);
+ switch ( p->state )
+ {
+ case STATE_IORESP_READY: /* IORESP_READY -> NONE */
+ hvm_io_assist(v);
+ break;
+ case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
+ case STATE_IOREQ_INPROCESS:
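+ /* Go to sleep on the event channel; re-check p->state on wakeup. */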
+ wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
+ (p->state != STATE_IOREQ_READY) &&
+ (p->state != STATE_IOREQ_INPROCESS));
+ break;
+ default:
+ gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
+ domain_crash_synchronous();
+ }
}
}
io_opp = &v->arch.hvm_vcpu.io_op;
regs = &io_opp->io_context;
vio = get_vio(v->domain, v->vcpu_id);
-
- if ( vio == 0 ) {
- printk("bad shared page: %lx\n", (unsigned long)vio);
+ p = &vio->vp_ioreq;
+ if ( p->state != STATE_IORESP_READY )
+ {
+ gdprintk(XENLOG_ERR, "Unexpected HVM iorequest state %d.\n", p->state);
domain_crash_synchronous();
}
- p = &vio->vp_ioreq;
- if ( p->state == STATE_IORESP_READY ) {
- p->state = STATE_INVALID;
- if ( p->type == IOREQ_TYPE_PIO )
- hvm_pio_assist(regs, p, io_opp);
- else
- hvm_mmio_assist(regs, p, io_opp);
+ p->state = STATE_IOREQ_NONE;
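+ /* The response has been accepted; the slot can take a new request. */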
+ if ( p->type == IOREQ_TYPE_PIO )
+ hvm_pio_assist(regs, p, io_opp);
+ else
+ hvm_mmio_assist(regs, p, io_opp);
- /* Copy register changes back into current guest state. */
- hvm_load_cpu_guest_regs(v, regs);
- memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
- }
+ /* Copy register changes back into current guest state. */
+ hvm_load_cpu_guest_regs(v, regs);
+ memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
}
/*
ioreq_t *p;
p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- if ( unlikely(p->state != STATE_INVALID) ) {
+ if ( unlikely(p->state != STATE_IOREQ_NONE) ) {
/* This indicates a bug in the device model. Crash the
domain. */
printk("Device model set bad IO state %d.\n", p->state);
}
p = &vio->vp_ioreq;
- if ( p->state != STATE_INVALID )
+ if ( p->state != STATE_IOREQ_NONE )
printk("WARNING: send pio with something already pending (%d)?\n",
p->state);
p = &vio->vp_ioreq;
- if ( p->state != STATE_INVALID )
+ if ( p->state != STATE_IOREQ_NONE )
printk("WARNING: send mmio with something already pending (%d)?\n",
p->state);
p->dir = dir;
#define IOREQ_READ 1
#define IOREQ_WRITE 0
-#define STATE_INVALID 0
+#define STATE_IOREQ_NONE 0
#define STATE_IOREQ_READY 1
#define STATE_IOREQ_INPROCESS 2
#define STATE_IORESP_READY 3
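+
+/*
+ * An ioreq cycles NONE -> IOREQ_READY -> IOREQ_INPROCESS -> IORESP_READY
+ * -> NONE: Xen posts a request, the device model services it and posts a
+ * response, and hvm_io_assist() consumes the response.
+ */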