#include <asm/hvm/trace.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/event.h>
+#include <asm/hvm/vmx/vmx.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
#include <public/sched.h>
vio->io_state = HVMIO_completed;
vio->io_data = p->data;
break;
- case HVMIO_handle_mmio_awaiting_completion:
- vio->io_state = HVMIO_completed;
- vio->io_data = p->data;
- (void)handle_mmio();
- break;
- case HVMIO_handle_pio_awaiting_completion:
- if ( vio->io_size == 4 ) /* Needs zero extension. */
- guest_cpu_user_regs()->rax = (uint32_t)p->data;
- else
- memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
- break;
default:
break;
}
struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
struct domain *d = v->domain;
struct hvm_ioreq_server *s;
+ enum hvm_io_completion io_completion;
check_wakeup_from_wait();
}
}
- if ( vio->mmio_retry )
+ io_completion = vio->io_completion;
+ vio->io_completion = HVMIO_no_completion;
+
+ switch ( io_completion )
+ {
+ case HVMIO_no_completion:
+ break;
+ case HVMIO_mmio_completion:
handle_mmio();
+ break;
+ case HVMIO_pio_completion:
+ if ( vio->io_size == 4 ) /* Needs zero extension. */
+ guest_cpu_user_regs()->rax = (uint32_t)vio->io_data;
+ else
+ memcpy(&guest_cpu_user_regs()->rax, &vio->io_data, vio->io_size);
+ vio->io_state = HVMIO_none;
+ break;
+ case HVMIO_realmode_completion:
+ {
+ struct hvm_emulate_ctxt ctxt;
+
+ hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
+ vmx_realmode_emulate_one(&ctxt);
+ hvm_emulate_writeback(&ctxt);
+
+ break;
+ }
+ default:
+ ASSERT_UNREACHABLE();
+ break;
+ }
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
if ( rc != X86EMUL_RETRY )
vio->io_state = HVMIO_none;
- if ( vio->io_state == HVMIO_awaiting_completion )
- vio->io_state = HVMIO_handle_mmio_awaiting_completion;
+ if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+ vio->io_completion = HVMIO_mmio_completion;
else
vio->mmio_access = (struct npfec){};
return 0;
/* Completion in hvm_io_assist() with no re-emulation required. */
ASSERT(dir == IOREQ_READ);
- vio->io_state = HVMIO_handle_pio_awaiting_completion;
+ vio->io_completion = HVMIO_pio_completion;
break;
default:
gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
}
}
-static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
+void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
int rc;
perfc_incr(realmode_emulations);
rc = hvm_emulate_one(hvmemul_ctxt);
+ if ( vio->io_state == HVMIO_awaiting_completion || vio->mmio_retry )
+ vio->io_completion = HVMIO_realmode_completion;
+
if ( rc == X86EMUL_UNHANDLEABLE )
{
gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
hvm_emulate_prepare(&hvmemul_ctxt, regs);
- if ( vio->io_state == HVMIO_completed )
- realmode_emulate_one(&hvmemul_ctxt);
-
/* Only deliver interrupts into emulated real mode. */
if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
(intr_info & INTR_INFO_VALID_MASK) )
curr->arch.hvm_vmx.vmx_emulate = 1;
while ( curr->arch.hvm_vmx.vmx_emulate &&
- !softirq_pending(smp_processor_id()) &&
- (vio->io_state == HVMIO_none) )
+ !softirq_pending(smp_processor_id()) )
{
/*
* Check for pending interrupts only every 16 instructions, because
hvm_local_events_need_delivery(curr) )
break;
- realmode_emulate_one(&hvmemul_ctxt);
+ vmx_realmode_emulate_one(&hvmemul_ctxt);
+
+ if ( vio->io_state != HVMIO_none || vio->mmio_retry )
+ break;
/* Stop emulating unless our segment state is not safe */
if ( curr->arch.hvm_vmx.vmx_realmode )
if ( intr_info & INTR_INFO_VALID_MASK )
__vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
HVMIO_none = 0,
HVMIO_dispatched,
HVMIO_awaiting_completion,
- HVMIO_handle_mmio_awaiting_completion,
- HVMIO_handle_pio_awaiting_completion,
HVMIO_completed
};
+/*
+ * Deferred completion action recorded in struct hvm_vcpu_io and
+ * consumed (then reset to HVMIO_no_completion) on resume, replacing
+ * the old HVMIO_handle_*_awaiting_completion io_state values.
+ */
+enum hvm_io_completion {
+    HVMIO_no_completion,       /* nothing pending; resume path takes no action */
+    HVMIO_mmio_completion,     /* re-invoke handle_mmio() to finish the access */
+    HVMIO_pio_completion,      /* copy io_data into guest rax (io_size == 4 is
+                                  zero-extended) and clear io_state */
+    HVMIO_realmode_completion  /* re-run one VMX real-mode emulation step via
+                                  hvm_emulate_prepare()/vmx_realmode_emulate_one()/
+                                  hvm_emulate_writeback() */
+};
+
struct hvm_vcpu_asid {
uint64_t generation;
uint32_t asid;
struct hvm_vcpu_io {
/* I/O request in flight to device model. */
- enum hvm_io_state io_state;
- unsigned long io_data;
- int io_size;
+ enum hvm_io_state io_state;
+ enum hvm_io_completion io_completion;
+ unsigned long io_data;
+ unsigned int io_size;
/*
* HVM emulation:
void vmx_intr_assist(void);
void noreturn vmx_do_resume(struct vcpu *);
void vmx_vlapic_msr_changed(struct vcpu *v);
+void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
void vmx_realmode(struct cpu_user_regs *regs);
void vmx_update_debug_state(struct vcpu *v);
void vmx_update_exception_bitmap(struct vcpu *v);