     trace_var(event, 0/*!cycles*/, size, buffer);
 }
 
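+/*
+ * An ioreq with no backing device model is completed by this "null"
+ * handler: reads return all-ones and writes are silently discarded.
+ */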
+static int null_read(const struct hvm_io_handler *handler,
+                     uint64_t addr,
+                     uint32_t size,
+                     uint64_t *data)
+{
+    *data = ~0ul;
+    return X86EMUL_OKAY;
+}
+
+static int null_write(const struct hvm_io_handler *handler,
+                      uint64_t addr,
+                      uint32_t size,
+                      uint64_t data)
+{
+    return X86EMUL_OKAY;
+}
+
+static const struct hvm_io_ops null_ops = {
+    .read = null_read,
+    .write = null_write
+};
+
+static const struct hvm_io_handler null_handler = {
+    .ops = &null_ops
+};
+
 static int hvmemul_do_io(
     bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
     switch ( rc )
     {
     case X86EMUL_OKAY:
-        p.state = STATE_IORESP_READY;
-        hvm_io_assist(&p);
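+        /* The access was handled internally: latch the result. */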
+        vio->io_data = p.data;
         vio->io_state = HVMIO_none;
         break;
     case X86EMUL_UNHANDLEABLE:
-        /* If there is no suitable backing DM, just ignore accesses */
+        /*
+         * If there is no suitable backing DM, handle the access via the
+         * null handler: reads return all-ones, writes are ignored.
+         */
         if ( !s )
         {
-            hvm_complete_assist_req(&p);
-            rc = X86EMUL_OKAY;
+            rc = hvm_process_io_intercept(&null_handler, &p);
+            if ( rc == X86EMUL_OKAY )
+                vio->io_data = p.data;
             vio->io_state = HVMIO_none;
         }
         else
             return 0;
     }
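+/*
+ * Completion of an ioreq sent to a device model: now internal, with
+ * hvm_wait_for_io() below as its only caller.
+ */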
+static void hvm_io_assist(ioreq_t *p)
+{
+    struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    enum hvm_io_state io_state;
+
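+    /* Mark the ioreq slot free before acting on the response. */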
+    p->state = STATE_IOREQ_NONE;
+
+    io_state = vio->io_state;
+    vio->io_state = HVMIO_none;
+
+    switch ( io_state )
+    {
+    case HVMIO_awaiting_completion:
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
+        break;
+    case HVMIO_handle_mmio_awaiting_completion:
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
+        (void)handle_mmio();
+        break;
+    case HVMIO_handle_pio_awaiting_completion:
+        if ( vio->io_size == 4 ) /* Needs zero extension. */
+            guest_cpu_user_regs()->rax = (uint32_t)p->data;
+        else
+            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
+        break;
+    default:
+        break;
+    }
+
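+    /* Finish deferred MSI-X write handling and shutdown deferral. */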
+    msix_write_completion(curr);
+    vcpu_end_shutdown_deferral(curr);
+}
+
 static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_complete_assist_req(ioreq_t *p)
-{
-    switch ( p->type )
-    {
-    case IOREQ_TYPE_PCI_CONFIG:
-        ASSERT_UNREACHABLE();
-        break;
-    case IOREQ_TYPE_COPY:
-    case IOREQ_TYPE_PIO:
-        if ( p->dir == IOREQ_READ )
-        {
-            if ( !p->data_is_ptr )
-                p->data = ~0ul;
-            else
-            {
-                int i, step = p->df ? -p->size : p->size;
-                uint32_t data = ~0;
-
-                for ( i = 0; i < p->count; i++ )
-                    hvm_copy_to_guest_phys(p->data + step * i, &data,
-                                           p->size);
-            }
-        }
-        /* FALLTHRU */
-    default:
-        p->state = STATE_IORESP_READY;
-        hvm_io_assist(p);
-        break;
-    }
-}
-
 void hvm_broadcast_assist_req(ioreq_t *p)
 {
     struct domain *d = current->domain;
     return 1;
 }
 
-void hvm_io_assist(ioreq_t *p)
-{
-    struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    enum hvm_io_state io_state;
-
-    p->state = STATE_IOREQ_NONE;
-
-    io_state = vio->io_state;
-    vio->io_state = HVMIO_none;
-
-    switch ( io_state )
-    {
-    case HVMIO_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        break;
-    case HVMIO_handle_mmio_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        (void)handle_mmio();
-        break;
-    case HVMIO_handle_pio_awaiting_completion:
-        if ( vio->io_size == 4 ) /* Needs zero extension. */
-            guest_cpu_user_regs()->rax = (uint32_t)p->data;
-        else
-            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-        break;
-    default:
-        break;
-    }
-
-    if ( p->state == STATE_IOREQ_NONE )
-    {
-        msix_write_completion(curr);
-        vcpu_end_shutdown_deferral(curr);
-    }
-}
-
 static bool_t dpci_portio_accept(const struct hvm_io_handler *handler,
                                  const ioreq_t *p)
 {
                                                  ioreq_t *p);
 int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
 void hvm_broadcast_assist_req(ioreq_t *p);
-void hvm_complete_assist_req(ioreq_t *p);
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
                               struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   const union vioapic_redir_entry *ent);
 void msix_write_completion(struct vcpu *);