static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
const ioreq_t *p)
{
+ paddr_t first = hvm_mmio_first_byte(p);
+ paddr_t last = hvm_mmio_last_byte(p);
+
BUG_ON(handler->type != IOREQ_TYPE_COPY);
- return handler->mmio.ops->check(current, p->addr);
+ if ( !handler->mmio.ops->check(current, first) )
+ return 0;
+
+ /*
+ * Make sure the handler will accept the whole access.  The handler
+ * already claimed the first byte, so if it refuses the last one
+ * there is no safe way to hand the access elsewhere; crash the
+ * domain instead of mishandling part of the access.
+ */
+ if ( last != first &&
+ !handler->mmio.ops->check(current, last) )
+ domain_crash(current->domain);
+
+ return 1;
}
static int hvm_mmio_read(const struct hvm_io_handler *handler,
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
ioreq_t *p)
{
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+ struct vcpu *curr = current;
+ struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
const struct hvm_io_ops *ops = (p->type == IOREQ_TYPE_COPY) ?
&mmio_ops : &portio_ops;
int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
if ( i != 0 )
{
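+ /*
+ * The handler rejected an iteration after earlier ones succeeded.
+ * Their side effects cannot be undone, so crash the domain and
+ * report only the iterations that did complete.
+ */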
+ if ( rc == X86EMUL_UNHANDLEABLE )
+ domain_crash(curr->domain);
+
p->count = i;
rc = X86EMUL_OKAY;
}
{
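+ /*
+ * The accept checks derive the access range from count and size,
+ * so describe a well-formed single-byte probe.
+ */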
ioreq_t p = {
.type = IOREQ_TYPE_COPY,
- .addr = gpa
+ .addr = gpa,
+ .count = 1,
+ .size = 1,
};
return hvm_find_io_handler(&p) != NULL;
hvm_mmio_write_t write;
};
+static inline paddr_t hvm_mmio_first_byte(const ioreq_t *p)
+{
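+ /*
+ * With DF set the access walks downwards from p->addr, so the
+ * lowest byte sits (count - 1) elements below the start.
+ */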
+ return p->df ?
+ p->addr - (p->count - 1ul) * p->size :
+ p->addr;
+}
+
+static inline paddr_t hvm_mmio_last_byte(const ioreq_t *p)
+{
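+ /* Widen count so the multiplication below is carried out in unsigned long. */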
+ unsigned long count = p->count;
+
+ return p->df ?
+ p->addr + p->size - 1 :
+ p->addr + (count * p->size) - 1;
+}
+
typedef int (*portio_action_t)(
int dir, unsigned int port, unsigned int bytes, uint32_t *val);