};
static int hvmemul_do_io(
- bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
+ bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
{
struct vcpu *curr = current;
.type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
.addr = addr,
.size = size,
- .count = reps,
+ .count = *reps,
.dir = dir,
.df = df,
.data = data,
if ( (p.type != (is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO)) ||
(p.addr != addr) ||
(p.size != size) ||
- (p.count != reps) ||
+ (p.count != *reps) ||
(p.dir != dir) ||
(p.df != df) ||
(p.data_is_ptr != data_is_addr) )
BUG_ON(buffer == NULL);
- rc = hvmemul_do_io(is_mmio, addr, *reps, size, dir, df, 0,
+ rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0,
(uintptr_t)buffer);
if ( rc == X86EMUL_UNHANDLEABLE && dir == IOREQ_READ )
memset(buffer, 0xff, size);
count = 1;
}
- rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1,
+ rc = hvmemul_do_io(is_mmio, addr, &count, size, dir, df, 1,
ram_gpa);
+
if ( rc == X86EMUL_OKAY )
- {
v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
- *reps = count;
- }
+
+ *reps = count;
out:
while ( nr_pages )
ASSERT_UNREACHABLE();
/* fall through */
default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
+ domain_crash(current->domain);
+ return X86EMUL_UNHANDLEABLE;
}
if ( rc != X86EMUL_OKAY )
break;
ASSERT_UNREACHABLE();
/* fall through */
default:
- rc = X86EMUL_UNHANDLEABLE;
- break;
+ domain_crash(current->domain);
+ return X86EMUL_UNHANDLEABLE;
}
if ( rc != X86EMUL_OKAY )
break;
}
}
- if ( i != 0 && rc == X86EMUL_UNHANDLEABLE )
- domain_crash(current->domain);
+ if ( i )
+ {
+ p->count = i;
+ rc = X86EMUL_OKAY;
+ }
+ else if ( rc == X86EMUL_UNHANDLEABLE )
+ {
+ /*
+ * Don't forward entire batches to the device model: this would
+ * prevent the internal handlers from seeing subsequent iterations
+ * of the request.
+ */
+ p->count = 1;
+ }
return rc;
}
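
For illustration only, here is a minimal standalone sketch (not Xen code, and not part of the patch) of the contract the reps-by-pointer change introduces: a handler receives the repeat count through a pointer, may complete only part of the batch, and reports the completed count back through *reps so the caller can detect that a retry of the remaining iterations is needed, mirroring the mmio_retry computation above. The function name handle_reps, its 'limit' parameter, and the enum values are hypothetical.

/* Standalone sketch; only the pattern mirrors the patch, not the code. */
#include <stdbool.h>
#include <stdio.h>

/* Return codes mirroring the emulator's convention (values illustrative). */
enum { X86EMUL_OKAY = 0, X86EMUL_UNHANDLEABLE = 1 };

/* Hypothetical handler: completes at most 'limit' of the requested reps. */
static int handle_reps(unsigned long *reps, unsigned long limit)
{
    unsigned long done = (*reps < limit) ? *reps : limit;

    if ( done == 0 )
        return X86EMUL_UNHANDLEABLE;

    *reps = done;               /* report partial completion to the caller */
    return X86EMUL_OKAY;
}

int main(void)
{
    unsigned long reps = 8, count = reps;

    int rc = handle_reps(&count, 3);            /* only 3 of 8 complete */
    bool retry = (rc == X86EMUL_OKAY) && (count < reps);

    printf("rc=%d completed=%lu retry=%d\n", rc, count, (int)retry);
    return 0;
}

A completed count lower than the requested count means the emulation must be re-entered for the remaining iterations, which is exactly what the caller records via mmio_retry in the hunk above.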