while ( off & (chunk - 1) )
chunk >>= 1;
- if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+ if ( ((access_type != hvm_access_insn_fetch
+ ? vio->mmio_access.read_access
+ : vio->mmio_access.insn_fetch)) &&
+ (vio->mmio_gva == (addr & PAGE_MASK)) )
{
- if ( access_type == hvm_access_insn_fetch )
- return X86EMUL_UNHANDLEABLE;
gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
while ( (off + chunk) <= PAGE_SIZE )
{
while ( off & (chunk - 1) )
chunk >>= 1;
- if ( unlikely(vio->mmio_gva == (addr & PAGE_MASK)) && vio->mmio_gva )
+ if ( vio->mmio_access.write_access &&
+ (vio->mmio_gva == (addr & PAGE_MASK)) )
{
gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
while ( (off + chunk) <= PAGE_SIZE )
&& is_hvm_vcpu(v)
&& hvm_mmio_internal(gpa) )
{
- if ( !handle_mmio() )
+ if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
goto out;
if ( unlikely(is_pvh_vcpu(v)) )
goto out;
- if ( !handle_mmio() )
+ if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
hvm_inject_hw_exception(TRAP_gp_fault, 0);
rc = 1;
goto out;
if ( vio->io_state == HVMIO_awaiting_completion )
vio->io_state = HVMIO_handle_mmio_awaiting_completion;
else
- vio->mmio_gva = 0;
+ vio->mmio_access = (struct npfec){};
switch ( rc )
{
return 1;
}
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+ struct npfec access)
{
struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+ vio->mmio_access = access.gla_valid &&
+ access.kind == npfec_kind_with_gla
+ ? access : (struct npfec){};
vio->mmio_gva = gva & PAGE_MASK;
vio->mmio_gpfn = gpfn;
return handle_mmio();
p2m_type_t p2mt;
uint32_t rc;
int version;
+ struct npfec access = {
+ .read_access = 1,
+ .gla_valid = 1,
+ .kind = npfec_kind_with_gla
+ };
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
int fast_emul = 0;
#endif
perfc_incr(shadow_fault);
+ if ( regs->error_code & PFEC_write_access )
+ access.write_access = 1;
+
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
/* If faulting frame is successfully emulated in last shadow fault
* it's highly likely to reach same emulation action for this frame.
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
reset_early_unshadow(v);
trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
- return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+ return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0);
}
else
paging_unlock(d);
put_gfn(d, gfn_x(gfn));
trace_shadow_gen(TRC_SHADOW_MMIO, va);
- return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+ return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
? EXCRET_fault_fixed : 0);
not_a_shadow_fault:
void send_timeoffset_req(unsigned long timeoff);
void send_invalidate_req(void);
int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+ struct npfec);
int handle_pio(uint16_t port, unsigned int size, int dir);
void hvm_interrupt_post(struct vcpu *v, int vector, int type);
void hvm_io_assist(ioreq_t *p);
* HVM emulation:
* Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
* The latter is known to be an MMIO frame (not RAM).
- * This translation is only valid if @mmio_gva is non-zero.
+ * This translation is only valid for accesses as per @mmio_access.
*/
+ struct npfec mmio_access;
unsigned long mmio_gva;
unsigned long mmio_gpfn;
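
As a quick illustration of the caching rule this patch introduces (a standalone sketch, not Xen code: the struct below is a simplified stand-in for Xen's struct npfec, and the helper names and the kind value are made up for this example), the cached gva->gpfn MMIO translation is only recorded for faults that carried a valid guest linear address, and is only reused when the recorded fault flags cover the access currently being emulated:

    /* Simplified stand-in for the fields of struct npfec used above. */
    struct npfec_sketch {
        unsigned int read_access:1;
        unsigned int write_access:1;
        unsigned int insn_fetch:1;
        unsigned int gla_valid:1;
        unsigned int kind:2;
    };

    /* Placeholder for npfec_kind_with_gla; the real value lives in Xen's headers. */
    #define NPFEC_KIND_WITH_GLA_SKETCH 1

    /*
     * Recording side, mirroring handle_mmio_with_translation() above: only
     * keep the access flags when the fault reported a valid guest linear
     * address; otherwise cache an all-zero value, which disables reuse.
     */
    static struct npfec_sketch cacheable_access(struct npfec_sketch access)
    {
        struct npfec_sketch none = { 0 };

        return (access.gla_valid && access.kind == NPFEC_KIND_WITH_GLA_SKETCH)
               ? access : none;
    }

    /*
     * Reuse side, mirroring the checks added to the read and write emulation
     * paths above: the cached translation may be used only if the recorded
     * access rights cover the access now being emulated and the linear
     * address falls within the cached page.
     */
    static int may_reuse_cached_mmio(const struct npfec_sketch *cached,
                                     unsigned long cached_gva,
                                     unsigned long addr,
                                     int is_write, int is_insn_fetch)
    {
        int allowed = is_write ? cached->write_access
                               : (is_insn_fetch ? cached->insn_fetch
                                                : cached->read_access);

        /* PAGE_MASK equivalent for 4K pages: keep only the page-aligned bits. */
        return allowed && (cached_gva == (addr & ~0xfffUL));
    }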