                                      pfec, hvmemul_ctxt, translate);
}
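
+/*
+ * Does the cached MMIO state (vio->mmio_gla et al) already cover the
+ * linear address being accessed, for the kind of access described by
+ * @pfec?  Write and insn-fetch accesses check their respective flags;
+ * anything else is treated as a read.  @bytes is unused for now.
+ */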
+static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec)
+{
+    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+
+    if ( pfec & PFEC_write_access )
+    {
+        if ( !vio->mmio_access.write_access )
+            return false;
+    }
+    else if ( pfec & PFEC_insn_fetch )
+    {
+        if ( !vio->mmio_access.insn_fetch )
+            return false;
+    }
+    else if ( !vio->mmio_access.read_access )
+        return false;
+
+    return vio->mmio_gla == (addr & PAGE_MASK);
+}
+
static int __hvmemul_read(
    enum x86_segment seg,
    unsigned long offset,
    void *p_data,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
-    struct vcpu *curr = current;
    pagefault_info_t pfinfo;
    unsigned long addr, reps = 1;
    uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
    int rc;

    if ( is_x86_system_segment(seg) )
        pfec |= PFEC_implicit;
    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
        pfec |= PFEC_user_mode;

    rc = hvmemul_virtual_to_linear(
        seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
    if ( rc != X86EMUL_OKAY || !bytes )
        return rc;

-    if ( ((access_type != hvm_access_insn_fetch
-           ? vio->mmio_access.read_access
-           : vio->mmio_access.insn_fetch)) &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
+    if ( known_gla(addr, bytes, pfec) )
        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
                                        hvmemul_ctxt, 1);

    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
    unsigned long addr, reps = 1;
    uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
    int rc;
    void *mapping;
    if ( rc != X86EMUL_OKAY || !bytes )
        return rc;

-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
+    if ( known_gla(addr, bytes, pfec) )
        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
                                         hvmemul_ctxt, 1);

    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    unsigned long addr, reps = 1;
    uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
    int rc;
    void *mapping;
    else
    {
        unsigned long data = 0;
-        bool known_gpfn = vio->mmio_access.write_access &&
-                          vio->mmio_gla == (addr & PAGE_MASK);
+        bool known_gpfn = known_gla(addr, bytes, pfec);

        if ( bytes > sizeof(data) )
            return X86EMUL_UNHANDLEABLE;