int value_is_ptr = (p_data == NULL);
struct vcpu *curr = current;
ioreq_t *p = get_ioreq(curr);
+ unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
+ p2m_type_t p2mt;
+ mfn_t ram_mfn;
int rc;
+ /* Check for paged out page */
+ ram_mfn = gfn_to_mfn(curr->domain, ram_gfn, &p2mt);
+ if ( p2m_is_paging(p2mt) )
+ {
+ p2m_mem_paging_populate(curr->domain, ram_gfn);
+ return X86EMUL_RETRY;
+ }
+
/*
* Weird-sized accesses have undefined behaviour: we discard writes
* and read all-ones.
*/
}
else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
{
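+ /* The frame backing this address is paged out: have the emulator retry. */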
+ if ( pfec == PFEC_page_paged )
+ return X86EMUL_RETRY;
hvm_inject_exception(TRAP_page_fault, pfec, addr);
return X86EMUL_EXCEPTION;
}
/* Is it contiguous with the preceding PFNs? If not then we're done. */
if ( (npfn == INVALID_GFN) || (npfn != (pfn + (reverse ? -i : i))) )
{
+ if ( pfec == PFEC_page_paged )
+ return X86EMUL_RETRY;
done /= bytes_per_rep;
if ( done == 0 )
{
if ( rc != X86EMUL_OKAY )
return rc;
return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
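+ /* The guest page is paged out: ask the emulator to retry the access. */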
+ case HVMCOPY_gfn_paged_out:
+ return X86EMUL_RETRY;
default:
break;
}
return rc;
return hvmemul_do_mmio(gpa, &reps, bytes, 0,
IOREQ_WRITE, 0, p_data);
+ case HVMCOPY_gfn_paged_out:
+ return X86EMUL_RETRY;
default:
break;
}
xfree(buf);
+ if ( rc == HVMCOPY_gfn_paged_out )
+ return X86EMUL_RETRY;
if ( rc != HVMCOPY_okay )
{
gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt));
if ( !p2m_is_ram(p2mt) )
return -EINVAL;
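+ /* The frame is paged out: start paging it back in and fail with -ENOENT. */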
+ if ( p2m_is_paging(p2mt) )
+ {
+ p2m_mem_paging_populate(d, gmfn);
+ return -ENOENT;
+ }
ASSERT(mfn_valid(mfn));
page = mfn_to_page(mfn);
* we still treat it as a kernel-mode read (i.e. no access checks). */
pfec = PFEC_page_present;
gfn = paging_gva_to_gfn(current, va, &pfec);
+ if ( pfec == PFEC_page_paged )
+ return NULL;
mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
if ( p2m_is_paging(p2mt) )
{
&tss, prev_tr.base, sizeof(tss), PFEC_page_present);
if ( rc == HVMCOPY_bad_gva_to_gfn )
goto out;
+ if ( rc == HVMCOPY_gfn_paged_out )
+ goto out;
eflags = regs->eflags;
if ( taskswitch_reason == TSW_iret )
prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
if ( rc == HVMCOPY_bad_gva_to_gfn )
goto out;
+ if ( rc == HVMCOPY_gfn_paged_out )
+ goto out;
rc = hvm_copy_from_guest_virt(
&tss, tr.base, sizeof(tss), PFEC_page_present);
if ( rc == HVMCOPY_bad_gva_to_gfn )
goto out;
+ if ( rc == HVMCOPY_gfn_paged_out )
+ goto out;
if ( hvm_set_cr3(tss.cr3) )
goto out;
tr.base, &tss, sizeof(tss), PFEC_page_present);
if ( rc == HVMCOPY_bad_gva_to_gfn )
exn_raised = 1;
+ if ( rc == HVMCOPY_gfn_paged_out )
+ goto out;
if ( (tss.trace & 1) && !exn_raised )
hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
gfn = paging_gva_to_gfn(curr, addr, &pfec);
if ( gfn == INVALID_GFN )
{
+ if ( pfec == PFEC_page_paged )
+ return HVMCOPY_gfn_paged_out;
if ( flags & HVMCOPY_fault )
hvm_inject_exception(TRAP_page_fault, pfec, addr);
return HVMCOPY_bad_gva_to_gfn;
mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
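+ /* The gfn is paged out: request it be paged back in and tell the caller. */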
+ if ( p2m_is_paging(p2mt) )
+ {
+ p2m_mem_paging_populate(curr->domain, gfn);
+ return HVMCOPY_gfn_paged_out;
+ }
if ( p2m_is_grant(p2mt) )
return HVMCOPY_unhandleable;
if ( !p2m_is_ram(p2mt) )
{
for ( i = 0; i < p->count; i++ )
{
- rc = read_handler(
- v,
- p->addr + (sign * i * p->size),
- p->size, &data);
+ rc = read_handler(v, p->addr + (sign * i * p->size), p->size,
+ &data);
if ( rc != X86EMUL_OKAY )
break;
- (void)hvm_copy_to_guest_phys(
- p->data + (sign * i * p->size),
- &data,
- p->size);
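+ /* If the destination frame is paged out, abort the rep loop and retry. */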
+ if ( hvm_copy_to_guest_phys(p->data + (sign * i * p->size), &data,
+ p->size) == HVMCOPY_gfn_paged_out )
+ {
+ rc = X86EMUL_RETRY;
+ break;
+ }
}
}
else
{
for ( i = 0; i < p->count; i++ )
{
- (void)hvm_copy_from_guest_phys(
- &data,
- p->data + (sign * i * p->size),
- p->size);
- rc = write_handler(
- v,
- p->addr + (sign * i * p->size),
- p->size, data);
+ if ( hvm_copy_from_guest_phys(&data,
+ p->data + (sign * i * p->size),
+ p->size) == HVMCOPY_gfn_paged_out )
+ {
+ rc = X86EMUL_RETRY;
+ break;
+ }
+ rc = write_handler(v, p->addr + (sign * i * p->size), p->size,
+ data);
if ( rc != X86EMUL_OKAY )
break;
}
int i;
unsigned long addr, size;
- if ( (type == HVM_PORTIO) && (dpci_ioport_intercept(p)) )
- return X86EMUL_OKAY;
+ if ( type == HVM_PORTIO )
+ {
+ int rc = dpci_ioport_intercept(p);
+ if ( (rc == X86EMUL_OKAY) || (rc == X86EMUL_RETRY) )
+ return rc;
+ }
for ( i = 0; i < handler->num_slot; i++ )
{
vcpu_end_shutdown_deferral(curr);
}
-static void dpci_ioport_read(uint32_t mport, ioreq_t *p)
+static int dpci_ioport_read(uint32_t mport, ioreq_t *p)
{
int i, sign = p->df ? -1 : 1;
uint32_t data = 0;
}
if ( p->data_is_ptr )
- (void)hvm_copy_to_guest_phys(
- p->data + (sign * i * p->size), &data, p->size);
+ {
+ if ( hvm_copy_to_guest_phys(p->data + (sign * i * p->size), &data,
+ p->size) == HVMCOPY_gfn_paged_out )
+ return X86EMUL_RETRY;
+ }
else
p->data = data;
}
+
+ return X86EMUL_OKAY;
}
-static void dpci_ioport_write(uint32_t mport, ioreq_t *p)
+static int dpci_ioport_write(uint32_t mport, ioreq_t *p)
{
int i, sign = p->df ? -1 : 1;
uint32_t data;
{
data = p->data;
if ( p->data_is_ptr )
- (void)hvm_copy_from_guest_phys(
- &data, p->data + (sign * i * p->size), p->size);
+ {
+ if ( hvm_copy_from_guest_phys(&data, p->data + (sign * i * p->size),
+ p->size) == HVMCOPY_gfn_paged_out )
+ return X86EMUL_RETRY;
+ }
switch ( p->size )
{
BUG();
}
}
+
+ return X86EMUL_OKAY;
}
int dpci_ioport_intercept(ioreq_t *p)
struct g2m_ioport *g2m_ioport;
unsigned int mport, gport = p->addr;
unsigned int s = 0, e = 0;
+ int rc;
list_for_each_entry( g2m_ioport, &hd->g2m_ioport_list, list )
{
goto found;
}
- return 0;
+ return X86EMUL_UNHANDLEABLE;
found:
mport = (gport - s) + g2m_ioport->mport;
{
gdprintk(XENLOG_ERR, "Error: access to gport=0x%x denied!\n",
(uint32_t)p->addr);
- return 0;
+ return X86EMUL_UNHANDLEABLE;
}
switch ( p->dir )
{
case IOREQ_READ:
- dpci_ioport_read(mport, p);
+ rc = dpci_ioport_read(mport, p);
break;
case IOREQ_WRITE:
- dpci_ioport_write(mport, p);
+ rc = dpci_ioport_write(mport, p);
break;
default:
gdprintk(XENLOG_ERR, "Error: couldn't handle p->dir = %d", p->dir);
+ rc = X86EMUL_UNHANDLEABLE;
}
- return 1;
+ return rc;
}
/*
HVMCOPY_okay = 0,
HVMCOPY_bad_gva_to_gfn,
HVMCOPY_bad_gfn_to_mfn,
- HVMCOPY_unhandleable
+ HVMCOPY_unhandleable,
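+ /* The guest frame is paged out; the access should be retried later. */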
+ HVMCOPY_gfn_paged_out,
};
/*