{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- unsigned long saddr, daddr, bytes;
+ unsigned long saddr, daddr, bytes, sgfn, dgfn;
paddr_t sgpa, dgpa;
uint32_t pfec = PFEC_page_present;
p2m_type_t p2mt;
if ( rc != X86EMUL_OKAY )
return rc;
- /* Unlocked works here because we get_gfn for real in whatever
- * we call later. */
- (void)get_gfn_unlocked(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
+ /* XXX In a fine-grained p2m locking scenario, we need to sort these
+ * get_gfn calls, or else we might deadlock. */
+ sgfn = sgpa >> PAGE_SHIFT;
+ (void)get_gfn(current->domain, sgfn, &p2mt);
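+ /* Every exit path below must balance this get_gfn with a put_gfn. */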
if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
- return hvmemul_do_mmio(
+ {
+ rc = hvmemul_do_mmio(
sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
+ put_gfn(current->domain, sgfn);
+ return rc;
+ }
- (void)get_gfn_unlocked(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
+ dgfn = dgpa >> PAGE_SHIFT;
+ (void)get_gfn(current->domain, dgfn, &p2mt);
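+ /* Both sgfn and dgfn are held from here on; every exit must drop both. */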
if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
- return hvmemul_do_mmio(
+ {
+ rc = hvmemul_do_mmio(
dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
+ return rc;
+ }
/* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
bytes = *reps * bytes_per_rep;
* can be emulated by a source-to-buffer-to-destination block copy.
*/
if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
+ {
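+ /* Release both references before declaring the overlap unhandleable. */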
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
return X86EMUL_UNHANDLEABLE;
+ }
/* Adjust destination address for reverse copy. */
if ( df )
/* Allocate temporary buffer. Fall back to slow emulation if this fails. */
buf = xmalloc_bytes(bytes);
if ( buf == NULL )
+ {
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
return X86EMUL_UNHANDLEABLE;
+ }
/*
* We do a modicum of checking here, just for paranoia's sake and to
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
xfree(buf);
+ put_gfn(current->domain, sgfn);
+ put_gfn(current->domain, dgfn);
if ( rc == HVMCOPY_gfn_paged_out )
return X86EMUL_RETRY;
if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
HVMCOPY_okay )
{
- (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+ (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
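+ /* Holding the gfn keeps its p2m type stable across the check below. */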
/*
* The only case we handle is vga_mem <-> vga_mem.
* Anything else disables caching and leaves it to qemu-dm.
*/
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+ {
+ put_gfn(d, data >> PAGE_SHIFT);
return 0;
+ }
stdvga_mem_write(data, tmp, p->size);
+ put_gfn(d, data >> PAGE_SHIFT);
}
data += sign * p->size;
addr += sign * p->size;
if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
HVMCOPY_okay )
{
- (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+ (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
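+ /* As in the write-back path above, hold the gfn across the check and the VGA access. */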
if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+ {
+ put_gfn(d, data >> PAGE_SHIFT);
return 0;
+ }
tmp = stdvga_mem_read(data, p->size);
+ put_gfn(d, data >> PAGE_SHIFT);
}
stdvga_mem_write(addr, tmp, p->size);
data += sign * p->size;