*/
if ( (p2mt == p2m_mmio_dm) ||
(npfec.write_access &&
- (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
+ (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
{
__put_gfn(p2m, gfn);
if ( ap2m_active )
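This first hunk, in hvm_hap_nested_page_fault(), renames the type in the fault-dispatch test: a write fault against a p2m_ioreq_server page is still forwarded to the device model rather than treated as RAM. Restated as a standalone predicate (hypothetical helper, not part of the patch; struct npfec and the p2m predicates are the ones already used above):

/* Hypothetical restatement of the dispatch test above: the fault is
 * handed to the device model when it hits emulated MMIO, or when it
 * is a write to a discard-write page or to a p2m_ioreq_server page. */
static bool fault_goes_to_dm(p2m_type_t p2mt, struct npfec npfec)
{
    return (p2mt == p2m_mmio_dm) ||
           (npfec.write_access &&
            (p2m_is_discard_write(p2mt) || p2mt == p2m_ioreq_server));
}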
get_gfn_query_unlocked(d, a.pfn, &t);
if ( p2m_is_mmio(t) )
a.mem_type = HVMMEM_mmio_dm;
+ else if ( t == p2m_ioreq_server )
+ a.mem_type = HVMMEM_ioreq_server;
else if ( p2m_is_readonly(t) )
a.mem_type = HVMMEM_ram_ro;
else if ( p2m_is_ram(t) )
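The HVMOP_get_mem_type hunk reports the new type explicitly, before the generic p2m_is_readonly() catch-all would fold it into HVMMEM_ram_ro. From the toolstack side the result is visible through libxenctrl; a minimal sketch, assuming the xc_hvm_get_mem_type() wrapper of contemporary libxc:

#include <xenctrl.h>

/* Sketch: check whether a guest pfn is currently claimed as
 * ioreq-server memory.  Returns 1/0, or -1 on hypercall failure. */
static int is_ioreq_server_pfn(xc_interface *xch, domid_t dom,
                               uint64_t pfn)
{
    hvmmem_type_t t;

    if ( xc_hvm_get_mem_type(xch, dom, pfn, &t) < 0 )
        return -1;

    return t == HVMMEM_ioreq_server;
}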
[HVMMEM_ram_rw] = p2m_ram_rw,
[HVMMEM_ram_ro] = p2m_ram_ro,
[HVMMEM_mmio_dm] = p2m_mmio_dm,
- [HVMMEM_unused] = p2m_invalid
+ [HVMMEM_unused] = p2m_invalid,
+ [HVMMEM_ioreq_server] = p2m_ioreq_server
};
if ( copy_from_guest(&a, arg, 1) )
}
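Here the HVMOP_set_mem_type translation table gains an entry mapping the new public type onto p2m_ioreq_server (and the pre-existing last initializer picks up the now-required trailing comma). An emulator can use this to make guest writes to a page range trap to it; a minimal sketch, assuming the xc_hvm_set_mem_type() wrapper and its page-count parameter:

#include <xenctrl.h>

/* Sketch: have guest writes to [first_pfn, first_pfn + nr) fault and
 * be forwarded for emulation.  xc_hvm_set_mem_type() issues
 * HVMOP_set_mem_type on our behalf. */
static int claim_write_trapped_range(xc_interface *xch, domid_t dom,
                                     uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(xch, dom, HVMMEM_ioreq_server,
                               first_pfn, nr);
}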
if ( !p2m_is_ram(t) &&
(!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
- (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
+ (t != p2m_ioreq_server || a.hvmmem_type != HVMMEM_ram_rw) )
{
put_gfn(d, pfn);
goto setmemtype_fail;
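This hunk carries the rename into the type-change sanity check. Applying De Morgan to the rejection test gives the allowed transitions in positive form (hypothetical helper, not in the patch): plain RAM may change type, a hole may become HVMMEM_mmio_dm, and a p2m_ioreq_server page may only revert to HVMMEM_ram_rw.

/* Positive form of the rejection test above (hypothetical helper). */
static bool mem_type_change_ok(p2m_type_t t, hvmmem_type_t new_type)
{
    return p2m_is_ram(t) ||
           (p2m_is_hole(t) && new_type == HVMMEM_mmio_dm) ||
           (t == p2m_ioreq_server && new_type == HVMMEM_ram_rw);
}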
entry->a = entry->d = !!cpu_has_vmx_ept_ad;
break;
case p2m_grant_map_ro:
- case p2m_mmio_write_dm:
+ case p2m_ioreq_server:
entry->r = 1;
entry->w = entry->x = 0;
entry->a = !!cpu_has_vmx_ept_ad;

default:
return flags | _PAGE_NX_BIT;
case p2m_grant_map_ro:
- case p2m_mmio_write_dm:
+ case p2m_ioreq_server:
return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
case p2m_ram_ro:
case p2m_ram_logdirty:

/* Need to hand off device-model MMIO to the device model */
if ( p2mt == p2m_mmio_dm
- || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
+ || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
{
gpa = guest_walk_to_gpa(&gw);
goto mmio;

p2m_ram_shared = 12, /* Shared or sharable memory */
p2m_ram_broken = 13, /* Broken page; access causes domain crash */
p2m_map_foreign = 14, /* ram pages from foreign domain */
- p2m_mmio_write_dm = 15, /* Read-only; writes go to the device model */
+ p2m_ioreq_server = 15, /* Read-only; writes go to an ioreq server */
} p2m_type_t;
/* Modifiers to the query */

| p2m_to_mask(p2m_ram_ro) \
| p2m_to_mask(p2m_grant_map_ro) \
| p2m_to_mask(p2m_ram_shared) \
- | p2m_to_mask(p2m_mmio_write_dm))
+ | p2m_to_mask(p2m_ioreq_server))
/* Write-discard types, which should discard the write operations */
#define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro) \
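p2m_ioreq_server stays in P2M_RO_TYPES, so the generic p2m_is_readonly() predicate still covers it, but it is deliberately absent from P2M_DISCARD_WRITE_TYPES: writes to such pages must be forwarded for emulation, not silently dropped. Both predicates follow p2m.h's existing mask pattern (shown for context):

/* Each p2m type is one bit in a mask; class membership is a mask test. */
#define p2m_to_mask(_t)          (1UL << (_t))
#define p2m_is_readonly(_t)      (p2m_to_mask(_t) & P2M_RO_TYPES)
#define p2m_is_discard_write(_t) (p2m_to_mask(_t) & P2M_DISCARD_WRITE_TYPES)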
HVMMEM_ram_ro, /* Read-only; writes are discarded */
HVMMEM_mmio_dm, /* Reads and writes go to the device model */
#if __XEN_INTERFACE_VERSION__ < 0x00040700
- HVMMEM_mmio_write_dm /* Read-only; writes go to the device model */
+ HVMMEM_mmio_write_dm, /* Read-only; writes go to the device model */
#else
- HVMMEM_unused /* Placeholder; setting memory to this type
+ HVMMEM_unused, /* Placeholder; setting memory to this type
will fail for code after 4.7.0 */
#endif
+ HVMMEM_ioreq_server /* Read-only; writes go to an ioreq server */
} hvmmem_type_t;
/* Following tools-only interfaces may change in future. */
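Finally, the public ABI: HVMMEM_ioreq_server is appended after the versioned HVMMEM_mmio_write_dm/HVMMEM_unused slot (hence the new trailing commas), so it gets the same numeric value under both interface versions. Issued without libxc, the hypercall argument would be filled in roughly like this (a sketch; field names follow struct xen_hvm_set_mem_type from this header, and the privcmd plumbing that actually delivers the hypercall is elided):

#include <xen/hvm/hvm_op.h>

/* Sketch: prepare an HVMOP_set_mem_type argument claiming a range as
 * ioreq-server memory.  Delivery to Xen is elided. */
static void fill_set_mem_type(struct xen_hvm_set_mem_type *a,
                              domid_t domid, uint64_t first_pfn,
                              uint32_t nr)
{
    a->domid = domid;
    a->hvmmem_type = HVMMEM_ioreq_server;
    a->first_pfn = first_pfn;
    a->nr = nr;
}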