int xenmem_add_to_physmap_one(
struct domain *d,
unsigned int space,
- domid_t foreign_domid,
+ union xen_add_to_physmap_batch_extra extra,
unsigned long idx,
xen_pfn_t gpfn)
{
case XENMAPSPACE_gmfn_foreign:
{
struct domain *od;
p2m_type_t p2mt;
- od = rcu_lock_domain_by_any_id(foreign_domid);
+
+ od = rcu_lock_domain_by_any_id(extra.foreign_domid);
if ( od == NULL )
return -ESRCH;
break;
}
case XENMAPSPACE_dev_mmio:
+ /* extra should be 0. Reserved for future use. */
+ if ( extra.res0 )
+ return -EOPNOTSUPP;
+
rc = map_dev_mmio_region(d, gpfn, 1, idx);
return rc;
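For illustration only, and not part of the patch: a caller of XENMEM_add_to_physmap_batch that targets XENMAPSPACE_dev_mmio now has to leave the reserved union member zeroed, otherwise the check above fails the mapping with -EOPNOTSUPP. A minimal caller-side sketch of the relevant initialisation (DOMID_SELF and the field names come from the public header; the index/gpfn handles are omitted here):

    struct xen_add_to_physmap_batch xatpb = {
        .domid  = DOMID_SELF,            /* map into the calling domain */
        .space  = XENMAPSPACE_dev_mmio,
        .size   = 1,
        .u.res0 = 0,                     /* anything non-zero is now -EOPNOTSUPP */
    };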
int xenmem_add_to_physmap_one(
struct domain *d,
unsigned int space,
- domid_t foreign_domid,
+ union xen_add_to_physmap_batch_extra extra,
unsigned long idx,
xen_pfn_t gpfn)
{
break;
}
case XENMAPSPACE_gmfn_foreign:
- return p2m_add_foreign(d, idx, gpfn, foreign_domid);
+ return p2m_add_foreign(d, idx, gpfn, extra.foreign_domid);
default:
break;
}
unsigned int size = cmp.atpb.size;
xen_ulong_t *idxs = (void *)(nat.atpb + 1);
xen_pfn_t *gpfns = (void *)(idxs + limit);
+ /*
+ * The union is always 16 bits wide, so it is not necessary to
+ * translate the exact field that corresponds to the space.
+ */
+ enum XLAT_add_to_physmap_batch_u u =
+ XLAT_add_to_physmap_batch_u_res0;
if ( copy_from_guest(&cmp.atpb, compat, 1) ||
!compat_handle_okay(cmp.atpb.idxs, size) ||
{
unsigned int done = 0;
long rc = 0;
+ union xen_add_to_physmap_batch_extra extra;
+
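+ /*
+ * The non-batch interface has no way to name a foreign domain, so
+ * initialise the union to a value each space can safely handle.
+ */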
+ if ( xatp->space != XENMAPSPACE_gmfn_foreign )
+ extra.res0 = 0;
+ else
+ extra.foreign_domid = DOMID_INVALID;
if ( xatp->space != XENMAPSPACE_gmfn_range )
- return xenmem_add_to_physmap_one(d, xatp->space, DOMID_INVALID,
+ return xenmem_add_to_physmap_one(d, xatp->space, extra,
xatp->idx, xatp->gpfn);
if ( xatp->size < start )
while ( xatp->size > done )
{
- rc = xenmem_add_to_physmap_one(d, xatp->space, DOMID_INVALID,
+ rc = xenmem_add_to_physmap_one(d, xatp->space, extra,
xatp->idx, xatp->gpfn);
if ( rc < 0 )
break;
}
rc = xenmem_add_to_physmap_one(d, xatpb->space,
- xatpb->foreign_domid,
+ xatpb->u,
idx, gpfn);
if ( unlikely(__copy_to_guest_offset(xatpb->errs, 0, &rc, 1)) )
/* Number of pages to go through */
uint16_t size;
- domid_t foreign_domid; /* IFF gmfn_foreign */
+
+#if __XEN_INTERFACE_VERSION__ < 0x00040700
+ domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
+#else
+ union xen_add_to_physmap_batch_extra {
+ domid_t foreign_domid; /* gmfn_foreign */
+ uint16_t res0; /* All the other spaces. Should be 0 */
+ } u;
+#endif
/* Indexes into space being mapped. */
XEN_GUEST_HANDLE(xen_ulong_t) idxs;
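For illustration only, and not part of the patch: a guest built against __XEN_INTERFACE_VERSION__ >= 0x00040700 names the foreign domain through u.foreign_domid, while older guests keep the flat foreign_domid field at the same offset. A sketch of the new-style call, assuming a Linux-style HYPERVISOR_memory_op() wrapper and set_xen_guest_handle() (neither is defined by this patch); fdom, fgfn and lgpfn are placeholders:

    xen_ulong_t idx  = fgfn;             /* gfn in the foreign domain (placeholder) */
    xen_pfn_t   gpfn = lgpfn;            /* guest frame to map it at (placeholder) */
    int err = 0;
    int rc;
    struct xen_add_to_physmap_batch xatpb = {
        .domid = DOMID_SELF,             /* modify the caller's own p2m */
        .space = XENMAPSPACE_gmfn_foreign,
        .size  = 1,
        .u.foreign_domid = fdom,         /* only meaningful for gmfn_foreign */
    };

    set_xen_guest_handle(xatpb.idxs, &idx);
    set_xen_guest_handle(xatpb.gpfns, &gpfn);
    set_xen_guest_handle(xatpb.errs, &err);

    rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_batch, &xatpb);

Because both union members are 16 bits wide, the struct layout, and hence the ABI, is identical on either side of the interface-version check.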
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/typesafe.h>
+#include <public/memory.h>
TYPE_SAFE(unsigned long, mfn);
#define PRI_mfn "05lx"
#endif
int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
- domid_t foreign_domid,
+ union xen_add_to_physmap_batch_extra extra,
unsigned long idx, xen_pfn_t gpfn);
/* Returns 1 on success, 0 on error, negative if the ring