This patch extends the existing hypercall to support the RDM reservation
policy. When reserving RDM regions in pfn space we either return an error
or just print a warning message, depending on whether the policy is
"strict" or "relaxed".

Note that in some special cases, e.g. adding a device to the hardware
domain or removing a device from a user domain, 'relaxed' is sufficient,
since these operations are always safe for the hardware domain.
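
For illustration, a toolstack could request the relaxed policy by setting
the new flag in the assign_device domctl, roughly as in the sketch below.
This is only a sketch: the helper name, the libxc-internal do_domctl()
call and the exact union member names are assumptions for illustration,
not something introduced by this patch.

    /* Toolstack-side sketch (illustrative only): assign a PCI device
     * with the relaxed RDM policy.  do_domctl() stands in for the real
     * libxc issuing path. */
    static int assign_with_relaxed_rdm(xc_interface *xch, domid_t domid,
                                       uint16_t seg, uint8_t bus,
                                       uint8_t devfn)
    {
        struct xen_domctl domctl = {
            .cmd = XEN_DOMCTL_assign_device,
            .domain = domid,
        };

        domctl.u.assign_device.dev = XEN_DOMCTL_DEV_PCI;
        domctl.u.assign_device.u.pci.machine_sbdf =
            ((uint32_t)seg << 16) | ((uint32_t)bus << 8) | devfn;
        /* 0 (the default) keeps the strict policy; set the flag for relaxed. */
        domctl.u.assign_device.flag = XEN_DOMCTL_DEV_RDM_RELAXED;

        return do_domctl(xch, &domctl);
    }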
CC: Tim Deegan <tim@xen.org>
CC: Keir Fraser <keir@xen.org>
CC: Jan Beulich <jbeulich@suse.com>
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
CC: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
CC: Ian Campbell <ian.campbell@citrix.com>
CC: Stefano Stabellini <stefano.stabellini@citrix.com>
CC: Yang Zhang <yang.z.zhang@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Tiejun Chen <tiejun.chen@intel.com>
Reviewed-by: George Dunlap <george.dunlap@eu.citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
v13a: Fix build on ARM by passing 0 for flags to arm_smmu_assign_dev.
}
int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
- p2m_access_t p2ma)
+ p2m_access_t p2ma, unsigned int flag)
{
p2m_type_t p2mt;
p2m_access_t a;
ret = 0;
else
{
- ret = -EBUSY;
+ if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
+ ret = 0;
+ else
+ ret = -EBUSY;
printk(XENLOG_G_WARNING
"Cannot setup identity map d%d:%lx,"
" gfn already mapped to %lx.\n",
}
static int amd_iommu_assign_device(struct domain *d, u8 devfn,
- struct pci_dev *pdev)
+ struct pci_dev *pdev,
+ u32 flag)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
int bdf = PCI_BDF2(pdev->bus, devfn);
}
static int arm_smmu_assign_dev(struct domain *d, u8 devfn,
- struct device *dev)
+ struct device *dev, u32 flag)
{
struct iommu_domain *domain;
struct arm_smmu_xen_domain *xen_domain;
return ret;
if (t) {
- ret = arm_smmu_assign_dev(t, devfn, dev);
+ /* No flags are defined for ARM. */
+ ret = arm_smmu_assign_dev(t, devfn, dev, 0);
if (ret)
return ret;
}
goto fail;
}
- rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev));
+ /* The flag field doesn't matter for DT devices. */
+ rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev), 0);
if ( rc )
goto fail;
return pdev ? 0 : -EBUSY;
}
-static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
+static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct pci_dev *pdev;
pdev->fault.count = 0;
- if ( (rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev))) )
+ if ( (rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag)) )
goto done;
for ( ; pdev->phantom_stride; rc = 0 )
devfn += pdev->phantom_stride;
if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
break;
- rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev));
+ rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag);
if ( rc )
printk(XENLOG_G_WARNING "d%d: assign %04x:%02x:%02x.%u failed (%d)\n",
d->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
{
u16 seg;
u8 bus, devfn;
+ u32 flag;
int ret = 0;
uint32_t machine_sbdf;
seg = machine_sbdf >> 16;
bus = PCI_BUS(machine_sbdf);
devfn = PCI_DEVFN2(machine_sbdf);
+ flag = domctl->u.assign_device.flag;
+ if ( flag & ~XEN_DOMCTL_DEV_RDM_RELAXED )
+ {
+ ret = -EINVAL;
+ break;
+ }
ret = device_assigned(seg, bus, devfn) ?:
- assign_device(d, seg, bus, devfn);
+ assign_device(d, seg, bus, devfn, flag);
if ( ret == -ERESTART )
ret = hypercall_create_continuation(__HYPERVISOR_domctl,
"h", u_domctl);
}
static int rmrr_identity_mapping(struct domain *d, bool_t map,
- const struct acpi_rmrr_unit *rmrr)
+ const struct acpi_rmrr_unit *rmrr,
+ u32 flag)
{
unsigned long base_pfn = rmrr->base_address >> PAGE_SHIFT_4K;
unsigned long end_pfn = PAGE_ALIGN_4K(rmrr->end_address) >> PAGE_SHIFT_4K;
while ( base_pfn < end_pfn )
{
- int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw);
+ int err = set_identity_p2m_entry(d, base_pfn, p2m_access_rw, flag);
if ( err )
return err;
PCI_BUS(bdf) == pdev->bus &&
PCI_DEVFN2(bdf) == devfn )
{
- ret = rmrr_identity_mapping(pdev->domain, 1, rmrr);
+ /*
+ * iommu_add_device() is only called for the hardware
+ * domain (see xen/drivers/passthrough/pci.c:pci_add_device()).
+ * Since RMRRs are always reserved in the e820 map for the hardware
+ * domain, there shouldn't be a conflict.
+ */
+ ret = rmrr_identity_mapping(pdev->domain, 1, rmrr, 0);
if ( ret )
dprintk(XENLOG_ERR VTDPREFIX, "d%d: RMRR mapping failed\n",
pdev->domain->domain_id);
PCI_DEVFN2(bdf) != devfn )
continue;
- rmrr_identity_mapping(pdev->domain, 0, rmrr);
+ /*
+ * The flag is not used when clearing these mappings, so passing
+ * 0 (strict) here is always safe.
+ */
+ rmrr_identity_mapping(pdev->domain, 0, rmrr, 0);
}
return domain_context_unmap(pdev->domain, devfn, pdev);
spin_lock(&pcidevs_lock);
for_each_rmrr_device ( rmrr, bdf, i )
{
- ret = rmrr_identity_mapping(d, 1, rmrr);
+ /*
+ * Here we are adding a device to the hardware domain. Since RMRRs
+ * are always reserved in the e820 map for the hardware domain,
+ * there shouldn't be a conflict, so passing 0 (strict) is always
+ * safe.
+ */
+ ret = rmrr_identity_mapping(d, 1, rmrr, 0);
if ( ret )
dprintk(XENLOG_ERR VTDPREFIX,
"IOMMU: mapping reserved region failed\n");
PCI_BUS(bdf) == pdev->bus &&
PCI_DEVFN2(bdf) == devfn )
{
- ret = rmrr_identity_mapping(source, 0, rmrr);
+ /*
+ * The RMRR flag is always ignored when removing a device, so
+ * passing 0 (strict) here is always safe.
+ */
+ ret = rmrr_identity_mapping(source, 0, rmrr, 0);
if ( ret != -ENOENT )
return ret;
}
}
static int intel_iommu_assign_device(
- struct domain *d, u8 devfn, struct pci_dev *pdev)
+ struct domain *d, u8 devfn, struct pci_dev *pdev, u32 flag)
{
struct acpi_rmrr_unit *rmrr;
int ret = 0, i;
PCI_BUS(bdf) == bus &&
PCI_DEVFN2(bdf) == devfn )
{
- ret = rmrr_identity_mapping(d, 1, rmrr);
+ ret = rmrr_identity_mapping(d, 1, rmrr, flag);
if ( ret )
{
reassign_device_ownership(d, hardware_domain, devfn, pdev);
/* Set identity addresses in the p2m table (for pass-through) */
int set_identity_p2m_entry(struct domain *d, unsigned long gfn,
- p2m_access_t p2ma);
+ p2m_access_t p2ma, unsigned int flag);
#define clear_identity_p2m_entry(d, gfn, page_order) \
guest_physmap_remove_page(d, gfn, gfn, page_order)
XEN_GUEST_HANDLE_64(char) path; /* path to the device tree node */
} dt;
} u;
+ /* IN */
+#define XEN_DOMCTL_DEV_RDM_RELAXED 1
+ uint32_t flag; /* flag of assigned device */
};
typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
int (*add_device)(u8 devfn, device_t *dev);
int (*enable_device)(device_t *dev);
int (*remove_device)(u8 devfn, device_t *dev);
- int (*assign_device)(struct domain *, u8 devfn, device_t *dev);
+ int (*assign_device)(struct domain *, u8 devfn, device_t *dev, u32 flag);
int (*reassign_device)(struct domain *s, struct domain *t,
u8 devfn, device_t *dev);
#ifdef HAS_PCI