#include <xen/sched.h>
#include <xen/iommu.h>
+#include <xen/paging.h>
extern struct iommu_ops intel_iommu_ops;
extern struct iommu_ops amd_iommu_ops;
+static int iommu_populate_page_table(struct domain *d);
int intel_vtd_setup(void);
int amd_iov_detect(void);
int iommu_enabled = 1;
boolean_param("iommu", iommu_enabled);
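+/* Build IOMMU mappings for PV guests with passthrough devices ("iommu_pv" boot option). */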
+int iommu_pv_enabled = 0;
+boolean_param("iommu_pv", iommu_pv_enabled);
+
int iommu_domain_init(struct domain *domain)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
int assign_device(struct domain *d, u8 bus, u8 devfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
+ int rc;
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- return hd->platform_ops->assign_device(d, bus, devfn);
+ if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
+ return rc;
+
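+ /* If the domain now has passthrough devices but no IOMMU mappings yet, build them. */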
+ if ( has_iommu_pdevs(d) && !need_iommu(d) )
+ {
+ d->need_iommu = 1;
+ return iommu_populate_page_table(d);
+ }
+ return 0;
+}
+
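+/*
+ * Create IOMMU mappings (guest frame -> machine frame) for every writable
+ * page the domain currently owns.
+ */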
+static int iommu_populate_page_table(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+ struct page_info *page;
+ int rc;
+
+ spin_lock(&d->page_alloc_lock);
+
+ list_for_each_entry ( page, &d->page_list, list )
+ {
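+ /* Only map pages the guest holds with the writable page type. */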
+ if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
+ {
+ rc = hd->platform_ops->map_page(
+ d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
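+ /* On failure, drop the lock and tear down the partially built IOMMU page table. */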
+ if ( rc )
+ {
+ spin_unlock(&d->page_alloc_lock);
+ hd->platform_ops->teardown(d);
+ return rc;
+ }
+ }
+ }
+ spin_unlock(&d->page_alloc_lock);
+ return 0;
}
void iommu_domain_destroy(struct domain *d)
if ( !iommu_enabled || !hd->platform_ops )
return;
- return hd->platform_ops->reassign_device(d, dom0, bus, devfn);
+ hd->platform_ops->reassign_device(d, dom0, bus, devfn);
+
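+ /* With no passthrough devices left, the domain's IOMMU mappings can be torn down. */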
+ if ( !has_iommu_pdevs(d) && need_iommu(d) )
+ {
+ d->need_iommu = 0;
+ hd->platform_ops->teardown(d);
+ }
}
static int iommu_setup(void)
iommu_enabled = (rc == 0);
out:
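+ /* IOMMU support for PV guests requires the (VT-d) IOMMU to be enabled. */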
+ if ( !iommu_enabled || !vtd_enabled )
+ iommu_pv_enabled = 0;
printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
+ if ( iommu_enabled )
+ printk("I/O virtualisation for PV guests %sabled\n",
+ iommu_pv_enabled ? "en" : "dis");
return rc;
}
__initcall(iommu_setup);
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
}
spin_lock_irqsave(&iommu->lock, flags);
- /*
- * domain_id 0 is not valid on Intel's IOMMU, force domain_id to
- * be 1 based as required by intel's iommu hw.
- */
- context_set_domain_id(context, domain);
- context_set_address_width(*context, hd->agaw);
- if ( ecap_pass_thru(iommu->ecap) )
- context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
#ifdef CONTEXT_PASSTHRU
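+ /* Only dom0 is given the pass-through (untranslated) context type. */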
+ if ( ecap_pass_thru(iommu->ecap) && (domain->domain_id == 0) )
+ context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
else
{
#endif
- ASSERT(hd->pgd_maddr != 0);
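+ /* Allocate the domain's top-level IOMMU page table on first use. */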
+ if ( hd->pgd_maddr == 0 )
+ {
+ hd->pgd_maddr = alloc_pgtable_maddr();
+ if ( hd->pgd_maddr == 0 )
+ {
+ unmap_vtd_domain_page(context_entries);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return -ENOMEM;
+ }
+ }
context_set_address_root(*context, hd->pgd_maddr);
context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
#ifdef CONTEXT_PASSTHRU
}
#endif
+ /*
+ * domain_id 0 is not valid on Intel's IOMMU: force the domain_id to
+ * be 1-based, as required by Intel's IOMMU hardware.
+ */
+ context_set_domain_id(context, domain);
+ context_set_address_width(*context, hd->agaw);
context_set_fault_enable(*context);
context_set_present(*context);
iommu_flush_cache_entry(iommu, context);
/* Is this an HVM guest? */
bool_t is_hvm;
+ /* Does this guest need iommu mappings? */
+ bool_t need_iommu;
/* Is this guest fully privileged (aka dom0)? */
bool_t is_privileged;
/* Which guest this guest has privileges on */
#define is_hvm_domain(d) ((d)->is_hvm)
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
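+/* need_iommu() is only meaningful for non-HVM (PV) domains. */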
+#define need_iommu(d) ((d)->need_iommu && !(d)->is_hvm)
extern enum cpufreq_controller {
FREQCTL_none, FREQCTL_dom0_kernel