unsigned long flag;
unsigned long start_time;
+ /* Domain id in context is 1 based */
+ did++;
+
/*
 * In the non-present entry flush case, if hardware doesn't cache
 * non-present entries we do nothing, and if hardware caches non-present
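The did++ above keeps the flush request consistent with how context entries are programmed: this code stores domain ids 1-based in hardware, so hardware domain id 0 is never assigned to a real domain (VT-d reserves did 0 for tagging cached non-present entries when caching mode is set). A minimal sketch of the convention; the helper names are illustrative, not taken from this patch:

    /* Sketch only: hypothetical helpers showing the 1-based mapping. */
    static inline u16 context_did(u16 soft_did)
    {
        return soft_did + 1;   /* hardware did 0 stays reserved */
    }

    static inline u16 soft_did(u16 hw_did)
    {
        return hw_did - 1;     /* recover the software domain id */
    }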
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
else
{
+ if ( !hd->pgd )
+ {
+ struct dma_pte *pgd = (struct dma_pte *)alloc_xenheap_page();
+ if ( !pgd )
+ {
+ spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ return -ENOMEM;
+ }
+ memset(pgd, 0, PAGE_SIZE);
+ hd->pgd = pgd;
+ }
+
context_set_address_root(*context, virt_to_maddr(hd->pgd));
context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
}
return_devices_to_dom0(d);
}
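In the multi-level case the top-level page table is allocated lazily, under hd->mapping_lock, the first time a context entry needs it. Zeroing the fresh page is what makes the allocation safe to publish: a zero PTE grants neither read nor write, so the new table blocks all DMA until mappings are explicitly installed. A self-contained sketch of that invariant, assuming the usual VT-d second-level PTE layout (bit 0 = read, bit 1 = write):

    #include <stdint.h>

    /* Assumed second-level PTE layout: bit 0 = read, bit 1 = write. */
    struct dma_pte { uint64_t val; };

    static inline int dma_pte_present(const struct dma_pte *pte)
    {
        return (pte->val & 3) != 0;   /* a zeroed entry is non-present */
    }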
-static int domain_context_mapped(struct domain *domain, struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
if ( ret )
return ret;
- if ( domain_context_mapped(d, pdev) == 0 )
+ if ( domain_context_mapped(pdev) == 0 )
{
drhd = acpi_find_matched_drhd_unit(pdev);
ret = domain_context_mapping(d, drhd->iommu, pdev);
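Dropping the struct domain argument reflects that a context entry is keyed by the device's (bus, devfn) alone, so "is this device already mapped?" can be answered without knowing the target domain. A sketch of the shape the check plausibly takes after the change; it assumes a per-IOMMU lookup such as device_context_mapped(), which is not shown in this hunk:

    /* Sketch under the assumption that device_context_mapped(iommu, bus,
     * devfn) tests the present bit of the device's context entry. */
    static int domain_context_mapped(struct pci_dev *pdev)
    {
        struct acpi_drhd_unit *drhd = acpi_find_matched_drhd_unit(pdev);

        return device_context_mapped(drhd->iommu, pdev->bus, pdev->devfn);
    }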
#define cap_plmr(c) (((c) >> 5) & 1)
#define cap_rwbf(c) (((c) >> 4) & 1)
#define cap_afl(c) (((c) >> 3) & 1)
-#define cap_ndoms(c) (2 ^ (4 + 2 * ((c) & 0x7)))
+#define cap_ndoms(c) (1 << (4 + 2 * ((c) & 0x7)))
+
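The old macro used ^, which is bitwise XOR in C, not exponentiation: for an ND field of 2 it evaluated to 2 ^ 8 == 10 instead of the 256 domain ids the capability register actually encodes (the count is defined as 2^(4 + 2*ND)). A quick standalone check of the corrected form:

    #include <assert.h>

    #define cap_ndoms(c) (1 << (4 + 2 * ((c) & 0x7)))

    int main(void)
    {
        assert(cap_ndoms(0) == 16);    /* ND = 0 -> 16 domain ids    */
        assert(cap_ndoms(2) == 256);   /* each ND step quadruples it */
        assert((2 ^ 8) == 10);         /* the old expression: XOR    */
        return 0;
    }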
/*
* Extended Capability Register
*/