unsigned long old_root_mfn;
struct hvm_iommu *hd = domain_hvm_iommu(d);
+ if ( gfn == INVALID_MFN )
+ return -EADDRNOTAVAIL;
+ ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+
level = hd->arch.paging_mode;
old_root = hd->arch.root_table;
offset = gfn >> (PTE_PER_TABLE_SHIFT * (level - 1));
* we might need a deeper page table for larger gfn now */
if ( is_hvm_domain(d) )
{
- if ( update_paging_mode(d, gfn) )
+ int rc = update_paging_mode(d, gfn);
+
+ if ( rc )
{
spin_unlock(&hd->arch.mapping_lock);
AMD_IOMMU_DEBUG("Update page mode failed gfn = %lx\n", gfn);
- domain_crash(d);
- return -EFAULT;
+ if ( rc != -EADDRNOTAVAIL )
+ domain_crash(d);
+ return rc;
}
}
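
For reference, a minimal sketch of the error-handling convention this hunk establishes, seen from a hypothetical call site (the helper name map_one_gfn() is illustrative and not part of the patch): -EADDRNOTAVAIL marks a GFN that simply cannot be mapped and is propagated without crashing the domain, while any other mapping failure is still treated as fatal.

/* Illustrative only: map_one_gfn() is a hypothetical wrapper, not patch code. */
static int map_one_gfn(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    int rc = hd->platform_ops->map_page(d, gfn, mfn,
                                        IOMMUF_readable | IOMMUF_writable);

    /* An unmappable GFN (-EADDRNOTAVAIL) is reported but not fatal. */
    if ( rc && rc != -EADDRNOTAVAIL )
        domain_crash(d);

    return rc;
}
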
#define VTD_PAGE_TABLE_LEVEL_3 3
#define VTD_PAGE_TABLE_LEVEL_4 4
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
#define MAX_IOMMU_REGS 0xc0
extern struct list_head acpi_drhd_units;
if ( has_hvm_container_domain(d) ||
(page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
{
- BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
- rc = hd->platform_ops->map_page(
- d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ unsigned long mfn = page_to_mfn(page);
+ unsigned long gfn = mfn_to_gmfn(d, mfn);
+
+ if ( gfn != INVALID_MFN )
+ {
+ ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
+ BUG_ON(SHARED_M2P(gfn));
+ rc = hd->platform_ops->map_page(d, gfn, mfn,
+ IOMMUF_readable |
+ IOMMUF_writable);
+ }
if ( rc )
{
page_list_add(page, &d->page_list);
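
For context, a simplified sketch of the page-list walk this hunk sits in, assuming rc starts out as 0 and omitting the list manipulation and page-type filtering done by the real loop (the function name demo_populate() is illustrative): only pages whose M2P lookup yields a valid GFN are handed to ->map_page(); pages with INVALID_MFN are skipped.

/* Illustrative sketch only; the real loop also reshuffles d->page_list
 * and filters by page type, which is omitted here. */
static int demo_populate(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct page_info *page;
    int rc = 0;

    page_list_for_each ( page, &d->page_list )
    {
        unsigned long mfn = page_to_mfn(page);
        unsigned long gfn = mfn_to_gmfn(d, mfn);

        if ( gfn == INVALID_MFN )
            continue;                   /* no M2P entry, nothing to map */

        ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
        BUG_ON(SHARED_M2P(gfn));

        rc = hd->platform_ops->map_page(d, gfn, mfn,
                                        IOMMUF_readable | IOMMUF_writable);
        if ( rc )
            break;
    }

    return rc;
}
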
unsigned int np;
};
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+
struct arch_hvm_iommu
{
u64 pgd_maddr; /* io page directory machine address */
#define IOMMU_CONTROL_DISABLED 0
#define IOMMU_CONTROL_ENABLED 1
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-
/* interrupt remapping table */
#define INT_REMAP_ENTRY_REMAPEN_MASK 0x00000001
#define INT_REMAP_ENTRY_REMAPEN_SHIFT 0