*/
#include <xen/sched.h>
+#include <xen/iocap.h>
#include <xen/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>
}
}

-static bool __hwdom_init hwdom_iommu_map(const struct domain *d,
-                                         unsigned long pfn,
-                                         unsigned long max_pfn)
+static unsigned int __hwdom_init hwdom_iommu_map(const struct domain *d,
+                                                 unsigned long pfn,
+                                                 unsigned long max_pfn)
{
    mfn_t mfn = _mfn(pfn);
-    unsigned int i, type;
+    unsigned int i, type, perms = IOMMUF_readable | IOMMUF_writable;

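+    /*
+     * "perms" starts out as full read/write; the checks below either keep
+     * it, reduce it to read-only, or zero it (i.e. no mapping at all).
+     */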
    /*
     * Set up 1:1 mapping for dom0. Default to include only conventional RAM
     * areas and let RMRRs include needed reserved regions. When set, the
     * inclusive mapping additionally maps in every pfn up to 4GB except those
     * that fall in unusable ranges for PV Dom0.
     */
    if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) )
-        return false;
+        return 0;

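+    /* Classify the page by its E820-derived RAM type. */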
    switch ( type = page_get_ram_type(mfn) )
    {
    case RAM_TYPE_UNUSABLE:
-        return false;
+        return 0;

    case RAM_TYPE_CONVENTIONAL:
        if ( iommu_hwdom_strict )
-            return false;
+            return 0;
        break;

    default:
        if ( type & RAM_TYPE_RESERVED )
        {
            if ( !iommu_hwdom_inclusive && !iommu_hwdom_reserved )
-                return false;
+                perms = 0;
        }
-        else if ( is_hvm_domain(d) || !iommu_hwdom_inclusive || pfn > max_pfn )
-            return false;
+        else if ( is_hvm_domain(d) )
+            return 0;
+        else if ( !iommu_hwdom_inclusive || pfn > max_pfn )
+            perms = 0;
    }

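+    /*
+     * Where only "perms" is zeroed (instead of returning early), the page
+     * can still gain a read-only mapping from the PV-specific check below.
+     */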
    /* Check that it doesn't overlap with the Interrupt Address Range. */
    if ( pfn >= 0xfee00 && pfn <= 0xfeeff )
-        return false;
+        return 0;

    /* ... or the IO-APIC */
-    for ( i = 0; has_vioapic(d) && i < d->arch.hvm.nr_vioapics; i++ )
-        if ( pfn == PFN_DOWN(domain_vioapic(d, i)->base_address) )
-            return false;
+    if ( has_vioapic(d) )
+    {
+        for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
+            if ( pfn == PFN_DOWN(domain_vioapic(d, i)->base_address) )
+                return 0;
+    }
+    else if ( is_pv_domain(d) )
+    {
+        /*
+         * Be consistent with CPU mappings: Dom0 is permitted to establish r/o
+         * ones there (also for e.g. HPET in certain cases), so it should also
+         * have such established for IOMMUs.
+         */
+        if ( iomem_access_permitted(d, pfn, pfn) &&
+             rangeset_contains_singleton(mmio_ro_ranges, pfn) )
+            perms = IOMMUF_readable;
+    }

    /*
     * ... or the PCIe MCFG regions.
     * TODO: runtime added MMCFG regions are not checked to make sure they
     * don't overlap with already mapped regions, thus preventing trapping.
     */
    if ( has_vpci(d) && vpci_is_mmcfg_address(d, pfn_to_paddr(pfn)) )
-        return false;
+        return 0;

-    return true;
+    return perms;
}

void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
    for ( ; i < top; i++ )
    {
        unsigned long pfn = pdx_to_pfn(i);
+        unsigned int perms = hwdom_iommu_map(d, pfn, max_pfn);
        int rc;

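+        /*
+         * A zero "perms" means the page is skipped entirely; otherwise it
+         * holds the access rights for the identity mapping set up below.
+         */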
-        if ( !hwdom_iommu_map(d, pfn, max_pfn) )
+        if ( !perms )
            rc = 0;
        else if ( paging_mode_translate(d) )
-            rc = p2m_add_identity_entry(d, pfn, p2m_access_rw, 0);
+            rc = p2m_add_identity_entry(d, pfn,
+                                        perms & IOMMUF_writable ? p2m_access_rw
+                                                                : p2m_access_r,
+                                        0);
        else
            rc = iommu_map(d, _dfn(pfn), _mfn(pfn), 1ul << PAGE_ORDER_4K,
-                           IOMMUF_readable | IOMMUF_writable, &flush_flags);
+                           perms, &flush_flags);

        if ( rc )
            printk(XENLOG_WARNING "%pd: identity %smapping of %lx failed: %d\n",