uint8_t ipat = 0;
bool_t need_modify_vtd_table = 1;
bool_t vtd_pte_present = 0;
- unsigned int iommu_flags = p2m_get_iommu_flags(p2mt);
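+ /*
+ * IOMMU flags depend on the MFN as well as the p2m type: read-only
+ * MMIO pages must not be writable through the IOMMU (see
+ * p2m_get_iommu_flags()).
+ */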
+ unsigned int iommu_flags = p2m_get_iommu_flags(p2mt, mfn);
bool_t needs_sync = 1;
ept_entry_t old_entry = { .epte = 0 };
ept_entry_t new_entry = { .epte = 0 };
/* Safe to read-then-write because we hold the p2m lock */
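+ /* The VT-d side needs updating only if the MFN or the resulting
+ * IOMMU permissions change. */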
if ( ept_entry->mfn == new_entry.mfn &&
- p2m_get_iommu_flags(ept_entry->sa_p2mt) == iommu_flags )
+ p2m_get_iommu_flags(ept_entry->sa_p2mt, _mfn(ept_entry->mfn)) ==
+ iommu_flags )
need_modify_vtd_table = 0;
ept_p2m_type_to_flags(p2m, &new_entry, p2mt, p2ma);
l2_pgentry_t l2e_content;
l3_pgentry_t l3e_content;
int rc;
- unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt);
+ unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, mfn);
/*
* old_mfn and iommu_old_flags control possible flush/update needs on the
* IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
{
if ( flags & _PAGE_PSE )
{
- iommu_old_flags =
- p2m_get_iommu_flags(p2m_flags_to_type(flags));
old_mfn = l1e_get_pfn(*p2m_entry);
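+ /* Latch old_mfn first: the old IOMMU flags are derived from it
+ * as well as from the p2m type. */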
+ iommu_old_flags =
+ p2m_get_iommu_flags(p2m_flags_to_type(flags),
+ _mfn(old_mfn));
}
else
{
p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
0, L1_PAGETABLE_ENTRIES);
ASSERT(p2m_entry);
- iommu_old_flags =
- p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)));
old_mfn = l1e_get_pfn(*p2m_entry);
+ iommu_old_flags =
+ p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)),
+ _mfn(old_mfn));
if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
{
if ( flags & _PAGE_PSE )
{
- iommu_old_flags =
- p2m_get_iommu_flags(p2m_flags_to_type(flags));
old_mfn = l1e_get_pfn(*p2m_entry);
+ iommu_old_flags =
+ p2m_get_iommu_flags(p2m_flags_to_type(flags),
+ _mfn(old_mfn));
}
else
{
ret = p2m_set_entry(p2m, gfn, _mfn(gfn), PAGE_ORDER_4K,
p2m_mmio_direct, p2ma);
else if ( mfn_x(mfn) == gfn && p2mt == p2m_mmio_direct && a == p2ma )
- {
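+ /*
+ * No explicit IOMMU mapping is needed here anymore: with
+ * p2m_mmio_direct covered by p2m_get_iommu_flags(), the normal
+ * p2m code takes care of establishing the IOMMU mapping.
+ */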
ret = 0;
- /*
- * PVH fixme: during Dom0 PVH construction, p2m entries are being set
- * but iomem regions are not mapped with IOMMU. This makes sure that
- * RMRRs are correctly mapped with IOMMU.
- */
- if ( is_hardware_domain(d) && !iommu_use_hap_pt(d) )
- ret = iommu_map_page(d, gfn, gfn, IOMMUF_readable|IOMMUF_writable);
- }
else
{
if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
/*
* p2m type to IOMMU flags
*/
-static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt)
+static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, mfn_t mfn)
{
unsigned int flags;
case p2m_grant_map_ro:
flags = IOMMUF_readable;
break;
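+ /*
+ * Direct MMIO gets mapped for the device as well, except that pages
+ * in mmio_ro_ranges (e.g. MSI-X tables) must remain read-only.
+ */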
+ case p2m_mmio_direct:
+ flags = IOMMUF_readable;
+ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
+ flags |= IOMMUF_writable;
+ break;
default:
flags = 0;
break;