MATTR_DEV, p2m_mmio_direct);
}
+/*
+ * Tear down the p2m MMIO mapping for gfns [start_gfn, start_gfn + nr_mfns):
+ * the inverse of map_mmio_regions().  The entries are invalidated via
+ * apply_p2m_changes(REMOVE, ..., p2m_invalid).
+ * Returns 0 on success, negative error code otherwise.
+ */
+int unmap_mmio_regions(struct domain *d,
+                       unsigned long start_gfn,
+                       unsigned long nr_mfns,
+                       unsigned long mfn)
+{
+    return apply_p2m_changes(d, REMOVE,
+                             pfn_to_paddr(start_gfn),
+                             pfn_to_paddr(start_gfn + nr_mfns),
+                             pfn_to_paddr(mfn),
+                             MATTR_DEV, p2m_invalid);
+}
+
int guest_physmap_add_entry(struct domain *d,
unsigned long gpfn,
unsigned long mfn,
d->domain_id, gfn, mfn, nr_mfns);
ret = iomem_permit_access(d, mfn, mfn_end);
- if ( !ret && paging_mode_translate(d) )
+ if ( !ret )
{
- for ( i = 0; !ret && i < nr_mfns; i++ )
- ret = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
+ ret = map_mmio_regions(d, gfn, nr_mfns, mfn);
if ( ret )
{
printk(XENLOG_G_WARNING
- "memory_map:fail: dom%d gfn=%lx mfn=%lx ret:%ld\n",
- d->domain_id, gfn + i, mfn + i, ret);
- while ( i-- )
- clear_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
+ "memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
+ d->domain_id, gfn, mfn, nr_mfns, ret);
if ( iomem_deny_access(d, mfn, mfn_end) &&
is_hardware_domain(current->domain) )
printk(XENLOG_ERR
"memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- if ( paging_mode_translate(d) )
- for ( i = 0; i < nr_mfns; i++ )
- {
- ret = clear_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
- if ( ret )
- rc = ret;
- }
+ rc = unmap_mmio_regions(d, gfn, nr_mfns, mfn);
ret = iomem_deny_access(d, mfn, mfn_end);
if ( !ret )
ret = rc;
return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
}
+/*
+ * Install p2m MMIO mappings: gfns [start_gfn, start_gfn + nr) are mapped
+ * 1:1 onto machine frames [mfn, mfn + nr).  A no-op (returning success)
+ * for non-translated domains.
+ * On failure, the entries already installed are torn down before the
+ * error from set_mmio_p2m_entry() is returned, so the operation is
+ * all-or-nothing from the caller's point of view.
+ */
+int map_mmio_regions(struct domain *d,
+                     unsigned long start_gfn,
+                     unsigned long nr,
+                     unsigned long mfn)
+{
+    int ret = 0;
+    unsigned long i;
+
+    if ( !paging_mode_translate(d) )
+        return 0;
+
+    for ( i = 0; !ret && i < nr; i++ )
+    {
+        ret = set_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i));
+        if ( ret )
+        {
+            /* Unwind only the i entries installed so far (entry i failed). */
+            unmap_mmio_regions(d, start_gfn, i, mfn);
+            break;
+        }
+    }
+
+    return ret;
+}
+
+/*
+ * Remove p2m MMIO mappings for gfns [start_gfn, start_gfn + nr).  A no-op
+ * (returning success) for non-translated domains.
+ * Unlike map_mmio_regions(), this deliberately keeps going past individual
+ * clear_mmio_p2m_entry() failures so as to remove as many entries as
+ * possible; the last error encountered (or 0) is returned.
+ */
+int unmap_mmio_regions(struct domain *d,
+                       unsigned long start_gfn,
+                       unsigned long nr,
+                       unsigned long mfn)
+{
+    int err = 0;
+    unsigned long i;
+
+    if ( !paging_mode_translate(d) )
+        return 0;
+
+    for ( i = 0; i < nr; i++ )
+    {
+        int ret = clear_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i));
+        if ( ret )
+            err = ret;
+    }
+
+    return err;
+}
+
/*** Audit ***/
#if P2M_AUDIT
unsigned long start_gfn,
unsigned long nr_mfns,
unsigned long mfn);
+int unmap_mmio_regions(struct domain *d,
+ unsigned long start_gfn,
+ unsigned long nr_mfns,
+ unsigned long mfn);
int guest_physmap_add_entry(struct domain *d,
unsigned long gfn,
#include <asm/mem_sharing.h>
#include <asm/page.h> /* for pagetable_t */
+/*
+ * Map MMIO regions in the p2m: start_gfn and nr describe the range in
+ * the guest physical address space to map, starting from the machine
+ * frame number mfn.
+ */
+int map_mmio_regions(struct domain *d,
+ unsigned long start_gfn,
+ unsigned long nr,
+ unsigned long mfn);
+int unmap_mmio_regions(struct domain *d,
+ unsigned long start_gfn,
+ unsigned long nr,
+ unsigned long mfn);
+
extern bool_t opt_hap_1gb, opt_hap_2mb;
/*