to avoid mixing machine frames with guest frames.
Signed-off-by: Julien Grall <julien.grall@arm.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: George Dunlap <george.dunlap@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
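The conversion below relies on Xen's typesafe frame-number wrappers: gfn_t/mfn_t with the _gfn()/_mfn() constructors, the gfn_x()/mfn_x() accessors, and mfn_add(). As a rough illustration only (the real definitions come from Xen's TYPE_SAFE macros, which typically struct-wrap the type only in debug builds), the idea is:

    /* Illustrative sketch, not the actual Xen definitions. */
    typedef struct { unsigned long g; } gfn_t;   /* guest frame number   */
    typedef struct { unsigned long m; } mfn_t;   /* machine frame number */

    static inline gfn_t _gfn(unsigned long g) { return (gfn_t){ g }; }
    static inline unsigned long gfn_x(gfn_t gfn) { return gfn.g; }

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t mfn) { return mfn.m; }

    static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) + i);
    }

Because gfn_t and mfn_t are distinct types, handing a machine frame to a parameter expecting a guest frame (or the other way around) becomes a compile-time error rather than a silent mix-up.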
if ( need_mapping )
{
res = map_mmio_regions(d,
- paddr_to_pfn(addr),
+ _gfn(paddr_to_pfn(addr)),
DIV_ROUND_UP(len, PAGE_SIZE),
- paddr_to_pfn(addr));
+ _mfn(paddr_to_pfn(addr)));
if ( res < 0 )
{
printk(XENLOG_ERR "Unable to map 0x%"PRIx64
d->domain_id, v2m_data->addr, v2m_data->size,
v2m_data->spi_start, v2m_data->nr_spis);
- ret = map_mmio_regions(d, paddr_to_pfn(v2m_data->addr),
+ ret = map_mmio_regions(d, _gfn(paddr_to_pfn(v2m_data->addr)),
DIV_ROUND_UP(v2m_data->size, PAGE_SIZE),
- paddr_to_pfn(v2m_data->addr));
+ _mfn(paddr_to_pfn(v2m_data->addr)));
if ( ret )
{
printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
}
int map_mmio_regions(struct domain *d,
- unsigned long start_gfn,
+ gfn_t start_gfn,
unsigned long nr,
- unsigned long mfn)
+ mfn_t mfn)
{
return apply_p2m_changes(d, INSERT,
- pfn_to_paddr(start_gfn),
- pfn_to_paddr(start_gfn + nr),
- pfn_to_paddr(mfn),
+ pfn_to_paddr(gfn_x(start_gfn)),
+ pfn_to_paddr(gfn_x(start_gfn) + nr),
+ pfn_to_paddr(mfn_x(mfn)),
MATTR_DEV, 0, p2m_mmio_direct,
d->arch.p2m.default_access);
}
int unmap_mmio_regions(struct domain *d,
- unsigned long start_gfn,
+ gfn_t start_gfn,
unsigned long nr,
- unsigned long mfn)
+ mfn_t mfn)
{
return apply_p2m_changes(d, REMOVE,
- pfn_to_paddr(start_gfn),
- pfn_to_paddr(start_gfn + nr),
- pfn_to_paddr(mfn),
+ pfn_to_paddr(gfn_x(start_gfn)),
+ pfn_to_paddr(gfn_x(start_gfn) + nr),
+ pfn_to_paddr(mfn_x(mfn)),
MATTR_DEV, 0, p2m_invalid,
d->arch.p2m.default_access);
}
if ( !(nr && iomem_access_permitted(d, mfn, mfn + nr - 1)) )
return 0;
- res = map_mmio_regions(d, start_gfn, nr, mfn);
+ res = map_mmio_regions(d, _gfn(start_gfn), nr, _mfn(mfn));
if ( res < 0 )
{
printk(XENLOG_G_ERR "Unable to map [%#lx - %#lx] in Dom%d\n",
static int exynos5250_specific_mapping(struct domain *d)
{
/* Map the chip ID */
- map_mmio_regions(d, paddr_to_pfn(EXYNOS5_PA_CHIPID), 1,
- paddr_to_pfn(EXYNOS5_PA_CHIPID));
+ map_mmio_regions(d, _gfn(paddr_to_pfn(EXYNOS5_PA_CHIPID)), 1,
+ _mfn(paddr_to_pfn(EXYNOS5_PA_CHIPID)));
/* Map the PWM region */
- map_mmio_regions(d, paddr_to_pfn(EXYNOS5_PA_TIMER), 2,
- paddr_to_pfn(EXYNOS5_PA_TIMER));
+ map_mmio_regions(d, _gfn(paddr_to_pfn(EXYNOS5_PA_TIMER)), 2,
+ _mfn(paddr_to_pfn(EXYNOS5_PA_TIMER)));
return 0;
}
static int omap5_specific_mapping(struct domain *d)
{
/* Map the PRM module */
- map_mmio_regions(d, paddr_to_pfn(OMAP5_PRM_BASE), 2,
- paddr_to_pfn(OMAP5_PRM_BASE));
+ map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_PRM_BASE)), 2,
+ _mfn(paddr_to_pfn(OMAP5_PRM_BASE)));
/* Map the PRM_MPU */
- map_mmio_regions(d, paddr_to_pfn(OMAP5_PRCM_MPU_BASE), 1,
- paddr_to_pfn(OMAP5_PRCM_MPU_BASE));
+ map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_PRCM_MPU_BASE)), 1,
+ _mfn(paddr_to_pfn(OMAP5_PRCM_MPU_BASE)));
/* Map the Wakeup Gen */
- map_mmio_regions(d, paddr_to_pfn(OMAP5_WKUPGEN_BASE), 1,
- paddr_to_pfn(OMAP5_WKUPGEN_BASE));
+ map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_WKUPGEN_BASE)), 1,
+ _mfn(paddr_to_pfn(OMAP5_WKUPGEN_BASE)));
/* Map the on-chip SRAM */
- map_mmio_regions(d, paddr_to_pfn(OMAP5_SRAM_PA), 32,
- paddr_to_pfn(OMAP5_SRAM_PA));
+ map_mmio_regions(d, _gfn(paddr_to_pfn(OMAP5_SRAM_PA)), 32,
+ _mfn(paddr_to_pfn(OMAP5_SRAM_PA)));
return 0;
}
* Map the gic virtual cpu interface in the gic cpu interface
* region of the guest.
*/
- ret = map_mmio_regions(d, paddr_to_pfn(cbase), csize / PAGE_SIZE,
- paddr_to_pfn(vbase));
+ ret = map_mmio_regions(d, _gfn(paddr_to_pfn(cbase)), csize / PAGE_SIZE,
+ _mfn(paddr_to_pfn(vbase)));
if ( ret )
return ret;
#define MAP_MMIO_MAX_ITER 64 /* pretty arbitrary */
int map_mmio_regions(struct domain *d,
- unsigned long start_gfn,
+ gfn_t start_gfn,
unsigned long nr,
- unsigned long mfn)
+ mfn_t mfn)
{
int ret = 0;
unsigned long i;
i += 1UL << order, ++iter )
{
/* OR'ing gfn and mfn values will return an order suitable to both. */
- for ( order = mmio_order(d, (start_gfn + i) | (mfn + i), nr - i); ;
+ for ( order = mmio_order(d, (gfn_x(start_gfn) + i) | (mfn_x(mfn) + i), nr - i); ;
order = ret - 1 )
{
- ret = set_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i), order,
+ ret = set_mmio_p2m_entry(d, gfn_x(start_gfn) + i,
+ mfn_add(mfn, i), order,
p2m_get_hostp2m(d)->default_access);
if ( ret <= 0 )
break;
}
int unmap_mmio_regions(struct domain *d,
- unsigned long start_gfn,
+ gfn_t start_gfn,
unsigned long nr,
- unsigned long mfn)
+ mfn_t mfn)
{
int ret = 0;
unsigned long i;
i += 1UL << order, ++iter )
{
/* OR'ing gfn and mfn values will return an order suitable to both. */
- for ( order = mmio_order(d, (start_gfn + i) | (mfn + i), nr - i); ;
+ for ( order = mmio_order(d, (gfn_x(start_gfn) + i) | (mfn_x(mfn) + i), nr - i); ;
order = ret - 1 )
{
- ret = clear_mmio_p2m_entry(d, start_gfn + i, _mfn(mfn + i), order);
+ ret = clear_mmio_p2m_entry(d, gfn_x(start_gfn) + i,
+ mfn_add(mfn, i), order);
if ( ret <= 0 )
break;
ASSERT(ret <= order);
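The "OR'ing gfn and mfn values" comment in the two hunks above deserves a word: the largest usable mapping order is bounded by the alignment of both the guest frame and the machine frame, and the lowest set bit of (gfn | mfn) can never sit higher than the lowest set bit of either operand, so an order derived from the OR is valid for both. Purely as an illustration (max_order() below is hypothetical; the real mmio_order() also caps the result by the remaining page count and by what the hardware supports):

    /* Hypothetical helper, for illustration only. */
    static unsigned int max_order(unsigned long gfn, unsigned long mfn)
    {
        unsigned long v = gfn | mfn;

        /* The lowest set bit of (gfn | mfn) bounds the alignment of both. */
        return v ? __builtin_ctzl(v) : 63;
    }

    /*
     * Example: gfn 0x200 is order-9 aligned but mfn 0x180 only order-7
     * aligned; 0x200 | 0x180 = 0x380, whose lowest set bit is bit 7, so
     * order 7 is the largest mapping order that fits both.
     */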
"memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- ret = map_mmio_regions(d, gfn, nr_mfns, mfn);
+ ret = map_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
if ( ret < 0 )
printk(XENLOG_G_WARNING
"memory_map:fail: dom%d gfn=%lx mfn=%lx nr=%lx ret:%ld\n",
"memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
d->domain_id, gfn, mfn, nr_mfns);
- ret = unmap_mmio_regions(d, gfn, nr_mfns, mfn);
+ ret = unmap_mmio_regions(d, _gfn(gfn), nr_mfns, _mfn(mfn));
if ( ret < 0 && is_hardware_domain(current->domain) )
printk(XENLOG_ERR
"memory_map: error %ld removing dom%d access to [%lx,%lx]\n",
* the guest physical address space to map, starting from the machine
* frame number mfn. */
int map_mmio_regions(struct domain *d,
- unsigned long start_gfn,
+ gfn_t start_gfn,
unsigned long nr,
- unsigned long mfn);
+ mfn_t mfn);
int unmap_mmio_regions(struct domain *d,
- unsigned long start_gfn,
+ gfn_t start_gfn,
unsigned long nr,
- unsigned long mfn);
+ mfn_t mfn);
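With the typed prototypes, callers have to wrap raw frame numbers explicitly, as the ARM hunks above already do. An illustrative identity mapping of len bytes of MMIO at physical address addr would look like:

    res = map_mmio_regions(d, _gfn(paddr_to_pfn(addr)),
                           DIV_ROUND_UP(len, PAGE_SIZE),
                           _mfn(paddr_to_pfn(addr)));

    /* Swapping the two frame numbers (the mistake the types guard against)
     * no longer compiles where gfn_t and mfn_t are distinct struct types:
     *     map_mmio_regions(d, _mfn(...), nr, _gfn(...));
     */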
/*
* Set access type for a region of gfns.