Switch is_xen_heap_mfn() to take a typed MFN (mfn_t) and adjust its callers
accordingly, dropping the mfn_x()/_mfn() conversions that are no longer
needed.

No functional changes intended.
Signed-off-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
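
Background note: the change relies on Xen's typesafe MFN wrapper. The sketch
below is a simplified stand-in for the real mfn_t/_mfn()/mfn_x(), which are
generated by Xen's typesafe machinery and may compile down to plain integers
in release builds; it only illustrates why handing a raw unsigned long to a
helper that expects an mfn_t now fails to build.

/*
 * Illustrative only -- not the in-tree definitions.
 */
typedef struct { unsigned long mfn; } mfn_t;

static inline mfn_t _mfn(unsigned long m)  { return (mfn_t){ .mfn = m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }
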
gfn_t gpfn)
{
struct page_info *page = NULL;
- unsigned long gfn = 0; /* gcc ... */
- unsigned long prev_mfn, old_gpfn;
+ unsigned long gfn = 0 /* gcc ... */, old_gpfn;
+ mfn_t prev_mfn;
int rc = 0;
mfn_t mfn = INVALID_MFN;
p2m_type_t p2mt;
}
/* Remove previously mapped page if it was present. */
- prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
- if ( mfn_valid(_mfn(prev_mfn)) )
+ prev_mfn = get_gfn(d, gfn_x(gpfn), &p2mt);
+ if ( mfn_valid(prev_mfn) )
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot. */
- rc = guest_physmap_remove_page(d, gpfn, _mfn(prev_mfn), PAGE_ORDER_4K);
+ rc = guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
else
/* Normal domain memory is freed, to avoid leaking memory. */
rc = guest_remove_page(d, gfn_x(gpfn));
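
With prev_mfn held as an mfn_t, the value returned by get_gfn() flows into
mfn_valid(), is_xen_heap_mfn() and guest_physmap_remove_page() without any
mfn_x()/_mfn() round-trips. Using the stand-in definitions from the sketch
above, a hypothetical mix-up like the one below is now caught at build time:

/* Hypothetical helper reusing the stand-in mfn_t above; not Xen code. */
static void unhook(mfn_t mfn)
{
    (void)mfn;                        /* placeholder body */
}

void caller(unsigned long raw_frame)
{
    unhook(_mfn(raw_frame));          /* conversion is explicit at the call */
    /* unhook(raw_frame);                would no longer compile: mfn_t is a
                                         struct, not an integer             */
}
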
prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
if ( mfn_valid(prev_mfn) )
{
- if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
+ if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot */
rc = guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
else
* caching attributes in the shadows to match what was asked for.
*/
if ( (level == 1) && is_hvm_domain(d) &&
- !is_xen_heap_mfn(mfn_x(target_mfn)) )
+ !is_xen_heap_mfn(target_mfn) )
{
int type;
* Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
* prevent merging of power-of-two blocks across the zone boundary.
*/
- if ( ps && !is_xen_heap_mfn(paddr_to_pfn(ps)-1) )
+ if ( ps && !is_xen_heap_mfn(mfn_add(maddr_to_mfn(ps), -1)) )
ps += PAGE_SIZE;
- if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
+ if ( !is_xen_heap_mfn(maddr_to_mfn(pe)) )
pe -= PAGE_SIZE;
memguard_guard_range(maddr_to_virt(ps), pe - ps);
#endif
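
The open-coded paddr_to_pfn() arithmetic above is replaced by the typed
helpers. Their presumed semantics, written out as a stand-alone sketch
(paddr_t and PAGE_SHIFT are stand-ins here; the in-tree definitions are
authoritative):

#include <stdint.h>

typedef uint64_t paddr_t;                   /* stand-in for Xen's paddr_t */
#define PAGE_SHIFT 12                       /* stand-in value             */

/* Machine address -> typed machine frame number. */
static inline mfn_t maddr_to_mfn(paddr_t ma)
{
    return _mfn(ma >> PAGE_SHIFT);
}

/* Offset a typed MFN without unwrapping it at the call site. */
static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
{
    return _mfn(mfn_x(mfn) + i);
}
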
#ifdef CONFIG_ARM_32
-#define is_xen_heap_page(page) is_xen_heap_mfn(mfn_x(page_to_mfn(page)))
+#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \
- unsigned long mfn_ = (mfn); \
+ unsigned long mfn_ = mfn_x(mfn); \
(mfn_ >= mfn_x(xenheap_mfn_start) && \
mfn_ < mfn_x(xenheap_mfn_end)); \
})
#else
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (mfn_valid(_mfn(mfn)) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+ (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
#endif
#define is_xen_fixed_mfn(mfn) \
#ifdef CONFIG_ARM_32
static inline void *maddr_to_virt(paddr_t ma)
{
- ASSERT(is_xen_heap_mfn(ma >> PAGE_SHIFT));
+ ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
ma -= mfn_to_maddr(xenheap_mfn_start);
return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
}
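
On Arm the check itself is unchanged; the macro merely unwraps its now-typed
argument once, and the maddr_to_virt() ASSERT feeds it a typed value via
maddr_to_mfn(). A rough stand-alone rendering of the 32-bit variant after
this change, with the xenheap bounds as made-up globals:

#include <stdbool.h>

static mfn_t xenheap_mfn_start, xenheap_mfn_end;    /* stand-ins for the
                                                        real Arm globals  */

static inline bool sketch_is_xen_heap_mfn(mfn_t mfn)
{
    unsigned long m = mfn_x(mfn);       /* unwrap once, compare as before */

    return m >= mfn_x(xenheap_mfn_start) && m < mfn_x(xenheap_mfn_end);
}
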
#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
- (__mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(_mfn(mfn))))
+ (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn) \
((((mfn) << PAGE_SHIFT) >= __pa(&_stext)) && \
(((mfn) << PAGE_SHIFT) <= __pa(&__2M_rwdata_end)))
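
Call sites end up reading naturally: whatever produces the MFN
(page_to_mfn(), maddr_to_mfn(), get_gfn()) already yields an mfn_t, so the
value is passed straight through. A small usage example built on the
sketches above, with a made-up value:

/* Usage example only; 'example_frame' is made up. */
void example(void)
{
    mfn_t example_frame = xenheap_mfn_start;      /* from the sketch above */

    if ( sketch_is_xen_heap_mfn(example_frame) )  /* no mfn_x()/_mfn()     */
    {
        /* ... operate on the xenheap frame ... */
    }
}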