#define scrub_page(p) clear_page(p)
#endif
-#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
-
static DEFINE_SPINLOCK(page_scrub_lock);
LIST_HEAD(page_scrub_list);
static unsigned long scrub_pages;
#define MEMZONE_XEN 0
#define NR_ZONES (PADDR_BITS - PAGE_SHIFT)
-#define pfn_dom_zone_type(_pfn) (fls(_pfn) - 1)
+#define bits_to_zone(b) (((b) < (PAGE_SHIFT + 1)) ? 0 : ((b) - PAGE_SHIFT - 1))
+#define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN : \
+ (fls(page_to_mfn(pg)) - 1))
typedef struct list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
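
For context (not part of the patch): a page's zone is now derived purely from its MFN, so zone z holds the MFNs in [2^z, 2^(z+1)), with MEMZONE_XEN (zone 0) reserved for Xen-heap pages, and bits_to_zone() maps an address width to the highest zone that width can reach. Below is a standalone model of that bucketing, using a toy fls() and an assumed 4K PAGE_SHIFT; every MODEL_* name and helper is local to the sketch, not Xen code.

    /* Standalone model of the zone bucketing (illustrative only, not Xen code). */
    #include <assert.h>
    #include <stdio.h>

    #define MODEL_PAGE_SHIFT 12                 /* assumed 4K pages */

    /* 1-based index of the most significant set bit, 0 for 0, like fls(). */
    static int model_fls(unsigned long x)
    {
        int r = 0;
        while ( x ) { r++; x >>= 1; }
        return r;
    }

    /* Mirrors page_to_zone() for a non-Xen-heap page. */
    static int zone_of_mfn(unsigned long mfn)
    {
        return model_fls(mfn) - 1;
    }

    /* Mirrors the bits_to_zone() macro above. */
    static int bits_to_zone(int b)
    {
        return (b < MODEL_PAGE_SHIFT + 1) ? 0 : (b - MODEL_PAGE_SHIFT - 1);
    }

    int main(void)
    {
        /* Zone z holds MFNs [2^z, 2^(z+1)); e.g. MFNs 0x100..0x1ff are zone 8. */
        assert(zone_of_mfn(0x100) == 8 && zone_of_mfn(0x1ff) == 8);
        /* A 32-bit address limit reaches MFNs below 1<<20, i.e. zones up to 19. */
        assert(bits_to_zone(32) == 19);
        assert(zone_of_mfn((1UL << 20) - 1) == 19 && zone_of_mfn(1UL << 20) == 20);
        printf("zone model OK\n");
        return 0;
    }
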
/* Free 2^@order set of pages. */
static void free_heap_pages(
- unsigned int zone, struct page_info *pg, unsigned int order)
+ struct page_info *pg, unsigned int order)
{
unsigned long mask;
unsigned int i, node = phys_to_nid(page_to_maddr(pg));
+ unsigned int zone = page_to_zone(pg);
struct domain *d;
- ASSERT(zone < NR_ZONES);
ASSERT(order <= MAX_ORDER);
ASSERT(node >= 0);
ASSERT(node < num_online_nodes());
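
A side note on why free_heap_pages() can take the zone from the first page of a 2^order block (reasoning added here, not taken from the patch): a naturally aligned block that does not contain MFN 0 never crosses a power-of-two MFN boundary, so every page in it gets the same fls()-derived zone. The block starting at MFN 0 is the one exception and is skipped in the standalone check below; model_fls() is a local stand-in for fls().

    /* Standalone check (illustrative only): every MFN in a naturally aligned
     * 2^order block that does not start at 0 shares the same highest set bit,
     * hence the same fls()-derived zone. */
    #include <assert.h>
    #include <stdio.h>

    static int model_fls(unsigned long x)
    {
        int r = 0;
        while ( x ) { r++; x >>= 1; }
        return r;
    }

    int main(void)
    {
        unsigned int order, i;
        unsigned long base;

        for ( order = 0; order <= 10; order++ )
            for ( base = 1UL << order; base < (1UL << 16); base += 1UL << order )
                for ( i = 0; i < (1UL << order); i++ )
                    assert(model_fls(base + i) == model_fls(base));

        printf("every aligned block sits inside a single zone\n");
        return 0;
    }
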
*/
#define MAX_ORDER_ALIGNED (1UL << (MAX_ORDER))
static void init_heap_pages(
- unsigned int zone, struct page_info *pg, unsigned long nr_pages)
+ struct page_info *pg, unsigned long nr_pages)
{
unsigned int nid_curr, nid_prev;
unsigned long i;
- ASSERT(zone < NR_ZONES);
-
if ( likely(page_to_mfn(pg) != 0) )
nid_prev = phys_to_nid(page_to_maddr(pg-1));
else
nid_prev = phys_to_nid(page_to_maddr(pg));
for ( i = 0; i < nr_pages; i++ )
{
*/
if ( (nid_curr == nid_prev) || (page_to_maddr(pg+i) &
MAX_ORDER_ALIGNED) )
- free_heap_pages(zone, pg+i, 0);
+ free_heap_pages(pg+i, 0);
else
printk("Reserving non-aligned node boundary @ mfn %lu\n",
page_to_mfn(pg+i));
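
The walk above frees each page into the heap unless it sits right where the node changes and is not suitably aligned, so that later buddy merging cannot create a block spanning two nodes. Here is a standalone model of that walk; the node layout, MFN range and MODEL_MAX_ORDER are invented, and the alignment test is written as a plain MFN-alignment check for clarity (the patch itself leaves the existing expression alone).

    /* Standalone model (illustrative only) of the node-boundary walk: free pages
     * whose node matches the previous page's node; when the node changes, reserve
     * the boundary page unless it falls on a MAX_ORDER-aligned MFN. */
    #include <stdio.h>

    #define MODEL_MAX_ORDER 9

    /* Toy phys_to_nid(): node 0 below MFN 1000, node 1 from MFN 1000 upwards. */
    static int nid_of_mfn(unsigned long mfn)
    {
        return mfn < 1000 ? 0 : 1;
    }

    int main(void)
    {
        unsigned long mfn, freed = 0, reserved = 0;
        int nid_prev = nid_of_mfn(256 - 1), nid_curr = nid_prev;

        for ( mfn = 256; mfn < 2048; mfn++, nid_prev = nid_curr )
        {
            nid_curr = nid_of_mfn(mfn);
            if ( (nid_curr == nid_prev) || !(mfn & ((1UL << MODEL_MAX_ORDER) - 1)) )
                freed++;
            else
            {
                printf("reserving non-aligned node boundary @ mfn %lu\n", mfn);
                reserved++;
            }
        }
        printf("%lu freed, %lu reserved\n", freed, reserved);
        return 0;
    }
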
if ( next_free )
map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
if ( curr_free )
- init_heap_pages(pfn_dom_zone_type(i), mfn_to_page(i), 1);
+ init_heap_pages(mfn_to_page(i), 1);
}
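
The map_alloc(i+1, 1) above leans on how free_heap_pages() merges buddies: a freed block keeps coalescing with its buddy only while that buddy is itself free, so marking the next page as allocated stops merging at that boundary. A minimal standalone sketch of such a merge loop follows; the bitmap, page count and merge_order() helper are invented and far simpler than the real allocator.

    /* Minimal sketch (illustrative only) of buddy merging stopping at an
     * allocated neighbour, the behaviour the map_alloc(i+1, 1) call relies on. */
    #include <stdio.h>
    #include <string.h>

    #define MODEL_MAX_ORDER 4
    #define MODEL_PAGES     (1 << MODEL_MAX_ORDER)

    static unsigned char allocated[MODEL_PAGES];   /* toy allocation bitmap */

    /* Merge a free page upwards while its buddy block is entirely free. */
    static unsigned int merge_order(unsigned long mfn)
    {
        unsigned int order = 0;

        while ( order < MODEL_MAX_ORDER )
        {
            unsigned long buddy = mfn ^ (1UL << order), i;

            for ( i = 0; i < (1UL << order); i++ )
                if ( allocated[buddy + i] )
                    return order;          /* buddy not free: stop merging here */
            mfn &= ~(1UL << order);        /* absorb buddy, grow the block */
            order++;
        }
        return order;
    }

    int main(void)
    {
        memset(allocated, 0, sizeof(allocated));
        allocated[8] = 1;                  /* neighbour kept allocated, as with map_alloc() */
        printf("page 0 merges up to order %u, capped by allocated page 8\n",
               merge_order(0));
        return 0;
    }
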
if ( !dma_bitsize && (num_online_nodes() > 1) )
if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
pe -= PAGE_SIZE;
- init_heap_pages(MEMZONE_XEN, maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+ init_heap_pages(maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
}
memguard_guard_range(v, 1 << (order + PAGE_SHIFT));
- free_heap_pages(MEMZONE_XEN, virt_to_page(v), order);
+ free_heap_pages(virt_to_page(v), order);
}
#else
for ( i = 0; i < (1u << order); i++ )
pg[i].count_info &= ~PGC_xen_heap;
- free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+ free_heap_pages(pg, order);
}
#endif
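
Clearing PGC_xen_heap before the final free interacts directly with the new page_to_zone(): a page still flagged as Xen-heap would land in MEMZONE_XEN, while an unflagged page falls into its MFN-derived zone. Below is a toy model of that selection; the struct layout, flag value and helper names are invented for the sketch.

    /* Toy model (illustrative only) of the zone selection done by page_to_zone():
     * a page still carrying the xen-heap flag lands in MEMZONE_XEN, otherwise
     * the zone comes from the MFN. */
    #include <assert.h>
    #include <stdio.h>

    #define MODEL_MEMZONE_XEN  0
    #define MODEL_PGC_XEN_HEAP 0x1u

    struct model_page { unsigned long mfn; unsigned int count_info; };

    static int model_fls(unsigned long x)
    {
        int r = 0;
        while ( x ) { r++; x >>= 1; }
        return r;
    }

    static int model_page_to_zone(const struct model_page *pg)
    {
        if ( pg->count_info & MODEL_PGC_XEN_HEAP )
            return MODEL_MEMZONE_XEN;
        return model_fls(pg->mfn) - 1;
    }

    int main(void)
    {
        struct model_page pg = { .mfn = 0x1234, .count_info = MODEL_PGC_XEN_HEAP };

        assert(model_page_to_zone(&pg) == MODEL_MEMZONE_XEN);
        pg.count_info &= ~MODEL_PGC_XEN_HEAP;      /* as the loop above does */
        assert(model_page_to_zone(&pg) == model_fls(0x1234) - 1);
        printf("zone follows the xen-heap flag as expected\n");
        return 0;
    }
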
void init_domheap_pages(paddr_t ps, paddr_t pe)
{
- unsigned long s_tot, e_tot;
- unsigned int zone;
+ unsigned long smfn, emfn;
ASSERT(!in_irq());
- s_tot = round_pgup(ps) >> PAGE_SHIFT;
- e_tot = round_pgdown(pe) >> PAGE_SHIFT;
-
- zone = fls(s_tot) - 1;
- BUG_ON(zone <= MEMZONE_XEN);
+ smfn = round_pgup(ps) >> PAGE_SHIFT;
+ emfn = round_pgdown(pe) >> PAGE_SHIFT;
- while ( s_tot < e_tot )
- {
- unsigned long end = e_tot;
-
- BUILD_BUG_ON(NR_ZONES > BITS_PER_LONG);
- if ( zone < BITS_PER_LONG - 1 && end > 1UL << (zone + 1) )
- end = 1UL << (zone + 1);
- init_heap_pages(zone, mfn_to_page(s_tot), end - s_tot);
- s_tot = end;
- zone++;
- }
+ init_heap_pages(mfn_to_page(smfn), emfn - smfn);
}
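
For comparison (not part of the patch): the deleted loop had to chop [smfn, emfn) at power-of-two MFN boundaries so each init_heap_pages() call stayed within one zone; with the zone now derived per page, the whole range can be handed over at once. The standalone sketch below reproduces the old split points next to the single new call; the MFN range is made up and model_fls() is a local stand-in for fls().

    /* Standalone sketch (illustrative only): the split points the removed loop
     * produced for an MFN range, versus the single call used now. */
    #include <stdio.h>

    static int model_fls(unsigned long x)
    {
        int r = 0;
        while ( x ) { r++; x >>= 1; }
        return r;
    }

    int main(void)
    {
        unsigned long smfn = 0x300, emfn = 0x1500;   /* made-up range */
        unsigned long s = smfn;
        int zone = model_fls(s) - 1;                 /* as the deleted code did */

        /* The BITS_PER_LONG guard from the deleted code is omitted here. */
        while ( s < emfn )
        {
            unsigned long end = emfn;

            if ( end > 1UL << (zone + 1) )
                end = 1UL << (zone + 1);
            printf("old: init_heap_pages zone %d, mfns [%#lx, %#lx)\n", zone, s, end);
            s = end;
            zone++;
        }
        printf("new: init_heap_pages mfns [%#lx, %#lx) in one call\n", smfn, emfn);
        return 0;
    }
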
if ( (d != NULL) && assign_pages(d, pg, order, memflags) )
{
- free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+ free_heap_pages(pg, order);
return NULL;
}
if ( likely(!d->is_dying) )
{
- free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+ free_heap_pages(pg, order);
}
else
{
else
{
/* Freeing anonymous domain-heap pages. */
- free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, order);
+ free_heap_pages(pg, order);
drop_dom_ref = 0;
}
p = map_domain_page(page_to_mfn(pg));
scrub_page(p);
unmap_domain_page(p);
- free_heap_pages(pfn_dom_zone_type(page_to_mfn(pg)), pg, 0);
+ free_heap_pages(pg, 0);
}
} while ( (NOW() - start) < MILLISECS(1) );