#define heap(node, zone, order) ((*_heap[node])[zone][order])
static unsigned long *avail[MAX_NUMNODES];
+static long total_avail_pages;
static DEFINE_SPINLOCK(heap_lock);
ASSERT(avail[node][zone] >= request);
avail[node][zone] -= request;
+ total_avail_pages -= request;
+ ASSERT(total_avail_pages >= 0);
spin_unlock(&heap_lock);
continue;
avail[node][zone]--;
+ total_avail_pages--;
+ ASSERT(total_avail_pages >= 0);
page_list_add_tail(cur_head,
test_bit(_PGC_broken, &cur_head->count_info) ?
spin_lock(&heap_lock);
avail[node][zone] += 1 << order;
+ total_avail_pages += 1 << order;
/* Merge chunks as far as possible. */
while ( order < MAX_ORDER )
return free_pages;
}
+/*
+ * Return a snapshot of the total number of free heap pages.
+ *
+ * NOTE(review): total_avail_pages is read here without heap_lock held, so
+ * the value may be momentarily stale relative to concurrent alloc/free
+ * paths — presumably acceptable for callers wanting an approximate count;
+ * verify against call sites.
+ *
+ * NOTE(review): total_avail_pages is declared 'long' (see its definition
+ * above) but is returned as 'unsigned long'; the counter is asserted
+ * non-negative on the alloc paths, so the implicit conversion should be
+ * value-preserving — confirm.
+ */
+unsigned long total_free_pages(void)
+{
+    return total_avail_pages;
+}
+
void __init end_boot_allocator(void)
{
unsigned int i;
unsigned int online_page(unsigned long mfn, uint32_t *status);
int offline_page(unsigned long mfn, int broken, uint32_t *status);
int query_page_offline(unsigned long mfn, uint32_t *status);
+unsigned long total_free_pages(void);
void scrub_heap_pages(void);