PAGE_LIST_HEAD(page_scrub_list);
static unsigned long scrub_pages;
-/* Offlined page list, protected by heap_lock */
+/* Offlined page list, protected by heap_lock. */
PAGE_LIST_HEAD(page_offlined_list);
-
-/* Broken page list, protected by heap_lock */
+/* Broken page list, protected by heap_lock. */
PAGE_LIST_HEAD(page_broken_list);
+
/*********************
* ALLOCATION BITMAP
* One bit per page of memory. Bit set => page is allocated.
return pg;
}
-/*
- * Remove any offlined page in the buddy poined by head
- */
+/* Remove any offlined page in the buddy pointed to by head. */
static int reserve_offlined_page(struct page_info *head)
{
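+ /*
+  * Split the buddy so that pages offlined while free are taken off the
+  * free lists, while the rest of the buddy goes back to the heap in the
+  * largest possible chunks.
+  */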
unsigned int node = phys_to_nid(page_to_maddr(head));
struct page_info *pg;
int next_order;
- if (test_bit(_PGC_offlined, &cur_head->count_info))
+ if ( test_bit(_PGC_offlined, &cur_head->count_info) )
{
cur_head++;
continue;
next_order = cur_order = 0;
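+ /*
+  * Grow the chunk at cur_head one order at a time, for as long as the
+  * doubled chunk still fits inside the buddy and contains no offlined
+  * page.
+  */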
- while (cur_order < head_order)
+ while ( cur_order < head_order )
{
next_order = cur_order + 1;
- if ( (cur_head + (1 << next_order)) >= (head + ( 1 << head_order)))
+ if ( (cur_head + (1 << next_order)) >= (head + (1 << head_order)) )
goto merge;
- for (i = (1 << cur_order), pg = cur_head + (1 << cur_order);
- i < (1 << next_order);
- i++, pg ++)
- if (test_bit(_PGC_offlined, &pg->count_info))
+ for ( i = (1 << cur_order), pg = cur_head + (1 << cur_order);
+ i < (1 << next_order);
+ i++, pg++ )
+ if ( test_bit(_PGC_offlined, &pg->count_info) )
break;
- if (i == ( 1 << next_order))
+ if ( i == (1 << next_order) )
{
cur_order = next_order;
continue;
}
else
{
- /*
- * We don't need considering merge outside the head_order
- */
-merge:
+ merge:
+ /* We don't consider merging outside the head_order. */
page_list_add_tail(cur_head, &heap(node, zone, cur_order));
PFN_ORDER(cur_head) = cur_order;
cur_head += (1 << cur_order);
}
}
- for (cur_head = head; cur_head < head + ( 1UL << head_order); cur_head++)
+ for ( cur_head = head; cur_head < head + (1UL << head_order); cur_head++ )
{
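+ /*
+  * Mark each offlined page as allocated and move it to the broken or
+  * offlined list so the allocator can no longer hand it out.
+  */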
- if (!test_bit(_PGC_offlined, &cur_head->count_info))
+ if ( !test_bit(_PGC_offlined, &cur_head->count_info) )
continue;
- avail[node][zone] --;
+ avail[node][zone]--;
map_alloc(page_to_mfn(cur_head), 1);
- if (test_bit(_PGC_broken, &cur_head->count_info))
- page_list_add_tail(cur_head, &page_broken_list);
- else
- page_list_add_tail(cur_head, &page_offlined_list);
+ page_list_add_tail(cur_head,
+ test_bit(_PGC_broken, &cur_head->count_info) ?
+ &page_broken_list : &page_offlined_list);
- count ++;
+ count++;
}
return count;
*/
ASSERT(!(pg[i].count_info & PGC_offlined));
pg[i].count_info &= PGC_offlining | PGC_broken;
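+ /* A page that was being offlined completes the transition when freed. */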
- if (pg[i].count_info & PGC_offlining)
+ if ( pg[i].count_info & PGC_offlining )
{
pg[i].count_info &= ~PGC_offlining;
pg[i].count_info |= PGC_offlined;
PFN_ORDER(pg) = order;
page_list_add_tail(pg, &heap(node, zone, order));
- if (tainted)
+ if ( tainted )
reserve_offlined_page(pg);
spin_unlock(&heap_lock);
unsigned long nx, x, y = pg->count_info;
ASSERT(page_is_ram_type(page_to_mfn(pg), RAM_TYPE_CONVENTIONAL));
- /*
- * Caller gurantee the page will not be reassigned during this process
- */
+ /* The caller guarantees the page is not reassigned while we run. */
ASSERT(spin_is_locked(&heap_lock));
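+ /* Update count_info atomically: retry if another CPU races with us. */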
do {
- if ( ((x & PGC_offlined_broken) == PGC_offlined_broken) )
+ if ( (x & PGC_offlined_broken) == PGC_offlined_broken )
return y;
- /* PGC_offlined means it is free pages */
- if (x & PGC_offlined)
+
+ if ( x & PGC_offlined )
{
- if (broken && !(nx & PGC_broken))
+ /* PGC_offlined means it is a free page. */
+ if ( broken && !(nx & PGC_broken) )
nx |= PGC_broken;
else
return y;
}
- /* It is not offlined, not reserved page */
- else if ( allocated_in_map(page_to_mfn(pg)) )
- nx |= PGC_offlining;
else
- nx |= PGC_offlined;
+ {
+ /* The page is not yet offlined: mark it offlining if allocated, offlined otherwise. */
+ nx |= (allocated_in_map(page_to_mfn(pg)) ?
+ PGC_offlining : PGC_offlined);
+ }
- if (broken)
+ if ( broken )
nx |= PGC_broken;
} while ( (y = cmpxchg(&pg->count_info, x, nx)) != x );
unsigned int i, node = phys_to_nid(page_to_maddr(pg));
unsigned int zone = page_to_zone(pg);
- /* get the header */
+ /* Find the free buddy that contains pg. */
for ( i = 0; i <= MAX_ORDER; i++ )
{
struct page_info *tmp;
if ( page_list_empty(&heap(node, zone, i)) )
continue;
- page_list_for_each_safe(head, tmp, &heap(node, zone, i))
+ page_list_for_each_safe ( head, tmp, &heap(node, zone, i) )
{
if ( (head <= pg) &&
(head + (1UL << i) > pg) )
}
-/*
- * offline one page
- */
int offline_page(unsigned long mfn, int broken, uint32_t *status)
{
unsigned long old_info = 0;
int ret = 0;
struct page_info *pg;
- if (mfn > max_page)
+ if ( mfn >= max_page )
{
dprintk(XENLOG_WARNING,
"try to offline page out of range %lx\n", mfn);
*status = 0;
pg = mfn_to_page(mfn);
-
#if defined(__x86_64__)
/* Xen's txt mfn in x86_64 is reserved in e820 */
if ( is_xen_fixed_mfn(mfn) )
* N.B. xen's txt in x86_64 is marked reserved and handled already
* Also kexec range is reserved
*/
- if (!page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL))
+ if ( !page_is_ram_type(mfn, RAM_TYPE_CONVENTIONAL) )
{
*status = PG_OFFLINE_FAILED | PG_OFFLINE_NOT_CONV_RAM;
return -EINVAL;
reserve_heap_page(pg);
*status = PG_OFFLINE_OFFLINED;
}
- else if (test_bit(_PGC_offlined, &pg->count_info))
+ else if ( test_bit(_PGC_offlined, &pg->count_info) )
{
*status = PG_OFFLINE_OFFLINED;
}
- else if ((owner = page_get_owner_and_reference(pg)))
+ else if ( (owner = page_get_owner_and_reference(pg)) )
{
*status = PG_OFFLINE_OWNED | PG_OFFLINE_PENDING |
(owner->domain_id << PG_OFFLINE_OWNER_SHIFT);
- (DOMID_INVALID << PG_OFFLINE_OWNER_SHIFT );
+ (DOMID_INVALID << PG_OFFLINE_OWNER_SHIFT);
}
- if (broken)
+ if ( broken )
*status |= PG_OFFLINE_BROKEN;
spin_unlock(&heap_lock);
ret = -EINVAL;
- *status = PG_ONLINE_FAILED |PG_ONLINE_BROKEN;
+ *status = PG_ONLINE_FAILED | PG_ONLINE_BROKEN;
}
- else if (pg->count_info & PGC_offlined)
+ else if ( pg->count_info & PGC_offlined )
{
clear_bit(_PGC_offlined, &pg->count_info);
page_list_del(pg, &page_offlined_list);
*status = PG_ONLINE_ONLINED;
free = 1;
}
- else if (pg->count_info & PGC_offlining)
+ else if ( pg->count_info & PGC_offlining )
{
clear_bit(_PGC_offlining, &pg->count_info);
*status = PG_ONLINE_ONLINED;
}
spin_unlock(&heap_lock);
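+ /* Free outside the lock: free_heap_pages() takes heap_lock itself. */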
- if (free)
+ if ( free )
free_heap_pages(pg, 0);
return ret;
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
#define PGC_cacheattr_base PG_shift(6)
#define PGC_cacheattr_mask PG_mask(7, 6)
-
- /* Page is broken? */
- #define _PGC_broken PG_shift(7)
- #define PGC_broken PG_mask(1, 7)
- /* Page is offline pending ? */
- #define _PGC_offlining PG_shift(8)
- #define PGC_offlining PG_mask(1, 8)
- /* Page is offlined */
- #define _PGC_offlined PG_shift(9)
- #define PGC_offlined PG_mask(1, 9)
- #define PGC_offlined_broken (PGC_offlined | PGC_broken)
-
- #define is_page_offlining(page) ((page)->count_info & PGC_offlining)
- #define is_page_offlined(page) ((page)->count_info & PGC_offlined)
- #define is_page_broken(page) ((page)->count_info & PGC_broken)
- #define is_page_online(page) (!is_page_offlined(page))
+ /* Page is broken? */
+#define _PGC_broken PG_shift(7)
+#define PGC_broken PG_mask(1, 7)
+ /* Page is offline pending? */
+#define _PGC_offlining PG_shift(8)
+#define PGC_offlining PG_mask(1, 8)
+ /* Page is offlined. */
+#define _PGC_offlined PG_shift(9)
+#define PGC_offlined PG_mask(1, 9)
+#define PGC_offlined_broken (PGC_offlined | PGC_broken)
/* Count of references to this frame. */
#define PGC_count_width PG_shift(9)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)
+#define is_page_offlining(page) ((page)->count_info & PGC_offlining)
+#define is_page_offlined(page) ((page)->count_info & PGC_offlined)
+#define is_page_broken(page) ((page)->count_info & PGC_broken)
+#define is_page_online(page) (!is_page_offlined(page))
+
#if defined(__i386__)
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \