x86/mm/p2m: use defines for page sizes
author      Christoph Egger <Christoph.Egger@amd.com>
            Fri, 26 Aug 2011 12:00:52 +0000 (13:00 +0100)
committer   Christoph Egger <Christoph.Egger@amd.com>
            Fri, 26 Aug 2011 12:00:52 +0000 (13:00 +0100)
Use defines for the page orders (4K/2M/1G) instead of hardcoding the values.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Tim Deegan <tim@xen.org>
xen/arch/x86/mm/p2m-pod.c
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/p2m.c
xen/include/asm-x86/page.h
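
For reference, the new macros are page orders (log2 of the number of 4K
pages a mapping covers), not byte sizes.  A minimal illustrative sketch,
not hypervisor code, of how an order maps to a mapping size (PAGE_SHIFT
is 12 on x86):

    /* Order is log2 of the 4K-page count, so the mapping size in
     * bytes is 1UL << (order + PAGE_SHIFT). */
    #define PAGE_SHIFT      12
    #define PAGE_ORDER_4K   0   /* 1 page          -> 4KiB */
    #define PAGE_ORDER_2M   9   /* 512 pages       -> 2MiB */
    #define PAGE_ORDER_1G   18  /* 512 * 512 pages -> 1GiB */

    static unsigned long order_to_bytes(unsigned int order)
    {
        return 1UL << (order + PAGE_SHIFT); /* order 9 -> 0x200000 */
    }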

index e83dfffc9c3a5ee40feede4556e0ad5a3023a5c5..1207f33873b2eb603ad9c614145a23075d0f9488 100644 (file)
@@ -112,11 +112,11 @@ p2m_pod_cache_add(struct p2m_domain *p2m,
     /* Then add the first one to the appropriate populate-on-demand list */
     switch(order)
     {
-    case 9:
+    case PAGE_ORDER_2M:
         page_list_add_tail(page, &p2m->pod.super); /* lock: page_alloc */
         p2m->pod.count += 1 << order;
         break;
-    case 0:
+    case PAGE_ORDER_4K:
         page_list_add_tail(page, &p2m->pod.single); /* lock: page_alloc */
         p2m->pod.count += 1;
         break;
@@ -143,11 +143,11 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m,
     struct page_info *p = NULL;
     int i;
 
-    if ( order == 9 && page_list_empty(&p2m->pod.super) )
+    if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) )
     {
         return NULL;
     }
-    else if ( order == 0 && page_list_empty(&p2m->pod.single) )
+    else if ( order == PAGE_ORDER_4K && page_list_empty(&p2m->pod.single) )
     {
         unsigned long mfn;
         struct page_info *q;
@@ -168,12 +168,12 @@ static struct page_info * p2m_pod_cache_get(struct p2m_domain *p2m,
 
     switch ( order )
     {
-    case 9:
+    case PAGE_ORDER_2M:
         BUG_ON( page_list_empty(&p2m->pod.super) );
         p = page_list_remove_head(&p2m->pod.super);
         p2m->pod.count -= 1 << order; /* Lock: page_alloc */
         break;
-    case 0:
+    case PAGE_ORDER_4K:
         BUG_ON( page_list_empty(&p2m->pod.single) );
         p = page_list_remove_head(&p2m->pod.single);
         p2m->pod.count -= 1;
@@ -206,17 +206,17 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int p
         int order;
 
         if ( (pod_target - p2m->pod.count) >= SUPERPAGE_PAGES )
-            order = 9;
+            order = PAGE_ORDER_2M;
         else
-            order = 0;
+            order = PAGE_ORDER_4K;
     retry:
         page = alloc_domheap_pages(d, order, 0);
         if ( unlikely(page == NULL) )
         {
-            if ( order == 9 )
+            if ( order == PAGE_ORDER_2M )
             {
                 /* If we can't allocate a superpage, try singleton pages */
-                order = 0;
+                order = PAGE_ORDER_4K;
                 goto retry;
             }   
             
@@ -249,9 +249,9 @@ p2m_pod_set_cache_target(struct p2m_domain *p2m, unsigned long pod_target, int p
 
         if ( (p2m->pod.count - pod_target) > SUPERPAGE_PAGES
              && !page_list_empty(&p2m->pod.super) )
-            order = 9;
+            order = PAGE_ORDER_2M;
         else
-            order = 0;
+            order = PAGE_ORDER_4K;
 
         page = p2m_pod_cache_get(p2m, order);
 
@@ -468,12 +468,12 @@ p2m_pod_offline_or_broken_replace(struct page_info *p)
 
     free_domheap_page(p);
 
     p = alloc_domheap_page(d, 0);
     if ( unlikely(!p) )
         return;
 
     p2m_lock(p2m);
-    p2m_pod_cache_add(p2m, p, 0);
+    p2m_pod_cache_add(p2m, p, PAGE_ORDER_4K);
     p2m_unlock(p2m);
     return;
 }
@@ -688,7 +688,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    set_p2m_entry(p2m, gfn, _mfn(0), 9,
+    set_p2m_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
                   p2m_populate_on_demand, p2m->default_access);
 
     /* Make sure none of the MFNs are used elsewhere... for example, mapped
@@ -739,7 +739,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
 
     /* Finally!  We've passed all the checks, and can add the mfn superpage
      * back on the PoD cache, and account for the new p2m PoD entries */
-    p2m_pod_cache_add(p2m, mfn_to_page(mfn0), 9);
+    p2m_pod_cache_add(p2m, mfn_to_page(mfn0), PAGE_ORDER_2M);
     p2m->pod.entry_count += SUPERPAGE_PAGES;
 
 out_reset:
@@ -800,7 +800,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
         }
 
         /* Try to remove the page, restoring old mapping if it fails. */
-        set_p2m_entry(p2m, gfns[i], _mfn(0), 0,
+        set_p2m_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
 
         /* See if the page was successfully unmapped.  (Allow one refcount
@@ -810,7 +810,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
             unmap_domain_page(map[i]);
             map[i] = NULL;
 
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
+            set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                types[i], p2m->default_access);
 
             continue;
         }
@@ -832,7 +833,8 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
          * check timing.  */
         if ( j < PAGE_SIZE/sizeof(*map[i]) )
         {
-            set_p2m_entry(p2m, gfns[i], mfns[i], 0, types[i], p2m->default_access);
+            set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+                types[i], p2m->default_access);
         }
         else
         {
@@ -852,7 +854,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
             }
 
             /* Add to cache, and account for the new p2m PoD entry */
-            p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), 0);
+            p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), PAGE_ORDER_4K);
             p2m->pod.entry_count++;
         }
     }
@@ -867,7 +869,7 @@ p2m_pod_emergency_sweep_super(struct p2m_domain *p2m)
 
     if ( p2m->pod.reclaim_super == 0 )
     {
-        p2m->pod.reclaim_super = (p2m->pod.max_guest>>9)<<9;
+        p2m->pod.reclaim_super = (p2m->pod.max_guest>>PAGE_ORDER_2M)<<PAGE_ORDER_2M;
         p2m->pod.reclaim_super -= SUPERPAGE_PAGES;
     }
     
@@ -956,7 +958,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
 
     /* Because PoD does not have a cache list for 1GB pages, it has to remap
      * the 1GB region in 2MB chunks for a retry. */
-    if ( order == 18 )
+    if ( order == PAGE_ORDER_1G )
     {
         gfn_aligned = (gfn >> order) << order;
         /* Note that we are supposed to call set_p2m_entry() 512 times to 
@@ -964,7 +966,7 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
          * set_p2m_entry() should automatically shatter the 1GB page into 
          * 512 2MB pages. The remaining 511 calls are unnecessary.
          */
-        set_p2m_entry(p2m, gfn_aligned, _mfn(0), 9,
+        set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
                       p2m_populate_on_demand, p2m->default_access);
         audit_p2m(p2m, 1);
         p2m_unlock(p2m);
@@ -979,12 +981,12 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
     {
 
         /* If we're low, start a sweep */
-        if ( order == 9 && page_list_empty(&p2m->pod.super) )
+        if ( order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) )
             p2m_pod_emergency_sweep_super(p2m);
 
         if ( page_list_empty(&p2m->pod.single) &&
-             ( ( order == 0 )
-               || (order == 9 && page_list_empty(&p2m->pod.super) ) ) )
+             ( ( order == PAGE_ORDER_4K )
+               || (order == PAGE_ORDER_2M && page_list_empty(&p2m->pod.super) ) ) )
             p2m_pod_emergency_sweep(p2m);
     }
 
@@ -1046,13 +1048,13 @@ out_of_memory:
 out_fail:
     return -1;
 remap_and_retry:
-    BUG_ON(order != 9);
+    BUG_ON(order != PAGE_ORDER_2M);
     spin_unlock(&d->page_alloc_lock);
 
     /* Remap this 2-meg region in singleton chunks */
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
-        set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), 0,
+        set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
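
The emergency-sweep hunk above rounds pod.max_guest down to a superpage
boundary before reclaiming.  A small sketch, under the new defines, of
that alignment arithmetic in isolation (the helper name is illustrative,
not a Xen function):

    /* Round a page count down to a 2M-superpage boundary, as the
     * sweep does for pod.reclaim_super.  The shift pair is the same
     * as masking off the low PAGE_ORDER_2M bits. */
    static unsigned long superpage_align_down(unsigned long pages)
    {
        return (pages >> PAGE_ORDER_2M) << PAGE_ORDER_2M;
        /* equivalently: pages & ~((1UL << PAGE_ORDER_2M) - 1) */
    }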
index 1b4a5cb6cf09d4280c2676d720ae2590c4ef93b8..65fa3c97167f7bb14af87d86b4e5874a1e1c6f37 100644 (file)
@@ -121,12 +121,12 @@ static void
 p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 {
     /* End if the entry is a leaf entry. */
-    if ( page_order == 0
+    if ( page_order == PAGE_ORDER_4K
          || !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT)
          || (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
         return;
 
-    if ( page_order > 9 )
+    if ( page_order > PAGE_ORDER_2M )
     {
         l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
         for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
@@ -323,7 +323,7 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     /*
      * Try to allocate 1GB page table if this feature is supported.
      */
-    if ( page_order == 18 )
+    if ( page_order == PAGE_ORDER_1G )
     {
         l1_pgentry_t old_entry = l1e_empty();
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -373,7 +373,7 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                               PGT_l2_page_table) )
         goto out;
 
-    if ( page_order == 0 )
+    if ( page_order == PAGE_ORDER_4K )
     {
         if ( !p2m_next_level(p2m, &table_mfn, &table, &gfn_remainder, gfn,
                              L2_PAGETABLE_SHIFT - PAGE_SHIFT,
@@ -399,7 +399,7 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, table_mfn, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
     }
-    else if ( page_order == 9 )
+    else if ( page_order == PAGE_ORDER_2M )
     {
         l1_pgentry_t old_entry = l1e_empty();
         p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
@@ -541,7 +541,7 @@ pod_retry_l3:
             /* The read has succeeded, so we know that mapping exists */
             if ( q != p2m_query )
             {
-                if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                     goto pod_retry_l3;
                 p2mt = p2m_invalid;
                 printk("%s: Allocate 1GB failed!\n", __func__);
@@ -735,7 +735,7 @@ pod_retry_l3:
             {
                 if ( q != p2m_query )
                 {
-                    if ( !p2m_pod_demand_populate(p2m, gfn, 18, q) )
+                    if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                         goto pod_retry_l3;
                 }
                 else
@@ -771,7 +771,7 @@ pod_retry_l2:
         {
             if ( q != p2m_query ) {
                 if ( !p2m_pod_check_and_populate(p2m, gfn,
-                                                 (l1_pgentry_t *)l2e, 9, q) )
+                                                 (l1_pgentry_t *)l2e, PAGE_ORDER_2M, q) )
                     goto pod_retry_l2;
             } else
                 *t = p2m_populate_on_demand;
@@ -803,7 +803,7 @@ pod_retry_l1:
         {
             if ( q != p2m_query ) {
                 if ( !p2m_pod_check_and_populate(p2m, gfn,
-                                                 (l1_pgentry_t *)l1e, 0, q) )
+                                                 (l1_pgentry_t *)l1e, PAGE_ORDER_4K, q) )
                     goto pod_retry_l1;
             } else
                 *t = p2m_populate_on_demand;
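
In p2m-pt.c the three orders correspond one-to-one to page-table levels.
A hypothetical helper (not part of the patch) makes the mapping
explicit:

    /* Sketch: PAGE_ORDER_4K entries live in an L1 table,
     * PAGE_ORDER_2M entries are PSE entries at L2, and
     * PAGE_ORDER_1G entries sit at L3. */
    static int order_to_level(unsigned int order)
    {
        switch ( order )
        {
        case PAGE_ORDER_4K: return 1;
        case PAGE_ORDER_2M: return 2;
        case PAGE_ORDER_1G: return 3;
        default:            return -1; /* unsupported order */
        }
    }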
index 3ae5a49752ce5c222de3f6ba9acb0c2288225cf6..324d0f96cd613b53d8eed060a550f3bcb74ccf27 100644 (file)
@@ -149,10 +149,10 @@ int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     while ( todo )
     {
         if ( hap_enabled(d) )
-            order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << 18) - 1)) == 0) &&
-                      hvm_hap_has_1gb(d) && opt_hap_1gb ) ? 18 :
-                      ((((gfn | mfn_x(mfn) | todo) & ((1ul << 9) - 1)) == 0) &&
-                      hvm_hap_has_2mb(d) && opt_hap_2mb) ? 9 : 0;
+            order = ( (((gfn | mfn_x(mfn) | todo) & ((1ul << PAGE_ORDER_1G) - 1)) == 0) &&
+                      hvm_hap_has_1gb(d) && opt_hap_1gb ) ? PAGE_ORDER_1G :
+                      ((((gfn | mfn_x(mfn) | todo) & ((1ul << PAGE_ORDER_2M) - 1)) == 0) &&
+                      hvm_hap_has_2mb(d) && opt_hap_2mb) ? PAGE_ORDER_2M : PAGE_ORDER_4K;
         else
-            order = 0;
+            order = PAGE_ORDER_4K;
 
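
The order-selection expression above picks the largest order whose
alignment constraint holds for the gfn, the mfn and the remaining page
count alike.  A hedged standalone sketch of the same test (the helper
name is invented; the hvm_hap_has_* and opt_hap_* gates are omitted):

    /* An order is usable only if gfn, mfn and todo are all multiples
     * of 1 << order; OR-ing them lets one mask test cover all three. */
    static unsigned int pick_order(unsigned long gfn, unsigned long mfn,
                                   unsigned long todo)
    {
        unsigned long mask = gfn | mfn | todo;

        if ( !(mask & ((1UL << PAGE_ORDER_1G) - 1)) )
            return PAGE_ORDER_1G;
        if ( !(mask & ((1UL << PAGE_ORDER_2M) - 1)) )
            return PAGE_ORDER_2M;
        return PAGE_ORDER_4K;
    }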
index 332770598350580cb771966a48b310bda99237ef..59b42cc348bc19d065cb1ff770b3d7d004a075fe 100644 (file)
 #define PAGE_MASK           (~(PAGE_SIZE-1))
 #define PAGE_FLAG_MASK      (~0)
 
+#define PAGE_ORDER_4K       0
+#define PAGE_ORDER_2M       9
+#define PAGE_ORDER_1G       18
+
 #ifndef __ASSEMBLY__
 # include <asm/types.h>
 # include <xen/lib.h>
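
One caveat the new names make easier to get right: PAGE_ORDER_* values
are orders, not allocation flags.  In Xen's allocator the order is the
second argument and memflags the third, so (sketch, assuming the
standard alloc_domheap_pages() signature):

    /* Correct: the order in the order slot, 0 (no flags) as memflags. */
    struct page_info *pg = alloc_domheap_pages(d, PAGE_ORDER_2M, 0);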