tmem: partial adjustments for x86 16TB support
author Jan Beulich <jbeulich@suse.com>
Wed, 23 Jan 2013 13:14:34 +0000 (14:14 +0100)
committer Jan Beulich <jbeulich@suse.com>
Wed, 23 Jan 2013 13:14:34 +0000 (14:14 +0100)
Despite the changes below, tmem still has code that assumes it can
directly access all memory, or that maps arbitrary amounts of memory
which is not directly accessible. I cannot see how to fix this without
converting _all_ of its domheap allocations to xenheap ones, and even
then I wouldn't be certain that there are no other places where the
"all memory is always mapped" assumption gets broken. Therefore, the
next patch disables tmem for the time being if the full 1:1 mapping
isn't always visible.
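
For illustration only (not part of the patch), a minimal sketch of the
distinction: once the full 1:1 map is gone, a domheap page needs a
transient mapping before it can be touched, whereas a xenheap page is
always directly accessible. The allocator and mapping functions used
are real Xen APIs; the surrounding code is hypothetical.

    /* Hypothetical sketch; error handling omitted. */
    struct page_info *dp = alloc_domheap_page(NULL, 0);
    void *dva = __map_domain_page(dp);  /* transient mapping required */
    /* ... access the page through dva ... */
    unmap_domain_page(dva);             /* mapping must be torn down */

    void *xva = alloc_xenheap_page();   /* always mapped 1:1 */
    /* ... access the page through xva, no map/unmap needed ... */
    free_xenheap_page(xva);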

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Acked-by: Keir Fraser <keir@xen.org>
xen/common/tmem_xen.c

index 25fbd6c39b7394d2a9b09f549b4d4149e9790b96..3a1f3c912b214b5d9d206137d0be5194d2905e40 100644
@@ -393,7 +393,8 @@ static void tmh_persistent_pool_page_put(void *page_va)
     struct page_info *pi;
 
     ASSERT(IS_PAGE_ALIGNED(page_va));
-    pi = virt_to_page(page_va);
+    pi = mfn_to_page(domain_page_map_to_mfn(page_va));
+    unmap_domain_page(page_va);
     ASSERT(IS_VALID_PAGE(pi));
     _tmh_free_page_thispool(pi);
 }
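
Note: with the full 1:1 map no longer guaranteed, virt_to_page() is
only valid for xenheap addresses, so the put path above recovers the
page from its transient mapping via domain_page_map_to_mfn() and then
drops that mapping before freeing. The matching get path is not shown
in this hunk; a hedged sketch of the pattern it would have to follow
(pool_page_get_sketch is a made-up name, not the actual tmem function):

    /* Hypothetical sketch: hand out a transient mapping of a freshly
     * allocated pool page; the put path above unmaps it. */
    static void *pool_page_get_sketch(struct domain *d)
    {
        struct page_info *pi = alloc_domheap_page(d, 0);

        return pi ? __map_domain_page(pi) : NULL;
    }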
@@ -441,39 +442,28 @@ static int cpu_callback(
     {
     case CPU_UP_PREPARE: {
         if ( per_cpu(dstmem, cpu) == NULL )
-        {
-            struct page_info *p = alloc_domheap_pages(0, dstmem_order, 0);
-            per_cpu(dstmem, cpu) = p ? page_to_virt(p) : NULL;
-        }
+            per_cpu(dstmem, cpu) = alloc_xenheap_pages(dstmem_order, 0);
         if ( per_cpu(workmem, cpu) == NULL )
-        {
-            struct page_info *p = alloc_domheap_pages(0, workmem_order, 0);
-            per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL;
-        }
+            per_cpu(workmem, cpu) = alloc_xenheap_pages(workmem_order, 0);
         if ( per_cpu(scratch_page, cpu) == NULL )
-        {
-            struct page_info *p = alloc_domheap_page(NULL, 0);
-            per_cpu(scratch_page, cpu) = p ? page_to_virt(p) : NULL;
-        }
+            per_cpu(scratch_page, cpu) = alloc_xenheap_page();
         break;
     }
     case CPU_DEAD:
     case CPU_UP_CANCELED: {
         if ( per_cpu(dstmem, cpu) != NULL )
         {
-            struct page_info *p = virt_to_page(per_cpu(dstmem, cpu));
-            free_domheap_pages(p, dstmem_order);
+            free_xenheap_pages(per_cpu(dstmem, cpu), dstmem_order);
             per_cpu(dstmem, cpu) = NULL;
         }
         if ( per_cpu(workmem, cpu) != NULL )
         {
-            struct page_info *p = virt_to_page(per_cpu(workmem, cpu));
-            free_domheap_pages(p, workmem_order);
+            free_xenheap_pages(per_cpu(workmem, cpu), workmem_order);
             per_cpu(workmem, cpu) = NULL;
         }
         if ( per_cpu(scratch_page, cpu) != NULL )
         {
-            free_domheap_page(virt_to_page(per_cpu(scratch_page, cpu)));
+            free_xenheap_page(per_cpu(scratch_page, cpu));
             per_cpu(scratch_page, cpu) = NULL;
         }
         break;
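
The per-CPU buffers are moved to the xenheap rather than being mapped
on demand because map_domain_page() covers one 4k frame at a time, so a
multi-page (order > 0) domheap buffer such as dstmem or workmem would
have no contiguous virtual mapping once the full 1:1 map is gone. A
hedged sketch of the pattern that stops working (illustrative only, not
from the patch):

    /* Hypothetical: only the first 4k frame of the order-N allocation
     * ends up mapped, so the rest of the buffer is inaccessible. */
    struct page_info *p = alloc_domheap_pages(NULL, dstmem_order, 0);
    void *va = p ? __map_domain_page(p) : NULL;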