Replace the MMUEXTOP 'pfn hole' commands with a new arch-specific
XENMEM_ operation.

author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
           Sat, 28 Jan 2006 14:31:43 +0000 (15:31 +0100)
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
           Sat, 28 Jan 2006 14:31:43 +0000 (15:31 +0100)

Signed-off-by: Keir Fraser <keir@xensource.com>
13 files changed:
tools/libxc/xc_linux_build.c
tools/libxc/xc_misc.c
tools/libxc/xc_private.c
tools/libxc/xenctrl.h
xen/arch/x86/mm.c
xen/arch/x86/x86_32/mm.c
xen/arch/x86/x86_64/mm.c
xen/include/asm-x86/config.h
xen/include/asm-x86/domain.h
xen/include/asm-x86/mm.h
xen/include/public/memory.h
xen/include/public/xen.h
xen/include/xen/sched.h

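For orientation before the diff: a minimal sketch of how a toolstack caller is expected to use the new operation, modelled on the xc_linux_build.c hunk below. The include paths and the helper name print_reserved_area are assumptions for illustration; only xc_memory_op(), XENMEM_reserved_phys_area and struct xen_reserved_phys_area come from this changeset.

    /*
     * Sketch only: look up the reserved pseudophysical area ('PFN hole') of a
     * shadow-translated guest via the new XENMEM_reserved_phys_area call.
     * The first frame of the area holds the shared-info page; the grant-table
     * frames follow it (see the xc_linux_build.c and xen/arch/x86/mm.c hunks).
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <xenctrl.h>          /* xc_memory_op()                 */
    #include <xen/memory.h>       /* struct xen_reserved_phys_area  */

    static int print_reserved_area(int xc_handle, uint32_t dom)
    {
        struct xen_reserved_phys_area xrpa;
        int rc;

        xrpa.domid = dom;
        xrpa.idx   = 0;   /* no guest currently has more than one area */

        rc = xc_memory_op(xc_handle, XENMEM_reserved_phys_area, &xrpa);
        if ( rc != 0 )
            return rc;    /* e.g. -ESRCH for a non-translated guest */

        printf("dom%u reserved area: first_pfn=%#lx nr_pfns=%lu\n",
               dom, xrpa.first_pfn, xrpa.nr_pfns);
        return 0;
    }

In this changeset the only consumer is xc_linux_build.c, which uses first_pfn as the guest's view of the shared-info frame when shadow translate mode is enabled.
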
index 9ad93e27bd4506a4a09af2c254f654b83b5f3372..0744acb64afa7f0fb50731d196d228c2b9bbf294 100644
@@ -114,9 +114,12 @@ static int setup_pg_tables(int xc_handle, uint32_t dom,
             vl2e++;
         }
 
-        if (shadow_mode_enabled) {
+        if ( shadow_mode_enabled )
+        {
             *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        } else {
+        }
+        else
+        {
             *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) && 
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
@@ -196,9 +199,12 @@ static int setup_pg_tables_pae(int xc_handle, uint32_t dom,
                 *vl2e++ = l1tab | L2_PROT;
         }
         
-        if (shadow_mode_enabled) {
+        if ( shadow_mode_enabled )
+        {
             *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        } else {
+        }
+        else
+        {
             *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
@@ -289,9 +295,12 @@ static int setup_pg_tables_64(int xc_handle, uint32_t dom,
             vl2e++;
         }
         
-        if (shadow_mode_enabled) {
+        if ( shadow_mode_enabled )
+        {
             *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        } else {
+        }
+        else
+        {
             *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) ) 
@@ -442,7 +451,9 @@ static int setup_guest(int xc_handle,
     {
         ctxt->initrd.start    = vinitrd_start;
         ctxt->initrd.size     = initrd_len;
-    } else {
+    }
+    else
+    {
         ctxt->initrd.start    = 0;
         ctxt->initrd.size     = 0;
     }
@@ -553,12 +564,15 @@ static int setup_guest(int xc_handle,
         if ( (v_end - vstack_end) < (512UL << 10) )
             v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
 #if defined(__i386__)
-        if (dsi.pae_kernel) {
+        if ( dsi.pae_kernel )
+        {
             /* FIXME: assumes one L2 pgtable @ 0xc0000000 */
             if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT_PAE)-1)) >> 
                    L2_PAGETABLE_SHIFT_PAE) + 2) <= nr_pt_pages )
                 break;
-        } else {
+        }
+        else
+        {
             if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >> 
                    L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
                 break;
@@ -676,23 +690,33 @@ static int setup_guest(int xc_handle,
     if ( xc_finish_mmu_updates(xc_handle, mmu) )
         goto error_out;
 
-    if (shadow_mode_enabled) {
+    if ( shadow_mode_enabled )
+    {
+        struct xen_reserved_phys_area xrpa;
+
         /* Enable shadow translate mode */
-        if (xc_shadow_control(xc_handle, dom,
-                              DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
-                              NULL, 0, NULL) < 0) {
+        if ( xc_shadow_control(xc_handle, dom,
+                               DOM0_SHADOW_CONTROL_OP_ENABLE_TRANSLATE,
+                               NULL, 0, NULL) < 0 )
+        {
             PERROR("Could not enable translation mode");
             goto error_out;
         }
 
         /* Find the shared info frame.  It's guaranteed to be at the
            start of the PFN hole. */
-        guest_shared_info_mfn = xc_get_pfn_hole_start(xc_handle, dom);
-        if (guest_shared_info_mfn <= 0) {
+        xrpa.domid = dom;
+        xrpa.idx   = 0;
+        rc = xc_memory_op(xc_handle, XENMEM_reserved_phys_area, &xrpa);
+        if ( rc != 0 )
+        {
             PERROR("Cannot find shared info pfn");
             goto error_out;
         }
-    } else {
+        guest_shared_info_mfn = xrpa.first_pfn;
+    }
+    else
+    {
         guest_shared_info_mfn = shared_info_frame;
     }
 
@@ -723,12 +747,16 @@ static int setup_guest(int xc_handle,
      * Pin down l2tab addr as page dir page - causes hypervisor to provide
      * correct protection for the page
      */
-    if (!shadow_mode_enabled) {
-        if (dsi.pae_kernel) {
+    if ( !shadow_mode_enabled )
+    {
+        if ( dsi.pae_kernel )
+        {
             if ( pin_table(xc_handle, MMUEXT_PIN_L3_TABLE,
                            ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
                 goto error_out;
-        } else {
+        }
+        else
+        {
             if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE,
                            ctxt->ctrlreg[3] >> PAGE_SHIFT, dom) )
                 goto error_out;
@@ -751,10 +779,13 @@ static int setup_guest(int xc_handle,
     if ( xc_clear_domain_page(xc_handle, dom, *store_mfn) ||
          xc_clear_domain_page(xc_handle, dom, *console_mfn) )
         goto error_out;
-    if (shadow_mode_enabled) {
+    if ( shadow_mode_enabled )
+    {
         guest_store_mfn = (vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT;
         guest_console_mfn = (vconsole_start-dsi.v_start) >> PAGE_SHIFT;
-    } else {
+    }
+    else
+    {
         guest_store_mfn = *store_mfn;
         guest_console_mfn = *console_mfn;
     }
index 1f5ae861a54273be82b5dbcb79ed92050b8e9de3..da8dcc946d4d587c0601d96d8774d513070b4b77 100644
@@ -131,13 +131,6 @@ int xc_msr_write(int xc_handle, int cpu_mask, int msr, unsigned int low,
     return rc;
 }
 
-int xc_get_pfn_hole_start(int xc_handle, domid_t dom)
-{
-    struct mmuext_op op = {0};
-    op.cmd = MMUEXT_PFN_HOLE_BASE;
-    return xc_mmuext_op(xc_handle, &op, 1, dom);
-}
-
 
 /*
  * Local variables:
index 75556f6847cad04feb3c68366fd78ecbea77031f..6b58e1ddf25accedcbfced9f4d22e688cd781002 100644
@@ -201,6 +201,7 @@ int xc_memory_op(int xc_handle,
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
+    case XENMEM_populate_physmap:
         if ( mlock(reservation, sizeof(*reservation)) != 0 )
         {
             PERROR("Could not mlock");
@@ -229,6 +230,13 @@ int xc_memory_op(int xc_handle,
             goto out1;
         }
         break;
+    case XENMEM_reserved_phys_area:
+        if ( mlock(arg, sizeof(struct xen_reserved_phys_area)) )
+        {
+            PERROR("Could not mlock");
+            goto out1;
+        }
+        break;
     }
 
     ret = do_xen_hypercall(xc_handle, &hypercall);
@@ -237,6 +245,7 @@ int xc_memory_op(int xc_handle,
     {
     case XENMEM_increase_reservation:
     case XENMEM_decrease_reservation:
+    case XENMEM_populate_physmap:
         safe_munlock(reservation, sizeof(*reservation));
         if ( reservation->extent_start != NULL )
             safe_munlock(reservation->extent_start,
@@ -247,6 +256,9 @@ int xc_memory_op(int xc_handle,
         safe_munlock(xmml->extent_start,
                      xmml->max_extents * sizeof(unsigned long));
         break;
+    case XENMEM_reserved_phys_area:
+        safe_munlock(arg, sizeof(struct xen_reserved_phys_area));
+        break;
     }
 
  out1:
index 6585d1a6e276278a8ce77388f798eaccab147fce..91d717c55c7ea20cc8fe9303458329f5496f2ace 100644
@@ -528,6 +528,4 @@ int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu,
                    unsigned long long ptr, unsigned long long val);
 int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
 
-int xc_get_pfn_hole_start(int xc_handle, domid_t dom);
-
 #endif
index 1ee802a0163d7c079c5be04e182c67e49303f069..4788c2ba1fba329af21a16337ec68cf4601a2b1a 100644
 #include <asm/uaccess.h>
 #include <asm/ldt.h>
 #include <asm/x86_emulate.h>
+#include <public/memory.h>
 
 #ifdef VERBOSE
 #define MEM_LOG(_f, _a...)                           \
@@ -1930,56 +1931,6 @@ int do_mmuext_op(
             break;
         }
 
-        case MMUEXT_PFN_HOLE_BASE:
-        {
-            if (FOREIGNDOM->start_pfn_hole) {
-                rc = FOREIGNDOM->start_pfn_hole;
-                okay = 1;
-            } else {
-                rc = FOREIGNDOM->start_pfn_hole =
-                    FOREIGNDOM->max_pages;
-                okay = 1;
-                if (shadow_mode_translate(FOREIGNDOM)) {
-                    /* Fill in a few entries in the hole.  At the
-                       moment, this means the shared info page and the
-                       grant table pages. */
-                    struct domain_mmap_cache c1, c2;
-                    unsigned long pfn, mfn, x;
-                    domain_mmap_cache_init(&c1);
-                    domain_mmap_cache_init(&c2);
-                    shadow_lock(FOREIGNDOM);
-                    pfn = FOREIGNDOM->start_pfn_hole;
-                    mfn = virt_to_phys(FOREIGNDOM->shared_info) >> PAGE_SHIFT;
-                    set_p2m_entry(FOREIGNDOM, pfn, mfn, &c1, &c2);
-                    set_pfn_from_mfn(mfn, pfn);
-                    pfn++;
-                    for (x = 0; x < NR_GRANT_FRAMES; x++) {
-                        mfn = gnttab_shared_mfn(FOREIGNDOM,
-                                                FOREIGNDOM->grant_table,
-                                                x);
-                        set_p2m_entry(FOREIGNDOM, pfn, mfn, &c1, &c2);
-                        set_pfn_from_mfn(mfn, pfn);
-                        pfn++;
-                    }
-                    shadow_unlock(FOREIGNDOM);
-                    domain_mmap_cache_destroy(&c1);
-                    domain_mmap_cache_destroy(&c2);
-                }
-            }
-            break;
-        }
-
-        case MMUEXT_PFN_HOLE_SIZE:
-        {
-            if (shadow_mode_translate(FOREIGNDOM)) {
-                rc = PFN_HOLE_SIZE;
-            } else {
-                rc = 0;
-            }
-            okay = 1;
-            break;
-        }
-
         default:
             MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
             okay = 0;
@@ -2815,6 +2766,62 @@ long do_update_descriptor(u64 pa, u64 desc)
 }
 
 
+long arch_memory_op(int op, void *arg)
+{
+    struct xen_reserved_phys_area xrpa;
+    unsigned long pfn;
+    struct domain *d;
+    unsigned int i;
+
+    switch ( op )
+    {
+    case XENMEM_reserved_phys_area:
+        if ( copy_from_user(&xrpa, arg, sizeof(xrpa)) )
+            return -EFAULT;
+
+        /* No guest has more than one reserved area. */
+        if ( xrpa.idx != 0 )
+            return -ESRCH;
+
+        if ( (d = find_domain_by_id(xrpa.domid)) == NULL )
+            return -ESRCH;
+
+        /* Only initialised translated guests have a reserved area. */
+        if ( !shadow_mode_translate(d) || (d->max_pages == 0) )
+        {
+            put_domain(d);
+            return -ESRCH;
+        }
+
+        LOCK_BIGLOCK(d);
+        if ( d->arch.first_reserved_pfn == 0 )
+        {
+            d->arch.first_reserved_pfn = pfn = d->max_pages;
+            guest_physmap_add_page(
+                d, pfn + 0, virt_to_phys(d->shared_info) >> PAGE_SHIFT);
+            for ( i = 0; i < NR_GRANT_FRAMES; i++ )
+                guest_physmap_add_page(
+                    d, pfn + 1 + i, gnttab_shared_mfn(d, d->grant_table, i));
+        }
+        UNLOCK_BIGLOCK(d);
+
+        xrpa.first_pfn = d->arch.first_reserved_pfn;
+        xrpa.nr_pfns   = 32;
+
+        put_domain(d);
+
+        if ( copy_to_user(arg, &xrpa, sizeof(xrpa)) )
+            return -EFAULT;
+
+        break;
+
+    default:
+        return subarch_memory_op(op, arg);
+    }
+
+    return 0;
+}
+
 
 /*************************
  * Writable Pagetables
index 95def3f2b4ad1564983019bf3d93a878ec4af9df..f3aac93ee3fa79d8accb8dc8e15ca3aa5bc2c69c 100644
@@ -182,7 +182,7 @@ void subarch_init_memory(struct domain *dom_xen)
     }
 }
 
-long arch_memory_op(int op, void *arg)
+long subarch_memory_op(int op, void *arg)
 {
     struct xen_machphys_mfn_list xmml;
     unsigned long mfn;
index 085fb4d22e485613a4ce17efbb43d557056bf1fd..008da7d2fffd50390b5e3a14e7abca42e1ba9950 100644
@@ -182,7 +182,7 @@ void subarch_init_memory(struct domain *dom_xen)
     }
 }
 
-long arch_memory_op(int op, void *arg)
+long subarch_memory_op(int op, void *arg)
 {
     struct xen_machphys_mfn_list xmml;
     l3_pgentry_t l3e;
index b6376d3d6bc2629f6fb686030122342ed7f4ab6f..d5d0e0acb003c76270a1e733f83bb9ff33f0060a 100644
@@ -60,9 +60,6 @@
 #endif
 #endif
 
-/* How large is the PFN reserved area, when we have one? */
-#define PFN_HOLE_SIZE 32
-
 #ifndef STACK_ORDER
 #define STACK_ORDER 1
 #endif
index 687dce5a3007f14a8346df0f1b690989c0f5924b..e4d3f6c03ab45536fbc9f865fa01857e42c0e1f9 100644
@@ -109,6 +109,9 @@ struct arch_domain
 
     pagetable_t         phys_table;         /* guest 1:1 pagetable */
     struct vmx_platform vmx_platform;
+
+    /* Shadow-translated guest: Pseudophys base address of reserved area. */
+    unsigned long first_reserved_pfn;
 } __cacheline_aligned;
 
 struct arch_vcpu
index 2e0937ea2dfdc5d9e4a30eed6cfbd1a46ee3f33f..515fad611bb11193a76ff9564e98f02950429a86 100644
@@ -382,5 +382,6 @@ int __sync_lazy_execstate(void);
 
 /* Arch-specific portion of memory_op hypercall. */
 long arch_memory_op(int op, void *arg);
+long subarch_memory_op(int op, void *arg);
 
 #endif /* __ASM_X86_MM_H__ */
index 70c8197a17b9d068ea0214afb79b5b5ff95bdf41..a3a45eb04079dc2b365e31a2fc706abf7077eaa4 100644
@@ -94,6 +94,26 @@ typedef struct xen_machphys_mfn_list {
     unsigned int nr_extents;
 } xen_machphys_mfn_list_t;
 
+/*
+ * Returns the base and size of the specified reserved 'RAM hole' in the
+ * specified guest's pseudophysical address space.
+ * arg == addr of xen_reserved_phys_area_t.
+ */
+#define XENMEM_reserved_phys_area   7
+typedef struct xen_reserved_phys_area {
+    /* Which request to report about? */
+    domid_t domid;
+
+    /*
+     * Which reserved area to report? Out-of-range request reports
+     * -ESRCH. Currently no architecture will have more than one reserved area.
+     */
+    unsigned int idx;
+
+    /* Base and size of the specified reserved area. */
+    unsigned long first_pfn, nr_pfns;
+} xen_reserved_phys_area_t;
+
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
 /*
index 0eda94abdbc8850bf4857a92edaead65f8b733a9..9baf7f0bbe5ae8339428eb5e9fd683b819133af4 100644
  * cmd: MMUEXT_SET_LDT
  * linear_addr: Linear address of LDT base (NB. must be page-aligned).
  * nr_ents: Number of entries in LDT.
- *
- * cmd: MMUEXT_PFN_HOLE_BASE
- * No additional arguments.  Returns the first pfn in the Xen-reserved
- * pfn hole.  Note that we delay allocating the hole until the first
- * time this is called.
- *
- * cmd: MMUEXT_PFN_HOLE_SIZE
- * No additional arguments.  Returns the number of pfns in the
- * Xen-reserved pfn hole.
  */
 #define MMUEXT_PIN_L1_TABLE      0
 #define MMUEXT_PIN_L2_TABLE      1
 #define MMUEXT_FLUSH_CACHE      12
 #define MMUEXT_SET_LDT          13
 #define MMUEXT_NEW_USER_BASEPTR 15
-#define MMUEXT_PFN_HOLE_BASE    16
-#define MMUEXT_PFN_HOLE_SIZE    17
 
 #ifndef __ASSEMBLY__
 struct mmuext_op {
index f164a7cc7a41b6c10cfc7bcfb7a8857fb6a17e41..0d265badb8a491f1c6e924295b3b5a27d2dc1af6 100644
@@ -153,9 +153,6 @@ struct domain
 
     /* Control-plane tools handle for this domain. */
     xen_domain_handle_t handle;
-
-    /* Start of the PFN hole */
-    unsigned long start_pfn_hole;
 };
 
 struct domain_setup_info