Clean up shadow-translate xen patches. Add abstractions
authorkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 28 Jan 2006 12:01:19 +0000 (13:01 +0100)
committerkaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Sat, 28 Jan 2006 12:01:19 +0000 (13:01 +0100)
for adding/removing pages from a translated guest's
physmap. Define dummy functions so other architectures
will continue to build without errors.

Remove setting of XENFEAT_writable_mmu_structures. This
should be set only if the hypervisor supports writable
mappings of all MMU structures (all page tables and
descriptor tables). If we want a mode where only PTEs
can be writable, we should add a feature flag for that
(but I don't think this is a useful mode to support).

TODO: The call to get the pfn hole should be a
XENMEM_ function, not another MMUEXT_OP (hopefully the
latter hypercall is not going to grow any more as it's
gross enough already).

Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/domain.c
xen/common/grant_table.c
xen/common/kernel.c
xen/common/memory.c
xen/include/asm-x86/shadow.h
xen/include/xen/shadow.h

index 2a38f4afbcdc97ec74fa332df1db28c4921b667e..efdb3b49762b13d124072311a182e98fa02e537c 100644 (file)
@@ -348,7 +348,6 @@ int arch_set_info_guest(
     struct domain *d = v->domain;
     unsigned long phys_basetab;
     int i, rc;
-    unsigned got_basetab_type;
 
     /*
      * This is sufficient! If the descriptor DPL differs from CS RPL then we'll
@@ -408,27 +407,25 @@ int arch_set_info_guest(
 
     v->arch.guest_table = mk_pagetable(phys_basetab);
 
-    got_basetab_type = 0;
+    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
+        return rc;
+
     if ( shadow_mode_refcounts(d) )
     {
         if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
+        {
+            destroy_gdt(v);
             return -EINVAL;
+        }
     }
     else if ( !(c->flags & VGCF_VMX_GUEST) )
     {
         if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
                                 PGT_base_page_table) )
+        {
+            destroy_gdt(v);
             return -EINVAL;
-        got_basetab_type = 1;
-    }
-
-    if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
-    {
-        if (got_basetab_type)
-            put_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT));
-        else
-            put_page(pfn_to_page(phys_basetab>>PAGE_SHIFT));
-        return rc;
+        }
     }
 
     if ( c->flags & VGCF_VMX_GUEST )
index 30ac6a5f0c09f8eb0b36c72323ec8ee5f69c385d..e8b5d05b8aaa75599a84aa4265fa6971598f3ff8 100644 (file)
@@ -521,7 +521,8 @@ gnttab_setup_table(
     {
         ASSERT(d->grant_table != NULL);
         (void)put_user(GNTST_okay, &uop->status);
-        for ( i = 0; i < op.nr_frames; i++ ) {
+        for ( i = 0; i < op.nr_frames; i++ )
+        {
             mfn = __mfn_to_gpfn(d, gnttab_shared_mfn(d, d->grant_table, i));
             (void)put_user(mfn, &op.frame_list[i]);
         }
@@ -709,7 +710,7 @@ gnttab_transfer(
     int i;
     grant_entry_t *sha;
     gnttab_transfer_t gop;
-    unsigned long real_mfn;
+    unsigned long mfn;
 
     for ( i = 0; i < count; i++ )
     {
@@ -730,8 +731,8 @@ gnttab_transfer(
             continue;
         }
 
-        real_mfn = __gpfn_to_mfn(d, gop.mfn);
-        page = pfn_to_page(real_mfn);
+        mfn = __gpfn_to_mfn(d, gop.mfn);
+        page = pfn_to_page(mfn);
         if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
         { 
             DPRINTK("gnttab_transfer: xen frame %lx\n",
@@ -792,21 +793,8 @@ gnttab_transfer(
 
         /* Tell the guest about its new page frame. */
         sha = &e->grant_table->shared[gop.ref];
-        if (shadow_mode_translate(e)) {
-            struct domain_mmap_cache c1, c2;
-            unsigned long pfn = sha->frame;
-            domain_mmap_cache_init(&c1);
-            domain_mmap_cache_init(&c2);
-            shadow_lock(e);
-            shadow_sync_and_drop_references(e, page);
-            set_p2m_entry(e, pfn, real_mfn, &c1, &c2);
-            set_pfn_from_mfn(real_mfn, pfn);
-            shadow_unlock(e);
-            domain_mmap_cache_destroy(&c1);
-            domain_mmap_cache_destroy(&c2);
-        } else {
-            sha->frame = real_mfn;
-        }
+        guest_physmap_add_page(e, sha->frame, mfn);
+        sha->frame = mfn;
         wmb();
         sha->flags |= GTF_transfer_completed;
 
index 2edf64fae016de16c206a5e8c11c3a74ff1931b2..3a4e420bc1336c91abe14c0ef7d299d1e3107535 100644 (file)
@@ -13,7 +13,6 @@
 #include <asm/current.h>
 #include <public/nmi.h>
 #include <public/version.h>
-#include <asm/shadow.h>
 
 void cmdline_parse(char *cmdline)
 {
@@ -156,10 +155,7 @@ long do_xen_version(int cmd, void *arg)
         switch ( fi.submap_idx )
         {
         case 0:
-            if (shadow_mode_wr_pt_pte(current->domain))
-                fi.submap = XENFEAT_writable_mmu_structures;
-            else
-                fi.submap = 0;
+            fi.submap = 0;
             break;
         default:
             return -EINVAL;
index b4ab57593fcb8bbc6ade375bb0ce53e7f6c4cada..dadcf25afefbfe4cc9e145c0e32dd837840b6a13 100644 (file)
@@ -75,9 +75,8 @@ populate_physmap(
     unsigned int   flags,
     int           *preempted)
 {
-    struct pfn_info         *page;
-    unsigned long            i, j, pfn, mfn;
-    struct domain_mmap_cache cache1, cache2;
+    struct pfn_info *page;
+    unsigned long    i, j, pfn, mfn;
 
     if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
         return 0;
@@ -86,12 +85,6 @@ populate_physmap(
          !multipage_allocation_permitted(current->domain) )
         return 0;
 
-    if (shadow_mode_translate(d)) {
-        domain_mmap_cache_init(&cache1);
-        domain_mmap_cache_init(&cache2);
-        shadow_lock(d);
-    }
-
     for ( i = 0; i < nr_extents; i++ )
     {
         if ( hypercall_preempt_check() )
@@ -114,13 +107,16 @@ populate_physmap(
         if ( unlikely(__get_user(pfn, &extent_list[i]) != 0) )
             goto out;
 
-        for ( j = 0; j < (1 << extent_order); j++ ) {
-            if (shadow_mode_translate(d))
-                set_p2m_entry(d, pfn + j, mfn + j, &cache1, &cache2);
-            set_pfn_from_mfn(mfn + j, pfn + j);
+        if ( unlikely(shadow_mode_translate(d)) )
+        {
+            for ( j = 0; j < (1 << extent_order); j++ )
+                guest_physmap_add_page(d, pfn + j, mfn + j);
         }
+        else
+        {
+            for ( j = 0; j < (1 << extent_order); j++ )
+                set_pfn_from_mfn(mfn + j, pfn + j);
 
-        if (!shadow_mode_translate(d)) {
             /* Inform the domain of the new page's machine address. */ 
             if ( __put_user(mfn, &extent_list[i]) != 0 )
                 goto out;
@@ -128,12 +124,6 @@ populate_physmap(
     }
 
  out:
-    if (shadow_mode_translate(d)) {
-        shadow_unlock(d);
-        domain_mmap_cache_destroy(&cache1);
-        domain_mmap_cache_destroy(&cache2);
-    }
-
     return i;
 }
     
@@ -168,8 +158,8 @@ decrease_reservation(
             mfn = __gpfn_to_mfn(d, gpfn + j);
             if ( unlikely(mfn >= max_page) )
             {
-                DPRINTK("Domain %u page number out of range (%lx(%lx) >= %lx)\n", 
-                        d->domain_id, mfn, gpfn, max_page);
+                DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
+                        d->domain_id, mfn, max_page);
                 return i;
             }
             
@@ -186,18 +176,8 @@ decrease_reservation(
             if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                 put_page(page);
 
-            if (shadow_mode_translate(d)) {
-                struct domain_mmap_cache c1, c2;
-                domain_mmap_cache_init(&c1);
-                domain_mmap_cache_init(&c2);
-                shadow_lock(d);
-                shadow_sync_and_drop_references(d, page);
-                set_p2m_entry(d, gpfn + j, -1, &c1, &c2);
-                set_pfn_from_mfn(mfn + j, INVALID_M2P_ENTRY);
-                shadow_unlock(d);
-                domain_mmap_cache_destroy(&c1);
-                domain_mmap_cache_destroy(&c2);
-            }
+            guest_physmap_remove_page(d, gpfn + j, mfn);
+
             put_page(page);
         }
     }
index 6d36d111171a215cd25a431ae12b0c21699eba27..2cab9fc69e3d0cabfe66429a9da84f033f59b4b8 100644 (file)
@@ -636,6 +636,44 @@ static inline void shadow_sync_and_drop_references(
 }
 #endif
 
+static inline void guest_physmap_add_page(
+    struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+    struct domain_mmap_cache c1, c2;
+
+    if ( likely(!shadow_mode_translate(d)) )
+        return;
+
+    domain_mmap_cache_init(&c1);
+    domain_mmap_cache_init(&c2);
+    shadow_lock(d);
+    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
+    set_p2m_entry(d, gpfn, mfn, &c1, &c2);
+    set_pfn_from_mfn(mfn, gpfn);
+    shadow_unlock(d);
+    domain_mmap_cache_destroy(&c1);
+    domain_mmap_cache_destroy(&c2);
+}
+
+static inline void guest_physmap_remove_page(
+    struct domain *d, unsigned long gpfn, unsigned long mfn)
+{
+    struct domain_mmap_cache c1, c2;
+
+    if ( likely(!shadow_mode_translate(d)) )
+        return;
+
+    domain_mmap_cache_init(&c1);
+    domain_mmap_cache_init(&c2);
+    shadow_lock(d);
+    shadow_sync_and_drop_references(d, pfn_to_page(mfn));
+    set_p2m_entry(d, gpfn, -1, &c1, &c2);
+    set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+    shadow_unlock(d);
+    domain_mmap_cache_destroy(&c1);
+    domain_mmap_cache_destroy(&c2);
+}
+
 /************************************************************************/
 
 /*
index a69bd59802da627cadb06f84508bfe188c5089cb..9bedd205cea456d9494d27173d51685a0ad3fef8 100644 (file)
 
 #else
 
-#define shadow_drop_references(_d, _p)          ((void)0)
-#define shadow_sync_and_drop_references(_d, _p) ((void)0)
+#define shadow_drop_references(d, p)          ((void)0)
+#define shadow_sync_and_drop_references(d, p) ((void)0)
+
+#define shadow_mode_translate(d)              (0)
+
+#define __gpfn_to_mfn(d, p)                   (p)
+#define __mfn_to_gpfn(d, p)                   (p)
+#define guest_physmap_add_page(d, p, m)       ((void)0)
+#define guest_physmap_remove_page(d, p, m)    ((void)0)
 
 #endif