x86: rename p2m_mmio_write_dm to p2m_ioreq_server
author Paul Durrant <paul.durrant@citrix.com>
Thu, 4 Aug 2016 08:01:17 +0000 (10:01 +0200)
committer Jan Beulich <jbeulich@suse.com>
Thu, 4 Aug 2016 08:01:17 +0000 (10:01 +0200)
Previously the p2m type p2m_mmio_write_dm was introduced for write-
protected memory pages whose write operations are supposed to be
forwarded to, and emulated by, an ioreq server. Yet the rangeset used
to track such pages restricts the number of guest pages that can be
write-protected.

This patch renames the p2m type p2m_mmio_write_dm to p2m_ioreq_server,
meaning that this p2m type as a whole can be claimed by a single ioreq
server, instead of individual pages being tracked inside an ioreq
server's rangeset. A new memory type, HVMMEM_ioreq_server, is
introduced in the HVMOP_set/get_mem_type interface to set/get this
p2m type.
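
As an illustration (not part of this patch), a device model's
toolstack side could claim pages using the new type roughly as in
the sketch below. It assumes libxc's xc_hvm_set_mem_type() wrapper;
check xenctrl.h in your tree for the exact prototype:

    #include <xenctrl.h>

    /*
     * Illustrative only: mark a range of guest pages as
     * HVMMEM_ioreq_server, so that guest writes to them are forwarded
     * to an ioreq server while reads are still satisfied from RAM.
     */
    static int claim_pages_for_ioreq_server(xc_interface *xch,
                                            domid_t domid,
                                            uint64_t first_pfn,
                                            uint32_t nr)
    {
        return xc_hvm_set_mem_type(xch, domid, HVMMEM_ioreq_server,
                                   first_pfn, nr);
    }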

Follow-up patches will add the related HVMOP handling code to map/unmap
type p2m_ioreq_server to/from an ioreq server. Until those patches are
in place, memory type changes to HVMMEM_ioreq_server are still allowed;
in that case p2m_ioreq_server pages are treated the same as pages of
the previous type p2m_mmio_write_dm, i.e. they are tracked inside the
ioreq server's rangeset.
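
To make that behaviour concrete, the nested-page-fault condition
changed below can be read as the following standalone predicate (an
illustrative sketch only, mirroring hvm_hap_nested_page_fault() and
assuming Xen-internal definitions from asm/p2m.h):

    /*
     * Illustrative only: a fault is handed to the device model either
     * for full MMIO emulation, or when it is a write to a
     * write-protected type (discard-write or, after this patch,
     * p2m_ioreq_server).
     */
    static bool_t fault_needs_device_model(p2m_type_t p2mt,
                                           bool_t write_access)
    {
        return (p2mt == p2m_mmio_dm) ||
               (write_access &&
                (p2m_is_discard_write(p2mt) ||
                 (p2mt == p2m_ioreq_server)));
    }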

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>

xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm/p2m-ept.c
xen/arch/x86/mm/p2m-pt.c
xen/arch/x86/mm/shadow/multi.c
xen/include/asm-x86/p2m.h
xen/include/public/hvm/hvm_op.h

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index daaee1d63b890a693fb9175086a8f5f0baf0d858..229255d9872f5030a3012606b617a14e72300f09 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1863,7 +1863,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
      */
     if ( (p2mt == p2m_mmio_dm) || 
          (npfec.write_access &&
-          (p2m_is_discard_write(p2mt) || (p2mt == p2m_mmio_write_dm))) )
+          (p2m_is_discard_write(p2mt) || (p2mt == p2m_ioreq_server))) )
     {
         __put_gfn(p2m, gfn);
         if ( ap2m_active )
@@ -5563,6 +5563,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             get_gfn_query_unlocked(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
+            else if ( t == p2m_ioreq_server )
+                a.mem_type = HVMMEM_ioreq_server;
             else if ( p2m_is_readonly(t) )
                 a.mem_type =  HVMMEM_ram_ro;
             else if ( p2m_is_ram(t) )
@@ -5593,7 +5595,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             [HVMMEM_ram_rw]  = p2m_ram_rw,
             [HVMMEM_ram_ro]  = p2m_ram_ro,
             [HVMMEM_mmio_dm] = p2m_mmio_dm,
-            [HVMMEM_unused] = p2m_invalid
+            [HVMMEM_unused] = p2m_invalid,
+            [HVMMEM_ioreq_server] = p2m_ioreq_server
         };
 
         if ( copy_from_guest(&a, arg, 1) )
@@ -5642,7 +5645,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             }
             if ( !p2m_is_ram(t) &&
                  (!p2m_is_hole(t) || a.hvmmem_type != HVMMEM_mmio_dm) &&
-                 (t != p2m_mmio_write_dm || a.hvmmem_type != HVMMEM_ram_rw) )
+                 (t != p2m_ioreq_server || a.hvmmem_type != HVMMEM_ram_rw) )
             {
                 put_gfn(d, pfn);
                 goto setmemtype_fail;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index 6d037360be54a3e7d2f1c04dfea5108ac4df6c13..13cab249dd6d89f0d2a268a8d25fc2da0b4dd764 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -171,7 +171,7 @@ static void ept_p2m_type_to_flags(struct p2m_domain *p2m, ept_entry_t *entry,
             entry->a = entry->d = !!cpu_has_vmx_ept_ad;
             break;
         case p2m_grant_map_ro:
-        case p2m_mmio_write_dm:
+        case p2m_ioreq_server:
             entry->r = 1;
             entry->w = entry->x = 0;
             entry->a = !!cpu_has_vmx_ept_ad;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 2b6e89eeaa8bec68c584d496cb5cb3b7a4823cc2..3b025d569b373e962709440ae4a4763d4f2ffa3a 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -94,7 +94,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn,
     default:
         return flags | _PAGE_NX_BIT;
     case p2m_grant_map_ro:
-    case p2m_mmio_write_dm:
+    case p2m_ioreq_server:
         return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index e54c8b729949bf8d3fbd1f258327fc5120cd1e22..833f279c64c2df3a77ccf35ae8606895e77c96e0 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3226,7 +3226,7 @@ static int sh_page_fault(struct vcpu *v,
 
     /* Need to hand off device-model MMIO to the device model */
     if ( p2mt == p2m_mmio_dm
-         || (p2mt == p2m_mmio_write_dm && ft == ft_demand_write) )
+         || (p2mt == p2m_ioreq_server && ft == ft_demand_write) )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 035ca92b7f7a68dc8ec60b14567cec2049024c82..d5fd546d455fb10ce66d6ccc82a0302cff8588dd 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -71,7 +71,7 @@ typedef enum {
     p2m_ram_shared = 12,          /* Shared or sharable memory */
     p2m_ram_broken = 13,          /* Broken page, access cause domain crash */
     p2m_map_foreign  = 14,        /* ram pages from foreign domain */
-    p2m_mmio_write_dm = 15,       /* Read-only; writes go to the device model */
+    p2m_ioreq_server = 15,
 } p2m_type_t;
 
 /* Modifiers to the query */
@@ -112,7 +112,7 @@ typedef unsigned int p2m_query_t;
                       | p2m_to_mask(p2m_ram_ro)         \
                       | p2m_to_mask(p2m_grant_map_ro)   \
                       | p2m_to_mask(p2m_ram_shared)     \
-                      | p2m_to_mask(p2m_mmio_write_dm))
+                      | p2m_to_mask(p2m_ioreq_server))
 
 /* Write-discard types, which should discard the write operations */
 #define P2M_DISCARD_WRITE_TYPES (p2m_to_mask(p2m_ram_ro)     \
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index ebb907ad3d03d1069a32ab7ae1d7059adee3209b..b3e45cf1fc49986a507328d21221435de421f68b 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -84,11 +84,12 @@ typedef enum {
     HVMMEM_ram_ro,             /* Read-only; writes are discarded */
     HVMMEM_mmio_dm,            /* Reads and write go to the device model */
 #if __XEN_INTERFACE_VERSION__ < 0x00040700
-    HVMMEM_mmio_write_dm       /* Read-only; writes go to the device model */
+    HVMMEM_mmio_write_dm,      /* Read-only; writes go to the device model */
 #else
-    HVMMEM_unused              /* Placeholder; setting memory to this type
+    HVMMEM_unused,             /* Placeholder; setting memory to this type
                                   will fail for code after 4.7.0 */
 #endif
+    HVMMEM_ioreq_server
 } hvmmem_type_t;
 
 /* Following tools-only interfaces may change in future. */