vm-assist: slightly reduce source code size
author Jan Beulich <jbeulich@suse.com>
Mon, 23 Mar 2015 15:52:51 +0000 (16:52 +0100)
committer Jan Beulich <jbeulich@suse.com>
Mon, 23 Mar 2015 15:52:51 +0000 (16:52 +0100)
Adjust VM_ASSIST() to prepend VMASST_TYPE_ to its type argument, so call sites no longer need to spell out the prefix.
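
As an illustrative sketch only (the authoritative macro definition and call
sites are in the hunks below), the token pasting means a caller can write

    if ( VM_ASSIST(d, m2p_strict) )
        zap_ro_mpt(cr3_gfn);

which the preprocessor expands to

    if ( test_bit(VMASST_TYPE_m2p_strict, &(d)->vm_assist) )
        zap_ro_mpt(cr3_gfn);

i.e. exactly what was spelled out before, so behaviour is unchanged while
each call site gets slightly shorter.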

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
xen/arch/x86/domain.c
xen/arch/x86/mm.c
xen/arch/x86/mm/shadow/multi.c
xen/arch/x86/traps.c
xen/common/kernel.c
xen/include/xen/sched.h

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index bebefbf86640353ec3bb187e1edea678c1da423b..aaa9ff9a22772978cbb8338c588c67ffe55e3794 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -972,7 +972,7 @@ int arch_set_info_guest(
         case -ERESTART:
             break;
         case 0:
-            if ( !compat && !VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
+            if ( !compat && !VM_ASSIST(d, m2p_strict) &&
                  !paging_mode_refcounts(d) )
                 fill_ro_mpt(cr3_gfn);
             break;
@@ -1011,7 +1011,7 @@ int arch_set_info_guest(
                         cr3_page = NULL;
                     break;
                 case 0:
-                    if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+                    if ( VM_ASSIST(d, m2p_strict) )
                         zap_ro_mpt(cr3_gfn);
                     break;
                 }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 87ed78ea2bc14ebff9751231e180aa6a491ead67..8e29675582b9e299ec54785234539174245305d0 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1464,7 +1464,7 @@ static int alloc_l4_table(struct page_info *page)
         adjust_guest_l4e(pl4e[i], d);
     }
 
-    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, VMASST_TYPE_m2p_strict));
+    init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
     unmap_domain_page(pl4e);
 
     return rc > 0 ? 0 : rc;
@@ -2775,7 +2775,7 @@ int new_guest_cr3(unsigned long mfn)
 
     invalidate_shadow_ldt(curr, 0);
 
-    if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) && !paging_mode_refcounts(d) )
+    if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
         fill_ro_mpt(mfn);
     curr->arch.guest_table = pagetable_from_pfn(mfn);
     update_cr3(curr);
@@ -3134,8 +3134,7 @@ long do_mmuext_op(
                                 op.arg1.mfn);
                     break;
                 }
-                if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
-                     !paging_mode_refcounts(d) )
+                if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
                     zap_ro_mpt(op.arg1.mfn);
             }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 6e0d03e94c21d89097332395b3a90ee1599e75b4..bf8a46979f8a4fdb014db7dc5bd29bacbf54fdc4 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1435,7 +1435,7 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
         shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                             __PAGE_HYPERVISOR);
 
-    if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+    if ( !VM_ASSIST(d, m2p_strict) )
         sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
 
     /* Shadow linear mapping for 4-level shadows.  N.B. for 3-level
@@ -3983,11 +3983,11 @@ sh_update_cr3(struct vcpu *v, int do_locking)
             shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
 
             if ( (v->arch.flags & TF_kernel_mode) &&
-                 !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+                 !VM_ASSIST(d, m2p_strict) )
                 sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
                     idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
             else if ( !(v->arch.flags & TF_kernel_mode) &&
-                      VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+                      VM_ASSIST(d, m2p_strict) )
                 sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
                     shadow_l4e_empty();
         }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 7a2e2d408ddebe265a71535af1c591f5ca6238f0..ac96ad9a5cbdb97ba2e6e1ff34020d4d49f83d40 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1441,7 +1441,7 @@ static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs)
          !(regs->error_code & (PFEC_reserved_bit | PFEC_insn_fetch)) &&
          (regs->error_code & PFEC_write_access) )
     {
-        if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) &&
+        if ( VM_ASSIST(d, writable_pagetables) &&
              /* Do not check if access-protection fault since the page may
                 legitimately be not present in shadow page tables */
              (paging_mode_enabled(d) ||
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 3a29d9bbbad4ec786a61802df47f71ced1090d5e..6a3196ae3b2d1143039e4b1021db9a59a27c4053 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -306,7 +306,7 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         {
         case 0:
             fi.submap = (1U << XENFEAT_memory_op_vnode_supported);
-            if ( VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3) )
+            if ( VM_ASSIST(d, pae_extended_cr3) )
                 fi.submap |= (1U << XENFEAT_pae_pgdir_above_4gb);
             if ( paging_mode_translate(d) )
                 fi.submap |= 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ccd7ed8d91a9cf41bed82a46ad46f57d36255f4d..ecdcdecdafea17b15532aca301c46863f6413d4a 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -833,7 +833,7 @@ void watchdog_domain_destroy(struct domain *d);
 /* This check is for functionality specific to a control domain */
 #define is_control_domain(_d) ((_d)->is_privileged)
 
-#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
+#define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))
 
 #define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
 #define is_pv_vcpu(v)   (is_pv_domain((v)->domain))