x86/hvm: Rework nested hap functions to reduce parameters
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Tue, 30 Nov 2021 17:05:09 +0000
committer  Andrew Cooper <andrew.cooper3@citrix.com>
           Thu, 13 Jan 2022 15:19:43 +0000
Most functions in this call chain take 8 parameters, meaning that the final
two booleans are spilled to the stack at each call site.
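
As an illustration, a sketch of the old SVM prototype (taken from the diff
below) with the x86-64 SysV register assignment annotated; only the first six
integer arguments travel in registers:

    int nsvm_hap_walk_L1_p2m(struct vcpu *v,           /* rdi   */
                             paddr_t L2_gpa,           /* rsi   */
                             paddr_t *L1_gpa,          /* rdx   */
                             unsigned int *page_order, /* rcx   */
                             uint8_t *p2m_acc,         /* r8    */
                             bool_t access_r,          /* r9    */
                             bool_t access_w,          /* stack */
                             bool_t access_x);         /* stack */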

First, delete nestedhap_walk_L1_p2m() and introduce nhvm_hap_walk_L1_p2m() as
a thin wrapper around hvm_funcs, just like all the other nhvm_*() hooks.  This
requires including xen/mm.h in hvm.h, as a forward declaration of struct npfec
is no longer sufficient.

Next, replace the triple of booleans with struct npfec, which contains the
same information in the bottom 3 bits.
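
For reference, struct npfec itself (defined in xen/mm.h) is not modified by
this patch; an abridged sketch showing only the bits used here:

    struct npfec {
        unsigned int read_access:1;
        unsigned int write_access:1;
        unsigned int insn_fetch:1;
        /* further fields (present, gla_valid, kind, ...) omitted */
    };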

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/nestedsvm.c
xen/arch/x86/hvm/vmx/vvmx.c
xen/arch/x86/include/asm/hvm/hvm.h
xen/arch/x86/include/asm/hvm/nestedhvm.h
xen/arch/x86/include/asm/hvm/svm/nestedsvm.h
xen/arch/x86/include/asm/hvm/vmx/vvmx.h
xen/arch/x86/mm/hap/nested_hap.c
xen/arch/x86/mm/p2m.c

index d233550ae47bb2438cef70f0f9384febcf0ceeca..b0ee49a61ab06d3c7e524ce59538cfea38c7efdd 100644 (file)
@@ -1775,10 +1775,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
          * the same as for shadow paging.
          */
 
-         rv = nestedhvm_hap_nested_page_fault(curr, &gpa,
-                                              npfec.read_access,
-                                              npfec.write_access,
-                                              npfec.insn_fetch);
+        rv = nestedhvm_hap_nested_page_fault(curr, &gpa, npfec);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
         case NESTEDHVM_PAGEFAULT_RETRY:
index 6d906300407723b876f90964fd775f6b53f23e7c..abc178d8d482e413c8f2811baf1118ae2eaba7f4 100644 (file)
@@ -1216,10 +1216,9 @@ nsvm_vmcb_hap_enabled(struct vcpu *v)
  * walk is successful, the translated value is returned in
  * L1_gpa. The result value tells what to do next.
  */
-int
-nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order, uint8_t *p2m_acc,
-                     bool_t access_r, bool_t access_w, bool_t access_x)
+int nsvm_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec)
 {
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
@@ -1227,9 +1226,9 @@ nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
     nested_cr3 = nhvm_vcpu_p2m_base(v);
 
     pfec = PFEC_user_mode | PFEC_page_present;
-    if ( access_w )
+    if ( npfec.write_access )
         pfec |= PFEC_write_access;
-    if ( access_x )
+    if ( npfec.insn_fetch )
         pfec |= PFEC_insn_fetch;
 
     /* Walk the guest-supplied NPT table, just as if it were a pagetable */
index e9f94daf64933a821412a1aaa76918bf4f4f6e6f..7419ee9dd0bc4535e3aafd2ca8ce1e70ef109fe2 100644 (file)
@@ -2346,16 +2346,16 @@ int nvmx_msr_read_intercept(unsigned int msr, u64 *msr_content)
  * walk is successful, the translated value is returned in
  * L1_gpa. The result value tells what to do next.
  */
-int
-nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order, uint8_t *p2m_acc,
-                     bool_t access_r, bool_t access_w, bool_t access_x)
+int nvmx_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec)
 {
     int rc;
     unsigned long gfn;
     uint64_t exit_qual;
     uint32_t exit_reason = EXIT_REASON_EPT_VIOLATION;
-    uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
+    uint32_t rwx_rights =
+        (npfec.insn_fetch << 2) | (npfec.write_access << 1) | npfec.read_access;
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
     vmx_vmcs_enter(v);
index 2767427aa938150c3eb339ef02be28131ad17d86..cd9e73e266f3593540713bbc154996262c10f3ba 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef __ASM_X86_HVM_HVM_H__
 #define __ASM_X86_HVM_HVM_H__
 
+#include <xen/mm.h>
+
 #include <asm/alternative.h>
 #include <asm/asm_defns.h>
 #include <asm/current.h>
@@ -203,8 +205,7 @@ struct hvm_function_table {
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                 paddr_t *L1_gpa, unsigned int *page_order,
-                                uint8_t *p2m_acc, bool_t access_r,
-                                bool_t access_w, bool_t access_x);
+                                uint8_t *p2m_acc, struct npfec npfec);
 
     void (*enable_msr_interception)(struct domain *d, uint32_t msr);
     bool_t (*is_singlestep_supported)(void);
@@ -350,7 +351,6 @@ int hvm_debug_op(struct vcpu *v, int32_t op);
 void hvm_toggle_singlestep(struct vcpu *v);
 void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);
 
-struct npfec;
 int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                               struct npfec npfec);
 
@@ -631,6 +631,14 @@ static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
     return hvm_funcs.nhvm_intr_blocked(v);
 }
 
+static inline int nhvm_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec)
+{
+    return hvm_funcs.nhvm_hap_walk_L1_p2m(
+        v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
+}
+
 static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     hvm_funcs.enable_msr_interception(d, msr);
index d263925786214f90d5f29548d2ee2f08b1c4a925..7184928c2bb179a424554cdab534f0373305a9a9 100644 (file)
@@ -58,11 +58,7 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 #define NESTEDHVM_PAGEFAULT_RETRY      5
 #define NESTEDHVM_PAGEFAULT_DIRECT_MMIO 6
 int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
-    bool_t access_r, bool_t access_w, bool_t access_x);
-
-int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                          unsigned int *page_order, uint8_t *p2m_acc,
-                          bool_t access_r, bool_t access_w, bool_t access_x);
+                                    struct npfec npfec);
 
 /* IO permission map */
 unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
index 087369845761cba41fcc9691ba35dd4be3cbf007..c3ef2354140cce384c3378d537b3397e5acb7f2c 100644 (file)
@@ -122,9 +122,9 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
 void svm_vmexit_do_clgi(struct cpu_user_regs *regs, struct vcpu *v);
 void svm_vmexit_do_stgi(struct cpu_user_regs *regs, struct vcpu *v);
 bool_t nestedsvm_gif_isset(struct vcpu *v);
-int nsvm_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                         unsigned int *page_order, uint8_t *p2m_acc,
-                         bool_t access_r, bool_t access_w, bool_t access_x);
+int nsvm_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec);
 
 #define NSVM_INTR_NOTHANDLED     3
 #define NSVM_INTR_NOTINTERCEPTED 2
index d5f68f30b1293df8fbb4efaf29293d2eacbf2d42..e4ca3bc6ee2bafd76241c3c1a05d9cfc23f1e675 100644 (file)
@@ -100,10 +100,10 @@ bool_t nvmx_ept_enabled(struct vcpu *v);
 #define EPT_TRANSLATE_MISCONFIG     2
 #define EPT_TRANSLATE_RETRY         3
 
-int
-nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                     unsigned int *page_order, uint8_t *p2m_acc,
-                     bool_t access_r, bool_t access_w, bool_t access_x);
+int nvmx_hap_walk_L1_p2m(
+    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
+    uint8_t *p2m_acc, struct npfec npfec);
+
 /*
  * Virtual VMCS layout
  *
index 50fa2dd9f405b9289a600a3621b3f8707d29999a..d8a7b3b40167a4cc4c517b0ff26c43e394ce16ed 100644 (file)
@@ -113,31 +113,13 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
     }
 }
 
-/* This function uses L2_gpa to walk the P2M page table in L1. If the
- * walk is successful, the translated value is returned in
- * L1_gpa. The result value tells what to do next.
- */
-int
-nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order, uint8_t *p2m_acc,
-                      bool_t access_r, bool_t access_w, bool_t access_x)
-{
-    ASSERT(hvm_funcs.nhvm_hap_walk_L1_p2m);
-
-    return hvm_funcs.nhvm_hap_walk_L1_p2m(v, L2_gpa, L1_gpa, page_order,
-        p2m_acc, access_r, access_w, access_x);
-}
-
-
 /* This function uses L1_gpa to walk the P2M table in L0 hypervisor. If the
  * walk is successful, the translated value is returned in L0_gpa. The return 
  * value tells the upper level what to do.
  */
-static int
-nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
-                      p2m_type_t *p2mt, p2m_access_t *p2ma,
-                      unsigned int *page_order,
-                      bool_t access_r, bool_t access_w, bool_t access_x)
+static int nestedhap_walk_L0_p2m(
+    struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa, p2m_type_t *p2mt,
+    p2m_access_t *p2ma, unsigned int *page_order, struct npfec npfec)
 {
     mfn_t mfn;
     int rc;
@@ -154,7 +136,7 @@ nestedhap_walk_L0_p2m(struct p2m_domain *p2m, paddr_t L1_gpa, paddr_t *L0_gpa,
         goto out;
 
     rc = NESTEDHVM_PAGEFAULT_L0_ERROR;
-    if ( access_w && p2m_is_readonly(*p2mt) )
+    if ( npfec.write_access && p2m_is_readonly(*p2mt) )
         goto out;
 
     if ( p2m_is_paging(*p2mt) || p2m_is_shared(*p2mt) || !p2m_is_ram(*p2mt) )
@@ -176,9 +158,8 @@ out:
  *
  * Returns:
  */
-int
-nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
-    bool_t access_r, bool_t access_w, bool_t access_x)
+int nestedhvm_hap_nested_page_fault(
+    struct vcpu *v, paddr_t *L2_gpa, struct npfec npfec)
 {
     int rv;
     paddr_t L1_gpa, L0_gpa;
@@ -192,8 +173,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
-        access_r, access_w, access_x);
+    rv = nhvm_hap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
+                              npfec);
 
     /* let caller to handle these two cases */
     switch (rv) {
@@ -209,9 +190,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     }
 
     /* ==> we have to walk L0 P2M */
-    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa,
-        &p2mt_10, &p2ma_10, &page_order_10,
-        access_r, access_w, access_x);
+    rv = nestedhap_walk_L0_p2m(p2m, L1_gpa, &L0_gpa, &p2mt_10, &p2ma_10,
+                               &page_order_10, npfec);
 
     /* let upper level caller to handle these two cases */
     switch (rv) {
index 98c8201f92c6b9c93986ab45644363858384fada..3f81e99fd6aaf77e7ce7480272ed548ecd9a1c40 100644 (file)
@@ -1930,6 +1930,11 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
         const struct paging_mode *mode;
         uint8_t l1_p2ma;
         unsigned int l1_page_order;
+        struct npfec npfec = {
+            .read_access  = 1,
+            .write_access = *pfec & PFEC_write_access,
+            .insn_fetch   = *pfec & PFEC_insn_fetch,
+        };
         int rv;
 
         /* translate l2 guest va into l2 guest gfn */
@@ -1940,11 +1945,8 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
         if ( l2_gfn == gfn_x(INVALID_GFN) )
             return gfn_x(INVALID_GFN);
 
-        rv = nestedhap_walk_L1_p2m(v, pfn_to_paddr(l2_gfn), &l1_gpa,
-                                   &l1_page_order, &l1_p2ma,
-                                   1,
-                                   !!(*pfec & PFEC_write_access),
-                                   !!(*pfec & PFEC_insn_fetch));
+        rv = nhvm_hap_walk_L1_p2m(
+            v, pfn_to_paddr(l2_gfn), &l1_gpa, &l1_page_order, &l1_p2ma, npfec);
 
         if ( rv != NESTEDHVM_PAGEFAULT_DONE )
             return gfn_x(INVALID_GFN);