nestedhvm: initialize pfec for l1 pagetable walk
author     Christoph Egger <Christoph.Egger@amd.com>
           Thu, 19 Jul 2012 10:15:58 +0000 (11:15 +0100)
committer  Christoph Egger <Christoph.Egger@amd.com>
           Thu, 19 Jul 2012 10:15:58 +0000 (11:15 +0100)
Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
Acked-by: Tim Deegan <tim@xen.org>
Committed-by: Tim Deegan <tim@xen.org>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm/hap/nested_hap.c
xen/include/asm-x86/hvm/nestedhvm.h

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7e6b5bbbb5bb96e793ed85c8030da770fc721316..22c136b9d991bdb8c7314aa033ead25a1a65c5de 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1278,7 +1278,8 @@ int hvm_hap_nested_page_fault(unsigned long gpa,
          * into l1 guest if not fixable. The algorithm is
          * the same as for shadow paging.
          */
-        rv = nestedhvm_hap_nested_page_fault(v, gpa);
+        rv = nestedhvm_hap_nested_page_fault(v, gpa,
+                                             access_r, access_w, access_x);
         switch (rv) {
         case NESTEDHVM_PAGEFAULT_DONE:
             return 1;
diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index c49ec5335d8718add5f42b29fbbefdcf2c811e3a..53963e098fc08b6a95d8a29d24c587ad4b91b014 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -177,13 +177,20 @@ out:
  */
 static int
 nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
-                      unsigned int *page_order)
+                      unsigned int *page_order,
+                      bool_t access_r, bool_t access_w, bool_t access_x)
 {
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
     
     nested_cr3 = nhvm_vcpu_hostcr3(v);
 
+    pfec = PFEC_user_mode | PFEC_page_present;
+    if (access_w)
+        pfec |= PFEC_write_access;
+    if (access_x)
+        pfec |= PFEC_insn_fetch;
+
     /* Walk the guest-supplied NPT table, just as if it were a pagetable */
     gfn = paging_ga_to_gfn_cr3(v, nested_cr3, L2_gpa, &pfec, page_order);
 
@@ -200,7 +207,8 @@ nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
  * Returns:
  */
 int
-nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa)
+nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa,
+    bool_t access_r, bool_t access_w, bool_t access_x)
 {
     int rv;
     paddr_t L1_gpa, L0_gpa;
@@ -212,7 +220,8 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa)
     nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
 
     /* walk the L1 P2M table */
-    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21);
+    rv = nestedhap_walk_L1_p2m(v, L2_gpa, &L1_gpa, &page_order_21,
+        access_r, access_w, access_x);
 
     /* let caller to handle these two cases */
     switch (rv) {
diff --git a/xen/include/asm-x86/hvm/nestedhvm.h b/xen/include/asm-x86/hvm/nestedhvm.h
index 7c1c16a601a8d6b1f8e5e30e15ed9cd784d403e2..22a3c54841c894f2b544a913429922261a19ea53 100644
--- a/xen/include/asm-x86/hvm/nestedhvm.h
+++ b/xen/include/asm-x86/hvm/nestedhvm.h
@@ -51,7 +51,8 @@ bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
 #define NESTEDHVM_PAGEFAULT_INJECT 1
 #define NESTEDHVM_PAGEFAULT_ERROR  2
 #define NESTEDHVM_PAGEFAULT_MMIO   3
-int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa);
+int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t L2_gpa,
+    bool_t access_r, bool_t access_w, bool_t access_x);
 
 /* IO permission map */
 unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
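
For reference, below is a minimal standalone sketch (not part of the patch) of how the error-code word ends up being built from the access bits before paging_ga_to_gfn_cr3() walks the L1 p2m. The PFEC_* values follow the architectural x86 page-fault error-code bit layout, which is what Xen's constants in asm-x86/processor.h encode; the helper name build_pfec() is made up for illustration.

/*
 * Standalone illustration only -- not Xen code.  Mirrors the pfec
 * construction done in nestedhap_walk_L1_p2m() after this patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural page-fault error-code bits (Xen: asm-x86/processor.h). */
#define PFEC_page_present  (1u << 0)  /* faulting access hit a present entry */
#define PFEC_write_access  (1u << 1)  /* fault was caused by a write */
#define PFEC_user_mode     (1u << 2)  /* fault happened in user (guest) mode */
#define PFEC_insn_fetch    (1u << 4)  /* fault was an instruction fetch */

/* Build the error code the same way the patched L1 p2m walk does. */
static uint32_t build_pfec(bool access_w, bool access_x)
{
    uint32_t pfec = PFEC_user_mode | PFEC_page_present;

    if ( access_w )
        pfec |= PFEC_write_access;
    if ( access_x )
        pfec |= PFEC_insn_fetch;

    return pfec;
}

int main(void)
{
    printf("nested write fault: pfec = %#x\n", build_pfec(true, false));  /* 0x7  */
    printf("nested insn fetch:  pfec = %#x\n", build_pfec(false, true));  /* 0x15 */
    return 0;
}

Note that access_r sets no bit: a plain read is the default access type, expressed by the absence of the write and instruction-fetch bits, while PFEC_user_mode and PFEC_page_present are set unconditionally for the walk done on behalf of the L2 guest.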