x86/vvmx: Fix deadlock with MSR bitmap merging
Author:     Andrew Cooper <andrew.cooper3@citrix.com>
AuthorDate: Wed, 11 Mar 2020 18:22:37 +0000 (18:22 +0000)
Committer:  Andrew Cooper <andrew.cooper3@citrix.com>
CommitDate: Tue, 17 Mar 2020 13:18:19 +0000 (13:18 +0000)
c/s c47984aabead "nvmx: implement support for MSR bitmaps" introduced a use of
map_domain_page() which may get used in the middle of context switch.

This is not safe, and causes Xen to deadlock on the mapcache lock:

  (XEN) Xen call trace:
  (XEN)    [<ffff82d08022d6ae>] R _spin_lock+0x34/0x5e
  (XEN)    [<ffff82d0803219d7>] F map_domain_page+0x250/0x527
  (XEN)    [<ffff82d080356332>] F do_page_fault+0x420/0x780
  (XEN)    [<ffff82d08038da3d>] F x86_64/entry.S#handle_exception_saved+0x68/0x94
  (XEN)    [<ffff82d08031729f>] F __find_next_zero_bit+0x28/0x69
  (XEN)    [<ffff82d080321a4d>] F map_domain_page+0x2c6/0x527
  (XEN)    [<ffff82d08029eeb2>] F nvmx_update_exec_control+0x1d7/0x323
  (XEN)    [<ffff82d080299f5a>] F vmx_update_cpu_exec_control+0x23/0x40
  (XEN)    [<ffff82d08029a3f7>] F arch/x86/hvm/vmx/vmx.c#vmx_ctxt_switch_from+0xb7/0x121
  (XEN)    [<ffff82d08031d796>] F arch/x86/domain.c#__context_switch+0x124/0x4a9
  (XEN)    [<ffff82d080320925>] F context_switch+0x154/0x62c
  (XEN)    [<ffff82d080252f3e>] F common/sched/core.c#sched_context_switch+0x16a/0x175
  (XEN)    [<ffff82d080253877>] F common/sched/core.c#schedule+0x2ad/0x2bc
  (XEN)    [<ffff82d08022cc97>] F common/softirq.c#__do_softirq+0xb7/0xc8
  (XEN)    [<ffff82d08022cd38>] F do_softirq+0x18/0x1a
  (XEN)    [<ffff82d0802a2fbb>] F vmx_asm_do_vmentry+0x2b/0x30

Convert the domheap page to a xenheap page, which has a permanent mapping and
can therefore be accessed safely during context switch.

Fixes: c47984aabead ("nvmx: implement support for MSR bitmaps")
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
xen/arch/x86/hvm/vmx/vvmx.c
xen/include/asm-x86/hvm/vmx/vvmx.h

index 926a11c15f6df357a36e085c075ca4268aaa61c1..f0499201966ad89cb4f27609d6bd9ce28c3d44a8 100644 (file)
@@ -130,12 +130,9 @@ int nvmx_vcpu_initialise(struct vcpu *v)
 
     if ( cpu_has_vmx_msr_bitmap )
     {
-        nvmx->msr_merged = alloc_domheap_page(d, MEMF_no_owner);
+        nvmx->msr_merged = alloc_xenheap_page();
         if ( !nvmx->msr_merged )
-        {
-            gdprintk(XENLOG_ERR, "nest: allocation for MSR bitmap failed\n");
             return -ENOMEM;
-        }
     }
 
     nvmx->ept.enabled = 0;
@@ -198,11 +195,7 @@ static void vcpu_relinquish_resources(struct vcpu *v)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
 
-    if ( nvmx->msr_merged )
-    {
-        free_domheap_page(nvmx->msr_merged);
-        nvmx->msr_merged = NULL;
-    }
+    FREE_XENHEAP_PAGE(nvmx->msr_merged);
 }
 
 void nvmx_domain_relinquish_resources(struct domain *d)
@@ -575,14 +568,12 @@ unsigned long *_shadow_io_bitmap(struct vcpu *v)
 static void update_msrbitmap(struct vcpu *v, uint32_t shadow_ctrl)
 {
     struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
-    struct vmx_msr_bitmap *msr_bitmap;
+    struct vmx_msr_bitmap *msr_bitmap = nvmx->msr_merged;
 
     if ( !(shadow_ctrl & CPU_BASED_ACTIVATE_MSR_BITMAP) ||
          !nvmx->msrbitmap )
        return;
 
-    msr_bitmap = __map_domain_page(nvmx->msr_merged);
-
     bitmap_or(msr_bitmap->read_low, nvmx->msrbitmap->read_low,
               v->arch.hvm.vmx.msr_bitmap->read_low,
               sizeof(msr_bitmap->read_low) * 8);
@@ -603,9 +594,7 @@ static void update_msrbitmap(struct vcpu *v, uint32_t shadow_ctrl)
     bitmap_set(msr_bitmap->read_low, MSR_X2APIC_FIRST, 0x100);
     bitmap_set(msr_bitmap->write_low, MSR_X2APIC_FIRST, 0x100);
 
-    unmap_domain_page(msr_bitmap);
-
-    __vmwrite(MSR_BITMAP, page_to_maddr(nvmx->msr_merged));
+    __vmwrite(MSR_BITMAP, virt_to_maddr(nvmx->msr_merged));
 }
 
 void nvmx_update_exec_control(struct vcpu *v, u32 host_cntrl)
index c41f089939a648feacfe877cad2b68646a64926d..d5f68f30b1293df8fbb4efaf29293d2eacbf2d42 100644 (file)
@@ -38,7 +38,7 @@ struct nestedvmx {
     paddr_t    vmxon_region_pa;
     void       *iobitmap[2];           /* map (va) of L1 guest I/O bitmap */
     struct vmx_msr_bitmap *msrbitmap;  /* map (va) of L1 guest MSR bitmap */
-    struct page_info *msr_merged;      /* merged L1 and L2 MSR bitmap */
+    struct vmx_msr_bitmap *msr_merged; /* merged L1 and L2 MSR bitmap */
     /* deferred nested interrupt */
     struct {
         unsigned long intr_info;