x86/VMX: fix live migration while enabling APICV
author    Jiongxi Li <jiongxi.li@intel.com>
          Mon, 18 Feb 2013 08:27:58 +0000 (09:27 +0100)
committer Jiongxi Li <jiongxi.li@intel.com>
          Mon, 18 Feb 2013 08:27:58 +0000 (09:27 +0100)
SVI should be restored in case the guest is processing a virtual
interrupt while a domain's state is being saved. Otherwise SVI would
be lost when virtual interrupt delivery is enabled.
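
For reference, the 16-bit VMCS guest interrupt status field packs RVI
(the highest-priority requesting vector) in bits 0-7 and SVI (the
highest-priority in-service vector) in bits 8-15. The standalone sketch
below mirrors the masking logic used by vmx_process_isr() in this patch;
the helper name, constants, and example values are illustrative only,
not part of the patch itself.

    #include <stdint.h>
    #include <stdio.h>

    /* Layout of the 16-bit VMCS guest interrupt status field:
     * bits 0-7  = RVI (requesting virtual interrupt)
     * bits 8-15 = SVI (servicing virtual interrupt)
     */
    #define INTR_STATUS_SUBFIELD_BITMASK  0x0FFu
    #define INTR_STATUS_SVI_OFFSET        8

    /* Keep RVI, replace SVI with the highest in-service vector
     * recovered from the restored local APIC ISR state. */
    static uint16_t restore_svi(uint16_t status, int highest_isr)
    {
        if ( highest_isr < 0 )      /* no vector in service */
            highest_isr = 0;
        status &= INTR_STATUS_SUBFIELD_BITMASK;  /* clear old SVI */
        status |= (uint16_t)highest_isr << INTR_STATUS_SVI_OFFSET;
        return status;
    }

    int main(void)
    {
        /* e.g. vector 0x21 requesting, guest was servicing 0x31 */
        printf("%#x\n", restore_svi(0x0021, 0x31)); /* 0x3121 */
        return 0;
    }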

Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/vlapic.c
xen/arch/x86/hvm/vmx/intr.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/vmx/vmcs.h

index ee2294c913c06186f7e7c0e33ae39f410a29220e..38ff216827743f297fcaae5361610587226bfb08 100644 (file)
@@ -1198,6 +1198,9 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 ) 
         return -EINVAL;
 
+    if ( hvm_funcs.process_isr )
+        hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
+
     vlapic_adjust_i8259_target(d);
     lapic_rearm(s);
     return 0;
index c5c503ec2f921c7333c73d11d2e742aad94f2977..20d127a66427601f1ca88eb505e21bc17c08aa9a 100644 (file)
@@ -290,8 +290,8 @@ void vmx_intr_assist(void)
             vmx_set_eoi_exit_bitmap(v, pt_vector);
 
         /* we need update the RVI field */
-        status &= ~(unsigned long)0x0FF;
-        status |= (unsigned long)0x0FF & 
+        status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+        status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK &
                     intack.vector;
         __vmwrite(GUEST_INTR_STATUS, status);
         if (v->arch.hvm_vmx.eoi_exitmap_changed) {
index e623c915e677d468b3ca7233975fb47838264094..748ca6247ce8d52f8e2766123accee4324749f6f 100644 (file)
@@ -1421,6 +1421,29 @@ static int vmx_virtual_intr_delivery_enabled(void)
     return cpu_has_vmx_virtual_intr_delivery;
 }
 
+static void vmx_process_isr(int isr, struct vcpu *v)
+{
+    unsigned long status;
+    u8 old;
+
+    if ( !cpu_has_vmx_virtual_intr_delivery )
+        return;
+
+    if ( isr < 0 )
+        isr = 0;
+
+    vmx_vmcs_enter(v);
+    status = __vmread(GUEST_INTR_STATUS);
+    old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+    if ( isr != old )
+    {
+        status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+        status |= isr << VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+        __vmwrite(GUEST_INTR_STATUS, status);
+    }
+    vmx_vmcs_exit(v);
+}
+
 static struct hvm_function_table __read_mostly vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -1470,6 +1493,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
     .update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
     .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
+    .process_isr          = vmx_process_isr,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
 };
 
index 35a4b3eac2f662cfa65ace7cdb0e3bd625ced08d..2fa2ea5eed2f61ae38725c42b1cc2b8881c4817c 100644 (file)
@@ -183,6 +183,7 @@ struct hvm_function_table {
     /* Virtual interrupt delivery */
     void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
     int (*virtual_intr_delivery_enabled)(void);
+    void (*process_isr)(int isr, struct vcpu *v);
 
     /*Walk nested p2m  */
     int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
index ba0222172ad1fcba1c84a220f4d5f73a86663aee..db85baa92fbbea882f44a955b969226c6037d326 100644 (file)
@@ -270,6 +270,10 @@ extern bool_t cpu_has_vmx_ins_outs_instr_info;
  */
 #define VMX_BASIC_DEFAULT1_ZERO                (1ULL << 55)
 
+/* Guest interrupt status */
+#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK  0x0FF
+#define VMX_GUEST_INTR_STATUS_SVI_OFFSET        8
+
 /* VMCS field encodings. */
 enum vmcs_field {
     VIRTUAL_PROCESSOR_ID            = 0x00000000,