x86 hvm: implement vector callback for evtchn delivery
author     Keir Fraser <keir.fraser@citrix.com>
           Tue, 25 May 2010 10:28:58 +0000 (11:28 +0100)
committer  Keir Fraser <keir.fraser@citrix.com>
           Tue, 25 May 2010 10:28:58 +0000 (11:28 +0100)
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
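
With a callback vector, event-channel notifications are injected directly as a
guest-chosen IDT vector on the target vCPU, instead of being asserted as a GSI
or PCI INTx line bound to vCPU0 (see the hvm_assert_evtchn_irq() change below).
A minimal guest-side sketch of discovering and enabling the new delivery type
follows. The HYPERVISOR_xen_version()/HYPERVISOR_hvm_op() wrappers, include
paths and vector number are illustrative assumptions; only the
HVM_PARAM_CALLBACK_IRQ encoding and XENFEAT_hvm_callback_vector come from this
patch:

    #include <stdint.h>
    #include <xen/xen.h>          /* DOMID_SELF */
    #include <xen/version.h>      /* XENVER_get_features, xen_feature_info */
    #include <xen/features.h>     /* XENFEAT_hvm_callback_vector */
    #include <xen/hvm/params.h>   /* HVM_PARAM_CALLBACK_IRQ */
    #include <xen/hvm/hvm_op.h>   /* HVMOP_set_param, xen_hvm_param */

    /* Illustrative sketch: ask Xen to deliver event-channel upcalls on
     * 'vector'.  Returns 0 on success, -1 if the host lacks support. */
    static int setup_hvm_callback_vector(uint8_t vector)
    {
        struct xen_feature_info fi = { .submap_idx = 0 };
        struct xen_hvm_param xhp;

        if ( HYPERVISOR_xen_version(XENVER_get_features, &fi) != 0 ||
             !(fi.submap & (1U << XENFEAT_hvm_callback_vector)) )
            return -1;            /* fall back to GSI or PCI INTx delivery */

        xhp.domid = DOMID_SELF;
        xhp.index = HVM_PARAM_CALLBACK_IRQ;
        /* Type 2 in bits 63:56 selects HVMIRQ_callback_vector; the vector
         * number goes in bits 7:0 (see the params.h comment below). */
        xhp.value = ((uint64_t)2 << 56) | vector;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) ? -1 : 0;
    }

The handler installed on that vector should clear
vcpu_info->evtchn_upcall_pending and scan the pending event-channel bitmap, as
in existing PV event handling; with this delivery type every vCPU receives its
own notifications, which is why hvm_assert_evtchn_irq() now kicks the target
vCPU rather than handling only vCPU0.
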
xen/arch/x86/hvm/irq.c
xen/arch/x86/hvm/vpt.c
xen/common/kernel.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/irq.h
xen/include/public/features.h
xen/include/public/hvm/params.h

diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 1fb18cd150967d3bc6e341df4ab79d1f2bcf81d8..b0ab1a5343eae4486ef7b557bf782f6661f6e39e 100644
@@ -185,16 +185,16 @@ void hvm_maybe_deassert_evtchn_irq(void)
 
 void hvm_assert_evtchn_irq(struct vcpu *v)
 {
-    if ( v->vcpu_id != 0 )
-        return;
-
     if ( unlikely(in_irq() || !local_irq_is_enabled()) )
     {
         tasklet_schedule(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
         return;
     }
 
-    hvm_set_callback_irq_level(v);
+    if ( is_hvm_pv_evtchn_vcpu(v) )
+        vcpu_kick(v);
+    else if ( v->vcpu_id == 0 )
+        hvm_set_callback_irq_level(v);
 }
 
 void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
@@ -251,7 +251,7 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
 
     via_type = (uint8_t)(via >> 56) + 1;
     if ( ((via_type == HVMIRQ_callback_gsi) && (via == 0)) ||
-         (via_type > HVMIRQ_callback_pci_intx) )
+         (via_type > HVMIRQ_callback_vector) )
         via_type = HVMIRQ_callback_none;
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
@@ -297,6 +297,9 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
         if ( hvm_irq->callback_via_asserted )
              __hvm_pci_intx_assert(d, pdev, pintx);
         break;
+    case HVMIRQ_callback_vector:
+        hvm_irq->callback_via.vector = (uint8_t)via;
+        break;
     default:
         break;
     }
@@ -312,6 +315,9 @@ void hvm_set_callback_via(struct domain *d, uint64_t via)
     case HVMIRQ_callback_pci_intx:
         printk("PCI INTx Dev 0x%02x Int%c\n", pdev, 'A' + pintx);
         break;
+    case HVMIRQ_callback_vector:
+        printk("Direct Vector 0x%02x\n", (uint8_t)via);
+        break;
     default:
         printk("None\n");
         break;
@@ -323,6 +329,10 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
     struct hvm_domain *plat = &v->domain->arch.hvm_domain;
     int vector;
 
+    if ( (plat->irq.callback_via_type == HVMIRQ_callback_vector)
+         && vcpu_info(v, evtchn_upcall_pending) )
+        return hvm_intack_vector(plat->irq.callback_via.vector);
+
     if ( unlikely(v->nmi_pending) )
         return hvm_intack_nmi;
 
@@ -364,6 +374,8 @@ struct hvm_intack hvm_vcpu_ack_pending_irq(
         if ( !vlapic_ack_pending_irq(v, intack.vector) )
             intack = hvm_intack_none;
         break;
+    case hvm_intsrc_vector:
+        break;
     default:
         intack = hvm_intack_none;
         break;
diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
index ce35a3e0497d280b2f1bc3bcd9baad8937a6c1f5..096d083f74759a9d9e6d1336c24bbe03c597a95b 100644
@@ -286,6 +286,9 @@ void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
     time_cb *cb;
     void *cb_priv;
 
+    if ( intack.source == hvm_intsrc_vector )
+        return;
+
     spin_lock(&v->arch.hvm_vcpu.tm_lock);
 
     pt = is_pt_irq(v, intack);
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index 61a798df4ea4c7a725d2939c6093e487e014d039..96a55318f2baa9b497ac2e6f7121040bb4e51c13 100644
@@ -260,7 +260,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE(void) arg)
                              (1U << XENFEAT_highmem_assist) |
                              (1U << XENFEAT_gnttab_map_avail_bits);
             else
-                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock);
+                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
+                             (1U << XENFEAT_hvm_callback_vector);
 #endif
             break;
         default:
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index bbd2565548dd39cda093037dac85ab661bf1c694..b452b501e011a22afd8e35ad657920b29b234829 100644
 #endif
 #define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
 
+#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
+        d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
+#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
+
 #define VCPU_TRAP_NMI          1
 #define VCPU_TRAP_MCE          2
 #define VCPU_TRAP_LAST         VCPU_TRAP_MCE
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 66637cc3d58ccc1912801aa33a25c98a943b873c..a2845d6c93d16ac9f18fca8ec95def4157e969dd 100644
@@ -33,17 +33,20 @@ enum hvm_intsrc {
     hvm_intsrc_pic,
     hvm_intsrc_lapic,
     hvm_intsrc_nmi,
-    hvm_intsrc_mce
+    hvm_intsrc_mce,
+    hvm_intsrc_vector
 };
 struct hvm_intack {
     uint8_t source; /* enum hvm_intsrc */
     uint8_t vector;
 };
-#define hvm_intack_none       ( (struct hvm_intack) { hvm_intsrc_none,  0 } )
-#define hvm_intack_pic(vec)   ( (struct hvm_intack) { hvm_intsrc_pic,   vec } )
-#define hvm_intack_lapic(vec) ( (struct hvm_intack) { hvm_intsrc_lapic, vec } )
-#define hvm_intack_nmi        ( (struct hvm_intack) { hvm_intsrc_nmi,   2 } )
-#define hvm_intack_mce        ( (struct hvm_intack) { hvm_intsrc_mce,   18 } )
+#define hvm_intack(src, vec)   ((struct hvm_intack) { hvm_intsrc_##src, vec })
+#define hvm_intack_none        hvm_intack(none, 0)
+#define hvm_intack_pic(vec)    hvm_intack(pic, vec)
+#define hvm_intack_lapic(vec)  hvm_intack(lapic, vec)
+#define hvm_intack_nmi         hvm_intack(nmi, 2)
+#define hvm_intack_mce         hvm_intack(mce, 18)
+#define hvm_intack_vector(vec) hvm_intack(vector, vec)
 enum hvm_intblk {
     hvm_intblk_none,      /* not blocked (deliverable) */
     hvm_intblk_shadow,    /* MOV-SS or STI shadow */
diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
index 1f2312427900fd2416c3036e576d54906ba01db2..06e9884db4bc6ee4407fe672e51d077b48efa5b0 100644
@@ -54,12 +54,14 @@ struct hvm_irq {
         enum {
             HVMIRQ_callback_none,
             HVMIRQ_callback_gsi,
-            HVMIRQ_callback_pci_intx
+            HVMIRQ_callback_pci_intx,
+            HVMIRQ_callback_vector
         } callback_via_type;
     };
     union {
         uint32_t gsi;
         struct { uint8_t dev, intx; } pci;
+        uint32_t vector;
     } callback_via;
 
     /* Number of INTx wires asserting each PCI-ISA link. */
diff --git a/xen/include/public/features.h b/xen/include/public/features.h
index e95c7b755a49d965086e2a2109b7f7ac5098ac8c..fef7901294fb76ed8e9a51c51ab615a445428fa3 100644
@@ -68,6 +68,9 @@
  */
 #define XENFEAT_gnttab_map_avail_bits      7
 
+/* x86: Does this Xen host support the HVM callback vector type? */
+#define XENFEAT_hvm_callback_vector        8
+
 /* x86: pvclock algorithm is safe to use on HVM */
 #define XENFEAT_hvm_safe_pvclock           9
 
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 15d828fe14dc1363101732cad034e27d1101d3b4..7e276d92e09d36bd1d2d10cb4a944094a402e4b6 100644
@@ -33,6 +33,9 @@
  * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
  *                  Domain = val[47:32], Bus  = val[31:16],
  *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
+ * val[63:56] == 2: val[7:0] is a vector number, check for
+ *                  XENFEAT_hvm_callback_vector to know if this delivery
+ *                  method is available.
  * If val == 0 then CPU0 event-channel notifications are not delivered.
  */
 #define HVM_PARAM_CALLBACK_IRQ 0
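
For illustration, the val[63:56] == 2 encoding can be packed with a helper
like the following (the macro names are assumptions; only the bit layout
documented above is defined by this interface):

    /* Illustrative only: place type 2 in bits 63:56 and the upcall vector
     * in bits 7:0 of the HVM_PARAM_CALLBACK_IRQ value. */
    #define HVM_CALLBACK_VIA_TYPE_VECTOR  2ULL
    #define HVM_CALLBACK_VECTOR(x) \
        ((HVM_CALLBACK_VIA_TYPE_VECTOR << 56) | ((x) & 0xffULL))

    /* Example: HVM_CALLBACK_VECTOR(0xf3) == 0x02000000000000f3 */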