x86/guest: set up event channel upcall vector
authorRoger Pau Monne <roger.pau@citrix.com>
Tue, 9 Jan 2018 12:51:37 +0000 (12:51 +0000)
committerWei Liu <wei.liu2@citrix.com>
Tue, 16 Jan 2018 18:34:04 +0000 (18:34 +0000)
Also add a dummy event channel upcall handler.

Note that with the current code the underlying Xen (L0) must support
HVMOP_set_evtchn_upcall_vector or else event channel setup is going to
fail. This limitation can be lifted by implementing more event channel
interrupt injection methods as a backup.

Register callback_irq to trick the toolstack into thinking the domain
is enlightened.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
xen/arch/x86/guest/xen.c
xen/include/asm-x86/guest/hypercall.h

index 60626ec21c403fb889d8c937e70e988a178b1f63..59871170c84f07d5757d94d4072a4ce24535974f 100644 (file)
@@ -24,6 +24,7 @@
 #include <xen/rangeset.h>
 #include <xen/types.h>
 
+#include <asm/apic.h>
 #include <asm/e820.h>
 #include <asm/guest.h>
 #include <asm/msr.h>
@@ -186,6 +187,43 @@ static void __init init_memmap(void)
     }
 }
 
+static void xen_evtchn_upcall(struct cpu_user_regs *regs)
+{
+    struct vcpu_info *vcpu_info = this_cpu(vcpu_info);
+
+    vcpu_info->evtchn_upcall_pending = 0;
+    write_atomic(&vcpu_info->evtchn_pending_sel, 0);
+
+    ack_APIC_irq();
+}
+
+static void init_evtchn(void)
+{
+    static uint8_t evtchn_upcall_vector;
+    int rc;
+
+    if ( !evtchn_upcall_vector )
+        alloc_direct_apic_vector(&evtchn_upcall_vector, xen_evtchn_upcall);
+
+    ASSERT(evtchn_upcall_vector);
+
+    rc = xen_hypercall_set_evtchn_upcall_vector(this_cpu(vcpu_id),
+                                                evtchn_upcall_vector);
+    if ( rc )
+        panic("Unable to set evtchn upcall vector: %d", rc);
+
+    /* Trick toolstack to think we are enlightened */
+    {
+        struct xen_hvm_param a = {
+            .domid = DOMID_SELF,
+            .index = HVM_PARAM_CALLBACK_IRQ,
+            .value = 1,
+        };
+
+        BUG_ON(xen_hypercall_hvm_op(HVMOP_set_param, &a));
+    }
+}
+
 void __init hypervisor_setup(void)
 {
     init_memmap();
@@ -210,12 +248,15 @@ void __init hypervisor_setup(void)
                "unable to map vCPU info, limiting vCPUs to: %u\n",
                XEN_LEGACY_MAX_VCPUS);
     }
+
+    init_evtchn();
 }
 
 void hypervisor_ap_setup(void)
 {
     set_vcpu_id();
     map_vcpuinfo();
+    init_evtchn();
 }
 
 int hypervisor_alloc_unused_page(mfn_t *mfn)
index dbc57a566eda32dfc0eedc56bde63c874f7bc4f1..b36a1cc189949038e81e72143612c29b050c14bc 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <public/xen.h>
 #include <public/sched.h>
+#include <public/hvm/hvm_op.h>
 
 #include <public/vcpu.h>
 
@@ -104,6 +105,11 @@ static inline int xen_hypercall_vcpu_op(unsigned int cmd, unsigned int vcpu,
     return _hypercall64_3(long, __HYPERVISOR_vcpu_op, cmd, vcpu, arg);
 }
 
+static inline long xen_hypercall_hvm_op(unsigned int op, void *arg)
+{
+    return _hypercall64_2(long, __HYPERVISOR_hvm_op, op, arg);
+}
+
 /*
  * Higher level hypercall helpers
  */
@@ -120,6 +126,17 @@ static inline long xen_hypercall_shutdown(unsigned int reason)
     return xen_hypercall_sched_op(SCHEDOP_shutdown, &s);
 }
 
+static inline long xen_hypercall_set_evtchn_upcall_vector(
+    unsigned int cpu, unsigned int vector)
+{
+    struct xen_hvm_evtchn_upcall_vector a = {
+        .vcpu = cpu,
+        .vector = vector,
+    };
+
+    return xen_hypercall_hvm_op(HVMOP_set_evtchn_upcall_vector, &a);
+}
+
 #else /* CONFIG_XEN_GUEST */
 
 #include <public/sched.h>