return do_domctl(xc_handle, &domctl);
}
+int xc_dom_subscribe(int xc_handle, domid_t domid, evtchn_port_t port)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_subscribe;
+    domctl.domain = domid;
+    domctl.u.subscribe.port = port;
+
+    return do_domctl(xc_handle, &domctl);
+}
+
/*
* Local variables:
* mode: C
int xc_flask_op(int xc_handle, flask_op_t *op);
+/*
+ * Subscribe to state changes in a domain via evtchn (currently only
+ * suspend is reported).
+ * Returns 0 on success, or -1 on failure with errno set appropriately.
+ */
+int xc_dom_subscribe(int xc_handle, domid_t domid, evtchn_port_t port);
+
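+/*
+ * A minimal usage sketch (hypothetical caller, not part of this patch):
+ * dom0 binds an unbound event channel for the guest, subscribes the guest
+ * to it, and then waits for the port to fire once the domain suspends.
+ *
+ *     int xce = xc_evtchn_open();
+ *     evtchn_port_t port = xc_evtchn_bind_unbound_port(xce, domid);
+ *     if ( xc_dom_subscribe(xc_handle, domid, port) < 0 )
+ *         perror("xc_dom_subscribe");
+ *     ...
+ *     port = xc_evtchn_pending(xce);   (fires when the domain suspends)
+ */
+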
/**************************
* GRANT TABLE OPERATIONS *
**************************/
#include <asm/msr.h>
#include <asm/shared.h>
#include <asm/x86_emulate.h>
+#include <asm/traps.h>
#include <asm/hvm/vpt.h>
#include <public/arch-x86/cpuid.h>
panic("GENERAL PROTECTION FAULT\n[error_code=%04x]\n", regs->error_code);
}
+static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
+
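+/*
+ * Deferred NMI/MCE delivery works in two stages: send_guest_trap()
+ * records the target vcpu in the current CPU's softirq_trap slot and
+ * raises NMI_MCE_SOFTIRQ; nmi_mce_softirq() below then runs in a safe
+ * (non-NMI/MCE) context, temporarily pins the vcpu to the recorded
+ * processor if we are elsewhere, and kicks it.  The affinity change is
+ * undone by the iret hypercall once the guest has handled the trap.
+ */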
static void nmi_mce_softirq(void)
{
- /* Only used to defer wakeup of dom0,vcpu0 to a safe (non-NMI) context. */
- vcpu_kick(dom0->vcpu[0]);
+    int cpu = smp_processor_id();
+    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
+    cpumask_t affinity;
+
+    BUG_ON(st == NULL);
+    BUG_ON(st->vcpu == NULL);
+
+    /*
+     * Set the tmp value unconditionally, so that
+     * the check in the iret hypercall works.
+     */
+    st->vcpu->cpu_affinity_tmp = st->vcpu->cpu_affinity;
+
+    if ( (cpu != st->processor) ||
+         (st->processor != st->vcpu->processor) )
+    {
+        /*
+         * We are on a different physical cpu.  Make sure to wake up
+         * the vcpu on the specified processor.
+         */
+        cpus_clear(affinity);
+        cpu_set(st->processor, affinity);
+        vcpu_set_affinity(st->vcpu, &affinity);
+
+        /* Affinity is restored in the iret hypercall. */
+    }
+
+    /*
+     * Only used to defer wakeup of the domain/vcpu to a safe
+     * (non-NMI/MCE) context.
+     */
+    vcpu_kick(st->vcpu);
}
static void nmi_dom0_report(unsigned int reason_idx)
{
-    struct domain *d;
-    struct vcpu *v;
+    struct domain *d = dom0;
-    if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
+    if ( (d == NULL) || (d->vcpu[0] == NULL) )
return;
set_bit(reason_idx, nmi_reason(d));
-    /* Not safe to wake a vcpu here, or even to schedule a tasklet! */
-    if ( !test_and_set_bool(v->nmi_pending) )
-        raise_softirq(NMI_MCE_SOFTIRQ);
+    send_guest_trap(d, 0, TRAP_nmi);
}
asmlinkage void mem_parity_error(struct cpu_user_regs *regs)
return 0;
}
+int send_guest_trap(struct domain *d, uint16_t vcpuid, unsigned int trap_nr)
+{
+    struct vcpu *v;
+    struct softirq_trap *st;
+
+    BUG_ON(d == NULL);
+    BUG_ON(vcpuid >= MAX_VIRT_CPUS);
+    v = d->vcpu[vcpuid];
+
+    switch ( trap_nr )
+    {
+    case TRAP_nmi:
+        if ( !test_and_set_bool(v->nmi_pending) )
+        {
+            /* Record the target so nmi_mce_softirq() wakes the right vcpu. */
+            st = &per_cpu(softirq_trap, smp_processor_id());
+            st->domain = d;
+            st->vcpu = v;
+            st->processor = v->processor;
+
+            /* Not safe to wake up a vcpu here. */
+            raise_softirq(NMI_MCE_SOFTIRQ);
+            return 0;
+        }
+        break;
+    }
+
+    /* Delivery failed. */
+    return -EIO;
+}
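+/*
+ * Hypothetical example (not part of this patch): a machine-check handler
+ * that wants to forward the event to vcpu 0 of the affected guest could
+ * do
+ *
+ *     if ( send_guest_trap(d, 0, TRAP_nmi) )
+ *         gdprintk(XENLOG_WARNING, "trap delivery to guest failed\n");
+ *
+ * Only TRAP_nmi is handled so far; any other trap_nr fails with -EIO.
+ */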
+
long do_set_trap_table(XEN_GUEST_HANDLE(const_trap_info_t) traps)
{
struct trap_info cur;
goto exit_and_crash;
}
+    /* Restore affinity if nmi_mce_softirq() changed it. */
+    if ( !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity) )
+        vcpu_set_affinity(v, &v->cpu_affinity_tmp);
+
/* No longer in NMI context. */
v->nmi_masked = 0;
else
regs->_esp += 16;
+    /* Restore affinity if nmi_mce_softirq() changed it. */
+    if ( !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity) )
+        vcpu_set_affinity(v, &v->cpu_affinity_tmp);
+
/* No longer in NMI context. */
v->nmi_masked = 0;
regs->rcx = iret_saved.rcx;
}
+    /* Restore affinity if nmi_mce_softirq() changed it. */
+    if ( !cpus_equal(v->cpu_affinity_tmp, v->cpu_affinity) )
+        vcpu_set_affinity(v, &v->cpu_affinity_tmp);
+
/* No longer in NMI context. */
v->nmi_masked = 0;
return;
d->is_shut_down = 1;
- send_guest_global_virq(dom0, VIRQ_DOM_EXC);
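+    /*
+     * A domain subscribed via XEN_DOMCTL_subscribe has suspend completion
+     * signalled directly on its registered dom0 event channel; otherwise
+     * dom0 is notified through VIRQ_DOM_EXC as before.
+     */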
+    if ( (d->shutdown_code == SHUTDOWN_suspend) &&
+         (d->suspend_evtchn > 0) )
+        evtchn_set_pending(dom0->vcpu[0], d->suspend_evtchn);
+    else
+        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
static void vcpu_check_shutdown(struct vcpu *v)
}
break;
+    case XEN_DOMCTL_subscribe:
+    {
+        struct domain *d;
+
+        ret = -ESRCH;
+        d = rcu_lock_domain_by_id(op->domain);
+        if ( d != NULL )
+        {
+            d->suspend_evtchn = op->u.subscribe.port;
+            rcu_unlock_domain(d);
+            ret = 0;
+        }
+    }
+    break;
+
default:
ret = arch_do_domctl(op, u_domctl);
break;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
#endif
+#define XEN_DOMCTL_subscribe 29
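+/*
+ * Subscribe to suspend notifications for a domain.  The port is a local
+ * event-channel port of the subscriber (dom0) which Xen sets pending,
+ * instead of raising VIRQ_DOM_EXC, when the domain suspends.
+ */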
+struct xen_domctl_subscribe {
+    uint32_t port; /* IN: dom0-local event-channel port to notify */
+};
+typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
+
struct xen_domctl {
uint32_t cmd;
uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
struct xen_domctl_ext_vcpucontext ext_vcpucontext;
struct xen_domctl_set_opt_feature set_opt_feature;
struct xen_domctl_set_target set_target;
+ struct xen_domctl_subscribe subscribe;
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_cpuid cpuid;
#endif
/* Bitmask of CPUs on which this VCPU may run. */
cpumask_t cpu_affinity;
+    /* Used to change affinity temporarily. */
+    cpumask_t cpu_affinity_tmp;
/* Bitmask of CPUs which are holding onto this VCPU's state. */
cpumask_t vcpu_dirty_cpumask;
bool_t is_shut_down; /* fully shut down? */
int shutdown_code;
+    /* If non-zero, domain suspend is signalled on this dom0 event-channel
+     * port instead of raising VIRQ_DOM_EXC. */
+    int suspend_evtchn;
+
atomic_t pause_count;
unsigned long vm_assist;