regs->pc = rsp->data.regs.arm.pc;
}
+void vm_event_monitor_next_interrupt(struct vcpu *v)
+{
+ /* Not supported on ARM. */
+}
+
/*
* Local variables:
* mode: C
spin_unlock(&d->event_lock);
}
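+/*
+ * Fill in the common (CR2) part of a pending-event query, then ask the
+ * VMX/SVM hook for the vector, type and error code of whatever event is
+ * queued for injection, if any.
+ */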
+static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ return hvm_funcs.get_pending_event(v, info);
+}
+
void hvm_do_resume(struct vcpu *v)
{
check_wakeup_from_wait();
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
- hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
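+ /* Don't clobber an event that is already queued for injection. */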
+ if ( !hvm_event_pending(v) )
+ hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
+
v->arch.hvm_vcpu.inject_trap.vector = -1;
}
+
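+ /*
+ * One-shot next-interrupt reporting: armed by a vm_event response
+ * carrying VM_EVENT_FLAG_GET_NEXT_INTERRUPT, and disarmed again as
+ * soon as a pending event has been forwarded to the monitor.
+ */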
+ if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
+ {
+ struct x86_event info;
+
+ if ( hvm_get_pending_event(v, &info) )
+ {
+ hvm_monitor_interrupt(info.vector, info.type, info.error_code,
+ info.cr2);
+ v->arch.monitor.next_interrupt_enabled = false;
+ }
+ }
}
static int hvm_print_line(
return monitor_traps(curr, 1, &req);
}
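+/* Send a synchronous VM_EVENT_REASON_INTERRUPT request for the current vCPU. */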
+void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
+ unsigned int err, uint64_t cr2)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_INTERRUPT,
+ .u.interrupt.x86.vector = vector,
+ .u.interrupt.x86.type = type,
+ .u.interrupt.x86.error_code = err,
+ .u.interrupt.x86.cr2 = cr2,
+ };
+
+ monitor_traps(current, 1, &req);
+}
+
/*
* Local variables:
* mode: C
svm_asid_g_invlpg(v, vaddr);
}
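+/*
+ * Report the event pending in the VMCB's EVENTINJ field, if its valid
+ * bit is set; the SVM counterpart of vmx_get_pending_event().
+ */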
+static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ const struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ if ( !vmcb->eventinj.fields.v )
+ return false;
+
+ info->vector = vmcb->eventinj.fields.vector;
+ info->type = vmcb->eventinj.fields.type;
+ info->error_code = vmcb->eventinj.fields.errorcode;
+
+ return true;
+}
+
static struct hvm_function_table __initdata svm_function_table = {
.name = "SVM",
.cpu_up_prepare = svm_cpu_up_prepare,
.inject_event = svm_inject_event,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
+ .get_pending_event = svm_get_pending_event,
.invlpg = svm_invlpg,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
return 0;
}
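+/*
+ * Report the event queued in VM_ENTRY_INTR_INFO (with its error code),
+ * if the valid bit is set.
+ */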
+static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ unsigned long intr_info, error_code;
+
+ vmx_vmcs_enter(v);
+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+ __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &error_code);
+ vmx_vmcs_exit(v);
+
+ if ( !(intr_info & INTR_INFO_VALID_MASK) )
+ return false;
+
+ info->vector = MASK_EXTR(intr_info, INTR_INFO_VECTOR_MASK);
+ info->type = MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK);
+ info->error_code = error_code;
+
+ return true;
+}
+
static struct hvm_function_table __initdata vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
.inject_event = vmx_inject_event,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
+ .get_pending_event = vmx_get_pending_event,
.invlpg = vmx_invlpg,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
v->arch.user_regs.eip = rsp->data.regs.x86.rip;
}
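+/* Arm one-shot reporting of the next pending interrupt for this vCPU. */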
+void vm_event_monitor_next_interrupt(struct vcpu *v)
+{
+ v->arch.monitor.next_interrupt_enabled = true;
+}
+
void vm_event_fill_regs(vm_event_request_t *req)
{
const struct cpu_user_regs *regs = guest_cpu_user_regs();
if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
vm_event_set_registers(v, &rsp);
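+ /* One-shot request: see VM_EVENT_FLAG_GET_NEXT_INTERRUPT. */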
+ if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
+ vm_event_monitor_next_interrupt(v);
+
if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
vm_event_vcpu_unpause(v);
}
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
struct arch_vm_event *vm_event;
+
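+ /*
+ * Monitor state set via vm_event responses and consumed (then
+ * cleared) by hvm_do_resume().
+ */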
+ struct {
+ bool next_interrupt_enabled;
+ } monitor;
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
+ bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
void (*invlpg)(struct vcpu *v, unsigned long vaddr);
int (*cpu_up_prepare)(unsigned int cpu);
unsigned long trap_type, unsigned long insn_length);
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
unsigned int subleaf);
+void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
+ unsigned int err, uint64_t cr2);
#endif /* __ASM_X86_HVM_MONITOR_H__ */
(1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
(1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
(1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID);
+ (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT);
/* Since we know this is on VMX, we can just call the hvm func */
if ( hvm_is_singlestep_supported() )
#define XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION 5
#define XEN_DOMCTL_MONITOR_EVENT_CPUID 6
#define XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
+#define XEN_DOMCTL_MONITOR_EVENT_INTERRUPT 8
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
* if any of those flags are set, only those will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
+/*
+ * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
+ * interrupt pending after resuming the VCPU.
+ */
+#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
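+/*
+ * Usage sketch (illustrative, not part of this patch): a monitor
+ * application that wants the next interrupt reported sets this flag in
+ * the vm_event response it puts back on the ring:
+ *     rsp->flags |= VM_EVENT_FLAG_GET_NEXT_INTERRUPT;
+ */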
/*
* Reasons for the vm event request
* These kinds of events will be filtered out in future versions.
*/
#define VM_EVENT_REASON_PRIVILEGED_CALL 11
+/* An interrupt has been delivered. */
+#define VM_EVENT_REASON_INTERRUPT 12
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
uint32_t _pad;
};
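+/* x86 payload for VM_EVENT_REASON_INTERRUPT. */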
+struct vm_event_interrupt_x86 {
+ uint32_t vector;
+ uint32_t type;
+ uint32_t error_code;
+ uint32_t _pad;
+ uint64_t cr2;
+};
+
#define MEM_PAGING_DROP_PAGE (1 << 0)
#define MEM_PAGING_EVICT_FAIL (1 << 1)
struct vm_event_debug software_breakpoint;
struct vm_event_debug debug_exception;
struct vm_event_cpuid cpuid;
+ union {
+ struct vm_event_interrupt_x86 x86;
+ } interrupt;
} u;
union {
void vm_event_fill_regs(vm_event_request_t *req);
void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
+void vm_event_monitor_next_interrupt(struct vcpu *v);
+
#endif /* __VM_EVENT_H__ */
/*