{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
- eventinj_t event;
+ intinfo_t event;
- event.bytes = 0;
- event.fields.v = 1;
- event.fields.type = X86_EVENTTYPE_NMI;
- event.fields.vector = 2;
+ event.raw = 0;
+ event.v = true;
+ event.type = X86_EVENTTYPE_NMI;
+ event.vector = TRAP_nmi;
- ASSERT(vmcb->eventinj.fields.v == 0);
- vmcb->eventinj = event;
+ ASSERT(!vmcb->event_inj.v);
+ vmcb->event_inj = event;
/*
* SVM does not virtualise the NMI mask, so we emulate it by intercepting
static void svm_inject_extint(struct vcpu *v, int vector)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- eventinj_t event;
+ intinfo_t event;
- event.bytes = 0;
- event.fields.v = 1;
- event.fields.type = X86_EVENTTYPE_EXT_INTR;
- event.fields.vector = vector;
+ event.raw = 0;
+ event.v = true;
+ event.type = X86_EVENTTYPE_EXT_INTR;
+ event.vector = vector;
- ASSERT(vmcb->eventinj.fields.v == 0);
- vmcb->eventinj = event;
+ ASSERT(!vmcb->event_inj.v);
+ vmcb->event_inj = event;
}
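
A minimal host-side sketch, not part of the patch: it reconstructs the EVENTINJ encoding both injectors above produce (vector in bits 7:0, type in bits 10:8, EV in bit 11, V in bit 31, error code in bits 63:32, per the AMD APM). The EVENTINJ_* macros are illustrative names, not Xen's:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative macros for the EVENTINJ field positions (AMD APM). */
    #define EVENTINJ_VECTOR(v)  ((uint64_t)(v) & 0xff)        /* bits  7:0 */
    #define EVENTINJ_TYPE(t)    (((uint64_t)(t) & 0x7) << 8)  /* bits 10:8 */
    #define EVENTINJ_VALID      (1ull << 31)                  /* bit  31   */

    #define X86_EVENTTYPE_NMI 2  /* TYPE encoding, as in Xen's headers */

    int main(void)
    {
        /* The value svm_inject_nmi() places in vmcb->event_inj.raw. */
        uint64_t nmi = EVENTINJ_VALID | EVENTINJ_TYPE(X86_EVENTTYPE_NMI) |
                       EVENTINJ_VECTOR(2 /* TRAP_nmi */);

        printf("NMI EVENTINJ = %#llx\n", (unsigned long long)nmi); /* 0x80000202 */
        return 0;
    }
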
static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
}
HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
- vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
+ vmcb->event_inj.v ? vmcb->event_inj.vector : -1);
/*
* Create a dummy virtual interrupt to intercept as soon as the
* have cleared the interrupt out of the IRR.
* 2. The IRQ is masked.
*/
- if ( unlikely(vmcb->eventinj.fields.v) || intblk )
+ if ( unlikely(vmcb->event_inj.v) || intblk )
{
svm_enable_intr_window(v, intack);
return;
- /* Clear exitintinfo to prevent a fault loop of re-injecting
-  * exceptions forever.
-  */
- n1vmcb->exitintinfo.bytes = 0;
+ /* Clear exit_int_info to prevent a fault loop of re-injecting
+  * exceptions forever.
+  */
+ n1vmcb->exit_int_info.raw = 0;
/* Cleanbits */
n1vmcb->cleanbits.bytes = 0;
n2vmcb->exitcode = ns_vmcb->exitcode;
n2vmcb->exitinfo1 = ns_vmcb->exitinfo1;
n2vmcb->exitinfo2 = ns_vmcb->exitinfo2;
- n2vmcb->exitintinfo = ns_vmcb->exitintinfo;
+ n2vmcb->exit_int_info = ns_vmcb->exit_int_info;
/* Pending Interrupts */
- n2vmcb->eventinj = ns_vmcb->eventinj;
+ n2vmcb->event_inj = ns_vmcb->event_inj;
/* LBR and other virtualization */
if (!vcleanbit_set(lbr)) {
switch (exitcode) {
case VMEXIT_INTR:
- if ( unlikely(ns_vmcb->eventinj.fields.v)
- && nv->nv_vmentry_pending
- && hvm_event_needs_reinjection(ns_vmcb->eventinj.fields.type,
- ns_vmcb->eventinj.fields.vector) )
- {
- ns_vmcb->exitintinfo.bytes = ns_vmcb->eventinj.bytes;
- }
+ if ( unlikely(ns_vmcb->event_inj.v) && nv->nv_vmentry_pending &&
+ hvm_event_needs_reinjection(ns_vmcb->event_inj.type,
+ ns_vmcb->event_inj.vector) )
+ ns_vmcb->exit_int_info = ns_vmcb->event_inj;
break;
case VMEXIT_EXCEPTION_PF:
ns_vmcb->_cr2 = ns_vmcb->exitinfo2;
}
ns_vmcb->exitcode = exitcode;
- ns_vmcb->eventinj.bytes = 0;
+ ns_vmcb->event_inj.raw = 0;
return 0;
}
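
The guard above captures the rule that only hardware-delivered events are lost when a #VMEXIT interrupts their delivery, so only those are worth copying into exit_int_info. A rough, illustrative sketch of the predicate, assuming Xen's event type values; see the real hvm_event_needs_reinjection() for the authoritative version:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EVENTTYPE_EXT_INTR     0  /* values as in Xen's headers */
    #define X86_EVENTTYPE_NMI          2
    #define X86_EVENTTYPE_HW_EXCEPTION 3

    /* Illustrative only: hardware-delivered events vanish if #VMEXIT
     * interrupts their delivery; software events re-execute instead. */
    static bool needs_reinjection(uint8_t type, uint8_t vector)
    {
        switch ( type )
        {
        case X86_EVENTTYPE_EXT_INTR:
        case X86_EVENTTYPE_NMI:
            return true;
        case X86_EVENTTYPE_HW_EXCEPTION:
            /* SVM reports INT3/INTO as type 3, yet they can re-execute. */
            return vector != 3 && vector != 4;
        default:
            return false;
        }
    }

    int main(void)
    {
        return needs_reinjection(X86_EVENTTYPE_NMI, 2) ? 0 : 1;
    }
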
ns_vmcb->exitcode = n2vmcb->exitcode;
ns_vmcb->exitinfo1 = n2vmcb->exitinfo1;
ns_vmcb->exitinfo2 = n2vmcb->exitinfo2;
- ns_vmcb->exitintinfo = n2vmcb->exitintinfo;
+ ns_vmcb->exit_int_info = n2vmcb->exit_int_info;
/* Interrupts */
/* If we emulate a VMRUN/#VMEXIT in the same host #VMEXIT cycle we have
* only happens on a VMRUN instruction intercept which has no valid
- * exitintinfo set.
+ * exit_int_info set.
*/
- if ( unlikely(n2vmcb->eventinj.fields.v) &&
- hvm_event_needs_reinjection(n2vmcb->eventinj.fields.type,
- n2vmcb->eventinj.fields.vector) )
- {
- ns_vmcb->exitintinfo = n2vmcb->eventinj;
- }
+ if ( unlikely(n2vmcb->event_inj.v) &&
+ hvm_event_needs_reinjection(n2vmcb->event_inj.type,
+ n2vmcb->event_inj.vector) )
+ ns_vmcb->exit_int_info = n2vmcb->event_inj;
- ns_vmcb->eventinj.bytes = 0;
+ ns_vmcb->event_inj.raw = 0;
/* Nested paging mode */
if (nestedhvm_paging_mode_hap(v)) {
if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
return hvm_intblk_shadow;
- if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
+ if ( !nv->nv_vmexit_pending && n2vmcb->exit_int_info.v )
+ {
/* Give the l2 guest a chance to finish the delivery of
* the last injected interrupt or exception before we
* emulate a VMEXIT (e.g. VMEXIT(INTR) ).
c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
- if ( vmcb->eventinj.fields.v &&
- hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
- vmcb->eventinj.fields.vector) )
+ if ( vmcb->event_inj.v &&
+ hvm_event_needs_reinjection(vmcb->event_inj.type,
+ vmcb->event_inj.vector) )
{
- c->pending_event = (uint32_t)vmcb->eventinj.bytes;
- c->error_code = vmcb->eventinj.fields.errorcode;
+ c->pending_event = vmcb->event_inj.raw;
+ c->error_code = vmcb->event_inj.ec;
}
return 1;
{
gdprintk(XENLOG_INFO, "Re-injecting %#"PRIx32", %#"PRIx32"\n",
c->pending_event, c->error_code);
- vmcb->eventinj.bytes = c->pending_event;
- vmcb->eventinj.fields.errorcode = c->error_code;
+ vmcb->event_inj.raw = c->pending_event;
+ vmcb->event_inj.ec = c->error_code;
}
else
- vmcb->eventinj.bytes = 0;
+ vmcb->event_inj.raw = 0;
vmcb->cleanbits.bytes = 0;
paging_update_paging_modes(v);
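
The save path above truncates event_inj.raw into the 32-bit pending_event field and carries the error code separately; the load path recombines them. A self-contained sketch of that round-trip, where struct ctxt stands in for the relevant pair of hvm_hw_cpu fields:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef union {
        struct {
            uint8_t  vector;
            uint8_t  type:3;
            bool     ev:1;
            uint32_t resvd1:19;
            bool     v:1;
            uint32_t ec;
        };
        uint64_t raw;
    } intinfo_t;

    struct ctxt { uint32_t pending_event, error_code; }; /* stand-in */

    int main(void)
    {
        intinfo_t src = { .raw = 0 }, dst;
        struct ctxt c;

        src.v = true;
        src.type = 3;       /* X86_EVENTTYPE_HW_EXCEPTION */
        src.vector = 13;    /* #GP */
        src.ev = true;
        src.ec = 0x10;

        /* Save: raw truncates to 32 bits; ec is carried separately. */
        c.pending_event = src.raw;
        c.error_code = src.ec;

        /* Restore: recombine, as the load path above does. */
        dst.raw = c.pending_event;   /* zero-extends, so ec starts as 0 */
        dst.ec = c.error_code;

        assert(dst.raw == src.raw);
        return 0;
    }
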
{
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
- eventinj_t eventinj = vmcb->eventinj;
+ intinfo_t eventinj = vmcb->event_inj;
struct x86_event _event = *event;
struct cpu_user_regs *regs = guest_cpu_user_regs();
break;
}
- if ( unlikely(eventinj.fields.v) &&
- (eventinj.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
+ if ( eventinj.v && (eventinj.type == X86_EVENTTYPE_HW_EXCEPTION) )
{
_event.vector = hvm_combine_hw_exceptions(
- eventinj.fields.vector, _event.vector);
+ eventinj.vector, _event.vector);
if ( _event.vector == TRAP_double_fault )
_event.error_code = 0;
}
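
hvm_combine_hw_exceptions() above applies the x86 double-fault rules when an injection collides with an exception already being delivered. A simplified, illustrative sketch of those rules (combine_hw_exceptions is a made-up name; Xen's real helper also handles the triple-fault case):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TRAP_double_fault  8
    #define TRAP_page_fault   14

    /* Contributory class: #DE, #TS, #NP, #SS, #GP. */
    #define CONTRIBUTORY ((1u << 0) | (1u << 10) | (1u << 11) | \
                          (1u << 12) | (1u << 13))

    /* vec1 is the exception whose delivery faulted; vec2 is the new one. */
    static uint8_t combine_hw_exceptions(uint8_t vec1, uint8_t vec2)
    {
        bool c2 = (1u << vec2) & CONTRIBUTORY;

        /* #PF delivery hitting #PF or a contributory exception => #DF. */
        if ( vec1 == TRAP_page_fault && (vec2 == TRAP_page_fault || c2) )
            return TRAP_double_fault;

        /* Contributory delivery hitting another contributory => #DF. */
        if ( ((1u << vec1) & CONTRIBUTORY) && c2 )
            return TRAP_double_fault;

        /* Otherwise the second exception is simply delivered. */
        return vec2;
    }

    int main(void)
    {
        printf("#GP during #GP delivery -> vector %u (#DF)\n",
               combine_hw_exceptions(13, 13));
        return 0;
    }
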
- eventinj.bytes = 0;
- eventinj.fields.v = 1;
- eventinj.fields.vector = _event.vector;
+ eventinj.raw = 0;
+ eventinj.v = true;
+ eventinj.vector = _event.vector;
/*
* Refer to AMD Vol 2: System Programming, 15.20 Event Injection.
vmcb->nextrip = regs->rip + _event.insn_len;
else
regs->rip += _event.insn_len;
- eventinj.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
+ eventinj.type = X86_EVENTTYPE_SW_INTERRUPT;
break;
case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
regs->rip += _event.insn_len;
if ( cpu_has_svm_nrips )
vmcb->nextrip = regs->rip;
- eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+ eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
break;
case X86_EVENTTYPE_SW_EXCEPTION: /* int3, into */
vmcb->nextrip = regs->rip + _event.insn_len;
else
regs->rip += _event.insn_len;
- eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+ eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
break;
default:
- eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
- eventinj.fields.ev = (_event.error_code != X86_EVENT_NO_EC);
- eventinj.fields.errorcode = _event.error_code;
+ eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
+ eventinj.ev = (_event.error_code != X86_EVENT_NO_EC);
+ eventinj.ec = _event.error_code;
break;
}
vmcb->nextrip = (uint32_t)vmcb->nextrip;
}
- ASSERT(!eventinj.fields.ev ||
- eventinj.fields.errorcode == (uint16_t)eventinj.fields.errorcode);
- vmcb->eventinj = eventinj;
+ ASSERT(!eventinj.ev || eventinj.ec == (uint16_t)eventinj.ec);
+ vmcb->event_inj = eventinj;
if ( _event.vector == TRAP_page_fault &&
_event.type == X86_EVENTTYPE_HW_EXCEPTION )
static bool svm_event_pending(const struct vcpu *v)
{
- return v->arch.hvm.svm.vmcb->eventinj.fields.v;
+ return v->arch.hvm.svm.vmcb->event_inj.v;
}
static void svm_cpu_dead(unsigned int cpu)
{
const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- if ( vmcb->eventinj.fields.v )
+ if ( vmcb->event_inj.v )
return false;
- info->vector = vmcb->eventinj.fields.vector;
- info->type = vmcb->eventinj.fields.type;
- info->error_code = vmcb->eventinj.fields.errorcode;
+ info->vector = vmcb->event_inj.vector;
+ info->type = vmcb->event_inj.type;
+ info->error_code = vmcb->event_inj.ec;
return true;
}
vmcb->cleanbits.bytes = cpu_has_svm_cleanbits ? ~0u : 0u;
/* Event delivery caused this intercept? Queue for redelivery. */
- if ( unlikely(vmcb->exitintinfo.fields.v) &&
- hvm_event_needs_reinjection(vmcb->exitintinfo.fields.type,
- vmcb->exitintinfo.fields.vector) )
- vmcb->eventinj = vmcb->exitintinfo;
+ if ( unlikely(vmcb->exit_int_info.v) &&
+ hvm_event_needs_reinjection(vmcb->exit_int_info.type,
+ vmcb->exit_int_info.vector) )
+ vmcb->event_inj = vmcb->exit_int_info;
switch ( exit_reason )
{
* switches.
*/
insn_len = -1;
- if ( vmcb->exitintinfo.fields.v )
+ if ( vmcb->exit_int_info.v )
{
- switch ( vmcb->exitintinfo.fields.type )
+ switch ( vmcb->exit_int_info.type )
{
/*
* #BP and #OF are from INT3/INTO respectively. #DB from
* semantics.
*/
case X86_EVENTTYPE_HW_EXCEPTION:
- if ( vmcb->exitintinfo.fields.vector == TRAP_int3 ||
- vmcb->exitintinfo.fields.vector == TRAP_overflow )
+ if ( vmcb->exit_int_info.vector == TRAP_int3 ||
+ vmcb->exit_int_info.vector == TRAP_overflow )
break;
/* Fallthrough */
case X86_EVENTTYPE_EXT_INTR:
* The common logic above will have forwarded the vectoring
* information. Undo this as we are going to emulate.
*/
- vmcb->eventinj.bytes = 0;
+ vmcb->event_inj.raw = 0;
}
/*
printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#"PRIx64"\n",
vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes,
vmcb->interrupt_shadow);
- printk("eventinj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
- vmcb->eventinj.bytes, vmcb->eventinj.fields.v,
- vmcb->eventinj.fields.ev, vmcb->eventinj.fields.type,
- vmcb->eventinj.fields.vector);
- printk("exitcode = %#"PRIx64" exitintinfo = %#"PRIx64"\n",
- vmcb->exitcode, vmcb->exitintinfo.bytes);
+ printk("event_inj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
+ vmcb->event_inj.raw, vmcb->event_inj.v,
+ vmcb->event_inj.ev, vmcb->event_inj.type,
+ vmcb->event_inj.vector);
+ printk("exitcode = %#"PRIx64" exit_int_info = %#"PRIx64"\n",
+ vmcb->exitcode, vmcb->exit_int_info.raw);
printk("exitinfo1 = %#"PRIx64" exitinfo2 = %#"PRIx64"\n",
vmcb->exitinfo1, vmcb->exitinfo2);
printk("np_enable = %#"PRIx64" guest_asid = %#x\n",
PRINTF("GENERAL2_INTERCEPT: VMRUN intercept bit is clear (%#"PRIx32")\n",
vmcb_get_general2_intercepts(vmcb));
- if ( vmcb->eventinj.fields.resvd1 )
-     PRINTF("eventinj: MBZ bits are set (%#"PRIx64")\n",
-            vmcb->eventinj.bytes);
+ if ( vmcb->event_inj.resvd1 )
+     PRINTF("event_inj: MBZ bits are set (%#"PRIx64")\n",
+            vmcb->event_inj.raw);
#undef PRINTF
return ret;
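
The resvd1 test above is equivalent to masking bits 30:12 of the raw view, which follows from the intinfo_t layout below. A standalone sketch, with INTINFO_RESVD_MASK as an illustrative name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bits 30:12 of EVENTINJ/EXITINTINFO are reserved, must be zero;
     * this matches the 19-bit resvd1 field in intinfo_t below. */
    #define INTINFO_RESVD_MASK 0x7ffff000ull

    static bool intinfo_mbz_ok(uint64_t raw)
    {
        return !(raw & INTINFO_RESVD_MASK);
    }

    int main(void)
    {
        return intinfo_mbz_ok(0x80000202ull) ? 0 : 1; /* valid NMI encoding */
    }
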
BUILD_BUG_ON(sizeof(vmcb) != PAGE_SIZE);
BUILD_BUG_ON(offsetof(typeof(vmcb), _pause_filter_thresh) != 0x03c);
BUILD_BUG_ON(offsetof(typeof(vmcb), _vintr) != 0x060);
- BUILD_BUG_ON(offsetof(typeof(vmcb), eventinj) != 0x0a8);
+ BUILD_BUG_ON(offsetof(typeof(vmcb), event_inj) != 0x0a8);
BUILD_BUG_ON(offsetof(typeof(vmcb), es) != 0x400);
BUILD_BUG_ON(offsetof(typeof(vmcb), _cpl) != 0x4cb);
BUILD_BUG_ON(offsetof(typeof(vmcb), _cr4) != 0x548);
typedef union
{
- u64 bytes;
struct
{
- u64 vector: 8;
- u64 type: 3;
- u64 ev: 1;
- u64 resvd1: 19;
- u64 v: 1;
- u64 errorcode:32;
- } fields;
-} eventinj_t;
+ uint8_t vector;
+ uint8_t type:3;
+ bool ev:1;
+ uint32_t resvd1:19;
+ bool v:1;
+ uint32_t ec;
+ };
+ uint64_t raw;
+} intinfo_t;
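
Because the new type relies on the compiler packing a uint8_t, three bitfields and a uint32_t into exactly 64 bits, a quick standalone layout check can be reassuring (C11 anonymous structs; assumes the little-endian GCC/Clang bitfield ABI Xen targets):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef union {
        struct {
            uint8_t  vector;
            uint8_t  type:3;
            bool     ev:1;
            uint32_t resvd1:19;
            bool     v:1;
            uint32_t ec;
        };
        uint64_t raw;
    } intinfo_t;

    _Static_assert(sizeof(intinfo_t) == 8, "intinfo_t must be 64 bits");

    int main(void)
    {
        intinfo_t i = { .raw = 0 };

        i.vector = 0xff;            /* bits  7:0  */
        i.type   = 7;               /* bits 10:8  */
        i.ev     = true;            /* bit  11    */
        i.v      = true;            /* bit  31    */
        i.ec     = 0xdeadbeef;      /* bits 63:32 */

        assert(i.raw == 0xdeadbeef80000fffull);
        return 0;
    }
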
typedef union
{
u64 exitcode; /* offset 0x70 */
u64 exitinfo1; /* offset 0x78 */
u64 exitinfo2; /* offset 0x80 */
- eventinj_t exitintinfo; /* offset 0x88 */
+ intinfo_t exit_int_info; /* offset 0x88 */
u64 _np_enable; /* offset 0x90 - cleanbit 4 */
u64 res08[2];
- eventinj_t eventinj; /* offset 0xA8 */
+ intinfo_t event_inj; /* offset 0xA8 */
u64 _h_cr3; /* offset 0xB0 - cleanbit 4 */
virt_ext_t virt_ext; /* offset 0xB8 */
vmcbcleanbits_t cleanbits; /* offset 0xC0 */