if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
return -EINVAL;
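+ /*
+  * Restoring LAPIC state (e.g. after live migration) must also
+  * resynchronise the highest in-service vector with the ISR bits
+  * just reloaded; with VMX virtual interrupt delivery this means
+  * refreshing SVI in the VMCS.
+  */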
+ if ( hvm_funcs.process_isr )
+ hvm_funcs.process_isr(vlapic_find_highest_isr(s), v);
+
vlapic_adjust_i8259_target(d);
lapic_rearm(s);
return 0;
vmx_set_eoi_exit_bitmap(v, pt_vector);
/* We need to update the RVI field. */
- status &= ~(unsigned long)0x0FF;
- status |= (unsigned long)0x0FF &
+ status &= ~VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+ status |= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK &
intack.vector;
__vmwrite(GUEST_INTR_STATUS, status);
if ( v->arch.hvm_vmx.eoi_exitmap_changed ) {
return cpu_has_vmx_virtual_intr_delivery;
}
+static void vmx_process_isr(int isr, struct vcpu *v)
+{
+ unsigned long status;
+ u8 old;
+
+ if ( !cpu_has_vmx_virtual_intr_delivery )
+ return;
+
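+ /* vlapic_find_highest_isr() returns -1 when no vector is in service. */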
+ if ( isr < 0 )
+ isr = 0;
+
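+ /*
+  * SVI lives in bits 15:8 of the 16-bit guest interrupt status;
+  * keep RVI (bits 7:0) and rewrite the field only if SVI changes.
+  */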
+ vmx_vmcs_enter(v);
+ status = __vmread(GUEST_INTR_STATUS);
+ old = status >> VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+ if ( isr != old )
+ {
+ status &= VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK;
+ status |= isr << VMX_GUEST_INTR_STATUS_SVI_OFFSET;
+ __vmwrite(GUEST_INTR_STATUS, status);
+ }
+ vmx_vmcs_exit(v);
+}
+
static struct hvm_function_table __read_mostly vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
.virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
+ .process_isr = vmx_process_isr,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
};
/* Virtual interrupt delivery */
void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
int (*virtual_intr_delivery_enabled)(void);
+ void (*process_isr)(int isr, struct vcpu *v);
/* Walk nested p2m */
int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
*/
#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)
+/*
+ * Guest interrupt status (16-bit VMCS field):
+ * RVI (requesting virtual interrupt) in bits 7:0,
+ * SVI (servicing virtual interrupt) in bits 15:8.
+ */
+#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
+#define VMX_GUEST_INTR_STATUS_SVI_OFFSET 8
+
/* VMCS field encodings. */
enum vmcs_field {
VIRTUAL_PROCESSOR_ID = 0x00000000,