pt_adjust_global_vcpu_target(v);
}
-int vlapic_virtual_intr_delivery_enabled(void)
-{
- if ( hvm_funcs.virtual_intr_delivery_enabled )
- return hvm_funcs.virtual_intr_delivery_enabled();
- else
- return 0;
-}
-
int vlapic_has_pending_irq(struct vcpu *v)
{
struct vlapic *vlapic = vcpu_vlapic(v);
if ( irr == -1 )
return -1;
- if ( vlapic_virtual_intr_delivery_enabled() &&
+ if ( hvm_funcs.virtual_intr_delivery_enabled &&
!nestedhvm_vcpu_in_guestmode(v) )
return irr;
int isr;
if ( !force_ack &&
- vlapic_virtual_intr_delivery_enabled() )
+ hvm_funcs.virtual_intr_delivery_enabled )
return 1;
/* If there's no chance of using APIC assist then bail now. */
vmx_clear_eoi_exit_bitmap(v, vector);
}
-static int vmx_virtual_intr_delivery_enabled(void)
-{
- return cpu_has_vmx_virtual_intr_delivery;
-}
-
static void vmx_process_isr(int isr, struct vcpu *v)
{
unsigned long status;
.nhvm_intr_blocked = nvmx_intr_blocked,
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap,
- .virtual_intr_delivery_enabled = vmx_virtual_intr_delivery_enabled,
.process_isr = vmx_process_isr,
.deliver_posted_intr = vmx_deliver_posted_intr,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
vmx_function_table.process_isr = NULL;
vmx_function_table.handle_eoi = NULL;
}
+ else
+ vmx_function_table.virtual_intr_delivery_enabled = true;
if ( cpu_has_vmx_posted_intr_processing )
{
/* Necessary hardware support for alternate p2m's? */
bool altp2m_supported;
+ /* Is hardware virtual interrupt delivery enabled? */
+ bool virtual_intr_delivery_enabled;
+
/* Indicate HAP capabilities. */
unsigned int hap_capabilities;
/* Virtual interrupt delivery */
void (*update_eoi_exit_bitmap)(struct vcpu *v, u8 vector, u8 trig);
- int (*virtual_intr_delivery_enabled)(void);
void (*process_isr)(int isr, struct vcpu *v);
void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
void (*sync_pir_to_irr)(struct vcpu *v);