switch ( action )
{
case CPU_UP_PREPARE:
- rc = hvm_funcs.cpu_up_prepare(cpu);
+ rc = alternative_call(hvm_funcs.cpu_up_prepare, cpu);
break;
case CPU_DYING:
hvm_cpu_down();
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
- hvm_funcs.cpu_dead(cpu);
+ alternative_vcall(hvm_funcs.cpu_dead, cpu);
break;
default:
break;
}
if ( rc )
goto fail2;
- rc = hvm_funcs.domain_initialise(d);
+ rc = alternative_call(hvm_funcs.domain_initialise, d);
if ( rc != 0 )
goto fail2;
alternative_vcall(hvm_funcs.domain_relinquish_resources, d);
if ( hvm_funcs.nhvm_domain_relinquish_resources )
- hvm_funcs.nhvm_domain_relinquish_resources(d);
+ alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
viridian_domain_deinit(d);
return 0;
/* Architecture-specific vmcs/vmcb bits */
- hvm_funcs.save_cpu_ctxt(v, &ctxt);
+ alternative_vcall(hvm_funcs.save_cpu_ctxt, v, &ctxt);
hvm_get_segment_register(v, x86_seg_idtr, &seg);
ctxt.idtr_limit = seg.limit;
#undef UNFOLD_ARBYTES
/* Architecture-specific vmcs/vmcb bits */
- if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
+ if ( alternative_call(hvm_funcs.load_cpu_ctxt, v, &ctxt) < 0 )
return -EINVAL;
v->arch.hvm.guest_cr[2] = ctxt.cr2;
hvm_update_guest_cr(v, 2);
if ( hvm_funcs.tsc_scaling.setup )
- hvm_funcs.tsc_scaling.setup(v);
+ alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
v->arch.msrs->tsc_aux = ctxt.msr_tsc_aux;
if ( rc != 0 ) /* teardown: vlapic_destroy */
goto fail2;
- if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
+ rc = alternative_call(hvm_funcs.vcpu_initialise, v);
+ if ( rc != 0 ) /* teardown: hvm_funcs.vcpu_destroy */
goto fail3;
softirq_tasklet_init(&v->arch.hvm.assert_evtchn_irq_tasklet,
free_compat_arg_xlat(v);
fail4:
hvmemul_cache_destroy(v);
- hvm_funcs.vcpu_destroy(v);
+ alternative_vcall(hvm_funcs.vcpu_destroy, v);
fail3:
vlapic_destroy(v);
fail2:
free_compat_arg_xlat(v);
tasklet_kill(&v->arch.hvm.assert_evtchn_irq_tasklet);
- hvm_funcs.vcpu_destroy(v);
+ alternative_vcall(hvm_funcs.vcpu_destroy, v);
vlapic_destroy(v);
!(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
return hvm_intblk_rflags_ie;
- intr_shadow = hvm_funcs.get_interrupt_shadow(v);
+ intr_shadow = alternative_call(hvm_funcs.get_interrupt_shadow, v);
if ( intr_shadow & (HVM_INTR_SHADOW_STI|HVM_INTR_SHADOW_MOV_SS) )
return hvm_intblk_shadow;
hvm_set_segment_register(v, x86_seg_idtr, &reg);
if ( hvm_funcs.tsc_scaling.setup )
- hvm_funcs.tsc_scaling.setup(v);
+ alternative_vcall(hvm_funcs.tsc_scaling.setup, v);
/* Sync AP's TSC with BSP's. */
v->arch.hvm.cache_tsc_offset =
static inline int hvm_cpu_up(void)
{
- return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
+ return alternative_call(hvm_funcs.cpu_up);
}
static inline void hvm_cpu_down(void)
{
- if ( hvm_funcs.cpu_down )
- hvm_funcs.cpu_down();
+ alternative_vcall(hvm_funcs.cpu_down);
}
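Note the pattern throughout: where a hook is unconditionally supplied by both the VMX and SVM implementations (cpu_up, cpu_down, cpu_dead, vcpu_destroy, ...), the old NULL guard is dropped along with the conversion, while optional hooks (tsc_scaling.setup, the altp2m and vmtrace ones) keep their guard and only the call itself is patched. alternative_call() is used where the hook's return value is consumed, alternative_vcall() where there is none. A minimal sketch of just those call-site semantics, assuming GNU C named variadic macros — this is an illustration, not Xen's real implementation, which additionally records each site so boot code can rewrite the indirect call into a direct one:

/*
 * Illustration only -- NOT Xen's implementation.  These fallbacks show the
 * semantics each converted call site relies on: alternative_call() yields
 * the hook's return value, alternative_vcall() discards it.  The real
 * macros emit an annotated indirect call patched to a direct one at boot.
 */
#define alternative_call(hook, args...)  ((hook)(args))
#define alternative_vcall(hook, args...) ((void)(hook)(args))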
static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
static inline int nhvm_vcpu_vmexit_event(
struct vcpu *v, const struct x86_event *event)
{
- return hvm_funcs.nhvm_vcpu_vmexit_event(v, event);
+ return alternative_call(hvm_funcs.nhvm_vcpu_vmexit_event, v, event);
}
/* returns l1 guest's cr3 that points to the page table used to
* translate l2 guest physical address space */
static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
- return hvm_funcs.nhvm_vcpu_p2m_base(v);
+ return alternative_call(hvm_funcs.nhvm_vcpu_p2m_base, v);
}
/* returns true, when l1 guest intercepts the specified trap */
static inline bool_t nhvm_vmcx_guest_intercepts_event(
struct vcpu *v, unsigned int vector, int errcode)
{
- return hvm_funcs.nhvm_vmcx_guest_intercepts_event(v, vector, errcode);
+ return alternative_call(hvm_funcs.nhvm_vmcx_guest_intercepts_event, v,
+ vector, errcode);
}
/* returns true when l1 guest wants to use hap to run l2 guest */
static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
{
- return hvm_funcs.nhvm_vmcx_hap_enabled(v);
+ return alternative_call(hvm_funcs.nhvm_vmcx_hap_enabled, v);
}
/* interrupt */
static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
{
- return hvm_funcs.nhvm_intr_blocked(v);
+ return alternative_call(hvm_funcs.nhvm_intr_blocked, v);
}
static inline int nhvm_hap_walk_L1_p2m(
struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
uint8_t *p2m_acc, struct npfec npfec)
{
- return hvm_funcs.nhvm_hap_walk_L1_p2m(
+ return alternative_call(hvm_funcs.nhvm_hap_walk_L1_p2m,
v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
}
static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
- hvm_funcs.enable_msr_interception(d, msr);
+ alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
}
static inline bool_t hvm_is_singlestep_supported(void)
{
return (hvm_funcs.is_singlestep_supported &&
- hvm_funcs.is_singlestep_supported());
+ alternative_call(hvm_funcs.is_singlestep_supported));
}
static inline bool hvm_hap_supported(void)
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
if ( hvm_funcs.altp2m_vcpu_update_p2m )
- hvm_funcs.altp2m_vcpu_update_p2m(v);
+ alternative_vcall(hvm_funcs.altp2m_vcpu_update_p2m, v);
}
/* updates VMCS fields related to VMFUNC and #VE */
static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
{
if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
- hvm_funcs.altp2m_vcpu_update_vmfunc_ve(v);
+ alternative_vcall(hvm_funcs.altp2m_vcpu_update_vmfunc_ve, v);
}
/* emulates #VE */
static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
{
if ( hvm_funcs.altp2m_vcpu_emulate_ve )
{
- hvm_funcs.altp2m_vcpu_emulate_ve(v);
+ alternative_vcall(hvm_funcs.altp2m_vcpu_emulate_ve, v);
return true;
}
return false;
}
static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
if ( hvm_funcs.vmtrace_control )
- return hvm_funcs.vmtrace_control(v, enable, reset);
+ return alternative_call(hvm_funcs.vmtrace_control, v, enable, reset);
return -EOPNOTSUPP;
}
static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
{
if ( hvm_funcs.vmtrace_output_position )
- return hvm_funcs.vmtrace_output_position(v, pos);
+ return alternative_call(hvm_funcs.vmtrace_output_position, v, pos);
return -EOPNOTSUPP;
}
static inline int hvm_vmtrace_set_option(
struct vcpu *v, uint64_t key, uint64_t value)
{
if ( hvm_funcs.vmtrace_set_option )
- return hvm_funcs.vmtrace_set_option(v, key, value);
+ return alternative_call(hvm_funcs.vmtrace_set_option, v, key, value);
return -EOPNOTSUPP;
}
static inline int hvm_vmtrace_get_option(
struct vcpu *v, uint64_t key, uint64_t *value)
{
if ( hvm_funcs.vmtrace_get_option )
- return hvm_funcs.vmtrace_get_option(v, key, value);
+ return alternative_call(hvm_funcs.vmtrace_get_option, v, key, value);
return -EOPNOTSUPP;
}
static inline int hvm_vmtrace_reset(struct vcpu *v)
{
if ( hvm_funcs.vmtrace_reset )
- return hvm_funcs.vmtrace_reset(v);
+ return alternative_call(hvm_funcs.vmtrace_reset, v);
return -EOPNOTSUPP;
}
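All of the hooks touched above are members of the single global hvm_funcs table, which the VMX/SVM start-of-day code fills in exactly once during boot; that write-once property is what makes patching each indirect call into a direct one sound. For orientation, a rough sketch of the table's shape, with prototypes inferred from the call sites above — abridged and hypothetical in its ordering, not the full Xen definition:

struct hvm_function_table {
    /* Value-returning hooks, invoked via alternative_call(). */
    int  (*cpu_up)(void);
    int  (*cpu_up_prepare)(unsigned int cpu);
    int  (*domain_initialise)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    int  (*vmtrace_control)(struct vcpu *v, bool enable, bool reset);

    /* Hooks called for effect only, invoked via alternative_vcall(). */
    void (*cpu_down)(void);
    void (*cpu_dead)(unsigned int cpu);
    void (*vcpu_destroy)(struct vcpu *v);
    void (*enable_msr_interception)(struct domain *d, uint32_t msr);

    /* ... many further hooks elided ... */
};

extern struct hvm_function_table hvm_funcs;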