#define compat_grant_table_op hvm_grant_table_op_compat32
#define do_arch_1 paging_domctl_continuation
-static const hypercall_table_t hvm_hypercall_table[NR_hypercalls] = {
+static const hypercall_table_t hvm_hypercall_table[] = {
COMPAT_CALL(memory_op),
COMPAT_CALL(grant_table_op),
COMPAT_CALL(vcpu_op),
if ( (eax & 0x80000000) && is_viridian_domain(currd) )
return viridian_hypercall(regs);
- if ( (eax >= NR_hypercalls) || !hvm_hypercall_table[eax].native )
+ BUILD_BUG_ON(ARRAY_SIZE(hvm_hypercall_table) >
+ ARRAY_SIZE(hypercall_args_table));
+
+ if ( (eax >= ARRAY_SIZE(hvm_hypercall_table)) ||
+ !hvm_hypercall_table[eax].native )
{
regs->eax = -ENOSYS;
return HVM_HCALL_completed;
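
A standalone sketch of the idiom introduced above, for readers not familiar with it (the names demo_table, arg_table and demo_dispatch are invented for illustration, they are not part of this patch): once the dispatch table is sized by its initializer rather than by NR_hypercalls, a BUILD_BUG_ON is what keeps the parallel argument-count table at least as large, so indexing it with an already range-checked table index cannot run past its end.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

typedef long (*demo_fn_t)(long);

static long demo_op(long arg) { return arg + 1; }

/* Sized by its initializer; unset slots are NULL. */
static demo_fn_t demo_table[] = {
    [0] = demo_op,
    [3] = demo_op,
};

/* Parallel per-op argument counts; must cover every demo_table slot. */
static const unsigned char arg_table[] = { 1, 0, 0, 1 };

static long demo_dispatch(unsigned long op, long arg)
{
    /* Compile-time guarantee: any op that passes the range check below
       is also a valid index into arg_table. */
    BUILD_BUG_ON(ARRAY_SIZE(demo_table) > ARRAY_SIZE(arg_table));

    if ( (op >= ARRAY_SIZE(demo_table)) || !demo_table[op] )
        return -38; /* -ENOSYS */

    printf("op %lu takes %u arg(s)\n", op, arg_table[op]);
    return demo_table[op](arg);
}

int main(void)
{
    demo_dispatch(0, 41);
    demo_dispatch(1, 0);  /* NULL slot: rejected */
    demo_dispatch(9, 0);  /* out of range: rejected */
    return 0;
}
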
#define do_arch_1 paging_domctl_continuation
-static const hypercall_table_t pv_hypercall_table[NR_hypercalls] = {
+static const hypercall_table_t pv_hypercall_table[] = {
COMPAT_CALL(set_trap_table),
HYPERCALL(mmu_update),
COMPAT_CALL(set_gdt),
eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->eax;
- if ( (eax >= NR_hypercalls) || !pv_hypercall_table[eax].native )
+ BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
+ ARRAY_SIZE(hypercall_args_table));
+
+ if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
+ !pv_hypercall_table[eax].native )
{
regs->eax = -ENOSYS;
return;
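
The PV side relies on the same property of designated initializers: the array's size follows the highest __HYPERVISOR_* index used, and any slots below that index which are not named stay zero-filled. That is why the !pv_hypercall_table[eax].native check remains necessary after the ARRAY_SIZE range check. A minimal sketch of that behaviour, with invented names (demo_entry_t, DEMO_OP_*) standing in for the real hypercall_table_t and op numbers:

#include <assert.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Two-entry-point dispatch slot, loosely modelled on hypercall_table_t;
 * the field names match the patch, the rest is illustrative. */
typedef struct {
    long (*native)(void);
    long (*compat)(void);
} demo_entry_t;

static long demo_native(void) { return 0; }
static long demo_compat(void) { return 0; }

/* Hypothetical op numbers standing in for __HYPERVISOR_* constants. */
#define DEMO_OP_LOW   2
#define DEMO_OP_HIGH  7

static const demo_entry_t demo_table[] = {
    [DEMO_OP_LOW]  = { demo_native, demo_compat },
    [DEMO_OP_HIGH] = { demo_native, demo_compat },
};

int main(void)
{
    /* Size follows the highest designated index, not a fixed bound. */
    assert(ARRAY_SIZE(demo_table) == DEMO_OP_HIGH + 1);

    /* Slots between the populated ones exist but are zero-filled, so a
       NULL check on .native is still needed after the range check. */
    assert(demo_table[DEMO_OP_LOW].native != NULL);
    assert(demo_table[4].native == NULL);
    return 0;
}
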
{
struct multicall_entry *call = &state->call;
- if ( (call->op < NR_hypercalls) && pv_hypercall_table[call->op].native )
+ if ( (call->op < ARRAY_SIZE(pv_hypercall_table)) &&
+ pv_hypercall_table[call->op].native )
call->result = pv_hypercall_table[call->op].native(
call->args[0], call->args[1], call->args[2],
call->args[3], call->args[4], call->args[5]);
{
struct compat_multicall_entry *call = &state->compat_call;
- if ( (call->op < NR_hypercalls) && pv_hypercall_table[call->op].compat )
+ if ( (call->op < ARRAY_SIZE(pv_hypercall_table)) &&
+ pv_hypercall_table[call->op].compat )
call->result = pv_hypercall_table[call->op].compat(
call->args[0], call->args[1], call->args[2],
call->args[3], call->args[4], call->args[5]);