Adjust VM_ASSIST() to prepend VMASST_TYPE_ to its type argument, so callers pass the bare assist name (e.g. m2p_strict) instead of the full VMASST_TYPE_* constant.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
case -ERESTART:
break;
case 0:
- if ( !compat && !VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
+ if ( !compat && !VM_ASSIST(d, m2p_strict) &&
!paging_mode_refcounts(d) )
fill_ro_mpt(cr3_gfn);
break;
cr3_page = NULL;
break;
case 0:
- if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ if ( VM_ASSIST(d, m2p_strict) )
zap_ro_mpt(cr3_gfn);
break;
}
adjust_guest_l4e(pl4e[i], d);
}
- init_guest_l4_table(pl4e, d, !VM_ASSIST(d, VMASST_TYPE_m2p_strict));
+ init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
unmap_domain_page(pl4e);
return rc > 0 ? 0 : rc;
invalidate_shadow_ldt(curr, 0);
- if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) && !paging_mode_refcounts(d) )
+ if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
fill_ro_mpt(mfn);
curr->arch.guest_table = pagetable_from_pfn(mfn);
update_cr3(curr);
op.arg1.mfn);
break;
}
- if ( VM_ASSIST(d, VMASST_TYPE_m2p_strict) &&
- !paging_mode_refcounts(d) )
+ if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
zap_ro_mpt(op.arg1.mfn);
}
shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
__PAGE_HYPERVISOR);
- if ( !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ if ( !VM_ASSIST(d, m2p_strict) )
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
/* Shadow linear mapping for 4-level shadows. N.B. for 3-level
shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
if ( (v->arch.flags & TF_kernel_mode) &&
- !VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ !VM_ASSIST(d, m2p_strict) )
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
else if ( !(v->arch.flags & TF_kernel_mode) &&
- VM_ASSIST(d, VMASST_TYPE_m2p_strict) )
+ VM_ASSIST(d, m2p_strict) )
sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
shadow_l4e_empty();
}
!(regs->error_code & (PFEC_reserved_bit | PFEC_insn_fetch)) &&
(regs->error_code & PFEC_write_access) )
{
- if ( VM_ASSIST(d, VMASST_TYPE_writable_pagetables) &&
+ if ( VM_ASSIST(d, writable_pagetables) &&
/* Do not check if access-protection fault since the page may
legitimately be not present in shadow page tables */
(paging_mode_enabled(d) ||
{
case 0:
fi.submap = (1U << XENFEAT_memory_op_vnode_supported);
- if ( VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3) )
+ if ( VM_ASSIST(d, pae_extended_cr3) )
fi.submap |= (1U << XENFEAT_pae_pgdir_above_4gb);
if ( paging_mode_translate(d) )
fi.submap |=
/* This check is for functionality specific to a control domain */
#define is_control_domain(_d) ((_d)->is_privileged)
-#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
+#define VM_ASSIST(d, t) (test_bit(VMASST_TYPE_ ## t, &(d)->vm_assist))
#define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
#define is_pv_vcpu(v) (is_pv_domain((v)->domain))