static inline int oos_fixup_flush_gmfn(struct vcpu *v, mfn_t gmfn,
struct oos_fixup *fixup)
{
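+ /* The helper called below now takes a domain; look it up from the vcpu once. */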
+ struct domain *d = v->domain;
int i;
for ( i = 0; i < SHADOW_OOS_FIXUPS; i++ )
{
if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
{
- sh_remove_write_access_from_sl1p(v, gmfn,
+ sh_remove_write_access_from_sl1p(d, gmfn,
fixup->smfn[i],
fixup->off[i]);
fixup->smfn[i] = _mfn(INVALID_MFN);
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_OOS_FIXUP_EVICT);
/* Reuse this slot and remove current writable mapping. */
- sh_remove_write_access_from_sl1p(v, gmfn,
+ sh_remove_write_access_from_sl1p(d, gmfn,
oos_fixup[idx].smfn[next],
oos_fixup[idx].off[next]);
perfc_incr(shadow_oos_fixup_evict);
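The two fragments above are from the out-of-sync (OOS) fixup path: each OOS guest frame caches a small, fixed number of (shadow mfn, offset) locations of writable L1 entries, so they can be revoked directly instead of via a brute-force hash walk, and the slot pointed at by the round-robin cursor is revoked and reused when the cache is full. A minimal sketch of that bookkeeping, using made-up names rather than Xen's real struct oos_fixup / SHADOW_OOS_FIXUPS:

/* Toy illustration only -- not Xen's real data structures. */
#include <stdio.h>

#define DEMO_FIXUPS      2
#define DEMO_INVALID_MFN (~0UL)

struct demo_fixup {
    unsigned long smfn[DEMO_FIXUPS]; /* shadow page holding a writable PTE */
    unsigned long off[DEMO_FIXUPS];  /* byte offset of that PTE in the page */
    unsigned int next;               /* round-robin eviction cursor */
};

/* Stand-in for the real write-access revocation helper. */
static void demo_revoke(unsigned long smfn, unsigned long off)
{
    printf("revoke writable PTE at smfn %#lx, offset %#lx\n", smfn, off);
}

/* Remember a new writable-PTE location, revoking whatever the reused slot
 * pointed at (mirrors the "Reuse this slot" branch above). */
static void demo_fixup_add(struct demo_fixup *f, unsigned long smfn,
                           unsigned long off)
{
    unsigned int slot = f->next;

    if ( f->smfn[slot] != DEMO_INVALID_MFN )
        demo_revoke(f->smfn[slot], f->off[slot]);

    f->smfn[slot] = smfn;
    f->off[slot] = off;
    f->next = (slot + 1) % DEMO_FIXUPS;
}

int main(void)
{
    struct demo_fixup f = { { DEMO_INVALID_MFN, DEMO_INVALID_MFN },
                            { 0, 0 }, 0 };

    demo_fixup_add(&f, 0x1000, 0x18);
    demo_fixup_add(&f, 0x2000, 0x20);
    demo_fixup_add(&f, 0x3000, 0x28);   /* evicts and revokes slot 0 */
    return 0;
}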
unsigned long fault_addr)
{
/* Dispatch table for getting per-type functions */
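/* Indexed by shadow type; entries are the per-guest-level instantiations. */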
- static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
+ static const hash_domain_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */
int shtype = mfn_to_page(last_smfn)->u.sh.type;
if ( callbacks[shtype] )
- callbacks[shtype](curr, last_smfn, gmfn);
+ callbacks[shtype](d, last_smfn, gmfn);
if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
perfc_incr(shadow_writeable_h_5);
perfc_incr(shadow_writeable_bf_1);
else
perfc_incr(shadow_writeable_bf);
- hash_vcpu_foreach(v, callback_mask, callbacks, gmfn);
+ hash_domain_foreach(d, callback_mask, callbacks, gmfn);
/* If that didn't catch the mapping, then there's some non-pagetable
* mapping -- ioreq page, grant mapping, &c. */
}
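For context, the hunk above is part of the brute-force fallback: the shadow hash is walked with a mask of shadow types, and each matching shadow page is handed to a per-type callback, now keyed by domain rather than by vcpu. A rough sketch of the underlying dispatch-table pattern, with hypothetical names (demo_*) rather than the real hash_domain_foreach() machinery:

/* Illustrative sketch only: hypothetical stand-ins (demo_*) for the per-type
 * callback dispatch used by the hash walk; none of these are Xen symbols. */
#include <stdio.h>

struct demo_domain { const char *name; };

enum demo_shadow_type { DEMO_none, DEMO_l1_32, DEMO_l1_pae, DEMO_l1_64, DEMO_unused };

/* The callback now takes the domain, mirroring hash_domain_callback_t. */
typedef int (*demo_callback_t)(struct demo_domain *d, unsigned long smfn,
                               unsigned long gmfn);

static int demo_rm_write_access_l1(struct demo_domain *d, unsigned long smfn,
                                   unsigned long gmfn)
{
    printf("%s: drop writable mappings of gmfn %#lx found in shadow %#lx\n",
           d->name, gmfn, smfn);
    return 0;
}

/* Dispatch table indexed by shadow type, as in the hunk above. */
static const demo_callback_t callbacks[DEMO_unused] = {
    [DEMO_l1_32]  = demo_rm_write_access_l1,
    [DEMO_l1_pae] = demo_rm_write_access_l1,
    [DEMO_l1_64]  = demo_rm_write_access_l1,
};

int main(void)
{
    struct demo_domain d = { "dom1" };
    enum demo_shadow_type t = DEMO_l1_pae;   /* type of the shadow page found */

    if ( callbacks[t] )
        callbacks[t](&d, 0x1234, 0x5678);
    return 0;
}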
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
-int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
+int sh_remove_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
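/* Dispatch on the shadow page's type to the matching per-level implementation. */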
struct page_info *sp = mfn_to_page(smfn);
|| sp->u.sh.type == SH_type_fl1_32_shadow )
{
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,2)
- (v, gmfn, smfn, off);
+ (d, gmfn, smfn, off);
}
else if ( sp->u.sh.type == SH_type_l1_pae_shadow
|| sp->u.sh.type == SH_type_fl1_pae_shadow )
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,3)
- (v, gmfn, smfn, off);
+ (d, gmfn, smfn, off);
else if ( sp->u.sh.type == SH_type_l1_64_shadow
|| sp->u.sh.type == SH_type_fl1_64_shadow )
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p,4)
- (v, gmfn, smfn, off);
+ (d, gmfn, smfn, off);
return 0;
}
/* Functions to revoke guest rights */
#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
-int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
+int sh_rm_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
- struct domain *d = v->domain;
struct vcpu *curr = current;
int r;
shadow_l1e_t *sl1p, sl1e;
}
#endif
-int sh_rm_write_access_from_l1(struct vcpu *v, mfn_t sl1mfn,
+int sh_rm_write_access_from_l1(struct domain *d, mfn_t sl1mfn,
mfn_t readonly_mfn)
/* Excises all writeable mappings to readonly_mfn from this l1 shadow table */
{
- struct domain *d = v->domain;
shadow_l1e_t *sl1e;
int done = 0;
int flags;
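sh_rm_write_access_from_l1() itself (body truncated above) walks every present entry in one L1 shadow and drops the writable bit on those that map readonly_mfn. A simplified sketch of that walk, with a toy PTE encoding and names that are not Xen's:

/* Illustrative sketch only: a toy walk that mirrors the idea of
 * sh_rm_write_access_from_l1 -- clear the writable bit on every entry that
 * maps the frame being made read-only.  Simplified, not Xen's real layout. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ENTRIES      512
#define DEMO_PAGE_SHIFT   12
#define DEMO_PRESENT      (1ULL << 0)
#define DEMO_RW           (1ULL << 1)
#define DEMO_FRAME(e)     ((e) >> DEMO_PAGE_SHIFT)

static int demo_rm_write_access(uint64_t l1[DEMO_ENTRIES], uint64_t readonly_mfn)
{
    int done = 0;

    for ( int i = 0; i < DEMO_ENTRIES; i++ )
    {
        if ( (l1[i] & (DEMO_PRESENT | DEMO_RW)) == (DEMO_PRESENT | DEMO_RW)
             && DEMO_FRAME(l1[i]) == readonly_mfn )
        {
            l1[i] &= ~DEMO_RW;   /* demote the mapping to read-only */
            done++;
        }
    }
    return done;
}

int main(void)
{
    uint64_t l1[DEMO_ENTRIES] = { 0 };

    l1[7] = (0xabcdULL << DEMO_PAGE_SHIFT) | DEMO_PRESENT | DEMO_RW;
    printf("entries demoted: %d\n", demo_rm_write_access(l1, 0xabcd));
    return 0;
}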
extern int
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, GUEST_LEVELS)
- (struct vcpu *v, mfn_t sl1mfn, mfn_t readonly_mfn);
+ (struct domain *d, mfn_t sl1mfn, mfn_t readonly_mfn);
extern int
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, GUEST_LEVELS)
(struct vcpu *v, mfn_t sl1mfn, mfn_t target_mfn);
extern int
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, GUEST_LEVELS)
- (struct vcpu *v, mfn_t gmfn, mfn_t smfn, unsigned long off);
+ (struct domain *d, mfn_t gmfn, mfn_t smfn, unsigned long off);
#endif
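The extern declarations above rely on SHADOW_INTERNAL_NAME() to give each guest-paging-level build of the same source file its own symbol names. A minimal sketch of that token-pasting idea, under an assumed (not Xen's actual) macro definition:

/* Illustrative sketch only: a made-up name-mangling macro showing the
 * per-GUEST_LEVELS instantiation trick; not the real Xen definition. */
#include <stdio.h>

#define DEMO_INTERNAL_NAME_(name, levels) name ## __guest_ ## levels
#define DEMO_INTERNAL_NAME(name, levels)  DEMO_INTERNAL_NAME_(name, levels)

#define GUEST_LEVELS 3

/* Expands to rm_write_access__guest_3(); building the same file with
 * GUEST_LEVELS 2 or 4 would emit separately named functions. */
static int DEMO_INTERNAL_NAME(rm_write_access, GUEST_LEVELS)(void)
{
    printf("guest levels: %d\n", GUEST_LEVELS);
    return 0;
}

int main(void)
{
    return DEMO_INTERNAL_NAME(rm_write_access, GUEST_LEVELS)();
}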