     return ret;
 }
 
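+/* Set access_required on d's hostp2m; the caller must have paused d. */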
+void arch_p2m_set_access_required(struct domain *d, bool access_required)
+{
+    ASSERT(atomic_read(&d->pause_count));
+    p2m_get_hostp2m(d)->access_required = access_required;
+}
+
 /*
  * Local variables:
  * mode: C
     return _p2m_get_mem_access(p2m, gfn, access);
 }
 
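+/*
+ * Propagate access_required to the hostp2m and to any active altp2m
+ * views, so that a view the guest switches to cannot bypass it; the
+ * caller must have paused d.
+ */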
+void arch_p2m_set_access_required(struct domain *d, bool access_required)
+{
+    unsigned int i;
+
+    ASSERT(atomic_read(&d->pause_count));
+
+    p2m_get_hostp2m(d)->access_required = access_required;
+
+    if ( !altp2m_active(d) )
+        return;
+
+    for ( i = 0; i < MAX_ALTP2M; i++ )
+    {
+        struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+
+        if ( p2m )
+            p2m->access_required = access_required;
+    }
+}
+
 /*
  * Local variables:
  * mode: C
 {
     unsigned int i;
     struct p2m_domain *p2m;
+    struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
 
     mm_lock_init(&d->arch.altp2m_list_lock);
     for ( i = 0; i < MAX_ALTP2M; i++ )
             return -ENOMEM;
         }
         p2m->p2m_class = p2m_alternate;
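+        /* New views inherit the hostp2m's current setting. */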
-        p2m->access_required = 1;
+        p2m->access_required = hostp2m->access_required;
         _atomic_set(&p2m->active_vcpus, 0);
     }
         else
         {
             domain_pause(d);
-            p2m_get_hostp2m(d)->access_required =
-                op->u.access_required.access_required;
+            arch_p2m_set_access_required(d,
+                op->u.access_required.access_required);
             domain_unpause(d);
         }
         break;
 int arch_domain_soft_reset(struct domain *d);
 
+void arch_p2m_set_access_required(struct domain *d, bool access_required);
+
 int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);
 void arch_get_info_guest(struct vcpu *, vcpu_guest_context_u);
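
For context, the toolstack reaches this path via XEN_DOMCTL_set_access_required, for which libxc already provides the xc_domain_set_access_required() wrapper. A minimal caller sketch (the helper name and error handling below are illustrative, not part of this patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Require a mem_access listener for 'domid': the hypercall lands in
     * the domctl handler above, which pauses the domain and calls
     * arch_p2m_set_access_required(). */
    static int require_access_listener(xc_interface *xch, uint32_t domid)
    {
        int rc = xc_domain_set_access_required(xch, domid, 1);

        if ( rc < 0 )
            fprintf(stderr, "set_access_required failed for dom%u\n", domid);

        return rc;
    }

    int main(int argc, char *argv[])
    {
        xc_interface *xch;
        int rc;

        if ( argc < 2 || (xch = xc_interface_open(NULL, NULL, 0)) == NULL )
            return 1;

        rc = require_access_listener(xch, strtoul(argv[1], NULL, 0));
        xc_interface_close(xch);

        return rc ? 1 : 0;
    }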