break;
#endif /* P2M_AUDIT */
- case XEN_DOMCTL_set_access_required:
- {
- struct p2m_domain* p2m;
-
- ret = -EPERM;
- if ( current->domain == d )
- break;
-
- ret = 0;
- p2m = p2m_get_hostp2m(d);
- p2m->access_required = domctl->u.access_required.access_required;
- }
- break;
-
case XEN_DOMCTL_set_broken_page_p2m:
{
p2m_type_t pt;
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/page.h>
+#include <asm/p2m.h>
#include <public/domctl.h>
#include <xsm/xsm.h>
}
break;
+#ifdef HAS_MEM_ACCESS
+ case XEN_DOMCTL_set_access_required:
+ {
+ struct p2m_domain* p2m;
+
+ ret = -EPERM;
+ if ( current->domain == d )
+ break;
+
+ ret = 0;
+ p2m = p2m_get_hostp2m(d);
+ p2m->access_required = op->u.access_required.access_required;
+ }
+ break;
+#endif /* HAS_MEM_ACCESS */
+
case XEN_DOMCTL_set_virq_handler:
{
uint32_t virq = op->u.set_virq_handler.virq;
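[Annotation, not part of the patch: a minimal sketch of how a privileged toolstack might drive the relocated domctl. It assumes the usual libxc wrapper xc_domain_set_access_required(); the function name, parameter types, and error handling here are assumptions for illustration, not code introduced by this series.]

    #include <xenctrl.h>

    /* Illustrative only: ask the hypervisor to pause domid on an access fault
     * when no mem_event listener is attached, rather than silently lifting
     * the access restriction. */
    static int require_access_listener(xc_interface *xch, uint32_t domid)
    {
        /* required = 1 maps onto u.access_required.access_required above */
        return xc_domain_set_access_required(xch, domid, 1);
    }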
* at each p2m tree level. */
unsigned long shattered[4];
} stats;
+
+ /* If true, and an access fault comes in and there is no mem_event listener,
+ * pause domain. Otherwise, remove access restrictions. */
+ bool_t access_required;
};
/* List of possible type for each page in the p2m entry.
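[Annotation, not part of the patch: the new flag only matters on the access-fault path. A simplified sketch of the policy the comment describes, under the assumption of a fault handler shaped roughly like the x86 one; handle_access_fault() and relax_access() are illustrative stand-ins for the real mem_event/p2m plumbing, while vcpu_pause_nosync() is the existing Xen primitive.]

    /* Illustrative only: with no listener attached, either hold the faulting
     * vCPU (access_required set) or drop the restriction and let it run. */
    static void handle_access_fault(struct p2m_domain *p2m, struct vcpu *v,
                                    unsigned long gfn, bool_t listener_present)
    {
        if ( listener_present )
            return;                     /* forward the event to the listener (not shown) */

        if ( p2m->access_required )
            vcpu_pause_nosync(v);       /* hold the vCPU until a listener attaches */
        else
            relax_access(p2m, gfn);     /* assumed helper: reset the entry to full rwx */
    }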
bool_t writeable);
int arch_grant_unmap_page_identity(struct domain *d, unsigned long frame);
+/* get host p2m table */
+#define p2m_get_hostp2m(d) (&(d)->arch.p2m)
+
#endif /* _XEN_P2M_H */
/*