getdomaininfo hypercall setvcpucontext setextvcpucontext
getscheduler getvcpuinfo getvcpuextstate getaddrsize
getaffinity setaffinity };
- allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim };
+ allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn };
allow $1 $2:security check_context;
allow $1 $2:shadow enable;
allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
getpodtarget setpodtarget set_misc_info set_virq_handler
};
allow dom0_t dom0_t:domain2 {
- set_cpuid gettsc settsc setscheduler
+ set_cpuid gettsc settsc setscheduler set_max_evtchn
};
allow dom0_t dom0_t:resource { add remove };
}
        break;

+    case XEN_DOMCTL_set_max_evtchn:
+    {
+        d->max_evtchn_port = min_t(unsigned int,
+                                   op->u.set_max_evtchn.max_port,
+                                   INT_MAX);
+    }
+    break;
+
    default:
        ret = arch_do_domctl(op, d, u_domctl);
        break;
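A note on the INT_MAX clamp in the handler above: port numbers are handled as signed int elsewhere in event_channel.c (get_free_port() returns the port as an int), so the clamp presumably keeps the stored limit representable in that type. The standalone snippet below illustrates the effect; it is not Xen code, and min_t is spelled out by hand since the macro is not shown in this hunk.

#include <limits.h>
#include <stdio.h>

/* Hand-rolled stand-in for Xen's min_t(type, x, y) macro. */
#define min_t(type, x, y) \
    ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
    unsigned int max_port = 0xffffffffu; /* toolstack-supplied value */

    /* Same clamp as the domctl handler: anything above INT_MAX is
     * stored as INT_MAX, so later comparisons against an int-typed
     * port cannot misbehave. */
    unsigned int limit = min_t(unsigned int, max_port, INT_MAX);

    /* Prints: requested 0xffffffff, stored 0x7fffffff */
    printf("requested %#x, stored %#x\n", max_port, limit);
    return 0;
}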
        return -EINVAL;

    for ( port = 0; port_is_valid(d, port); port++ )
+    {
+        if ( port > d->max_evtchn_port )
+            return -ENOSPC;
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;
+    }

-    if ( port == d->max_evtchns )
+    if ( port == d->max_evtchns || port > d->max_evtchn_port )
        return -ENOSPC;

    if ( !group_from_port(d, port) )
int evtchn_init(struct domain *d)
{
    evtchn_2l_init(d);
+    d->max_evtchn_port = INT_MAX; /* no limit until the domctl lowers it */

    d->evtchn = alloc_evtchn_bucket(d, 0);
    if ( !d->evtchn )
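To see the two new checks in get_free_port() working together, here is a self-contained model of the allocator. All types and names below are stand-ins invented for illustration, and Xen's bucket allocation is reduced to extending a flat array.

#include <stdio.h>

#define MAX_EVTCHNS 4096 /* stand-in for d->max_evtchns (ABI limit) */

struct fake_domain {
    unsigned char in_use[MAX_EVTCHNS]; /* 1 = port allocated */
    unsigned int valid;                /* ports with backing storage */
    unsigned int max_evtchn_port;      /* limit set by the new domctl */
};

/* Mirrors the patched get_free_port(): scan existing ports, honouring
 * the per-domain cap, then try to extend. Returns -1 for -ENOSPC. */
static int model_get_free_port(struct fake_domain *d)
{
    unsigned int port;

    for ( port = 0; port < d->valid; port++ )
    {
        if ( port > d->max_evtchn_port )
            return -1;
        if ( !d->in_use[port] )
            return port;
    }

    if ( port == MAX_EVTCHNS || port > d->max_evtchn_port )
        return -1;

    d->valid = port + 1; /* Xen would allocate a new bucket here */
    return port;
}

int main(void)
{
    struct fake_domain d = { .max_evtchn_port = 2 };
    int i;

    for ( i = 0; i < 5; i++ )
    {
        int p = model_get_free_port(&d);
        if ( p >= 0 )
            d.in_use[p] = 1;
        printf("allocation %d -> port %d\n", i, p);
    }
    return 0; /* ports 0, 1, 2 succeed; the rest fail with -1 */
}

Note that the limit is inclusive: max_evtchn_port = 2 permits ports 0 through 2.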
typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+/*
+ * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
+ * number the guest may use.  Use this to limit the amount of resources
+ * (global mapping space, xenheap) a guest may use for event channels.
+ */
+struct xen_domctl_set_max_evtchn {
+    uint32_t max_port;
+};
+typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
+
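The public header above is what a toolstack targets. A libxc-style wrapper would look like the sketch below; the wrapper name and its presence in libxc are assumptions here, not part of this hunk, and the code assumes it lives inside libxc (e.g. xc_domain.c), where the DECLARE_DOMCTL and do_domctl() private helpers are in scope.

/* Illustrative libxc wrapper for the new domctl; a sketch only. */
int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
                             uint32_t max_port)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_set_max_evtchn;
    domctl.domain = domid;
    domctl.u.set_max_evtchn.max_port = max_port;

    return do_domctl(xch, &domctl);
}

A toolstack would issue this once at domain build time, before the guest brings up its event channels; lowering the limit later does not revoke ports that are already bound, it only stops new ones from being allocated above the cap.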
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
#define XEN_DOMCTL_set_broken_page_p2m 67
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
+#define XEN_DOMCTL_set_max_evtchn 70
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
struct xen_domctl_set_virq_handler set_virq_handler;
+ struct xen_domctl_set_max_evtchn set_max_evtchn;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
    struct evtchn   *evtchn;                         /* first bucket only */
    struct evtchn  **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
    unsigned int     max_evtchns;
+    unsigned int     max_evtchn_port; /* max permitted port number */
    spinlock_t       event_lock;
    const struct evtchn_port_ops *evtchn_port_ops;
    struct evtchn_fifo_domain *evtchn_fifo;
    case XEN_DOMCTL_audit_p2m:
        return current_has_perm(d, SECCLASS_HVM, HVM__AUDIT_P2M);

+    case XEN_DOMCTL_set_max_evtchn:
+        return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_MAX_EVTCHN);
+
    default:
        printk("flask_domctl: Unknown op %d\n", cmd);
        return -EPERM;
setscheduler
# XENMEM_claim_pages
setclaim
+# XEN_DOMCTL_set_max_evtchn
+ set_max_evtchn
}
# Similar to class domain, but primarily contains domctls related to HVM domains