spin_lock(&domctl_lock);
- if ( xsm_domctl(op) )
- goto domctl_out;
-
switch ( op->cmd )
{
if ( d == NULL )
break;
+ ret = xsm_setvcpucontext(d);
+ if ( ret )
+ goto svc_out;
+
ret = -EINVAL;
if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
goto svc_out;
ret = -ESRCH;
if ( d != NULL )
{
+ ret = xsm_pausedomain(d);
+ if ( ret )
+ goto pausedomain_out;
+
ret = -EINVAL;
if ( d != current->domain )
{
domain_pause_by_systemcontroller(d);
ret = 0;
}
+ pausedomain_out:
rcu_unlock_domain(d);
}
}
ret = -ESRCH;
if ( d == NULL )
break;
+
+ ret = xsm_unpausedomain(d);
+ if ( ret )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+
domain_unpause_by_systemcontroller(d);
rcu_unlock_domain(d);
ret = 0;
if ( d == NULL )
break;
+ ret = xsm_resumedomain(d);
+ if ( ret )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+
domain_resume(d);
rcu_unlock_domain(d);
ret = 0;
if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
break;
+ ret = xsm_max_vcpus(d);
+ if ( ret )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+
/* Needed, for example, to ensure writable p.t. state is synced. */
domain_pause(d);
ret = -ESRCH;
if ( d != NULL )
{
- domain_kill(d);
+ ret = xsm_destroydomain(d) ? : domain_kill(d);
rcu_unlock_domain(d);
}
}
if ( d == NULL )
break;
+ ret = xsm_vcpuaffinity(op->cmd, d);
+ if ( ret )
+ goto vcpuaffinity_out;
+
ret = -EINVAL;
if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
goto vcpuaffinity_out;
if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
break;
+ ret = xsm_scheduler(d);
+ if ( ret )
+ goto scheduler_op_out;
+
ret = sched_adjust(d, &op->u.scheduler_op);
if ( copy_to_guest(u_domctl, op, 1) )
ret = -EFAULT;
+ scheduler_op_out:
rcu_unlock_domain(d);
}
break;
break;
}
+ ret = xsm_getdomaininfo(d);
+ if ( ret )
+ goto getdomaininfo_out;
+
getdomaininfo(d, &op->u.getdomaininfo);
op->domain = op->u.getdomaininfo.domain;
if ( copy_to_guest(u_domctl, op, 1) )
ret = -EFAULT;
+ getdomaininfo_out:
rcu_read_unlock(&domlist_read_lock);
}
break;
if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
break;
+ ret = xsm_getvcpucontext(d);
+ if ( ret )
+ goto getvcpucontext_out;
+
ret = -EINVAL;
if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
goto getvcpucontext_out;
if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
break;
+ ret = xsm_getvcpuinfo(d);
+ if ( ret )
+ goto getvcpuinfo_out;
+
ret = -EINVAL;
if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
goto getvcpuinfo_out;
if ( d == NULL )
break;
+ ret = xsm_setdomainmaxmem(d);
+ if ( ret )
+ goto max_mem_out;
+
ret = -EINVAL;
new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);
}
spin_unlock(&d->page_alloc_lock);
+ max_mem_out:
rcu_unlock_domain(d);
}
break;
if ( d == NULL )
break;
+ ret = xsm_setdomainhandle(d);
+ if ( ret )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+
memcpy(d->handle, op->u.setdomainhandle.handle,
sizeof(xen_domain_handle_t));
rcu_unlock_domain(d);
if ( d == NULL )
break;
+ ret = xsm_setdebugging(d);
+ if ( ret )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+
domain_pause(d);
d->debugger_attached = !!op->u.setdebugging.enable;
domain_unpause(d); /* causes guest to latch new status */
if ( d == NULL )
break;
+ ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
+ if ( ret )
+ goto irq_permission_out;
+
if ( op->u.irq_permission.allow_access )
ret = irq_permit_access(d, pirq);
else
ret = irq_deny_access(d, pirq);
+ irq_permission_out:
rcu_unlock_domain(d);
}
break;
if ( d == NULL )
break;
+ ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
+ if ( ret )
+ goto iomem_permission_out;
+
if ( op->u.iomem_permission.allow_access )
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
else
ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+ iomem_permission_out:
rcu_unlock_domain(d);
}
break;
d = rcu_lock_domain_by_id(op->domain);
if ( d != NULL )
{
+ ret = xsm_domain_settime(d);
+ if ( ret )
+ {
+ rcu_unlock_domain(d);
+ break;
+ }
+
d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
rcu_unlock_domain(d);
ret = 0;
break;
}
-domctl_out:
spin_unlock(&domctl_lock);
return ret;
struct xsm_operations {
void (*security_domaininfo) (struct domain *d,
- struct xen_domctl_getdomaininfo *info);
- int (*domctl) (struct xen_domctl *domctl);
+ struct xen_domctl_getdomaininfo *info);
+ int (*setvcpucontext) (struct domain *d);
+ int (*pausedomain) (struct domain *d);
+ int (*unpausedomain) (struct domain *d);
+ int (*resumedomain) (struct domain *d);
int (*domain_create) (struct domain *d, u32 ssidref);
+ int (*max_vcpus) (struct domain *d);
+ int (*destroydomain) (struct domain *d);
+ int (*vcpuaffinity) (int cmd, struct domain *d);
+ int (*scheduler) (struct domain *d);
int (*getdomaininfo) (struct domain *d);
+ int (*getvcpucontext) (struct domain *d);
+ int (*getvcpuinfo) (struct domain *d);
+ int (*domain_settime) (struct domain *d);
int (*tbufcontrol) (void);
int (*readconsole) (uint32_t clear);
int (*sched_id) (void);
+ int (*setdomainmaxmem) (struct domain *d);
+ int (*setdomainhandle) (struct domain *d);
+ int (*setdebugging) (struct domain *d);
+ int (*irq_permission) (struct domain *d, uint8_t pirq, uint8_t access);
+ int (*iomem_permission) (struct domain *d, unsigned long mfn,
+ uint8_t access);
int (*perfcontrol) (void);
int (*evtchn_unbound) (struct domain *d, struct evtchn *chn, domid_t id2);
int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1,
- struct domain *d2, struct evtchn *chn2);
+ struct domain *d2, struct evtchn *chn2);
void (*evtchn_close_post) (struct evtchn *chn);
int (*evtchn_send) (struct domain *d, struct evtchn *chn);
int (*evtchn_status) (struct domain *d, struct evtchn *chn);
xsm_call(security_domaininfo(d, info));
}
-static inline int xsm_domctl(struct xen_domctl *domctl)
+/* XSM hook: may the caller set a vcpu's context in domain d?
+ * Nonzero return denies the operation (checked before acting in do_domctl). */
+static inline int xsm_setvcpucontext (struct domain *d)
+{
+    return xsm_call(setvcpucontext(d));
+}
+
+/* XSM hook: may the caller pause domain d? */
+static inline int xsm_pausedomain (struct domain *d)
+{
+    return xsm_call(pausedomain(d));
+}
+
+/* XSM hook: may the caller unpause domain d? */
+static inline int xsm_unpausedomain (struct domain *d)
+{
+    return xsm_call(unpausedomain(d));
+}
+
+/* XSM hook: may the caller resume domain d? */
+static inline int xsm_resumedomain (struct domain *d)
{
-    return xsm_call(domctl(domctl));
+    return xsm_call(resumedomain(d));
}
-static inline int xsm_domain_create(struct domain *d, u32 ssidref)
+/* XSM hook: may the caller create domain d with the given ssidref? */
+static inline int xsm_domain_create (struct domain *d, u32 ssidref)
{
    return xsm_call(domain_create(d, ssidref));
}
-static inline int xsm_getdomaininfo(struct domain *d)
+/* XSM hook: may the caller change domain d's maximum vcpu count? */
+static inline int xsm_max_vcpus (struct domain *d)
+{
+    return xsm_call(max_vcpus(d));
+}
+
+/* XSM hook: may the caller destroy (kill) domain d? */
+static inline int xsm_destroydomain (struct domain *d)
+{
+    return xsm_call(destroydomain(d));
+}
+
+/* XSM hook: may the caller get/set vcpu affinity for domain d?
+ * cmd is the domctl command code selecting get vs. set. */
+static inline int xsm_vcpuaffinity (int cmd, struct domain *d)
+{
+    return xsm_call(vcpuaffinity(cmd, d));
+}
+
+/* XSM hook: may the caller adjust domain d's scheduler parameters? */
+static inline int xsm_scheduler (struct domain *d)
+{
+    return xsm_call(scheduler(d));
+}
+
+/* XSM hook: may the caller read domain d's info? */
+static inline int xsm_getdomaininfo (struct domain *d)
{
-    return xsm_call(domain_getdomaininfo(d));
+    return xsm_call(getdomaininfo(d));
+}
+
+/* XSM hook: may the caller read a vcpu's context from domain d? */
+static inline int xsm_getvcpucontext (struct domain *d)
+{
+    return xsm_call(getvcpucontext(d));
+}
+
+/* XSM hook: may the caller query vcpu info for domain d? */
+static inline int xsm_getvcpuinfo (struct domain *d)
+{
+    return xsm_call(getvcpuinfo(d));
+}
+
+/* XSM hook: may the caller set domain d's time offset? */
+static inline int xsm_domain_settime (struct domain *d)
+{
+    return xsm_call(domain_settime(d));
}
static inline int xsm_tbufcontrol (void)
return xsm_call(sched_id());
}
+/* XSM hook: may the caller change domain d's maximum memory limit? */
+static inline int xsm_setdomainmaxmem (struct domain *d)
+{
+    return xsm_call(setdomainmaxmem(d));
+}
+
+/* XSM hook: may the caller set domain d's opaque handle
+ * (xen_domain_handle_t)? */
+static inline int xsm_setdomainhandle (struct domain *d)
+{
+    return xsm_call(setdomainhandle(d));
+}
+
+/* XSM hook: may the caller enable/disable debugger attachment on d? */
+static inline int xsm_setdebugging (struct domain *d)
+{
+    return xsm_call(setdebugging(d));
+}
+
+/* XSM hook: may the caller grant (access != 0) or revoke domain d's
+ * access to physical IRQ pirq? */
+static inline int xsm_irq_permission (struct domain *d, uint8_t pirq,
+                                      uint8_t access)
+{
+    return xsm_call(irq_permission(d, pirq, access));
+}
+
+/* XSM hook: may the caller grant (access != 0) or revoke domain d's
+ * access to I/O memory starting at frame mfn? */
+static inline int xsm_iomem_permission (struct domain *d, unsigned long mfn,
+                                        uint8_t access)
+{
+    return xsm_call(iomem_permission(d, mfn, access));
+}
+
static inline int xsm_perfcontrol (void)
{
return xsm_call(perfcontrol());