debug
getcpuinfo
heap
+ pm_op
+ mca_op
+ lockprof
+ cpupool_op
+ sched_op
}
class domain
setextvcpucontext
getvcpuextstate
setvcpuextstate
+ getpodtarget
+ setpodtarget
+ set_misc_info
}
class hvm
bind_irq
cacheattr
trackdirtyvram
+ hvmctl
+ mem_event
+ mem_sharing
}
class event
stat_device
add_device
remove_device
+ plug
+ unplug
+ setup
}
class security
if ( rc != 0 )
return rc;
+ if ( op == XENMEM_set_pod_target )
+ rc = xsm_set_pod_target(d);
+ else
+ rc = xsm_get_pod_target(d);
+
+ if ( rc != 0 )
+ goto pod_target_out_unlock;
+
if ( op == XENMEM_set_pod_target )
{
/* if -ENOSYS is returned,
if (!IS_PRIV(v->domain) )
return x86_mcerr(NULL, -EPERM);
+ ret = xsm_do_mca();
+ if ( ret )
+ return x86_mcerr(NULL, ret);
+
if ( copy_from_guest(op, u_xen_mc, 1) )
return x86_mcerr("do_mca: failed copyin of xen_mc_t", -EFAULT);
d = rcu_lock_domain_by_id(domctl->domain);
if ( d != NULL )
{
- ret = mem_event_domctl(d, &domctl->u.mem_event_op,
- guest_handle_cast(u_domctl, void));
+ ret = xsm_mem_event(d);
+ if ( !ret )
+ ret = mem_event_domctl(d, &domctl->u.mem_event_op,
+ guest_handle_cast(u_domctl, void));
rcu_unlock_domain(d);
copy_to_guest(u_domctl, domctl, 1);
}
d = rcu_lock_domain_by_id(domctl->domain);
if ( d != NULL )
{
- ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
+ ret = xsm_mem_sharing(d);
+ if ( !ret )
+ ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
rcu_unlock_domain(d);
copy_to_guest(u_domctl, domctl, 1);
}
d = rcu_lock_domain_by_id(domctl->domain);
if ( d != NULL )
{
- p2m = p2m_get_hostp2m(d);
- p2m->access_required = domctl->u.access_required.access_required;
+ ret = xsm_mem_event(d);
+ if ( !ret ) {
+ p2m = p2m_get_hostp2m(d);
+ p2m->access_required = domctl->u.access_required.access_required;
+ }
rcu_unlock_domain(d);
}
}
if ( rc != 0 )
return rc;
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail_getmemtype;
+
rc = -EINVAL;
if ( is_hvm_domain(d) )
{
a.mem_type = HVMMEM_mmio_dm;
rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
}
+
+ param_fail_getmemtype:
rcu_unlock_domain(d);
break;
}
if ( !is_hvm_domain(d) )
goto param_fail4;
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail4;
+
rc = -EINVAL;
if ( (a.first_pfn > domain_get_maximum_gpfn(d)) ||
((a.first_pfn + a.nr - 1) < a.first_pfn) ||
if ( !is_hvm_domain(d) )
goto param_fail5;
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail5;
+
rc = -EINVAL;
if ( (a.first_pfn > domain_get_maximum_gpfn(d)) ||
((a.first_pfn + a.nr - 1) < a.first_pfn) ||
if ( !is_hvm_domain(d) )
goto param_fail6;
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail6;
+
rc = -EINVAL;
if ( (a.pfn > domain_get_maximum_gpfn(d)) && a.pfn != ~0ull )
goto param_fail6;
if ( !is_hvm_domain(d) || !paging_mode_shadow(d) )
goto param_fail7;
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail7;
+
rc = 0;
pagetable_dying(d, a.gpa);
if ( !is_hvm_domain(d) )
goto param_fail8;
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail8;
+
rc = -ENOENT;
if ( tr.vcpuid >= d->max_vcpus || (v = d->vcpu[tr.vcpuid]) == NULL )
goto param_fail8;
/* Support DOMID_SELF? */
if ( !IS_PRIV(current->domain) )
- return -EINVAL;
+ return -EPERM;
if ( copy_from_guest(&target, arg, 1) )
return -EFAULT;
if ( rc != 0 )
return rc;
+ if ( op == XENMEM_set_pod_target )
+ rc = xsm_set_pod_target(d);
+ else
+ rc = xsm_get_pod_target(d);
+
+ if ( rc != 0 )
+ goto pod_target_out_unlock;
+
if ( op == XENMEM_set_pod_target )
{
if ( target.target_pages > d->max_pages )
#include <io_ports.h>
#include <public/physdev.h>
#include <xen/iommu.h>
+#include <xsm/xsm.h>
/* bitmap indicate which fixed map is free */
DEFINE_SPINLOCK(msix_fixmap_lock);
{
unsigned long flags;
int irq;
+ int ret;
struct msi_desc *entry, *tmp;
struct irq_desc *desc;
if (!pdev)
return -EINVAL;
+ ret = xsm_resource_setup_pci((pdev->seg << 16) | (pdev->bus << 8) | pdev->devfn);
+ if ( ret )
+ return ret;
+
list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
{
irq = entry->irq;
if ( !IS_PRIV(current->domain) )
break;
+ ret = xsm_resource_setup_misc();
+ if ( ret )
+ break;
+
ret = -EFAULT;
if ( copy_from_guest(&info, arg, 1) )
break;
ret = -EINVAL;
if ( setup_gsi.gsi < 0 || setup_gsi.gsi >= nr_irqs_gsi )
break;
+
+ ret = xsm_resource_setup_gsi(setup_gsi.gsi);
+ if ( ret )
+ break;
+
ret = mp_register_gsi(setup_gsi.gsi, setup_gsi.triggering,
setup_gsi.polarity);
break;
break;
case XENPF_set_processor_pminfo:
+ ret = xsm_setpminfo();
+ if ( ret )
+ break;
+
switch ( op->u.set_pminfo.type )
{
case XEN_PM_PX:
g_info = &op->u.pcpu_info;
+ ret = xsm_getcpuinfo();
+ if ( ret )
+ break;
+
if ( !get_cpu_maps() )
{
ret = -EBUSY;
{
int cpu = op->u.cpu_ol.cpuid;
+ ret = xsm_resource_plug_core();
+ if ( ret )
+ break;
+
if ( cpu >= nr_cpu_ids || !cpu_present(cpu) )
{
ret = -EINVAL;
break;
}
ret = continue_hypercall_on_cpu(
0, cpu_up_helper, (void *)(unsigned long)cpu);
break;
{
int cpu = op->u.cpu_ol.cpuid;
+ ret = xsm_resource_unplug_core();
+ if ( ret )
+ break;
+
if ( cpu == 0 )
{
ret = -EOPNOTSUPP;
break;
case XENPF_cpu_hotadd:
+ ret = xsm_resource_plug_core();
+ if ( ret )
+ break;
+
ret = cpu_add(op->u.cpu_add.apic_id,
op->u.cpu_add.acpi_id,
op->u.cpu_add.pxm);
break;
case XENPF_mem_hotadd:
+ ret = xsm_resource_plug_core();
+ if ( ret )
+ break;
+
ret = memory_add(op->u.mem_add.spfn,
op->u.mem_add.epfn,
op->u.mem_add.pxm);
uint32_t i, max_cpu_index, last_online_cpu;
xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
+ ret = xsm_physinfo();
+ if ( ret )
+ break;
+
last_online_cpu = cpumask_last(&cpu_online_map);
max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
ti->max_cpu_index = last_online_cpu;
uint32_t i, j, max_node_index, last_online_node;
xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
+ ret = xsm_physinfo();
+ if ( ret )
+ break;
+
last_online_node = last_node(node_online_map);
max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
ni->max_node_index = last_online_node;
switch ( sysctl->u.cpu_hotplug.op )
{
case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
+ ret = xsm_resource_plug_core();
+ if ( ret )
+ break;
ret = continue_hypercall_on_cpu(
0, cpu_up_helper, (void *)(unsigned long)cpu);
break;
case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
+ ret = xsm_resource_unplug_core();
+ if ( ret )
+ break;
ret = continue_hypercall_on_cpu(
0, cpu_down_helper, (void *)(unsigned long)cpu);
break;
d = rcu_lock_domain_by_id(op->domain);
if ( d != NULL )
{
- d->suspend_evtchn = op->u.subscribe.port;
+ ret = xsm_domctl(d, op->cmd);
+ if ( !ret )
+ d->suspend_evtchn = op->u.subscribe.port;
rcu_unlock_domain(d);
- ret = 0;
}
}
break;
ret = -ESRCH;
if ( (d = rcu_lock_domain_by_id(op->domain)) != NULL )
{
- d->disable_migrate = op->u.disable_migrate.disable;
+ ret = xsm_domctl(d, op->cmd);
+ if ( !ret )
+ d->disable_migrate = op->u.disable_migrate.disable;
rcu_unlock_domain(d);
- ret = 0;
}
}
break;
op.status = GNTST_general_error;
goto out1;
}
+ rc = xsm_grant_setup(current->domain, d);
+ if ( rc ) {
+ op.status = GNTST_permission_denied;
+ goto out1;
+ }
gt = d->grant_table;
rcu_unlock_domain(d);
return -EPERM;
}
+ if ( xsm_grant_query_size(current->domain, d) )
+ {
+ rcu_unlock_domain(d);
+ return -EPERM;
+ }
spin_lock(&d->grant_table->lock);
op.version = d->grant_table->gt_version;
spin_unlock(&d->grant_table->lock);
#ifdef LOCK_PROFILE
case XEN_SYSCTL_lockprof_op:
{
+ ret = xsm_lockprof();
+ if ( ret )
+ break;
+
ret = spinlock_profile_control(&op->u.lockprof_op);
if ( copy_to_guest(u_sysctl, op, 1) )
ret = -EFAULT;
uint32_t *status, *ptr;
unsigned long pfn;
+ ret = xsm_page_offline(op->u.page_offline.cmd);
+ if ( ret )
+ break;
+
ptr = status = xmalloc_bytes( sizeof(uint32_t) *
(op->u.page_offline.end -
op->u.page_offline.start + 1));
case XEN_SYSCTL_cpupool_op:
{
+ ret = xsm_cpupool_op();
+ if ( ret )
+ break;
+
ret = cpupool_do_sysctl(&op->u.cpupool_op);
if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
ret = -EFAULT;
case XEN_SYSCTL_scheduler_op:
{
+ ret = xsm_sched_op();
+ if ( ret )
+ break;
+
ret = sched_adjust_global(&op->u.scheduler_op);
if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
ret = -EFAULT;
((pdev->bus == bus) && (pdev->devfn == devfn)) )
continue;
+ if ( xsm_get_device_group((seg << 16) | (pdev->bus << 8) | pdev->devfn) )
+ continue;
+
sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
if ( (sdev_id == group_id) && (i < max_sdevs) )
{
bdf = 0;
bdf |= (pdev->bus & 0xff) << 16;
bdf |= (pdev->devfn & 0xff) << 8;
+
if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
{
spin_unlock(&pcidevs_lock);
u32 max_sdevs;
XEN_GUEST_HANDLE_64(uint32) sdevs;
+ ret = xsm_get_device_group(domctl->u.get_device_group.machine_sbdf);
+ if ( ret )
+ break;
+
ret = -EINVAL;
if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
break;
#include <xen/keyhandler.h>
#include <xen/radix-tree.h>
#include <xen/tasklet.h>
+#include <xsm/xsm.h>
#ifdef CONFIG_X86
#include <asm/msi.h>
#endif
struct pci_dev *pdev;
unsigned int slot = PCI_SLOT(devfn), func = PCI_FUNC(devfn);
const char *pdev_type;
- int ret = -ENOMEM;
+ int ret;
if (!info)
pdev_type = "device";
pdev_type = "device";
}
+ ret = xsm_resource_plug_pci((seg << 16) | (bus << 8) | devfn);
+ if ( ret )
+ return ret;
+
+ ret = -ENOMEM;
+
spin_lock(&pcidevs_lock);
pseg = alloc_pseg(seg);
if ( !pseg )
{
struct pci_seg *pseg = get_pseg(seg);
struct pci_dev *pdev;
- int ret = -ENODEV;
+ int ret;
+
+ ret = xsm_resource_unplug_pci((seg << 16) | (bus << 8) | devfn);
+ if ( ret )
+ return ret;
+
+ ret = -ENODEV;
if ( !pseg )
return -ENODEV;
int (*getvcpuinfo) (struct domain *d);
int (*domain_settime) (struct domain *d);
int (*set_target) (struct domain *d, struct domain *e);
+ int (*domctl) (struct domain *d, int cmd);
int (*tbufcontrol) (void);
int (*readconsole) (uint32_t clear);
int (*sched_id) (void);
int (*getcpuinfo) (void);
int (*availheap) (void);
int (*get_pmstat) (void);
+ int (*setpminfo) (void);
int (*pm_op) (void);
+ int (*do_mca) (void);
int (*evtchn_unbound) (struct domain *d, struct evtchn *chn, domid_t id2);
int (*evtchn_interdomain) (struct domain *d1, struct evtchn *chn1,
int (*alloc_security_evtchn) (struct evtchn *chn);
void (*free_security_evtchn) (struct evtchn *chn);
+ int (*get_pod_target) (struct domain *d);
+ int (*set_pod_target) (struct domain *d);
int (*memory_adjust_reservation) (struct domain *d1, struct domain *d2);
int (*memory_stat_reservation) (struct domain *d1, struct domain *d2);
int (*memory_pin_page) (struct domain *d, struct page_info *page);
int (*irq_permission) (struct domain *d, int pirq, uint8_t allow);
int (*iomem_permission) (struct domain *d, uint64_t s, uint64_t e, uint8_t allow);
+ int (*get_device_group) (uint32_t machine_bdf);
int (*test_assign_device) (uint32_t machine_bdf);
int (*assign_device) (struct domain *d, uint32_t machine_bdf);
int (*deassign_device) (struct domain *d, uint32_t machine_bdf);
+ int (*resource_plug_core) (void);
+ int (*resource_unplug_core) (void);
+ int (*resource_plug_pci) (uint32_t machine_bdf);
+ int (*resource_unplug_pci) (uint32_t machine_bdf);
+ int (*resource_setup_pci) (uint32_t machine_bdf);
+ int (*resource_setup_gsi) (int gsi);
+ int (*resource_setup_misc) (void);
+
+ int (*page_offline)(uint32_t cmd);
+ int (*lockprof)(void);
+ int (*cpupool_op)(void);
+ int (*sched_op)(void);
+
long (*__do_xsm_op) (XEN_GUEST_HANDLE(xsm_op_t) op);
#ifdef CONFIG_X86
int (*hvm_set_isa_irq_level) (struct domain *d);
int (*hvm_set_pci_link_route) (struct domain *d);
int (*hvm_inject_msi) (struct domain *d);
+ int (*mem_event) (struct domain *d);
+ int (*mem_sharing) (struct domain *d);
int (*apic) (struct domain *d, int cmd);
int (*xen_settime) (void);
int (*memtype) (uint32_t access);
int (*add_to_physmap) (struct domain *d1, struct domain *d2);
int (*sendtrigger) (struct domain *d);
int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
+ int (*unbind_pt_irq) (struct domain *d);
int (*pin_mem_cacheattr) (struct domain *d);
int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
int (*vcpuextstate) (struct domain *d, uint32_t cmd);
return xsm_call(set_target(d, e));
}
+/* XSM check for miscellaneous domctls; cmd is the XEN_DOMCTL_* number. */
+static inline int xsm_domctl (struct domain *d, int cmd)
+{
+ return xsm_call(domctl(d, cmd));
+}
+
static inline int xsm_tbufcontrol (void)
{
return xsm_call(tbufcontrol());
return xsm_call(get_pmstat());
}
+/* XSM check for XENPF_set_processor_pminfo. */
+static inline int xsm_setpminfo(void)
+{
+ return xsm_call(setpminfo());
+}
+
static inline int xsm_pm_op(void)
{
return xsm_call(pm_op());
}
+/* XSM check guarding the do_mca (machine-check architecture) hypercall. */
+static inline int xsm_do_mca(void)
+{
+ return xsm_call(do_mca());
+}
+
static inline int xsm_evtchn_unbound (struct domain *d1, struct evtchn *chn,
domid_t id2)
{
(void)xsm_call(free_security_evtchn(chn));
}
+/* XSM check for XENMEM_get_pod_target on domain d. */
+static inline int xsm_get_pod_target (struct domain *d)
+{
+ return xsm_call(get_pod_target(d));
+}
+
+/* XSM check for XENMEM_set_pod_target on domain d. */
+static inline int xsm_set_pod_target (struct domain *d)
+{
+ return xsm_call(set_pod_target(d));
+}
+
static inline int xsm_memory_adjust_reservation (struct domain *d1, struct
domain *d2)
{
return xsm_call(iomem_permission(d, s, e, allow));
}
+/* XSM check for querying a PCI device group; machine_bdf is seg<<16|bus<<8|devfn. */
+static inline int xsm_get_device_group(uint32_t machine_bdf)
+{
+ return xsm_call(get_device_group(machine_bdf));
+}
+
static inline int xsm_test_assign_device(uint32_t machine_bdf)
{
return xsm_call(test_assign_device(machine_bdf));
return xsm_call(deassign_device(d, machine_bdf));
}
+/*
+ * One-line dispatchers into the active XSM module for the new resource
+ * (PCI/core plug, unplug, setup), page-offline, lockprof, cpupool and
+ * scheduler hooks. xsm_call() resolves to the registered module's op.
+ */
+static inline int xsm_resource_plug_pci (uint32_t machine_bdf)
+{
+ return xsm_call(resource_plug_pci(machine_bdf));
+}
+
+static inline int xsm_resource_unplug_pci (uint32_t machine_bdf)
+{
+ return xsm_call(resource_unplug_pci(machine_bdf));
+}
+
+static inline int xsm_resource_plug_core (void)
+{
+ return xsm_call(resource_plug_core());
+}
+
+static inline int xsm_resource_unplug_core (void)
+{
+ return xsm_call(resource_unplug_core());
+}
+
+static inline int xsm_resource_setup_pci (uint32_t machine_bdf)
+{
+ return xsm_call(resource_setup_pci(machine_bdf));
+}
+
+static inline int xsm_resource_setup_gsi (int gsi)
+{
+ return xsm_call(resource_setup_gsi(gsi));
+}
+
+static inline int xsm_resource_setup_misc (void)
+{
+ return xsm_call(resource_setup_misc());
+}
+
+static inline int xsm_page_offline(uint32_t cmd)
+{
+ return xsm_call(page_offline(cmd));
+}
+
+static inline int xsm_lockprof(void)
+{
+ return xsm_call(lockprof());
+}
+
+static inline int xsm_cpupool_op(void)
+{
+ return xsm_call(cpupool_op());
+}
+
+static inline int xsm_sched_op(void)
+{
+ return xsm_call(sched_op());
+}
+
static inline long __do_xsm_op (XEN_GUEST_HANDLE(xsm_op_t) op)
{
#ifdef XSM_ENABLE
return xsm_call(hvm_inject_msi(d));
}
+/* XSM check for mem_event domctl operations on domain d. */
+static inline int xsm_mem_event (struct domain *d)
+{
+ return xsm_call(mem_event(d));
+}
+
+/* XSM check for mem_sharing domctl operations on domain d. */
+static inline int xsm_mem_sharing (struct domain *d)
+{
+ return xsm_call(mem_sharing(d));
+}
+
static inline int xsm_apic (struct domain *d, int cmd)
{
return xsm_call(apic(d, cmd));
return xsm_call(bind_pt_irq(d, bind));
}
+/* XSM check for unbinding a passthrough IRQ from domain d. */
+static inline int xsm_unbind_pt_irq(struct domain *d)
+{
+ return xsm_call(unbind_pt_irq(d));
+}
+
static inline int xsm_pin_mem_cacheattr(struct domain *d)
{
return xsm_call(pin_mem_cacheattr(d));
return rc;
}
+/* XENMEM_get_pod_target requires DOMAIN__GETPODTARGET on the target domain. */
+static int flask_get_pod_target(struct domain *d)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__GETPODTARGET);
+}
+
+/* XENMEM_set_pod_target requires DOMAIN__SETPODTARGET on the target domain. */
+static int flask_set_pod_target(struct domain *d)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SETPODTARGET);
+}
+
static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2)
{
return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
return domain_has_perm(d, e, SECCLASS_DOMAIN, DOMAIN__SET_TARGET);
}
+/*
+ * Catch-all for minor domctls (suspend-evtchn subscribe, disable_migrate).
+ * cmd is currently unused: every caller routed here maps to the single
+ * DOMAIN__SET_MISC_INFO permission.
+ */
+static int flask_domctl(struct domain *d, int cmd)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO);
+}
+
static int flask_tbufcontrol(void)
{
return domain_has_xen(current->domain, XEN__TBUFCONTROL);
return domain_has_xen(current->domain, XEN__HEAP);
}
+/*
+ * Power-management stat/info/control all share the single XEN__PM_OP
+ * permission; machine-check control gets its own XEN__MCA_OP.
+ */
+static int flask_get_pmstat(void)
+{
+ return domain_has_xen(current->domain, XEN__PM_OP);
+}
+
+static int flask_setpminfo(void)
+{
+ return domain_has_xen(current->domain, XEN__PM_OP);
+}
+
+static int flask_pm_op(void)
+{
+ return domain_has_xen(current->domain, XEN__PM_OP);
+}
+
+static int flask_do_mca(void)
+{
+ return domain_has_xen(current->domain, XEN__MCA_OP);
+}
+
static inline u32 resource_to_perm(uint8_t access)
{
if ( access )
return security_iterate_iomem_sids(start, end, _iomem_has_perm, &data);
}
+/*
+ * CPU/memory hotplug checks: plug/unplug/use are checked against the
+ * domxen initial SID (physical cores and pages carry no SID of their own).
+ * flask_resource_use_core is not an xsm hook itself; it backs the
+ * query case of flask_page_offline below.
+ */
+static int flask_resource_plug_core(void)
+{
+ struct domain_security_struct *ssec;
+
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, SECINITSID_DOMXEN, SECCLASS_RESOURCE, RESOURCE__PLUG, NULL);
+}
+
+static int flask_resource_unplug_core(void)
+{
+ struct domain_security_struct *ssec;
+
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, SECINITSID_DOMXEN, SECCLASS_RESOURCE, RESOURCE__UNPLUG, NULL);
+}
+
+static int flask_resource_use_core(void)
+{
+ struct domain_security_struct *ssec;
+
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, SECINITSID_DOMXEN, SECCLASS_RESOURCE, RESOURCE__USE, NULL);
+}
+
+/*
+ * PCI hot-plug: resolve the device's SID from its machine BDF
+ * (seg<<16|bus<<8|devfn), then check RESOURCE__PLUG. The BDF is recorded
+ * in the audit data so AVC denials identify the device.
+ */
+static int flask_resource_plug_pci(uint32_t machine_bdf)
+{
+ u32 rsid;
+ int rc = -EPERM;
+ struct avc_audit_data ad;
+ struct domain_security_struct *ssec;
+
+ rc = security_device_sid(machine_bdf, &rsid);
+ if ( rc )
+ return rc;
+
+ AVC_AUDIT_DATA_INIT(&ad, DEV);
+ ad.device = (unsigned long) machine_bdf;
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__PLUG, &ad);
+}
+
+/* PCI hot-unplug: same lookup as plug, checking RESOURCE__UNPLUG. */
+static int flask_resource_unplug_pci(uint32_t machine_bdf)
+{
+ u32 rsid;
+ int rc = -EPERM;
+ struct avc_audit_data ad;
+ struct domain_security_struct *ssec;
+
+ rc = security_device_sid(machine_bdf, &rsid);
+ if ( rc )
+ return rc;
+
+ AVC_AUDIT_DATA_INIT(&ad, DEV);
+ ad.device = (unsigned long) machine_bdf;
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__UNPLUG, &ad);
+}
+
+/* MSI teardown/setup on a PCI device: RESOURCE__SETUP on the device's SID. */
+static int flask_resource_setup_pci(uint32_t machine_bdf)
+{
+ u32 rsid;
+ int rc = -EPERM;
+ struct avc_audit_data ad;
+ struct domain_security_struct *ssec;
+
+ rc = security_device_sid(machine_bdf, &rsid);
+ if ( rc )
+ return rc;
+
+ AVC_AUDIT_DATA_INIT(&ad, DEV);
+ ad.device = (unsigned long) machine_bdf;
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__SETUP, &ad);
+}
+
+/* GSI registration: RESOURCE__SETUP on the IRQ's SID, gsi in the audit data. */
+static int flask_resource_setup_gsi(int gsi)
+{
+ u32 rsid;
+ int rc = -EPERM;
+ struct avc_audit_data ad;
+ struct domain_security_struct *ssec;
+
+ rc = security_irq_sid(gsi, &rsid);
+ if ( rc )
+ return rc;
+
+ AVC_AUDIT_DATA_INIT(&ad, IRQ);
+ ad.irq = gsi;
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__SETUP, &ad);
+}
+
+/* Miscellaneous platform setup: RESOURCE__SETUP against the xen initial SID. */
+static int flask_resource_setup_misc(void)
+{
+ struct domain_security_struct *ssec;
+
+ ssec = current->domain->ssid;
+ return avc_has_perm(ssec->sid, SECINITSID_XEN, SECCLASS_RESOURCE, RESOURCE__SETUP, NULL);
+}
+
+/*
+ * Map the page offline sysctl sub-commands onto the core resource checks:
+ * offline -> unplug, online -> plug, query -> use. Unknown commands are
+ * denied with -EPERM.
+ */
+static inline int flask_page_offline(uint32_t cmd)
+{
+ switch (cmd) {
+ case sysctl_page_offline:
+ return flask_resource_unplug_core();
+ case sysctl_page_online:
+ return flask_resource_plug_core();
+ case sysctl_query_page_offline:
+ return flask_resource_use_core();
+ default:
+ return -EPERM;
+ }
+}
+
+/* Sysctl hooks: each maps to its dedicated XEN__* permission. */
+static inline int flask_lockprof(void)
+{
+ return domain_has_xen(current->domain, XEN__LOCKPROF);
+}
+
+static inline int flask_cpupool_op(void)
+{
+ return domain_has_xen(current->domain, XEN__CPUPOOL_OP);
+}
+
+static inline int flask_sched_op(void)
+{
+ return domain_has_xen(current->domain, XEN__SCHED_OP);
+}
+
static int flask_perfcontrol(void)
{
return domain_has_xen(current->domain, XEN__PERFCONTROL);
case HVMOP_get_param:
perm = HVM__GETPARAM;
break;
+ case HVMOP_track_dirty_vram:
+ perm = HVM__TRACKDIRTYVRAM;
+ break;
default:
- return -EPERM;
+ perm = HVM__HVMCTL;
}
return domain_has_perm(current->domain, d, SECCLASS_HVM, perm);
return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__PCIROUTE);
}
+/* mem_event domctls require HVM__MEM_EVENT on the target domain. */
+static int flask_mem_event(struct domain *d)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__MEM_EVENT);
+}
+
+/* mem_sharing domctls require HVM__MEM_SHARING on the target domain. */
+static int flask_mem_sharing(struct domain *d)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__MEM_SHARING);
+}
+
static int flask_apic(struct domain *d, int cmd)
{
u32 perm;
return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__TRIGGER);
}
+/*
+ * Device-group queries: RESOURCE__STAT_DEVICE on the SID looked up from
+ * the machine BDF (no audit data recorded for this read-only check).
+ */
+static int flask_get_device_group(uint32_t machine_bdf)
+{
+ u32 rsid;
+ int rc = -EPERM;
+ struct domain_security_struct *ssec = current->domain->ssid;
+
+ rc = security_device_sid(machine_bdf, &rsid);
+ if ( rc )
+ return rc;
+
+ return avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, NULL);
+}
+
static int flask_test_assign_device(uint32_t machine_bdf)
{
u32 rsid;
return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__USE, &ad);
}
+/* Unbinding a passthrough IRQ requires RESOURCE__REMOVE on the domain pair. */
+static int flask_unbind_pt_irq (struct domain *d)
+{
+ return domain_has_perm(current->domain, d, SECCLASS_RESOURCE, RESOURCE__REMOVE);
+}
+
static int flask_pin_mem_cacheattr (struct domain *d)
{
return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__CACHEATTR);
.getvcpuinfo = flask_getvcpuinfo,
.domain_settime = flask_domain_settime,
.set_target = flask_set_target,
+ .domctl = flask_domctl,
.tbufcontrol = flask_tbufcontrol,
.readconsole = flask_readconsole,
.sched_id = flask_sched_id,
.debug_keys = flask_debug_keys,
.getcpuinfo = flask_getcpuinfo,
.availheap = flask_availheap,
+ .get_pmstat = flask_get_pmstat,
+ .setpminfo = flask_setpminfo,
+ .pm_op = flask_pm_op,
+ .do_mca = flask_do_mca,
.evtchn_unbound = flask_evtchn_unbound,
.evtchn_interdomain = flask_evtchn_interdomain,
.alloc_security_evtchn = flask_alloc_security_evtchn,
.free_security_evtchn = flask_free_security_evtchn,
+ .get_pod_target = flask_get_pod_target,
+ .set_pod_target = flask_set_pod_target,
.memory_adjust_reservation = flask_memory_adjust_reservation,
.memory_stat_reservation = flask_memory_stat_reservation,
.memory_pin_page = flask_memory_pin_page,
.irq_permission = flask_irq_permission,
.iomem_permission = flask_iomem_permission,
+ .resource_plug_core = flask_resource_plug_core,
+ .resource_unplug_core = flask_resource_unplug_core,
+ .resource_plug_pci = flask_resource_plug_pci,
+ .resource_unplug_pci = flask_resource_unplug_pci,
+ .resource_setup_pci = flask_resource_setup_pci,
+ .resource_setup_gsi = flask_resource_setup_gsi,
+ .resource_setup_misc = flask_resource_setup_misc,
+
+ .page_offline = flask_page_offline,
+ .lockprof = flask_lockprof,
+ .cpupool_op = flask_cpupool_op,
+ .sched_op = flask_sched_op,
+
.__do_xsm_op = do_flask_op,
#ifdef CONFIG_X86
.hvm_set_pci_intx_level = flask_hvm_set_pci_intx_level,
.hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
.hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
+ .mem_event = flask_mem_event,
+ .mem_sharing = flask_mem_sharing,
.apic = flask_apic,
.xen_settime = flask_xen_settime,
.memtype = flask_memtype,
.update_va_mapping = flask_update_va_mapping,
.add_to_physmap = flask_add_to_physmap,
.sendtrigger = flask_sendtrigger,
+ .get_device_group = flask_get_device_group,
.test_assign_device = flask_test_assign_device,
.assign_device = flask_assign_device,
.deassign_device = flask_deassign_device,
.bind_pt_irq = flask_bind_pt_irq,
+ .unbind_pt_irq = flask_unbind_pt_irq,
.pin_mem_cacheattr = flask_pin_mem_cacheattr,
.ext_vcpucontext = flask_ext_vcpucontext,
.vcpuextstate = flask_vcpuextstate,
S_(SECCLASS_XEN, XEN__DEBUG, "debug")
S_(SECCLASS_XEN, XEN__GETCPUINFO, "getcpuinfo")
S_(SECCLASS_XEN, XEN__HEAP, "heap")
+ S_(SECCLASS_XEN, XEN__PM_OP, "pm_op")
+ S_(SECCLASS_XEN, XEN__MCA_OP, "mca_op")
+ S_(SECCLASS_XEN, XEN__LOCKPROF, "lockprof")
+ S_(SECCLASS_XEN, XEN__CPUPOOL_OP, "cpupool_op")
+ S_(SECCLASS_XEN, XEN__SCHED_OP, "sched_op")
S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUCONTEXT, "setvcpucontext")
S_(SECCLASS_DOMAIN, DOMAIN__PAUSE, "pause")
S_(SECCLASS_DOMAIN, DOMAIN__UNPAUSE, "unpause")
S_(SECCLASS_DOMAIN, DOMAIN__SETEXTVCPUCONTEXT, "setextvcpucontext")
S_(SECCLASS_DOMAIN, DOMAIN__GETVCPUEXTSTATE, "getvcpuextstate")
S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUEXTSTATE, "setvcpuextstate")
+ S_(SECCLASS_DOMAIN, DOMAIN__GETPODTARGET, "getpodtarget")
+ S_(SECCLASS_DOMAIN, DOMAIN__SETPODTARGET, "setpodtarget")
+ S_(SECCLASS_DOMAIN, DOMAIN__SET_MISC_INFO, "set_misc_info")
S_(SECCLASS_HVM, HVM__SETHVMC, "sethvmc")
S_(SECCLASS_HVM, HVM__GETHVMC, "gethvmc")
S_(SECCLASS_HVM, HVM__SETPARAM, "setparam")
S_(SECCLASS_HVM, HVM__BIND_IRQ, "bind_irq")
S_(SECCLASS_HVM, HVM__CACHEATTR, "cacheattr")
S_(SECCLASS_HVM, HVM__TRACKDIRTYVRAM, "trackdirtyvram")
+ S_(SECCLASS_HVM, HVM__HVMCTL, "hvmctl")
+ S_(SECCLASS_HVM, HVM__MEM_EVENT, "mem_event")
+ S_(SECCLASS_HVM, HVM__MEM_SHARING, "mem_sharing")
S_(SECCLASS_EVENT, EVENT__BIND, "bind")
S_(SECCLASS_EVENT, EVENT__SEND, "send")
S_(SECCLASS_EVENT, EVENT__STATUS, "status")
S_(SECCLASS_RESOURCE, RESOURCE__STAT_DEVICE, "stat_device")
S_(SECCLASS_RESOURCE, RESOURCE__ADD_DEVICE, "add_device")
S_(SECCLASS_RESOURCE, RESOURCE__REMOVE_DEVICE, "remove_device")
+ S_(SECCLASS_RESOURCE, RESOURCE__PLUG, "plug")
+ S_(SECCLASS_RESOURCE, RESOURCE__UNPLUG, "unplug")
+ S_(SECCLASS_RESOURCE, RESOURCE__SETUP, "setup")
S_(SECCLASS_SECURITY, SECURITY__COMPUTE_AV, "compute_av")
S_(SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE, "compute_create")
S_(SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER, "compute_member")
#define XEN__DEBUG 0x00400000UL
#define XEN__GETCPUINFO 0x00800000UL
#define XEN__HEAP 0x01000000UL
+#define XEN__PM_OP 0x02000000UL
+#define XEN__MCA_OP 0x04000000UL
+#define XEN__LOCKPROF 0x08000000UL
+#define XEN__CPUPOOL_OP 0x10000000UL
+#define XEN__SCHED_OP 0x20000000UL
#define DOMAIN__SETVCPUCONTEXT 0x00000001UL
#define DOMAIN__PAUSE 0x00000002UL
#define DOMAIN__SETEXTVCPUCONTEXT 0x02000000UL
#define DOMAIN__GETVCPUEXTSTATE 0x04000000UL
#define DOMAIN__SETVCPUEXTSTATE 0x08000000UL
+#define DOMAIN__GETPODTARGET 0x10000000UL
+#define DOMAIN__SETPODTARGET 0x20000000UL
+#define DOMAIN__SET_MISC_INFO 0x40000000UL
#define HVM__SETHVMC 0x00000001UL
#define HVM__GETHVMC 0x00000002UL
#define HVM__BIND_IRQ 0x00000080UL
#define HVM__CACHEATTR 0x00000100UL
#define HVM__TRACKDIRTYVRAM 0x00000200UL
+#define HVM__HVMCTL 0x00000400UL
+#define HVM__MEM_EVENT 0x00000800UL
+#define HVM__MEM_SHARING 0x00001000UL
#define EVENT__BIND 0x00000001UL
#define EVENT__SEND 0x00000002UL
#define RESOURCE__STAT_DEVICE 0x00000200UL
#define RESOURCE__ADD_DEVICE 0x00000400UL
#define RESOURCE__REMOVE_DEVICE 0x00000800UL
+#define RESOURCE__PLUG 0x00001000UL
+#define RESOURCE__UNPLUG 0x00002000UL
+#define RESOURCE__SETUP 0x00004000UL
#define SECURITY__COMPUTE_AV 0x00000001UL
#define SECURITY__COMPUTE_CREATE 0x00000002UL