getdomaininfo hypercall setvcpucontext setextvcpucontext
getscheduler getvcpuinfo getvcpuextstate getaddrsize
getaffinity setaffinity };
- allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn };
+ allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn set_vnumainfo get_vnumainfo };
allow $1 $2:security check_context;
allow $1 $2:shadow enable;
allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
allow $1 $2:domain { getdomaininfo getvcpuinfo getaffinity
getaddrsize pause unpause trigger shutdown destroy
setaffinity setdomainmaxmem getscheduler };
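+ # Allow updating the target's vNUMA topology (XEN_DOMCTL_setvnumainfo)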
+ allow $1 $2:domain2 set_vnumainfo;
')
# migrate_domain_out(priv, target)
getpodtarget setpodtarget set_misc_info set_virq_handler
};
allow dom0_t dom0_t:domain2 {
- set_cpuid gettsc settsc setscheduler set_max_evtchn
+ set_cpuid gettsc settsc setscheduler set_max_evtchn set_vnumainfo get_vnumainfo
};
allow dom0_t dom0_t:resource { add remove };
if ( (d = rcu_lock_domain_by_any_id(topology.domid)) == NULL )
return -ESRCH;
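+ /* Consult the XSM policy before exposing this domain's vNUMA topology. */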
+ rc = xsm_get_vnumainfo(XSM_TARGET, d);
+ if ( rc )
+ {
+ rcu_unlock_domain(d);
+ return rc;
+ }
+
read_lock(&d->vnuma_rwlock);
if ( d->vnuma == NULL )
return xsm_default_action(action, current->domain, d);
}
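+/*
+ * Default policy for XENMEM_get_vnumainfo: XSM_TARGET allows a domain to
+ * query its own vNUMA topology, lets its device model query it, and lets
+ * privileged domains query any domain.
+ */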
+static XSM_INLINE int xsm_get_vnumainfo(XSM_DEFAULT_ARG struct domain *d)
+{
+ XSM_ASSERT_ACTION(XSM_TARGET);
+ return xsm_default_action(action, current->domain, d);
+}
+
#if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
static XSM_INLINE int xsm_get_device_group(XSM_DEFAULT_ARG uint32_t machine_bdf)
{
int (*hvm_param) (struct domain *d, unsigned long op);
int (*hvm_control) (struct domain *d, unsigned long op);
int (*hvm_param_nested) (struct domain *d);
+ int (*get_vnumainfo) (struct domain *d);
#ifdef CONFIG_X86
int (*do_mca) (void);
return xsm_ops->hvm_param_nested(d);
}
+static inline int xsm_get_vnumainfo(xsm_default_t def, struct domain *d)
+{
+ return xsm_ops->get_vnumainfo(d);
+}
+
#ifdef CONFIG_X86
static inline int xsm_do_mca(xsm_default_t def)
{
{
return xsm_ops->ioport_mapping(d, s, e, allow);
}
+
#endif /* CONFIG_X86 */
#endif /* XSM_NO_WRAPPERS */
set_to_dummy_if_null(ops, iomem_permission);
set_to_dummy_if_null(ops, iomem_mapping);
set_to_dummy_if_null(ops, pci_config_permission);
+ set_to_dummy_if_null(ops, get_vnumainfo);
#if defined(HAS_PASSTHROUGH) && defined(HAS_PCI)
set_to_dummy_if_null(ops, get_device_group);
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM);
}
+static int flask_get_vnumainfo(struct domain *d)
+{
+ return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__GET_VNUMAINFO);
+}
+
static int flask_console_io(struct domain *d, int cmd)
{
u32 perm;
case XEN_DOMCTL_cacheflush:
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__CACHEFLUSH);
+ case XEN_DOMCTL_setvnumainfo:
+ return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_VNUMAINFO);
+
default:
printk("flask_domctl: Unknown op %d\n", cmd);
return -EPERM;
.hvm_param_nested = flask_hvm_param_nested,
.do_xsm_op = do_flask_op,
+ .get_vnumainfo = flask_get_vnumainfo,
+
#ifdef CONFIG_COMPAT
.do_compat_op = compat_flask_op,
#endif
cacheflush
# Creation of the hardware domain when it is not dom0
create_hardware_domain
+# XEN_DOMCTL_setvnumainfo
+ set_vnumainfo
+# XENMEM_getvnumainfo
+ get_vnumainfo
}
# Similar to class domain, but primarily contains domctls related to HVM domains