return ret;
}
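+/* Return the size in bytes of a cpumap covering all cpus known to the hypervisor. */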
+static int get_cpumap_size(xc_interface *xch)
+{
+ return (xc_get_max_cpus(xch) + 7) / 8;
+}
+
int xc_cpupool_create(xc_interface *xch,
uint32_t *ppoolid,
uint32_t sched_id)
return do_sysctl_save(xch, &sysctl);
}
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info)
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid)
{
int err = 0;
- int p;
- uint32_t poolid = first_poolid;
- uint8_t local[sizeof (info->cpumap)];
+ xc_cpupoolinfo_t *info;
+ uint8_t *local;
+ int local_size;
+ int cpumap_size;
+ int size;
DECLARE_SYSCTL;
- memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t));
+ local_size = get_cpumap_size(xch);
+ if (!local_size)
+ {
+ PERROR("Could not get number of cpus");
+ return NULL;
+ }
+ local = alloca(local_size);
+ cpumap_size = (local_size + sizeof(*info->cpumap) - 1) / sizeof(*info->cpumap);
+ size = sizeof(xc_cpupoolinfo_t) + cpumap_size * sizeof(*info->cpumap);
+ info = malloc(size);
+ if ( !info )
+ return NULL;
+
+ memset(info, 0, size);
+ info->cpumap_size = local_size * 8;
+ info->cpumap = (uint64_t *)(info + 1);
+
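+ /* Query the cpupool with the lowest id >= poolid; Xen writes its cpumap
+ * into the bounce buffer 'local'. */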
+ sysctl.cmd = XEN_SYSCTL_cpupool_op;
+ sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
+ sysctl.u.cpupool_op.cpupool_id = poolid;
+ set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
+ sysctl.u.cpupool_op.cpumap.nr_cpus = local_size * 8;
+
+ if ( (err = lock_pages(xch, local, local_size)) != 0 )
+ {
+ PERROR("Could not lock memory for Xen hypercall");
+ free(info);
+ return NULL;
+ }
+ err = do_sysctl_save(xch, &sysctl);
+ unlock_pages(xch, local, local_size);
- for (p = 0; p < n_max; p++)
+ if ( err < 0 )
{
- sysctl.cmd = XEN_SYSCTL_cpupool_op;
- sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_INFO;
- sysctl.u.cpupool_op.cpupool_id = poolid;
- set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8;
-
- if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
- {
- PERROR("Could not lock memory for Xen hypercall");
- break;
- }
- err = do_sysctl_save(xch, &sysctl);
- unlock_pages(xch, local, sizeof (local));
-
- if ( err < 0 )
- break;
-
- info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
- info->sched_id = sysctl.u.cpupool_op.sched_id;
- info->n_dom = sysctl.u.cpupool_op.n_dom;
- bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8);
- poolid = sysctl.u.cpupool_op.cpupool_id + 1;
- info++;
+ free(info);
+ return NULL;
}
- if ( p == 0 )
- return err;
+ info->cpupool_id = sysctl.u.cpupool_op.cpupool_id;
+ info->sched_id = sysctl.u.cpupool_op.sched_id;
+ info->n_dom = sysctl.u.cpupool_op.n_dom;
+ bitmap_byte_to_64(info->cpumap, local, local_size * 8);
- return p;
+ return info;
}
int xc_cpupool_addcpu(xc_interface *xch,
return do_sysctl_save(xch, &sysctl);
}
-int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap)
+uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
+ int *cpusize)
{
int err;
- uint8_t local[sizeof (*cpumap)];
+ uint8_t *local;
+ uint64_t *cpumap;
DECLARE_SYSCTL;
+ *cpusize = get_cpumap_size(xch);
+ if (*cpusize == 0)
+ {
+ PERROR("Could not get number of cpus");
+ return NULL;
+ }
+ local = alloca(*cpusize);
+ cpumap = calloc((*cpusize + sizeof(*cpumap) - 1) / sizeof(*cpumap), sizeof(*cpumap));
+ if (cpumap == NULL)
+ return NULL;
+
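+ /* Xen returns the bitmap of cpus not assigned to any cpupool via 'local'. */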
sysctl.cmd = XEN_SYSCTL_cpupool_op;
sysctl.u.cpupool_op.op = XEN_SYSCTL_CPUPOOL_OP_FREEINFO;
set_xen_guest_handle(sysctl.u.cpupool_op.cpumap.bitmap, local);
- sysctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
+ sysctl.u.cpupool_op.cpumap.nr_cpus = *cpusize * 8;
- if ( (err = lock_pages(xch, local, sizeof(local))) != 0 )
+ if ( (err = lock_pages(xch, local, *cpusize)) != 0 )
{
PERROR("Could not lock memory for Xen hypercall");
- return err;
+ free(cpumap);
+ return NULL;
}
err = do_sysctl_save(xch, &sysctl);
- unlock_pages(xch, local, sizeof (local));
+ unlock_pages(xch, local, *cpusize);
- if (err < 0)
- return err;
+ if (err < 0)
+ {
+ free(cpumap);
+ return NULL;
+ }
- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
-
- return 0;
+ bitmap_byte_to_64(cpumap, local, *cpusize * 8);
+ return cpumap;
}
#include "xc_private.h"
#include <xen/hvm/hvm_op.h>
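+/* Return the maximum number of cpus supported by the hypervisor; the value
+ * is cached after the first successful query, 0 is returned on failure. */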
+int xc_get_max_cpus(xc_interface *xch)
+{
+ static int max_cpus = 0;
+ xc_physinfo_t physinfo;
+
+ if ( max_cpus )
+ return max_cpus;
+
+ if ( !xc_physinfo(xch, &physinfo) )
+ max_cpus = physinfo.max_cpu_id + 1;
+
+ return max_cpus;
+}
+
int xc_readconsolering(xc_interface *xch,
char *buffer,
unsigned int *pnr_chars,
} start_info_any_t;
+/* return maximum number of cpus the hypervisor supports */
+int xc_get_max_cpus(xc_interface *xch);
+
int xc_domain_create(xc_interface *xch,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t cpupool_id;
uint32_t sched_id;
uint32_t n_dom;
- uint64_t cpumap;
+ uint32_t cpumap_size; /* max number of cpus in map */
+ uint64_t *cpumap;
} xc_cpupoolinfo_t;
/**
- * Get cpupool info. Returns info for up to the specified number of cpupools
- * starting at the given id.
+ * Get info for a cpupool: returns info for the cpupool with the lowest id
+ * not less than poolid.
 * @parm xc_handle a handle to an open hypervisor interface
- * @parm first_poolid lowest id for which info is returned
- * @parm n_max maximum number of cpupools to return info
- * @parm info pointer to xc_cpupoolinfo_t array
- * return number of cpupool infos
+ * @parm poolid lowest id for which info is returned
+ * return cpupool info ptr (obtained via malloc, to be freed by the caller),
+ *        NULL on error
*/
-int xc_cpupool_getinfo(xc_interface *xch,
- uint32_t first_poolid,
- uint32_t n_max,
- xc_cpupoolinfo_t *info);
+xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
+ uint32_t poolid);
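+/*
+ * Illustrative sketch (not part of the interface contract): enumerating all
+ * cpupools with the new single-pool call; 'xch' is assumed to be an open
+ * handle. The returned structure must be freed by the caller.
+ *
+ *   xc_cpupoolinfo_t *info;
+ *   uint32_t poolid = 0;
+ *
+ *   while ( (info = xc_cpupool_getinfo(xch, poolid)) != NULL )
+ *   {
+ *       poolid = info->cpupool_id + 1;
+ *       free(info);
+ *   }
+ */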
/**
* Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
* Return map of cpus not in any cpupool.
*
* @parm xc_handle a handle to an open hypervisor interface
- * @parm cpumap pointer where to store the cpumap
- * return 0 on success, -1 on failure
+ * @parm cpusize where to store the size of the returned array in bytes
+ * return cpumap array (obtained via malloc, to be freed by the caller) on
+ *        success, NULL otherwise
*/
-int xc_cpupool_freeinfo(xc_interface *xch,
- uint64_t *cpumap);
+uint64_t *xc_cpupool_freeinfo(xc_interface *xch,
+ int *cpusize);
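+/*
+ * Illustrative sketch: the returned map is allocated with malloc() and must
+ * be freed by the caller; *cpusize reports its size in bytes.
+ *
+ *   int cpusize;
+ *   uint64_t *cpumap = xc_cpupool_freeinfo(xch, &cpusize);
+ *
+ *   if ( cpumap != NULL )
+ *       free(cpumap);
+ */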
/*
libxl_poolinfo * libxl_list_pool(libxl_ctx *ctx, int *nb_pool)
{
- libxl_poolinfo *ptr;
- int i, ret;
- xc_cpupoolinfo_t info[256];
- int size = 256;
+ libxl_poolinfo *ptr, *tmp;
+ int i;
+ xc_cpupoolinfo_t *info;
+ uint32_t poolid;
- ptr = calloc(size, sizeof(libxl_poolinfo));
- if (!ptr) {
- LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
- return NULL;
- }
+ ptr = NULL;
- ret = xc_cpupool_getinfo(ctx->xch, 0, 256, info);
- if (ret<0) {
- LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting cpupool info");
- return NULL;
+ poolid = 0;
+ for (i = 0;; i++) {
+ info = xc_cpupool_getinfo(ctx->xch, poolid);
+ if (info == NULL)
+ break;
+ tmp = realloc(ptr, (i + 1) * sizeof(libxl_poolinfo));
+ if (!tmp) {
+ LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "allocating cpupool info");
+ free(ptr);
+ return NULL;
+ }
+ ptr = tmp;
+ ptr[i].poolid = info->cpupool_id;
+ poolid = info->cpupool_id + 1;
+ free(info);
}
- for (i = 0; i < ret; i++) {
- ptr[i].poolid = info[i].cpupool_id;
- }
- *nb_pool = ret;
+ *nb_pool = i;
return ptr;
}
libxl_vcpuinfo *ptr, *ret;
xc_domaininfo_t domaininfo;
xc_vcpuinfo_t vcpuinfo;
- xc_physinfo_t physinfo = { 0 };
unsigned num_cpuwords;
if (xc_domain_getinfolist(ctx->xch, domid, 1, &domaininfo) != 1) {
LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting infolist");
return NULL;
}
- if (xc_physinfo(ctx->xch, &physinfo) == -1) {
- LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting physinfo");
- return NULL;
- }
- *nrcpus = physinfo.max_cpu_id + 1;
+ *nrcpus = xc_get_max_cpus(ctx->xch);
+ if (*nrcpus == 0) {
+ LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting max cpu count");
+ return NULL;
+ }
ret = ptr = calloc(domaininfo.max_vcpu_id + 1, sizeof (libxl_vcpuinfo));
if (!ptr) {
return NULL;
}
- num_cpuwords = ((physinfo.max_cpu_id + 64) / 64);
+ num_cpuwords = ((*nrcpus + 63) / 64);
for (*nb_vcpu = 0; *nb_vcpu <= domaininfo.max_vcpu_id; ++*nb_vcpu, ++ptr) {
ptr->cpumap = malloc(num_cpuwords * sizeof(*ptr->cpumap));
if (!ptr->cpumap) {
int libxl_domain_destroy(libxl_ctx *ctx, uint32_t domid, int force);
int libxl_domain_preserve(libxl_ctx *ctx, uint32_t domid, libxl_domain_create_info *info, const char *name_suffix, libxl_uuid new_uuid);
+/* get max. number of cpus supported by hypervisor */
+int libxl_get_max_cpus(libxl_ctx *ctx);
+
/*
* Run the configured bootloader for a PV domain and update
* info->kernel, info->u.pv.ramdisk and info->u.pv.cmdline as
libxl__free_all(&gc);
return rc;
}
+
+int libxl_get_max_cpus(libxl_ctx *ctx)
+{
+ return xc_get_max_cpus(ctx->xch);
+}
static void vcpupin(char *d, const char *vcpu, char *cpu)
{
libxl_vcpuinfo *vcpuinfo;
- libxl_physinfo physinfo;
uint64_t *cpumap = NULL;
uint32_t vcpuid, cpuida, cpuidb;
char *endptr, *toka, *tokb;
- int i, nb_vcpu, cpusize;
+ int i, nb_vcpu, cpusize, cpumapsize;
vcpuid = strtoul(vcpu, &endptr, 10);
if (vcpu == endptr) {
find_domain(d);
- if (libxl_get_physinfo(&ctx, &physinfo) != 0) {
- fprintf(stderr, "libxl_get_physinfo failed.\n");
+ if ((cpusize = libxl_get_max_cpus(&ctx)) == 0) {
+ fprintf(stderr, "libxl_get_max_cpus failed.\n");
goto vcpupin_out1;
}
+ cpumapsize = (cpusize + sizeof (uint64_t) - 1) / sizeof (uint64_t);
- cpumap = calloc(physinfo.max_cpu_id + 1, sizeof (uint64_t));
+ cpumap = calloc(cpumapsize, sizeof (uint64_t));
if (!cpumap) {
goto vcpupin_out1;
}
}
}
else {
- memset(cpumap, -1, sizeof (uint64_t) * (physinfo.max_cpu_id + 1));
+ memset(cpumap, -1, sizeof (uint64_t) * cpumapsize);
}
if (vcpuid != -1) {
if (libxl_set_vcpuaffinity(&ctx, domid, vcpuid,
- cpumap, physinfo.max_cpu_id + 1) == -1) {
+ cpumap, cpusize) == -1) {
fprintf(stderr, "Could not set affinity for vcpu `%u'.\n", vcpuid);
}
}
else {
- if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &cpusize))) {
+ if (!(vcpuinfo = libxl_list_vcpu(&ctx, domid, &nb_vcpu, &i))) {
fprintf(stderr, "libxl_list_vcpu failed.\n");
goto vcpupin_out;
}
for (; nb_vcpu > 0; --nb_vcpu, ++vcpuinfo) {
if (libxl_set_vcpuaffinity(&ctx, domid, vcpuinfo->vcpuid,
- cpumap, physinfo.max_cpu_id + 1) == -1) {
- fprintf(stderr, "libxl_list_vcpu failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
+ cpumap, cpusize) == -1) {
+ fprintf(stderr, "libxl_set_vcpuaffinity failed on vcpu `%u'.\n", vcpuinfo->vcpuid);
}
}
}
uint64_t *cpumap;
PyObject *cpulist = NULL;
int nr_cpus, size;
- xc_physinfo_t info = {0};
uint64_t cpumap_size = sizeof(*cpumap);
static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL };
&dom, &vcpu, &cpulist) )
return NULL;
- if ( xc_physinfo(self->xc_handle, &info) != 0 )
+ nr_cpus = xc_get_max_cpus(self->xc_handle);
+ if ( nr_cpus == 0 )
return pyxc_error_to_exception(self->xc_handle);
-
- nr_cpus = info.nr_cpus;
size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8);
cpumap = malloc(cpumap_size * size);
int rc, i;
uint64_t *cpumap;
int nr_cpus, size;
- xc_physinfo_t pinfo = { 0 };
uint64_t cpumap_size = sizeof(*cpumap);
static char *kwd_list[] = { "domid", "vcpu", NULL };
&dom, &vcpu) )
return NULL;
- if ( xc_physinfo(self->xc_handle, &pinfo) != 0 )
+ nr_cpus = xc_get_max_cpus(self->xc_handle);
+ if ( nr_cpus == 0 )
return pyxc_error_to_exception(self->xc_handle);
- nr_cpus = pinfo.nr_cpus;
rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info);
if ( rc < 0 )
return zero;
}
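+/* Convert a cpumap of 'cpusize' bits into a Python list of cpu ids. */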
-static PyObject *cpumap_to_cpulist(uint64_t cpumap)
+static PyObject *cpumap_to_cpulist(uint64_t *cpumap, int cpusize)
{
PyObject *cpulist = NULL;
- uint32_t i;
+ int i;
cpulist = PyList_New(0);
- for ( i = 0; cpumap != 0; i++ )
+ for ( i = 0; i < cpusize; i++ )
{
- if ( cpumap & 1 )
+ if ( *cpumap & (1ULL << (i % 64)) )
{
PyObject* pyint = PyInt_FromLong(i);
PyList_Append(cpulist, pyint);
Py_DECREF(pyint);
}
- cpumap >>= 1;
+ if ( (i % 64) == 63 )
+ cpumap++;
}
return cpulist;
}
return zero;
}
-static PyObject *pyxc_cpupool_getinfo(XcObject *self,
- PyObject *args,
- PyObject *kwds)
+static PyObject *pyxc_cpupool_getinfo(XcObject *self)
{
PyObject *list, *info_dict;
- uint32_t first_pool = 0;
- int max_pools = 1024, nr_pools, i;
+ uint32_t pool;
xc_cpupoolinfo_t *info;
- static char *kwd_list[] = { "first_pool", "max_pools", NULL };
-
- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list,
- &first_pool, &max_pools) )
- return NULL;
-
- info = calloc(max_pools, sizeof(xc_cpupoolinfo_t));
- if (info == NULL)
- return PyErr_NoMemory();
-
- nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info);
-
- if (nr_pools < 0)
- {
- free(info);
- return pyxc_error_to_exception(self->xc_handle);
- }
-
- list = PyList_New(nr_pools);
- for ( i = 0 ; i < nr_pools; i++ )
+ list = PyList_New(0);
+ for (pool = 0;;)
{
+ info = xc_cpupool_getinfo(self->xc_handle, pool);
+ if (info == NULL)
+ break;
info_dict = Py_BuildValue(
"{s:i,s:i,s:i,s:N}",
- "cpupool", (int)info[i].cpupool_id,
- "sched", info[i].sched_id,
- "n_dom", info[i].n_dom,
- "cpulist", cpumap_to_cpulist(info[i].cpumap));
+ "cpupool", (int)info->cpupool_id,
+ "sched", info->sched_id,
+ "n_dom", info->n_dom,
+ "cpulist", cpumap_to_cpulist(info->cpumap,
+ info->cpumap_size));
+ pool = info->cpupool_id + 1;
+ free(info);
+
if ( info_dict == NULL )
{
Py_DECREF(list);
- if ( info_dict != NULL ) { Py_DECREF(info_dict); }
- free(info);
return NULL;
}
- PyList_SetItem(list, i, info_dict);
- }
- free(info);
+ PyList_Append(list, info_dict);
+ Py_DECREF(info_dict);
+ }
return list;
}
static PyObject *pyxc_cpupool_freeinfo(XcObject *self)
{
- uint64_t cpumap;
+ uint64_t *cpumap;
+ int mapsize;
+ PyObject *info = NULL;
- if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0)
+ cpumap = xc_cpupool_freeinfo(self->xc_handle, &mapsize);
+ if (!cpumap)
return pyxc_error_to_exception(self->xc_handle);
- return cpumap_to_cpulist(cpumap);
+ info = cpumap_to_cpulist(cpumap, mapsize * 8);
+
+ free(cpumap);
+
+ return info;
}
static PyObject *pyflask_context_to_sid(PyObject *self, PyObject *args,
{ "cpupool_getinfo",
(PyCFunction)pyxc_cpupool_getinfo,
- METH_VARARGS | METH_KEYWORDS, "\n"
+ METH_NOARGS, "\n"
"Get information regarding a set of cpupools, in increasing id order.\n"
- " first_pool [int, 0]: First cpupool to retrieve info about.\n"
- " max_pools [int, 1024]: Maximum number of cpupools to retrieve info"
- " about.\n\n"
- "Returns: [list of dicts] if list length is less than 'max_pools'\n"
- " parameter then there was an error, or the end of the\n"
- " cpupool-id space was reached.\n"
+ "Returns: [list of dicts]\n"
" pool [int]: Identifier of cpupool to which this info pertains\n"
" sched [int]: Scheduler used for this cpupool\n"
" n_dom [int]: Number of Domains in this cpupool\n"