libxl: allow setting more than 31 vcpus
authorYang Zhang <yang.z.zhang@Intel.com>
Thu, 28 Jun 2012 16:51:56 +0000 (17:51 +0100)
committerYang Zhang <yang.z.zhang@Intel.com>
Thu, 28 Jun 2012 16:51:56 +0000 (17:51 +0100)
In the current implementation, an integer is used to record the
currently available vcpus, which only allows the user to specify up
to 31 vcpus.  The following patch uses a cpumap instead of an integer,
which makes more sense than before. Also, there is no longer a limit
on the maximum number of vcpus.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
tools/libxl/libxl_create.c
tools/libxl/libxl_dm.c
tools/libxl/libxl_dom.c
tools/libxl/libxl_types.idl
tools/libxl/libxl_utils.c
tools/libxl/libxl_utils.h
tools/libxl/xl_cmdimpl.c

index 08e5536c0c18cebe2f0bb959a818932cb76474d4..4599e733b1d03a1dc32260acdccc7c83db77cc0f 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <xc_dom.h>
 #include <xenguest.h>
+#include <xen/hvm/hvm_info_table.h>
 
 void libxl_domain_config_init(libxl_domain_config *d_config)
 {
@@ -201,8 +202,12 @@ int libxl__domain_build_info_setdefault(libxl__gc *gc,
 
     if (!b_info->max_vcpus)
         b_info->max_vcpus = 1;
-    if (!b_info->cur_vcpus)
-        b_info->cur_vcpus = 1;
+    if (!b_info->avail_vcpus.size) {
+        if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1))
+            return ERROR_FAIL;
+        libxl_cpumap_set(&b_info->avail_vcpus, 0);
+    } else if (b_info->avail_vcpus.size > HVM_MAX_VCPUS)
+        return ERROR_FAIL;
 
     if (!b_info->cpumap.size) {
         if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0))
index 340fcfa8465df0a7c7644e4460334f0d30cbf23e..2edc734cff43c5221630b65526beea89382d8439 100644 (file)
@@ -160,6 +160,8 @@ static char ** libxl__build_device_model_args_old(libxl__gc *gc,
     }
     if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) {
         int ioemu_vifs = 0;
+        int nr_set_cpus = 0;
+        char *s;
 
         if (b_info->u.hvm.serial) {
             flexarray_vappend(dm_args, "-serial", b_info->u.hvm.serial, NULL);
@@ -200,11 +202,13 @@ static char ** libxl__build_device_model_args_old(libxl__gc *gc,
                               libxl__sprintf(gc, "%d", b_info->max_vcpus),
                               NULL);
         }
-        if (b_info->cur_vcpus) {
-            flexarray_vappend(dm_args, "-vcpu_avail",
-                              libxl__sprintf(gc, "0x%x", b_info->cur_vcpus),
-                              NULL);
-        }
+
+        nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
+        s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus);
+        flexarray_vappend(dm_args, "-vcpu_avail",
+                              libxl__sprintf(gc, "%s", s), NULL);
+        free(s);
+
         for (i = 0; i < num_vifs; i++) {
             if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) {
                 char *smac = libxl__sprintf(gc,
@@ -443,11 +447,14 @@ static char ** libxl__build_device_model_args_new(libxl__gc *gc,
         }
         if (b_info->max_vcpus > 1) {
             flexarray_append(dm_args, "-smp");
-            if (b_info->cur_vcpus)
+            if (b_info->avail_vcpus.size) {
+                int nr_set_cpus = 0;
+                nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus);
+
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d",
                                                          b_info->max_vcpus,
-                                                         b_info->cur_vcpus));
-            else
+                                                         nr_set_cpus));
+            } else
                 flexarray_append(dm_args, libxl__sprintf(gc, "%d",
                                                          b_info->max_vcpus));
         }
index a2e66558d3da3fd0efbfd41657485e9d7e89e64c..d88629fc18aa43869799189f746ab84b40d4010e 100644 (file)
@@ -199,8 +199,8 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid,
     ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn);
     for (i = 0; i < info->max_vcpus; i++) {
         ents[12+(i*2)]   = libxl__sprintf(gc, "cpu/%d/availability", i);
-        ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i)))
-                            ? "offline" : "online";
+        ents[12+(i*2)+1] = libxl_cpumap_test(&info->avail_vcpus, i)
+                            ? "online" : "offline";
     }
 
     hvm_ents = NULL;
@@ -354,7 +354,7 @@ static int hvm_build_set_params(xc_interface *handle, uint32_t domid,
     va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET);
     va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic);
     va_hvm->nr_vcpus = info->max_vcpus;
-    memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus));
+    memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size);
     for (i = 0, sum = 0; i < va_hvm->length; i++)
         sum += ((uint8_t *) va_hvm)[i];
     va_hvm->checksum -= sum;
index b727abbe5df01cfec25e9c5b39b84b4bb5f0b6e0..a4a8e258f47364b7673e66398bac743bb9fb15fc 100644 (file)
@@ -237,7 +237,7 @@ libxl_domain_sched_params = Struct("domain_sched_params",[
 
 libxl_domain_build_info = Struct("domain_build_info",[
     ("max_vcpus",       integer),
-    ("cur_vcpus",       integer),
+    ("avail_vcpus",     libxl_cpumap),
     ("cpumap",          libxl_cpumap),
     ("tsc_mode",        libxl_tsc_mode),
     ("max_memkb",       MemKB),
index 2acf7d4a43a0a02aa0767e315e3033f261733292..d07a5a7a58cecebb7ebc5a831f486419b26a2007 100644 (file)
@@ -511,7 +511,7 @@ void libxl_cpumap_dispose(libxl_cpumap *map)
     free(map->map);
 }
 
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu)
+int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu)
 {
     if (cpu >= cpumap->size * 8)
         return 0;
@@ -532,6 +532,31 @@ void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu)
     cpumap->map[cpu / 8] &= ~(1 << (cpu & 7));
 }
 
+int libxl_cpumap_count_set(const libxl_cpumap *cpumap)
+{
+    int i, nr_set_cpus = 0;
+    libxl_for_each_set_cpu(i, *cpumap)
+        nr_set_cpus++;
+
+    return nr_set_cpus;
+}
+
+/* NB. caller is responsible for freeing the memory */
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap)
+{
+    int i = cpumap->size;
+    char *p = libxl__zalloc(NULL, cpumap->size * 2 + 3);
+    char *q = p;
+    strncpy(p, "0x", 2);
+    p += 2;
+    while(--i >= 0) {
+        sprintf(p, "%02x", cpumap->map[i]);
+        p += 2;
+    }
+    *p = '\0';
+    return q;
+}
+
 int libxl_get_max_cpus(libxl_ctx *ctx)
 {
     return xc_get_max_cpus(ctx->xch);
index 7ab0c0915ce033083c8616b6155eae2481e90843..a762734c3fabcc38a7ac2c7de6aa386eb4315276 100644 (file)
@@ -64,9 +64,11 @@ int libxl_vdev_to_device_disk(libxl_ctx *ctx, uint32_t domid, const char *vdev,
                                libxl_device_disk *disk);
 
 int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus);
-int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_test(const libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu);
 void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu);
+int libxl_cpumap_count_set(const libxl_cpumap *cpumap);
+char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap);
 static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap)
 {
     memset(cpumap->map, -1, cpumap->size);
index 02b55f1305f7926e2eb96e9a3182ae30d62aafff..4101669dc38e24633b541b4c3d8ce24d006c182d 100644 (file)
@@ -647,7 +647,14 @@ static void parse_config_data(const char *config_source,
 
     if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) {
         b_info->max_vcpus = l;
-        b_info->cur_vcpus = (1 << l) - 1;
+
+        if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) {
+            fprintf(stderr, "Unable to allocate cpumap\n");
+            exit(1);
+        }
+        libxl_cpumap_set_none(&b_info->avail_vcpus);
+        while (l-- > 0)
+            libxl_cpumap_set((&b_info->avail_vcpus), l);
     }
 
     if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))