libxc: enable per-VCPU parameter for RTDS
author Chong Li <lichong659@gmail.com>
Fri, 1 Apr 2016 15:14:42 +0000 (15:14 +0000)
committer Ian Jackson <Ian.Jackson@eu.citrix.com>
Wed, 6 Apr 2016 15:28:59 +0000 (16:28 +0100)
Add xc_sched_rtds_vcpu_get/set functions, which interact with Xen
to get and set a domain's per-VCPU scheduling parameters.
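
For illustration, here is a minimal sketch of a caller giving the
first two VCPUs of a domain a 10ms period and a 4ms budget (the
helper name, the domain id and the parameter values are made up;
period and budget are expressed in microseconds):

    #include <stdio.h>
    #include <xenctrl.h>

    /* Hypothetical helper: 10ms period / 4ms budget for vcpus 0-1. */
    int set_rtds_params(xc_interface *xch, uint32_t domid)
    {
        struct xen_domctl_schedparam_vcpu vcpus[2];
        int i, rc;

        for ( i = 0; i < 2; i++ )
        {
            vcpus[i].vcpuid = i;
            vcpus[i].u.rtds.period = 10000; /* 10ms, in us */
            vcpus[i].u.rtds.budget = 4000;  /* 4ms, in us */
        }

        rc = xc_sched_rtds_vcpu_set(xch, domid, vcpus, 2);
        if ( rc )
            perror("xc_sched_rtds_vcpu_set");
        return rc;
    }

xc_sched_rtds_vcpu_get() takes the same arguments; the caller fills
in the vcpuid field of each entry and the corresponding period and
budget are filled in on return.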

Signed-off-by: Chong Li <chong.li@wustl.edu>
Signed-off-by: Meng Xu <mengxu@cis.upenn.edu>
Signed-off-by: Sisu Xi <xisisu@gmail.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Dario Faggioli <dario.faggioli@citrix.com>
tools/libxc/include/xenctrl.h
tools/libxc/xc_rt.c

index a964681b77b4583d1a69088fb6001617f9a91df8..e8cb1ecf06e09f965c8c83a937ef816483011aea 100644 (file)
@@ -929,6 +929,14 @@ int xc_sched_rtds_domain_set(xc_interface *xch,
 int xc_sched_rtds_domain_get(xc_interface *xch,
                             uint32_t domid,
                             struct xen_domctl_sched_rtds *sdom);
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus);
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus);
 
 int
 xc_sched_arinc653_schedule_set(
index d59e5ce77cb7425abbe2c2d7197bd54336e62bbc..221d17fa7cd6d42e0b93a43b8b53c507d38ffa23 100644 (file)
@@ -62,3 +62,81 @@ int xc_sched_rtds_domain_get(xc_interface *xch,
 
     return rc;
 }
+
+int xc_sched_rtds_vcpu_set(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus)
+{
+    int rc = 0;
+    unsigned processed = 0;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, vcpus) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = (domid_t) domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_putvcpuinfo;
+
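+    /*
+     * Xen may process only part of the array before preempting the
+     * hypercall; it writes the number of entries actually handled
+     * back into nr_vcpus, so resubmit the rest until all are done.
+     */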
+    while ( processed < num_vcpus )
+    {
+        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
+        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
+                                    processed);
+        if ( (rc = do_domctl(xch, &domctl)) != 0 )
+            break;
+        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
+    }
+
+    xc_hypercall_bounce_post(xch, vcpus);
+
+    return rc;
+}
+
+int xc_sched_rtds_vcpu_get(xc_interface *xch,
+                           uint32_t domid,
+                           struct xen_domctl_schedparam_vcpu *vcpus,
+                           uint32_t num_vcpus)
+{
+    int rc = 0;
+    unsigned processed = 0;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vcpus, sizeof(*vcpus) * num_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( xc_hypercall_bounce_pre(xch, vcpus) )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_scheduler_op;
+    domctl.domain = (domid_t) domid;
+    domctl.u.scheduler_op.sched_id = XEN_SCHEDULER_RTDS;
+    domctl.u.scheduler_op.cmd = XEN_DOMCTL_SCHEDOP_getvcpuinfo;
+
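+    /*
+     * As in xc_sched_rtds_vcpu_set() above: Xen writes the number
+     * of entries actually processed back into nr_vcpus, so resubmit
+     * the rest until all of them have been handled.
+     */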
+    while ( processed < num_vcpus )
+    {
+        domctl.u.scheduler_op.u.v.nr_vcpus = num_vcpus - processed;
+        set_xen_guest_handle_offset(domctl.u.scheduler_op.u.v.vcpus, vcpus,
+                                    processed);
+        if ( (rc = do_domctl(xch, &domctl)) != 0 )
+            break;
+        processed += domctl.u.scheduler_op.u.v.nr_vcpus;
+    }
+
+    xc_hypercall_bounce_post(xch, vcpus);
+
+    return rc;
+}