xen: move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and XEN_SYSCTL_topologyinfo to common code
author    Stefano Stabellini <stefano.stabellini@eu.citrix.com>
          Fri, 15 Feb 2013 13:32:20 +0000 (13:32 +0000)
committer Stefano Stabellini <stefano.stabellini@eu.citrix.com>
          Fri, 15 Feb 2013 13:32:20 +0000 (13:32 +0000)
Move XEN_SYSCTL_physinfo, XEN_SYSCTL_numainfo and
XEN_SYSCTL_topologyinfo from x86/sysctl.c to common/sysctl.c.

The implementation of XEN_SYSCTL_physinfo is mostly generic, but a few
arch-specific details still need to be filled in: introduce
arch_do_physinfo to do that.
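
For context, a minimal sketch of how a toolstack consumer reaches this
sysctl through libxc's xc_physinfo() wrapper (illustrative only; assumes
a libxc of the same vintage and elides most error handling; all of the
fields printed appear in the physinfo hunks below):

    /* physinfo_demo.c -- query host info via libxc.
     * Illustrative build: gcc physinfo_demo.c -lxenctrl -o physinfo_demo */
    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        xc_physinfo_t info = { 0 };

        if ( !xch )
            return 1;

        /* Issues XEN_SYSCTL_physinfo, now handled in common/sysctl.c
         * with the arch-specific bits filled in by arch_do_physinfo(). */
        if ( xc_physinfo(xch, &info) == 0 )
            printf("cpus=%u nodes=%u cpu_khz=%u total_pages=%llu\n",
                   info.nr_cpus, info.nr_nodes, info.cpu_khz,
                   (unsigned long long)info.total_pages);

        xc_interface_close(xch);
        return 0;
    }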

The implementation of XEN_SYSCTL_physinfo relies on two global
variables: total_pages and cpu_khz. Make them available on ARM.
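
Deriving cpu_khz from CNTFRQ changes the tick conversions in
xen/arch/arm/time.c (hunk below) to divide by 1000 * cpu_khz instead of
the raw counter frequency. A standalone model of that arithmetic, with a
GCC __int128 standing in for Xen's wide-intermediate muldiv64() (an
assumption for illustration only):

    #include <assert.h>
    #include <stdint.h>

    /* Xen's muldiv64(a, b, c) computes a * b / c without overflowing
     * the intermediate product; __int128 models that here. */
    static uint64_t muldiv64(uint64_t a, uint64_t b, uint64_t c)
    {
        return (uint64_t)((unsigned __int128)a * b / c);
    }

    #define SECONDS(s) ((uint64_t)(s) * 1000000000ULL)  /* ns */

    static unsigned long cpu_khz = 24000;  /* e.g. CNTFRQ = 24 MHz */

    static uint64_t ticks_to_ns(uint64_t ticks)
    {
        return muldiv64(ticks, SECONDS(1), 1000 * cpu_khz);
    }

    static uint64_t ns_to_ticks(uint64_t ns)
    {
        return muldiv64(ns, 1000 * cpu_khz, SECONDS(1));
    }

    int main(void)
    {
        /* 24e6 ticks at 24 MHz is exactly one second... */
        assert(ticks_to_ns(24000000) == SECONDS(1));
        /* ...and the conversions round-trip at this granularity. */
        assert(ns_to_ticks(SECONDS(1)) == 24000000);
        return 0;
    }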

Implement node_spanned_pages and __node_distance on ARM, assuming a
single NUMA node for now.
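
With these stubs (see the asm-arm/numa.h hunk below), the common
XEN_SYSCTL_numainfo handler reports exactly one node spanning all of
RAM, with self-distance 20. A toy trace of what the loop produces under
that assumption -- standalone illustration, not hypervisor code:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    /* The new asm-arm/numa.h stubs: one node covering everything. */
    #define node_spanned_pages(nid) (total_pages)
    #define __node_distance(a, b)   (20)

    static unsigned long total_pages = 262144;  /* e.g. 1 GiB of RAM */

    int main(void)
    {
        uint32_t max_node_index = 0;  /* last (and only) online node */

        for ( uint32_t i = 0; i <= max_node_index; i++ )
        {
            uint64_t memsize = (uint64_t)node_spanned_pages(i) << PAGE_SHIFT;
            printf("node %u: %llu bytes, distance(%u,%u)=%d\n",
                   i, (unsigned long long)memsize, i, i,
                   __node_distance(i, i));
        }
        return 0;  /* prints: node 0: 1073741824 bytes, distance(0,0)=20 */
    }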

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Keir Fraser <keir@xen.org>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Committed-by: Ian Campbell <ian.campbell@citrix.com>
xen/arch/arm/mm.c
xen/arch/arm/setup.c
xen/arch/arm/sysctl.c
xen/arch/arm/time.c
xen/arch/x86/sysctl.c
xen/common/sysctl.c
xen/include/asm-arm/numa.h
xen/include/xen/sched.h

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d0037fd0e145bf56a320fbe80d4edd91d4b34a6a..1c78d034b389edd728ce7adf2f6aeade7be9da46 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -62,6 +62,7 @@ unsigned long frametable_base_mfn __read_mostly;
 unsigned long frametable_virt_end __read_mostly;
 
 unsigned long max_page;
+unsigned long total_pages;
 
 extern char __init_begin[], __init_end[];
 
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index 2be5515053c428e3c4e9a601c1002916d584594d..5a86f907d52dacff1c6abf64634eb6a78733a885 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -219,7 +219,7 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
     ram_start = early_info.mem.bank[0].start;
     ram_size  = early_info.mem.bank[0].size;
     ram_end = ram_start + ram_size;
-    ram_pages = ram_size >> PAGE_SHIFT;
+    total_pages = ram_pages = ram_size >> PAGE_SHIFT;
 
     /*
      * Locate the xenheap using these constraints:
diff --git a/xen/arch/arm/sysctl.c b/xen/arch/arm/sysctl.c
index a286abee4628dd2ea247aa9538365e8e7063c376..a5d9cf0ce9213187e5f29fb709e21d10ed00a251 100644
--- a/xen/arch/arm/sysctl.c
+++ b/xen/arch/arm/sysctl.c
@@ -12,6 +12,8 @@
 #include <xen/errno.h>
 #include <public/sysctl.h>
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
+
 long arch_do_sysctl(struct xen_sysctl *sysctl,
                     XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
 {
diff --git a/xen/arch/arm/time.c b/xen/arch/arm/time.c
index 07628e1bc3744aafb42c763058e772ed7ef8eb46..3dad9b3d5e1390f28bd71c2a1f908d49d8c7989b 100644
--- a/xen/arch/arm/time.c
+++ b/xen/arch/arm/time.c
@@ -43,16 +43,16 @@ uint64_t __read_mostly boot_count;
 
 /* For fine-grained timekeeping, we use the ARM "Generic Timer", a
  * register-mapped time source in the SoC. */
-static uint32_t __read_mostly cntfrq;      /* Ticks per second */
+unsigned long __read_mostly cpu_khz;  /* CPU clock frequency in kHz. */
 
 /*static inline*/ s_time_t ticks_to_ns(uint64_t ticks)
 {
-    return muldiv64(ticks, SECONDS(1), cntfrq);
+    return muldiv64(ticks, SECONDS(1), 1000 * cpu_khz);
 }
 
 /*static inline*/ uint64_t ns_to_ticks(s_time_t ns)
 {
-    return muldiv64(ns, cntfrq, SECONDS(1));
+    return muldiv64(ns, 1000 * cpu_khz, SECONDS(1));
 }
 
 /* TODO: On a real system the firmware would have set the frequency in
@@ -93,9 +93,9 @@ int __init init_xen_time(void)
     if ( (READ_CP32(ID_PFR1) & ID_PFR1_GT_MASK) != ID_PFR1_GT_v1 )
         panic("CPU does not support the Generic Timer v1 interface.\n");
 
-    cntfrq = READ_CP32(CNTFRQ);
+    cpu_khz = READ_CP32(CNTFRQ) / 1000;
     boot_count = READ_CP64(CNTPCT);
-    printk("Using generic timer at %"PRIu32" Hz\n", cntfrq);
+    printk("Using generic timer at %lu KHz\n", cpu_khz);
 
     return 0;
 }
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index d0be4bee6a1a642d400e467f7d97318923e50824..b4d3e32775d7643ae1a9f5e04932134fed0d9a7f 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -57,6 +57,15 @@ long cpu_down_helper(void *data)
     return ret;
 }
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
+{
+    memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+    if ( hvm_enabled )
+        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
+    if ( iommu_enabled )
+        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
+}
+
 long arch_do_sysctl(
     struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
 {
@@ -65,120 +74,6 @@ long arch_do_sysctl(
     switch ( sysctl->cmd )
     {
 
-    case XEN_SYSCTL_physinfo:
-    {
-        xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
-
-        memset(pi, 0, sizeof(*pi));
-        pi->threads_per_core =
-            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
-        pi->cores_per_socket =
-            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
-        pi->nr_cpus = num_online_cpus();
-        pi->nr_nodes = num_online_nodes();
-        pi->max_node_id = MAX_NUMNODES-1;
-        pi->max_cpu_id = nr_cpu_ids - 1;
-        pi->total_pages = total_pages;
-        pi->free_pages = avail_domheap_pages();
-        pi->scrub_pages = 0;
-        pi->cpu_khz = cpu_khz;
-        memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
-        if ( hvm_enabled )
-            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
-        if ( iommu_enabled )
-            pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
-
-        if ( __copy_field_to_guest(u_sysctl, sysctl, u.physinfo) )
-            ret = -EFAULT;
-    }
-    break;
-        
-    case XEN_SYSCTL_topologyinfo:
-    {
-        uint32_t i, max_cpu_index, last_online_cpu;
-        xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
-
-        last_online_cpu = cpumask_last(&cpu_online_map);
-        max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
-        ti->max_cpu_index = last_online_cpu;
-
-        for ( i = 0; i <= max_cpu_index; i++ )
-        {
-            if ( !guest_handle_is_null(ti->cpu_to_core) )
-            {
-                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ti->cpu_to_socket) )
-            {
-                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ti->cpu_to_node) )
-            {
-                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
-                    break;
-            }
-        }
-
-        ret = ((i <= max_cpu_index) ||
-               __copy_field_to_guest(u_sysctl, sysctl, u.topologyinfo))
-            ? -EFAULT : 0;
-    }
-    break;
-
-    case XEN_SYSCTL_numainfo:
-    {
-        uint32_t i, j, max_node_index, last_online_node;
-        xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
-
-        last_online_node = last_node(node_online_map);
-        max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
-        ni->max_node_index = last_online_node;
-
-        for ( i = 0; i <= max_node_index; i++ )
-        {
-            if ( !guest_handle_is_null(ni->node_to_memsize) )
-            {
-                uint64_t memsize = node_online(i) ? 
-                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
-                if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
-                    break;
-            }
-            if ( !guest_handle_is_null(ni->node_to_memfree) )
-            {
-                uint64_t memfree = node_online(i) ? 
-                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
-                if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
-                    break;
-            }
-
-            if ( !guest_handle_is_null(ni->node_to_node_distance) )
-            {
-                for ( j = 0; j <= max_node_index; j++)
-                {
-                    uint32_t distance = ~0u;
-                    if ( node_online(i) && node_online(j) )
-                        distance = __node_distance(i, j);
-                    if ( copy_to_guest_offset(
-                        ni->node_to_node_distance, 
-                        i*(max_node_index+1) + j, &distance, 1) )
-                        break;
-                }
-                if ( j <= max_node_index )
-                    break;
-            }
-        }
-
-        ret = ((i <= max_node_index) ||
-               __copy_field_to_guest(u_sysctl, sysctl, u.numainfo))
-            ? -EFAULT : 0;
-    }
-    break;
-    
     case XEN_SYSCTL_cpu_hotplug:
     {
         unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index d663ed743086b3f2d9262c05cb7ee10a5b721ffd..20bb864e64f3de054551800fd0688fe5403628f4 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -249,6 +249,115 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
         ret = sched_adjust_global(&op->u.scheduler_op);
         break;
 
+    case XEN_SYSCTL_physinfo:
+    {
+        xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+
+        memset(pi, 0, sizeof(*pi));
+        pi->threads_per_core =
+            cpumask_weight(per_cpu(cpu_sibling_mask, 0));
+        pi->cores_per_socket =
+            cpumask_weight(per_cpu(cpu_core_mask, 0)) / pi->threads_per_core;
+        pi->nr_cpus = num_online_cpus();
+        pi->nr_nodes = num_online_nodes();
+        pi->max_node_id = MAX_NUMNODES-1;
+        pi->max_cpu_id = nr_cpu_ids - 1;
+        pi->total_pages = total_pages;
+        pi->free_pages = avail_domheap_pages();
+        pi->scrub_pages = 0;
+        pi->cpu_khz = cpu_khz;
+        arch_do_physinfo(pi);
+
+        if ( copy_to_guest(u_sysctl, op, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
+    case XEN_SYSCTL_numainfo:
+    {
+        uint32_t i, j, max_node_index, last_online_node;
+        xen_sysctl_numainfo_t *ni = &op->u.numainfo;
+
+        last_online_node = last_node(node_online_map);
+        max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
+        ni->max_node_index = last_online_node;
+
+        for ( i = 0; i <= max_node_index; i++ )
+        {
+            if ( !guest_handle_is_null(ni->node_to_memsize) )
+            {
+                uint64_t memsize = node_online(i) ?
+                                   node_spanned_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ni->node_to_memfree) )
+            {
+                uint64_t memfree = node_online(i) ?
+                                   avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
+                if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
+                    break;
+            }
+
+            if ( !guest_handle_is_null(ni->node_to_node_distance) )
+            {
+                for ( j = 0; j <= max_node_index; j++)
+                {
+                    uint32_t distance = ~0u;
+                    if ( node_online(i) && node_online(j) )
+                        distance = __node_distance(i, j);
+                    if ( copy_to_guest_offset(
+                        ni->node_to_node_distance,
+                        i*(max_node_index+1) + j, &distance, 1) )
+                        break;
+                }
+                if ( j <= max_node_index )
+                    break;
+            }
+        }
+
+        ret = ((i <= max_node_index) || copy_to_guest(u_sysctl, op, 1))
+            ? -EFAULT : 0;
+    }
+    break;
+
+    case XEN_SYSCTL_topologyinfo:
+    {
+        uint32_t i, max_cpu_index, last_online_cpu;
+        xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
+
+        last_online_cpu = cpumask_last(&cpu_online_map);
+        max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
+        ti->max_cpu_index = last_online_cpu;
+
+        for ( i = 0; i <= max_cpu_index; i++ )
+        {
+            if ( !guest_handle_is_null(ti->cpu_to_core) )
+            {
+                uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ti->cpu_to_socket) )
+            {
+                uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
+                    break;
+            }
+            if ( !guest_handle_is_null(ti->cpu_to_node) )
+            {
+                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
+                    break;
+            }
+        }
+
+        ret = ((i <= max_cpu_index) || copy_to_guest(u_sysctl, op, 1))
+            ? -EFAULT : 0;
+    }
+    break;
+
+
     default:
         ret = arch_do_sysctl(op, u_sysctl);
         copyback = 0;
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index a1b1f584f84692f6ce64159880f0637743c4684a..86f0183c9569a10ec13428c3bb506743554ba074 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -10,6 +10,10 @@ static inline __attribute__((pure)) int phys_to_nid(paddr_t addr)
     return 0;
 }
 
+/* XXX: implement NUMA support */
+#define node_spanned_pages(nid)        (total_pages)
+#define __node_distance(a, b) (20)
+
 #endif /* __ARCH_ARM_NUMA_H */
 /*
  * Local variables:
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 90a6537d6eb8de40e1a902496091b6b7fcccdbba..ba0f2f87b7a612629a1f26cbb48fe3b4561fef38 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -748,6 +748,8 @@ extern void dump_runq(unsigned char key);
 
 #define num_cpupool_cpus(c) cpumask_weight((c)->cpu_valid)
 
+void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
+
 #endif /* __SCHED_H__ */
 
 /*