[IA64] allocate percpu area in the xen va area.
author    Isaku Yamahata <yamahata@valinux.co.jp>  Thu, 7 Aug 2008 02:47:34 +0000 (11:47 +0900)
committer Isaku Yamahata <yamahata@valinux.co.jp>  Thu, 7 Aug 2008 02:47:34 +0000 (11:47 +0900)
To guarantee that the per-cpu area is pinned down, move its
virtual address from the xen identity-mapped area to the xen VA
area, which is pinned by DTR[IA64_TR_KERNEL]. This avoids
unnecessary TLB miss faults: the per-cpu area is sometimes
accessed from critical paths where a TLB miss is not allowed.
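
For context, a minimal sketch of the address rebasing this relies on:
a xenheap page is reachable both through the identity map and through
an alias inside the window covered by the pinned translation register.
The DEMO_* constants below are illustrative placeholders, not the real
Xen/IA64 layout, and demo_virt_to_xenva() only stands in for
virt_to_xenva():

    /* Hedged sketch: DEMO_* values are assumptions, not the actual
     * Xen/IA64 constants. The point is that accesses through the
     * rebased alias are covered by one pinned TR entry, so they can
     * never take a data TLB miss. */
    #include <stdint.h>

    #define DEMO_PAGE_OFFSET     0xe000000000000000UL /* identity map (assumed) */
    #define DEMO_XEN_PINNED_BASE 0xf000000000000000UL /* DTR[IA64_TR_KERNEL] window (assumed) */

    /* Stand-in for virt_to_xenva(): rebase an identity-mapped virtual
     * address onto the pinned window at the same physical offset. */
    static inline uint64_t demo_virt_to_xenva(uint64_t va)
    {
            uint64_t pa = va - DEMO_PAGE_OFFSET;    /* __pa() equivalent */
            return DEMO_XEN_PINNED_BASE + pa;
    }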

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/linux-xen/mm_contig.c
xen/arch/ia64/xen/xensetup.c
xen/include/asm-ia64/linux-xen/asm/percpu.h

xen/arch/ia64/linux-xen/mm_contig.c
index abd1c2cfb8d3971b0457d48efce03b909581ee4b..00d90faff9f7823ae6cb3d3f225ec59ae1414928 100644
@@ -175,6 +175,39 @@ find_memory (void)
 #endif
 
 #ifdef CONFIG_SMP
+#ifdef XEN
+#include <asm/elf.h>
+
+void *percpu_area __initdata = NULL;
+
+void* __init
+per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa)
+{
+       int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);
+       unsigned long size = 1UL << (order + PAGE_SHIFT);
+       unsigned long start = ALIGN_UP((unsigned long)xen_heap_start,
+                                      PERCPU_PAGE_SIZE);
+       unsigned long end = start + size;
+
+       if (__pa(end) < end_in_pa) {
+               init_xenheap_pages(__pa(xen_heap_start), __pa(start));
+               xen_heap_start = (void*)end;
+               percpu_area = (void*)virt_to_xenva(start);
+               printk("allocate percpu area 0x%lx@0x%lx 0x%p\n",
+                      size, start, percpu_area);
+       } else {
+               panic("can't allocate percpu area. size 0x%lx\n", size);
+       }
+       return xen_heap_start;
+}
+
+static void* __init
+get_per_cpu_area(void)
+{
+       return percpu_area;
+}
+#endif
+
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -193,13 +226,9 @@ per_cpu_init (void)
         */
        if (smp_processor_id() == 0) {
 #ifdef XEN
-               struct page_info *page;
-               page = alloc_domheap_pages(NULL,
-                                          get_order(NR_CPUS *
-                                                    PERCPU_PAGE_SIZE), 0);
-               if (page == NULL) 
+               cpu_data = get_per_cpu_area();
+               if (cpu_data == NULL) 
                        panic("can't allocate per cpu area.\n");
-               cpu_data = page_to_virt(page);
 #else
                cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
                                           PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
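
The mm_contig.c hunk above carves the per-cpu region off the front of
the xen heap. The same arithmetic can be shown standalone: round the
heap front up to PERCPU_PAGE_SIZE, reserve a power-of-two-order block
for NR_CPUS copies, and advance the heap past it. All DEMO_* names and
demo_get_order() are stand-ins for the Xen equivalents, not the real
definitions:

    #include <stddef.h>

    #define DEMO_PAGE_SHIFT       14                        /* 16KB pages, as on IA64 */
    #define DEMO_PERCPU_PAGE_SIZE (1UL << DEMO_PAGE_SHIFT)  /* assumed equal to page size */
    #define DEMO_NR_CPUS          64
    #define DEMO_ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

    /* Smallest order with (1UL << (order + PAGE_SHIFT)) >= size;
     * stand-in for Xen's get_order(). */
    static int demo_get_order(unsigned long size)
    {
            int order = 0;
            while ((1UL << (order + DEMO_PAGE_SHIFT)) < size)
                    order++;
            return order;
    }

    /* Reserve the per-cpu block at the aligned front of the heap and
     * return the new heap start, or NULL if it would overrun heap_end.
     * (The real code compares physical addresses via __pa().) */
    static void *demo_percpu_carve(void *heap_start, void *heap_end,
                                   unsigned long *area_start,
                                   unsigned long *area_size)
    {
            int order = demo_get_order(DEMO_NR_CPUS * DEMO_PERCPU_PAGE_SIZE);
            unsigned long size = 1UL << (order + DEMO_PAGE_SHIFT);
            unsigned long start = DEMO_ALIGN_UP((unsigned long)heap_start,
                                                DEMO_PERCPU_PAGE_SIZE);

            if (start + size > (unsigned long)heap_end)
                    return NULL;
            /* The real code hands [heap_start, start) back to the heap
             * via init_xenheap_pages() so the alignment gap is not leaked. */
            *area_start = start;
            *area_size = size;
            return (void *)(start + size);
    }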
xen/arch/ia64/xen/xensetup.c
index 7ccc4051cf45203aa989289ae70451dcd8a02ff5..fb242708d35266180f25932284a377706231dc05 100644
@@ -566,6 +566,13 @@ skip_move:
     if (vmx_enabled)
         xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end);
 
+    /* Allocate memory for the percpu area.
+     * per_cpu_init(), called from late_setup_arch(), runs after
+     * end_boot_allocator(), so by then it is too late to allocate
+     * memory in the xen VA area.
+     */
+    xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end);
+
     heap_desc.xen_heap_start   = xen_heap_start;
     heap_desc.xenheap_phys_end = xenheap_phys_end;
     heap_desc.kern_md          = kern_md;
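
The ordering constraint stated in the comment can be made concrete
with a schematic: the reservation must run while the boot heap is
still being carved up, because once end_boot_allocator() has sealed
it, per_cpu_init() can only map what was reserved earlier. A sketch
with stub bodies, where only the call order matters (all demo_* names
are illustrative):

    /* Stubs mirroring the boot-time call order; bodies intentionally empty. */
    static void *demo_per_cpu_allocate(void *heap, unsigned long end_pa)
    {
            (void)end_pa;
            return heap;                    /* reserve while the heap is still open */
    }
    static void demo_end_boot_allocator(void) { }   /* boot-time carving ends here */
    static void demo_per_cpu_init(void) { }         /* consumes the earlier reservation */

    static void demo_boot_sequence(void *heap, unsigned long end_pa)
    {
            heap = demo_per_cpu_allocate(heap, end_pa); /* must precede the next call */
            demo_end_boot_allocator();
            demo_per_cpu_init();            /* too late to carve from the heap here */
            (void)heap;
    }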
xen/include/asm-ia64/linux-xen/asm/percpu.h
index 4e11b3cd5d4b1b6306bb8124e095e3118f3ea694..8642afe79214543a5a78c329620d70587d3e3c58 100644
@@ -50,12 +50,22 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 extern void setup_per_cpu_areas (void);
 extern void *per_cpu_init(void);
+#ifdef XEN
+extern void *per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa);
+#endif
 
 #else /* ! SMP */
 
 #define per_cpu(var, cpu)                      (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)                     per_cpu__##var
 #define per_cpu_init()                         (__phys_per_cpu_start)
+#ifdef XEN
+static inline void *per_cpu_allocate(void *xen_heap_start,
+                                    unsigned long end_in_pa)
+{
+       return xen_heap_start;
+}
+#endif
 
 #endif /* SMP */
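
The header change uses a common kernel idiom: a real declaration under
CONFIG_SMP and a static-inline identity stub otherwise, so the call
site in xensetup.c needs no #ifdef and the UP build simply passes the
heap pointer through. A generic sketch of the idiom, with hypothetical
names:

    #ifdef CONFIG_SMP
    /* SMP: the real allocator is implemented in arch code. */
    void *demo_reserve_area(void *heap_start, unsigned long end_in_pa);
    #else
    /* UP: identity stub, the heap passes through untouched. */
    static inline void *demo_reserve_area(void *heap_start,
                                          unsigned long end_in_pa)
    {
            (void)end_in_pa;
            return heap_start;
    }
    #endif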