From 91afde280ec7c5495481a4264ce38cdc5d880034 Mon Sep 17 00:00:00 2001
From: Isaku Yamahata
Date: Thu, 7 Aug 2008 11:47:34 +0900
Subject: [PATCH] [IA64] allocate percpu area in the xen va area.

To guarantee that the percpu area is pinned down, move its virtual
address from the xen identity-mapped area to the xen va area, which is
pinned by DTR[IA64_TR_KERNEL].  This avoids unnecessary tlb miss
faults: the per cpu area is sometimes accessed from very critical
points where a tlb miss isn't allowed.

Signed-off-by: Isaku Yamahata
---
 xen/arch/ia64/linux-xen/mm_contig.c         | 41 ++++++++++++++++++---
 xen/arch/ia64/xen/xensetup.c                |  7 ++++
 xen/include/asm-ia64/linux-xen/asm/percpu.h | 10 +++++
 3 files changed, 52 insertions(+), 6 deletions(-)

diff --git a/xen/arch/ia64/linux-xen/mm_contig.c b/xen/arch/ia64/linux-xen/mm_contig.c
index abd1c2cfb8..00d90faff9 100644
--- a/xen/arch/ia64/linux-xen/mm_contig.c
+++ b/xen/arch/ia64/linux-xen/mm_contig.c
@@ -175,6 +175,39 @@ find_memory (void)
 #endif
 
 #ifdef CONFIG_SMP
+#ifdef XEN
+#include
+
+void *percpu_area __initdata = NULL;
+
+void* __init
+per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa)
+{
+	int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);
+	unsigned long size = 1UL << (order + PAGE_SHIFT);
+	unsigned long start = ALIGN_UP((unsigned long)xen_heap_start,
+				       PERCPU_PAGE_SIZE);
+	unsigned long end = start + size;
+
+	if (__pa(end) < end_in_pa) {
+		init_xenheap_pages(__pa(xen_heap_start), __pa(start));
+		xen_heap_start = (void*)end;
+		percpu_area = (void*)virt_to_xenva(start);
+		printk("allocate percpu area 0x%lx@0x%lx 0x%p\n",
+		       size, start, percpu_area);
+	} else {
+		panic("can't allocate percpu area. size 0x%lx\n", size);
+	}
+	return xen_heap_start;
+}
+
+static void* __init
+get_per_cpu_area(void)
+{
+	return percpu_area;
+}
+#endif
+
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -193,13 +226,9 @@ per_cpu_init (void)
 	 */
 	if (smp_processor_id() == 0) {
 #ifdef XEN
-		struct page_info *page;
-		page = alloc_domheap_pages(NULL,
-					   get_order(NR_CPUS *
-						     PERCPU_PAGE_SIZE), 0);
-		if (page == NULL)
+		cpu_data = get_per_cpu_area();
+		if (cpu_data == NULL)
 			panic("can't allocate per cpu area.\n");
-		cpu_data = page_to_virt(page);
 #else
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
diff --git a/xen/arch/ia64/xen/xensetup.c b/xen/arch/ia64/xen/xensetup.c
index 7ccc4051cf..fb242708d3 100644
--- a/xen/arch/ia64/xen/xensetup.c
+++ b/xen/arch/ia64/xen/xensetup.c
@@ -566,6 +566,13 @@ skip_move:
     if (vmx_enabled)
         xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end);
 
+    /* Allocate memory for the percpu area.
+     * per_cpu_init(), called from late_setup_arch(), runs after
+     * end_boot_allocator(); by then it is too late to allocate memory
+     * in the xen va area.
+     */
+    xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end);
+
     heap_desc.xen_heap_start   = xen_heap_start;
     heap_desc.xenheap_phys_end = xenheap_phys_end;
     heap_desc.kern_md          = kern_md;
diff --git a/xen/include/asm-ia64/linux-xen/asm/percpu.h b/xen/include/asm-ia64/linux-xen/asm/percpu.h
index 4e11b3cd5d..8642afe792 100644
--- a/xen/include/asm-ia64/linux-xen/asm/percpu.h
+++ b/xen/include/asm-ia64/linux-xen/asm/percpu.h
@@ -50,12 +50,22 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 extern void setup_per_cpu_areas (void);
 extern void *per_cpu_init(void);
+#ifdef XEN
+extern void *per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa);
+#endif
 
 #else /* ! SMP */
 
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define per_cpu_init()				(__phys_per_cpu_start)
+#ifdef XEN
+static inline void *per_cpu_allocate(void *xen_heap_start,
+				     unsigned long end_in_pa)
+{
+	return xen_heap_start;
+}
+#endif
 
 #endif	/* SMP */
 
-- 
2.30.2
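
For reference, below is a minimal, self-contained sketch (not part of the patch) of the
carve-out arithmetic that per_cpu_allocate() performs on the front of the xen heap:
align the heap cursor up to PERCPU_PAGE_SIZE, reserve an order-sized block for NR_CPUS
per-cpu pages, and resume the heap after it.  The constants (PAGE_SHIFT 14, 64KB
PERCPU_PAGE_SIZE, NR_CPUS 64), the heap address, and the align_up()/order_of() helpers
are illustrative assumptions, not taken from the Xen source.

/* Standalone sketch of the carve-out arithmetic in per_cpu_allocate().
 * Assumed, illustrative constants: ia64 commonly uses 16KB pages
 * (PAGE_SHIFT 14) and a 64KB PERCPU_PAGE_SIZE; NR_CPUS is hypothetical.
 * Assumes an LP64 host (64-bit unsigned long), as on ia64. */
#include <stdio.h>

#define PAGE_SHIFT        14
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PERCPU_PAGE_SIZE  (1UL << 16)
#define NR_CPUS           64

/* Round addr up to the next align boundary (align is a power of two). */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
	return (addr + align - 1) & ~(align - 1);
}

/* Smallest order such that (PAGE_SIZE << order) >= size, like get_order(). */
static int order_of(unsigned long size)
{
	int order = 0;
	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long heap_start = 0xf000000004008000UL; /* hypothetical heap cursor */
	int order = order_of(NR_CPUS * PERCPU_PAGE_SIZE);
	unsigned long size = 1UL << (order + PAGE_SHIFT);
	unsigned long start = align_up(heap_start, PERCPU_PAGE_SIZE);
	unsigned long end = start + size;

	/* The bytes between heap_start and start are handed back to the heap by
	 * the patch (init_xenheap_pages()); the heap cursor then resumes at end. */
	printf("order %d, size 0x%lx, percpu area [0x%lx, 0x%lx)\n",
	       order, size, start, end);
	return 0;
}

With these assumed values the reserved block works out to 4MB (order 8), which is the
kind of "allocate percpu area 0x400000@..." figure the patch's printk would report.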