From 99db02d50976b46f4681f38e41c403f8f605942d Mon Sep 17 00:00:00 2001 From: "kaf24@scramble.cl.cam.ac.uk" Date: Tue, 7 Oct 2003 19:34:30 +0000 Subject: [PATCH] bitkeeper revision 1.488 (3f831546g57eNx40G9Qp-XWxciMP9Q) Many files: Remove CPU-dependent page-directory entries. A single page table can now be used simultaneously by several CPUs. --- xen/arch/i386/boot/boot.S | 8 +++---- xen/arch/i386/ioremap.c | 2 +- xen/arch/i386/mm.c | 34 ++++++++++++++++++------------ xen/arch/i386/setup.c | 16 -------------- xen/arch/i386/smpboot.c | 8 ++----- xen/arch/i386/traps.c | 6 ++---- xen/common/domain.c | 7 +++--- xen/common/domain_page.c | 28 ++++++++++++++++-------- xen/common/memory.c | 2 +- xen/include/asm-i386/domain_page.h | 2 +- xen/include/asm-i386/page.h | 3 +-- xen/include/xeno/sched.h | 2 +- 12 files changed, 55 insertions(+), 63 deletions(-) diff --git a/xen/arch/i386/boot/boot.S b/xen/arch/i386/boot/boot.S index e882428a21..6179cf5a06 100644 --- a/xen/arch/i386/boot/boot.S +++ b/xen/arch/i386/boot/boot.S @@ -110,7 +110,7 @@ continue_boot_cpu: skip_dom0_copy: /* Initialize low and high mappings of all memory with 4MB pages */ - mov $idle0_pg_table-__PAGE_OFFSET,%edi + mov $idle_pg_table-__PAGE_OFFSET,%edi mov $0x1e3,%eax /* PRESENT+RW+A+D+4MB+GLOBAL */ 1: mov %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */ stosl /* low mapping */ @@ -127,7 +127,7 @@ skip_dom0_copy: ud2 /* Force a panic (invalid opcode). */ start_paging: - mov $idle0_pg_table-__PAGE_OFFSET,%eax + mov $idle_pg_table-__PAGE_OFFSET,%eax mov %eax,%cr3 mov %cr0,%eax or $0x80010000,%eax /* set PG and WP bits */ @@ -254,9 +254,9 @@ ENTRY(gdt_table) .quad 0x0000000000000000 /* unused */ .fill 2*NR_CPUS,8,0 /* space for TSS and LDT per CPU */ -# The following adds 12kB to the kernel file size. +# The following adds 8-12kB to the kernel file size. 
.org 0x1000 -ENTRY(idle0_pg_table) +ENTRY(idle_pg_table) .org 0x2000 ENTRY(cpu0_stack) .org 0x3000 diff --git a/xen/arch/i386/ioremap.c b/xen/arch/i386/ioremap.c index bf24892367..06c09f8520 100644 --- a/xen/arch/i386/ioremap.c +++ b/xen/arch/i386/ioremap.c @@ -82,7 +82,7 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag */ vaddr = remap_base; remap_base += size; - pl2e = idle0_pg_table + l2_table_offset(vaddr); + pl2e = &idle_pg_table[l2_table_offset(vaddr)]; if ( l2_pgentry_empty(*pl2e) ) new_l2e(pl2e); pl1e = l2_pgentry_to_l1(*pl2e++) + l1_table_offset(vaddr); for ( ; ; ) diff --git a/xen/arch/i386/mm.c b/xen/arch/i386/mm.c index 34ab91add3..4c812e849f 100644 --- a/xen/arch/i386/mm.c +++ b/xen/arch/i386/mm.c @@ -33,7 +33,7 @@ static inline void set_pte_phys (unsigned long vaddr, l2_pgentry_t *l2ent; l1_pgentry_t *l1ent; - l2ent = idle0_pg_table + l2_table_offset(vaddr); + l2ent = &idle_pg_table[l2_table_offset(vaddr)]; l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr); *l1ent = entry; @@ -89,31 +89,37 @@ void __init paging_init(void) * created - mappings will be set by set_fixmap(): */ addr = FIXADDR_START & ~((1<> L2_PAGETABLE_SHIFT] = + idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR); /* Create read-only mapping of MPT for guest-OS use. */ - idle0_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] = - idle0_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]; - mk_l2_readonly(idle0_pg_table + + idle_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] = + idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]; + mk_l2_readonly(idle_pg_table + (READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT)); + + /* Set up mapping cache for domain pages. 
*/ + mapcache = (unsigned long *)get_free_page(GFP_KERNEL); + clear_page(mapcache); + idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] = + mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR); + + /* Set up linear page table mapping. */ + idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] = + mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR); + } void __init zap_low_mappings (void) { - int i, j; - for ( i = 0; i < smp_num_cpus; i++ ) - { - for ( j = 0; j < DOMAIN_ENTRIES_PER_L2_PAGETABLE; j++ ) - { - idle_pg_table[i][j] = mk_l2_pgentry(0); - } - } + int i; + for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ ) + idle_pg_table[i] = mk_l2_pgentry(0); flush_tlb_all(); } diff --git a/xen/arch/i386/setup.c b/xen/arch/i386/setup.c index 61ea25c6e0..cc74655463 100644 --- a/xen/arch/i386/setup.c +++ b/xen/arch/i386/setup.c @@ -18,13 +18,8 @@ struct cpuinfo_x86 boot_cpu_data = { 0 }; unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE; unsigned long wait_init_idle; -/* Basic page table for each CPU in the system. */ -l2_pgentry_t *idle_pg_table[NR_CPUS] = { idle0_pg_table }; struct task_struct *idle_task[NR_CPUS] = { &idle0_task }; -/* for asm/domain_page.h, map_domain_page() */ -unsigned long *mapcache[NR_CPUS]; - int phys_proc_id[NR_CPUS]; int logical_proc_id[NR_CPUS]; @@ -234,7 +229,6 @@ void __init cpu_init(void) { int nr = smp_processor_id(); struct tss_struct * t = &init_tss[nr]; - l2_pgentry_t *pl2e; if ( test_and_set_bit(nr, &cpu_initialized) ) panic("CPU#%d already initialized!!!\n", nr); @@ -268,16 +262,6 @@ void __init cpu_init(void) __asm__ __volatile__ ("movl %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable))); - /* Set up mapping cache for domain pages. */ - pl2e = idle_pg_table[nr] + (MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT); - mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL); - clear_page(mapcache[nr]); - *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | __PAGE_HYPERVISOR); - - /* Set up linear page table mapping. 
*/ - idle_pg_table[nr][LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] = - mk_l2_pgentry(__pa(idle_pg_table[nr]) | __PAGE_HYPERVISOR); - init_idle_task(); } diff --git a/xen/arch/i386/smpboot.c b/xen/arch/i386/smpboot.c index 91b03203d0..4f3800903e 100644 --- a/xen/arch/i386/smpboot.c +++ b/xen/arch/i386/smpboot.c @@ -666,17 +666,13 @@ static void __init do_boot_cpu (int apicid) unsigned long boot_error = 0; int timeout, cpu; unsigned long start_eip; - l2_pgentry_t *pagetable; cpu = ++cpucount; if ( (idle = do_newdomain(IDLE_DOMAIN_ID, cpu)) == NULL ) panic("failed 'newdomain' for CPU %d", cpu); - pagetable = (void *)get_free_page(GFP_KERNEL); - memcpy(pagetable, idle0_pg_table, PAGE_SIZE); - idle_pg_table[cpu] = pagetable; - idle->mm.pagetable = mk_pagetable(__pa(pagetable)); + idle->mm.pagetable = mk_pagetable(__pa(idle_pg_table)); map_cpu_to_boot_apicid(cpu, apicid); @@ -687,7 +683,7 @@ static void __init do_boot_cpu (int apicid) /* start_eip had better be page-aligned! */ start_eip = setup_trampoline(); - /* So we see what's up */ + /* So we see what's up. 
*/ printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip); stack_start.esp = __pa(get_free_page(GFP_KERNEL)) + 4000; diff --git a/xen/arch/i386/traps.c b/xen/arch/i386/traps.c index a555a21476..a15b8eab69 100644 --- a/xen/arch/i386/traps.c +++ b/xen/arch/i386/traps.c @@ -398,9 +398,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code) if ( addr >= PAGE_OFFSET ) { unsigned long page; - unsigned long *pde; - pde = (unsigned long *)idle_pg_table[smp_processor_id()]; - page = pde[addr >> L2_PAGETABLE_SHIFT]; + page = l2_pgentry_val(idle_pg_table[addr >> L2_PAGETABLE_SHIFT]); printk("*pde = %08lx\n", page); if ( page & _PAGE_PRESENT ) { @@ -683,7 +681,7 @@ void __init trap_init(void) tss->ss = __HYPERVISOR_DS; tss->esp = (unsigned long) &doublefault_stack[DOUBLEFAULT_STACK_SIZE]; - tss->__cr3 = __pa(idle0_pg_table); + tss->__cr3 = __pa(idle_pg_table); tss->cs = __HYPERVISOR_CS; tss->eip = (unsigned long)do_double_fault; tss->eflags = 2; diff --git a/xen/common/domain.c b/xen/common/domain.c index 481234bda6..0cd37ec261 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -360,9 +360,8 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo) */ phys_l2tab = meminfo->l2_pgt_addr; l2tab = map_domain_mem(phys_l2tab); - memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE, - ((l2_pgentry_t *)idle_pg_table[p->processor]) + - DOMAIN_ENTRIES_PER_L2_PAGETABLE, + memcpy(&l2tab[DOMAIN_ENTRIES_PER_L2_PAGETABLE], + &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE], (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t)); l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] = @@ -541,7 +540,7 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params, */ phys_l2tab = alloc_page_from_domain(&alloc_address, &alloc_index); l2start = l2tab = map_domain_mem(phys_l2tab); - memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE); + memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE); l2tab[PERDOMAIN_VIRT_START >> 
L2_PAGETABLE_SHIFT] = mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR); l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] = diff --git a/xen/common/domain_page.c b/xen/common/domain_page.c index 0e90fb45cb..5e5974562a 100644 --- a/xen/common/domain_page.c +++ b/xen/common/domain_page.c @@ -18,14 +18,16 @@ #include #include -static unsigned int map_idx[NR_CPUS]; +unsigned long *mapcache; +static unsigned int map_idx, shadow_map_idx[NR_CPUS]; +static spinlock_t map_lock = SPIN_LOCK_UNLOCKED; /* Use a spare PTE bit to mark entries ready for recycling. */ #define READY_FOR_TLB_FLUSH (1<<10) static void flush_all_ready_maps(void) { - unsigned long *cache = mapcache[smp_processor_id()]; + unsigned long *cache = mapcache; /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */ do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; } @@ -39,23 +41,31 @@ static void flush_all_ready_maps(void) void *map_domain_mem(unsigned long pa) { unsigned long va; - int cpu = smp_processor_id(); - unsigned int idx; - unsigned long *cache = mapcache[cpu]; + unsigned int idx, cpu = smp_processor_id(); + unsigned long *cache = mapcache; unsigned long flags; - local_irq_save(flags); + spin_lock_irqsave(&map_lock, flags); + + /* Has some other CPU caused a wrap? We must flush if so. 
*/ + if ( map_idx < shadow_map_idx[cpu] ) + { + perfc_incrc(domain_page_tlb_flush); + local_flush_tlb(); + } for ( ; ; ) { - idx = map_idx[cpu] = (map_idx[cpu] + 1) & (MAPCACHE_ENTRIES - 1); + idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1); if ( idx == 0 ) flush_all_ready_maps(); if ( cache[idx] == 0 ) break; } cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR; - local_irq_restore(flags); + spin_unlock_irqrestore(&map_lock, flags); + + shadow_map_idx[cpu] = idx; va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK); return (void *)va; @@ -65,5 +75,5 @@ void unmap_domain_mem(void *va) { unsigned int idx; idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT; - mapcache[smp_processor_id()][idx] |= READY_FOR_TLB_FLUSH; + mapcache[idx] |= READY_FOR_TLB_FLUSH; } diff --git a/xen/common/memory.c b/xen/common/memory.c index 9a51109335..ecb3a30919 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -362,7 +362,7 @@ static int get_l2_table(unsigned long page_nr) /* Now we simply slap in our high mapping. 
*/ memcpy(p_l2_entry, - idle_pg_table[smp_processor_id()] + DOMAIN_ENTRIES_PER_L2_PAGETABLE, + &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE], HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t)); p_l2_entry[(PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT) - DOMAIN_ENTRIES_PER_L2_PAGETABLE] = diff --git a/xen/include/asm-i386/domain_page.h b/xen/include/asm-i386/domain_page.h index 92fb261147..e91689c867 100644 --- a/xen/include/asm-i386/domain_page.h +++ b/xen/include/asm-i386/domain_page.h @@ -7,7 +7,7 @@ #include #include -extern unsigned long *mapcache[NR_CPUS]; +extern unsigned long *mapcache; #define MAPCACHE_ENTRIES 1024 /* diff --git a/xen/include/asm-i386/page.h b/xen/include/asm-i386/page.h index 86b583b9ed..c9191c43eb 100644 --- a/xen/include/asm-i386/page.h +++ b/xen/include/asm-i386/page.h @@ -89,8 +89,7 @@ typedef struct { unsigned long pt_lo; } pagetable_t; #define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START) -extern l2_pgentry_t idle0_pg_table[ENTRIES_PER_L2_PAGETABLE]; -extern l2_pgentry_t *idle_pg_table[NR_CPUS]; +extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE]; extern void paging_init(void); #define __flush_tlb() __flush_tlb_counted() diff --git a/xen/include/xeno/sched.h b/xen/include/xeno/sched.h index b05c441727..63ac894264 100644 --- a/xen/include/xeno/sched.h +++ b/xen/include/xeno/sched.h @@ -50,7 +50,7 @@ extern struct mm_struct init_mm; { \ cpu_vm_mask: 0, \ perdomain_pt: 0, \ - pagetable: mk_pagetable(__pa(idle0_pg_table)) \ + pagetable: mk_pagetable(__pa(idle_pg_table)) \ } #define _HYP_EVENT_NEED_RESCHED 0 -- 2.30.2