skip_dom0_copy:
/* Initialize low and high mappings of all memory with 4MB pages */
- mov $idle0_pg_table-__PAGE_OFFSET,%edi
+ mov $idle_pg_table-__PAGE_OFFSET,%edi
mov $0x1e3,%eax /* PRESENT+RW+A+D+4MB+GLOBAL */
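/* Each 4-byte L2 entry maps 4MB, so the slot for the high (virtual) */
/* mapping sits (__PAGE_OFFSET>>22)*4 = __PAGE_OFFSET>>20 bytes above */
/* the low (identity) slot that %edi points at. */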
1: mov %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */
stosl /* low mapping */
ud2 /* Force a panic (invalid opcode). */
start_paging:
- mov $idle0_pg_table-__PAGE_OFFSET,%eax
+ mov $idle_pg_table-__PAGE_OFFSET,%eax
mov %eax,%cr3
mov %cr0,%eax
or $0x80010000,%eax /* set PG and WP bits */
.quad 0x0000000000000000 /* unused */
.fill 2*NR_CPUS,8,0 /* space for TSS and LDT per CPU */
-# The following adds 12kB to the kernel file size.
+# The following adds 8-12kB to the kernel file size.
.org 0x1000
-ENTRY(idle0_pg_table)
+ENTRY(idle_pg_table)
.org 0x2000
ENTRY(cpu0_stack)
.org 0x3000
*/
vaddr = remap_base;
remap_base += size;
- pl2e = idle0_pg_table + l2_table_offset(vaddr);
+ pl2e = &idle_pg_table[l2_table_offset(vaddr)];
if ( l2_pgentry_empty(*pl2e) ) new_l2e(pl2e);
pl1e = l2_pgentry_to_l1(*pl2e++) + l1_table_offset(vaddr);
for ( ; ; )
l2_pgentry_t *l2ent;
l1_pgentry_t *l1ent;
- l2ent = idle0_pg_table + l2_table_offset(vaddr);
+ l2ent = &idle_pg_table[l2_table_offset(vaddr)];
l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr);
*l1ent = entry;
* created - mappings will be set by set_fixmap():
*/
addr = FIXADDR_START & ~((1<<L2_PAGETABLE_SHIFT)-1);
- fixrange_init(addr, 0, idle0_pg_table);
+ fixrange_init(addr, 0, idle_pg_table);
/* Create page table for ioremap(). */
ioremap_pt = (void *)get_free_page(GFP_KERNEL);
clear_page(ioremap_pt);
- idle0_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
/* Create read-only mapping of MPT for guest-OS use. */
- idle0_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
- idle0_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT];
- mk_l2_readonly(idle0_pg_table +
+ idle_pg_table[READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT];
+ mk_l2_readonly(idle_pg_table +
(READONLY_MPT_VIRT_START >> L2_PAGETABLE_SHIFT));
+
+ /* Set up mapping cache for domain pages. */
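+ /* One page of PTEs gives MAPCACHE_ENTRIES transient 4kB mapping slots, */
+ /* now shared by every CPU rather than allocated per CPU. */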
+ mapcache = (unsigned long *)get_free_page(GFP_KERNEL);
+ clear_page(mapcache);
+ idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
+
+ /* Set up linear page table mapping. */
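+ /* Mapping the L2 table into itself exposes every PTE as a flat array */
+ /* at LINEAR_PT_VIRT_START (see linear_pg_table). */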
+ idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
+
}
void __init zap_low_mappings (void)
{
- int i, j;
- for ( i = 0; i < smp_num_cpus; i++ )
- {
- for ( j = 0; j < DOMAIN_ENTRIES_PER_L2_PAGETABLE; j++ )
- {
- idle_pg_table[i][j] = mk_l2_pgentry(0);
- }
- }
+ int i;
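+ /* Clear the low-memory identity mappings set up during boot. */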
+ for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
+ idle_pg_table[i] = mk_l2_pgentry(0);
flush_tlb_all();
}
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
unsigned long wait_init_idle;
-/* Basic page table for each CPU in the system. */
-l2_pgentry_t *idle_pg_table[NR_CPUS] = { idle0_pg_table };
struct task_struct *idle_task[NR_CPUS] = { &idle0_task };
-/* for asm/domain_page.h, map_domain_page() */
-unsigned long *mapcache[NR_CPUS];
-
int phys_proc_id[NR_CPUS];
int logical_proc_id[NR_CPUS];
{
int nr = smp_processor_id();
struct tss_struct * t = &init_tss[nr];
- l2_pgentry_t *pl2e;
if ( test_and_set_bit(nr, &cpu_initialized) )
panic("CPU#%d already initialized!!!\n", nr);
__asm__ __volatile__ ("movl %%eax,%%cr3"
: : "a" (pagetable_val(current->mm.pagetable)));
- /* Set up mapping cache for domain pages. */
- pl2e = idle_pg_table[nr] + (MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT);
- mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL);
- clear_page(mapcache[nr]);
- *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | __PAGE_HYPERVISOR);
-
- /* Set up linear page table mapping. */
- idle_pg_table[nr][LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
- mk_l2_pgentry(__pa(idle_pg_table[nr]) | __PAGE_HYPERVISOR);
-
init_idle_task();
}
unsigned long boot_error = 0;
int timeout, cpu;
unsigned long start_eip;
- l2_pgentry_t *pagetable;
cpu = ++cpucount;
if ( (idle = do_newdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
panic("failed 'newdomain' for CPU %d", cpu);
- pagetable = (void *)get_free_page(GFP_KERNEL);
- memcpy(pagetable, idle0_pg_table, PAGE_SIZE);
- idle_pg_table[cpu] = pagetable;
- idle->mm.pagetable = mk_pagetable(__pa(pagetable));
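+ /* Idle domains all share the single boot-time page table. */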
+ idle->mm.pagetable = mk_pagetable(__pa(idle_pg_table));
map_cpu_to_boot_apicid(cpu, apicid);
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
- /* So we see what's up */
+ /* So we see what's up. */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
stack_start.esp = __pa(get_free_page(GFP_KERNEL)) + 4000;
if ( addr >= PAGE_OFFSET )
{
unsigned long page;
- unsigned long *pde;
- pde = (unsigned long *)idle_pg_table[smp_processor_id()];
- page = pde[addr >> L2_PAGETABLE_SHIFT];
+ page = l2_pgentry_val(idle_pg_table[addr >> L2_PAGETABLE_SHIFT]);
printk("*pde = %08lx\n", page);
if ( page & _PAGE_PRESENT )
{
tss->ss = __HYPERVISOR_DS;
tss->esp = (unsigned long)
&doublefault_stack[DOUBLEFAULT_STACK_SIZE];
- tss->__cr3 = __pa(idle0_pg_table);
+ tss->__cr3 = __pa(idle_pg_table);
tss->cs = __HYPERVISOR_CS;
tss->eip = (unsigned long)do_double_fault;
tss->eflags = 2;
*/
phys_l2tab = meminfo->l2_pgt_addr;
l2tab = map_domain_mem(phys_l2tab);
- memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
- ((l2_pgentry_t *)idle_pg_table[p->processor]) +
- DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+ memcpy(&l2tab[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
+ &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
(ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
* sizeof(l2_pgentry_t));
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
*/
phys_l2tab = alloc_page_from_domain(&alloc_address, &alloc_index);
l2start = l2tab = map_domain_mem(phys_l2tab);
- memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
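+ /* Starting from a copy of the idle table gives the new domain all of */
+ /* the hypervisor's own mappings. */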
+ memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
#include <asm/domain_page.h>
#include <asm/pgalloc.h>
-static unsigned int map_idx[NR_CPUS];
+unsigned long *mapcache;
+static unsigned int map_idx, shadow_map_idx[NR_CPUS];
+static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
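+/* map_lock serialises slot allocation in the shared mapcache. Each CPU */
+/* records its last slot in shadow_map_idx[] so a wrap of the global index */
+/* by another CPU can be detected and the local TLB flushed before reuse. */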
/* Use a spare PTE bit to mark entries ready for recycling. */
#define READY_FOR_TLB_FLUSH (1<<10)
static void flush_all_ready_maps(void)
{
- unsigned long *cache = mapcache[smp_processor_id()];
+ unsigned long *cache = mapcache;
/* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
do { if ( (*cache & READY_FOR_TLB_FLUSH) ) *cache = 0; }
void *map_domain_mem(unsigned long pa)
{
unsigned long va;
- int cpu = smp_processor_id();
- unsigned int idx;
- unsigned long *cache = mapcache[cpu];
+ unsigned int idx, cpu = smp_processor_id();
+ unsigned long *cache = mapcache;
unsigned long flags;
- local_irq_save(flags);
+ spin_lock_irqsave(&map_lock, flags);
+
+ /* Has some other CPU caused a wrap? We must flush if so. */
+ if ( map_idx < shadow_map_idx[cpu] )
+ {
+ perfc_incrc(domain_page_tlb_flush);
+ local_flush_tlb();
+ }
for ( ; ; )
{
- idx = map_idx[cpu] = (map_idx[cpu] + 1) & (MAPCACHE_ENTRIES - 1);
+ idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
if ( idx == 0 ) flush_all_ready_maps();
if ( cache[idx] == 0 ) break;
}
cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&map_lock, flags);
+
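+ /* Record the slot we just allocated; used above to detect wraps. */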
+ shadow_map_idx[cpu] = idx;
va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
return (void *)va;
{
unsigned int idx;
idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
- mapcache[smp_processor_id()][idx] |= READY_FOR_TLB_FLUSH;
+ mapcache[idx] |= READY_FOR_TLB_FLUSH;
}
/* Now we simply slap in our high mapping. */
memcpy(p_l2_entry,
- idle_pg_table[smp_processor_id()] + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+ &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
p_l2_entry[(PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT) -
DOMAIN_ENTRIES_PER_L2_PAGETABLE] =
#include <xeno/config.h>
#include <xeno/sched.h>
-extern unsigned long *mapcache[NR_CPUS];
+extern unsigned long *mapcache;
#define MAPCACHE_ENTRIES 1024
/*
#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
-extern l2_pgentry_t idle0_pg_table[ENTRIES_PER_L2_PAGETABLE];
-extern l2_pgentry_t *idle_pg_table[NR_CPUS];
+extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
extern void paging_init(void);
#define __flush_tlb() __flush_tlb_counted()
{ \
cpu_vm_mask: 0, \
perdomain_pt: 0, \
- pagetable: mk_pagetable(__pa(idle0_pg_table)) \
+ pagetable: mk_pagetable(__pa(idle_pg_table)) \
}
#define _HYP_EVENT_NEED_RESCHED 0