3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen-2.4.16/include/asm-i386/debugreg.h
3ddb79c3r9-31dIsewPV3P3i8HALsQ xen-2.4.16/include/asm-i386/delay.h
3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen-2.4.16/include/asm-i386/desc.h
+3e20b82fl1jmQiKdLy7fxMcutfpjWA xen-2.4.16/include/asm-i386/domain_page.h
3ddb79c2O729EttZTYu1c8LcsUO_GQ xen-2.4.16/include/asm-i386/elf.h
3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen-2.4.16/include/asm-i386/fixmap.h
3ddb79c39o75zPP0T1aQQ4mNrCAN2w xen-2.4.16/include/asm-i386/hardirq.h
3ddb79c0dVhTHLsv6CPTf4baKix4mA xen-2.4.16/include/xeno/blkdev.h
3ddb79c18ePBgitnOs7GiOCFilODVw xen-2.4.16/include/xeno/blkpg.h
3ddb79c2SisDOHDyTeK5-MV3m7pNbA xen-2.4.16/include/xeno/block.h
-3ddb79c2JOriBs0mWh-Tlolq78tg3w xen-2.4.16/include/xeno/bootmem.h
3ddb79c1oOjpQbp68MW7yiUpoi-S-w xen-2.4.16/include/xeno/brlock.h
3ddb79c1x7Ie3kifu7dQRx8y7HVyvA xen-2.4.16/include/xeno/byteorder/big_endian.h
3ddb79c1qFXOEX1eD0yXJ_gsGkUt8w xen-2.4.16/include/xeno/byteorder/generic.h
3ddb79bac26NkKcPIEsfxETc5Snyag xenolinux-2.4.16-sparse/include/asm-xeno/elf.h
3ddb79ba722pCJ_g_xI8ebsE31IK-Q xenolinux-2.4.16-sparse/include/asm-xeno/errno.h
3ddb79b8vIpUpgaSNEneFkg5hYSvNg xenolinux-2.4.16-sparse/include/asm-xeno/fcntl.h
-3ddb79b8c_oKu2_BGNJctM4DBET31Q xenolinux-2.4.16-sparse/include/asm-xeno/fixmap.h
3ddb79b8780YvqvK1g5KPIWzQ6P15w xenolinux-2.4.16-sparse/include/asm-xeno/floppy.h
3ddb79bas-nFywnmilbUeT34PEAA0g xenolinux-2.4.16-sparse/include/asm-xeno/hardirq.h
3ddb79batzR40ZFY9dvgs5f1aM9I6g xenolinux-2.4.16-sparse/include/asm-xeno/hdreg.h
CC := gcc
LD := ld
# Linker should relocate monitor to this address
-MONITOR_BASE := 0xE0100000
+MONITOR_BASE := 0xFC100000
# Bootloader should load monitor to this real address
LOAD_BASE := 0x00100000
CFLAGS := -fno-builtin -O3 -Wall -DMONITOR_BASE=$(MONITOR_BASE)
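The two bases differ by exactly the hypervisor's new virtual offset: 0xFC100000 - 0x00100000 = 0xFC000000, which is the __PAGE_OFFSET introduced elsewhere in this changeset. A minimal standalone sketch of that relationship (illustrative only, not part of the Xen build):

/* Illustrative check of the link/load split (not part of the Xen build). */
#include <assert.h>

#define MONITOR_BASE 0xFC100000UL  /* virtual address the linker targets   */
#define LOAD_BASE    0x00100000UL  /* physical address the bootloader uses */
#define PAGE_OFFSET  0xFC000000UL  /* new __PAGE_OFFSET in this changeset  */

int main(void)
{
    /* __pa(x) == x - PAGE_OFFSET, so the linked image lands at LOAD_BASE. */
    assert(MONITOR_BASE - PAGE_OFFSET == LOAD_BASE);
    return 0;
}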
1: mov %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */
stosl /* low mapping */
add $(1<<L2_PAGETABLE_SHIFT),%eax
- cmp $MAX_USABLE_ADDRESS+0x1e3,%eax
+ cmp $MAX_DIRECTMAP_ADDRESS+0x1e3,%eax
jne 1b
call start_paging
ENTRY(gdt_table)
.quad 0x0000000000000000 /* NULL descriptor */
.quad 0x0000000000000000 /* not used */
- .quad 0x00cdba000000ffff /* 0x11 ring 1 3.5GB code at 0x00000000 */
- .quad 0x00cdb2000000ffff /* 0x19 ring 1 3.5GB data at 0x00000000 */
- .quad 0x00cdfa000000ffff /* 0x23 ring 3 3.5GB code at 0x00000000 */
- .quad 0x00cdf2000000ffff /* 0x2b ring 3 3.5GB data at 0x00000000 */
- .quad 0x00cf9a000000ffff /* 0x30 ring 0 4.0GB code at 0x00000000 */
- .quad 0x00cf92000000ffff /* 0x38 ring 0 4.0GB data at 0x00000000 */
+ .quad 0x00cfba000000bfff /* 0x11 ring 1 3.95GB code at 0x0 */
+ .quad 0x00cfb2000000bfff /* 0x19 ring 1 3.95GB data at 0x0 */
+ .quad 0x00cffa000000bfff /* 0x23 ring 3 3.95GB code at 0x0 */
+ .quad 0x00cff2000000bfff /* 0x2b ring 3 3.95GB data at 0x0 */
+ .quad 0x00cf9a000000ffff /* 0x30 ring 0 4.00GB code at 0x0 */
+ .quad 0x00cf92000000ffff /* 0x38 ring 0 4.00GB data at 0x0 */
.quad 0x0000000000000000
.quad 0x0000000000000000
.quad 0x0000000000000000
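The rewritten ring-1/ring-3 descriptors track the move of the hypervisor base from 0xE0000000 to 0xFC000000: the guest-visible segments grow from 3.5GB to 3.95GB but still stop exactly at the hypervisor boundary. A standalone decode of one of the new quads (a sketch, not part of the source) shows where the 3.95GB figure comes from:

/* Decode 0x00cfba000000bfff: base 0, limit[19:0] = 0xfbfff, G (4KB) set. */
#include <stdio.h>

int main(void)
{
    unsigned long long d = 0x00cfba000000bfffULL;
    unsigned long limit = (unsigned long)((d & 0xffff) | ((d >> 32) & 0xf0000));
    int g = (int)((d >> 55) & 1);             /* granularity: limit in 4KB units */
    unsigned long long span = ((unsigned long long)limit + 1) << (g ? 12 : 0);
    /* Prints 0xfc000000 (3.9375GB): the segment ends at HYPERVISOR_VIRT_START. */
    printf("segment spans %#llx bytes\n", span);
    return 0;
}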
call create_bounce_frame
subl $8,%esi # add DS/ES to failsafe stack frame
movl DS(%esp),%eax
-FAULT3: movl %eax,(%esi)
+FAULT1: movl %eax,(%esi)
movl ES(%esp),%eax
-FAULT4: movl %eax,4(%esi)
+FAULT2: movl %eax,4(%esi)
movl %esi,OLDESP(%esp)
popl %ebx
popl %ecx
popl %ebp
popl %eax
addl $12,%esp
-FAULT5: iret
+FAULT3: iret
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
shll $8,%eax /* multiply by 256 */
addl $init_tss + 12,%eax
movl (%eax),%esi /* tss->esp1 */
-FAULT6: movl 4(%eax),%ds /* tss->ss1 */
+FAULT4: movl 4(%eax),%ds /* tss->ss1 */
/* base of stack frame must contain ss/esp (inter-priv iret) */
subl $8,%esi
movl OLDESP+4(%esp),%eax
-FAULT7: movl %eax,(%esi)
+FAULT5: movl %eax,(%esi)
movl OLDSS+4(%esp),%eax
-FAULT8: movl %eax,4(%esi)
+FAULT6: movl %eax,4(%esi)
jmp 2f
1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
movl OLDESP+4(%esp),%esi
-FAULT9: movl OLDSS+4(%esp),%ds
+FAULT7: movl OLDSS+4(%esp),%ds
2: /* Construct a stack frame: EFLAGS, CS/EIP */
subl $12,%esi
movl EIP+4(%esp),%eax
-FAULT10:movl %eax,(%esi)
+FAULT8: movl %eax,(%esi)
movl CS+4(%esp),%eax
-FAULT11:movl %eax,4(%esi)
+FAULT9: movl %eax,4(%esi)
movl EFLAGS+4(%esp),%eax
-FAULT12:movl %eax,8(%esi)
+FAULT10:movl %eax,8(%esi)
/* Rewrite our stack frame and return to ring 1. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
andl $0xfffcbeff,%eax
.align 4
.long FAULT1, kill_domain_fixup3 # Fault writing to ring-1 stack
.long FAULT2, kill_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT3, kill_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT4, kill_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT5, kill_domain_fixup1 # Fault executing failsafe iret
- .long FAULT6, kill_domain_fixup2 # Fault loading ring-1 stack selector
- .long FAULT7, kill_domain_fixup2 # Fault writing to ring-1 stack
+ .long FAULT3, kill_domain_fixup1 # Fault executing failsafe iret
+ .long FAULT4, kill_domain_fixup2 # Fault loading ring-1 stack selector
+ .long FAULT5, kill_domain_fixup2 # Fault writing to ring-1 stack
+ .long FAULT6, kill_domain_fixup2 # Fault writing to ring-1 stack
+ .long FAULT7, kill_domain_fixup2 # Fault loading ring-1 stack selector
.long FAULT8, kill_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT9, kill_domain_fixup2 # Fault loading ring-1 stack selector
+ .long FAULT9, kill_domain_fixup2 # Fault writing to ring-1 stack
.long FAULT10,kill_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT11,kill_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT12,kill_domain_fixup2 # Fault writing to ring-1 stack
+ .long FAULT11,kill_domain_fixup3 # Fault writing to ring-1 stack
+ .long FAULT12,kill_domain_fixup3 # Fault writing to ring-1 stack
.previous
# This handler kills domains which experience unrecoverable faults.
jnz 2f
subl $4,%esi # push error_code onto guest frame
movl %es:GTB_ERROR_CODE(%edx),%eax
-FAULT1: movl %eax,(%esi)
+FAULT11:movl %eax,(%esi)
test $GTBF_TRAP_CR2,%cl
jz 1f
subl $4,%esi # push %cr2 onto guest frame
movl %es:GTB_CR2(%edx),%eax
-FAULT2: movl %eax,(%esi)
+FAULT12:movl %eax,(%esi)
1: movl %esi,OLDESP(%esp)
2: push %es # unclobber %ds
pop %ds
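Taken together, create_bounce_frame and the trap-bounce code above replay the hardware's exception frame on the guest OS's ring-1 stack, optionally with an error code and %cr2 below it, so the guest kernel handles the event as an ordinary trap. A sketch of the resulting layout, lowest address first (illustrative C; the struct and field names are mine, read off the FAULT sites above):

/* Guest ring-1 stack frame as built above (the failsafe path stores the
 * saved DS/ES in place of cr2/error_code). */
struct guest_bounce_frame {
    /* unsigned long cr2;         pushed only if GTBF_TRAP_CR2 is set (FAULT12) */
    /* unsigned long error_code;  pushed only if the trap has one     (FAULT11) */
    unsigned long eip;            /* FAULT8: interrupted EIP                    */
    unsigned long cs;             /* FAULT9: interrupted CS                     */
    unsigned long eflags;         /* FAULT10: interrupted EFLAGS                */
    unsigned long esp;            /* FAULT5: present only when no ring-1        */
    unsigned long ss;             /* FAULT6: activation was interrupted         */
};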
*pl2e = mk_l2_pgentry(__pa(pl1e)|L2_PROT);
}
+
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
unsigned long vaddr;
l2_pgentry_t *pl2e;
l1_pgentry_t *pl1e;
- /* First time through, start allocating from end of real memory. */
- if ( !remap_base )
- remap_base = (unsigned long)phys_to_virt(MAX_USABLE_ADDRESS);
+ /* First time through, start allocating from the far end of virtual memory. */
+ if ( !remap_base ) remap_base = IOREMAP_VIRT_START;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
void __init paging_init(void)
{
unsigned long addr;
+ void *ioremap_pt;
/* XXX initialised in boot.S */
/*if ( cpu_has_pge ) set_in_cr4(X86_CR4_PGE);*/
*/
addr = FIXADDR_START & ~((1<<L2_PAGETABLE_SHIFT)-1);
fixrange_init(addr, 0, idle0_pg_table);
+
+ /* Create page table for ioremap(). */
+ ioremap_pt = (void *)get_free_page(GFP_KERNEL);
+ clear_page(ioremap_pt);
+ idle0_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(__pa(ioremap_pt) | PAGE_HYPERVISOR);
}
void __init zap_low_mappings (void)
{
- int i;
- for (i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
- idle0_pg_table[i] = mk_l2_pgentry(0);
+ int i, j;
+ for ( i = 0; i < smp_num_cpus; i++ )
+ {
+ for ( j = 0; j < DOMAIN_ENTRIES_PER_L2_PAGETABLE; j++ )
+ {
+ idle_pg_table[i][j] = mk_l2_pgentry(0);
+ }
+ }
flush_tlb_all();
}
#include <xeno/interrupt.h>
#include <xeno/lib.h>
#include <xeno/sched.h>
-#include <xeno/bootmem.h>
#include <xeno/pci.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
+#include <asm/domain_page.h>
struct cpuinfo_x86 boot_cpu_data = { 0 };
/* Lots of nice things, since we only target PPro+. */
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
unsigned long wait_init_idle;
+/* Basic page table for each CPU in the system. */
+l2_pgentry_t *idle_pg_table[NR_CPUS] = { idle0_pg_table };
+
+/* For asm/domain_page.h, map_domain_mem(). */
+unsigned long *mapcache[NR_CPUS];
+
/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
{
int nr = smp_processor_id();
struct tss_struct * t = &init_tss[nr];
-
+ l2_pgentry_t *pl2e;
+
if ( test_and_set_bit(nr, &cpu_initialized) )
panic("CPU#%d already initialized!!!\n", nr);
printk("Initializing CPU#%d\n", nr);
CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
#undef CD
+ /* Install correct page table. */
+ __asm__ __volatile__ ("movl %%eax,%%cr3"
+ : : "a" (pagetable_val(current->mm.pagetable)));
+
+ /* Set up mapping cache for domain pages. */
+ pl2e = idle_pg_table[nr] + (MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT);
+ mapcache[nr] = (unsigned long *)get_free_page(GFP_KERNEL);
+ clear_page(mapcache[nr]);
+ *pl2e = mk_l2_pgentry(__pa(mapcache[nr]) | PAGE_HYPERVISOR);
+
/* Stick the idle task on the run queue. */
(void)wake_up(current);
}
paging_init(); /* not much here now, but sets up fixmap */
if ( smp_found_config ) get_smp_config();
domain_init();
- trap_init(); /*
- * installs trap (s/w exception) wrappers.
- * Most route via entry.S and thence back into traps.c
- * where a really simple handler does a panic.
- * Instead, we'll want to pass most back to a domain.
- */
+ trap_init();
init_IRQ(); /* installs simple interrupt wrappers. Starts HZ clock. */
time_init(); /* installs software handler for HZ clock. */
softirq_init();
unsigned long boot_error = 0;
int timeout, cpu;
unsigned long start_eip;
+ l2_pgentry_t *pagetable;
cpu = ++cpucount;
/*
idle->processor = cpu;
idle->domain = IDLE_DOMAIN_ID;
- idle->mm.pagetable = mk_pagetable((unsigned long)idle0_pg_table);
+ pagetable = (void *)get_free_page(GFP_KERNEL);
+ memcpy(pagetable, idle0_pg_table, PAGE_SIZE);
+ idle_pg_table[cpu] = pagetable;
+ idle->mm.pagetable = mk_pagetable(__pa(pagetable));
map_cpu_to_boot_apicid(cpu, apicid);
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
- if ( trapnr == 14 )
+ if ( (trapnr == 14) && (addr >= PAGE_OFFSET) )
{
unsigned long page;
- __asm__ __volatile__ ("movl %%cr3,%0" : "=r" (page) : );
- printk(" pde = %08lx\n", page);
- page = ((unsigned long *) __va(page))[addr >> 22];
+ unsigned long *pde;
+ pde = (unsigned long *)idle_pg_table[smp_processor_id()];
+ page = pde[addr >> L2_PAGETABLE_SHIFT];
printk("*pde = %08lx\n", page);
if ( page & _PAGE_PRESENT )
{
ENTRY(start)
SECTIONS
{
- . = 0xE0000000 + 0x100000;
+ . = 0xFC000000 + 0x100000;
_text = .; /* Text and read-only data */
.text : {
*(.text)
#include <xeno/event.h>
#include <xeno/dom0_ops.h>
#include <asm/io.h>
+#include <asm/domain_page.h>
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
static unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
{
-
struct list_head *temp;
struct pfn_info *pf, *pf_head;
unsigned int alloc_pfns;
{
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
-#define ALLOC_PAGE_FROM_DOMAIN() \
- ({ alloc_address -= PAGE_SIZE; __va(alloc_address); })
+#define ALLOC_FRAME_FROM_DOMAIN() (alloc_address -= PAGE_SIZE)
char *src, *dst;
int i, dom = p->domain;
- unsigned long start_address = MAX_MONITOR_ADDRESS;
+ unsigned long start_address, phys_l1tab, phys_l2tab;
unsigned long cur_address, end_address, alloc_address, vaddr;
unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
unsigned long virt_ftable_start_addr = 0, virt_ftable_end_addr;
l1_pgentry_t *l1tab = NULL;
struct pfn_info *page = NULL;
net_ring_t *net_ring;
- blk_ring_t *blk_ring;
net_vif_t *net_vif;
if ( strncmp(__va(mod[0].mod_start), "XenoGues", 8) )
dom, (mod[nr_mods-1].mod_end-mod[0].mod_start)>>20,
(end_address-start_address)>>21,
(end_address-start_address)>>20);
- /* XXX Should release memory from alloc_new_dom_mem here XXX */
+ /* XXX should free domain memory here XXX */
return -1;
}
printk("DOM%d: Guest OS virtual load address is %08lx\n", dom,
virt_load_address);
- l2tab = (l2_pgentry_t *)ALLOC_PAGE_FROM_DOMAIN();
- memcpy(l2tab, idle0_pg_table, sizeof(idle0_pg_table));
+ /*
+ * WARNING: The new domain must have its 'processor' field
+ * filled in by now !!
+ */
+ phys_l2tab = ALLOC_FRAME_FROM_DOMAIN();
+ l2tab = map_domain_mem(phys_l2tab);
+ memcpy(l2tab, idle_pg_table[p->processor], PAGE_SIZE);
memset(l2tab, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
- p->mm.pagetable = mk_pagetable((unsigned long)l2tab);
+ p->mm.pagetable = mk_pagetable(phys_l2tab);
/*
* NB. The upper limit on this loop does one extra page. This is to
if(dom == 0)
ft_size = frame_table_size;
- l2tab = pagetable_ptr(p->mm.pagetable) +
- l2_table_offset(virt_load_address);
+ phys_l2tab += l2_table_offset(virt_load_address)*sizeof(l2_pgentry_t);
for ( cur_address = start_address;
cur_address != (end_address + PAGE_SIZE + ft_size);
cur_address += PAGE_SIZE )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
- l1tab = (l1_pgentry_t *)ALLOC_PAGE_FROM_DOMAIN();
+ phys_l1tab = ALLOC_FRAME_FROM_DOMAIN();
+ l2tab = map_domain_mem(phys_l2tab);
+ *l2tab = mk_l2_pgentry(phys_l1tab|L2_PROT);
+ phys_l2tab += sizeof(l2_pgentry_t);
+ l1tab = map_domain_mem(phys_l1tab);
clear_page(l1tab);
l1tab += l1_table_offset(
virt_load_address + cur_address - start_address);
- *l2tab++ = mk_l2_pgentry(__pa(l1tab)|L2_PROT);
}
*l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
/* Pages that are part of page tables must be read-only. */
vaddr = virt_load_address + alloc_address - start_address;
- l2tab = pagetable_ptr(p->mm.pagetable) + l2_table_offset(vaddr);
- l1tab = l2_pgentry_to_l1(*l2tab++) + l1_table_offset(vaddr);
+ phys_l2tab = pagetable_val(p->mm.pagetable) +
+ (l2_table_offset(vaddr) * sizeof(l2_pgentry_t));
+ l2tab = map_domain_mem(phys_l2tab);
+ phys_l1tab = l2_pgentry_to_phys(*l2tab) +
+ (l1_table_offset(vaddr) * sizeof(l1_pgentry_t));
+ phys_l2tab += sizeof(l2_pgentry_t);
+ l1tab = map_domain_mem(phys_l1tab);
for ( cur_address = alloc_address;
cur_address != end_address;
cur_address += PAGE_SIZE )
{
*l1tab++ = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
- l1tab = l2_pgentry_to_l1(*l2tab++);
+ {
+ l2tab = map_domain_mem(phys_l2tab);
+ phys_l1tab = l2_pgentry_to_phys(*l2tab);
+ phys_l2tab += sizeof(l2_pgentry_t);
+ l1tab = map_domain_mem(phys_l1tab);
+ }
page = frame_table + (cur_address >> PAGE_SHIFT);
page->flags = dom | PGT_l1_page_table;
page->tot_count++;
/* Map in the shared info structure. */
virt_shinfo_address = end_address - start_address + virt_load_address;
- l2tab = pagetable_ptr(p->mm.pagetable) +
- l2_table_offset(virt_shinfo_address);
- l1tab = l2_pgentry_to_l1(*l2tab) +
- l1_table_offset(virt_shinfo_address);
+ phys_l2tab = pagetable_val(p->mm.pagetable) +
+ (l2_table_offset(virt_shinfo_address) * sizeof(l2_pgentry_t));
+ l2tab = map_domain_mem(phys_l2tab);
+ phys_l1tab = l2_pgentry_to_phys(*l2tab) +
+ (l1_table_offset(virt_shinfo_address) * sizeof(l1_pgentry_t));
+ l1tab = map_domain_mem(phys_l1tab);
*l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
/* Set up shared info area. */
cur_address < virt_ftable_end_addr;
cur_address += PAGE_SIZE)
{
- l2tab = pagetable_ptr(p->mm.pagetable) + l2_table_offset(cur_address);
- l1tab = l2_pgentry_to_l1(*l2tab) + l1_table_offset(cur_address);
+ phys_l2tab = pagetable_val(p->mm.pagetable) +
+ (l2_table_offset(cur_address) * sizeof(l2_pgentry_t));
+ l2tab = map_domain_mem(phys_l2tab);
+ phys_l1tab = l2_pgentry_to_phys(*l2tab) +
+ (l1_table_offset(cur_address) * sizeof(l1_pgentry_t));
+ l1tab = map_domain_mem(phys_l1tab);
*l1tab = mk_l1_pgentry(__pa(ft_mapping)|L1_PROT);
ft_mapping += PAGE_SIZE;
}
/* Install the new page tables. */
__cli();
__asm__ __volatile__ (
- "mov %%eax,%%cr3"
- : : "a" (__pa(pagetable_ptr(p->mm.pagetable))));
+ "mov %%eax,%%cr3" : : "a" (pagetable_val(p->mm.pagetable)));
/* Copy the guest OS image. */
src = (char *)__va(mod[0].mod_start + 12);
/* Reinstate the caller's page tables. */
__asm__ __volatile__ (
- "mov %%eax,%%cr3"
- : : "a" (__pa(pagetable_ptr(current->mm.pagetable))));
+ "mov %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable)));
__sti();
new_thread(p,
{
struct task_struct *new_dom;
dom0_newdomain_t dom0_params;
- unsigned long max_page, remaining_hypervisor_memory;
+ unsigned long max_page;
unsigned char *cmdline;
int i;
memcpy(&idle0_task_union, &first_task_struct, sizeof(first_task_struct));
max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
- if ( max_page > (MAX_USABLE_ADDRESS >> PAGE_SHIFT) )
- max_page = MAX_USABLE_ADDRESS >> PAGE_SHIFT;
- /* mem_upper is address of first memory hole in high memory, minus 1MB. */
- /* PS. mem_upper is in kB. */
- remaining_hypervisor_memory = init_frametable(max_page);
- printk("Initialised %luMB of memory on a %luMB machine\n",
- max_page >> (20-PAGE_SHIFT), (mbi->mem_upper>>10)+1);
-
- init_page_allocator(mod[nr_mods-1].mod_end, remaining_hypervisor_memory);
+ init_frametable(max_page);
+ printk("Initialised all memory on a %luMB machine\n",
+ max_page >> (20-PAGE_SHIFT));
+
+ init_page_allocator(mod[nr_mods-1].mod_end, MAX_MONITOR_ADDRESS);
/* These things will get done by do_newdomain() for all other tasks. */
current->shared_info = (void *)get_free_page(GFP_KERNEL);
#include <asm/page.h>
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/domain_page.h>
#if 0
#define MEM_LOG(_f, _a...) printk("DOM%d: (file=memory.c, line=%d) " _f "\n", current->domain, __LINE__, ## _a )
static void put_page(unsigned long page_nr, int writeable);
static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
-static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t);
-static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
+static int mod_l2_entry(unsigned long, l2_pgentry_t);
+static int mod_l1_entry(unsigned long, l1_pgentry_t);
/* frame table size and its size in pages */
frame_table_t * frame_table;
/*
* init_frametable:
- * Initialise per-frame memory information. The return value
- * is the amount of memory available for use by the rest of Xen.
- * The very highest frames are reserved for the per-frame info.
- * This function should be called before initialising the
- * page allocator!
+ * Initialise per-frame memory information. This goes directly after
+ * MAX_MONITOR_ADDRESS in physical memory.
*/
-unsigned long __init init_frametable(unsigned long nr_pages)
+void __init init_frametable(unsigned long nr_pages)
{
struct pfn_info *pf;
unsigned long page_index;
max_page = nr_pages;
frame_table_size = nr_pages * sizeof(struct pfn_info);
frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
- free_pfns = nr_pages - (MAX_MONITOR_ADDRESS >> PAGE_SHIFT);
+ free_pfns = nr_pages -
+ ((MAX_MONITOR_ADDRESS + frame_table_size) >> PAGE_SHIFT);
- frame_table = phys_to_virt(MAX_MONITOR_ADDRESS - frame_table_size);
+ frame_table = phys_to_virt(MAX_MONITOR_ADDRESS);
memset(frame_table, 0, frame_table_size);
/* Put all domain-allocatable memory on a free list. */
INIT_LIST_HEAD(&free_list);
- for( page_index = MAX_MONITOR_ADDRESS >> PAGE_SHIFT;
+ for( page_index = (MAX_MONITOR_ADDRESS + frame_table_size) >> PAGE_SHIFT;
page_index < nr_pages;
page_index++ )
{
pf = list_entry(&frame_table[page_index].list, struct pfn_info, list);
list_add_tail(&pf->list, &free_list);
}
-
- /* Return the remaing Xen-allocatable memory. */
- return(MAX_MONITOR_ADDRESS - frame_table_size);
}
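The new physical layout is therefore: Xen itself in 0-16MB, the frame table immediately above MAX_MONITOR_ADDRESS, and domain-allocatable pages after that. A standalone sketch of the arithmetic (the sizeof(struct pfn_info) value is an assumed placeholder):

/* Sketch: where domain-allocatable memory begins (illustrative values). */
#include <stdio.h>

#define PAGE_SHIFT          12
#define PAGE_SIZE           (1UL << PAGE_SHIFT)
#define PAGE_MASK           (~(PAGE_SIZE - 1))
#define MAX_MONITOR_ADDRESS (16UL * 1024 * 1024)

int main(void)
{
    unsigned long nr_pages = (256UL * 1024 * 1024) >> PAGE_SHIFT; /* e.g. a 256MB box */
    unsigned long pfn_info_size = 24;               /* ASSUMED sizeof(struct pfn_info) */
    unsigned long frame_table_size =
        (nr_pages * pfn_info_size + PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long first_free_pfn =
        (MAX_MONITOR_ADDRESS + frame_table_size) >> PAGE_SHIFT;
    printf("frame table %lu bytes at 16MB; first domain pfn %lu\n",
           frame_table_size, first_free_pfn);
    return 0;
}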
if ( ret != 0 ) return((ret < 0) ? ret : 0);
/* NEW level-2 page table! Deal with every PDE in the table. */
- p_l2_entry = (l2_pgentry_t *)__va(page_nr << PAGE_SHIFT);
+ p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
{
l2_entry = *p_l2_entry++;
}
ret = get_l1_table(l2_pgentry_to_pagenr(l2_entry));
if ( ret ) return(ret);
+ p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
+ ((i+1) * sizeof(l2_pgentry_t)));
}
/* Now we simply slap in our high mapping. */
- memcpy(p_l2_entry, idle0_pg_table + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+ memcpy(p_l2_entry,
+ idle_pg_table[smp_processor_id()] + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
return(ret);
if ( ret != 0 ) return((ret < 0) ? ret : 0);
/* NEW level-1 page table! Deal with every PTE in the table. */
- p_l1_entry = (l1_pgentry_t *)__va(page_nr << PAGE_SHIFT);
+ p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
{
l1_entry = *p_l1_entry++;
if ( ret != 0 ) return((ret < 0) ? ret : 0);
/* We had last reference to level-2 page table. Free the PDEs. */
- p_l2_entry = (l2_pgentry_t *)__va(page_nr << PAGE_SHIFT);
+ p_l2_entry = map_domain_mem(page_nr << PAGE_SHIFT);
for ( i = 0; i < HYPERVISOR_ENTRIES_PER_L2_PAGETABLE; i++ )
{
l2_entry = *p_l2_entry++;
- if ( (l2_pgentry_val(l2_entry) & _PAGE_PRESENT) )
+ if ( (l2_pgentry_val(l2_entry) & _PAGE_PRESENT) )
+ {
put_l1_table(l2_pgentry_to_pagenr(l2_entry));
+ p_l2_entry = map_domain_mem((page_nr << PAGE_SHIFT) +
+ ((i+1) * sizeof(l2_pgentry_t)));
+ }
}
return(0);
if ( dec_page_refcnt(page_nr, PGT_l1_page_table) != 0 ) return;
/* We had last reference to level-1 page table. Free the PTEs. */
- p_l1_entry = (l1_pgentry_t *)__va(page_nr << PAGE_SHIFT);
+ p_l1_entry = map_domain_mem(page_nr << PAGE_SHIFT);
for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
{
l1_entry = *p_l1_entry++;
}
-static int mod_l2_entry(l2_pgentry_t *p_l2_entry, l2_pgentry_t new_l2_entry)
+static int mod_l2_entry(unsigned long pa, l2_pgentry_t new_l2_entry)
{
- l2_pgentry_t old_l2_entry = *p_l2_entry;
+ l2_pgentry_t *p_l2_entry, old_l2_entry;
+
+ p_l2_entry = map_domain_mem(pa);
+ old_l2_entry = *p_l2_entry;
if ( (((unsigned long)p_l2_entry & (PAGE_SIZE-1)) >> 2) >=
DOMAIN_ENTRIES_PER_L2_PAGETABLE )
goto fail;
}
+ /*
+ * Write the new value while pointer is still valid. The mapping cache
+ * entry for p_l2_entry may get clobbered by {put,get}_l1_table.
+ */
+ *p_l2_entry = new_l2_entry;
+
if ( (l2_pgentry_val(new_l2_entry) & _PAGE_PRESENT) )
{
if ( (l2_pgentry_val(new_l2_entry) & (_PAGE_GLOBAL|_PAGE_PSE)) )
put_l1_table(l2_pgentry_to_pagenr(old_l2_entry));
}
- *p_l2_entry++ = new_l2_entry;
-
return(0);
+
fail:
+ /*
+ * On failure we put the old value back. We need to regrab the
+ * mapping of the physical page frame.
+ */
+ p_l2_entry = map_domain_mem(pa);
+ *p_l2_entry = old_l2_entry;
return(-1);
}
-static int mod_l1_entry(l1_pgentry_t *p_l1_entry, l1_pgentry_t new_l1_entry)
+static int mod_l1_entry(unsigned long pa, l1_pgentry_t new_l1_entry)
{
- l1_pgentry_t old_l1_entry = *p_l1_entry;
+ l1_pgentry_t *p_l1_entry, old_l1_entry;
+
+ p_l1_entry = map_domain_mem(pa);
+ old_l1_entry = *p_l1_entry;
if ( (l1_pgentry_val(new_l1_entry) & _PAGE_PRESENT) )
{
l1_pgentry_val(old_l1_entry) & _PAGE_RW);
}
- *p_l1_entry++ = new_l1_entry;
+ /* p_l1_entry is still valid here */
+ *p_l1_entry = new_l1_entry;
return(0);
fail:
err = get_l2_table(pfn);
if ( !err )
{
- put_l2_table(__pa(pagetable_ptr(current->mm.pagetable))
- >> PAGE_SHIFT);
- current->mm.pagetable =
- mk_pagetable((unsigned long)__va(pfn<<PAGE_SHIFT));
+ put_l2_table(pagetable_val(current->mm.pagetable) >> PAGE_SHIFT);
+ current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
}
else
{
break;
case PGEXT_INVLPG:
- __asm__ __volatile__ ("invlpg %0" : :
- "m" (*(char*)(val & ~PGEXT_CMD_MASK)));
+ __flush_tlb_one(val & ~PGEXT_CMD_MASK);
break;
default:
switch ( (flags & PG_type_mask) )
{
case PGT_l1_page_table:
- err = mod_l1_entry((l1_pgentry_t *)__va(cur.ptr),
- mk_l1_pgentry(cur.val));
+ err = mod_l1_entry(cur.ptr, mk_l1_pgentry(cur.val));
break;
case PGT_l2_page_table:
- err = mod_l2_entry((l2_pgentry_t *)__va(cur.ptr),
- mk_l2_pgentry(cur.val));
+ err = mod_l2_entry(cur.ptr, mk_l2_pgentry(cur.val));
break;
default:
MEM_LOG("Update to non-pt page %08lx", cur.ptr);
flags = page->flags;
if ( (flags | current->domain) == PGT_l1_page_table )
{
- *(unsigned long *)__va(cur.ptr) = cur.val;
+
+ *(unsigned long *)map_domain_mem(cur.ptr) = cur.val;
err = 0;
}
else
tlb_flush[smp_processor_id()] = 0;
__asm__ __volatile__ (
"movl %%eax,%%cr3" : :
- "a" (__pa(pagetable_ptr(current->mm.pagetable))));
+ "a" (pagetable_val(current->mm.pagetable)));
}
return(0);
--- /dev/null
+/******************************************************************************
+ * domain_page.h
+ *
+ * Allow temporary mapping of domain page frames into Xen space.
+ */
+
+#include <xeno/config.h>
+#include <xeno/sched.h>
+
+extern unsigned long *mapcache[NR_CPUS];
+#define MAPCACHE_ENTRIES 1024
+#define MAPCACHE_HASH(_pfn) ((_pfn) & (MAPCACHE_ENTRIES-1))
+static inline void *map_domain_mem(unsigned long pa)
+{
+ unsigned long pfn = pa >> PAGE_SHIFT;
+ unsigned long hash = MAPCACHE_HASH(pfn);
+ unsigned long *pent = mapcache[smp_processor_id()] + hash;
+ void *va = (void *)(MAPCACHE_VIRT_START +
+ (hash << PAGE_SHIFT) +
+ (pa & ~PAGE_MASK));
+ if ( (*pent & PAGE_MASK) != (pfn << PAGE_SHIFT) )
+ {
+ *pent = (pfn << PAGE_SHIFT) | PAGE_HYPERVISOR;
+ __flush_tlb_one(va);
+ }
+ return va;
+}
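map_domain_mem() is a per-CPU, direct-mapped cache of MAPCACHE_ENTRIES page mappings in the MAPCACHE region: the slot is picked by the low bits of the frame number, so a later call can silently evict an earlier mapping. That is why the memory.c callers above carry physical addresses around and re-map after any call that may itself touch the cache. A minimal usage sketch (the helper is hypothetical, mirroring the mod_l2_entry pattern):

/* Hypothetical caller (not from the source) showing the re-map discipline. */
#include <asm/page.h>
#include <asm/domain_page.h>

static void copy_one_l2e(unsigned long src_pa, unsigned long dst_pa)
{
    l2_pgentry_t *src, *dst, e;

    src = map_domain_mem(src_pa);   /* mapping valid only until the next call */
    e   = *src;                     /* so copy the entry out immediately      */

    dst = map_domain_mem(dst_pa);   /* may evict the 'src' slot: never touch  */
    *dst = e;                       /* 'src' again after this point           */
}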
/* Strip type from a table entry. */
#define l1_pgentry_val(_x) ((_x).l1_lo)
#define l2_pgentry_val(_x) ((_x).l2_lo)
+#define pagetable_val(_x) ((_x).pt_lo)
#define alloc_l1_pagetable() ((l1_pgentry_t *)get_free_page(GFP_KERNEL))
#define alloc_l2_pagetable() ((l2_pgentry_t *)get_free_page(GFP_KERNEL))
-#define pagetable_ptr(_x) ((l2_pagetable_t)((_x).pt_lo))
-#define pagetable_type(_x) (((_x).pt_lo) & ~PAGE_MASK)
-#define mk_pagetable(_x) ( (pagetable_t) { (_x) } )
-#define pagetable_none(_x) ((_x).pt_lo == 0)
-
/* Add type to a table entry. */
#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } )
#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } )
+#define mk_pagetable(_x) ( (pagetable_t) { (_x) } )
/* Turn a typed table entry into a page index. */
#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT)
#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT)
+/* Turn a typed table entry into a physical address. */
+#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK)
+#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK)
+
/* Dereference a typed level-2 entry to yield a typed level-1 table. */
#define l2_pgentry_to_l1(_x) \
((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK))
#define l1_pgentry_empty(_x) (!l1_pgentry_val(_x))
#define l2_pgentry_empty(_x) (!l2_pgentry_val(_x))
-#define __PAGE_OFFSET (0xE0000000)
+#define __PAGE_OFFSET (0xFC000000)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#include <asm/bitops.h>
extern l2_pgentry_t idle0_pg_table[ENTRIES_PER_L2_PAGETABLE];
+extern l2_pgentry_t *idle_pg_table[NR_CPUS];
extern void paging_init(void);
#define __flush_tlb() \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip) \
:"m" (next->thread.esp),"m" (next->thread.eip), \
"a" (prev), "d" (next), \
- "c" (__pa(pagetable_ptr(next->mm.pagetable))) \
+ "c" (pagetable_val(next->mm.pagetable)) \
:"memory"); \
} while (0)
#ifndef __HYPERVISOR_IF_H__
#define __HYPERVISOR_IF_H__
+/* Virtual addresses beyond this are inaccessible by guest OSes. */
+#define HYPERVISOR_VIRT_START (0xFC000000UL)
+
typedef struct trap_info_st
{
unsigned char vector; /* exception/interrupt vector */
+++ /dev/null
-/*
- * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
- */
-#ifndef _LINUX_BOOTMEM_H
-#define _LINUX_BOOTMEM_H
-
-//#include <asm/pgtable.h>
-
-/*
- * simple boot-time physical memory area allocator.
- */
-
-extern unsigned long max_low_pfn, max_page;
-extern unsigned long min_low_pfn;
-
-#if 0
-
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
- */
-typedef struct bootmem_data {
- unsigned long node_boot_start;
- unsigned long node_low_pfn;
- void *node_bootmem_map;
- unsigned long last_offset;
- unsigned long last_pos;
-} bootmem_data_t;
-
-extern unsigned long __init bootmem_bootmap_pages (unsigned long);
-extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
-extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
-extern void __init free_bootmem (unsigned long addr, unsigned long size);
-extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
-#define alloc_bootmem(x) \
- __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
- __alloc_bootmem((x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
- __alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem((x), PAGE_SIZE, 0)
-extern unsigned long __init free_all_bootmem (void);
-
-extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
-extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
-extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
-extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
-extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
-#define alloc_bootmem_node(pgdat, x) \
- __alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_node(pgdat, x) \
- __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
-
-#else
-
-extern void __init init_bootmem (unsigned long addr, unsigned long memend);
-extern void * __init alloc_bootmem_low_pages(unsigned long size);
-
-#endif
-
-#endif /* _LINUX_BOOTMEM_H */
#define __cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#define ____cacheline_aligned __cacheline_aligned
-/* 0-8MB is fixed monitor space for now. */
-#define MAX_MONITOR_ADDRESS ( 16*1024*1024)
-#define MAX_DMA_ADDRESS ( 16*1024*1024)
-#define MAX_USABLE_ADDRESS ((0xfc000000-__PAGE_OFFSET) & ~((1<<22)-1))
- /*^^^^^^^^^*/
- /*arbitrary*/
+/* 0-16MB is fixed monitor space. 0-56MB is direct-mapped at the top of virtual memory. */
+#define MAX_MONITOR_ADDRESS (16*1024*1024)
+#define MAX_DMA_ADDRESS (16*1024*1024)
+#define MAX_DIRECTMAP_ADDRESS (56*1024*1024)
+/* Penultimate 4MB of virtual address space used for domain page mappings. */
+#define MAPCACHE_VIRT_START (PAGE_OFFSET + MAX_DIRECTMAP_ADDRESS)
+#define MAPCACHE_VIRT_END (MAPCACHE_VIRT_START + (4*1024*1024))
+/* Final 4MB of virtual address space used for ioremap(). */
+#define IOREMAP_VIRT_START (MAPCACHE_VIRT_END)
+#define IOREMAP_VIRT_END (IOREMAP_VIRT_START + (4*1024*1024))
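Putting the new constants together, the 64MB of virtual address space above HYPERVISOR_VIRT_START breaks down as: 0xFC000000-0xFF800000 direct map of physical 0-56MB, 0xFF800000-0xFFC00000 the per-CPU mapcache, and 0xFFC00000 upwards the ioremap() region. A standalone arithmetic check of that layout (sketch only):

/* Derived layout of the hypervisor's top-of-memory window (illustrative). */
#include <assert.h>

#define PAGE_OFFSET           0xFC000000UL
#define MAX_DIRECTMAP_ADDRESS (56UL * 1024 * 1024)
#define MAPCACHE_VIRT_START   (PAGE_OFFSET + MAX_DIRECTMAP_ADDRESS)
#define MAPCACHE_VIRT_END     (MAPCACHE_VIRT_START + (4UL * 1024 * 1024))
#define IOREMAP_VIRT_START    (MAPCACHE_VIRT_END)

int main(void)
{
    assert(MAPCACHE_VIRT_START == 0xFF800000UL); /* penultimate 4MB: mapcache */
    assert(IOREMAP_VIRT_START  == 0xFFC00000UL); /* final 4MB: ioremap()      */
    return 0;
}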
/* Linkage for x86 */
#define FASTCALL(x) x __attribute__((regparm(3)))
extern unsigned long frame_table_size;
extern struct list_head free_list;
extern unsigned int free_pfns;
-unsigned long init_frametable(unsigned long nr_pages);
+extern unsigned long max_page;
+void init_frametable(unsigned long nr_pages);
/* Part of the domain API. */
int do_process_page_updates(page_update_request_t *updates, int count);
#define IDLE0_MM \
{ \
cpu_vm_mask: 0, \
- pagetable: mk_pagetable((unsigned long)idle0_pg_table) \
+ pagetable: mk_pagetable(__pa(idle0_pg_table)) \
}
#define _HYP_EVENT_NEED_RESCHED 0
{
unsigned long start_pfn, max_pfn, max_low_pfn;
unsigned long bootmap_size;
- char str[256]; int strcnt;
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
* 128MB for vmalloc and initrd
*/
#define VMALLOC_RESERVE (unsigned long)(128 << 20)
-#define MAXMEM (unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)
+#define MAXMEM (unsigned long)(HYPERVISOR_VIRT_START-PAGE_OFFSET-VMALLOC_RESERVE)
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN (1 << 20)
* Then reserve space for OS image, and the bootmem bitmap.
*/
bootmap_size = init_bootmem(start_pfn, max_low_pfn);
- free_bootmem(0, PFN_PHYS(max_pfn));
+ free_bootmem(0, PFN_PHYS(max_low_pfn));
reserve_bootmem(0, PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1);
/* Now reserve space for the hypervisor-provided page tables. */
unsigned long pte;
int i;
reserve_bootmem(__pa(pgd), PAGE_SIZE);
- for ( i = 0; i < (0xE0000000UL>>22); i++ )
+ for ( i = 0; i < (HYPERVISOR_VIRT_START>>22); i++ )
{
unsigned long pgde = *pgd++;
if ( !(pgde & 1) ) continue;
}
}
cur_pgd = init_mm.pgd = (pgd_t *)start_info.pt_base;
+ queue_pgd_pin(__pa(init_mm.pgd));
#ifdef CONFIG_BLK_DEV_INITRD
if (start_info.mod_start) {
static int __init setup_death_event(void)
{
(void)request_irq(_EVENT_DIE, time_to_die, 0, "die", NULL);
+ return 0;
}
__initcall(setup_death_event);
#include <linux/timex.h>
#include <linux/config.h>
-#include <asm/fixmap.h>
#include <asm/hypervisor.h>
#include <linux/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
-#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlb.h>
return freed;
}
-/*
- * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
- * physical space so we can cache the place of the first one and move
- * around without checking the pgd every time.
- */
-
-#if CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
- unsigned long kmap_vstart;
-
- /* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
- kmap_prot = PAGE_KERNEL;
-}
-#endif /* CONFIG_HIGHMEM */
-
void show_mem(void)
{
int i, total = 0, reserved = 0;
__flush_tlb_one(vaddr);
}
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
-{
- unsigned long address = __fix_to_virt(idx);
-
- if (idx >= __end_of_fixed_addresses) {
- printk("Invalid __set_fixmap\n");
- return;
- }
- set_pte_phys(address, phys, flags);
-}
-
-#if 0
-static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- int i, j;
- unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
-#if CONFIG_X86_PAE
- if (pgd_none(*pgd)) {
- pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
- if (pmd != pmd_offset(pgd, 0))
- printk("PAE BUG #02!\n");
- }
- pmd = pmd_offset(pgd, vaddr);
-#else
- pmd = (pmd_t *)pgd;
-#endif
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
- if (pte != pte_offset(pmd, 0))
- BUG();
- }
- vaddr += PMD_SIZE;
- }
- j = 0;
- }
-}
-#endif
-
-static void __init pagetable_init (void)
-{
-#if 0
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, 0, pgd_base);
-#endif
-
-#if CONFIG_HIGHMEM
- /*
- * Permanent kmaps:
- */
- vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
-
- pgd = init_mm.pgd + __pgd_offset(vaddr);
- pmd = pmd_offset(pgd, vaddr);
- pte = pte_offset(pmd, vaddr);
- pkmap_page_table = pte;
-#endif
-}
-
-/*
- * paging_init() sets up the page tables - note that the first 8MB are
- * already mapped by head.S.
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
- */
void __init paging_init(void)
{
- pagetable_init();
-
-#ifdef CONFIG_HIGHMEM
- kmap_init();
-#endif
{
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
unsigned int max_dma, high, low;
else {
zones_size[ZONE_DMA] = max_dma;
zones_size[ZONE_NORMAL] = low - max_dma;
-#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = high - low;
-#endif
}
free_area_init(zones_size);
}
int codesize, reservedpages, datasize, initsize;
int tmp;
-#ifdef CONFIG_HIGHMEM
- highmem_start_page = mem_map + highstart_pfn;
- max_mapnr = num_physpages = highend_pfn;
-#else
max_mapnr = num_physpages = max_low_pfn;
-#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
/* clear the zero-page */
*/
if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
reservedpages++;
-#ifdef CONFIG_HIGHMEM
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = mem_map + tmp;
-
- if (!page_is_ram(tmp)) {
- SetPageReserved(page);
- continue;
- }
- ClearPageReserved(page);
- set_bit(PG_highmem, &page->flags);
- atomic_set(&page->count, 1);
- __free_page(page);
- totalhigh_pages++;
- }
- totalram_pages += totalhigh_pages;
-#endif
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+++ /dev/null
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanizm,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
-#ifdef CONFIG_X86_LOCAL_APIC
- FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
-#endif
-#ifdef CONFIG_X86_IO_APIC
- FIX_IO_APIC_BASE_0,
- FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-#endif
-#ifdef CONFIG_X86_VISWS_APIC
- FIX_CO_CPU, /* Cobalt timer */
- FIX_CO_APIC, /* Cobalt APIC Redirection Table */
- FIX_LI_PCIA, /* Lithium PCI Bridge A */
- FIX_LI_PCIB, /* Lithium PCI Bridge B */
-#endif
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap, and leave one page empty
- * at the top of mem..
- */
-#define FIXADDR_TOP (0xffffe000UL)
-#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-#endif
-/*
- * highmem.h: virtual kernel memory mappings for high memory
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-#ifndef _ASM_HIGHMEM_H
-#define _ASM_HIGHMEM_H
-
-#ifdef __KERNEL__
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define HIGHMEM_DEBUG 1
-#else
-#define HIGHMEM_DEBUG 0
-#endif
-
-/* declarations for highmem.c */
-extern unsigned long highstart_pfn, highend_pfn;
-
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
-extern void kmap_init(void) __init;
-
-/*
- * Right now we initialize only a single pte table. It can be extended
- * easily, subsequent pte tables have to be allocated in one physical
- * chunk of RAM.
- */
-#define PKMAP_BASE (0xfe000000UL)
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-
-extern void * FASTCALL(kmap_high(struct page *page));
-extern void FASTCALL(kunmap_high(struct page *page));
-
-static inline void *kmap(struct page *page)
-{
- if (in_interrupt())
- BUG();
- if (page < highmem_start_page)
- return page_address(page);
- return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- if (in_interrupt())
- BUG();
- if (page < highmem_start_page)
- return;
- kunmap_high(page);
-}
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic(struct page *page, enum km_type type)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
-
- if (page < highmem_start_page)
- return page_address(page);
-
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#if HIGHMEM_DEBUG
- if (!pte_none(*(kmap_pte-idx)))
- BUG();
-#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
- __flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#if HIGHMEM_DEBUG
- unsigned long vaddr = (unsigned long) kvaddr;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < FIXADDR_START) // FIXME
- return;
-
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
- BUG();
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(kmap_pte-idx);
- __flush_tlb_one(vaddr);
-#endif
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_HIGHMEM_H */
+#error "Highmem unsupported!"
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
-#include <asm/fixmap.h>
#include <linux/threads.h>
/*
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/hypervisor.h>
-#include <asm/fixmap.h>
#include <linux/threads.h>
#ifndef _I386_BITOPS_H
#ifndef __ASSEMBLY__
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
+/* 4MB is just a nice "safety zone". Also, we align to a fresh pde. */
+#define VMALLOC_OFFSET (4*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#if CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#endif
+#define VMALLOC_END (HYPERVISOR_VIRT_START-PAGE_SIZE)
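For the guest (xenolinux) kernel the consequence is that vmalloc space now runs right up to the hypervisor hole, with a single guard page below it; everything from HYPERVISOR_VIRT_START (0xFC000000) upward belongs to Xen. Roughly (a sketch; the exact VMALLOC_START depends on high_memory):

    PAGE_OFFSET .. high_memory      direct-mapped physical memory
    VMALLOC_START .. 0xFBFFF000     vmalloc area (starts a 4MB-aligned gap above high_memory)
    0xFBFFF000 .. 0xFC000000        guard page (VMALLOC_END is one page below the hypervisor)
    0xFC000000 .. 4GB               hypervisor, inaccessible to the guest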
#define _PAGE_BIT_PRESENT 0
#define _PAGE_BIT_RW 1
#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
-#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC