From 4cbfcf6efe2f173d73aca5bba79acdd26d6d82a8 Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Tue, 30 Oct 2007 11:33:55 -0600
Subject: [PATCH] [IA64] Make Xen relocatable

1. Put the xenheap at 0xf400000004000000 so that it does not overlap with
   the identity mapping.
2. Xen itself can be relocated by the OS loader if the platform has no
   low memory.
3. Use another DTR to map the xenheap.

Signed-off-by: Anthony Xu
---
 xen/arch/ia64/linux-xen/head.S          | 12 ++++++++++-
 xen/arch/ia64/vmx/vmx_entry.S           | 16 +++++++++++---
 xen/arch/ia64/vmx/vmx_init.c            |  2 ++
 xen/arch/ia64/xen/xenasm.S              | 20 ++++++++++++++----
 xen/include/asm-ia64/linux-xen/asm/io.h |  4 ++++
 xen/include/asm-ia64/xenkregs.h         |  3 ++-
 xen/include/asm-ia64/xenpage.h          | 28 +++++++++++++++++++------
 xen/include/asm-ia64/xensystem.h        |  2 +-
 8 files changed, 71 insertions(+), 16 deletions(-)

diff --git a/xen/arch/ia64/linux-xen/head.S b/xen/arch/ia64/linux-xen/head.S
index 89afda7f4d..1e991ef663 100644
--- a/xen/arch/ia64/linux-xen/head.S
+++ b/xen/arch/ia64/linux-xen/head.S
@@ -263,6 +263,16 @@ start_ap:
 	itr.d dtr[r16]=r18
 	;;
 	srlz.i
+	;;
+	/* XEN HEAP is identity mapped */
+	mov r16 = IA64_TR_XEN_HEAP_REGS
+	dep r17 = -1, r2, 60, 4
+	;;
+	mov cr.ifa = r17
+	;;
+	itr.d dtr[r16]=r18
+	;;
+	srlz.i
 
 	/*
 	 * Switch into virtual mode:
@@ -294,7 +304,7 @@ start_ap:
 	;;
 	srlz.i
 	movl gp=__gp
-
+	;;
 	mov ar.fpsr=r2
 	;;
diff --git a/xen/arch/ia64/vmx/vmx_entry.S b/xen/arch/ia64/vmx/vmx_entry.S
index 6d4102edd0..2bc9add8a2 100644
--- a/xen/arch/ia64/vmx/vmx_entry.S
+++ b/xen/arch/ia64/vmx/vmx_entry.S
@@ -677,13 +677,23 @@ GLOBAL_ENTRY(vmx_switch_rr7)
 	;;
 	dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
 	;;
-	or r18=r2,r25
+	or r24=r2,r25
 	;;
 	srlz.i
 	;;
-	itr.i itr[r16]=r18
+	itr.i itr[r16]=r24
 	;;
-	itr.d dtr[r16]=r18
+	itr.d dtr[r16]=r24
+	;;
+	/* xen heap is also identity mapped */
+	mov r16 = IA64_TR_XEN_HEAP_REGS
+	dep r17=-1,r3,60,4
+	;;
+	ptr.d r17, r18
+	;;
+	mov cr.ifa=r17
+	;;
+	itr.d dtr[r16]=r24
 	;;
 
 	// re-pin mappings for per-cpu data
diff --git a/xen/arch/ia64/vmx/vmx_init.c b/xen/arch/ia64/vmx/vmx_init.c
index a1f03b64d1..00366d397e 100644
--- a/xen/arch/ia64/vmx/vmx_init.c
+++ b/xen/arch/ia64/vmx/vmx_init.c
@@ -117,6 +117,7 @@ vmx_init_env(void)
 	if (!vm_buffer) {
 		vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
 		ASSERT(vm_buffer);
+		vm_buffer = virt_to_xenva((vm_buffer));
 		printk("vm_buffer: 0x%lx\n", vm_buffer);
 	}
 
@@ -162,6 +163,7 @@ static vpd_t *alloc_vpd(void)
 		printk("VPD allocation failed.\n");
 		return NULL;
 	}
+	vpd = (vpd_t *)virt_to_xenva(vpd);
 
 	printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n", vpd, sizeof(vpd_t));
 
diff --git a/xen/arch/ia64/xen/xenasm.S b/xen/arch/ia64/xen/xenasm.S
index 63b203129d..c9c11e5f95 100644
--- a/xen/arch/ia64/xen/xenasm.S
+++ b/xen/arch/ia64/xen/xenasm.S
@@ -60,19 +60,21 @@ GLOBAL_ENTRY(ia64_new_rr7)
 	mov ar.rsc=0		// put RSE in enforced lazy, LE mode
 	or loc3=loc3,r17	// add in psr the bits to set
 	;;
+	movl loc5=pal_vaddr	// get pal_vaddr
+	;;
+	ld8 loc5=[loc5]		// read pal_vaddr
+	;;
 	andcm r16=loc3,r16	// removes bits to clear from psr
 	dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT	// Xen code paddr
 	br.call.sptk.many rp=ia64_switch_mode_phys
 1:
 	// now in physical mode with psr.i/ic off so do rr7 switch
-	movl r16=pal_vaddr	// Note: belong to region 7!
+	dep r16=-1,r0,61,3	// Note: belong to region 7!
 	;;
 	mov rr[r16]=in0
 	;;
 	srlz.d
-	dep r16=0,r16,60,4	// Get physical address.
 	;;
-	ld8 loc5=[r16]		// read pal_vaddr
 	movl r26=PAGE_KERNEL
 	;;
@@ -90,7 +92,17 @@ GLOBAL_ENTRY(ia64_new_rr7)
 	itr.i itr[r16]=r18
 	;;
 	itr.d dtr[r16]=r18
-
+	;;
+	/* xen heap is also identity mapped */
+	mov r16 = IA64_TR_XEN_HEAP_REGS
+	dep r17=-1,loc6,60,4
+	;;
+	ptr.d r17,r24
+	;;
+	mov cr.ifa=r17
+	;;
+	itr.d dtr[r16]=r18
+	;;
 	// re-pin mappings for stack (current)
 	// unless overlaps with KERNEL_TR
diff --git a/xen/include/asm-ia64/linux-xen/asm/io.h b/xen/include/asm-ia64/linux-xen/asm/io.h
index 3182a944aa..2b7b05ae01 100644
--- a/xen/include/asm-ia64/linux-xen/asm/io.h
+++ b/xen/include/asm-ia64/linux-xen/asm/io.h
@@ -82,6 +82,8 @@ extern unsigned int num_io_spaces;
 #include
 #include
 
+
+#ifndef XEN
 /*
  * Change virtual addresses to physical addresses and vv.
  */
@@ -90,6 +92,7 @@ virt_to_maddr (volatile void *address)
 {
 	return (unsigned long) address - PAGE_OFFSET;
 }
+#endif
 
 static inline void*
 maddr_to_virt (unsigned long address)
@@ -97,6 +100,7 @@ maddr_to_virt (unsigned long address)
 	return (void *) (address + PAGE_OFFSET);
 }
 
+
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
 extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
diff --git a/xen/include/asm-ia64/xenkregs.h b/xen/include/asm-ia64/xenkregs.h
index ee13211a10..fb62f70c0e 100644
--- a/xen/include/asm-ia64/xenkregs.h
+++ b/xen/include/asm-ia64/xenkregs.h
@@ -7,7 +7,8 @@
 #define IA64_TR_SHARED_INFO	3	/* dtr3: page shared with domain */
 #define IA64_TR_VHPT		4	/* dtr4: vhpt */
 #define IA64_TR_MAPPED_REGS	5	/* dtr5: vcpu mapped regs */
-#define IA64_DTR_GUEST_KERNEL	6
+#define IA64_TR_XEN_HEAP_REGS	6	/* dtr6: xen heap identity mapped regs */
+#define IA64_DTR_GUEST_KERNEL	7
 #define IA64_ITR_GUEST_KERNEL	2
 /* Processor status register bits: */
 #define IA64_PSR_VM_BIT	46
diff --git a/xen/include/asm-ia64/xenpage.h b/xen/include/asm-ia64/xenpage.h
index a9dc9b4a68..13d27a06df 100644
--- a/xen/include/asm-ia64/xenpage.h
+++ b/xen/include/asm-ia64/xenpage.h
@@ -1,14 +1,13 @@
 #ifndef _ASM_IA64_XENPAGE_H
 #define _ASM_IA64_XENPAGE_H
 
+#ifndef __ASSEMBLY__
 #undef mfn_valid
 #undef page_to_mfn
 #undef mfn_to_page
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
 #undef ia64_mfn_valid
-#ifndef __ASSEMBLY__
 extern int ia64_mfn_valid (unsigned long pfn);
-#endif
 # define mfn_valid(_pfn)	(((_pfn) < max_page) && ia64_mfn_valid(_pfn))
 #else
 # define mfn_valid(_pfn)	((_pfn) < max_page)
@@ -16,10 +15,24 @@ extern int ia64_mfn_valid (unsigned long pfn);
 # define page_to_mfn(_page)	((unsigned long) ((_page) - frame_table))
 # define mfn_to_page(_pfn)	(frame_table + (_pfn))
 
+
+#include
+
+static inline unsigned long __virt_to_maddr(unsigned long va)
+{
+	if (va - KERNEL_START < xenheap_size)
+		return xen_pstart + (va - KERNEL_START);
+	else
+		return (va & ((1UL << 60) - 1));
+}
+
+#define virt_to_maddr(va)	(__virt_to_maddr((unsigned long)va))
+
+
 #undef page_to_maddr
 #undef virt_to_page
 #define page_to_maddr(page)	(page_to_mfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr)	mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr)	(mfn_to_page(virt_to_maddr(kaddr) >> PAGE_SHIFT))
 
 #define page_to_virt(_page)	maddr_to_virt(page_to_maddr(_page))
 #define maddr_to_page(kaddr)	mfn_to_page(((kaddr) >> PAGE_SHIFT))
@@ -28,7 +41,6 @@ extern int ia64_mfn_valid (unsigned long pfn);
 #define virt_to_mfn(va)	(virt_to_maddr(va) >> PAGE_SHIFT)
 #define mfn_to_virt(mfn)	maddr_to_virt(mfn << PAGE_SHIFT)
 
-#ifndef __ASSEMBLY__
 typedef union xen_va {
 	struct {
 		unsigned long off : 60;
@@ -63,14 +75,18 @@ static inline int get_order_from_shift(unsigned long shift)
 	else
 		return shift - PAGE_SHIFT;
 }
-#endif
 
+/* from identity va to xen va */
+#define virt_to_xenva(va)	((unsigned long)va - PAGE_OFFSET - \
+				 xen_pstart + KERNEL_START)
+
 #undef __pa
 #undef __va
-#define __pa(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __pa(x)	(virt_to_maddr(x))
 #define __va(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
 
 /* It is sometimes very useful to have unsigned long as result. */
 #define __va_ul(x)	({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
+#endif
 
 #endif /* _ASM_IA64_XENPAGE_H */
diff --git a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h
index 1bea363de6..b40635b409 100644
--- a/xen/include/asm-ia64/xensystem.h
+++ b/xen/include/asm-ia64/xensystem.h
@@ -30,7 +30,7 @@
 #define XEN_VIRT_UC_BIT	57
 
-#define KERNEL_START		0xf000000004000000
+#define KERNEL_START		0xf400000004000000
 #define GATE_ADDR		KERNEL_START
 
 #define IS_VMM_ADDRESS(addr)	((((addr) >> 60) ^ ((addr) >> 59)) & 1)
-- 
2.30.2
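
Note (not part of the patch): the standalone C sketch below mirrors the address
translation that the new __virt_to_maddr()/virt_to_xenva() helpers in xenpage.h
perform, using the KERNEL_START and PAGE_OFFSET values seen above. The
xen_pstart and xenheap_size values are made-up example inputs, and main()
exists only to exercise the arithmetic.

/*
 * Illustrative sketch only, not from the Xen tree.  Virtual addresses inside
 * the xenheap window starting at KERNEL_START translate to xen_pstart plus
 * their offset; anything else is treated as an identity mapping and only the
 * region bits (63:60) are stripped.
 */
#include <stdio.h>

#define KERNEL_START  0xf400000004000000UL  /* new xenheap virtual base */
#define PAGE_OFFSET   0xf000000000000000UL  /* region-7 identity mapping base */

static unsigned long xen_pstart   = 0x0000000004000000UL; /* example Xen load address */
static unsigned long xenheap_size = 64UL << 20;            /* example: 64 MB xenheap */

/* Same shape as the new __virt_to_maddr() in xenpage.h. */
static unsigned long virt_to_maddr(unsigned long va)
{
	if (va - KERNEL_START < xenheap_size)
		return xen_pstart + (va - KERNEL_START);  /* inside the xenheap window */
	return va & ((1UL << 60) - 1);                    /* strip the region bits */
}

/* Same shape as the new virt_to_xenva(): identity va -> xenheap va. */
static unsigned long virt_to_xenva(unsigned long va)
{
	return va - PAGE_OFFSET - xen_pstart + KERNEL_START;
}

int main(void)
{
	unsigned long heap_va     = KERNEL_START + 0x2000;              /* xenheap va */
	unsigned long identity_va = PAGE_OFFSET + xen_pstart + 0x2000;  /* same bytes, identity va */

	printf("xenheap va  0x%016lx -> maddr  0x%016lx\n", heap_va, virt_to_maddr(heap_va));
	printf("identity va 0x%016lx -> maddr  0x%016lx\n", identity_va, virt_to_maddr(identity_va));
	printf("identity va 0x%016lx -> xen va 0x%016lx\n", identity_va, virt_to_xenva(identity_va));
	return 0;
}

Both inputs resolve to the same machine address, and virt_to_xenva() recovers
the xenheap virtual address from the identity-mapped one, which is what the
vmx_init.c hunks rely on after alloc_xenheap_pages().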