itr.d dtr[r16]=r18
;;
srlz.i
+ ;;
+ /* XEN HEAP is identity mapped */
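+ /*
+ * Build the region-7 identity virtual address by forcing bits 63:60
+ * of r2 to all ones, point cr.ifa at it, and pin it through dtr
+ * IA64_TR_XEN_HEAP_REGS using the same PTE (r18) as the translation
+ * inserted above.
+ */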
+ mov r16 = IA64_TR_XEN_HEAP_REGS
+ dep r17=-1,r2,60,4
+ ;;
+ mov cr.ifa=r17
+ ;;
+ itr.d dtr[r16]=r18
+ ;;
+ srlz.i
/*
* Switch into virtual mode:
;;
srlz.i
movl gp=__gp
-
+ ;;
mov ar.fpsr=r2
;;
;;
dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
;;
- or r18=r2,r25
+ or r24=r2,r25
;;
srlz.i
;;
- itr.i itr[r16]=r18
+ itr.i itr[r16]=r24
;;
- itr.d dtr[r16]=r18
+ itr.d dtr[r16]=r24
+ ;;
+ /* xen heap is also identity mapped */
+ mov r16 = IA64_TR_XEN_HEAP_REGS
+ dep r17=-1,r3,60,4
+ ;;
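+ /* purge any existing translation for the identity va before the insert */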
+ ptr.d r17, r18
+ ;;
+ mov cr.ifa=r17
+ ;;
+ itr.d dtr[r16]=r24
;;
// re-pin mappings for per-cpu data
if (!vm_buffer) {
vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
ASSERT(vm_buffer);
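+ /* convert the identity-mapped (PAGE_OFFSET-based) heap address to
+ * the KERNEL_START-based Xen va (virt_to_xenva, asm/xenpage.h) */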
+ vm_buffer = virt_to_xenva(vm_buffer);
printk("vm_buffer: 0x%lx\n", vm_buffer);
}
printk("VPD allocation failed.\n");
return NULL;
}
+ vpd = (vpd_t *)virt_to_xenva(vpd);
printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
vpd, sizeof(vpd_t));
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
or loc3=loc3,r17 // add in psr the bits to set
;;
+ movl loc5=pal_vaddr // get pal_vaddr
+ ;;
+ ld8 loc5=[loc5] // read pal_vaddr
+ ;;
andcm r16=loc3,r16 // removes bits to clear from psr
dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
br.call.sptk.many rp=ia64_switch_mode_phys
1:
// now in physical mode with psr.i/ic off so do rr7 switch
- movl r16=pal_vaddr // Note: belong to region 7!
+ dep r16=-1,r0,61,3 // r16 = start of region 7, for the rr7 write below
;;
mov rr[r16]=in0
;;
srlz.d
- dep r16=0,r16,60,4 // Get physical address.
;;
- ld8 loc5=[r16] // read pal_vaddr
movl r26=PAGE_KERNEL
;;
itr.i itr[r16]=r18
;;
itr.d dtr[r16]=r18
-
+ ;;
+ /* xen heap is also identity mapped */
+ mov r16 = IA64_TR_XEN_HEAP_REGS
+ dep r17=-1,loc6,60,4
+ ;;
+ ptr.d r17,r24
+ ;;
+ mov cr.ifa=r17
+ ;;
+ itr.d dtr[r16]=r18
+ ;;
// re-pin mappings for stack (current)
// unless overlaps with KERNEL_TR
#include <asm/system.h>
#include <asm-generic/iomap.h>
+
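+/*
+ * Xen now defines virt_to_maddr() in asm/xenpage.h, so the plain
+ * PAGE_OFFSET-based conversion below is compiled out for Xen builds.
+ */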
+#ifndef XEN
/*
* Change virtual addresses to physical addresses and vv.
*/
{
return (unsigned long) address - PAGE_OFFSET;
}
+#endif
static inline void*
maddr_to_virt (unsigned long address)
return (void *) (address + PAGE_OFFSET);
}
+
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
#define IA64_TR_VHPT 4 /* dtr4: vhpt */
#define IA64_TR_MAPPED_REGS 5 /* dtr5: vcpu mapped regs */
-#define IA64_DTR_GUEST_KERNEL 6
+#define IA64_TR_XEN_HEAP_REGS 6 /* dtr6: xen heap identity mapping */
+#define IA64_DTR_GUEST_KERNEL 7
#define IA64_ITR_GUEST_KERNEL 2
/* Processor status register bits: */
#define IA64_PSR_VM_BIT 46
#ifndef _ASM_IA64_XENPAGE_H
#define _ASM_IA64_XENPAGE_H
+#ifndef __ASSEMBLY__
#undef mfn_valid
#undef page_to_mfn
#undef mfn_to_page
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#undef ia64_mfn_valid
-#ifndef __ASSEMBLY__
extern int ia64_mfn_valid (unsigned long pfn);
-#endif
# define mfn_valid(_pfn) (((_pfn) < max_page) && ia64_mfn_valid(_pfn))
#else
# define mfn_valid(_pfn) ((_pfn) < max_page)
# define page_to_mfn(_page) ((unsigned long) ((_page) - frame_table))
# define mfn_to_page(_pfn) (frame_table + (_pfn))
+
+#include <asm/xensystem.h>
+
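+/*
+ * Addresses in [KERNEL_START, KERNEL_START + xenheap_size) map linearly
+ * onto the physical range starting at xen_pstart; anything else is
+ * treated as an identity-mapped region-7 address, so only the top four
+ * bits are masked off.
+ */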
+static inline unsigned long __virt_to_maddr(unsigned long va)
+{
+ if (va - KERNEL_START < xenheap_size)
+ return xen_pstart + (va - KERNEL_START);
+ else
+ return (va & ((1UL << 60) - 1));
+}
+
+#define virt_to_maddr(va) (__virt_to_maddr((unsigned long)(va)))
+
#undef page_to_maddr
#undef virt_to_page
#define page_to_maddr(page) (page_to_mfn(page) << PAGE_SHIFT)
-#define virt_to_page(kaddr) mfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr) (mfn_to_page(virt_to_maddr(kaddr) >> PAGE_SHIFT))
#define page_to_virt(_page) maddr_to_virt(page_to_maddr(_page))
#define maddr_to_page(kaddr) mfn_to_page(((kaddr) >> PAGE_SHIFT))
#define virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
#define mfn_to_virt(mfn) maddr_to_virt(mfn << PAGE_SHIFT)
-#ifndef __ASSEMBLY__
typedef union xen_va {
struct {
unsigned long off : 60;
else
return shift - PAGE_SHIFT;
}
-#endif
+/* convert an identity-mapped (region 7) va into the corresponding Xen va */
+#define virt_to_xenva(va) ((unsigned long)(va) - PAGE_OFFSET - \
+ xen_pstart + KERNEL_START)
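+/*
+ * For a xenheap machine address m, virt_to_xenva(maddr_to_virt(m))
+ * yields KERNEL_START + (m - xen_pstart), which __virt_to_maddr()
+ * maps back to m.
+ */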
+
#undef __pa
#undef __va
-#define __pa(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+#define __pa(x) (virt_to_maddr(x))
#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
/* It is sometimes very useful to have unsigned long as result. */
#define __va_ul(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
+#endif
#endif /* _ASM_IA64_XENPAGE_H */
#define XEN_VIRT_UC_BIT 57
-#define KERNEL_START 0xf000000004000000
+#define KERNEL_START 0xf400000004000000
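+/*
+ * 0xf000000004000000 is also the region-7 identity address of physical
+ * 0x4000000, so the base is presumably moved so that the pinned Xen
+ * window no longer aliases identity-mapped memory.
+ */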
#define GATE_ADDR KERNEL_START
#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)