md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
- return __va(md->phys_addr);
+ return __va_efi(md->phys_addr);
}
printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
__FUNCTION__);
}
#ifdef XEN
-void *pal_vaddr = 0;
+static void *pal_vaddr = 0;
void *
efi_get_pal_addr(void)
}
#endif
+#ifdef XEN
+static void
+__efi_unmap_pal_code (void *pal_vaddr)
+{
+ ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
+ IA64_GRANULE_SHIFT);
+}
+
void
-efi_map_pal_code (void)
+efi_unmap_pal_code (void)
{
-#ifdef XEN
- u64 psr;
- (void)efi_get_pal_addr();
-#else
void *pal_vaddr = efi_get_pal_addr ();
u64 psr;
if (!pal_vaddr)
return;
+
+ /*
+ * Cannot write to CRx with PSR.ic=1
+ */
+ psr = ia64_clear_ic();
+ __efi_unmap_pal_code(pal_vaddr);
+ ia64_set_psr(psr); /* restore psr */
+ ia64_srlz_i();
+}
#endif
+void
+efi_map_pal_code (void)
+{
+ void *pal_vaddr = efi_get_pal_addr ();
+ u64 psr;
+
+ if (!pal_vaddr)
+ return;
+
/*
* Cannot write to CRx with PSR.ic=1
*/
psr = ia64_clear_ic();
+#ifdef XEN
+	/* The PAL code mapping must be unpinned before it is pinned again.
+	 * This is needed in the case of a nested EFI, PAL or SAL call. */
+ __efi_unmap_pal_code(pal_vaddr);
+#endif
ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
IA64_GRANULE_SHIFT);
}
#endif
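The following sketch is editorial illustration only (the wrapper name firmware_call_example() is invented and not part of the patch): it shows how the map/unmap pair above is meant to bracket a physical-mode firmware call, and why the purge-before-pin in efi_map_pal_code() matters when such a call is re-entered, e.g. from an MCA handler. On ia64, wiring a translation register entry that overlaps one already present is not safe, so the stale IA64_TR_PALCODE entry has to be purged first.

/* Illustration only: this wrapper does not exist in the tree. */
static void firmware_call_example(void)
{
	efi_map_pal_code();	/* purges any stale IA64_TR_PALCODE entry,
				 * then pins PAL code again, so a nested
				 * EFI, PAL or SAL call never wires an
				 * overlapping ITR entry */

	/* ... issue the firmware call in physical mode here ... */

	efi_unmap_pal_code();	/* drop the pin; in the real code
				 * XEN_EFI_RR_LEAVE() (see the header
				 * change below) skips this for nested
				 * calls */
}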
+#ifndef XEN
efi_map_pal_code();
+#endif
efi_enter_virtual_mode();
}
;;
srlz.d
;;
+#ifndef XEN
// 3. Reload ITR for PAL code.
GET_THIS_PADDR(r2, ia64_mca_pal_pte)
;;
;;
srlz.i
;;
+#endif
+
// 4. Reload DTR for stack.
#ifdef XEN
// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
/* Early console may use I/O ports */
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
+#ifndef XEN
efi_map_pal_code();
+#endif
cpu_init();
smp_callin();
/*
* in0: new rr7
* in1: virtual address of guest_vhpt
- * in2: virtual address of pal code segment
+ * in2: virtual address of guest shared_info
* r8: will contain old rid value
*/
GLOBAL_ENTRY(__vmx_switch_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 4, 8, 0, 0
+ alloc loc1 = ar.pfs, 4, 7, 0, 0
1:{
mov r28 = in0 // copy procedure index
mov r8 = ip // save ip to compute branch
	tpa loc2 = loc2 // get physical address of per cpu data
tpa r3 = r8 // get physical address of ip
dep loc5 = 0,in1,60,4 // get physical address of guest_vhpt
- dep loc6 = 0,in2,60,4 // get physical address of pal code
- dep loc7 = 0,in3,60,4 // get physical address of privregs
+ dep loc6 = 0,in2,60,4 // get physical address of privregs
;;
dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
// mask granule shift
- dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
- // mask granule shift
mov loc4 = psr // save psr
;;
mov loc3 = ar.rsc // save RSE configuration
;;
.vhpt_overlaps:
- // re-pin mappings for PAL code section
- mov r24=IA64_TR_PALCODE
- or loc6 = r25,loc6 // construct PA | page properties
- mov r23 = IA64_GRANULE_SHIFT<<2
- ;;
- ptr.i in2,r23
- ;;
- mov cr.itir=r23
- mov cr.ifa=in2
- ;;
- itr.i itr[r24]=loc6 // wire in new mapping...
- ;;
-
// r16, r19, r20 are used by
// ia64_switch_mode_phys()/ia64_switch_mode_virt()
// re-pin mappings for privregs
// r21 = (current physical addr) & (IA64_GRANULE_SIZE - 1)
// r17 = (guest_vhpt physical addr) & (IA64_GRANULE_SIZE - 1)
- // loc6 = (((pal phys addr) & (IA64_GRANULE_SIZE - 1) << 2)) | PAGE_KERNEL
- // loc7 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
- cmp.ne.unc p7,p0=r21,loc7 // check overlap with current stack
+ // loc6 = (privregs physical addr) & (IA64_GRANULE_SIZE - 1)
+ cmp.ne.unc p7,p0=r21,loc6 // check overlap with current stack
;;
-(p7) cmp.ne.unc p8,p0=r17,loc7 // check overlap with guest_vhpt
+(p7) cmp.ne.unc p8,p0=r17,loc6 // check overlap with guest_vhpt
;;
- // loc7 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
- or loc7 = r25,loc7 // construct PA | page properties
+ // loc6 = (((privregs phys) & (IA64_GRANULE_SIZE - 1)) << 2) | PAGE_KERNEL
+ or loc6 = r25,loc6 // construct PA | page properties
;;
- cmp.ne p9,p0=loc6,loc7
mov r22=IA64_TR_VPD
mov r24=IA64_TR_MAPPED_REGS
mov r23=IA64_GRANULE_SHIFT<<2
;;
-(p9) ptr.i in3,r23
-(p8) ptr.d in3,r23
+ ptr.i in2,r23
+(p8) ptr.d in2,r23
mov cr.itir=r23
- mov cr.ifa=in3
+ mov cr.ifa=in2
;;
-(p9) itr.i itr[r22]=loc7 // wire in new mapping...
+ itr.i itr[r22]=loc6 // wire in new mapping...
;;
-(p8) itr.d dtr[r24]=loc7 // wire in new mapping...
+(p8) itr.d dtr[r24]=loc6 // wire in new mapping...
;;
// done, switch back to virtual and return
VMX(vcpu, vrr[VRN7]) = 0x738;
}
-extern void * pal_vaddr;
-
void
vmx_load_all_rr(VCPU *vcpu)
{
ia64_dv_serialize_data();
vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
(void *)vcpu->arch.vhpt.hash,
- pal_vaddr, vcpu->arch.privregs);
+ vcpu->arch.privregs);
ia64_set_pta(VMX(vcpu, mpta));
vmx_ia64_set_dcr(vcpu);
}
void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *pal_vaddr, void *shared_arch_info)
+ void *shared_arch_info)
{
__get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
__get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
__get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
- __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
+ __vmx_switch_rr7(rid, guest_vhpt, shared_arch_info);
}
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
case VRN7:
if (likely(vcpu == current))
vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
- pal_vaddr, vcpu->arch.privregs);
+ vcpu->arch.privregs);
break;
case VRN4:
rrval = vrrtomrr(vcpu,val);
// unsigned long va_vhpt) /* in4 */
//Local usage:
// loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
-// loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
+// loc5=shared_archinfo_paddr, loc6=xen_paddr,
// r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
GLOBAL_ENTRY(ia64_new_rr7)
// FIXME? not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 5, 8, 0, 0
+ alloc loc1 = ar.pfs, 5, 7, 0, 0
movl loc2=PERCPU_ADDR
1: {
mov loc3 = psr // save psr
tpa in1=in1 // grab shared_info BEFORE changing rr7
adds r8 = 1f-1b,r8 // calculate return address for call
;;
- tpa loc7=in2 // grab arch_vcpu_info BEFORE chg rr7
+ tpa loc5=in2 // grab arch_vcpu_info BEFORE chg rr7
movl r17=PSR_BITS_TO_SET
mov loc4=ar.rsc // save RSE configuration
movl r16=PSR_BITS_TO_CLEAR
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
or loc3=loc3,r17 // add in psr the bits to set
;;
- movl loc5=pal_vaddr // get pal_vaddr
- ;;
- ld8 loc5=[loc5] // read pal_vaddr
- ;;
+
andcm r16=loc3,r16 // removes bits to clear from psr
dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
br.call.sptk.many rp=ia64_switch_mode_phys
add r22=r22,in3
;;
ptr.d r22,r24
- or r23=loc7,r25 // construct PA | page properties
+ or r23=loc5,r25 // construct PA | page properties
mov cr.itir=r24
mov cr.ifa=r22
mov r21=IA64_TR_MAPPED_REGS
;;
itr.d dtr[r21]=r23 // wire in new mapping...
- // Purge/insert PAL TR
- mov r24=IA64_TR_PALCODE
- mov r23=IA64_GRANULE_SHIFT<<2
- dep r25=0,loc5,60,4 // convert pal vaddr to paddr
- ;;
- ptr.i loc5,r23
- or r25=r25,r26 // construct PA | page properties
- mov cr.itir=r23
- mov cr.ifa=loc5
- ;;
- itr.i itr[r24]=r25
-
// done, switch back to virtual and return
mov r16=loc3 // r16= original psr
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
GLOBAL_ENTRY(ia64_new_rr7_efi)
// FIXME? not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
- alloc loc1 = ar.pfs, 3, 8, 0, 0
+ alloc loc1 = ar.pfs, 3, 7, 0, 0
movl loc2=PERCPU_ADDR
1: {
mov loc3 = psr // save psr
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
or loc3=loc3,r17 // add in psr the bits to set
;;
- movl loc5=pal_vaddr // get pal_vaddr
- ;;
- ld8 loc5=[loc5] // read pal_vaddr
+ dep loc6 = 0,in2,60,4 // get physical address of VPD
;;
- dep loc7 = 0,in2,60,4 // get physical address of VPD
- ;;
- dep loc7 = 0,loc7,0,IA64_GRANULE_SHIFT
+ dep loc6 = 0,loc6,0,IA64_GRANULE_SHIFT
// mask granule shift
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
+ dep loc5=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
br.call.sptk.many rp=ia64_switch_mode_phys
1:
movl r26=PAGE_KERNEL
mov r16=IA64_TR_KERNEL
mov cr.itir=r24
mov cr.ifa=r17
- or r18=loc6,r26
+ or r18=loc5,r26
;;
itr.i itr[r16]=r18
;;
// VPD
cmp.eq p7,p0=r0,in2
(p7) br.cond.sptk ia64_new_rr7_efi_vpd_not_mapped
- or loc7 = r26,loc7 // construct PA | page properties
+ or loc6 = r26,loc6 // construct PA | page properties
mov r22=IA64_TR_VPD
mov r24=IA64_TR_MAPPED_REGS
mov r23=IA64_GRANULE_SHIFT<<2
mov cr.itir=r23
mov cr.ifa=in2
;;
- itr.i itr[r22]=loc7
+ itr.i itr[r22]=loc6
;;
- itr.d dtr[r24]=loc7
+ itr.d dtr[r24]=loc6
;;
srlz.i
;;
;;
ia64_new_rr7_efi_vpd_not_mapped:
- // Purge/insert PAL TR
- mov r24=IA64_TR_PALCODE
- mov r23=IA64_GRANULE_SHIFT<<2
- dep r25=0,loc5,60,4 // convert pal vaddr to paddr
- ;;
- ptr.i loc5,r23
- or r25=r25,r26 // construct PA | page properties
- mov cr.itir=r23
- mov cr.ifa=loc5
- ;;
- itr.i itr[r24]=r25
-
// done, switch back to virtual and return
mov r16=loc3 // r16= original psr
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
#include <asm/page.h>
#include <asm/system.h>
-#ifdef XEN
-extern void * pal_vaddr;
-#endif
-
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
extern void efi_init (void);
extern void *efi_get_pal_addr (void);
extern void efi_map_pal_code (void);
+#ifdef XEN
+extern void efi_unmap_pal_code (void);
+#endif
extern void efi_map_memmap(void);
extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec *ts);
* E: bits N-53: reserved (0)
*/
+/* rr7 (and rr6) may already be set to XEN_EFI_RR, which
+ * would indicate a nested EFI, SAL or PAL call, such
+ * as from an MCA. This may have occurred during a call
+ * to set_one_rr_efi(). To be safe, repin everything anyway.
+ */
+
#define XEN_EFI_RR_ENTER(rr6, rr7) do { \
rr6 = ia64_get_rr(6UL << 61); \
rr7 = ia64_get_rr(7UL << 61); \
set_one_rr_efi(6UL << 61, XEN_EFI_RR); \
set_one_rr_efi(7UL << 61, XEN_EFI_RR); \
+ efi_map_pal_code(); \
} while (0)
+/* There is no need to do anything if the saved rr7 (and rr6)
+ * is XEN_EFI_RR, as that would just switch them from XEN_EFI_RR to
+ * XEN_EFI_RR.  Furthermore, if this is a nested call it is important
+ * not to unpin the PAL code (i.e. not to call efi_unmap_pal_code())
+ * until the outermost call has finished.
+ */
+
#define XEN_EFI_RR_LEAVE(rr6, rr7) do { \
- set_one_rr_efi(6UL << 61, rr6); \
- set_one_rr_efi(7UL << 61, rr7); \
+ if (rr7 != XEN_EFI_RR) { \
+ efi_unmap_pal_code(); \
+ set_one_rr_efi(6UL << 61, rr6); \
+ set_one_rr_efi(7UL << 61, rr7); \
+ } \
} while (0)
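A minimal usage sketch (editorial and hypothetical: efi_call_with_xen_rr() is an invented wrapper; only the two macros come from this header). The rr7 value saved by XEN_EFI_RR_ENTER doubles as the nesting indicator checked by XEN_EFI_RR_LEAVE, so only the outermost invocation restores the saved region registers and unpins the PAL code.

/* Illustration only: this wrapper does not exist in the tree. */
static long efi_call_with_xen_rr(void)
{
	unsigned long rr6, rr7;
	long ret = 0;

	XEN_EFI_RR_ENTER(rr6, rr7);	/* save rr6/rr7, switch them to
					 * XEN_EFI_RR and (re)pin PAL code */

	/* ... perform the EFI, PAL or SAL call here ... */

	XEN_EFI_RR_LEAVE(rr6, rr7);	/* if the saved rr7 was already
					 * XEN_EFI_RR we were nested: keep
					 * the pin and the EFI region
					 * registers for the outer call */
	return ret;
}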
#else
extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
extern void vcpu_load_kernel_regs(VCPU * vcpu);
extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *pal_vaddr, void *shared_arch_info);
+ void *shared_arch_info);
extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
- void *pal_vaddr, void *shared_arch_info);
+ void *shared_arch_info);
extern void vmx_ia64_set_dcr(VCPU * v);
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
extern void vmx_asm_bsw0(void);