mov from psr is used frequently by XenoLinux, so handle it with a fast GET_PSR hyperprivop instead of the slow trap-and-emulate path.
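
The kernel side routes PSR reads through a new xen_get_psr() stub, and
the hypervisor answers on the fast hyperprivop path (hyper_get_psr).
As a sketch, the new _IA64_REG_PSR case below boils down to this;
read_psr() is an illustrative wrapper, not part of the patch:

	/* sketch only: what the new _IA64_REG_PSR dispatch does */
	static inline unsigned long read_psr(void)
	{
		if (is_running_on_xen())
			return xen_get_psr();	/* XEN_HYPER_GET_PSR hyperprivop */
		return __ia64_getreg(_IA64_REG_PSR);	/* native mov from psr */
	}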
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
#include <asm/processor.h>
#include <asm/asmmacro.h>
+GLOBAL_ENTRY(xen_get_psr)
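+	// Return the guest-visible PSR in r8.  p7 (running_on_xen == 0)
+	// takes the native mov-from-psr path; otherwise ask Xen for it.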
+ movl r8=running_on_xen;;
+ ld4 r8=[r8];;
+ cmp.eq p7,p0=r8,r0;;
+(p7) mov r8=psr;;
+(p7) br.ret.sptk.many rp
+ ;;
+ XEN_HYPER_GET_PSR
+ ;;
+ br.ret.sptk.many rp
+ ;;
+END(xen_get_psr)
+
GLOBAL_ENTRY(xen_get_ivr)
movl r8=running_on_xen;;
ld4 r8=[r8];;
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
+#ifdef CONFIG_XEN
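+	// XEN_HYPER_GET_PSR returns the PSR in r8, which is live here,
+	// so carry the old r8 in r21 across the hyperprivop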
+(pKStk) mov r21=r8
+(pKStk) XEN_HYPER_GET_PSR
+ ;;
+(pKStk) mov r22=r8
+(pKStk) mov r8=r21
+ ;;
+#else
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+#endif
nop 0
;;
ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
+#ifdef CONFIG_XEN
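+	// as above, but r21 is busy here (it was just loaded with b0),
+	// so use r29 to preserve r8 across the hyperprivop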
+(pKStk) mov r29=r8
+(pKStk) XEN_HYPER_GET_PSR
+ ;;
+(pKStk) mov r22=r8
+(pKStk) mov r8=r29
+ ;;
+#else
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+#endif
nop.i 0
nop.i 0
;;
mov loc4=ar.rsc // save RSE configuration
;;
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
+#ifdef CONFIG_XEN
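+	// XEN_HYPER_GET_PSR clobbers r8 with the PSR: save the live r8
+	// in r9 and land the result in loc3, as the native path does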
+ mov r9 = r8
+ XEN_HYPER_GET_PSR
+ ;;
+ mov loc3 = r8
+ mov r8 = r9
+ ;;
+#else
mov loc3 = psr
+#endif
mov loc0 = rp
.body
mov r30 = in2
* be properly handled by Xen, some are frequent enough that we use
* hyperprivops for performance. */
+extern unsigned long xen_get_psr(void);
extern unsigned long xen_get_ivr(void);
extern unsigned long xen_get_tpr(void);
extern void xen_set_itm(unsigned long);
__u64 ia64_intri_res; \
\
switch(regnum) { \
+ case _IA64_REG_PSR: \
+ ia64_intri_res = (is_running_on_xen()) ? \
+ xen_get_psr() : \
+ __ia64_getreg(regnum); \
+ break; \
case _IA64_REG_CR_IVR: \
ia64_intri_res = (is_running_on_xen()) ? \
xen_get_ivr() : \
DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+ DEFINE_MAPPED_REG_OFS(XSI_VPSR_PP_OFS, vpsr_pp);
DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
(p7) br.sptk.many hyper_get_rr
;;
+ // HYPERPRIVOP_GET_PSR?
+ cmp.eq p7,p6=HYPERPRIVOP_GET_PSR,r17
+(p7) br.sptk.many hyper_get_psr
+ ;;
+
// HYPERPRIVOP_PTC_GA?
cmp.eq p7,p6=HYPERPRIVOP_PTC_GA,r17
(p7) br.sptk.many hyper_ptc_ga
;;
END(hyper_set_itm)
+ENTRY(hyper_get_psr)
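+	// Fast GET_PSR handler: combine cr.ipsr with the virtualized
+	// ic/pp/dt/i bits kept in the shared area (r18 = &XSI_PSR_IC)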
+#ifdef FAST_HYPERPRIVOP_CNT
+ movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_PSR);;
+ ld4 r21=[r20];;
+ adds r21=1,r21;;
+ st4 [r20]=r21;;
+#endif
+ mov r24=cr.ipsr
+ movl r8=0x18ffffffff;;
+ // only return PSR{36:35,31:0}
+ and r8=r8,r24
+ // set vpsr.ic
+ ld4 r21=[r18];;
+ dep r8=r21,r8,IA64_PSR_IC_BIT,1
+ // set vpsr.pp
+ adds r20=XSI_VPSR_PP_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld1 r21=[r20];;
+ dep r8=r21,r8,IA64_PSR_PP_BIT,1
+ // set vpsr.dt
+ adds r20=XSI_METAPHYS_OFS-XSI_PSR_IC_OFS,r18 ;;
+ ld4 r21=[r20];;
+ cmp.ne p6,p0=r21,r0
+ ;;
+(p6)	dep r8=0,r8,IA64_PSR_DT_BIT,1	// metaphysical mode: vpsr.dt reads as 0
+	// set vpsr.i: *XSI_PSR_I_ADDR is the interrupt mask byte
+	// (1 = masked), so vpsr.i is its inverse
+	adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+	ld8 r20=[r20];;
+	ld1 r21=[r20];;
+	cmp.eq p8,p9=r0,r21 ;;
+(p8)	dep r8=-1,r8,IA64_PSR_I_BIT,1
+(p9)	dep r8=0,r8,IA64_PSR_I_BIT,1
+ ;;
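+	// resume past the break that got us here: bump ipsr.ri (bits
+	// 41:42), wrapping to the next bundle (iip += 16) from slot 2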
+ mov r25=cr.iip
+ extr.u r26=r24,41,2 ;;
+ cmp.eq p6,p7=2,r26 ;;
+(p6) mov r26=0
+(p6) adds r25=16,r25
+(p7) adds r26=1,r26
+ ;;
+ dep r24=r26,r24,41,2
+ ;;
+ mov cr.ipsr=r24
+ mov cr.iip=r25
+ mov pr=r31,-1 ;;
+ rfi
+ ;;
+END(hyper_get_psr)
+
ENTRY(hyper_get_rr)
#ifdef FAST_HYPERPRIVOP_CNT
movl r20=FAST_HYPERPRIVOP_PERFC(HYPERPRIVOP_GET_RR);;