__FUNCTION__);
break;
}
+#ifndef XEN
if (unw_is_intr_frame(info) &&
(pr & (1UL << PRED_USER_STACK)))
return 0;
-#ifdef XEN
+#else
+ if (unw_is_intr_frame(info) &&
+ !info->task->domain->arch.is_vti &&
+ (pr & (1UL << PRED_USER_STACK)))
+ return 0;
/*
- * vmx fault handlers don't always update vcpu->on_stack
- * so that the above (pr & (1UL << PRED_USER_STACK)) condition
- * isn't always true.
- * hypercall path of break_fault does set pUStk=1,
- * other fault paths don't set.
- *
+ * vmx fault handlers don't update vcpu->on_stack, so the
+ * (pr & (1UL << PRED_USER_STACK)) condition above is left untouched.
* we need to stop unwinding somehow.
*/
if (unw_is_intr_frame(info) &&
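[Note: the hunk is truncated here, at the start of the replacement stop
condition for VTi vcpus.  Purely as a sketch of the idea described in the
comment above, one possible test is shown below; info->ip is the unwinder's
instruction-pointer field, while the _stext/_etext text bounds and the exact
condition are assumptions of this sketch, not the patch's actual code.]

	/*
	 * Illustrative sketch only: a VTi vcpu's fault handlers never
	 * set pUStk, so stop unwinding once the interrupt frame's IP
	 * falls outside the hypervisor text instead.
	 */
	if (unw_is_intr_frame(info) &&
	    info->task->domain->arch.is_vti &&
	    (info->ip < (unsigned long) _stext ||
	     info->ip >= (unsigned long) _etext))
		return 0;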
;;
st8 [r16]=r8
;;
-(pUStk) rsm psr.i
+//(pUStk) rsm psr.i
+ rsm psr.i
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+//(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
;;
br.call.sptk.many b0=leave_hypervisor_tail
.work_processed_syscall:
//(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
ld8 r27=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
-(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+//(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
nop 0
;;
ld8 r22=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
///////////////////////////////////////////////////////////////////////
// st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
mov b6=r30 // I0 setup syscall handler branch reg early
- cmp.ne pKStk,pUStk=r0,r0 // A were we on kernel stacks already?
+// cmp.ne pKStk,pUStk=r0,r0 // A were we on kernel stacks already?
// and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
mov r18=ar.bsp // M2 (12 cyc)
;;
-(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
+//(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
+ addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
// cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
// br.call.sptk.many b7=ia64_syscall_setup // B
br.call.sptk.many b7=ia64_hypercall_setup // B
st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
tnat.nz p9,p0=in1
-(pKStk) mov r18=r0 // make sure r18 isn't NaT
+//(pKStk) mov r18=r0 // make sure r18 isn't NaT
;;
st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
(p9) mov in1=-1
;;
-(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
+//(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
+ sub r18=r18,r22 // r18=RSE.ndirty*8
tnat.nz p10,p0=in2
add r11=8,r11
;;
-(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
-(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
+//(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
+//(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
tnat.nz p11,p0=in3
;;
(p10) mov in2=-1
tnat.nz p12,p0=in4 // [I0]
(p11) mov in3=-1
;;
-(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
-(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
+//(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
+ st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
+//(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
+ st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
;;
st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates