extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
+/* Address of vpsr.i (in fact evtchn_upcall_mask) of current vcpu.
+ This is a Xen virtual address. */
+DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
+
#include <xen/sched-if.h>
void schedule_tail(struct vcpu *prev)
VHPT_ENABLED);
load_region_regs(current);
vcpu_load_kernel_regs(current);
+ __ia64_per_cpu_var(current_psr_i_addr) = &current->domain->
+ shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
}
}
/*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/
prev = ia64_switch_to(next);
- //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
-
- if (!VMX_DOMAIN(current)){
- vcpu_set_next_timer(current);
- }
+ /* Note: ia64_switch_to does not return here at vcpu initialization. */
+ //cpu_set(smp_processor_id(), current->domain->domain_dirty_cpumask);
// leave this debug for now: it acts as a heartbeat when more than
// one domain is active
if (!cnt[id]--) { cnt[id] = 500000; printk("%x",id); }
if (!i--) { i = 1000000; printk("+"); }
}
-
+
if (VMX_DOMAIN(current)){
- vmx_load_all_rr(current);
- }else{
+ vmx_load_all_rr(current);
+ } else {
+ struct domain *nd;
extern char ia64_ivt;
+
ia64_set_iva(&ia64_ivt);
- if (!is_idle_domain(current->domain)) {
+
+ nd = current->domain;
+ if (!is_idle_domain(nd)) {
ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
VHPT_ENABLED);
load_region_regs(current);
vcpu_load_kernel_regs(current);
+ vcpu_set_next_timer(current);
if (vcpu_timer_expired(current))
vcpu_pend_timer(current);
- }else {
+ __ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
+ vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+ } else {
/* When switching to idle domain, only need to disable vhpt
* walker. Then all accesses happen within idle context will
* be handled by TR mapping and identity mapping.
*/
pta = ia64_get_pta();
ia64_set_pta(pta & ~VHPT_ENABLED);
+ __ia64_per_cpu_var(current_psr_i_addr) = NULL;
}
}
local_irq_restore(spsr);
or r20=r23,r21;;
1: // when we get to here r20=~=interrupts pending
// Check pending event indication
-(p7) adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18;;
+(p7) movl r20=THIS_CPU(current_psr_i_addr);;
(p7) ld8 r20=[r20];;
-(p7) adds r20=-1,r20;;
+(p7) adds r20=-1,r20;; /* evtchn_upcall_pending */
(p7) ld1 r20=[r20];;
// HYPERPRIVOP_RFI?
or r30=r30,r28;;
and r30=r30,r27;;
mov r20=1
- adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
+ movl r22=THIS_CPU(current_psr_i_addr)
adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
ld8 r22=[r22]
- st8 [r21]=r30 ;;
+ st8 [r21]=r30;;
// set shared_mem interrupt_delivery_enabled to 0
// set shared_mem interrupt_collection_enabled to 0
- st1 [r22]=r20;;
- st4 [r18]=r0;;
+ st1 [r22]=r20
+ st4 [r18]=r0
// cover and set shared_mem precover_ifs to cr.ifs
// set shared_mem ifs and incomplete_regframe to 0
cover ;;
cmp.eq p6,p0=r16,r0;;
(p6) br.cond.spnt.few fast_tick_reflect_done;;
// if guest vpsr.i is off, we're done
- adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18 ;;
+ movl r21=THIS_CPU(current_psr_i_addr);;
ld8 r21=[r21];;
ld1 r21=[r21];;
cmp.eq p0,p6=r21,r0
dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
or r17=r17,r28;;
and r17=r17,r27;;
- ld4 r16=[r18],XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS;;
+ ld4 r16=[r18];;
cmp.ne p6,p0=r16,r0;;
- ld8 r16=[r18],XSI_PSR_IC_OFS-XSI_PSR_I_ADDR_OFS
+ movl r22=THIS_CPU(current_psr_i_addr);;
+ ld8 r22=[r22]
(p6) dep r17=-1,r17,IA64_PSR_IC_BIT,1 ;;
- ld1 r16=[r16];;
+ ld1 r16=[r22];;
cmp.eq p6,p0=r16,r0;;
(p6) dep r17=-1,r17,IA64_PSR_I_BIT,1 ;;
mov r20=1
- adds r22=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
adds r21=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18 ;;
- ld8 r22=[r22]
st8 [r21]=r17 ;;
// set shared_mem interrupt_delivery_enabled to 0
// set shared_mem interrupt_collection_enabled to 0
// set shared_mem isr
st8 [r21]=r16 ;;
// set cr.ipsr
- adds r21=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
+ movl r21=THIS_CPU(current_psr_i_addr)
mov r29=r30 ;;
ld8 r21=[r21]
movl r28=DELIVER_PSR_SET;;
dep r20=0,r20,38,25;; // ensure ifs has no reserved bits set
mov cr.ifs=r20 ;;
// ipsr.cpl == (vcr.ipsr.cpl == 0) 2 : 3;
- adds r20=XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS,r18
+ movl r20=THIS_CPU(current_psr_i_addr)
dep r21=-1,r21,IA64_PSR_CPL1_BIT,1 ;;
// vpsr.i = vcr.ipsr.i; vpsr.ic = vcr.ipsr.ic
ld8 r20=[r20]
extr.u r20=r21,41,2 ;; // get v(!)psr.ri
dep r16=r20,r16,41,2 ;; // deposit cr.isr.ei
adds r22=XSI_ISR_OFS-XSI_PSR_IC_OFS,r18 ;;
- st8 [r22]=r16,XSI_PSR_I_ADDR_OFS-XSI_ISR_OFS ;;
+ st8 [r22]=r16;;
+ movl r22=THIS_CPU(current_psr_i_addr)
// set cr.ipsr (make sure cpl==2!)
- mov r29=r17 ;;
+ mov r29=r17
movl r28=DELIVER_PSR_SET;;
- mov r20=1
+ mov r20=1;;
ld8 r22=[r22]
- movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0);;
+ movl r27=~(DELIVER_PSR_CLR|IA64_PSR_CPL0)
or r29=r29,r28;;
and r29=r29,r27;;
mov cr.ipsr=r29;;