Add a C wrapper around the assembly routine vmx_switch_rr7(); the
assembly entry point is renamed to __vmx_switch_rr7().  Subsequent
patches will fill the wrapper in with code that saves the inserted
TLB values.
Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Simon Horman <horms@verge.net.au>
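
For reference, a minimal sketch of where the wrapper appears to be
heading, based only on the description above.  record_inserted_tlb()
and its signature are hypothetical placeholders for the bookkeeping a
later patch would add; they are not part of this patch.

/* Sketch only: the shape the wrapper could take once later patches add
 * the TLB bookkeeping.  record_inserted_tlb() is a hypothetical helper. */
extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
			     void *pal_vaddr, void *shared_arch_info);

static void record_inserted_tlb(unsigned long rid, void *guest_vhpt,
				void *pal_vaddr, void *shared_arch_info)
{
	/* Placeholder: stash the values so the TLB entries inserted by
	 * __vmx_switch_rr7() can be re-inserted later (hypothetical). */
}

void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
		    void *pal_vaddr, void *shared_arch_info)
{
	record_inserted_tlb(rid, guest_vhpt, pal_vaddr, shared_arch_info);
	__vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
}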
IA64_PSR_ED | IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
#define PSR_BITS_TO_SET IA64_PSR_BN
-//extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt, void * pal_vaddr, void * shared_arch_info );
-GLOBAL_ENTRY(vmx_switch_rr7)
+GLOBAL_ENTRY(__vmx_switch_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
alloc loc1 = ar.pfs, 4, 8, 0, 0
mov ar.rsc=loc3 // restore RSE configuration
srlz.d // serialize restoration of psr.l
br.ret.sptk.many rp
-END(vmx_switch_rr7)
+END(__vmx_switch_rr7)
}
}
+void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
+ void *pal_vaddr, void *shared_arch_info)
+{
+ __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
+}
+
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
u64 rrval;
extern uint64_t guest_read_vivr(VCPU * vcpu);
extern int vmx_vcpu_pend_interrupt(VCPU * vcpu, uint8_t vector);
extern void vcpu_load_kernel_regs(VCPU * vcpu);
-extern void vmx_switch_rr7(unsigned long, void *, void *, void *);
+extern void __vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
+ void *pal_vaddr, void *shared_arch_info);
+extern void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
+ void *pal_vaddr, void *shared_arch_info);
extern void vmx_ia64_set_dcr(VCPU * v);
extern void inject_guest_interruption(struct vcpu *vcpu, u64 vec);
extern void vmx_asm_bsw0(void);