;;
#ifdef XEN
// 5. VHPT
-#if VHPT_ENABLED
- GET_VA_VCPU_VHPT_MADDR(r2,r3);;
+ // GET_VA_VCPU_VHPT_MADDR() may not give the
+ // value of the VHPT currently pinned into the TLB
+ GET_THIS_PADDR(r2, inserted_vhpt);;
+ ld8 r2=[r2]		// GET_THIS_PADDR yields the address; load the value
+ ;;
+ cmp.eq p7,p0=r2,r0
+ ;;
+(p7) br.cond.sptk .vhpt_not_mapped
dep r16=0,r2,0,IA64_GRANULE_SHIFT
mov r18=IA64_GRANULE_SHIFT<<2
;;
ptr.d r16,r18		// purge the previously pinned VHPT translation
;;
srlz.d
;;
-#endif
+.vhpt_not_mapped:
#endif
// Now branch away to caller.
br.sptk.many b1
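
In C terms, the new VHPT step amounts to the following minimal sketch. Assumptions: purge_granule_translation() is a hypothetical stand-in for the ptr.d/srlz.d pair (not a real Xen function), and __get_cpu_var() approximates the physical-mode GET_THIS_PADDR/ld8 access; inserted_vhpt and IA64_GRANULE_SHIFT are the patch's own names.

	/* Sketch of the patched VHPT step in ia64_new_rr7(); the real
	 * implementation is the assembly above. */
	unsigned long vhpt = __get_cpu_var(inserted_vhpt);  /* GET_THIS_PADDR + ld8 */
	if (vhpt != 0) {                                    /* cmp.eq p7,p0=r2,r0 */
		/* dep r16=0,r2,0,IA64_GRANULE_SHIFT: granule-align the VA. */
		unsigned long va = vhpt & ~((1UL << IA64_GRANULE_SHIFT) - 1);
		/* Hypothetical helper for ptr.d/srlz.d; the second argument
		 * is the purge size in itir format (ps field at bit 2). */
		purge_granule_translation(va, IA64_GRANULE_SHIFT << 2);
	}
	/* else: nothing pinned yet -- take the .vhpt_not_mapped branch. */
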
void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
void *pal_vaddr, void *shared_arch_info)
{
+ __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
__vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
}
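
Note the ordering in the wrapper: the per-CPU record is updated before __vmx_switch_rr7() pins the new mapping, so the purge path above always sees the VHPT address that is actually in the TLB, which is exactly what the old GET_VA_VCPU_VHPT_MADDR() computation could not guarantee.
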
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
+#include <asm/percpu.h>
/* Defined in xenasm.S */
extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);
+DEFINE_PER_CPU(unsigned long, inserted_vhpt);
+
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
if (!PSCB(v,metaphysical_mode))
set_rr(rr,newrrv.rrval);
} else if (rreg == 7) {
+#if VHPT_ENABLED
+ __get_cpu_var(inserted_vhpt) = __va_ul(vcpu_vhpt_maddr(v));
+#endif
ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
v->arch.privregs, v->domain->arch.shared_info_va,
__va_ul(vcpu_vhpt_maddr(v)));
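
The non-VMX path gets the same bookkeeping: set_one_rr() records the VHPT address it is about to hand to ia64_new_rr7(). When VHPT_ENABLED is off, inserted_vhpt is never written here and stays zero, so the runtime null check in the assembly skips the purge; that check replaces the compile-time #if the first hunk removed.
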
#define RR_RID(arg) (((arg) & 0x0000000000ffffff) << 8)
#define RR_RID_MASK 0x00000000ffffff00L
+DECLARE_PER_CPU(unsigned long, inserted_vhpt);
int set_one_rr(unsigned long rr, unsigned long val);
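
The per-CPU plumbing follows the standard DEFINE/DECLARE split. A minimal usage sketch, assuming only the patch's inserted_vhpt variable (record_pinned_vhpt() is a hypothetical helper, not part of the patch):

	/* regionreg.c: allocate one copy of the variable per CPU. */
	DEFINE_PER_CPU(unsigned long, inserted_vhpt);

	/* regionreg.h: make it visible to other translation units. */
	DECLARE_PER_CPU(unsigned long, inserted_vhpt);

	/* Hypothetical caller: __get_cpu_var() yields the current CPU's copy. */
	static inline void record_pinned_vhpt(unsigned long va)
	{
		__get_cpu_var(inserted_vhpt) = va;
	}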