#include <asm/mca.h>
#ifdef XEN
#include <asm/vhpt.h>
+#include <public/arch-ia64.h>
#endif
/*
srlz.i
;;
#ifdef XEN
- // 5. VHPT
+ // 5. shared_info
+ GET_THIS_PADDR(r2, inserted_shared_info);;
+ ld8 r16=[r2] // r16 = VA this CPU's shared_info was pinned at (per-CPU bookkeeping)
+ mov r18=XSI_SHIFT<<2 // purge size: ia64 ptr takes the page-size shift in bits 2..7
+ ;;
+ ptr.d r16,r18 // purge the data translation covering shared_info
+ ;;
+ srlz.d
+ ;;
+
+ // 6. mapped_regs
+ GET_THIS_PADDR(r2, inserted_mapped_regs);;
+ ld8 r16=[r2] // r16 = VA this CPU's mapped_regs were pinned at
+ mov r18=XMAPPEDREGS_SHIFT<<2
+ ;;
+ ptr.d r16,r18 // purge the data translation covering mapped_regs
+ ;;
+ srlz.d
+ ;;
+
+ // 7. VPD
+ // The VPD will not be mapped in the case where
+ // a VMX domain hasn't been started since boot
+ GET_THIS_PADDR(r2, inserted_vpd);;
+ ld8 r16=[r2] // r16 = VA the VPD was pinned at; 0 if no VMX domain ever ran
+ mov r18=XMAPPEDREGS_SHIFT<<2
+ ;;
+ cmp.eq p7,p0=r16,r0 // test the loaded value: r2 is the per-CPU slot's paddr, never 0
+ ;;
+(p7) br.cond.sptk .vpd_not_mapped
+ ;;
+ ptr.i r16,r18 // purge the instruction translation covering the VPD
+ ;;
+ srlz.i
+ ;;
+.vpd_not_mapped:
+
+ // 8. VHPT
// GET_VA_VCPU_VHPT_MADDR() may not give the
// value of the VHPT currently pinned into the TLB
GET_THIS_PADDR(r2, inserted_vhpt);;
#ifdef XEN
.reload_vhpt:
// 5. VHPT
-#if VHPT_ENABLED
- GET_VA_VCPU_VHPT_MADDR(r2,r3);;
+ GET_THIS_PADDR(r1, inserted_vhpt);;
+ ld8 r2=[r1] // load the pinned VHPT VA; without this the context lines below read stale r2
cmp.eq p7,p0=r2,r0
+(p7) br.cond.sptk .overlap_vhpt // vhpt isn't mapped.
// avoid overlapping with stack TR
shr.u r17=r2,IA64_GRANULE_SHIFT
srlz.d
;;
.overlap_vhpt:
-#endif
#endif
br.sptk.many done_tlb_purge_and_reload
err:
void *pal_vaddr, void *shared_arch_info)
{
__get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
+ __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
+ __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
__vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
}
integer_param("dom_rid_bits", domain_rid_bits_default);
DEFINE_PER_CPU(unsigned long, inserted_vhpt);
+DEFINE_PER_CPU(unsigned long, inserted_shared_info);
+DEFINE_PER_CPU(unsigned long, inserted_mapped_regs);
+DEFINE_PER_CPU(unsigned long, inserted_vpd);
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
#if VHPT_ENABLED
__get_cpu_var(inserted_vhpt) = __va_ul(vcpu_vhpt_maddr(v));
#endif
+ __get_cpu_var(inserted_shared_info) =
+ v->domain->arch.shared_info_va;
+ __get_cpu_var(inserted_mapped_regs) =
+ v->domain->arch.shared_info_va +
+ XMAPPEDREGS_OFS;
ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
v->arch.privregs, v->domain->arch.shared_info_va,
__va_ul(vcpu_vhpt_maddr(v)));
#define RR_RID_MASK 0x00000000ffffff00L
DECLARE_PER_CPU(unsigned long, inserted_vhpt);
+DECLARE_PER_CPU(unsigned long, inserted_shared_info);
+DECLARE_PER_CPU(unsigned long, inserted_mapped_regs);
+DECLARE_PER_CPU(unsigned long, inserted_vpd);
int set_one_rr(unsigned long rr, unsigned long val);