pt_isr.ir = 0;
VMX(vcpu,cr_isr) = pt_isr.val;
collect_interruption(vcpu);
-
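+	/* The guest vpsr was just updated for the interruption, so
+	 * reload the machine dcr to let dcr.dm track the new cpl. */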
+ vmx_ia64_set_dcr(vcpu);
vmx_vcpu_get_iva(vcpu,&viva);
regs->cr_iip = viva + vec;
}
(void *)vcpu->arch.privregs,
(void *)vcpu->arch.vhpt.hash, pal_vaddr );
ia64_set_pta(VMX(vcpu, mpta));
- ia64_set_dcr(VMX(vcpu, mdcr));
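+	/* Derive the machine dcr from the current guest vpsr instead
+	 * of restoring a cached mdcr value. */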
+ vmx_ia64_set_dcr(vcpu);
ia64_srlz_d();
ia64_set_psr(psr);
unsigned long guest_psr_index = 0;
#endif
+
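+/*
+ * Load the machine cr.dcr for a VMX vcpu: start from the Xen
+ * default dcr bits and adjust dcr.dm to the guest's current cpl.
+ */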
+void
+vmx_ia64_set_dcr(VCPU *v)
+{
+ unsigned long dcr_bits = IA64_DEFAULT_DCR_BITS;
+
+	// If the guest is running at cpl > 0, keep dcr.dm = 1.
+	// If the guest is running at cpl = 0, clear dcr.dm,
+	// because the guest OS may ld.s on a tr mapped page.
+ if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
+ dcr_bits &= ~IA64_DCR_DM;
+
+ ia64_set_dcr(dcr_bits);
+}
+
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
else
vcpu_bsw0(vcpu);
vmx_vcpu_set_psr(vcpu,psr);
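+	/* The guest psr (and thus its cpl) has changed; reload the
+	 * machine dcr accordingly. */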
+ vmx_ia64_set_dcr(vcpu);
ifs=VCPU(vcpu,ifs);
if(ifs>>63)
regs->cr_ifs = ifs;
#endif //CHECK_FAULT
r2 = cr_igfld_mask(inst.M32.cr3,r2);
switch (inst.M32.cr3) {
- case 0: return vmx_vcpu_set_dcr(vcpu,r2);
+ case 0: return vcpu_set_dcr(vcpu,r2);
case 1: return vmx_vcpu_set_itm(vcpu,r2);
case 2: return vmx_vcpu_set_iva(vcpu,r2);
case 8: return vmx_vcpu_set_pta(vcpu,r2);
// from_cr_cnt[inst.M33.cr3]++;
switch (inst.M33.cr3) {
- case 0: return vmx_cr_get(dcr);
+ case 0: return cr_get(dcr);
case 1: return vmx_cr_get(itm);
case 2: return vmx_cr_get(iva);
case 8: return vmx_cr_get(pta);
#include <asm/vmx_vpd.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vhpt.h>
+#include <asm/vcpu.h>
#include <asm/tlbflush.h>
#include <asm/regionreg.h>
#include <asm/dom_fw.h>
if (!VMX_DOMAIN(next)) {
/* VMX domains can change the physical cr.dcr.
* Restore default to prevent leakage. */
- ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK
- | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_PP
- | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
+ ia64_setreg(_IA64_REG_CR_DCR, IA64_DEFAULT_DCR_BITS);
}
}
if (VMX_DOMAIN(next))
er->dtrs[i].rid = v->arch.dtrs[i].rid;
}
er->event_callback_ip = v->arch.event_callback_ip;
- er->dcr = v->arch.dcr;
+ er->dcr = PSCB(v,dcr);
er->iva = v->arch.iva;
}
er->dtrs[i].rid);
}
v->arch.event_callback_ip = er->event_callback_ip;
- v->arch.dcr = er->dcr;
+ PSCB(v,dcr) = er->dcr;
v->arch.iva = er->iva;
}
u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
{
- u64 dcr = PSCBX(vcpu, dcr);
+ u64 dcr = PSCB(vcpu, dcr);
PSR psr;
//printk("*** vcpu_get_ipsr_int_state (0x%016lx)...\n",prevpsr);
IA64FAULT vcpu_get_dcr(VCPU * vcpu, u64 * pval)
{
-//verbose("vcpu_get_dcr: called @%p\n",PSCB(vcpu,iip));
- // Reads of cr.dcr on Xen always have the sign bit set, so
- // a domain can differentiate whether it is running on SP or not
- *pval = PSCBX(vcpu, dcr) | 0x8000000000000000L;
+ *pval = PSCB(vcpu, dcr);
return IA64_NO_FAULT;
}
IA64FAULT vcpu_set_dcr(VCPU * vcpu, u64 val)
{
- // Reads of cr.dcr on SP always have the sign bit set, so
- // a domain can differentiate whether it is running on SP or not
- // Thus, writes of DCR should ignore the sign bit
-//verbose("vcpu_set_dcr: called\n");
- PSCBX(vcpu, dcr) = val & ~0x8000000000000000L;
+ PSCB(vcpu, dcr) = val;
return IA64_NO_FAULT;
}
unsigned long irr[4]; /* Interrupt request register. */
unsigned long insvc[4]; /* Interrupt in service. */
unsigned long iva;
- unsigned long dcr;
unsigned long domain_itm;
unsigned long domain_itm_last;
extern void data_page_not_present(VCPU * vcpu, u64 vadr);
extern void inst_page_not_present(VCPU * vcpu, u64 vadr);
extern void data_access_rights(VCPU * vcpu, u64 vadr);
+extern void vmx_ia64_set_dcr(VCPU * v);
/**************************************************************************
VCPU control register access routines
**************************************************************************/
-static inline IA64FAULT vmx_vcpu_get_dcr(VCPU * vcpu, u64 * pval)
-{
- *pval = VCPU(vcpu, dcr);
- return IA64_NO_FAULT;
-}
-
static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
{
*pval = VCPU(vcpu, itm);
return IA64_NO_FAULT;
}
-static inline IA64FAULT vmx_vcpu_set_dcr(VCPU * vcpu, u64 val)
-{
- u64 mdcr, mask;
- VCPU(vcpu, dcr) = val;
- /* All vDCR bits will go to mDCR, except for be/pp/dm bits */
- mdcr = ia64_get_dcr();
- /* Machine dcr.dm masked to handle guest ld.s on tr mapped page */
- mask = IA64_DCR_BE | IA64_DCR_PP | IA64_DCR_DM;
- mdcr = (mdcr & mask) | (val & (~mask));
- ia64_set_dcr(mdcr);
- VMX(vcpu, mdcr) = mdcr;
- return IA64_NO_FAULT;
-}
-
static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
{
vtm_set_itm(vcpu, val);
unsigned long cr_isr; /* for emulation */
unsigned long cause;
unsigned long opcode;
-
-// unsigned long mrr5;
-// unsigned long mrr6;
-// unsigned long mrr7;
- unsigned long mdcr;
unsigned long mpta;
-// unsigned long rfi_pfs;
-// unsigned long rfi_iip;
-// unsigned long rfi_ipsr;
-// unsigned long rfi_ifs;
unsigned long flags;
unsigned long xen_port;
unsigned char xtp;
#define IA64_PSR_VM_BIT 46
#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
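+/* Default dcr bits: defer all faults on speculative loads
+ * (dm/dp/dk/dx/dr/da/dd) and set pp and lc. */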
+#define IA64_DEFAULT_DCR_BITS (IA64_DCR_PP | IA64_DCR_LC | IA64_DCR_DM | \
+ IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | \
+ IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD)
+
/* Interruption Function State */
#define IA64_IFS_V_BIT 63
#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT)