memset(&v->arch.user_regs, 0, sizeof(v->arch.user_regs));
v->arch.user_regs.eflags = X86_EFLAGS_MBS;
- memset(v->arch.debugreg, 0, sizeof(v->arch.debugreg));
- v->arch.debugreg[6] = X86_DR6_DEFAULT;
- v->arch.debugreg[7] = X86_DR7_DEFAULT;
+ memset(v->arch.dr, 0, sizeof(v->arch.dr));
+ v->arch.dr6 = X86_DR6_DEFAULT;
+ v->arch.dr7 = X86_DR7_DEFAULT;
}
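For reference, the quiescent values being installed here are the architectural defaults. A paraphrase of the relevant constants, assuming the definitions in Xen's asm-x86/debugreg.h (values taken from the architecture manuals; double-check against the tree in use):

    /* %dr6: reserved bits read as 1 (bit 12 as 0); no breakpoint has hit. */
    #define X86_DR6_DEFAULT 0xffff0ff0ul
    /* %dr7: only bit 10 (reserved, reads as 1) set; all breakpoints off. */
    #define X86_DR7_DEFAULT 0x00000400ul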
int arch_vcpu_create(struct vcpu *v)
if ( is_hvm_domain(d) )
{
- for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
- v->arch.debugreg[i] = c(debugreg[i]);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.dr); ++i )
+ v->arch.dr[i] = c(debugreg[i]);
+ v->arch.dr6 = c(debugreg[6]);
+ v->arch.dr7 = c(debugreg[7]);
hvm_set_info_guest(v);
goto out;
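A note on the c() accessor used throughout these arch_set_info_guest() hunks: it dispatches a field access between the native and compat layouts of the incoming vcpu_guest_context. Paraphrasing the local macro from Xen's domain.c:

    #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))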
v->arch.pv.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(v, cr4) :
real_cr4_to_pv_guest_cr4(mmu_cr4_features);
- memset(v->arch.debugreg, 0, sizeof(v->arch.debugreg));
- for ( i = 0; i < 8; i++ )
- (void)set_debugreg(v, i, c(debugreg[i]));
+ memset(v->arch.dr, 0, sizeof(v->arch.dr));
+ v->arch.dr6 = X86_DR6_DEFAULT;
+ v->arch.dr7 = X86_DR7_DEFAULT;
+ v->arch.pv.dr7_emul = 0;
+
+ for ( i = 0; i < ARRAY_SIZE(v->arch.dr); i++ )
+ set_debugreg(v, i, c(debugreg[i]));
+ set_debugreg(v, 6, c(debugreg[6]));
+ set_debugreg(v, 7, c(debugreg[7]));
if ( v->is_initialised )
goto out;
* inside Xen, before we get a chance to reload DR7, and this cannot always
* safely be handled.
*/
- if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
+ if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
write_debugreg(7, 0);
}
l4e_from_page(v->domain->arch.perdomain_l3_pg,
__PAGE_HYPERVISOR_RW);
- if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
+ if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
activate_debugregs(v);
if ( cpu_has_rdtscp )
}
}
- for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
- c(debugreg[i] = v->arch.debugreg[i]);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.dr); ++i )
+ c(debugreg[i] = v->arch.dr[i]);
+ c(debugreg[6] = v->arch.dr6);
+ c(debugreg[7] = v->arch.dr7 |
+ (is_pv_domain(d) ? v->arch.pv.dr7_emul : 0));
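This merge is what keeps the dr7/dr7_emul split transparent to the toolstack. A purely illustrative, self-contained sketch with a hypothetical guest value (an I/O breakpoint in slot 0: L0 = 1, R/W0 = 10b, LEN0 = 11b):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t guest_dr7 = 0x000e0001; /* as written by the guest */
        uint32_t io_enable = guest_dr7 & 3;     /* slot 0's enable bits */
        uint32_t dr7 = guest_dr7 & ~io_enable;  /* into hardware: 0x000e0000 */
        uint32_t dr7_emul = io_enable;          /* emulated by Xen: 0x00000001 */

        /* The merge in the hunk above reconstitutes the guest's view. */
        assert((dr7 | dr7_emul) == guest_dr7);
        return 0;
    }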
if ( is_hvm_domain(d) )
{
c.nat->ctrlreg[1] =
pagetable_is_null(v->arch.guest_table_user) ? 0
: xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
-
- /* Merge shadow DR7 bits into real DR7. */
- c.nat->debugreg[7] |= c.nat->debugreg[5];
- c.nat->debugreg[5] = 0;
}
else
{
c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
unmap_domain_page(l4e);
-
- /* Merge shadow DR7 bits into real DR7. */
- c.cmp->debugreg[7] |= c.cmp->debugreg[5];
- c.cmp->debugreg[5] = 0;
}
if ( guest_kernel_mode(v, &v->arch.user_regs) )
.cr2 = v->arch.hvm.guest_cr[2],
.cr3 = v->arch.hvm.guest_cr[3],
.cr4 = v->arch.hvm.guest_cr[4],
- .dr0 = v->arch.debugreg[0],
- .dr1 = v->arch.debugreg[1],
- .dr2 = v->arch.debugreg[2],
- .dr3 = v->arch.debugreg[3],
- .dr6 = v->arch.debugreg[6],
- .dr7 = v->arch.debugreg[7],
+ .dr0 = v->arch.dr[0],
+ .dr1 = v->arch.dr[1],
+ .dr2 = v->arch.dr[2],
+ .dr3 = v->arch.dr[3],
+ .dr6 = v->arch.dr6,
+ .dr7 = v->arch.dr7,
.msr_efer = v->arch.hvm.guest_efer,
};
v->arch.user_regs.r13 = ctxt.r13;
v->arch.user_regs.r14 = ctxt.r14;
v->arch.user_regs.r15 = ctxt.r15;
- v->arch.debugreg[0] = ctxt.dr0;
- v->arch.debugreg[1] = ctxt.dr1;
- v->arch.debugreg[2] = ctxt.dr2;
- v->arch.debugreg[3] = ctxt.dr3;
- v->arch.debugreg[6] = ctxt.dr6;
- v->arch.debugreg[7] = ctxt.dr7;
+ v->arch.dr[0] = ctxt.dr0;
+ v->arch.dr[1] = ctxt.dr1;
+ v->arch.dr[2] = ctxt.dr2;
+ v->arch.dr[3] = ctxt.dr3;
+ v->arch.dr6 = ctxt.dr6;
+ v->arch.dr7 = ctxt.dr7;
v->arch.vgc_flags = VGCF_online;
rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
}
- v->arch.debugreg[0] = read_debugreg(0);
- v->arch.debugreg[1] = read_debugreg(1);
- v->arch.debugreg[2] = read_debugreg(2);
- v->arch.debugreg[3] = read_debugreg(3);
- v->arch.debugreg[6] = vmcb_get_dr6(vmcb);
- v->arch.debugreg[7] = vmcb_get_dr7(vmcb);
+ v->arch.dr[0] = read_debugreg(0);
+ v->arch.dr[1] = read_debugreg(1);
+ v->arch.dr[2] = read_debugreg(2);
+ v->arch.dr[3] = read_debugreg(3);
+ v->arch.dr6 = vmcb_get_dr6(vmcb);
+ v->arch.dr7 = vmcb_get_dr7(vmcb);
}
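Background on why svm_save_dr() reads %dr0-%dr3 from hardware but %dr6/%dr7 from the VMCB (architectural behaviour, not part of this patch):

    /*
     * VMRUN loads guest DR6/DR7 from the VMCB and #VMEXIT writes them
     * back, so their live values are in the VMCB.  DR0-DR3 are not part
     * of the VMCB save area and stay in the hardware registers.
     */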
static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
}
- write_debugreg(0, v->arch.debugreg[0]);
- write_debugreg(1, v->arch.debugreg[1]);
- write_debugreg(2, v->arch.debugreg[2]);
- write_debugreg(3, v->arch.debugreg[3]);
- vmcb_set_dr6(vmcb, v->arch.debugreg[6]);
- vmcb_set_dr7(vmcb, v->arch.debugreg[7]);
+ write_debugreg(0, v->arch.dr[0]);
+ write_debugreg(1, v->arch.dr[1]);
+ write_debugreg(2, v->arch.dr[2]);
+ write_debugreg(3, v->arch.dr[3]);
+ vmcb_set_dr6(vmcb, v->arch.dr6);
+ vmcb_set_dr7(vmcb, v->arch.dr7);
}
/*
static void svm_restore_dr(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
+
+ if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
__restore_debug_registers(vmcb, v);
}
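The DR7_ACTIVE_MASK test gates the whole lazy-restore path. Paraphrasing the mask's definition from Xen's debugreg.h, shown here for context:

    #define DR_LOCAL_ENABLE_MASK  0x55 /* L0..L3 */
    #define DR_GLOBAL_ENABLE_MASK 0xaa /* G0..G3 */
    /* If no enable bit is set, no hardware breakpoint can fire. */
    #define DR7_ACTIVE_MASK (DR_LOCAL_ENABLE_MASK | DR_GLOBAL_ENABLE_MASK)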
v->arch.hvm.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
vmx_update_cpu_exec_control(v);
- v->arch.debugreg[0] = read_debugreg(0);
- v->arch.debugreg[1] = read_debugreg(1);
- v->arch.debugreg[2] = read_debugreg(2);
- v->arch.debugreg[3] = read_debugreg(3);
- v->arch.debugreg[6] = read_debugreg(6);
+ v->arch.dr[0] = read_debugreg(0);
+ v->arch.dr[1] = read_debugreg(1);
+ v->arch.dr[2] = read_debugreg(2);
+ v->arch.dr[3] = read_debugreg(3);
+ v->arch.dr6 = read_debugreg(6);
/* DR7 must be saved as it is used by vmx_restore_dr(). */
- __vmread(GUEST_DR7, &v->arch.debugreg[7]);
+ __vmread(GUEST_DR7, &v->arch.dr7);
}
static void __restore_debug_registers(struct vcpu *v)
v->arch.hvm.flag_dr_dirty = 1;
- write_debugreg(0, v->arch.debugreg[0]);
- write_debugreg(1, v->arch.debugreg[1]);
- write_debugreg(2, v->arch.debugreg[2]);
- write_debugreg(3, v->arch.debugreg[3]);
- write_debugreg(6, v->arch.debugreg[6]);
+ write_debugreg(0, v->arch.dr[0]);
+ write_debugreg(1, v->arch.dr[1]);
+ write_debugreg(2, v->arch.dr[2]);
+ write_debugreg(3, v->arch.dr[3]);
+ write_debugreg(6, v->arch.dr6);
/* DR7 is loaded from the VMCS. */
}
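The VMX side mirrors SVM with a different hardware split (architectural behaviour, not part of this patch):

    /*
     * VM entry loads %dr7 from the GUEST_DR7 VMCS field, but DR0-DR3 and
     * %dr6 have no VMCS home, so software must restore them as above.
     */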
static void vmx_restore_dr(struct vcpu *v)
{
/* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
- if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
+ if ( unlikely(v->arch.dr7 & DR7_ACTIVE_MASK) )
__restore_debug_registers(v);
}
vmx_vmcs_enter(v);
- __vmwrite(GUEST_DR7, v->arch.debugreg[7]);
+ __vmwrite(GUEST_DR7, v->arch.dr7);
/*
* If the interruptibility-state field indicates blocking by STI,
unsigned int width, i, match = 0;
unsigned long start;
- if ( !(v->arch.debugreg[5]) || !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )
+ if ( !v->arch.pv.dr7_emul || !(v->arch.pv.ctrlreg[4] & X86_CR4_DE) )
return 0;
for ( i = 0; i < 4; i++ )
{
- if ( !(v->arch.debugreg[5] &
- (3 << (i * DR_ENABLE_SIZE))) )
+ if ( !(v->arch.pv.dr7_emul & (3 << (i * DR_ENABLE_SIZE))) )
continue;
- start = v->arch.debugreg[i];
+ start = v->arch.dr[i];
width = 0;
- switch ( (v->arch.debugreg[7] >>
+ switch ( (v->arch.dr7 >>
(DR_CONTROL_SHIFT + i * DR_CONTROL_SIZE)) & 0xc )
{
case DR_LEN_1: width = 1; break;
if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
break;
curr->arch.pv.dr_mask[0] = val;
- if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
+ if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
return X86EMUL_OKAY;
if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
break;
curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
- if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
+ if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
wrmsrl(reg, val);
return X86EMUL_OKAY;
ctxt.bpmatch |= DR_STEP;
if ( ctxt.bpmatch )
{
- curr->arch.debugreg[6] |= ctxt.bpmatch | DR_STATUS_RESERVED_ONE;
+ curr->arch.dr6 |= ctxt.bpmatch | DR_STATUS_RESERVED_ONE;
if ( !(curr->arch.pv.trap_bounce.flags & TBF_EXCEPTION) )
pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
regs->eflags &= ~X86_EFLAGS_RF;
if ( regs->eflags & X86_EFLAGS_TF )
{
- current->arch.debugreg[6] |= DR_STEP | DR_STATUS_RESERVED_ONE;
+ current->arch.dr6 |= DR_STEP | DR_STATUS_RESERVED_ONE;
pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
}
}
/* Save the debug status register where the guest OS can peek at it. */
- v->arch.debugreg[6] |= (dr6 & ~X86_DR6_DEFAULT);
- v->arch.debugreg[6] &= (dr6 | ~X86_DR6_DEFAULT);
+ v->arch.dr6 |= (dr6 & ~X86_DR6_DEFAULT);
+ v->arch.dr6 &= (dr6 | ~X86_DR6_DEFAULT);
pv_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
}
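The two-step |=/&= merge respects the mixed polarity of %dr6 bits. A minimal self-contained restatement of the idiom, assuming X86_DR6_DEFAULT == 0xffff0ff0:

    #include <stdint.h>

    #define X86_DR6_DEFAULT 0xffff0ff0u /* assumed; see the defaults note above */

    /* Bits which default to 0 (B0-B3, BD, BS, BT) latch high with OR;
     * bits which default to 1 (e.g. bit 16, RTM) latch low with AND. */
    static inline uint32_t merge_dr6(uint32_t cur, uint32_t sample)
    {
        cur |= sample & ~X86_DR6_DEFAULT;
        cur &= sample | ~X86_DR6_DEFAULT;
        return cur;
    }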
{
ASSERT(curr == current);
- write_debugreg(0, curr->arch.debugreg[0]);
- write_debugreg(1, curr->arch.debugreg[1]);
- write_debugreg(2, curr->arch.debugreg[2]);
- write_debugreg(3, curr->arch.debugreg[3]);
- write_debugreg(6, curr->arch.debugreg[6]);
+ write_debugreg(0, curr->arch.dr[0]);
+ write_debugreg(1, curr->arch.dr[1]);
+ write_debugreg(2, curr->arch.dr[2]);
+ write_debugreg(3, curr->arch.dr[3]);
+ write_debugreg(6, curr->arch.dr6);
/*
* Avoid writing a value which would immediately be replaced when called
* from set_debugreg() below. Future callers will need to take this
* into account.
*/
- if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
- write_debugreg(7, curr->arch.debugreg[7]);
+ if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
+ write_debugreg(7, curr->arch.dr7);
if ( boot_cpu_has(X86_FEATURE_DBEXT) )
{
if ( !access_ok(value, sizeof(long)) )
return -EPERM;
+ v->arch.dr[reg] = value;
if ( v == curr )
{
switch ( reg )
*/
value &= ~DR_STATUS_RESERVED_ZERO; /* reserved bits => 0 */
value |= DR_STATUS_RESERVED_ONE; /* reserved bits => 1 */
+
+ v->arch.dr6 = value;
if ( v == curr )
write_debugreg(6, value);
break;
}
}
- /* Guest DR5 is a handy stash for I/O intercept information. */
- v->arch.debugreg[5] = io_enable;
+ v->arch.pv.dr7_emul = io_enable;
value &= ~io_enable;
/*
* debug registers at this point as they were not restored during
* context switch. Updating DR7 itself happens later.
*/
- if ( (v == curr) &&
- !(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
+ if ( (v == curr) && !(v->arch.dr7 & DR7_ACTIVE_MASK) )
activate_debugregs(v);
}
else
/* Zero the emulated controls if %dr7 isn't active. */
- v->arch.debugreg[5] = 0;
+ v->arch.pv.dr7_emul = 0;
+ v->arch.dr7 = value;
if ( v == curr )
write_debugreg(7, value);
break;
return -ENODEV;
}
- v->arch.debugreg[reg] = value;
return 0;
}
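For context on where io_enable comes from (it is computed earlier in set_debugreg()'s %dr7 case, outside this diff), here is a self-contained paraphrase under the usual constants (DR_CONTROL_SHIFT = 16, DR_CONTROL_SIZE = 4, DR_IO = 2); the helper name is invented for illustration:

    /* Collect the enable bits of breakpoints configured as I/O breakpoints,
     * which cannot be armed in hardware for a PV guest. */
    static unsigned int split_io_enables(unsigned long value)
    {
        unsigned int i, io_enable = 0;

        for ( i = 16; i < 32; i += 4 )
            if ( ((value >> i) & 3) == 2 /* DR_IO */ )
                io_enable |= value & (3 << ((i - 16) >> 1));

        return io_enable; /* caller: dr7_emul = io_enable; value &= ~io_enable; */
    }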
req->data.regs.x86.rflags = regs->rflags;
req->data.regs.x86.rip = regs->rip;
- req->data.regs.x86.dr7 = curr->arch.debugreg[7];
+ req->data.regs.x86.dr7 = curr->arch.dr7;
req->data.regs.x86.cr0 = curr->arch.hvm.guest_cr[0];
req->data.regs.x86.cr2 = curr->arch.hvm.guest_cr[2];
req->data.regs.x86.cr3 = curr->arch.hvm.guest_cr[3];
switch ( reg )
{
case 0 ... 3:
- *val = curr->arch.debugreg[reg];
+ *val = curr->arch.dr[reg];
break;
case 4:
/* Fallthrough */
case 6:
- *val = curr->arch.debugreg[6];
+ *val = curr->arch.dr6;
break;
case 5:
/* Fallthrough */
case 7:
- *val = (curr->arch.debugreg[7] |
- curr->arch.debugreg[5]);
+ *val = curr->arch.dr7 | curr->arch.pv.dr7_emul;
break;
ud_fault:
spinlock_t shadow_ldt_lock;
#endif
+ /*
+ * %dr7 bits which the guest has set, but which aren't loaded into
+ * hardware and are completely emulated.
+ */
+ uint32_t dr7_emul;
+
/* data breakpoint extension MSRs */
uint32_t dr_mask[4];
void *fpu_ctxt;
unsigned long vgc_flags;
struct cpu_user_regs user_regs;
- unsigned long debugreg[8];
+
+ /* Debug registers. */
+ unsigned long dr[4];
+ unsigned long dr7; /* Ideally int, but __vmread() needs long. */
+ unsigned int dr6;
/* other state */
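Finally, a sketch of the constraint behind the asymmetric widths above (the __vmread() shape is paraphrased, not quoted):

    /*
     * __vmread() stores a full unsigned long through its pointer argument:
     *
     *     __vmread(GUEST_DR7, &v->arch.dr7);  // needs unsigned long
     *
     * %dr6 only moves via read_debugreg()/vmcb_get_dr6(), so a 32-bit
     * field suffices there.
     */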