x86: Consolidate the storage of MSR_AMD64_DR{0-3}_ADDRESS_MASK
author     Andrew Cooper <andrew.cooper3@citrix.com>
           Fri, 19 Oct 2018 15:14:22 +0000 (16:14 +0100)
committer  Wei Liu <wei.liu2@citrix.com>
           Thu, 1 Nov 2018 10:15:10 +0000 (10:15 +0000)
The PV and HVM code each keep a copy of these masks, which gives the
false impression in the context switch code that the values are PV- or
HVM-specific.

Move the storage into struct vcpu_msrs, and update all users to match.
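
For illustration only (simplified stand-in types, not the actual Xen
definitions), the refactor amounts to moving the array out of the
per-mode structures and into the common per-vCPU MSR state, so the PV
and HVM paths index the same storage:

    /* Minimal sketch of the consolidation; all types here are
     * simplified stand-ins for the real Xen structures. */
    #include <stdint.h>

    struct vcpu_msrs {
        /* MSR_AMD64_DR{0-3}_ADDRESS_MASK, shared by PV and HVM vcpus. */
        uint32_t dr_mask[4];
    };

    struct vcpu {
        struct {
            struct vcpu_msrs *msrs;  /* one allocation per vcpu */
            /* arch.pv.dr_mask and arch.hvm.svm.dr_mask are gone;
             * both paths now use arch.msrs->dr_mask. */
        } arch;
    };

    /* PV emulation and the SVM intercepts now read the same slot. */
    static inline uint32_t read_dr_mask(const struct vcpu *v,
                                        unsigned int idx)
    {
        return v->arch.msrs->dr_mask[idx];
    }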

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/domctl.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/pv/emul-priv-op.c
xen/arch/x86/traps.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/hvm/svm/vmcb.h
xen/include/asm-x86/msr.h

xen/arch/x86/domctl.c
index cc85395e96f97e80d291d5789f2284c7e86b4ec3..f79827e6e4dcac443d29a9f4f7d0496a3a79545c 100644
@@ -1328,12 +1328,12 @@ long arch_do_domctl(
 
                 if ( boot_cpu_has(X86_FEATURE_DBEXT) )
                 {
-                    if ( v->arch.pv.dr_mask[0] )
+                    if ( v->arch.msrs->dr_mask[0] )
                     {
                         if ( i < vmsrs->msr_count && !ret )
                         {
                             msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
-                            msr.value = v->arch.pv.dr_mask[0];
+                            msr.value = v->arch.msrs->dr_mask[0];
                             if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                                 ret = -EFAULT;
                         }
@@ -1342,12 +1342,12 @@ long arch_do_domctl(
 
                     for ( j = 0; j < 3; ++j )
                     {
-                        if ( !v->arch.pv.dr_mask[1 + j] )
+                        if ( !v->arch.msrs->dr_mask[1 + j] )
                             continue;
                         if ( i < vmsrs->msr_count && !ret )
                         {
                             msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
-                            msr.value = v->arch.pv.dr_mask[1 + j];
+                            msr.value = v->arch.msrs->dr_mask[1 + j];
                             if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                                 ret = -EFAULT;
                         }
@@ -1392,7 +1392,7 @@ long arch_do_domctl(
                     if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                          (msr.value >> 32) )
                         break;
-                    v->arch.pv.dr_mask[0] = msr.value;
+                    v->arch.msrs->dr_mask[0] = msr.value;
                     continue;
 
                 case MSR_AMD64_DR1_ADDRESS_MASK ...
@@ -1401,7 +1401,7 @@ long arch_do_domctl(
                          (msr.value >> 32) )
                         break;
                     msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
-                    v->arch.pv.dr_mask[msr.index] = msr.value;
+                    v->arch.msrs->dr_mask[msr.index] = msr.value;
                     continue;
                 }
                 break;
xen/arch/x86/hvm/svm/svm.c
index 41427e7b9b2f90adac7c23486b302b7257f07984..be48ca72c54dca980d40eee1e08afd31deef7850 100644
@@ -210,10 +210,10 @@ static void svm_save_dr(struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_RW);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_RW);
 
-        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
-        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
-        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
-        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
+        rdmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
+        rdmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
+        rdmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
+        rdmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
     }
 
     v->arch.dr[0] = read_debugreg(0);
@@ -241,10 +241,10 @@ static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
         svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
         svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
 
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.hvm.svm.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
     }
 
     write_debugreg(0, v->arch.dr[0]);
@@ -422,19 +422,19 @@ static void svm_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[0];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[0];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR0_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[1];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[1];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR1_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[2];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[2];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR2_ADDRESS_MASK;
 
-        ctxt->msr[ctxt->count].val = v->arch.hvm.svm.dr_mask[3];
+        ctxt->msr[ctxt->count].val = v->arch.msrs->dr_mask[3];
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_AMD64_DR3_ADDRESS_MASK;
     }
@@ -455,7 +455,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm.svm.dr_mask[0] = ctxt->msr[i].val;
+                v->arch.msrs->dr_mask[0] = ctxt->msr[i].val;
             break;
 
         case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
@@ -464,7 +464,7 @@ static int svm_load_msr(struct vcpu *v, struct hvm_msr *ctxt)
             else if ( ctxt->msr[i].val >> 32 )
                 err = -EDOM;
             else
-                v->arch.hvm.svm.dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+                v->arch.msrs->dr_mask[idx - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
                     ctxt->msr[i].val;
             break;
 
@@ -2079,14 +2079,14 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
-        *msr_content = v->arch.hvm.svm.dr_mask[0];
+        *msr_content = v->arch.msrs->dr_mask[0];
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext )
             goto gpf;
         *msr_content =
-            v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+            v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         break;
 
     case MSR_AMD_OSVW_ID_LENGTH:
@@ -2277,13 +2277,13 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm.svm.dr_mask[0] = msr_content;
+        v->arch.msrs->dr_mask[0] = msr_content;
         break;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !v->domain->arch.cpuid->extd.dbext || (msr_content >> 32) )
             goto gpf;
-        v->arch.hvm.svm.dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
+        v->arch.msrs->dr_mask[msr - MSR_AMD64_DR1_ADDRESS_MASK + 1] =
             msr_content;
         break;
 
xen/arch/x86/pv/emul-priv-op.c
index aecf517cf0e8a9af1555cdf1adc4e5c0d69bca98..f73ea4a163dca14e2ff4c6f063a3fb302f65341d 100644
@@ -916,13 +916,13 @@ static int read_msr(unsigned int reg, uint64_t *val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv.dr_mask[0];
+        *val = curr->arch.msrs->dr_mask[0];
         return X86EMUL_OKAY;
 
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
             break;
-        *val = curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+        *val = curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1];
         return X86EMUL_OKAY;
 
     case MSR_IA32_PERF_CAPABILITIES:
@@ -1110,7 +1110,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR0_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv.dr_mask[0] = val;
+        curr->arch.msrs->dr_mask[0] = val;
         if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
             wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, val);
         return X86EMUL_OKAY;
@@ -1118,7 +1118,7 @@ static int write_msr(unsigned int reg, uint64_t val,
     case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
         if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (val >> 32) )
             break;
-        curr->arch.pv.dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
+        curr->arch.msrs->dr_mask[reg - MSR_AMD64_DR1_ADDRESS_MASK + 1] = val;
         if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
             wrmsrl(reg, val);
         return X86EMUL_OKAY;
xen/arch/x86/traps.c
index 9b532199a87e192f694f748057c42655be458f75..c60c8f5c2af6e0efed93aab50d3c1c9989818949 100644
@@ -2071,10 +2071,10 @@ void activate_debugregs(const struct vcpu *curr)
 
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv.dr_mask[0]);
-        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv.dr_mask[1]);
-        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv.dr_mask[2]);
-        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv.dr_mask[3]);
+        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.msrs->dr_mask[0]);
+        wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.msrs->dr_mask[1]);
+        wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.msrs->dr_mask[2]);
+        wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.msrs->dr_mask[3]);
     }
 }
 
xen/include/asm-x86/domain.h
index 1a88cac08314b7038516a8748708f549d65f947d..721403782045eed5646b4a511e478db31139df54 100644
@@ -553,9 +553,6 @@ struct pv_vcpu
      */
     uint32_t dr7_emul;
 
-    /* data breakpoint extension MSRs */
-    uint32_t dr_mask[4];
-
     /* Deferred VA-based update state. */
     bool_t need_update_runstate_area;
     struct vcpu_time_info pending_system_time;
xen/include/asm-x86/hvm/svm/vmcb.h
index 48aed78292dbb6a651e60f8e8cc4165a63e232b4..70177059e74c13eedd5b9a4b17d3fba75ac3fe9a 100644
@@ -538,9 +538,6 @@ struct svm_vcpu {
     /* AMD lightweight profiling MSR */
     uint64_t guest_lwp_cfg;      /* guest version */
     uint64_t cpu_lwp_cfg;        /* CPU version */
-
-    /* data breakpoint extension MSRs */
-    uint32_t dr_mask[4];
 };
 
 struct vmcb_struct *alloc_vmcb(void);
xen/include/asm-x86/msr.h
index 7a061b28e8dfa8bd61a253c8fd79b246a496fb4a..c1cb38fab3eb8a4d0e1eea15ff50f1606af09b06 100644
@@ -287,6 +287,12 @@ struct vcpu_msrs
             bool cpuid_faulting:1;
         };
     } misc_features_enables;
+
+    /*
+     * 0xc00110{27,19-1b} MSR_AMD64_DR{0-3}_ADDRESS_MASK
+     * TODO: Not yet handled by guest_{rd,wr}msr() infrastructure.
+     */
+    uint32_t dr_mask[4];
 };
 
 void init_guest_msr_policy(void);
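
The TODO in the comment above notes that these MSRs still bypass the
common guest_{rd,wr}msr() infrastructure and remain handled ad-hoc in
the PV and SVM intercept paths.  Purely as a hypothetical sketch of
what such handling might look like (self-contained stand-ins; only the
MSR indexes, the DBEXT gate, and the upper-32-bit check are taken from
the patch above):

    #include <stdbool.h>
    #include <stdint.h>

    /* Indexes from the comment above: 0xc00110{27,19-1b}. */
    #define MSR_AMD64_DR0_ADDRESS_MASK 0xc0011027
    #define MSR_AMD64_DR1_ADDRESS_MASK 0xc0011019
    #define MSR_AMD64_DR3_ADDRESS_MASK 0xc001101b

    struct vcpu_msrs { uint32_t dr_mask[4]; };

    /* DR0's mask MSR is discontiguous from DR1-DR3's, hence the same
     * "msr - MSR_AMD64_DR1_ADDRESS_MASK + 1" arithmetic as the patch. */
    static int dr_mask_slot(uint32_t msr)
    {
        if ( msr == MSR_AMD64_DR0_ADDRESS_MASK )
            return 0;
        if ( msr >= MSR_AMD64_DR1_ADDRESS_MASK &&
             msr <= MSR_AMD64_DR3_ADDRESS_MASK )
            return msr - MSR_AMD64_DR1_ADDRESS_MASK + 1;
        return -1;
    }

    /* Reads fail unless the guest sees CPUID.DBEXT. */
    static int sketch_rdmsr(const struct vcpu_msrs *msrs, bool dbext,
                            uint32_t msr, uint64_t *val)
    {
        int slot = dr_mask_slot(msr);

        if ( slot < 0 || !dbext )
            return -1;
        *val = msrs->dr_mask[slot];
        return 0;
    }

    /* The masks are 32 bits wide, so writes with upper bits set fault. */
    static int sketch_wrmsr(struct vcpu_msrs *msrs, bool dbext,
                            uint32_t msr, uint64_t val)
    {
        int slot = dr_mask_slot(msr);

        if ( slot < 0 || !dbext || (val >> 32) )
            return -1;
        msrs->dr_mask[slot] = val;
        return 0;
    }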