return cpu_frequency_change((uint64_t)data);
}
-static bool allow_access_msr(unsigned int msr)
+/*
+ * Whitelist of MSRs dom0 may read via XEN_RESOURCE_OP_MSR_READ.
+ * Everything not explicitly allowed here is rejected by the caller.
+ */
+static bool msr_read_allowed(unsigned int msr)
{
switch ( msr )
{
- /* MSR for CMT, refer to chapter 17.14 of Intel SDM. */
+ /* MSRs for CMT, refer to chapter 17.14 of Intel SDM. */
case MSR_IA32_CMT_EVTSEL:
case MSR_IA32_CMT_CTR:
+ return cpu_has_pqe;
+
case MSR_IA32_TSC:
return true;
}
+ /* The PPIN MSR is model-specific; permit it only when one is known. */
+ if ( ppin_msr && msr == ppin_msr )
+ return true;
+
+ return false;
+}
+
+/*
+ * Whitelist of MSRs dom0 may write via XEN_RESOURCE_OP_MSR_WRITE.
+ * Deliberately narrower than the read whitelist: neither TSC nor PPIN
+ * writes are permitted.
+ */
+static bool msr_write_allowed(unsigned int msr)
+{
+ switch ( msr )
+ {
+ /* MSRs for CMT, refer to chapter 17.14 of Intel SDM. */
+ case MSR_IA32_CMT_EVTSEL:
+ case MSR_IA32_CMT_CTR:
+ return cpu_has_pqe;
+ }
+
return false;
}
switch ( entry->u.cmd )
{
case XEN_RESOURCE_OP_MSR_READ:
- if ( ppin_msr && entry->idx == ppin_msr )
- break;
- /* fall through */
+ if ( entry->idx >> 32 )
+ ret = -EINVAL;
+ else if ( !msr_read_allowed(entry->idx) )
+ ret = -EPERM;
+ break;
+
case XEN_RESOURCE_OP_MSR_WRITE:
if ( entry->idx >> 32 )
ret = -EINVAL;
- else if ( !allow_access_msr(entry->idx) )
- ret = -EACCES;
+ else if ( !msr_write_allowed(entry->idx) )
+ ret = -EPERM;
break;
+
default:
ret = -EOPNOTSUPP;
break;
}
}
break;
+
case XEN_RESOURCE_OP_MSR_WRITE:
- if ( unlikely(entry->idx == MSR_IA32_TSC) )
- ret = -EPERM;
- else
- ret = wrmsr_safe(entry->idx, entry->val);
+ ret = wrmsr_safe(entry->idx, entry->val);
break;
+
default:
BUG();
break;
struct cpuid_leaf regs;
uint32_t feat_mask;
- if ( !psr_alloc_feat_enabled() || !boot_cpu_has(X86_FEATURE_PQE) )
+ if ( !psr_alloc_feat_enabled() || !cpu_has_pqe )
goto assoc_init;
if ( boot_cpu_data.cpuid_level < PSR_CPUID_LEVEL_CAT )
#define cpu_has_bmi2 boot_cpu_has(X86_FEATURE_BMI2)
#define cpu_has_invpcid boot_cpu_has(X86_FEATURE_INVPCID)
#define cpu_has_rtm boot_cpu_has(X86_FEATURE_RTM)
+#define cpu_has_pqe boot_cpu_has(X86_FEATURE_PQE)
#define cpu_has_fpu_sel (!boot_cpu_has(X86_FEATURE_NO_FPU_SEL))
#define cpu_has_mpx boot_cpu_has(X86_FEATURE_MPX)
#define cpu_has_avx512f boot_cpu_has(X86_FEATURE_AVX512F)