# define read_sreg(regs, sr) read_segment_register(sr)
#endif
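+/* True iff cpufreq is under dom0 kernel control and @d is dom0. */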
+static int is_cpufreq_controller(struct domain *d)
+{
+    return ((cpufreq_controller == FREQCTL_dom0_kernel) &&
+            (d->domain_id == 0));
+}
+
static int emulate_privileged_op(struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
@@ ... @@
        case MSR_K8_PSTATE7:
            if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
                goto fail;
-            if ( cpufreq_controller != FREQCTL_dom0_kernel )
+            if ( !is_cpufreq_controller(v->domain) )
                break;
            if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
                goto fail;
@@ ... @@
        case MSR_IA32_MPERF:
        case MSR_IA32_APERF:
        case MSR_IA32_PERF_CTL:
-            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
-                goto fail;
-            if ( cpufreq_controller != FREQCTL_dom0_kernel )
-                break;
-            if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
-                goto fail;
-            break;
        case MSR_IA32_THERM_CONTROL:
            if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
                goto fail;
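+            /* Ignore writes from domains other than the cpufreq controller. */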
+            if ( !is_cpufreq_controller(v->domain) )
+                break;
            if ( wrmsr_safe(regs->ecx, eax, edx) != 0 )
                goto fail;
            break;
@@ ... @@
        case MSR_K8_PSTATE7:
            if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
                goto fail;
-            if ( cpufreq_controller != FREQCTL_dom0_kernel )
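+            /* Domains other than the cpufreq controller read these MSRs as zero. */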
+            if ( !is_cpufreq_controller(v->domain) )
            {
                regs->eax = regs->edx = 0;
                break;
@@ ... @@
                         MSR_IA32_MISC_ENABLE_XTPR_DISABLE;
            break;
        case MSR_EFER:
-        case MSR_IA32_THERM_CONTROL:
        case MSR_AMD_PATCHLEVEL:
        default:
            if ( rdmsr_hypervisor_regs(regs->ecx, &l, &h) )