#include <asm/regs.h>
#include <asm/types.h>
#include <asm/apic.h>
+#include <asm/traps.h>
#include <asm/msr.h>
#include <asm/msr-index.h>
#include <asm/hvm/support.h>
rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
for ( i = 0; i < arch_pmc_cnt; i++ )
rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
+
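+ /*
+  * PV(H) vcpus use the physical PMU MSRs directly (there is no VMCS
+  * guest-MSR area for them), so the overflow status is read straight
+  * from the hardware MSR.
+  */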
+ if ( !has_hvm_container_vcpu(v) )
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
}
static int core2_vpmu_save(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
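+ /* For PV(H) vcpus, freeze the counters before any state is saved. */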
+ if ( !has_hvm_container_vcpu(v) )
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
return 0;
wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, core2_vpmu_cxt->fixed_ctrl);
wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
wrmsrl(MSR_IA32_PEBS_ENABLE, core2_vpmu_cxt->pebs_enable);
+
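+ /* PV(H) vcpus: restore the global control state directly to hardware. */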
+ if ( !has_hvm_container_vcpu(v) )
+ {
+ wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
+ core2_vpmu_cxt->global_ovf_ctrl = 0;
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
+ }
}
static void core2_vpmu_load(struct vcpu *v)
static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
uint64_t supported)
{
- u64 global_ctrl;
int i, tmp;
int type = -1, index = -1;
struct vcpu *v = current;
switch ( msr )
{
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
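+ /*
+  * Writable bits are the overflow-clear bits only: CondChgd/OvfBuf
+  * (bits 63:62) plus one bit per fixed and per general-purpose
+  * counter; reject attempts to set anything else.
+  */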
+ if ( msr_content & ~(0xC000000000000000ULL |
+ (((1ULL << fixed_pmc_cnt) - 1) << 32) |
+ ((1ULL << arch_pmc_cnt) - 1)) )
+ return -EINVAL;
core2_vpmu_cxt->global_status &= ~msr_content;
+ wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, msr_content);
return 0;
case MSR_CORE_PERF_GLOBAL_STATUS:
gdprintk(XENLOG_INFO, "Cannot write read-only MSR: "
gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
return 0;
case MSR_CORE_PERF_GLOBAL_CTRL:
- global_ctrl = msr_content;
+ core2_vpmu_cxt->global_ctrl = msr_content;
break;
case MSR_CORE_PERF_FIXED_CTR_CTRL:
if ( msr_content &
( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
return -EINVAL;
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+ if ( has_hvm_container_vcpu(v) )
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+ &core2_vpmu_cxt->global_ctrl);
+ else
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
*enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
if ( msr_content != 0 )
{
if ( msr_content & (~((1ull << 32) - 1)) )
return -EINVAL;
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
+ if ( has_hvm_container_vcpu(v) )
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
+ &core2_vpmu_cxt->global_ctrl);
+ else
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
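/* Bit 22 of an event-select MSR is its enable (EN) bit. */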
if ( msr_content & (1ULL << 22) )
*enabled_cntrs |= 1ULL << tmp;
if ( type != MSR_TYPE_GLOBAL )
wrmsrl(msr, msr_content);
else
- vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ {
+ if ( has_hvm_container_vcpu(v) )
+ vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ else
+ wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ }
- if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
+ if ( (core2_vpmu_cxt->global_ctrl & *enabled_cntrs) ||
+ (core2_vpmu_cxt->ds_area != 0) )
vpmu_set(vpmu, VPMU_RUNNING);
else
vpmu_reset(vpmu, VPMU_RUNNING);
*msr_content = core2_vpmu_cxt->global_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ if ( has_hvm_container_vcpu(v) )
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
+ else
+ rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
break;
default:
rdmsrl(msr, *msr_content);
#include <asm/apic.h>
#include <asm/mc146818rtc.h>
#include <asm/hpet.h>
+#include <asm/hvm/vpmu.h>
#include <public/arch-x86/cpuid.h>
#include <xsm/xsm.h>
__clear_bit(X86_FEATURE_TOPOEXT % 32, &c);
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
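+ /* Leave this leaf intact here; vpmu_do_cpuid() below adjusts it. */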
+ break;
+
case 0x00000005: /* MONITOR/MWAIT */
- case 0x0000000a: /* Architectural Performance Monitor Features */
case 0x0000000b: /* Extended Topology Enumeration */
case 0x8000000a: /* SVM revision and features */
case 0x8000001b: /* Instruction Based Sampling */
}
out:
+ /* VPMU may decide to modify some of the leaves */
+ vpmu_do_cpuid(regs->eax, &a, &b, &c, &d);
+
regs->eax = a;
regs->ebx = b;
regs->ecx = c;
char *io_emul_stub = NULL;
void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
uint64_t val;
+ bool_t vpmu_msr;
if ( !read_descriptor(regs->cs, v, regs,
&code_base, &code_limit, &ar,
uint32_t eax = regs->eax;
uint32_t edx = regs->edx;
uint64_t msr_content = ((uint64_t)edx << 32) | eax;
-
+ vpmu_msr = 0;
switch ( regs->_ecx )
{
case MSR_FS_BASE:
if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
wrmsrl(regs->_ecx, msr_content);
break;
+ case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+ case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+ case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+ case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ {
+ vpmu_msr = 1;
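+ /*
+  * The case label below sits inside the Intel if() on purpose: AMD
+  * MSRs jump straight to the shared handling, while Intel MSRs reach
+  * it with vpmu_msr already set.
+  */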
+ case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+ if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+ {
+ if ( vpmu_do_wrmsr(regs->ecx, msr_content, 0) )
+ goto fail;
+ }
+ break;
+ }
+ /*FALLTHROUGH*/
default:
if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
break;
case 0x32: /* RDMSR */
+ vpmu_msr = 0;
switch ( regs->_ecx )
{
case MSR_FS_BASE:
[regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1];
regs->edx = 0;
break;
+ case MSR_IA32_PERF_CAPABILITIES:
+ /* No extra capabilities are supported */
+ regs->eax = regs->edx = 0;
+ break;
+ case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+ case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+ case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+ case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ {
+ vpmu_msr = 1;
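+ /* Same trick as in the WRMSR path: the AMD case label below is
+  * inside the Intel if() so both vendors share one handler. */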
+ case MSR_AMD_FAM15H_EVNTSEL0...MSR_AMD_FAM15H_PERFCTR5:
+ if ( vpmu_msr || (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) )
+ {
+ if ( vpmu_do_rdmsr(regs->ecx, &val) )
+ goto fail;
+
+ regs->eax = (uint32_t)val;
+ regs->edx = (uint32_t)(val >> 32);
+ }
+ break;
+ }
+ /*FALLTHROUGH*/
default:
if ( rdmsr_hypervisor_regs(regs->ecx, &val) )