#endif
}
-static void cf_check vpmu_save_force(void *arg)
+#ifdef CONFIG_MEM_SHARING
+int vpmu_allocate_context(struct vcpu *v)
+{
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+ if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
+ return 0;
+
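+ /* The vendor hook returns non-zero on success; map that to 0 / -ENOMEM. */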
+ return alternative_call(vpmu_ops.allocate_context, v) ? 0 : -ENOMEM;
+}
+#endif
+
+void cf_check vpmu_save_force(void *arg)
{
struct vcpu *v = arg;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
return 0;
}
+#ifdef CONFIG_MEM_SHARING
+static int cf_check amd_allocate_context(struct vcpu *v)
+{
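+ /* AMD allocates its VPMU context at initialise time, so this is never reached. */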
+ ASSERT_UNREACHABLE();
+ return 0;
+}
+#endif
+
static const struct arch_vpmu_ops __initconst_cf_clobber amd_vpmu_ops = {
.initialise = svm_vpmu_initialise,
.do_wrmsr = amd_vpmu_do_wrmsr,
.arch_vpmu_save = amd_vpmu_save,
.arch_vpmu_load = amd_vpmu_load,
.arch_vpmu_dump = amd_vpmu_dump,
+
+#ifdef CONFIG_MEM_SHARING
+ .allocate_context = amd_allocate_context,
+#endif
};
static const struct arch_vpmu_ops *__init common_init(void)
for ( i = 0; i < fixed_pmc_cnt; i++ )
rdmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, fixed_counters[i]);
for ( i = 0; i < arch_pmc_cnt; i++ )
+ {
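+ /* Save both halves of each programmable counter pair: value and event select. */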
rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
+ rdmsrl(MSR_P6_EVNTSEL(i), xen_pmu_cntr_pair[i].control);
+ }
if ( !is_hvm_vcpu(v) )
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
+ /* Save the MSR into the VPMU context to make it fork-friendly */
+ else if ( mem_sharing_enabled(v->domain) )
+ vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
+ &core2_vpmu_cxt->global_ctrl);
}
static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
core2_vpmu_cxt->global_ovf_ctrl = 0;
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, core2_vpmu_cxt->global_ctrl);
}
+ /* Restore the MSR from the VPMU context when loading a fork */
+ else if ( mem_sharing_is_fork(v->domain) )
+ vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL,
+ core2_vpmu_cxt->global_ctrl);
}
static int core2_vpmu_verify(struct vcpu *v)
return 0;
}
-static int core2_vpmu_alloc_resource(struct vcpu *v)
+static int cf_check core2_vpmu_alloc_resource(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct xen_pmu_intel_ctxt *core2_vpmu_cxt = NULL;
goto out_err;
}
- core2_vpmu_cxt = xzalloc_flex_struct(struct xen_pmu_intel_ctxt, regs,
- fixed_pmc_cnt + arch_pmc_cnt *
- (sizeof(struct xen_pmu_cntr_pair) /
- sizeof(*core2_vpmu_cxt->regs)));
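+ /* The hypervisor-only private context is a single uint64_t (enabled_cntrs). */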
+ vpmu->priv_context_size = sizeof(uint64_t);
+ vpmu->context_size = sizeof(struct xen_pmu_intel_ctxt) +
+ fixed_pmc_cnt * sizeof(uint64_t) +
+ arch_pmc_cnt * sizeof(struct xen_pmu_cntr_pair);
+ /* Pad the size so the allocation stays aligned to the base struct size */
+ vpmu->context_size = ROUNDUP(vpmu->context_size,
+ sizeof(struct xen_pmu_intel_ctxt));
+
+ core2_vpmu_cxt = _xzalloc(vpmu->context_size,
+ sizeof(struct xen_pmu_intel_ctxt));
p = xzalloc(uint64_t);
+
if ( !core2_vpmu_cxt || !p )
goto out_err;
.arch_vpmu_save = core2_vpmu_save,
.arch_vpmu_load = core2_vpmu_load,
.arch_vpmu_dump = core2_vpmu_dump,
+
+#ifdef CONFIG_MEM_SHARING
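+ /* Reuse the resource allocator to build a fork's VPMU context on demand. */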
+ .allocate_context = core2_vpmu_alloc_resource,
+#endif
};
const struct arch_vpmu_ops *__init core2_vpmu_init(void)
int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
void (*arch_vpmu_dump)(const struct vcpu *);
+
+#ifdef CONFIG_MEM_SHARING
+ int (*allocate_context)(struct vcpu *v);
+#endif
};
const struct arch_vpmu_ops *core2_vpmu_init(void);
u32 hw_lapic_lvtpc;
void *context; /* May be shared with PV guest */
void *priv_context; /* hypervisor-only */
+ size_t context_size;
+ size_t priv_context_size;
struct xen_pmu_data *xenpmu_data;
spinlock_t vpmu_lock;
};
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
void vpmu_save(struct vcpu *v);
+void cf_check vpmu_save_force(void *arg);
int vpmu_load(struct vcpu *v, bool_t from_guest);
void vpmu_dump(struct vcpu *v);
vpmu_load(next, 0);
}
+#ifdef CONFIG_MEM_SHARING
+int vpmu_allocate_context(struct vcpu *v);
+#else
+static inline int vpmu_allocate_context(struct vcpu *v)
+{
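+ /* Forking requires CONFIG_MEM_SHARING, so this stub should never be reached. */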
+ ASSERT_UNREACHABLE();
+ return 0;
+}
+#endif
+
#endif /* __ASM_X86_HVM_VPMU_H_*/
hvm_set_nonreg_state(cd_vcpu, &nrs);
}
+static int copy_vpmu(struct vcpu *d_vcpu, struct vcpu *cd_vcpu)
+{
+ struct vpmu_struct *d_vpmu = vcpu_vpmu(d_vcpu);
+ struct vpmu_struct *cd_vpmu = vcpu_vpmu(cd_vcpu);
+ int ret;
+
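+ /* Nothing to copy if the parent's VPMU was never initialised or allocated. */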
+ if ( !vpmu_are_all_set(d_vpmu, VPMU_INITIALIZED | VPMU_CONTEXT_ALLOCATED) )
+ return 0;
+
+ if ( (ret = vpmu_allocate_context(cd_vcpu)) )
+ return ret;
+
+ /*
+ * The VPMU subsystem only saves the context when a vCPU is context
+ * switched out; the relevant MSRs are not saved on a plain vmexit.
+ * Force a save here in case the parent vCPU's context is still loaded
+ * on a pCPU.
+ */
+ if ( vpmu_is_set(d_vpmu, VPMU_CONTEXT_LOADED) )
+ {
+ unsigned int pcpu = smp_processor_id();
+
+ if ( d_vpmu->last_pcpu != pcpu )
+ {
+ on_selected_cpus(cpumask_of(d_vpmu->last_pcpu),
+ vpmu_save_force, d_vcpu, 1);
+ vpmu_reset(d_vpmu, VPMU_CONTEXT_LOADED);
+ }
+ else
+ vpmu_save(d_vcpu);
+ }
+
+ if ( vpmu_is_set(d_vpmu, VPMU_RUNNING) )
+ vpmu_set(cd_vpmu, VPMU_RUNNING);
+
+ /* Make sure context gets (re-)loaded when scheduled next */
+ vpmu_reset(cd_vpmu, VPMU_CONTEXT_LOADED);
+
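+ /* Copy both the shared context and the hypervisor-only private context. */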
+ memcpy(cd_vpmu->context, d_vpmu->context, d_vpmu->context_size);
+ memcpy(cd_vpmu->priv_context, d_vpmu->priv_context,
+ d_vpmu->priv_context_size);
+
+ return 0;
+}
+
static int copy_vcpu_settings(struct domain *cd, const struct domain *d)
{
unsigned int i;
copy_domain_page(new_vcpu_info_mfn, vcpu_info_mfn);
}
+ ret = copy_vpmu(d_vcpu, cd_vcpu);
+ if ( ret )
+ return ret;
+
hvm_vmtrace_reset(cd_vcpu);
copy_vcpu_nonreg_state(d_vcpu, cd_vcpu);