return mcheck_amd_famXX;
}
+
+/* amd specific MCA MSR */
+int vmce_amd_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+{
+    switch (msr) {
+    case MSR_F10_MC4_MISC1:
+    case MSR_F10_MC4_MISC2:
+    case MSR_F10_MC4_MISC3:
+        /* Nothing to do: guest writes to these MSRs are ignored. */
+        break;
+    }
+
+    return 1;
+}
+
+int vmce_amd_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+{
+    switch (msr) {
+    case MSR_F10_MC4_MISC1:
+    case MSR_F10_MC4_MISC2:
+    case MSR_F10_MC4_MISC3:
+        /* Nothing to do: *val is left unchanged for these MSRs. */
+        break;
+    }
+
+    return 1;
+}
#include <asm/msr.h>
#include "mce.h"
+#include "vmce.h"
static struct timer mce_timer;
#include "mce.h"
#include "barrier.h"
#include "util.h"
+#include "vmce.h"
bool_t __read_mostly mce_disabled;
invbool_param("mce", mce_disabled);
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c);
-int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
-int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
-
-u64 mce_cap_init(void);
+uint64_t mce_cap_init(void);
extern unsigned int firstbank;
-int intel_mce_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
-int intel_mce_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
-
struct mcinfo_extended *intel_get_extended_msrs(
    struct mcinfo_global *mig, struct mc_info *mi);
void x86_mc_get_cpu_info(unsigned, uint32_t *, uint16_t *, uint16_t *,
                         uint32_t *, uint32_t *, uint32_t *, uint32_t *);
-#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \
- && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
-
/* Register a handler for machine check exceptions. */
typedef void (*x86_mce_vector_t)(struct cpu_user_regs *, long);
extern void x86_mce_vector_register(x86_mce_vector_t);
void *x86_mcinfo_reserve(struct mc_info *mi, int size);
void x86_mcinfo_dump(struct mc_info *mi);
-int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
- uint64_t gstatus);
-int inject_vmce(struct domain *d, int vcpu);
-
static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
{
    switch (boot_cpu_data.x86_vendor) {
#include "x86_mca.h"
#include "barrier.h"
#include "util.h"
+#include "vmce.h"
DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
}
/* intel specific MCA MSR */
-int intel_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
+int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
{
    unsigned int bank = msr - MSR_IA32_MC0_CTL2;
    return 1;
}
-int intel_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
+int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    unsigned int bank = msr - MSR_IA32_MC0_CTL2;
#include <asm/msr.h>
#include "mce.h"
+#include "vmce.h"
DEFINE_PER_CPU(struct mca_banks *, poll_bankmask);
static struct timer mce_timer;
#include <asm/system.h>
#include <asm/msr.h>
#include <asm/p2m.h>
+
#include "mce.h"
#include "x86_mca.h"
+#include "vmce.h"
/*
* MCG_SER_P: software error recovery supported
    switch ( boot_cpu_data.x86_vendor )
    {
    case X86_VENDOR_INTEL:
-        ret = intel_mce_rdmsr(v, msr, val);
+        ret = vmce_intel_rdmsr(v, msr, val);
+        break;
+    case X86_VENDOR_AMD:
+        ret = vmce_amd_rdmsr(v, msr, val);
        break;
    default:
        ret = 0;
 * For historical reasons, the bank number may be greater than GUEST_MC_BANK_NUM
 * when migrating from an old vMCE version to the new one.
 */
-static int bank_mce_wrmsr(struct vcpu *v, u32 msr, u64 val)
+static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
{
    int ret = 1;
    unsigned int bank = (msr - MSR_IA32_MC0_CTL) / 4;
    switch ( boot_cpu_data.x86_vendor )
    {
    case X86_VENDOR_INTEL:
-        ret = intel_mce_wrmsr(v, msr, val);
+        ret = vmce_intel_wrmsr(v, msr, val);
+        break;
+    case X86_VENDOR_AMD:
+        ret = vmce_amd_wrmsr(v, msr, val);
        break;
    default:
        ret = 0;
* = 0: Not handled, should be handled by other components
* > 0: Success
*/
-int vmce_wrmsr(u32 msr, u64 val)
+int vmce_wrmsr(uint32_t msr, uint64_t val)
{
    struct vcpu *cur = current;
    int ret = 1;
--- /dev/null
+#ifndef _MCHECK_VMCE_H
+#define _MCHECK_VMCE_H
+
+#include "x86_mca.h"
+
+int vmce_init(struct cpuinfo_x86 *c);
+
+#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \
+                             && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
+
+int is_vmce_ready(struct mcinfo_bank *bank, struct domain *d);
+int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
+
+int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
+int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
+int vmce_amd_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
+int vmce_amd_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
+
+int fill_vmsr_data(struct mcinfo_bank *mc_bank, struct domain *d,
+                   uint64_t gstatus);
+int inject_vmce(struct domain *d, int vcpu);
+
+#endif
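
For reference, a minimal caller-side sketch of the return convention documented
for vmce_wrmsr() above (0 = not handled, pass on to other components; > 0 =
handled by the vMCE layer). example_wrmsr_intercept() and example_generic_wrmsr()
are illustrative placeholders, not functions introduced by this series:

static int example_wrmsr_intercept(uint32_t msr, uint64_t val)
{
    int ret = vmce_wrmsr(msr, val);     /* vMCE layer gets first refusal */

    if ( ret > 0 )
        return 1;                       /* MCA MSR, fully handled by vMCE */

    /* ret == 0: not an MCA MSR, fall back to the generic MSR handler */
    return example_generic_wrmsr(msr, val);
}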