/* Machine Check Handler for AMD K8 family series */
static void k8_machine_check(struct cpu_user_regs *regs, long error_code)
{
- mcheck_cmn_handler(regs, error_code, mca_allbanks, NULL);
+ mcheck_cmn_handler(regs, error_code, mca_allbanks,
+ __get_cpu_var(mce_clear_banks));
+}
+
+static int k8_need_clearbank_scan(enum mca_source who, uint64_t status)
+{
+ if (who != MCA_MCE_SCAN)
+ return 1;
+
+ /*
+ * For a fatal error the bank shouldn't be cleared, so that the sticky
+ * bank has a chance to be handled by polling after reboot.
+ */
+ if ((status & MCi_STATUS_UC) && (status & MCi_STATUS_PCC))
+ return 0;
+
+ return 1;
}
/* AMD K8 machine check */
mce_handler_init();
x86_mce_vector_register(k8_machine_check);
+ mce_need_clearbank_register(k8_need_clearbank_scan);
for (i = 0; i < nr_mce_banks; i++) {
if (quirkflag == MCEQUIRK_K8_GART && i == 4) {
unsigned int __read_mostly nr_mce_banks;
unsigned int __read_mostly firstbank;
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, poll_bankmask);
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, no_cmci_banks);
+DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, mce_clear_banks);
+
static void intpose_init(void);
static void mcinfo_clear(struct mc_info *);
struct mca_banks *mca_allbanks;
return mca_allbanks ? 0:-ENOMEM;
}
-static void cpu_poll_bankmask_free(unsigned int cpu)
+static void cpu_bank_free(unsigned int cpu)
{
- struct mca_banks *mb = per_cpu(poll_bankmask, cpu);
+ struct mca_banks *poll = per_cpu(poll_bankmask, cpu);
+ struct mca_banks *clr = per_cpu(mce_clear_banks, cpu);
- mcabanks_free(mb);
+ mcabanks_free(poll);
+ mcabanks_free(clr);
}
-static int cpu_poll_bankmask_alloc(unsigned int cpu)
+static int cpu_bank_alloc(unsigned int cpu)
{
- struct mca_banks *mb;
+ struct mca_banks *poll = mcabanks_alloc();
+ struct mca_banks *clr = mcabanks_alloc();
- mb = mcabanks_alloc();
- if ( !mb )
+ if ( !poll || !clr )
+ {
+ mcabanks_free(poll);
+ mcabanks_free(clr);
return -ENOMEM;
+ }
- per_cpu(poll_bankmask, cpu) = mb;
+ per_cpu(poll_bankmask, cpu) = poll;
+ per_cpu(mce_clear_banks, cpu) = clr;
return 0;
}
switch ( action )
{
case CPU_UP_PREPARE:
- rc = cpu_poll_bankmask_alloc(cpu);
+ rc = cpu_bank_alloc(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
- cpu_poll_bankmask_free(cpu);
+ cpu_bank_free(cpu);
break;
default:
break;
if (mca_cap_init())
return;
+ /* Early MCE initialisation for BSP. */
+ if ( bsp && cpu_bank_alloc(smp_processor_id()) )
+ BUG();
+
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
inited = amd_mcheck_init(c);
set_in_cr4(X86_CR4_MCE);
if ( bsp )
- {
- /* Early MCE initialisation for BSP. */
- if ( cpu_poll_bankmask_alloc(0) )
- BUG();
register_cpu_notifier(&cpu_nfb);
- }
set_poll_bankmask(c);
return;
out:
- if (smp_processor_id() == 0)
+ if ( bsp )
{
+ cpu_bank_free(smp_processor_id());
mcabanks_free(mca_allbanks);
mca_allbanks = NULL;
}
DECLARE_PER_CPU(struct mca_banks *, poll_bankmask);
DECLARE_PER_CPU(struct mca_banks *, no_cmci_banks);
+DECLARE_PER_CPU(struct mca_banks *, mce_clear_banks);
extern bool_t cmci_support;
extern bool_t is_mc_panic;
#include "vmce.h"
#include "mcaction.h"
-DEFINE_PER_CPU(struct mca_banks *, mce_banks_owned);
-DEFINE_PER_CPU(struct mca_banks *, no_cmci_banks);
-DEFINE_PER_CPU(struct mca_banks *, mce_clear_banks);
+static DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, mce_banks_owned);
bool_t __read_mostly cmci_support = 0;
static bool_t __read_mostly ser_support = 0;
static bool_t __read_mostly mce_force_broadcast;
static void cpu_mcabank_free(unsigned int cpu)
{
- struct mca_banks *mb1, *mb2, *mb3;
+ struct mca_banks *cmci = per_cpu(no_cmci_banks, cpu);
+ struct mca_banks *owned = per_cpu(mce_banks_owned, cpu);
- mb1 = per_cpu(mce_clear_banks, cpu);
- mb2 = per_cpu(no_cmci_banks, cpu);
- mb3 = per_cpu(mce_banks_owned, cpu);
-
- mcabanks_free(mb1);
- mcabanks_free(mb2);
- mcabanks_free(mb3);
+ mcabanks_free(cmci);
+ mcabanks_free(owned);
}
static int cpu_mcabank_alloc(unsigned int cpu)
{
- struct mca_banks *mb1, *mb2, *mb3;
+ struct mca_banks *cmci = mcabanks_alloc();
+ struct mca_banks *owned = mcabanks_alloc();
- mb1 = mcabanks_alloc();
- mb2 = mcabanks_alloc();
- mb3 = mcabanks_alloc();
- if (!mb1 || !mb2 || !mb3)
+ if (!cmci || !owned)
goto out;
- per_cpu(mce_clear_banks, cpu) = mb1;
- per_cpu(no_cmci_banks, cpu) = mb2;
- per_cpu(mce_banks_owned, cpu) = mb3;
+ per_cpu(no_cmci_banks, cpu) = cmci;
+ per_cpu(mce_banks_owned, cpu) = owned;
return 0;
out:
- mcabanks_free(mb1);
- mcabanks_free(mb2);
- mcabanks_free(mb3);
+ mcabanks_free(cmci);
+ mcabanks_free(owned);
return -ENOMEM;
}
* urgent uses, intended for use from machine check exception handlers,
* and non-urgent uses intended for use from error pollers.
* Associated with each logout entry of whatever class is a data area
- * sized per the single argument to mctelem_init. mcelem_init should be
+ * sized per the single argument to mctelem_init. mctelem_init should be
* called from MCA init code before anybody has the chance to change the
* machine check vector with mcheck_mca_logout or to use mcheck_mca_logout.
*
* which will return a cookie referencing the oldest (first committed)
* entry of the requested class. Access the associated data using
* mctelem_dataptr and when finished use mctelem_consume_oldest_end - in the
- * begin .. end bracket you are guaranteed that the entry canot be freed
+ * begin .. end bracket you are guaranteed that the entry can't be freed
* even if it is ack'd elsewhere). Once the ultimate consumer of the
* telemetry has processed it to stable storage it should acknowledge
* the telemetry quoting the cookie id, at which point we will free
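For illustration only, a consumer following the pattern described in the comment above might look like the sketch below. It is not part of this patch: the MC_NONURGENT class name, the exact prototypes of mctelem_consume_oldest_begin/_end, mctelem_dataptr and mctelem_ack, and the stash_record() helper are assumptions inferred from that description.

/* Hypothetical helper: copy one record to stable storage. */
extern void stash_record(const struct mc_info *mci);

/* Sketch: drain the oldest-first queue of non-urgent telemetry. */
static void example_drain_nonurgent(void)
{
    mctelem_cookie_t cookie;

    while ( (cookie = mctelem_consume_oldest_begin(MC_NONURGENT)) != NULL )
    {
        /* Inside the begin .. end bracket the entry cannot be freed. */
        struct mc_info *mci = mctelem_dataptr(cookie);

        stash_record(mci);               /* hypothetical: copy it out */

        mctelem_consume_oldest_end(cookie);

        /* Once safely recorded, acknowledge so the entry can be freed. */
        mctelem_ack(MC_NONURGENT, cookie);
    }
}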
#include "mce.h"
#include "vmce.h"
-DEFINE_PER_CPU(struct mca_banks *, poll_bankmask);
static struct timer mce_timer;
#define MCE_PERIOD MILLISECS(8000)