This fixes the wrong use of literal vector 0xF7 with an "int"
instruction (invalidated by 25113:
14609be41f36) and the fact that doing
the injection via a software interrupt was never valid anyway (because
cmci_interrupt() acks the LAPIC, which does the wrong thing if the
interrupt didn't get delivered through it).
In order to do the latter, the patch introduces send_IPI_self(), at once
removing two open-coded uses of "genapic" in the IRQ handling code.
Reported-by: Yongjie Ren <yongjie.ren@intel.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Yongjie Ren <yongjie.ren@intel.com>
Acked-by: Keir Fraser <keir@xen.org>
bool_t is_mc_panic;
unsigned int __read_mostly nr_mce_banks;
unsigned int __read_mostly firstbank;
+uint8_t __read_mostly cmci_apic_vector;
DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, poll_bankmask);
DEFINE_PER_CPU_READ_MOSTLY(struct mca_banks *, no_cmci_banks);
__asm__ __volatile__("int $0x12");
}
-static void x86_cmci_inject(void *data)
-{
- printk("Simulating CMCI on cpu %d\n", smp_processor_id());
- __asm__ __volatile__("int $0xf7");
-}
-
#if BITS_PER_LONG == 64
#define ID2COOKIE(id) ((mctelem_cookie_t)(id))
on_selected_cpus(cpumap, x86_mc_mceinject, NULL, 1);
break;
case XEN_MC_INJECT_TYPE_CMCI:
- if ( !cmci_support )
+ if ( !cmci_apic_vector )
ret = x86_mcerr(
"No CMCI supported in platform\n", -EINVAL);
else
- on_selected_cpus(cpumap, x86_cmci_inject, NULL, 1);
+ {
+ if ( cpumask_test_cpu(smp_processor_id(), cpumap) )
+ send_IPI_self(cmci_apic_vector);
+ send_IPI_mask(cpumap, cmci_apic_vector);
+ }
break;
default:
ret = x86_mcerr("Wrong mca type\n", -EINVAL);
mcheck_intel
};
+extern uint8_t cmci_apic_vector;
+
/* Init functions */
enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *c);
enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c, bool_t bsp);
{
u32 l, apic;
int cpu = smp_processor_id();
- static uint8_t cmci_apic_vector;
if (!mce_available(c) || !cmci_support) {
if (opt_cpu_info)
* to myself.
*/
if (irr & (1 << (vector % 32))) {
- genapic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+ send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
TRACE_3D(TRC_HW_IRQ_MOVE_CLEANUP_DELAY,
irq, vector, smp_processor_id());
goto unlock;
cpumask_and(&cleanup_mask, desc->arch.old_cpu_mask, &cpu_online_map);
desc->arch.move_cleanup_count = cpumask_weight(&cleanup_mask);
- genapic->send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
desc->arch.move_in_progress = 0;
}
genapic->send_IPI_mask(mask, vector);
}
+/* Deliver an IPI with the given vector to the local CPU via the genapic hook. */
+void send_IPI_self(int vector)
+{
+    genapic->send_IPI_self(vector);
+}
+
/*
* Some notes on x86 processor bugs affecting SMP operation:
*
void smp_send_nmi_allbutself(void);
-void send_IPI_mask(const cpumask_t *mask, int vector);
+void send_IPI_mask(const cpumask_t *, int vector);
+void send_IPI_self(int vector);
extern void (*mtrr_hook) (void);