From: Keir Fraser
Date: Wed, 15 Sep 2010 08:00:35 +0000 (+0100)
Subject: C6 state with EOI issue fix for some Intel processors
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~11482
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=95807bcae47efaf96888119b2a0b4b921ad737df;p=xen.git

C6 state with EOI issue fix for some Intel processors

Some Intel processors are subject to erratum AAJ72:

"AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During
an Interrupt Service Routine"

If core C6 is entered after the start of an interrupt service routine but
before a write to the APIC EOI register, the core may not send an EOI
transaction (if needed) and further interrupts from the same priority
level or lower may be blocked.

This patch fixes the issue by checking whether an APIC EOI is still
pending before entering a deep Cx state. If one is, power->safe_state is
used instead of the deep Cx state, so the situation above cannot occur.

Signed-off-by: Sheng Yang
Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/acpi/cpu_idle.c b/xen/arch/x86/acpi/cpu_idle.c
index cdf0f492b4..a4ebbb52e7 100644
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -351,6 +351,31 @@ static int sched_has_urgent_vcpu(void)
     return atomic_read(&this_cpu(schedule_data).urgent_count);
 }
 
+/*
+ * "AAJ72. EOI Transaction May Not be Sent if Software Enters Core C6 During
+ * an Interrupt Service Routine"
+ *
+ * There is an erratum on some Core i7 processors whereby an EOI transaction
+ * may not be sent if software enters core C6 during an interrupt service
+ * routine. So we don't enter deep Cx state if there is an EOI pending.
+ */
+bool_t errata_c6_eoi_workaround(void)
+{
+    static bool_t fix_needed = -1;
+
+    if ( unlikely(fix_needed == -1) )
+    {
+        int model = boot_cpu_data.x86_model;
+        fix_needed = (cpu_has_apic && !directed_eoi_enabled &&
+                      (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+                      (boot_cpu_data.x86 == 6) &&
+                      ((model == 0x1a) || (model == 0x1e) || (model == 0x1f) ||
+                       (model == 0x25) || (model == 0x2c) || (model == 0x2f)));
+    }
+
+    return (fix_needed && cpu_has_pending_apic_eoi());
+}
+
 static void acpi_processor_idle(void)
 {
     struct acpi_processor_power *power = processor_powers[smp_processor_id()];
@@ -401,6 +426,9 @@ static void acpi_processor_idle(void)
         return;
     }
 
+    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
+        cx = power->safe_state;
+
     power->last_state = cx;
 
     /*
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 7fd967be8a..fa5127e1ac 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -765,6 +765,11 @@ struct pending_eoi {
 static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_DYNAMIC_VECTORS]);
 #define pending_eoi_sp(p) ((p)[NR_DYNAMIC_VECTORS-1].vector)
 
+bool_t cpu_has_pending_apic_eoi(void)
+{
+    return (pending_eoi_sp(this_cpu(pending_eoi)) != 0);
+}
+
 static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
 {
     if ( d->arch.pirq_eoi_map )
diff --git a/xen/include/asm-x86/irq.h b/xen/include/asm-x86/irq.h
index d94647be29..2e80dc948a 100644
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -147,4 +147,6 @@ void irq_set_affinity(struct irq_desc *, const cpumask_t *mask);
 #define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
 #define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
 
+bool_t cpu_has_pending_apic_eoi(void);
+
 #endif /* _ASM_HW_IRQ_H */
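
For reference, the hardware gate in errata_c6_eoi_workaround() (Intel vendor,
family 6, and the Nehalem/Westmere model list) can be reproduced from user
space. The sketch below is illustrative only and not part of the patch; it
assumes GCC's <cpuid.h> and deliberately omits the parts that need hypervisor
state (directed_eoi_enabled and the per-CPU pending-EOI stack).

/*
 * Hypothetical user-space check of the AAJ72 model gate; not part of
 * the Xen patch. Build with: gcc -o aaj72_check aaj72_check.c
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 0: EBX holds "Genu" (0x756e6547) on GenuineIntel parts. */
    if ( !__get_cpuid(0, &eax, &ebx, &ecx, &edx) )
        return 1;
    int intel = (ebx == 0x756e6547);

    /* Leaf 1: family in bits 11:8, model in 7:4, extended model in 19:16. */
    if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
        return 1;
    unsigned int family = (eax >> 8) & 0xf;
    unsigned int model  = ((eax >> 4) & 0xf) | ((eax >> 12) & 0xf0);

    /* Same family 6 model list the patch uses for the workaround. */
    int affected = intel && (family == 6) &&
                   (model == 0x1a || model == 0x1e || model == 0x1f ||
                    model == 0x25 || model == 0x2c || model == 0x2f);

    printf("family 0x%x, model 0x%x: AAJ72 workaround %s\n",
           family, model, affected ? "would apply" : "not needed");
    return 0;
}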