#include <asm/hardirq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
-#include <asm/asm_defns.h> /* for BUILD_SMP_INTERRUPT */
#include <mach_apic.h>
#include <io_ports.h>
#include <xen/kexec.h>
bool_t __read_mostly x2apic_enabled = 0;
bool_t __read_mostly directed_eoi_enabled = 0;
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-__asm__(".section .text");
-BUILD_SMP_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
-BUILD_SMP_INTERRUPT(event_check_interrupt,EVENT_CHECK_VECTOR)
-BUILD_SMP_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_SMP_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-
-/*
- * Every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-BUILD_SMP_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-BUILD_SMP_INTERRUPT(pmu_apic_interrupt,PMU_APIC_VECTOR)
-BUILD_SMP_INTERRUPT(cmci_interrupt, CMCI_APIC_VECTOR)
-#ifdef CONFIG_X86_MCE_THERMAL
-BUILD_SMP_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
-#endif
-
static int modern_apic(void)
{
unsigned int lvr, version;
smp_intr_init();
/* self generated IPI for local APIC timer */
- set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
+ set_direct_apic_vector(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
/* IPI vectors for APIC spurious and error interrupts */
- set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
- set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
+ set_direct_apic_vector(SPURIOUS_APIC_VECTOR, spurious_interrupt);
+ set_direct_apic_vector(ERROR_APIC_VECTOR, error_interrupt);
/* Performance Counters Interrupt */
- set_intr_gate(PMU_APIC_VECTOR, pmu_apic_interrupt);
+ set_direct_apic_vector(PMU_APIC_VECTOR, pmu_apic_interrupt);
/* CMCI Correctable Machine Check Interrupt */
- set_intr_gate(CMCI_APIC_VECTOR, cmci_interrupt);
+ set_direct_apic_vector(CMCI_APIC_VECTOR, cmci_interrupt);
/* thermal monitor LVT interrupt, for P4 and latest Intel CPU*/
#ifdef CONFIG_X86_MCE_THERMAL
- set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+ set_direct_apic_vector(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
}
return apic_tmict || !timeout;
}
-fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
+/*
+ * LOCAL_TIMER_VECTOR handler, now reached through do_IRQ()'s
+ * direct_apic_vector dispatch, which owns the irq_count/irq_regs/irq_enter
+ * bookkeeping — hence those updates are dropped here.  Real timer work is
+ * deferred to TIMER_SOFTIRQ.
+ */
+void apic_timer_interrupt(struct cpu_user_regs * regs)
{
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
perfc_incr(apic_timer);
-    this_cpu(irq_count)++;
raise_softirq(TIMER_SOFTIRQ);
-    set_irq_regs(old_regs);
}
static DEFINE_PER_CPU(bool_t, state_dump_pending);
/*
* Spurious interrupts should _never_ happen with our APIC/SMP architecture.
*/
-fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
+/*
+ * SPURIOUS_APIC_VECTOR handler.  Dispatched from do_IRQ(), which now does
+ * irq_count/irq_regs/irq_enter/irq_exit centrally, so only the diagnostic
+ * body remains; the out label survives as the jump target of the (elided)
+ * vectored-interrupt check.
+ */
+void spurious_interrupt(struct cpu_user_regs *regs)
{
unsigned long v;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-
-    this_cpu(irq_count)++;
-    irq_enter();
/*
* Check if this is a vectored interrupt (most likely, as this is probably
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should "
"never happen.\n", smp_processor_id());
- out:
-    irq_exit();
-    set_irq_regs(old_regs);
+/* Empty statement: a label may not immediately precede the closing brace. */
+out: ;
}
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
-fastcall void smp_error_interrupt(struct cpu_user_regs *regs)
+/*
+ * ERROR_APIC_VECTOR handler: read and clear the ESR, then log the status.
+ * Context entry/exit and irq_count accounting moved into do_IRQ().
+ */
+void error_interrupt(struct cpu_user_regs *regs)
{
unsigned long v, v1;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-    this_cpu(irq_count)++;
-    irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
*/
printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
smp_processor_id(), v , v1);
-    irq_exit();
-    set_irq_regs(old_regs);
}
/*
* This interrupt handles performance counters interrupt
*/
-fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
+/*
+ * PMU_APIC_VECTOR handler: ack, then hand the full register frame to the
+ * HVM PMU logic.  Accounting is done by the do_IRQ() dispatcher.
+ */
+void pmu_apic_interrupt(struct cpu_user_regs *regs)
{
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
-    this_cpu(irq_count)++;
hvm_do_pmu_interrupt(regs);
-    set_irq_regs(old_regs);
}
/*
#include "x86_mca.h"
/* Machine Check Handler For AMD Athlon/Duron */
-static fastcall void k7_machine_check(struct cpu_user_regs * regs, long error_code)
+/*
+ * AMD K7 machine-check handler; loses the fastcall attribute along with the
+ * assembly stubs.  The ack_APIC_irq() is dropped here — presumably because
+ * acking is now centralised in the thermal_interrupt() wrapper rather than
+ * in individual vendor handlers; NOTE(review): confirm the MCE path needs
+ * no APIC EOI of its own.
+ */
+static void k7_machine_check(struct cpu_user_regs * regs, long error_code)
{
int recover = 1;
uint64_t msr_content, mcgst;
unsigned int cpu = smp_processor_id();
static DEFINE_PER_CPU(s_time_t, next);
-    ack_APIC_irq();
if (NOW() < per_cpu(next, cpu))
return;
}
/* Thermal interrupt handler for this CPU setup */
-static void (*__read_mostly vendor_thermal_interrupt)(struct cpu_user_regs *regs)
- = unexpected_thermal_interrupt;
+static void (*__read_mostly vendor_thermal_interrupt)(
+ struct cpu_user_regs *regs) = unexpected_thermal_interrupt;
-fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs)
+/*
+ * THERMAL_APIC_VECTOR handler.  The APIC ack is now done once here, before
+ * the vendor-specific handler runs (it was previously done inside the
+ * vendor handlers, cf. the removal in k7_machine_check()).  irq_enter/exit
+ * and irq_count moved into do_IRQ().
+ */
+void thermal_interrupt(struct cpu_user_regs *regs)
{
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-    this_cpu(irq_count)++;
-    irq_enter();
+    ack_APIC_irq();
vendor_thermal_interrupt(regs);
-    irq_exit();
-    set_irq_regs(old_regs);
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
mce_set_owner();
}
-fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs)
+/*
+ * CMCI_APIC_VECTOR handler: ack, log corrected-MC telemetry for the banks
+ * this CPU owns, and dismiss the cookie when (per the elided logic) it is
+ * not consumed.  Dispatcher-side bookkeeping removed as elsewhere.
+ */
+void cmci_interrupt(struct cpu_user_regs *regs)
{
mctelem_cookie_t mctc;
struct mca_summary bs;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
-    this_cpu(irq_count)++;
-    irq_enter();
mctc = mcheck_mca_logout(
MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL);
}
} else if (mctc != NULL)
mctelem_dismiss(mctc);
-
-    irq_exit();
-    set_irq_regs(old_regs);
}
/* MCA */
vector &= INTR_INFO_VECTOR_MASK;
HVMTRACE_1D(INTR, vector);
- switch ( vector )
- {
- case IRQ_MOVE_CLEANUP_VECTOR:
- smp_irq_move_cleanup_interrupt(regs);
- break;
- case LOCAL_TIMER_VECTOR:
- smp_apic_timer_interrupt(regs);
- break;
- case EVENT_CHECK_VECTOR:
- smp_event_check_interrupt(regs);
- break;
- case INVALIDATE_TLB_VECTOR:
- smp_invalidate_interrupt();
- break;
- case CALL_FUNCTION_VECTOR:
- smp_call_function_interrupt(regs);
- break;
- case SPURIOUS_APIC_VECTOR:
- smp_spurious_interrupt(regs);
- break;
- case ERROR_APIC_VECTOR:
- smp_error_interrupt(regs);
- break;
- case CMCI_APIC_VECTOR:
- smp_cmci_interrupt(regs);
- break;
- case PMU_APIC_VECTOR:
- smp_pmu_apic_interrupt(regs);
- break;
-#ifdef CONFIG_X86_MCE_THERMAL
- case THERMAL_APIC_VECTOR:
- smp_thermal_interrupt(regs);
- break;
-#endif
- default:
- regs->entry_vector = vector;
- do_IRQ(regs);
- break;
- }
+ regs->entry_vector = vector;
+ do_IRQ(regs);
}
static void wbinvd_ipi(void *info)
static void dump_irqs(unsigned char key);
-fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
+/*
+ * IRQ_MOVE_CLEANUP_VECTOR handler: walk all dynamic vectors on this CPU and
+ * release those left over from IRQ migration (body elided here).  Now a
+ * plain C function registered via set_direct_apic_vector().
+ */
+void irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
{
unsigned vector, me;
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
-    this_cpu(irq_count)++;
-    irq_enter();
me = smp_processor_id();
for (vector = FIRST_DYNAMIC_VECTOR; vector < NR_VECTORS; vector++) {
unlock:
spin_unlock(&desc->lock);
}
-
-    irq_exit();
-    set_irq_regs(old_regs);
}
static void send_cleanup_vector(struct irq_desc *desc)
DEFINE_PER_CPU(unsigned int, irq_count);
+/*
+ * Per-vector table of C handlers that do_IRQ() invokes directly for vectors
+ * with no IRQ mapping (IPIs and local APIC sources), replacing the old
+ * per-vector assembly stubs.
+ */
+static void (*direct_apic_vector[NR_VECTORS])(struct cpu_user_regs *);
+/* Register @handler for @vector; each vector may be registered only once. */
+void set_direct_apic_vector(
+    uint8_t vector, void (*handler)(struct cpu_user_regs *))
+{
+    BUG_ON(direct_apic_vector[vector] != NULL);
+    direct_apic_vector[vector] = handler;
+}
+
void do_IRQ(struct cpu_user_regs *regs)
{
struct irqaction *action;
struct cpu_user_regs *old_regs = set_irq_regs(regs);
perfc_incr(irqs);
-
this_cpu(irq_count)++;
+/* irq_count/irq_enter now done once here for every vector, including the */
+/* direct APIC vectors, so the individual handlers no longer need them.   */
+    irq_enter();
if (irq < 0) {
-    ack_APIC_irq();
-    printk("%s: %d.%d No irq handler for vector (irq %d)\n",
-           __func__, smp_processor_id(), vector, irq);
-    set_irq_regs(old_regs);
-    TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
-    return;
+/* No IRQ mapping: either a registered direct APIC/IPI handler (which does */
+/* its own EOI), or a genuinely unmapped — and thus logged — vector.       */
+    if (direct_apic_vector[vector] != NULL) {
+        (*direct_apic_vector[vector])(regs);
+    } else {
+        ack_APIC_irq();
+        printk("%s: %d.%d No irq handler for vector (irq %d)\n",
+               __func__, smp_processor_id(), vector, irq);
+        TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
+    }
+    goto out_no_unlock;
}
-    irq_enter();
-
desc = irq_to_desc(irq);
spin_lock(&desc->lock);
desc->handler->end(desc, regs->entry_vector);
out_no_end:
spin_unlock(&desc->lock);
+/* Exit path shared with the no-IRQ case above (no desc lock held there). */
+ out_no_unlock:
irq_exit();
set_irq_regs(old_regs);
}
static const void *flush_va;
static unsigned int flush_flags;
-fastcall void smp_invalidate_interrupt(void)
+/*
+ * INVALIDATE_TLB_VECTOR handler.  Gains an (unused) regs parameter so its
+ * signature matches the direct_apic_vector handler type.  Flushes locally
+ * as requested via flush_va/flush_flags, then signals completion by
+ * clearing this CPU from flush_cpumask.
+ */
+void invalidate_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
perfc_incr(ipis);
-    this_cpu(irq_count)++;
-    irq_enter();
if ( !__sync_local_execstate() ||
(flush_flags & (FLUSH_TLB_GLOBAL | FLUSH_CACHE)) )
flush_area_local(flush_va, flush_flags);
cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
-    irq_exit();
}
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
}
-fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
+/*
+ * EVENT_CHECK_VECTOR IPI: nothing to do beyond acking — its only purpose is
+ * to kick the target CPU out of its current context.  Dispatched from
+ * do_IRQ() via direct_apic_vector, which already increments irq_count, so
+ * the local increment is removed here like in every other direct handler
+ * (leaving it would double-count each event-check IPI).
+ */
+void event_check_interrupt(struct cpu_user_regs *regs)
{
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
perfc_incr(ipis);
-    this_cpu(irq_count)++;
-    set_irq_regs(old_regs);
}
static void __smp_call_function_interrupt(void)
irq_exit();
}
-fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
+/*
+ * CALL_FUNCTION_VECTOR handler: ack, then run the queued cross-CPU function
+ * via the shared helper.  Accounting/irq_regs handling moved into do_IRQ().
+ */
+void call_function_interrupt(struct cpu_user_regs *regs)
{
-    struct cpu_user_regs *old_regs = set_irq_regs(regs);
-
ack_APIC_irq();
perfc_incr(ipis);
-    this_cpu(irq_count)++;
__smp_call_function_interrupt();
-    set_irq_regs(old_regs);
}
cpumask_copy(irq_to_desc(irq)->arch.cpu_mask, &cpu_online_map);
}
- /* IPI for cleanuping vectors after irq move */
- set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
-
- /* IPI for event checking. */
- set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
-
- /* IPI for invalidation */
- set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
-
- /* IPI for generic function call */
- set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+ /* Direct IPI vectors. */
+ set_direct_apic_vector(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+ set_direct_apic_vector(EVENT_CHECK_VECTOR, event_check_interrupt);
+ set_direct_apic_vector(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+ set_direct_apic_vector(CALL_FUNCTION_VECTOR, call_function_interrupt);
}
#define platform_legacy_irq(irq) ((irq) < 16)
-fastcall void event_check_interrupt(void);
-fastcall void invalidate_interrupt(void);
-fastcall void call_function_interrupt(void);
-fastcall void apic_timer_interrupt(void);
-fastcall void error_interrupt(void);
-fastcall void pmu_apic_interrupt(void);
-fastcall void spurious_interrupt(void);
-fastcall void thermal_interrupt(void);
-fastcall void cmci_interrupt(void);
-fastcall void irq_move_cleanup_interrupt(void);
-
-fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_invalidate_interrupt(void);
-fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs);
-fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
+void event_check_interrupt(struct cpu_user_regs *regs);
+void invalidate_interrupt(struct cpu_user_regs *regs);
+void call_function_interrupt(struct cpu_user_regs *regs);
+void apic_timer_interrupt(struct cpu_user_regs *regs);
+void error_interrupt(struct cpu_user_regs *regs);
+void pmu_apic_interrupt(struct cpu_user_regs *regs);
+void spurious_interrupt(struct cpu_user_regs *regs);
+void thermal_interrupt(struct cpu_user_regs *regs);
+void cmci_interrupt(struct cpu_user_regs *regs);
+void irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
+
+void set_direct_apic_vector(
+ uint8_t vector, void (*handler)(struct cpu_user_regs *));
void do_IRQ(struct cpu_user_regs *regs);
#define FIXUP_RING0_GUEST_STACK
#endif
-#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
-#define XBUILD_SMP_INTERRUPT(x,v) \
-__asm__( \
- "\n"__ALIGN_STR"\n" \
- ".globl " STR(x) "\n\t" \
- STR(x) ":\n\t" \
- "pushl $"#v"<<16\n\t" \
- STR(FIXUP_RING0_GUEST_STACK) \
- STR(SAVE_ALL(1f,1f)) "\n\t" \
- "1:movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "call "STR(smp_##x)"\n\t" \
- "addl $4,%esp\n\t" \
- "jmp ret_from_intr\n");
-
#define BUILD_COMMON_IRQ() \
__asm__( \
"\n" __ALIGN_STR"\n" \
#define REX64_PREFIX "rex64/"
#endif
-#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
-#define XBUILD_SMP_INTERRUPT(x,v) \
-__asm__( \
- "\n"__ALIGN_STR"\n" \
- ".globl " STR(x) "\n\t" \
- STR(x) ":\n\t" \
- "pushq $0\n\t" \
- "movl $"#v",4(%rsp)\n\t" \
- STR(SAVE_ALL) \
- "movq %rsp,%rdi\n\t" \
- "callq "STR(smp_##x)"\n\t" \
- "jmp ret_from_intr\n");
-
#define BUILD_COMMON_IRQ() \
__asm__( \
"\n" __ALIGN_STR"\n" \