spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void __eoi_IO_APIC_irq(unsigned int irq)
+static void __eoi_IO_APIC_irq(struct irq_desc *desc)
{
- struct irq_pin_list *entry = irq_2_pin + irq;
- unsigned int pin, vector = IO_APIC_VECTOR(irq);
+ struct irq_pin_list *entry = irq_2_pin + desc->irq; /* pin list is still indexed by IRQ number */
+ unsigned int pin, vector = desc->arch.vector; /* vector now read from the descriptor, not irq_vector[] */
 for (;;) {
 pin = entry->pin;
 }
}
-static void eoi_IO_APIC_irq(unsigned int irq)
+static void eoi_IO_APIC_irq(struct irq_desc *desc)
{
 unsigned long flags;
 spin_lock_irqsave(&ioapic_lock, flags);
- __eoi_IO_APIC_irq(irq);
+ __eoi_IO_APIC_irq(desc); /* locked wrapper now forwards the descriptor to the unlocked helper */
 spin_unlock_irqrestore(&ioapic_lock, flags);
}
struct irq_pin_list *entry = irq_2_pin + i;
if (entry->pin < 0)
continue;
- printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
+ printk(KERN_DEBUG "IRQ%d ", irq_to_desc(i)->arch.vector);
for (;;) {
printk("-> %d:%d", entry->apic, entry->pin);
if (!entry->next)
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*/
- i = IO_APIC_VECTOR(desc->irq);
+ i = desc->arch.vector;
v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
{
if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
{
- eoi_IO_APIC_irq(desc->irq);
+ eoi_IO_APIC_irq(desc);
return;
}
mask_IO_APIC_irq(desc);
- eoi_IO_APIC_irq(desc->irq);
+ eoi_IO_APIC_irq(desc);
if ( (desc->status & IRQ_MOVE_PENDING) &&
!io_apic_level_ack_pending(desc->irq) )
move_masked_irq(desc);
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*/
- i = IO_APIC_VECTOR(desc->irq);
+ i = desc->arch.vector;
/* Manually EOI the old vector if we are moving to the new */
if ( vector && i != vector )
int irq;
/* Xen: This is way simpler than the Linux implementation. */
for (irq = 0; platform_legacy_irq(irq); irq++)
- if (IO_APIC_IRQ(irq) && !IO_APIC_VECTOR(irq))
+ if (IO_APIC_IRQ(irq) && !irq_to_vector(irq))
make_8259A_irq(irq);
}
vmask_t global_used_vector_map;
-u8 __read_mostly *irq_vector;
struct irq_desc __read_mostly *irq_desc = NULL;
static DECLARE_BITMAP(used_vectors, NR_VECTORS);
set_bit(vector, desc->arch.used_vectors);
}
desc->arch.used = IRQ_USED;
- if (IO_APIC_IRQ(irq))
- irq_vector[irq] = vector;
return 0;
}
BUG_ON(irq >= nr_irqs || irq < 0);
if (IO_APIC_IRQ(irq))
- vector = irq_vector[irq];
+ {
+ vector = irq_to_desc(irq)->arch.vector;
+ if (vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR)
+ vector = 0;
+ }
else if (MSI_IRQ(irq))
vector = irq_to_desc(irq)->arch.vector;
else
this_cpu(vector_irq)[vector] = -1;
irq_desc = xzalloc_array(struct irq_desc, nr_irqs);
- irq_vector = xzalloc_array(u8, nr_irqs_gsi);
- if ( !irq_desc || !irq_vector )
+ if ( !irq_desc )
return -ENOMEM;
for (irq = 0; irq < nr_irqs_gsi; irq++) {
vmask_t *irq_used_vectors = NULL;
old_vector = irq_to_vector(irq);
- if (old_vector) {
+ if (old_vector > 0) {
cpumask_and(&tmp_mask, mask, &cpu_online_map);
if (cpumask_intersects(&tmp_mask, desc->arch.cpu_mask)) {
desc->arch.vector = old_vector;
/* Found one! */
current_vector = vector;
current_offset = offset;
- if (old_vector) {
+ if (old_vector > 0) {
desc->arch.move_in_progress = 1;
cpumask_copy(desc->arch.old_cpu_mask, desc->arch.cpu_mask);
desc->arch.old_vector = desc->arch.vector;
|| (desc->arch.used_vectors == irq_used_vectors));
desc->arch.used_vectors = irq_used_vectors;
- if (IO_APIC_IRQ(irq))
- irq_vector[irq] = vector;
-
if ( desc->arch.used_vectors )
{
ASSERT(!test_bit(vector, desc->arch.used_vectors));
* IRQ0 must be given a fixed assignment and initialized,
* because it's used before the IO-APIC is set up.
*/
- irq_vector[0] = FIRST_HIPRIORITY_VECTOR;
+ irq_to_desc(0)->arch.vector = FIRST_HIPRIORITY_VECTOR;
/*
* Also ensure serial interrupts are high priority. We do not
{
if ( (irq = serial_irq(seridx)) < 0 )
continue;
- irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
per_cpu(vector_irq, cpu)[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
irq_to_desc(irq)->arch.vector = FIRST_HIPRIORITY_VECTOR + seridx + 1;
cpumask_copy(irq_to_desc(irq)->arch.cpu_mask, &cpu_online_map);
#define IO_APIC_IRQ(irq) (platform_legacy_irq(irq) ? \
(1 << (irq)) & io_apic_irqs : \
(irq) < nr_irqs_gsi)
-#define IO_APIC_VECTOR(irq) (irq_vector[irq])
#define MSI_IRQ(irq) ((irq) >= nr_irqs_gsi && (irq) < nr_irqs)
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern u8 *irq_vector;
-
extern bool_t opt_noirqbalance;
#define OPT_IRQ_VECTOR_MAP_DEFAULT 0 /* Do the default thing */