init_8259A(0);
- for ( i = 0; i < NR_IRQS; i++ )
+ for ( i = 0; i < NR_VECTORS; i++ )
{
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].handler = &no_irq_type;
.set_affinity = set_ioapic_affinity_vector,
};
-static void mask_msi_vector(unsigned int vector)
-{
- mask_msi_irq(vector);
-}
-
-static void unmask_msi_vector(unsigned int vector)
-{
- unmask_msi_irq(vector);
-}
-
static unsigned int startup_msi_vector(unsigned int vector)
{
dprintk(XENLOG_INFO, "startup msi vector %x\n", vector);
- unmask_msi_irq(vector);
+ unmask_msi_vector(vector);
return 0;
}
static void shutdown_msi_vector(unsigned int vector)
{
dprintk(XENLOG_INFO, "shutdown msi vector %x\n", vector);
- mask_msi_irq(vector);
+ mask_msi_vector(vector);
}
static void set_msi_affinity_vector(unsigned int vector, cpumask_t cpu_mask)
{
set_native_irq_info(vector, cpu_mask);
- set_msi_irq_affinity(vector, cpu_mask);
+ set_msi_affinity(vector, cpu_mask);
}
/*
int opt_noirqbalance = 0;
boolean_param("noirqbalance", opt_noirqbalance);
-irq_desc_t irq_desc[NR_IRQS];
+irq_desc_t irq_desc[NR_VECTORS];
static void __do_IRQ_guest(int vector);
static DEFINE_PER_CPU(struct pending_eoi, pending_eoi[NR_VECTORS]);
#define pending_eoi_sp(p) ((p)[NR_VECTORS-1].vector)
-static struct timer irq_guest_eoi_timer[NR_IRQS];
+static struct timer irq_guest_eoi_timer[NR_VECTORS];
static void irq_guest_eoi_timer_fn(void *data)
{
irq_desc_t *desc = data;
void fixup_irqs(cpumask_t map)
{
- unsigned int irq, sp;
+ unsigned int vector, sp;
static int warned;
irq_guest_action_t *action;
struct pending_eoi *peoi;
/* Direct all future interrupts away from this CPU. */
- for ( irq = 0; irq < NR_IRQS; irq++ )
+ for ( vector = 0; vector < NR_VECTORS; vector++ )
{
cpumask_t mask;
- if ( irq == 2 )
+ if ( vector_to_irq(vector) == 2 )
continue;
- cpus_and(mask, irq_desc[irq].affinity, map);
+ cpus_and(mask, irq_desc[vector].affinity, map);
if ( any_online_cpu(mask) == NR_CPUS )
{
- printk("Breaking affinity for irq %i\n", irq);
+ printk("Breaking affinity for vector %u (irq %i)\n",
+ vector, vector_to_irq(vector));
mask = map;
}
- if ( irq_desc[irq].handler->set_affinity )
- irq_desc[irq].handler->set_affinity(irq, mask);
- else if ( irq_desc[irq].action && !(warned++) )
- printk("Cannot set affinity for irq %i\n", irq);
+ if ( irq_desc[vector].handler->set_affinity )
+ irq_desc[vector].handler->set_affinity(vector, mask);
+ else if ( irq_desc[vector].action && !(warned++) )
+ printk("Cannot set affinity for vector %u (irq %i)\n",
+ vector, vector_to_irq(vector));
}
/* Service any interrupts that beat us in the re-direction race. */
local_irq_disable();
/* Clean up cpu_eoi_map of every interrupt to exclude this CPU. */
- for ( irq = 0; irq < NR_IRQS; irq++ )
+ for ( vector = 0; vector < NR_VECTORS; vector++ )
{
- if ( !(irq_desc[irq].status & IRQ_GUEST) )
+ if ( !(irq_desc[vector].status & IRQ_GUEST) )
continue;
- action = (irq_guest_action_t *)irq_desc[irq].action;
+ action = (irq_guest_action_t *)irq_desc[vector].action;
cpu_clear(smp_processor_id(), action->cpu_eoi_map);
}
entry->msg = *msg;
}
-void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_msi_affinity(unsigned int vector, cpumask_t mask)
{
- struct msi_desc *desc = irq_desc[irq].msi_desc;
+ struct msi_desc *desc = irq_desc[vector].msi_desc;
struct msi_msg msg;
unsigned int dest;
if ( !desc )
return;
- ASSERT(spin_is_locked(&irq_desc[irq].lock));
+ ASSERT(spin_is_locked(&irq_desc[vector].lock));
spin_lock(&desc->dev->lock);
read_msi_msg(desc, &msg);
}
}
-static void msix_flush_writes(unsigned int irq)
+static void msix_flush_writes(unsigned int vector)
{
- struct msi_desc *entry = irq_desc[irq].msi_desc;
+ struct msi_desc *entry = irq_desc[vector].msi_desc;
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
|| entry->msi_attrib.maskbit;
}
-static void msi_set_mask_bit(unsigned int irq, int flag)
+static void msi_set_mask_bit(unsigned int vector, int flag)
{
- struct msi_desc *entry = irq_desc[irq].msi_desc;
+ struct msi_desc *entry = irq_desc[vector].msi_desc;
- ASSERT(spin_is_locked(&irq_desc[irq].lock));
+ ASSERT(spin_is_locked(&irq_desc[vector].lock));
BUG_ON(!entry || !entry->dev);
switch (entry->msi_attrib.type) {
case PCI_CAP_ID_MSI:
entry->msi_attrib.masked = !!flag;
}
-void mask_msi_irq(unsigned int irq)
+void mask_msi_vector(unsigned int vector)
{
- msi_set_mask_bit(irq, 1);
- msix_flush_writes(irq);
+ msi_set_mask_bit(vector, 1);
+ msix_flush_writes(vector);
}
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_vector(unsigned int vector)
{
- msi_set_mask_bit(irq, 0);
- msix_flush_writes(irq);
+ msi_set_mask_bit(vector, 0);
+ msix_flush_writes(vector);
}
static struct msi_desc* alloc_msi_entry(void)
#include <xen/irq.h>
-#define NR_VECTORS 256
#define VIOAPIC_NUM_PINS 48
#include <xen/hvm/irq.h>
* 02/29/00 D.Mosberger moved most things into hw_irq.h
*/
+#define NR_VECTORS 256
#define NR_IRQS 256
#define NR_IRQ_VECTORS NR_IRQS
};
/* Helper functions */
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
-extern void set_msi_irq_affinity(unsigned int irq, cpumask_t mask);
+extern void mask_msi_vector(unsigned int vector);
+extern void unmask_msi_vector(unsigned int vector);
+extern void set_msi_affinity(unsigned int vector, cpumask_t mask);
extern int pci_enable_msi(struct msi_info *msi);
extern void pci_disable_msi(int vector);
extern void pci_cleanup_msi(struct pci_dev *pdev);
cpumask_t affinity;
} __cacheline_aligned irq_desc_t;
-extern irq_desc_t irq_desc[NR_IRQS];
+extern irq_desc_t irq_desc[NR_VECTORS];
extern int setup_irq(unsigned int, struct irqaction *);
extern void free_irq(unsigned int);
extern irq_desc_t *domain_spin_lock_irq_desc(
struct domain *d, int irq, unsigned long *pflags);
-static inline void set_native_irq_info(int irq, cpumask_t mask)
+static inline void set_native_irq_info(unsigned int vector, cpumask_t mask)
{
- irq_desc[irq].affinity = mask;
+ irq_desc[vector].affinity = mask;
}
+#ifdef irq_to_vector
static inline void set_irq_info(int irq, cpumask_t mask)
{
- set_native_irq_info(irq, mask);
+ set_native_irq_info(irq_to_vector(irq), mask);
}
+#endif
+
#endif /* __XEN_IRQ_H__ */