static void
-iosapic_set_affinity (unsigned int irq, const cpumask_t *mask)
+iosapic_set_affinity (struct irq_desc *desc, const cpumask_t *mask) /* set_affinity hook now receives the irq_desc instead of a raw irq number */
{
#ifdef CONFIG_SMP
unsigned long flags;
u32 high32, low32;
int dest, rte_index;
char __iomem *addr;
- int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
+ int redir = (desc->irq & IA64_IRQ_REDIRECTED) ? 1 : 0; /* redirect flag is encoded in the irq number itself */
+ unsigned int irq = desc->irq & ~IA64_IRQ_REDIRECTED; /* plain irq with flag stripped; replaces the old in-place masking below */
ia64_vector vec;
struct iosapic_rte_info *rte;
- irq &= (~IA64_IRQ_REDIRECTED);
vec = irq_to_vector(irq);
if (cpumask_empty(mask))
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
}
-static void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+static void hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg) /* callers resolve the channel; the irq->channel lookup (and its BUG_ON) moves out of here */
{
- unsigned int ch_idx = irq_to_channel(irq);
- struct hpet_event_channel *ch = hpet_events + ch_idx;
-
- BUG_ON(ch_idx >= num_hpets_used);
-
hpet_write32(msg->data, HPET_Tn_ROUTE(ch->idx));
hpet_write32(msg->address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
}
-static void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+static void hpet_msi_read(struct hpet_event_channel *ch, struct msi_msg *msg) /* mirror of hpet_msi_write: channel passed in directly */
{
- unsigned int ch_idx = irq_to_channel(irq);
- struct hpet_event_channel *ch = hpet_events + ch_idx;
-
- BUG_ON(ch_idx >= num_hpets_used);
-
msg->data = hpet_read32(HPET_Tn_ROUTE(ch->idx));
msg->address_lo = hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4);
msg->address_hi = 0;
{
}
-static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void hpet_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
struct msi_msg msg;
unsigned int dest;
- struct irq_desc * desc = irq_to_desc(irq);
struct irq_cfg *cfg= desc->chip_data;
dest = set_desc_affinity(desc, mask);
if (dest == BAD_APICID)
return;
- hpet_msi_read(irq, &msg);
+ hpet_msi_read(desc->action->dev_id, &msg); /* dev_id is the hpet_event_channel; assumes desc->action is non-NULL here — TODO confirm */
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- hpet_msi_write(irq, &msg);
+ hpet_msi_write(desc->action->dev_id, &msg);
}
/*
.set_affinity = hpet_msi_set_affinity,
};
-static void __hpet_setup_msi_irq(unsigned int irq)
+static void __hpet_setup_msi_irq(struct irq_desc *desc)
{
struct msi_msg msg;
- msi_compose_msg(irq, &msg);
- hpet_msi_write(irq, &msg);
+ msi_compose_msg(desc->irq, &msg); /* the irq number is still needed for vector composition */
+ hpet_msi_write(desc->action->dev_id, &msg);
}
static int __init hpet_setup_msi_irq(unsigned int irq)
if ( ret < 0 )
return ret;
- __hpet_setup_msi_irq(irq);
+ __hpet_setup_msi_irq(desc); /* presumably desc = irq_to_desc(irq) earlier in this function — not visible in this hunk */
return 0;
}
if ( ch->cpu != cpu )
return;
- hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
+ hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu)); /* call sites now look up the desc themselves */
}
static void hpet_detach_channel(unsigned int cpu,
}
ch->cpu = first_cpu(ch->cpumask);
- hpet_msi_set_affinity(ch->irq, cpumask_of(ch->cpu));
+ hpet_msi_set_affinity(irq_to_desc(ch->irq), cpumask_of(ch->cpu));
}
#include <asm/mc146818rtc.h>
for ( i = 0; i < n; i++ )
{
if ( hpet_events[i].irq >= 0 )
- __hpet_setup_msi_irq(hpet_events[i].irq);
+ __hpet_setup_msi_irq(irq_to_desc(hpet_events[i].irq));
/* set HPET Tn as oneshot */
cfg = hpet_read32(HPET_Tn_CFG(hpet_events[i].idx));
}
static void
-set_ioapic_affinity_irq_desc(struct irq_desc *desc, const cpumask_t *mask)
+set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask) /* takes over the old wrapper's name; the irq-based wrapper below is removed */
{
unsigned long flags;
unsigned int dest;
spin_unlock_irqrestore(&ioapic_lock, flags);
}
-
-static void
-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
- set_ioapic_affinity_irq_desc(desc, mask);
-}
#endif /* CONFIG_SMP */
/*
irq = pin_2_irq(irq_entry, ioapic, pin);
cfg = irq_cfg(irq);
BUG_ON(cpus_empty(cfg->cpu_mask));
- set_ioapic_affinity_irq(irq, &cfg->cpu_mask);
+ set_ioapic_affinity_irq(irq_to_desc(irq), &cfg->cpu_mask);
}
}
if ((irq_desc[irq].status & IRQ_MOVE_PENDING) &&
!io_apic_level_ack_pending(irq))
- move_masked_irq(irq);
+ move_masked_irq(desc); /* desc presumably already in scope in this function — elided from this hunk */
if ( !(v & (1 << (i & 0x1f))) ) {
spin_lock(&ioapic_lock);
{
if ( directed_eoi_enabled )
{
- if ( !(irq_desc[irq].status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
+ struct irq_desc *desc = irq_to_desc(irq); /* single lookup, reused for the status checks and move below */
+
+ if ( !(desc->status & (IRQ_DISABLED|IRQ_MOVE_PENDING)) )
{
eoi_IO_APIC_irq(irq);
return;
mask_IO_APIC_irq(irq);
eoi_IO_APIC_irq(irq);
- if ( (irq_desc[irq].status & IRQ_MOVE_PENDING) &&
+ if ( (desc->status & IRQ_MOVE_PENDING) &&
!io_apic_level_ack_pending(irq) )
- move_masked_irq(irq);
+ move_masked_irq(desc);
}
if ( !(irq_desc[irq].status & IRQ_DISABLED) )
}
}
-void move_masked_irq(int irq)
+void move_masked_irq(struct irq_desc *desc) /* callers hand in the desc; internal irq_to_desc lookup dropped */
{
- struct irq_desc *desc = irq_to_desc(irq);
-
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
* For correct operation this depends on the caller masking the irqs.
*/
if (likely(cpus_intersects(desc->pending_mask, cpu_online_map)))
- desc->handler->set_affinity(irq, &desc->pending_mask);
+ desc->handler->set_affinity(desc, &desc->pending_mask); /* hook signature changed: takes the desc now */
cpus_clear(desc->pending_mask);
}
return;
desc->handler->disable(irq);
- move_masked_irq(irq);
+ move_masked_irq(desc);
desc->handler->enable(irq);
}
/* Attempt to bind the interrupt target to the correct CPU. */
cpu_set(v->processor, cpumask);
if ( !opt_noirqbalance && (desc->handler->set_affinity != NULL) )
- desc->handler->set_affinity(irq, &cpumask);
+ desc->handler->set_affinity(desc, &cpumask);
}
else if ( !will_share || !action->shareable )
{
desc->handler->disable(irq);
if ( desc->handler->set_affinity )
- desc->handler->set_affinity(irq, &affinity);
+ desc->handler->set_affinity(desc, &affinity);
else if ( !(warned++) )
set_affinity = 0;
}
}
-void set_msi_affinity(unsigned int irq, const cpumask_t *mask)
+void set_msi_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
struct msi_msg msg;
unsigned int dest;
- struct irq_desc *desc = irq_to_desc(irq);
struct msi_desc *msi_desc = desc->msi_desc;
struct irq_cfg *cfg = desc->chip_data;
set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
}
-static void iommu_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void iommu_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
struct msi_msg msg;
unsigned int dest;
- struct amd_iommu *iommu = irq_to_iommu[irq];
- struct irq_desc *desc = irq_to_desc(irq);
+ struct amd_iommu *iommu = desc->action->dev_id; /* dev_id replaces the irq_to_iommu[] table lookup; assumes the action is installed — TODO confirm */
struct irq_cfg *cfg = desc->chip_data;
u8 bus = (iommu->bdf >> 8) & 0xff;
u8 dev = PCI_SLOT(iommu->bdf & 0xff);
register_iommu_event_log_in_mmio_space(iommu);
register_iommu_exclusion_range(iommu);
- iommu_msi_set_affinity(iommu->irq, &cpu_online_map);
+ iommu_msi_set_affinity(irq_to_desc(iommu->irq), &cpu_online_map);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
ack_APIC_irq();
}
-static void dma_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void dma_msi_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
{
struct msi_msg msg;
unsigned int dest;
unsigned long flags;
-
- struct iommu *iommu = irq_to_iommu[irq];
- struct irq_desc *desc = irq_to_desc(irq);
+ struct iommu *iommu = desc->action->dev_id; /* as in the AMD variant: dev_id replaces irq_to_iommu[] */
struct irq_cfg *cfg = desc->chip_data;
#ifdef CONFIG_X86
iommu = drhd->iommu;
cfg = irq_cfg(iommu->irq);
- dma_msi_set_affinity(iommu->irq, &cfg->cpu_mask);
+ dma_msi_set_affinity(irq_to_desc(iommu->irq), &cfg->cpu_mask);
clear_fault_bits(iommu);
void __setup_vector_irq(int cpu);
void move_native_irq(int irq);
-void move_masked_irq(int irq);
+void move_masked_irq(struct irq_desc *); /* prototype kept in sync with the desc-based definition */
int __assign_irq_vector(int irq, struct irq_cfg *, const cpumask_t *);
/* Helper functions */
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
-extern void set_msi_affinity(unsigned int vector, const cpumask_t *);
+extern void set_msi_affinity(struct irq_desc *, const cpumask_t *);
extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc);
extern void pci_disable_msi(struct msi_desc *desc);
extern void pci_cleanup_msi(struct pci_dev *pdev);
#define NEVER_ASSIGN_IRQ (-2)
#define FREE_TO_ASSIGN_IRQ (-3)
+struct irq_desc; /* forward declaration so the set_affinity hook below can take a pointer */
+
/*
* Interrupt controller descriptor. This is all we need
* to describe about the low-level hardware.
void (*disable)(unsigned int irq);
void (*ack)(unsigned int irq);
void (*end)(unsigned int irq, u8 vector);
- void (*set_affinity)(unsigned int irq, const cpumask_t *);
+ void (*set_affinity)(struct irq_desc *, const cpumask_t *); /* was (unsigned int irq, ...); all implementations updated in this patch */
};
typedef const struct hw_interrupt_type hw_irq_controller;