     cfg->move_in_progress = 0;
 }
-void irq_complete_move(struct irq_desc **descp)
+void irq_complete_move(struct irq_desc *desc)
 {
-    struct irq_desc *desc = *descp;
     struct irq_cfg *cfg = desc->chip_data;
     unsigned vector, me;
 {
     struct irq_desc *desc = irq_to_desc(irq);
-    irq_complete_move(&desc);
+    irq_complete_move(desc);
     move_native_irq(irq);
     if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
     int i;
     struct irq_desc *desc = irq_to_desc(irq);
-    irq_complete_move(&desc);
+    irq_complete_move(desc);
     if ( ioapic_ack_new )
         return;
 {
     struct irq_desc *desc = irq_to_desc(irq);
-    irq_complete_move(&desc);
+    irq_complete_move(desc);
     move_native_irq(irq);
     if ( msi_maskable_irq(desc->msi_desc) )
     struct amd_iommu *iommu = irq_to_iommu[irq];
     struct irq_desc *desc = irq_to_desc(irq);
-    irq_complete_move(&desc);
+    irq_complete_move(desc);
     /* FIXME: do not support mask bits at the moment */
     if ( iommu->maskbit )
     struct iommu *iommu = irq_to_iommu[irq];
     struct irq_desc *desc = irq_to_desc(irq);
-    irq_complete_move(&desc);
+    irq_complete_move(desc);
     /* mask it */
     spin_lock_irqsave(&iommu->register_lock, flags);
 void destroy_irq(unsigned int irq);
 struct irq_desc;
-extern void irq_complete_move(struct irq_desc **descp);
+extern void irq_complete_move(struct irq_desc *);
 extern struct irq_desc *irq_desc;