    return ret;
}
-static inline int find_unassigned_irq(void)
-{
-    int irq;
-
-    for (irq = nr_irqs_gsi; irq < nr_irqs; irq++)
-        if (irq_to_desc(irq)->arch.used == IRQ_UNUSED)
-            return irq;
-    return -ENOSPC;
-}
-
/*
 * Dynamic irq allocate and deallocation for MSI
 */
    int irq, ret;
    struct irq_desc *desc;
-    spin_lock_irqsave(&vector_lock, flags);
+    for (irq = nr_irqs_gsi; irq < nr_irqs; irq++)
+    {
+        desc = irq_to_desc(irq);
+        if (cmpxchg(&desc->arch.used, IRQ_UNUSED, IRQ_RESERVED) == IRQ_UNUSED)
+            break;
+    }
+
+    if (irq >= nr_irqs)
+        return -ENOSPC;
-    irq = find_unassigned_irq();
-    if (irq < 0)
-        goto out;
-    desc = irq_to_desc(irq);
    ret = init_one_irq_desc(desc);
    if (!ret)
+    {
+        spin_lock_irqsave(&vector_lock, flags);
        ret = __assign_irq_vector(irq, desc, TARGET_CPUS);
+        spin_unlock_irqrestore(&vector_lock, flags);
+    }
    if (ret < 0)
+    {
+        desc->arch.used = IRQ_UNUSED;
        irq = ret;
- out:
-    spin_unlock_irqrestore(&vector_lock, flags);
+    }
    return irq;
}
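
The hunk above drops find_unassigned_irq() and the vector_lock critical section around the whole allocation: each caller now claims a descriptor directly with cmpxchg(), flipping arch.used from IRQ_UNUSED to IRQ_RESERVED so two concurrent callers cannot pick the same slot, and vector_lock is only held around __assign_irq_vector(). On failure the descriptor is handed back by storing IRQ_UNUSED. Below is a minimal standalone sketch of that claim-and-rollback pattern, assuming a plain slot_state[] array and hypothetical claim_slot()/release_slot() helpers, with GCC/Clang __atomic builtins standing in for Xen's cmpxchg(); it models only the UNUSED<->RESERVED transitions, not the later RESERVED->USED step done during vector assignment.

#include <stdbool.h>
#include <stdio.h>

#define SLOT_UNUSED    0
#define SLOT_RESERVED  (-1)
#define NR_SLOTS       8

static signed char slot_state[NR_SLOTS];    /* signed so -1 (RESERVED) fits */

/* Atomically claim the first free slot; return its index or -1 if none. */
static int claim_slot(void)
{
    int i;

    for (i = 0; i < NR_SLOTS; i++)
    {
        signed char expected = SLOT_UNUSED;

        /* Analogue of cmpxchg(&desc->arch.used, IRQ_UNUSED, IRQ_RESERVED). */
        if (__atomic_compare_exchange_n(&slot_state[i], &expected,
                                        SLOT_RESERVED, false,
                                        __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
            return i;
    }
    return -1;
}

/* Roll a claimed slot back, mirroring "desc->arch.used = IRQ_UNUSED". */
static void release_slot(int i)
{
    __atomic_store_n(&slot_state[i], SLOT_UNUSED, __ATOMIC_RELEASE);
}

int main(void)
{
    int a = claim_slot();
    int b = claim_slot();

    printf("claimed slots %d and %d\n", a, b);   /* two distinct slots */
    release_slot(b);                             /* e.g. later setup failed */
    printf("reclaimed slot %d\n", claim_slot()); /* b's slot is free again */
    return 0;
}

Because the claim itself is a single atomic compare-and-swap on the descriptor's own state field, the scan needs no global lock, and a half-constructed descriptor is never visible to other callers as either free or fully used.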
        unsigned move_cleanup_count;
        vmask_t *used_vectors;
        u8 move_in_progress : 1;
-        u8 used: 1;
+        s8 used;
};
/* For use with irq_cfg.used */
#define IRQ_UNUSED (0)
#define IRQ_USED (1)
+#define IRQ_RESERVED (-1)
#define IRQ_VECTOR_UNASSIGNED (-1)
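
The header hunk is what makes the reservation state representable: a one-bit u8 bitfield can only hold 0 (IRQ_UNUSED) or 1 (IRQ_USED), so IRQ_RESERVED, defined as -1, needs the field widened to a signed s8. A quick illustration with hypothetical struct names (GCC/Clang accept char-typed bit-fields; this is not Xen code):

#include <stdio.h>

#define RESERVED (-1)                          /* stands in for IRQ_RESERVED */

struct old_desc { unsigned char used : 1; };   /* mirrors the old "u8 used: 1" */
struct new_desc { signed char used; };         /* mirrors the new "s8 used"    */

int main(void)
{
    int reserved = RESERVED;
    struct old_desc o;
    struct new_desc n;

    o.used = reserved;   /* 1-bit unsigned field keeps only bit 0, i.e. 1 */
    n.used = reserved;   /* signed 8-bit field stores -1 as intended      */

    printf("u8 used:1 holds %d, s8 used holds %d\n", o.used, n.used);
    printf("equals RESERVED?  bitfield: %s, s8: %s\n",
           o.used == RESERVED ? "yes" : "no",   /* "no": 1 != -1 */
           n.used == RESERVED ? "yes" : "no");  /* "yes" */
    return 0;
}

With the old layout the new allocation path would not even build, since a bit-field has no address to pass to cmpxchg(); switching to s8 both makes the field addressable and lets it hold the third, negative state.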