#define ACKTYPE_NONE 0 /* No final acknowledgement is required */
#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU) */
#define ACKTYPE_EOI 2 /* EOI on the CPU that was interrupted */
- cpumask_t cpu_eoi_map; /* CPUs that need to EOI this interrupt */
+ cpumask_var_t cpu_eoi_map; /* CPUs that need to EOI this interrupt */
struct timer eoi_timer;
struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
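/*
 * A minimal sketch, not part of the change itself: the alloc/free pairing
 * that a cpumask_var_t member requires, using only APIs appearing in this
 * patch (xmalloc/xfree, zalloc_cpumask_var/free_cpumask_var).  The helper
 * names are hypothetical.
 */
static irq_guest_action_t *alloc_guest_action(void)
{
    irq_guest_action_t *action = xmalloc(irq_guest_action_t);

    /* zalloc_cpumask_var() yields a zeroed (i.e. empty) mask on success. */
    if ( action == NULL || !zalloc_cpumask_var(&action->cpu_eoi_map) )
    {
        xfree(action); /* xfree(NULL) is a no-op */
        return NULL;
    }
    return action;
}

static void free_guest_action(irq_guest_action_t *action)
{
    if ( action == NULL )
        return;
    free_cpumask_var(action->cpu_eoi_map);
    xfree(action);
}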
desc->handler->end(desc, 0);
break;
case ACKTYPE_EOI:
- cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
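/* With desc->lock dropped, IPI the CPUs still in cpu_eoi_map so each
 * marks its pending EOI for this interrupt as ready. */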
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
peoi[sp].vector = vector;
peoi[sp].ready = 0;
pending_eoi_sp(peoi) = sp+1;
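/* Note in cpu_eoi_map that this CPU owes the final EOI for the interrupt. */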
- cpu_set(smp_processor_id(), action->cpu_eoi_map);
+ cpumask_set_cpu(smp_processor_id(), action->cpu_eoi_map);
}
for ( i = 0; i < action->nr_guests; i++ )
if ( !(desc->status & IRQ_GUEST) ||
(action->in_flight != 0) ||
- !cpu_test_and_clear(smp_processor_id(), action->cpu_eoi_map) )
+ !cpumask_test_and_clear_cpu(smp_processor_id(),
+ action->cpu_eoi_map) )
return;
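/* All guest EOIs are in and this CPU was still listed in cpu_eoi_map:
 * mark its stacked EOI entry as ready to be flushed. */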
sp = pending_eoi_sp(peoi);
if ( action->ack_type == ACKTYPE_UNMASK )
{
- ASSERT(cpus_empty(action->cpu_eoi_map));
+ ASSERT(cpumask_empty(action->cpu_eoi_map));
if ( desc->handler->end )
desc->handler->end(desc, 0);
spin_unlock_irq(&desc->lock);
ASSERT(action->ack_type == ACKTYPE_EOI);
- cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
if ( cpumask_test_and_clear_cpu(smp_processor_id(), &cpu_eoi_map) )
{
if ( newaction == NULL )
{
spin_unlock_irq(&desc->lock);
- if ( (newaction = xmalloc(irq_guest_action_t)) != NULL )
+ if ( (newaction = xmalloc(irq_guest_action_t)) != NULL &&
+ zalloc_cpumask_var(&newaction->cpu_eoi_map) )
goto retry;
+ xfree(newaction);
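+ /* xfree(NULL) is a no-op, so this covers failure of either allocation. */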
gdprintk(XENLOG_INFO,
"Cannot bind IRQ %d to guest. Out of memory.\n",
pirq->pirq);
action->in_flight = 0;
action->shareable = will_share;
action->ack_type = pirq_acktype(v->domain, pirq->pirq);
- cpumask_clear(&action->cpu_eoi_map);
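+ /* cpu_eoi_map is already empty: zalloc_cpumask_var() returned it zeroed. */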
init_timer(&action->eoi_timer, irq_guest_eoi_timer_fn, desc, 0);
desc->status |= IRQ_GUEST;
spin_unlock_irq(&desc->lock);
out:
if ( newaction != NULL )
+ {
+ free_cpumask_var(newaction->cpu_eoi_map);
xfree(newaction);
+ }
return rc;
}
(--action->in_flight == 0) &&
(action->nr_guests != 0) )
{
- cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
spin_unlock_irq(&desc->lock);
on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
* would need to flush all ready EOIs before returning as otherwise the
* desc->handler could change and we would call the wrong 'end' hook.
*/
- cpumask_copy(&cpu_eoi_map, &action->cpu_eoi_map);
+ cpumask_copy(&cpu_eoi_map, action->cpu_eoi_map);
if ( !cpumask_empty(&cpu_eoi_map) )
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
spin_lock_irq(&desc->lock);
}
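/* Every CPU listed in cpu_eoi_map must have issued its final EOI by now. */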
- BUG_ON(!cpus_empty(action->cpu_eoi_map));
+ BUG_ON(!cpumask_empty(action->cpu_eoi_map));
desc->action = NULL;
desc->status &= ~(IRQ_GUEST|IRQ_INPROGRESS);
if ( oldaction != NULL )
{
kill_timer(&oldaction->eoi_timer);
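+ /* The EOI cpumask is a separate allocation and is freed with the action. */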
+ free_cpumask_var(oldaction->cpu_eoi_map);
xfree(oldaction);
}
else if ( irq > 0 )
if ( oldaction != NULL )
{
kill_timer(&oldaction->eoi_timer);
+ free_cpumask_var(oldaction->cpu_eoi_map);
xfree(oldaction);
}
if ( !(desc->status & IRQ_GUEST) )
continue;
action = (irq_guest_action_t *)desc->action;
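/* This CPU is being taken offline: it can no longer perform the final
 * EOI, so drop it from cpu_eoi_map. */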
- cpu_clear(smp_processor_id(), action->cpu_eoi_map);
+ cpumask_clear_cpu(smp_processor_id(), action->cpu_eoi_map);
}
/* Flush the interrupt EOI stack. */