                 irq, vector, smp_processor_id());
        __get_cpu_var(vector_irq)[vector] = -1;
-        if ( cfg->used_vectors )
+        cfg->move_cleanup_count--;
+
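+        /* Only drop the used_vectors bit once the last CPU is done. */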
+        if ( cfg->move_cleanup_count == 0
+             && cfg->used_vectors )
        {
            ASSERT(test_bit(vector, cfg->used_vectors));
            clear_bit(vector, cfg->used_vectors);
        }
-        cfg->move_cleanup_count--;
unlock:
        spin_unlock(&desc->lock);
    }
    cfg->vector = vector;
    cfg->cpu_mask = online_mask;
    if ( cfg->used_vectors )
+    {
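+        /* The vector must not already be marked as in use. */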
+        ASSERT(!test_bit(vector, cfg->used_vectors));
        set_bit(vector, cfg->used_vectors);
+    }
    irq_status[irq] = IRQ_USED;
    if (IO_APIC_IRQ(irq))
        irq_vector[irq] = vector;
    for_each_cpu_mask(cpu, tmp_mask)
        per_cpu(vector_irq, cpu)[vector] = -1;
-    if ( cfg->used_vectors )
-        clear_bit(vector, cfg->used_vectors);
-
    cfg->vector = IRQ_VECTOR_UNASSIGNED;
    cpus_clear(cfg->cpu_mask);
    init_one_irq_status(irq);
    if (likely(!cfg->move_in_progress))
        return;
+
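+    /* A move was in progress: clean up the old CPUs' vector_irq entries. */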
    cpus_and(tmp_mask, cfg->old_cpu_mask, cpu_online_map);
    for_each_cpu_mask(cpu, tmp_mask) {
        for (vector = FIRST_DYNAMIC_VECTOR; vector <= LAST_DYNAMIC_VECTOR;
        }
    }
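+    /* Release the used_vectors bit now that the old entries are gone. */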
+    if ( cfg->used_vectors )
+    {
+        ASSERT(test_bit(vector, cfg->used_vectors));
+        clear_bit(vector, cfg->used_vectors);
+    }
+
    cfg->move_in_progress = 0;
}