evtchn_write_unlock(chn);
bind->port = port;
+ /*
+ * In principle, the update of virq_to_evtchn[] would need guarding by
+ * virq_lock, but since this is the last action here, there's no strict
+ * need to acquire the lock. Hence holding event_lock isn't helpful
+ * anymore at this point, but utilize that its unlocking acts as the
+ * otherwise necessary smp_wmb() here.
+ */
write_atomic(&v->virq_to_evtchn[virq], port);
out:
case ECS_VIRQ:
for_each_vcpu ( d1, v )
{
- if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) != port1 )
- continue;
- write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
- spin_barrier(&v->virq_lock);
+ unsigned long flags;
+
+ write_lock_irqsave(&v->virq_lock, flags);
+ if ( read_atomic(&v->virq_to_evtchn[chn1->u.virq]) == port1 )
+ write_atomic(&v->virq_to_evtchn[chn1->u.virq], 0);
+ write_unlock_irqrestore(&v->virq_lock, flags);
}
break;
ASSERT(!virq_is_global(virq));
- spin_lock_irqsave(&v->virq_lock, flags);
+ read_lock_irqsave(&v->virq_lock, flags);
port = read_atomic(&v->virq_to_evtchn[virq]);
if ( unlikely(port == 0) )
}
out:
- spin_unlock_irqrestore(&v->virq_lock, flags);
+ read_unlock_irqrestore(&v->virq_lock, flags);
}
void send_guest_global_virq(struct domain *d, uint32_t virq)
if ( unlikely(v == NULL) )
return;
- spin_lock_irqsave(&v->virq_lock, flags);
+ read_lock_irqsave(&v->virq_lock, flags);
port = read_atomic(&v->virq_to_evtchn[virq]);
if ( unlikely(port == 0) )
}
out:
- spin_unlock_irqrestore(&v->virq_lock, flags);
+ read_unlock_irqrestore(&v->virq_lock, flags);
}
void send_guest_pirq(struct domain *d, const struct pirq *pirq)