Rename evtchn_lock to event_lock, since it protects more than just event-channel state now.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
continue;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
{
hvm_pci_msi_assert(d, irq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
continue;
}
*/
set_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)],
NOW() + PT_IRQ_TIME_OUT);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
}
int rc = 0;
cpumask_t cpumask = CPU_MASK_NONE;
- WARN_ON(!spin_is_locked(&v->domain->evtchn_lock));
+ WARN_ON(!spin_is_locked(&v->domain->event_lock));
BUG_ON(!local_irq_is_enabled());
retry:
irq_desc_t *desc;
int vector;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
+ WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, irq, NULL);
irq_guest_action_t *action;
int i, bound = 0;
- WARN_ON(!spin_is_locked(&d->evtchn_lock));
+ WARN_ON(!spin_is_locked(&d->event_lock));
BUG_ON(!local_irq_is_enabled());
desc = domain_spin_lock_irq_desc(d, irq, NULL);
{
int i;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
if ( type == MAP_PIRQ_TYPE_GSI )
{
irq_desc_t *desc;
unsigned long flags;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
if ( !IS_PRIV(current->domain) )
return -EPERM;
if ( !IS_PRIV(current->domain) )
return -EINVAL;
- ASSERT(spin_is_locked(&d->evtchn_lock));
+ ASSERT(spin_is_locked(&d->event_lock));
vector = d->arch.pirq_vector[pirq];
if ( vector <= 0 )
{
int i;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
for ( i = 0; i < NR_PIRQS; i++ )
if ( d->arch.pirq_vector[i] > 0 )
unmap_domain_pirq(d, i);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
extern void dump_ioapic_irq_info(void);
}
/* Verify or get pirq. */
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( map->pirq < 0 )
{
if ( d->arch.vector_pirq[vector] )
map->pirq = pirq;
done:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
free_irq_vector(vector);
free_domain:
if ( d == NULL )
return -ESRCH;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, unmap->pirq);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
irq_op.vector = assign_irq_vector(irq);
- spin_lock(&dom0->evtchn_lock);
+ spin_lock(&dom0->event_lock);
ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
MAP_PIRQ_TYPE_GSI, NULL);
- spin_unlock(&dom0->evtchn_lock);
+ spin_unlock(&dom0->event_lock);
if ( copy_to_guest(arg, &irq_op, 1) != 0 )
ret = -EFAULT;
if ( rc )
return rc;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT_DOM(port, d);
alloc->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
/* Avoid deadlock by first acquiring lock of domain with smaller id. */
if ( ld < rd )
{
- spin_lock(&ld->evtchn_lock);
- spin_lock(&rd->evtchn_lock);
+ spin_lock(&ld->event_lock);
+ spin_lock(&rd->event_lock);
}
else
{
if ( ld != rd )
- spin_lock(&rd->evtchn_lock);
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&rd->event_lock);
+ spin_lock(&ld->event_lock);
}
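The hunk above carries the pre-existing deadlock-avoidance rule across the rename unchanged: whenever both ends of an interdomain binding must be locked, the two event locks are taken in a fixed global order. (Note the comparison `ld < rd` actually orders by domain pointer, despite the comment's mention of domain id.) A minimal sketch of that rule as a helper — illustrative only, `double_event_lock` is not part of the patch:

/* Illustrative helper, not in the patch: take two domains' event
 * locks in a globally consistent order, so that two CPUs locking the
 * same pair can never each hold one lock while waiting for the other. */
static void double_event_lock(struct domain *a, struct domain *b)
{
    if ( a == b )
    {
        spin_lock(&a->event_lock);
        return;
    }
    if ( b < a )    /* order by pointer, lowest first */
    {
        struct domain *t = a;
        a = b;
        b = t;
    }
    spin_lock(&a->event_lock);
    spin_lock(&b->event_lock);
}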
if ( (lport = get_free_port(ld)) < 0 )
bind->local_port = lport;
out:
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
if ( ld != rd )
- spin_unlock(&rd->evtchn_lock);
+ spin_unlock(&rd->event_lock);
rcu_unlock_domain(rd);
((v = d->vcpu[vcpu]) == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( v->virq_to_evtchn[virq] != 0 )
ERROR_EXIT(-EEXIST);
v->virq_to_evtchn[virq] = bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
(d->vcpu[vcpu] == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
ERROR_EXIT(port);
bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
if ( !irq_access_permitted(d, pirq) )
return -EPERM;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( d->pirq_to_evtchn[pirq] != 0 )
ERROR_EXIT(-EEXIST);
bind->port = port;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
long rc = 0;
again:
- spin_lock(&d1->evtchn_lock);
+ spin_lock(&d1->event_lock);
if ( !port_is_valid(d1, port1) )
{
if ( d1 < d2 )
{
- spin_lock(&d2->evtchn_lock);
+ spin_lock(&d2->event_lock);
}
else if ( d1 != d2 )
{
- spin_unlock(&d1->evtchn_lock);
- spin_lock(&d2->evtchn_lock);
+ spin_unlock(&d1->event_lock);
+ spin_lock(&d2->event_lock);
goto again;
}
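This `goto again` variant handles the case where the remote domain is only discovered while the local lock is already held: if the ordering rule says the remote lock must be taken first, the local lock has to be dropped, both locks retaken in order, and the channel state re-validated. A sketch of the shape (illustrative; `remote_domain` is a hypothetical stand-in for the value read from the channel, and the `d2 == NULL` guard reflects that discovery happens only on the first pass):

 again:
    spin_lock(&d1->event_lock);
    /* ... validate port1, look up the channel ... */
    if ( d2 == NULL )                        /* first pass only */
    {
        d2 = remote_domain;                  /* hypothetical: read from channel */
        if ( d1 < d2 )
            spin_lock(&d2->event_lock);      /* order already correct */
        else if ( d1 != d2 )
        {
            spin_unlock(&d1->event_lock);    /* wrong order: back off */
            spin_lock(&d2->event_lock);      /* lower lock first */
            goto again;                      /* retake d1, then re-check that
                                              * the channel still names d2 */
        }
    }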
}
if ( d2 != NULL )
{
if ( d1 != d2 )
- spin_unlock(&d2->evtchn_lock);
+ spin_unlock(&d2->event_lock);
put_domain(d2);
}
- spin_unlock(&d1->evtchn_lock);
+ spin_unlock(&d1->event_lock);
return rc;
}
struct vcpu *rvcpu;
int rport, ret = 0;
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&ld->event_lock);
if ( unlikely(!port_is_valid(ld, lport)) )
{
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return -EINVAL;
}
/* Guest cannot send via a Xen-attached event channel. */
if ( unlikely(lchn->consumer_is_xen) )
{
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return -EINVAL;
}
}
out:
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
return ret;
}
if ( rc )
return rc;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
status->vcpu = chn->notify_vcpu_id;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
rcu_unlock_domain(d);
return rc;
if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
{
}
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return rc;
}
int port = unmask->port;
struct vcpu *v;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( unlikely(!port_is_valid(d, port)) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
vcpu_mark_events_pending(v);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return 0;
}
struct domain *d = local_vcpu->domain;
int port;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
goto out;
chn->u.unbound.remote_domid = remote_domid;
out:
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return port;
}
struct evtchn *chn;
struct domain *d = local_vcpu->domain;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
if ( unlikely(d->is_dying) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
BUG_ON(!chn->consumer_is_xen);
chn->consumer_is_xen = 0;
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
(void)__evtchn_close(d, port);
}
struct domain *ld = current->domain, *rd;
int rport;
- spin_lock(&ld->evtchn_lock);
+ spin_lock(&ld->event_lock);
ASSERT(port_is_valid(ld, lport));
lchn = evtchn_from_port(ld, lport);
evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
}
- spin_unlock(&ld->evtchn_lock);
+ spin_unlock(&ld->event_lock);
}
int evtchn_init(struct domain *d)
{
- spin_lock_init(&d->evtchn_lock);
+ spin_lock_init(&d->event_lock);
if ( get_free_port(d) != 0 )
return -EINVAL;
evtchn_from_port(d, 0)->state = ECS_RESERVED;
/* After this barrier no new event-channel allocations can occur. */
BUG_ON(!d->is_dying);
- spin_barrier(&d->evtchn_lock);
+ spin_barrier(&d->event_lock);
/* Close all existing event channels. */
for ( i = 0; port_is_valid(d, i); i++ )
}
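The barrier above works because port allocators run with the event lock held and check d->is_dying under it, so once the barrier has flushed out every holder, no new event channels can appear. A sketch of the semantics being relied on, assuming the simple lock-cycle implementation of spin_barrier() from this era of the code (`event_lock_barrier` is a hypothetical name, illustrative only):

/* Illustrative: the barrier is just an acquire/release pair, so it
 * returns only after every critical section that began before it has
 * exited.  Allocators check d->is_dying under the same lock, so after
 * the barrier no new ports can be allocated. */
static inline void event_lock_barrier(struct domain *d)
{
    spin_lock(&d->event_lock);   /* wait out any in-flight holder */
    spin_unlock(&d->event_lock); /* nothing to do; waiting was the point */
}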
/* Free all event-channel buckets. */
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
{
xsm_free_security_evtchn(d->evtchn[i]);
xfree(d->evtchn[i]);
d->evtchn[i] = NULL;
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
static void domain_dump_evtchn_info(struct domain *d)
printk("Domain %d polling vCPUs: %08lx\n", d->domain_id, d->poll_mask[0]);
- if ( !spin_trylock(&d->evtchn_lock) )
+ if ( !spin_trylock(&d->event_lock) )
return;
printk("Event channel information for domain %d:\n",
printk(" x=%d\n", chn->consumer_is_xen);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
static void dump_evtchn_info(unsigned char key)
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
- spin_lock(&irq_map->dom->evtchn_lock);
+ spin_lock(&irq_map->dom->event_lock);
dpci = domain_get_irq_dpci(irq_map->dom);
ASSERT(dpci);
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&irq_map->dom->evtchn_lock);
+ spin_unlock(&irq_map->dom->event_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
if ( pirq < 0 || pirq >= NR_PIRQS )
return -EINVAL;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -ENOMEM;
}
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
{
xfree(hvm_irq_dpci);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
|| hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EBUSY;
}
}
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -ENOMEM;
}
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return 0;
}
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return -EINVAL;
}
clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
desc = domain_spin_lock_irq_desc(d, pirq, NULL);
if (!desc)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
pirq_guest_eoi(d, pirq);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
return;
}
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if((hvm_irq_dpci == NULL) ||
(guest_gsi >= NR_ISAIRQS &&
!hvm_irq_dpci->girq[guest_gsi].valid) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
pirq_guest_eoi(d, machine_gsi);
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
if ( !is_hvm_domain(d) && !need_iommu(d) )
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci != NULL )
{
d->arch.hvm_domain.irq.dpci = NULL;
xfree(hvm_irq_dpci);
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
void pci_release_devices(struct domain *d)
if ( !vtd_enabled)
return;
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
dpci = domain_get_irq_dpci(d);
if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
return;
}
/* Multiple mirq may be mapped to one isa irq */
}
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}
/* Shadow translated domain: P2M mapping */
pagetable_t phys_table;
- /* NB. protected by d->evtchn_lock and by irq_desc[vector].lock */
+ /* NB. protected by d->event_lock and by irq_desc[vector].lock */
int vector_pirq[NR_VECTORS];
int pirq_vector[NR_PIRQS];
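A common discipline behind a dual-lock comment like the one above — an assumption here, since the patch itself only renames the lock — is that writers hold both locks while readers may rely on either one. Sketched as a hypothetical writer-side helper:

/* Hypothetical helper illustrating the assumed rule: updates take
 * both locks, so a reader holding either one observes a consistent
 * pirq<->vector mapping. */
static void set_pirq_vector(struct domain *d, int pirq, int vector)
{
    ASSERT(spin_is_locked(&d->event_lock));
    ASSERT(spin_is_locked(&irq_desc[vector].lock));
    d->arch.pirq_vector[pirq] = vector;
    d->arch.vector_pirq[vector] = pirq;
}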
#define NR_ISAIRQS 16
#define NR_LINK 4
-/* Protected by domain's evtchn_lock */
+/* Protected by domain's event_lock */
struct hvm_irq_dpci {
/* Machine IRQ to guest device/intx mapping. */
DECLARE_BITMAP(mapping, NR_PIRQS);
/* Event channel information. */
struct evtchn *evtchn[NR_EVTCHN_BUCKETS];
- spinlock_t evtchn_lock;
+ spinlock_t event_lock;
struct grant_table *grant_table;
/* a) check for event channel conflicts */
for ( bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++ )
{
- spin_lock(&d->evtchn_lock);
+ spin_lock(&d->event_lock);
ports = d->evtchn[bucket];
if ( ports == NULL)
{
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
break;
}
printkd("%s: Policy violation in event channel domain "
"%x -> domain %x.\n",
__func__, d->domain_id, rdomid);
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
acm_array_append_tuple(errors,
ACM_EVTCHN_SHARING_VIOLATION,
goto out;
}
}
- spin_unlock(&d->evtchn_lock);
+ spin_unlock(&d->event_lock);
}