static void pt_irq_time_out(void *data)
{
struct hvm_mirq_dpci_mapping *irq_map = data;
unsigned int guest_gsi, machine_gsi = 0;
int vector;
- struct hvm_irq_dpci *dpci = domain_get_irq_dpci(irq_map->dom);
+ struct hvm_irq_dpci *dpci = NULL;
struct dev_intx_gsi_link *digl;
uint32_t device, intx;
+ spin_lock(&irq_map->dom->evtchn_lock);
+
+ dpci = domain_get_irq_dpci(irq_map->dom);
+ ASSERT(dpci);
list_for_each_entry ( digl, &irq_map->digl_list, list )
{
guest_gsi = digl->gsi;
machine_gsi = dpci->girq[guest_gsi].machine_gsi;
device = digl->device;
intx = digl->intx;
hvm_pci_intx_deassert(irq_map->dom, device, intx);
}
clear_bit(machine_gsi, dpci->dirq_mask);
vector = domain_irq_to_vector(irq_map->dom, machine_gsi);
- stop_timer(&dpci->hvm_timer[vector]);
- spin_lock(&dpci->dirq_lock);
dpci->mirq[machine_gsi].pending = 0;
- spin_unlock(&dpci->dirq_lock);
+ spin_unlock(&irq_map->dom->evtchn_lock);
pirq_guest_eoi(irq_map->dom, machine_gsi);
}
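/*
 * Illustrative sketch, not part of the patch: the lock ordering the timeout
 * handler now follows. The dpci state is touched only under the domain's
 * evtchn_lock, and that lock is dropped before pirq_guest_eoi(), which takes
 * the irq_desc lock internally. example_timeout_tail() is a hypothetical name.
 */
static void example_timeout_tail(struct domain *d, struct hvm_irq_dpci *dpci,
                                 unsigned int machine_gsi)
{
    spin_lock(&d->evtchn_lock);
    dpci->mirq[machine_gsi].pending = 0;   /* state guarded by evtchn_lock */
    spin_unlock(&d->evtchn_lock);          /* drop before taking the desc lock */
    pirq_guest_eoi(d, machine_gsi);        /* acquires the irq_desc lock */
}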
int pt_irq_create_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct dev_intx_gsi_link *digl;
+ int pirq = pt_irq_bind->machine_irq;
+
+ if ( pirq < 0 || pirq >= NR_PIRQS )
+ return -EINVAL;
+
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
if ( hvm_irq_dpci == NULL )
{
hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
-
+ }
memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
- spin_lock_init(&hvm_irq_dpci->dirq_lock);
for ( int i = 0; i < NR_IRQS; i++ )
INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list);
- if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
- xfree(hvm_irq_dpci);
+ if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
+ {
+ xfree(hvm_irq_dpci);
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
+ }
}
if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
{
- int pirq = pt_irq_bind->machine_irq;
-
- if ( pirq < 0 || pirq >= NR_IRQS )
- return -EINVAL;
- if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID ) )
+ if ( !test_and_set_bit(pirq, hvm_irq_dpci->mapping) )
{
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |
- HVM_IRQ_DPCI_MSI ;
+ set_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags);
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
+ /* Bind after hvm_irq_dpci is set up, to avoid a race with the irq handler */
pirq_guest_bind(d->vcpu[0], pirq, 0);
}
- hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
- hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
- hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
- hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
-
+ else if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
+ hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] != pirq )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EBUSY;
+ }
}
else
{
digl = xmalloc(struct dev_intx_gsi_link);
if ( !digl )
+ {
+ spin_unlock(&d->evtchn_lock);
return -ENOMEM;
+ }
digl->device = device;
digl->intx = intx;
hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
/* Bind the same mirq once in the same domain */
- if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( !test_and_set_bit(machine_gsi, hvm_irq_dpci->mapping) )
{
- hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
hvm_irq_dpci->mirq[machine_gsi].dom = d;
+ /* Init timer before binding */
init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
/* Deal with gsi for legacy devices */
pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
}
gdprintk(XENLOG_INFO,
"VT-d irq bind: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
}
+ spin_unlock(&d->evtchn_lock);
return 0;
}
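/*
 * Sketch of the bitmap idiom introduced above, assuming "mapping" is a
 * bitmap with one bit per machine IRQ: a single test_and_set_bit() both
 * claims the slot and reports whether it was already claimed, replacing the
 * old read-then-set of HVM_IRQ_DPCI_VALID. example_* names are hypothetical.
 */
static int example_claim_slot(struct hvm_irq_dpci *dpci, int pirq)
{
    if ( test_and_set_bit(pirq, dpci->mapping) )
        return -EBUSY;  /* someone else bound this pirq first */
    /* first binder: safe to initialise per-IRQ state before pirq_guest_bind() */
    return 0;
}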
int pt_irq_destroy_bind_vtd(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t machine_gsi, guest_gsi;
uint32_t device, intx, link;
struct list_head *digl_list, *tmp;
struct dev_intx_gsi_link *digl;
- if ( hvm_irq_dpci == NULL )
- return 0;
-
machine_gsi = pt_irq_bind->machine_irq;
device = pt_irq_bind->u.pci.device;
intx = pt_irq_bind->u.pci.intx;
guest_gsi = hvm_pci_intx_gsi(device, intx);
link = hvm_pci_intx_link(device, intx);
- hvm_irq_dpci->link_cnt[link]--;
gdprintk(XENLOG_INFO,
"pt_irq_destroy_bind_vtd: machine_gsi=%d "
"guest_gsi=%d, device=%d, intx=%d.\n",
machine_gsi, guest_gsi, device, intx);
+ spin_lock(&d->evtchn_lock);
+
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if ( hvm_irq_dpci == NULL )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return -EINVAL;
+ }
+
+ hvm_irq_dpci->link_cnt[link]--;
memset(&hvm_irq_dpci->girq[guest_gsi], 0,
sizeof(struct hvm_girq_dpci_mapping));
/* clear the mirq info */
- if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+ if ( test_bit(machine_gsi, hvm_irq_dpci->mapping) )
{
list_for_each_safe ( digl_list, tmp,
&hvm_irq_dpci->mirq[machine_gsi].digl_list )
{
digl = list_entry(digl_list, struct dev_intx_gsi_link, list);
if ( (digl->device == device) && (digl->intx == intx) &&
(digl->gsi == guest_gsi) )
{
list_del(&digl->list);
xfree(digl);
}
}
if ( list_empty(&hvm_irq_dpci->mirq[machine_gsi].digl_list) )
{
pirq_guest_unbind(d, machine_gsi);
kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
hvm_irq_dpci->mirq[machine_gsi].flags = 0;
+ clear_bit(machine_gsi, hvm_irq_dpci->mapping);
}
}
-
+ spin_unlock(&d->evtchn_lock);
gdprintk(XENLOG_INFO,
"XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
machine_gsi, device, intx);
return 0;
}
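/*
 * Sketch of the symmetric teardown above: the same evtchn_lock covers the
 * dpci lookup, the timer teardown and the clear_bit() that releases the
 * slot, so a concurrent bind cannot observe a half-torn-down entry.
 * example_release_slot() is a hypothetical name.
 */
static void example_release_slot(struct domain *d, struct hvm_irq_dpci *dpci,
                                 unsigned int machine_gsi)
{
    spin_lock(&d->evtchn_lock);
    if ( test_bit(machine_gsi, dpci->mapping) )
    {
        kill_timer(&dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
        dpci->mirq[machine_gsi].dom = NULL;
        dpci->mirq[machine_gsi].flags = 0;
        clear_bit(machine_gsi, dpci->mapping);
    }
    spin_unlock(&d->evtchn_lock);
}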
int hvm_do_IRQ_dpci(struct domain *d, unsigned int mirq)
{
struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
+ ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
if ( !iommu_enabled || (d == dom0) || !dpci ||
- !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
+ !test_bit(mirq, dpci->mapping) )
return 0;
return 1;
}
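/*
 * Sketch of the precondition encoded by the ASSERT above: hvm_do_IRQ_dpci
 * runs on the IRQ path, so its caller already holds the irq_desc lock for
 * the vector; the ASSERT documents and enforces that instead of re-taking
 * the lock. example_irq_path_check() is a hypothetical name.
 */
static int example_irq_path_check(struct domain *d, unsigned int mirq)
{
    struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);

    ASSERT(spin_is_locked(&irq_desc[domain_irq_to_vector(d, mirq)].lock));
    /* The mapping bit is set before pirq_guest_bind() makes the IRQ
     * deliverable (see the bind-after-setup comment above), so this
     * lock-free read is safe on the IRQ path. */
    return (dpci != NULL) && test_bit(mirq, dpci->mapping);
}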
-
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
- int pirq;
- unsigned long flags;
irq_desc_t *desc;
+ int pirq;
if ( !iommu_enabled || (hvm_irq_dpci == NULL) )
return;
+ spin_lock(&d->evtchn_lock);
pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
- (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
- {
- int vec;
- vec = domain_irq_to_vector(d, pirq);
- desc = &irq_desc[vec];
-
- spin_lock_irqsave(&desc->lock, flags);
- desc->status &= ~IRQ_INPROGRESS;
- spin_unlock_irqrestore(&desc->lock, flags);
-
- pirq_guest_eoi(d, pirq);
- }
+ test_bit(pirq, hvm_irq_dpci->mapping) &&
+ (test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[pirq].flags)))
+ {
+ BUG_ON(!local_irq_is_enabled());
+ desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+ if ( !desc )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
+ desc->status &= ~IRQ_INPROGRESS;
+ spin_unlock_irq(&desc->lock);
+
+ pirq_guest_eoi(d, pirq);
+ }
+
+ spin_unlock(&d->evtchn_lock);
}
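/*
 * Sketch of the nested locking above: the evtchn_lock is taken first, then
 * the irq_desc lock via domain_spin_lock_irq_desc(), which may fail if the
 * pirq mapping has vanished; interrupts must be enabled on entry because the
 * desc lock is taken irq-safe. example_msi_eoi_core() is a hypothetical name.
 */
static void example_msi_eoi_core(struct domain *d, int pirq)
{
    irq_desc_t *desc;

    spin_lock(&d->evtchn_lock);             /* outer lock: dpci state */
    BUG_ON(!local_irq_is_enabled());
    desc = domain_spin_lock_irq_desc(d, pirq, NULL);
    if ( desc != NULL )
    {
        desc->status &= ~IRQ_INPROGRESS;    /* allow the next interrupt in */
        spin_unlock_irq(&desc->lock);
        pirq_guest_eoi(d, pirq);
    }
    spin_unlock(&d->evtchn_lock);
}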
void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
union vioapic_redir_entry *ent)
{
- struct hvm_irq_dpci *hvm_irq_dpci = domain_get_irq_dpci(d);
+ struct hvm_irq_dpci *hvm_irq_dpci = NULL;
uint32_t device, intx, machine_gsi;
- if ( !iommu_enabled || (hvm_irq_dpci == NULL) ||
- (guest_gsi >= NR_ISAIRQS &&
- !hvm_irq_dpci->girq[guest_gsi].valid) )
+ if ( !iommu_enabled )
return;
if ( guest_gsi < NR_ISAIRQS )
{
hvm_dpci_isairq_eoi(d, guest_gsi);
return;
}
- machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
+ spin_lock(&d->evtchn_lock);
+ hvm_irq_dpci = domain_get_irq_dpci(d);
+
+ if ( (hvm_irq_dpci == NULL) ||
+ (guest_gsi >= NR_ISAIRQS &&
+ !hvm_irq_dpci->girq[guest_gsi].valid) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
device = hvm_irq_dpci->girq[guest_gsi].device;
intx = hvm_irq_dpci->girq[guest_gsi].intx;
hvm_pci_intx_deassert(d, device, intx);
- spin_lock(&hvm_irq_dpci->dirq_lock);
+ machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
{
- spin_unlock(&hvm_irq_dpci->dirq_lock);
-
if ( (ent == NULL) || !ent->fields.mask )
{
+ /*
+ * No need to take the vector lock for the timer,
+ * since the interrupt has not been EOIed yet.
+ */
stop_timer(&hvm_irq_dpci->hvm_timer[
domain_irq_to_vector(d, machine_gsi)]);
pirq_guest_eoi(d, machine_gsi);
}
}
- else
- spin_unlock(&hvm_irq_dpci->dirq_lock);
+ spin_unlock(&d->evtchn_lock);
}
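/*
 * Sketch of the pending-count rule in hvm_dpci_eoi: several guest GSIs can
 * map to one machine GSI, so mirq[].pending counts outstanding injections
 * and only the final EOI stops the timeout timer and EOIs the machine IRQ.
 * example_shared_gsi_eoi() is a hypothetical name; assumes evtchn_lock held.
 */
static void example_shared_gsi_eoi(struct domain *d, struct hvm_irq_dpci *dpci,
                                   unsigned int machine_gsi)
{
    if ( --dpci->mirq[machine_gsi].pending == 0 )
    {
        /* last outstanding instance: the lost-EOI watchdog timer can stop */
        stop_timer(&dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
        pirq_guest_eoi(d, machine_gsi);
    }
}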