From: Keir Fraser
Date: Wed, 11 Mar 2009 10:09:21 +0000 (+0000)
Subject: passthrough: fix some spinlock issues in vmsi
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~13992^2~120
X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=f6be8835a30d90faebf7275c5be21ef8d8627be8;p=xen.git

passthrough: fix some spinlock issues in vmsi

Apart from efficiency, this also fixes an assertion failure.

- acquire pcidevs_lock before calling pt_irq_xxx_bind_vtd
- allocate msixtbl_entry beforehand
- check return value from domain_spin_lock_irq_desc()
- typo: spin_unlock(&irq_desc->lock) -> spin_unlock_irq(&irq_desc->lock)
- acquire msixtbl_list_lock with irq_disabled

Signed-off-by: Kouya Shimura
---

diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index e17197c19b..7f4d7dbbea 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -764,7 +764,11 @@ long arch_do_domctl(
 
         ret = -ESRCH;
         if ( iommu_enabled )
+        {
+            spin_lock(&pcidevs_lock);
             ret = pt_irq_create_bind_vtd(d, bind);
+            spin_unlock(&pcidevs_lock);
+        }
         if ( ret < 0 )
             gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
@@ -783,7 +787,11 @@ long arch_do_domctl(
             break;
         bind = &(domctl->u.bind_pt_irq);
         if ( iommu_enabled )
+        {
+            spin_lock(&pcidevs_lock);
             ret = pt_irq_destroy_bind_vtd(d, bind);
+            spin_unlock(&pcidevs_lock);
+        }
         if ( ret < 0 )
             gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
         rcu_unlock_domain(d);
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 1a0a45a6c9..37c1e5c14f 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -336,17 +336,13 @@ struct hvm_mmio_handler msixtbl_mmio_handler = {
     .write_handler = msixtbl_write
 };
 
-static struct msixtbl_entry *add_msixtbl_entry(struct domain *d,
-                                               struct pci_dev *pdev,
-                                               uint64_t gtable)
+static void add_msixtbl_entry(struct domain *d,
+                              struct pci_dev *pdev,
+                              uint64_t gtable,
+                              struct msixtbl_entry *entry)
 {
-    struct msixtbl_entry *entry;
     u32 len;
 
-    entry = xmalloc(struct msixtbl_entry);
-    if ( !entry )
-        return NULL;
-
     memset(entry, 0, sizeof(struct msixtbl_entry));
 
     INIT_LIST_HEAD(&entry->list);
@@ -359,8 +355,6 @@ static struct msixtbl_entry *add_msixtbl_entry(struct domain *d,
     entry->gtable = (unsigned long) gtable;
 
     list_add_rcu(&entry->list, &d->arch.hvm_domain.msixtbl_list);
-
-    return entry;
 }
 
 static void free_msixtbl_entry(struct rcu_head *rcu)
@@ -383,12 +377,25 @@ int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
     irq_desc_t *irq_desc;
     struct msi_desc *msi_desc;
     struct pci_dev *pdev;
-    struct msixtbl_entry *entry;
+    struct msixtbl_entry *entry, *new_entry;
     int r = -EINVAL;
 
     ASSERT(spin_is_locked(&pcidevs_lock));
 
+    /*
+     * xmalloc() with irq_disabled causes the failure of check_lock()
+     * for xenpool->lock. So we allocate an entry beforehand.
+     */
+    new_entry = xmalloc(struct msixtbl_entry);
+    if ( !new_entry )
+        return -ENOMEM;
+
     irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+    if ( !irq_desc )
+    {
+        xfree(new_entry);
+        return r;
+    }
 
     if ( irq_desc->handler != &pci_msi_type )
         goto out;
@@ -405,12 +412,9 @@ int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
         if ( pdev == entry->pdev )
             goto found;
 
-    entry = add_msixtbl_entry(d, pdev, gtable);
-    if ( !entry )
-    {
-        spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
-        goto out;
-    }
+    entry = new_entry;
+    new_entry = NULL;
+    add_msixtbl_entry(d, pdev, gtable, entry);
 
 found:
     atomic_inc(&entry->refcnt);
@@ -419,8 +423,8 @@ found:
 
 out:
     spin_unlock_irq(&irq_desc->lock);
+    xfree(new_entry);
     return r;
-
 }
 
 void msixtbl_pt_unregister(struct domain *d, int pirq)
@@ -433,6 +437,8 @@ void msixtbl_pt_unregister(struct domain *d, int pirq)
     ASSERT(spin_is_locked(&pcidevs_lock));
 
     irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
+    if ( !irq_desc )
+        return;
 
     if ( irq_desc->handler != &pci_msi_type )
         goto out;
@@ -453,7 +459,7 @@ void msixtbl_pt_unregister(struct domain *d, int pirq)
 
 out:
-    spin_unlock(&irq_desc->lock);
+    spin_unlock_irq(&irq_desc->lock);
     return;
 
 found:
@@ -461,13 +467,16 @@ found:
     del_msixtbl_entry(entry);
     spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
 
-    spin_unlock(&irq_desc->lock);
+    spin_unlock_irq(&irq_desc->lock);
 }
 
 void msixtbl_pt_cleanup(struct domain *d, int pirq)
 {
     struct msixtbl_entry *entry, *temp;
+    unsigned long flags;
 
+    /* msixtbl_list_lock must be acquired with irq_disabled for check_lock() */
+    local_irq_save(flags);
     spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);
 
     list_for_each_entry_safe( entry, temp,
@@ -475,4 +484,5 @@ void msixtbl_pt_cleanup(struct domain *d, int pirq)
         del_msixtbl_entry(entry);
 
     spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
+    local_irq_restore(flags);
 }
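
For readers outside the Xen tree, the core pattern the patch applies in
msixtbl_pt_register() is: pre-allocate the entry while allocation is still
permitted, enter the IRQ-disabling critical section, then either publish the
pre-allocated entry or free it on the exit path. The stand-alone sketch below
reproduces that pattern in user-space C with pthreads and malloc(); it is only
an analogue under those assumptions, and all names here (struct entry,
register_entry, entry_list, list_lock) are illustrative, not Xen APIs.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    struct entry *next;
    int key;
    int refcnt;
};

static struct entry *entry_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Register 'key', creating an entry if none exists.
 * Returns 0 on success, -1 if allocation failed. */
static int register_entry(int key)
{
    struct entry *e, *new_entry;

    /*
     * Allocate up front: inside the critical section we treat allocation
     * as forbidden, just as xmalloc() is once the irq_desc lock has
     * disabled interrupts in the real code.
     */
    new_entry = malloc(sizeof(*new_entry));
    if ( new_entry == NULL )
        return -1;
    memset(new_entry, 0, sizeof(*new_entry));
    new_entry->key = key;

    pthread_mutex_lock(&list_lock);

    /* Reuse an existing entry if one already matches ... */
    for ( e = entry_list; e != NULL; e = e->next )
        if ( e->key == key )
            goto found;

    /* ... otherwise consume the pre-allocated one. */
    e = new_entry;
    new_entry = NULL;
    e->next = entry_list;
    entry_list = e;

found:
    e->refcnt++;
    pthread_mutex_unlock(&list_lock);

    free(new_entry);    /* no-op when the entry was consumed above */
    return 0;
}

int main(void)
{
    /* Second call finds the existing entry and only bumps its refcount. */
    return register_entry(42) || register_entry(42);
}

As in the patched msixtbl_pt_register(), the single free()/xfree() on the
exit path is safe whether or not the entry was consumed, because the pointer
is set to NULL when ownership is transferred and freeing NULL is a no-op.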