break;
}
ret = 0;
+ spin_lock(&pcidevs_lock);
ret = deassign_device(d, bus, devfn);
+ spin_unlock(&pcidevs_lock);
gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
struct msi_desc *msi_desc;
struct pci_dev *pdev = NULL;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
if ( !IS_PRIV(current->domain) )
if ( !IS_PRIV(current->domain) )
return -EINVAL;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
vector = d->arch.pirq_vector[pirq];
{
int i;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
spin_lock(&d->event_lock);
for ( i = 0; i < NR_IRQS; i++ )
unmap_domain_pirq(d, i);
spin_unlock(&d->event_lock);
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
}
extern void dump_ioapic_irq_info(void);
u8 slot = PCI_SLOT(dev->devfn);
u8 func = PCI_FUNC(dev->devfn);
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
/* MSI Entry Initialization */
u8 slot = PCI_SLOT(dev->devfn);
u8 func = PCI_FUNC(dev->devfn);
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(desc);
pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
int status;
struct pci_dev *pdev;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(msi->bus, msi->devfn);
if ( !pdev )
return -ENODEV;
u8 slot = PCI_SLOT(msi->devfn);
u8 func = PCI_FUNC(msi->devfn);
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(msi->bus, msi->devfn);
if ( !pdev )
return -ENODEV;
*/
int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
{
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
return msi->table_base ? __pci_enable_msix(msi, desc) :
__pci_enable_msi(msi, desc);
goto free_domain;
}
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
/* Verify or get pirq. */
spin_lock(&d->event_lock);
if ( map->pirq < 0 )
done:
spin_unlock(&d->event_lock);
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
free_irq_vector(vector);
free_domain:
if ( d == NULL )
return -ESRCH;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, unmap->pirq);
spin_unlock(&d->event_lock);
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
rcu_unlock_domain(d);
irq_op.vector = assign_irq_vector(irq);
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
spin_lock(&dom0->event_lock);
ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
MAP_PIRQ_TYPE_GSI, NULL);
spin_unlock(&dom0->event_lock);
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
if ( copy_to_guest(arg, &irq_op, 1) != 0 )
ret = -EFAULT;
u32 l;
int bdf;
- write_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
for ( bus = 0; bus < 256; bus++ )
{
for ( dev = 0; dev < 32; dev++ )
}
}
}
- write_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
}
int amd_iov_detect(void)
struct amd_iommu *iommu;
int bdf;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
pdev = pci_get_pdev_by_domain(source, bus, devfn);
if ( !pdev )
{
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return -ENODEV;
}
if ( !iommu )
{
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
amd_iov_error("Fail to find iommu."
" %x:%x.%x cannot be assigned to domain %d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id);
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return 0;
}
if ( !pdev->domain )
return -EINVAL;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
hd = domain_hvm_iommu(pdev->domain);
if ( !iommu_enabled || !hd->platform_ops )
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
goto done;
goto done;
}
done:
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return rc;
}
return hd->platform_ops->unmap_page(d, gfn);
}
-int deassign_device(struct domain *d, u8 bus, u8 devfn)
+/* Caller should hold pcidevs_lock. */
+int deassign_device(struct domain *d, u8 bus, u8 devfn)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct pci_dev *pdev = NULL;
if ( !iommu_enabled || !hd->platform_ops )
return -EINVAL;
- read_lock(&pcidevs_lock);
+ ASSERT(spin_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(bus, devfn);
if (!pdev)
- {
- read_unlock(&pcidevs_lock);
return -ENODEV;
- }
if (pdev->domain != d)
{
- read_unlock(&pcidevs_lock);
gdprintk(XENLOG_ERR VTDPREFIX,
"IOMMU: deassign a device not owned\n");
- return -EINVAL;
+ return -EINVAL;
}
hd->platform_ops->reassign_device(d, dom0, bus, devfn);
hd->platform_ops->teardown(d);
}
- read_unlock(&pcidevs_lock);
-
return 0;
}
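Since the patch moves the locking out of deassign_device() and into its callers (as the XEN_DOMCTL_deassign_device hunk above shows), the expected calling pattern is roughly the following. This is a minimal sketch, not part of the patch; the helper name is hypothetical and the surrounding domctl plumbing is assumed.

/* Sketch only: the caller takes pcidevs_lock, which deassign_device()
 * now merely ASSERTs, and drops it after the device has been handed
 * back to dom0. */
static int example_deassign(struct domain *d, u8 bus, u8 devfn)
{
    int ret;

    spin_lock(&pcidevs_lock);
    ret = deassign_device(d, bus, devfn);
    spin_unlock(&pcidevs_lock);

    return ret;
}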
group_id = ops->get_device_group_id(bus, devfn);
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
for_each_pdev( d, pdev )
{
if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
bdf |= (pdev->devfn & 0xff) << 8;
if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
{
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return -1;
}
i++;
}
}
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return i;
}
LIST_HEAD(alldevs_list);
-rwlock_t pcidevs_lock = RW_LOCK_UNLOCKED;
+spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;
struct pci_dev *alloc_pdev(u8 bus, u8 devfn)
{
{
struct pci_dev *pdev = NULL;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
if ( (pdev->bus == bus || bus == -1) &&
{
struct pci_dev *pdev = NULL;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
if ( (pdev->bus == bus || bus == -1) &&
struct pci_dev *pdev;
int ret = -ENOMEM;
- write_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
pdev = alloc_pdev(bus, devfn);
if ( !pdev )
goto out;
}
out:
- write_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
printk(XENLOG_DEBUG "PCI add device %02x:%02x.%x\n", bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
return ret;
struct pci_dev *pdev;
    int ret = -ENODEV;
- write_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
if ( pdev->bus == bus && pdev->devfn == devfn )
{
break;
}
- write_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return ret;
}
struct pci_dev *pdev;
u8 bus, devfn;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
pci_clean_dpci_irqs(d);
while ( (pdev = pci_get_pdev_by_domain(d, -1, -1)) )
{
bus = pdev->bus; devfn = pdev->devfn;
deassign_device(d, bus, devfn);
}
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
}
#ifdef SUPPORT_MSI_REMAPPING
struct msi_desc *msi;
printk("==== PCI devices ====\n");
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
{
printk(">\n");
}
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
}
static int __init setup_dump_pcidevs(void)
struct pci_dev *pdev = NULL;
int agaw;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
if ( !drhd )
return -ENODEV;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
type = pdev_type(bus, devfn);
switch ( type )
struct context_entry *context, *context_entries;
u64 maddr;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
struct iommu *pdev_iommu;
int ret, found = 0;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
pdev = pci_get_pdev_by_domain(source, bus, devfn);
if (!pdev)
if ( list_empty(&acpi_drhd_units) )
return;
- ASSERT(rw_is_locked(&pcidevs_lock));
spin_lock(&hd->mapping_lock);
iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
hd->pgd_maddr = 0;
u64 base, end;
unsigned long base_pfn, end_pfn;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
ASSERT(rmrr->base_address < rmrr->end_address);
base = rmrr->base_address & PAGE_MASK_4K;
u16 bdf;
int ret, i;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
if ( !pdev->domain )
return -EINVAL;
hd = domain_hvm_iommu(d);
- write_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
for ( bus = 0; bus < 256; bus++ )
{
for ( dev = 0; dev < 32; dev++ )
}
}
}
- write_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
}
void clear_fault_bits(struct iommu *iommu)
u16 bdf;
int ret, i;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
for_each_rmrr_device ( rmrr, bdf, i )
{
ret = iommu_prepare_rmrr_dev(d, rmrr, PCI_BUS(bdf), PCI_DEVFN2(bdf));
gdprintk(XENLOG_ERR VTDPREFIX,
"IOMMU: mapping reserved region failed\n");
}
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
}
int intel_vtd_setup(void)
{
struct pci_dev *pdev;
- read_lock(&pcidevs_lock);
+ spin_lock(&pcidevs_lock);
pdev = pci_get_pdev_by_domain(dom0, bus, devfn);
if (!pdev)
{
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return -1;
}
- read_unlock(&pcidevs_lock);
+ spin_unlock(&pcidevs_lock);
return 0;
}
if ( list_empty(&acpi_drhd_units) )
return -ENODEV;
- ASSERT(rw_is_locked(&pcidevs_lock));
+ ASSERT(spin_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(bus, devfn);
if (!pdev)
return -ENODEV;
list_for_each_entry(pdev, &(domain->arch.pdev_list), domain_list)
/*
- * The pcidevs_lock write-lock must be held when doing alloc_pdev() or
- * free_pdev(). Never de-reference pdev without holding pdev->lock or
- * pcidevs_lock. Always aquire pcidevs_lock before pdev->lock when
- * doing free_pdev().
+ * The pcidevs_lock protects alldevs_list and the assignment of devices
+ * to domains; it also synchronizes accesses to the MSI capability that
+ * are not interrupt-handling related (the mask bit register).
*/
-extern rwlock_t pcidevs_lock;
+extern spinlock_t pcidevs_lock;
struct pci_dev *alloc_pdev(u8 bus, u8 devfn);
void free_pdev(struct pci_dev *pdev);
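For reference, a minimal sketch of the lookup pattern the new spinlock implies. The helper name is hypothetical; pci_get_pdev() and pdev->domain are used here exactly as they appear elsewhere in this patch.

/* Sketch only: pcidevs_lock covers alldevs_list and device ownership,
 * so both the lookup and the pdev->domain check stay under the lock. */
static int example_device_owned_by(struct domain *d, u8 bus, u8 devfn)
{
    struct pci_dev *pdev;
    int owned;

    spin_lock(&pcidevs_lock);
    pdev = pci_get_pdev(bus, devfn);   /* ASSERTs spin_is_locked() */
    owned = (pdev != NULL) && (pdev->domain == d);
    spin_unlock(&pcidevs_lock);

    return owned;
}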