struct msi_desc *msi_desc;
struct pci_dev *pdev = NULL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
if ( !IS_PRIV(current->domain) )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
ASSERT(spin_is_locked(&d->event_lock));
vector = d->arch.pirq_vector[pirq];
u8 slot = PCI_SLOT(dev->devfn);
u8 func = PCI_FUNC(dev->devfn);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSI);
control = pci_conf_read16(bus, slot, func, msi_control_reg(pos));
/* MSI Entry Initialization */
u8 slot = PCI_SLOT(dev->devfn);
u8 func = PCI_FUNC(dev->devfn);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
ASSERT(desc);
pos = pci_find_cap_offset(bus, slot, func, PCI_CAP_ID_MSIX);
int status;
struct pci_dev *pdev;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(msi->bus, msi->devfn);
if ( !pdev )
return -ENODEV;
u8 slot = PCI_SLOT(msi->devfn);
u8 func = PCI_FUNC(msi->devfn);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(msi->bus, msi->devfn);
if ( !pdev )
return -ENODEV;
*/
int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
{
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
return msi->table_base ? __pci_enable_msix(msi, desc) :
__pci_enable_msi(msi, desc);
_raw_write_unlock(&lock->raw);
local_irq_restore(flags);
}
+
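+/*
+ * Best-effort debug query: non-zero when the rwlock is currently held
+ * for reading or writing. Intended for ASSERT()-style checks only, not
+ * for making locking decisions.
+ */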
+int _rw_is_locked(rwlock_t *lock)
+{
+ check_lock(&lock->debug);
+ return _raw_rw_is_locked(&lock->raw);
+}
if ( !pdev->domain )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
hd = domain_hvm_iommu(pdev->domain);
if ( !iommu_enabled || !hd->platform_ops )
{
struct pci_dev *pdev = NULL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
if ( (pdev->bus == bus || bus == -1) &&
{
struct pci_dev *pdev = NULL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
list_for_each_entry ( pdev, &alldevs_list, alldevs_list )
if ( (pdev->bus == bus || bus == -1) &&
struct pci_dev *pdev = NULL;
int agaw;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
if ( !drhd )
return -ENODEV;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
type = pdev_type(bus, devfn);
switch ( type )
struct context_entry *context, *context_entries;
u64 maddr;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
struct iommu *pdev_iommu;
int ret, found = 0;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
pdev = pci_get_pdev_by_domain(source, bus, devfn);
if (!pdev)
if ( list_empty(&acpi_drhd_units) )
return;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
spin_lock(&hd->mapping_lock);
iommu_free_pagetable(hd->pgd_maddr, agaw_to_level(hd->agaw));
hd->pgd_maddr = 0;
u64 base, end;
unsigned long base_pfn, end_pfn;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
ASSERT(rmrr->base_address < rmrr->end_address);
base = rmrr->base_address & PAGE_MASK_4K;
u16 bdf;
int ret, i;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
if ( !pdev->domain )
return -EINVAL;
if ( list_empty(&acpi_drhd_units) )
return -ENODEV;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(rw_is_locked(&pcidevs_lock));
pdev = pci_get_pdev(bus, devfn);
if (!pdev)
return -ENODEV;
clear_bit(31, (x)); \
})
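+/* The rwlock word is zero only when no reader or writer holds the lock. */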
+#define _raw_rw_is_locked(x) (*(int *)(x) != 0)
+
#endif /* _ASM_IA64_SPINLOCK_H */
#define _RAW_SPIN_LOCK_UNLOCKED /*(raw_spinlock_t)*/ { 1 }
-#define _raw_spin_is_locked(x) \
- (*(volatile char *)(&(x)->lock) <= 0)
+#define _raw_spin_is_locked(x) ((x)->lock <= 0)
static always_inline void _raw_spin_lock(raw_spinlock_t *lock)
{
"lock ; addl $" RW_LOCK_BIAS_STR ",%0" : \
"=m" ((rw)->lock) : : "memory" )
+#define _raw_rw_is_locked(x) ((x)->lock < RW_LOCK_BIAS)
+
#endif /* __ASM_SPINLOCK_H */
void _write_unlock_irq(rwlock_t *lock);
void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+int _rw_is_locked(rwlock_t *lock);
+
#define spin_lock(l) _spin_lock(l)
#define spin_lock_irq(l) _spin_lock_irq(l)
#define spin_lock_irqsave(l, f) ((f) = _spin_lock_irqsave(l))
#define spin_unlock_irq(l) _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f) _spin_unlock_irqrestore(l, f)
-#define spin_is_locked(l) _raw_spin_is_locked(&(l)->raw)
+#define spin_is_locked(l) _spin_is_locked(l)
#define spin_trylock(l) _spin_trylock(l)
/* Ensure a lock is quiescent between two critical operations. */
#define write_unlock_irq(l) _write_unlock_irq(l)
#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)
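+/* Debug-only check: true if the rwlock is held for reading or writing. */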
+#define rw_is_locked(l) _rw_is_locked(l)
+
#endif /* __SPINLOCK_H__ */