msi_free_vectors(pdev);
}
+/*
+ * Re-program the MSI state (message address/data and mask bit) for every
+ * MSI vector previously configured on @pdev, e.g. after the device lost
+ * its config space across an S3 resume.
+ *
+ * Caller must hold pcidevs_lock.  Returns 0 on success, -EINVAL if @pdev
+ * is NULL or an irq descriptor no longer points at its msi_desc.
+ */
+int pci_restore_msi_state(struct pci_dev *pdev)
+{
+ unsigned long flags;
+ int vector;
+ struct msi_desc *entry, *tmp;
+ irq_desc_t *desc;
+
+ ASSERT(spin_is_locked(&pcidevs_lock));
+
+ if (!pdev)
+ return -EINVAL;
+
+ list_for_each_entry_safe( entry, tmp, &pdev->msi_list, list )
+ {
+ vector = entry->vector;
+ desc = &irq_desc[vector];
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ /*
+ * Runtime check rather than ASSERT: an ASSERT on this condition
+ * would bring down a debug build before the graceful -EINVAL
+ * recovery below could run, while doing nothing in release builds.
+ */
+ if (desc->msi_desc != entry)
+ {
+ dprintk(XENLOG_ERR, "Restore MSI for dev %x:%x not set before?\n",
+ pdev->bus, pdev->devfn);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return -EINVAL;
+ }
+
+ /* Disable MSI while rewriting the message so the device never sees
+ * a transient, mismatched address/data pair. */
+ msi_set_enable(pdev, 0);
+ write_msi_msg(entry, &entry->msg);
+
+ msi_set_enable(pdev, 1);
+ msi_set_mask_bit(vector, entry->msi_attrib.masked);
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+ return 0;
+}
break;
}
+ /* Restore a device's MSI configuration (PHYSDEVOP_restore_msi),
+ * typically invoked by dom0 after resuming from S3. */
+ case PHYSDEVOP_restore_msi: {
+ struct physdev_restore_msi restore_msi;
+ struct pci_dev *pdev;
+
+ /* Only a privileged domain may rewrite device interrupt state. */
+ ret = -EPERM;
+ if ( !IS_PRIV(v->domain) )
+ break;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&restore_msi, arg, 1) != 0 )
+ break;
+
+ /* pci_restore_msi_state() asserts pcidevs_lock is held; it also
+ * keeps the pdev lookup and the restore atomic w.r.t. removal. */
+ spin_lock(&pcidevs_lock);
+ pdev = pci_get_pdev(restore_msi.bus, restore_msi.devfn);
+ ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
+ spin_unlock(&pcidevs_lock);
+ break;
+ }
default:
ret = -ENOSYS;
break;
extern int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
extern void teardown_msi_vector(int vector);
extern int msi_free_vector(struct msi_desc *entry);
+extern int pci_restore_msi_state(struct pci_dev *pdev);
struct msi_desc {
struct {
typedef struct physdev_manage_pci physdev_manage_pci_t;
DEFINE_XEN_GUEST_HANDLE(physdev_manage_pci_t);
+#define PHYSDEVOP_restore_msi 19
+/*
+ * Argument to PHYSDEVOP_restore_msi: identifies, by bus and devfn, the PCI
+ * device whose MSI state should be re-programmed.  This is guest-visible
+ * ABI; the layout must not change.
+ */
+struct physdev_restore_msi {
+ /* IN */
+ uint8_t bus;
+ uint8_t devfn;
+};
+typedef struct physdev_restore_msi physdev_restore_msi_t;
+DEFINE_XEN_GUEST_HANDLE(physdev_restore_msi_t);
+
/*
* Argument to physdev_op_compat() hypercall. Superceded by new physdev_op()
* hypercall since 0x00030202.