cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg |= HPET_TN_ENABLE;
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
- ch->msi.msi_attrib.masked = 0;
+ ch->msi.msi_attrib.host_masked = 0;
}
static void hpet_msi_mask(struct irq_desc *desc)
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg &= ~HPET_TN_ENABLE;
hpet_write32(cfg, HPET_Tn_CFG(ch->idx));
- ch->msi.msi_attrib.masked = 1;
+ ch->msi.msi_attrib.host_masked = 1;
}
static int hpet_msi_write(struct hpet_event_channel *ch, struct msi_msg *msg)
{
unsigned long offset;
struct msixtbl_entry *entry;
- void *virt;
unsigned int nr_entry, index;
int r = X86EMUL_UNHANDLEABLE;
}
if ( offset == PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
{
- virt = msixtbl_addr_to_virt(entry, address);
+ const struct msi_desc *msi_desc;
+ void *virt = msixtbl_addr_to_virt(entry, address);
+
if ( !virt )
goto out;
+ msi_desc = virt_to_msi_desc(entry->pdev, virt);
+ if ( !msi_desc )
+ goto out;
if ( len == 4 )
- *pval = readl(virt);
+ *pval = MASK_INSR(msi_desc->msi_attrib.guest_masked,
+ PCI_MSIX_VECTOR_BITMASK);
else
- *pval |= (u64)readl(virt) << 32;
+ *pval |= (u64)MASK_INSR(msi_desc->msi_attrib.guest_masked,
+ PCI_MSIX_VECTOR_BITMASK) << 32;
}
r = X86EMUL_OKAY;
void *virt;
unsigned int nr_entry, index;
int r = X86EMUL_UNHANDLEABLE;
- unsigned long flags, orig;
+ unsigned long flags;
struct irq_desc *desc;
if ( (len != 4 && len != 8) || (address & (len - 1)) )
ASSERT(msi_desc == desc->msi_desc);
- orig = readl(virt);
-
- /*
- * Do not allow guest to modify MSI-X control bit if it is masked
- * by Xen. We'll only handle the case where Xen thinks that
- * bit is unmasked, but hardware has silently masked the bit
- * (in case of SR-IOV VF reset, etc). On the other hand, if Xen
- * thinks that the bit is masked, but it's really not,
- * we log a warning.
- */
- if ( msi_desc->msi_attrib.masked )
- {
- if ( !(orig & PCI_MSIX_VECTOR_BITMASK) )
- printk(XENLOG_WARNING "MSI-X control bit is unmasked when"
- " it is expected to be masked [%04x:%02x:%02x.%u]\n",
- entry->pdev->seg, entry->pdev->bus,
- PCI_SLOT(entry->pdev->devfn),
- PCI_FUNC(entry->pdev->devfn));
-
- goto unlock;
- }
-
- /*
- * The mask bit is the only defined bit in the word. But we
- * ought to preserve the reserved bits. Clearing the reserved
- * bits can result in undefined behaviour (see PCI Local Bus
- * Specification revision 2.3).
- */
- val &= PCI_MSIX_VECTOR_BITMASK;
- val |= (orig & ~PCI_MSIX_VECTOR_BITMASK);
- writel(val, virt);
+ guest_mask_msi_irq(desc, !!(val & PCI_MSIX_VECTOR_BITMASK));
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
|| entry->msi_attrib.maskbit;
}
-static void msi_set_mask_bit(struct irq_desc *desc, int flag)
+static void msi_set_mask_bit(struct irq_desc *desc, bool_t host, bool_t guest)
{
struct msi_desc *entry = desc->msi_desc;
+ bool_t flag = host || guest;
ASSERT(spin_is_locked(&desc->lock));
BUG_ON(!entry || !entry->dev);
BUG();
break;
}
- entry->msi_attrib.masked = !!flag;
+ entry->msi_attrib.host_masked = host;
+ entry->msi_attrib.guest_masked = guest;
}
static int msi_get_mask_bit(const struct msi_desc *entry)
/*
 * Mask an MSI(-X) vector on behalf of the host (Xen).
 *
 * The guest's own masking state is preserved: the vector is physically
 * masked if either the host or the guest wants it masked, which
 * msi_set_mask_bit() computes from the two flags.
 */
void mask_msi_irq(struct irq_desc *desc)
{
    msi_set_mask_bit(desc, 1, desc->msi_desc->msi_attrib.guest_masked);
}
/*
 * Unmask an MSI(-X) vector on behalf of the host (Xen).
 *
 * Only the host-side flag is cleared; if the guest still has the vector
 * masked, msi_set_mask_bit() keeps it physically masked.
 */
void unmask_msi_irq(struct irq_desc *desc)
{
    msi_set_mask_bit(desc, 0, desc->msi_desc->msi_attrib.guest_masked);
}
+
+void guest_mask_msi_irq(struct irq_desc *desc, bool_t mask)
+{
+ msi_set_mask_bit(desc, desc->msi_desc->msi_attrib.host_masked, mask);
}
static unsigned int startup_msi_irq(struct irq_desc *desc)
{
- unmask_msi_irq(desc);
+ bool_t guest_masked = (desc->status & IRQ_GUEST) &&
+ is_hvm_domain(desc->msi_desc->dev->domain);
+
+ msi_set_mask_bit(desc, 0, guest_masked);
return 0;
}
/*
 * hw_irq_controller .shutdown hook for maskable MSI(-X): mask the vector
 * for both host and guest so a torn-down IRQ cannot fire for either.
 */
static void shutdown_msi_irq(struct irq_desc *desc)
{
    msi_set_mask_bit(desc, 1, 1);
}
void ack_nonmaskable_msi_irq(struct irq_desc *desc)
{
irq_complete_move(desc);
static hw_irq_controller pci_msi_maskable = {
.typename = "PCI-MSI/-X",
.startup = startup_msi_irq,
- .shutdown = mask_msi_irq,
+ .shutdown = shutdown_msi_irq,
.enable = unmask_msi_irq,
.disable = mask_msi_irq,
.ack = ack_maskable_msi_irq,
entry[i].msi_attrib.is_64 = is_64bit_address(control);
entry[i].msi_attrib.entry_nr = i;
entry[i].msi_attrib.maskbit = is_mask_bit_support(control);
- entry[i].msi_attrib.masked = 1;
+ entry[i].msi_attrib.host_masked = 1;
+ entry[i].msi_attrib.guest_masked = 0;
entry[i].msi_attrib.pos = pos;
if ( entry[i].msi_attrib.maskbit )
entry[i].msi.mpos = mpos;
entry->msi_attrib.is_64 = 1;
entry->msi_attrib.entry_nr = msi->entry_nr;
entry->msi_attrib.maskbit = 1;
- entry->msi_attrib.masked = 1;
+ entry->msi_attrib.host_masked = 1;
+ entry->msi_attrib.guest_masked = 1;
entry->msi_attrib.pos = pos;
entry->irq = msi->irq;
entry->dev = dev;
for ( i = 0; ; )
{
- msi_set_mask_bit(desc, entry[i].msi_attrib.masked);
+ msi_set_mask_bit(desc, entry[i].msi_attrib.host_masked,
+ entry[i].msi_attrib.guest_masked);
if ( !--nr )
break;
else
mask = '?';
printk(" %-6s%4u vec=%02x%7s%6s%3sassert%5s%7s"
- " dest=%08x mask=%d/%d/%c\n",
+ " dest=%08x mask=%d/%c%c/%c\n",
type, irq,
(data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT,
data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",
- dest32, attr.maskbit, attr.masked, mask);
+ dest32, attr.maskbit,
+ attr.host_masked ? 'H' : ' ',
+ attr.guest_masked ? 'G' : ' ',
+ mask);
}
}
spin_lock_irqsave(&iommu->lock, flags);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
spin_unlock_irqrestore(&iommu->lock, flags);
- iommu->msi.msi_attrib.masked = 0;
+ iommu->msi.msi_attrib.host_masked = 0;
}
static void iommu_msi_mask(struct irq_desc *desc)
spin_lock_irqsave(&iommu->lock, flags);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
spin_unlock_irqrestore(&iommu->lock, flags);
- iommu->msi.msi_attrib.masked = 1;
+ iommu->msi.msi_attrib.host_masked = 1;
}
static unsigned int iommu_msi_startup(struct irq_desc *desc)
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- iommu->msi.msi_attrib.masked = 0;
+ iommu->msi.msi_attrib.host_masked = 0;
}
static void dma_msi_mask(struct irq_desc *desc)
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- iommu->msi.msi_attrib.masked = 1;
+ iommu->msi.msi_attrib.host_masked = 1;
}
static unsigned int dma_msi_startup(struct irq_desc *desc)
struct msi_desc {
struct msi_attrib {
- __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 masked : 1;
+ __u8 type; /* {0: unused, 5h:MSI, 11h:MSI-X} */
+ __u8 pos; /* Location of the MSI capability */
+ __u8 maskbit : 1; /* mask/pending bit supported ? */
__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
- __u16 entry_nr; /* specific enabled entry */
+ __u8 host_masked : 1;
+ __u8 guest_masked : 1;
+ __u16 entry_nr; /* specific enabled entry */
} msi_attrib;
struct list_head list;
void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable);
void mask_msi_irq(struct irq_desc *);
void unmask_msi_irq(struct irq_desc *);
+void guest_mask_msi_irq(struct irq_desc *, bool_t mask);
void ack_nonmaskable_msi_irq(struct irq_desc *);
void end_nonmaskable_msi_irq(struct irq_desc *, u8 vector);
void set_msi_affinity(struct irq_desc *, const cpumask_t *);