int enable_qinval(struct iommu *iommu);
void disable_qinval(struct iommu *iommu);
-int enable_intremap(struct iommu *iommu);
+int enable_intremap(struct iommu *iommu, int eim);
void disable_intremap(struct iommu *iommu);
int queue_invalidate_context(struct iommu *iommu,
u16 did, u16 source_id, u8 function_mask, u8 granu);
}
#endif
-int enable_intremap(struct iommu *iommu)
+int enable_intremap(struct iommu *iommu, int eim)
{
struct acpi_drhd_unit *drhd;
struct ir_ctrl *ir_ctrl;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
ir_ctrl = iommu_ir_ctrl(iommu);
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+
+ /* Return if already enabled by Xen */
+ if ( (sts & DMA_GSTS_IRES) && ir_ctrl->iremap_maddr )
+ return 0;
+
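+ /*
+ * The VT-d spec requires queued invalidation to be enabled before
+ * interrupt remapping: interrupt entry cache (IEC) flushes can only
+ * be issued through the invalidation queue.
+ */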
+ if ( !(sts & DMA_GSTS_QIES) )
+ {
+ dprintk(XENLOG_ERR VTDPREFIX,
+ "Queued invalidation is not enabled, should not enable "
+ "interrupt remapping\n");
+ return -EINVAL;
+ }
+
if ( ir_ctrl->iremap_maddr == 0 )
{
drhd = iommu_to_drhd(iommu);
- ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR );
+ ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR);
if ( ir_ctrl->iremap_maddr == 0 )
{
dprintk(XENLOG_WARNING VTDPREFIX,
#ifdef CONFIG_X86
/* set extended interrupt mode bit */
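+ /* EIM selects 32-bit x2APIC destination IDs in the remapping table. */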
ir_ctrl->iremap_maddr |=
- x2apic_enabled ? (1 << IRTA_REG_EIME_SHIFT) : 0;
+ eim ? (1 << IRTA_REG_EIME_SHIFT) : 0;
#endif
spin_lock_irqsave(&iommu->register_lock, flags);
u32 sts;
unsigned long flags;
- ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
+ if ( !ecap_intr_remap(iommu->ecap) )
+ return;
spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
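+ /* Already disabled; nothing to do. */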
+ if ( !(sts & DMA_GSTS_IRES) )
+ goto out;
+
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
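+ /* Make sure hardware completes the disable */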
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_IRES), sts);
+out:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
spin_unlock_irqrestore(&iommu->register_lock, flags);
-
- /* initialize flush functions */
- flush = iommu_get_flush(iommu);
- flush->context = flush_context_reg;
- flush->iotlb = flush_iotlb_reg;
}
- if ( iommu_qinval )
+ for_each_drhd_unit ( drhd )
{
- for_each_drhd_unit ( drhd )
+ iommu = drhd->iommu;
+ /*
+ * If queued invalidation is not enabled, fall back to register-based
+ * invalidation.
+ */
+ if ( enable_qinval(iommu) != 0 )
{
- iommu = drhd->iommu;
- if ( enable_qinval(iommu) != 0 )
- {
- dprintk(XENLOG_INFO VTDPREFIX,
- "Failed to enable Queued Invalidation!\n");
- break;
- }
+ flush = iommu_get_flush(iommu);
+ flush->context = flush_context_reg;
+ flush->iotlb = flush_iotlb_reg;
}
}
for_each_drhd_unit ( drhd )
{
iommu = drhd->iommu;
- if ( enable_intremap(iommu) != 0 )
+ if ( enable_intremap(iommu, 0) != 0 )
{
- dprintk(XENLOG_INFO VTDPREFIX,
+ dprintk(XENLOG_WARNING VTDPREFIX,
"Failed to enable Interrupt Remapping!\n");
break;
}
u32 sts;
unsigned long flags;
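+ /* Bail out if the hardware lacks queued invalidation or it is globally disabled. */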
+ if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval )
+ return -ENOENT;
+
qi_ctrl = iommu_qi_ctrl(iommu);
flush = iommu_get_flush(iommu);
- ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+ /* Return if already enabled by Xen */
+ sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
+ if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr )
+ return 0;
if ( qi_ctrl->qinval_maddr == 0 )
{
u32 sts;
unsigned long flags;
- ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+ if ( !ecap_queued_inval(iommu->ecap) )
+ return;
spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
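+ /* Already disabled; nothing to do. */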
+ if ( !(sts & DMA_GSTS_QIES) )
+ goto out;
+
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
!(sts & DMA_GSTS_QIES), sts);
+out:
spin_unlock_irqrestore(&iommu->register_lock, flags);
}