     entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 
     /* Setup HT flags */
-    iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT) ?
-        iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT):
-        iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT);
+    if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) )
+        iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_HT_TUN_ENB_SHIFT) ?
+            iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT) :
+            iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT);
 
     iommu_has_ht_flag(iommu, AMD_IOMMU_ACPI_RES_PASS_PW_SHIFT) ?
         iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT):
         return;
     }
 
-    if ( !iommu->iotlb_support )
+    if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
         return;
 
     req_id = get_dma_requestor_id(iommu->seg, bdf);
                                       hd->paging_mode, valid);
 
         if ( pci_ats_device(iommu->seg, bus, devfn) &&
-             iommu->iotlb_support )
+             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             iommu_dte_set_iotlb((u32 *)dte, dte_i);
 
         invalidate_dev_table_entry(iommu, req_id);
         disable_translation((u32 *)dte);
 
         if ( pci_ats_device(iommu->seg, bus, devfn) &&
-             iommu->iotlb_support )
+             iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
             iommu_dte_set_iotlb((u32 *)dte, 0);
 
         invalidate_dev_table_entry(iommu, req_id);
     u16 cap_offset;
     iommu_cap_t cap;
 
-    u8 pte_not_present_cached;
-    u8 ht_tunnel_support;
-    u8 iotlb_support;
-
     u8 ht_flags;
     void *mmio_base;
     return get_field_from_reg_u32(reg, 1U << bit, bit);
 }
 
+static inline int iommu_has_cap(struct amd_iommu *iommu, uint32_t bit)
+{
+    u32 mask = 1U << bit;
+
+    return !!(iommu->cap.header & mask);
+}
+
 #endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
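
For context, here is a minimal, self-contained sketch of the capability test the new iommu_has_cap() helper performs. The struct layout and the PCI_CAP_*_SHIFT bit positions below are illustrative placeholders, not the real Xen definitions; only the masking logic mirrors the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins -- the real iommu_cap_t, struct amd_iommu and
 * PCI_CAP_*_SHIFT values live in Xen's AMD IOMMU headers. */
typedef struct { uint32_t header; } iommu_cap_t;
struct amd_iommu { iommu_cap_t cap; };

#define PCI_CAP_IOTLB_SHIFT     24U /* placeholder bit position */
#define PCI_CAP_HT_TUNNEL_SHIFT 25U /* placeholder bit position */

/* Same masking logic as the helper added by the patch: test one bit of
 * the cached PCI capability header and normalise the result to 0/1. */
static inline int iommu_has_cap(struct amd_iommu *iommu, uint32_t bit)
{
    return !!(iommu->cap.header & (1U << bit));
}

int main(void)
{
    /* Pretend the capability header advertises IOTLB but not HT tunnel. */
    struct amd_iommu iommu = { .cap = { .header = 1U << PCI_CAP_IOTLB_SHIFT } };

    printf("iotlb support:     %d\n", iommu_has_cap(&iommu, PCI_CAP_IOTLB_SHIFT));
    printf("ht tunnel support: %d\n", iommu_has_cap(&iommu, PCI_CAP_HT_TUNNEL_SHIFT));
    return 0;
}

This also illustrates the apparent motivation for the patch: the removed pte_not_present_cached, ht_tunnel_support and iotlb_support fields were cached copies that had to be kept in sync with the hardware-reported capability header, whereas deriving the answer from cap.header at each call site removes that duplication.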