     } init;
 } hpet_sbdf;
+extern unsigned int amd_iommu_acpi_info;
 extern int amd_iommu_min_paging_mode;
 extern void *shared_intremap_table;
 static inline bool_t is_ivhd_block(u8 type)
 {
     return (type == ACPI_IVRS_TYPE_HARDWARE ||
-            type == ACPI_IVRS_TYPE_HARDWARE_11H);
+            ((amd_iommu_acpi_info & ACPI_IVRS_EFR_SUP) &&
+             type == ACPI_IVRS_TYPE_HARDWARE_11H));
 }
 static inline bool_t is_ivmd_block(u8 type)
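[For reference, the two block types distinguished above, as named in Xen's copy of ACPICA's actbl2.h; a sketch from memory, with values following the IVHD type 10h/11h encoding of the AMD IOMMU spec. Type 11h blocks carry an EFR register image, which is presumably why they are only treated as IVHD blocks once IVinfo advertises EFRSup:

    /* Sketch, not part of the patch: IVHD block type values assumed above. */
    enum acpi_ivrs_type {
        ACPI_IVRS_TYPE_HARDWARE = 0x10,     /* IVHD type 10h */
        ACPI_IVRS_TYPE_HARDWARE_11H = 0x11, /* IVHD type 11h, carries EFR image */
    };
]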
         ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length);
         if ( table->length < (length + ivrs_block->length) )
             return -ENODEV;
-        if ( ivrs_block->type == ACPI_IVRS_TYPE_HARDWARE &&
+        if ( ivrs_block->type == ivhd_type &&
              amd_iommu_detect_one_acpi(to_ivhd_block(ivrs_block)) != 0 )
             return -ENODEV;
         length += ivrs_block->length;
         return -ENODEV;
     }
+    amd_iommu_acpi_info = container_of(table, const struct acpi_table_ivrs,
+                                       header)->info;
+
     while ( table->length > (length + sizeof(*ivrs_block)) )
     {
         ivrs_block = (struct acpi_ivrs_header *)((u8 *)table + length);
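[The container_of() above relies on ACPICA's IVRS table layout, where the common ACPI header is the first member, immediately followed by the IVinfo dword now cached in amd_iommu_acpi_info. A sketch of that definition (see actbl2.h; not part of the patch):

    struct acpi_table_ivrs {
        struct acpi_table_header header; /* common ACPI table header */
        u32 info;                        /* IVinfo: EFRSup, GVA/PA/VA sizes */
        u64 reserved;
    };
]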
     const struct amd_iommu *first;
     ASSERT( iommu->mmio_base );
-    if ( !iommu_has_cap(iommu, PCI_CAP_EFRSUP_SHIFT) )
+    if ( !(amd_iommu_acpi_info & ACPI_IVRS_EFR_SUP) )
     {
-        iommu->features.raw = 0;
-        return;
-    }
+        if ( !iommu_has_cap(iommu, PCI_CAP_EFRSUP_SHIFT) )
+            return;
-    iommu->features.raw =
-        readq(iommu->mmio_base + IOMMU_EXT_FEATURE_MMIO_OFFSET);
+        iommu->features.raw =
+            readq(iommu->mmio_base + IOMMU_EXT_FEATURE_MMIO_OFFSET);
+    }
     /* Don't log the same set of features over and over. */
     first = list_first_entry(&amd_iommu_head, struct amd_iommu, list);
     iommu->cap_offset = ivhd_block->capability_offset;
     iommu->mmio_base_phys = ivhd_block->base_address;
+    if ( ivhd_type != ACPI_IVRS_TYPE_HARDWARE )
+        iommu->features.raw = ivhd_block->efr_image;
+    else if ( amd_iommu_acpi_info & ACPI_IVRS_EFR_SUP )
+    {
+        union {
+            uint32_t raw;
+            struct {
+                unsigned int xt_sup:1;
+                unsigned int nx_sup:1;
+                unsigned int gt_sup:1;
+                unsigned int glx_sup:2;
+                unsigned int ia_sup:1;
+                unsigned int ga_sup:1;
+                unsigned int he_sup:1;
+                unsigned int pas_max:5;
+                unsigned int pn_counters:4;
+                unsigned int pn_banks:6;
+                unsigned int msi_num_ppr:5;
+                unsigned int gats:2;
+                unsigned int hats:2;
+            };
+        } attr = { .raw = ivhd_block->iommu_attr };
+
+        iommu->features.flds.xt_sup = attr.xt_sup;
+        iommu->features.flds.nx_sup = attr.nx_sup;
+        iommu->features.flds.gt_sup = attr.gt_sup;
+        iommu->features.flds.glx_sup = attr.glx_sup;
+        iommu->features.flds.ia_sup = attr.ia_sup;
+        iommu->features.flds.ga_sup = attr.ga_sup;
+        iommu->features.flds.he_sup = attr.he_sup;
+        iommu->features.flds.pas_max = attr.pas_max;
+        iommu->features.flds.gats = attr.gats;
+        iommu->features.flds.hats = attr.hats;
+    }
+    else if ( list_empty(&amd_iommu_head) )
+        AMD_IOMMU_DEBUG("EFRSup not set in ACPI table; will fall back to hardware\n");
+
     /* override IOMMU HT flags */
     iommu->ht_flags = ivhd_block->header.flags;
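[The field-by-field copy above is needed because the type 10h "IOMMU attributes" dword lays out its EFR subset differently from the MMIO EFR register, so the raw value cannot be assigned wholesale. Purely as an illustration, the same decode could use explicit masks with Xen's MASK_EXTR() helper; the IVHD10_* mask names here are hypothetical, chosen to match the bit-field layout above:

    #define IVHD10_ATTR_XT_SUP   0x00000001 /* bit 0 */
    #define IVHD10_ATTR_GLX_SUP  0x00000018 /* bits 3-4 */
    #define IVHD10_ATTR_PAS_MAX  0x00001F00 /* bits 8-12 */

    iommu->features.flds.xt_sup = MASK_EXTR(ivhd_block->iommu_attr,
                                            IVHD10_ATTR_XT_SUP);
    iommu->features.flds.glx_sup = MASK_EXTR(ivhd_block->iommu_attr,
                                             IVHD10_ATTR_GLX_SUP);
    iommu->features.flds.pas_max = MASK_EXTR(ivhd_block->iommu_attr,
                                             IVHD10_ATTR_PAS_MAX);

The bit-field overlay used by the patch keeps the decode closer to the spec's field names, at the cost of relying on the LSB-first bit-field ordering assumed throughout Xen.
]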
 static void do_amd_iommu_irq(void *data);
 static DECLARE_SOFTIRQ_TASKLET(amd_iommu_irq_tasklet, do_amd_iommu_irq, NULL);
+unsigned int __read_mostly amd_iommu_acpi_info;
 unsigned int __read_mostly ivrs_bdf_entries;
 u8 __read_mostly ivhd_type;
 static struct radix_tree_root ivrs_maps;
     get_iommu_features(iommu);
-    if ( iommu->features.raw )
-        iommuv2_enabled = true;
-
     return 0;
 }
         has_xt = false;
     }
+    if ( ivhd_type != ACPI_IVRS_TYPE_HARDWARE )
+        iommuv2_enabled = true;
+
     for_each_amd_iommu ( iommu )
     {
         /* NB: There's no need to actually write these out right here. */
 /* Values for Info field above */
+#define ACPI_IVRS_EFR_SUP               0x00000001 /* extended feature support */
+#define ACPI_IVRS_PREBOOT_DMA_REMAP     0x00000002 /* pre-boot DMA remapping in use */
+#define ACPI_IVRS_GVA_SIZE              0x000000E0 /* 3 bits, guest VA size */
 #define ACPI_IVRS_PHYSICAL_SIZE         0x00007F00 /* 7 bits, physical address size */
 #define ACPI_IVRS_VIRTUAL_SIZE          0x003F8000 /* 7 bits, virtual address size */
 #define ACPI_IVRS_ATS_RESERVED          0x00400000 /* ATS address translation range reserved */
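[As a usage sketch (not part of the patch), the Info masks decode the IVinfo dword like so, e.g. with Xen's MASK_EXTR():

    uint32_t info = amd_iommu_acpi_info;                         /* cached IVinfo word */
    bool efr_sup = info & ACPI_IVRS_EFR_SUP;                     /* EFR image valid? */
    unsigned int gva = MASK_EXTR(info, ACPI_IVRS_GVA_SIZE);      /* encoded guest VA size */
    unsigned int pas = MASK_EXTR(info, ACPI_IVRS_PHYSICAL_SIZE); /* PA size, in bits */
]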