LIST_HEAD(acpi_atsr_units);
LIST_HEAD(acpi_rhsa_units);
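+/* Register base of the DRHD unit whose device scope covers the IGD (from the DMAR). */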
+static u64 igd_drhd_address;
u8 dmar_host_address_width;
void dmar_scope_add_buses(struct dmar_scope *scope, u16 sec_bus, u16 sub_bus)
return NULL;
}
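+/* Does this DRHD unit cover the integrated graphics device (IGD)? */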
+int is_igd_drhd(struct acpi_drhd_unit *drhd)
+{
+ return ( drhd->address == igd_drhd_address ? 1 : 0 );
+}
+
/*
* Count number of devices in device scope. Do not include PCI sub
* hierarchies.
static int __init acpi_parse_dev_scope(void *start, void *end,
- void *acpi_entry, int type)
+ void *acpi_entry, int type,
+ int *igd)
{
struct dmar_scope *scope = acpi_entry;
struct acpi_ioapic_unit *acpi_ioapic_unit;
if ( iommu_verbose )
dprintk(VTDPREFIX, " endpoint: %x:%x.%x\n",
bus, path->dev, path->fn);
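+ /* Note whether the IGD (always at 0:2.0) appears in this device scope. */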
+ if ( (bus == 0) && (path->dev == 2) && (path->fn == 0) )
+ *igd = 1;
break;
case ACPI_DEV_IOAPIC:
struct acpi_table_drhd * drhd = (struct acpi_table_drhd *)header;
void *dev_scope_start, *dev_scope_end;
struct acpi_drhd_unit *dmaru;
- int ret;
+ int ret, igd = 0;
static int include_all = 0;
if ( (ret = acpi_dmar_check_length(header, sizeof(*drhd))) != 0 )
dev_scope_start = (void *)(drhd + 1);
dev_scope_end = ((void *)drhd) + header->length;
ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
- dmaru, DMAR_TYPE);
+ dmaru, DMAR_TYPE, &igd);
+
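+ /* Remember which DRHD unit covers the IGD so its VT-d engine can be identified later. */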
+ if ( igd )
+ igd_drhd_address = dmaru->address;
if ( dmaru->include_all )
{
struct acpi_rmrr_unit *rmrru;
void *dev_scope_start, *dev_scope_end;
u64 base_addr = rmrr->base_address, end_addr = rmrr->end_address;
- int ret;
+ int ret, igd = 0;
if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 )
return ret;
dev_scope_start = (void *)(rmrr + 1);
dev_scope_end = ((void *)rmrr) + header->length;
ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
- rmrru, RMRR_TYPE);
+ rmrru, RMRR_TYPE, &igd);
if ( ret || (rmrru->scope.devices_cnt == 0) )
xfree(rmrru);
{
struct acpi_table_atsr *atsr = (struct acpi_table_atsr *)header;
struct acpi_atsr_unit *atsru;
- int ret;
+ int ret, igd = 0;
static int all_ports;
void *dev_scope_start, *dev_scope_end;
dev_scope_start = (void *)(atsr + 1);
dev_scope_end = ((void *)atsr) + header->length;
ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
- atsru, ATSR_TYPE);
+ atsru, ATSR_TYPE, &igd);
}
else
{
return 0;
}
-static void iommu_enable_translation(struct iommu *iommu)
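+/*
+ * GGC: graphics control register in the IGD's PCI config space; the
+ * VT enable bit reports whether the BIOS set up the IGD for VT-d.
+ */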
+#define GGC 0x52
+#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
+static int is_igd_vt_enabled(void)
+{
+ unsigned short ggc;
+
+ /* The integrated graphics device on Intel platforms is always at 0:2.0. */
+ ggc = pci_conf_read16(0, 2, 0, GGC);
+ return ( ggc & GGC_MEMORY_VT_ENABLED ? 1 : 0 );
+}
+
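+/*
+ * Enable DMA remapping for one DRHD unit.  Takes the DRHD (rather than the
+ * bare IOMMU) so the IGD check below can identify the IGD's VT-d engine.
+ */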
+static void iommu_enable_translation(struct acpi_drhd_unit *drhd)
{
u32 sts;
unsigned long flags;
+ struct iommu *iommu = drhd->iommu;
+
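+ /* Skip the IGD's VT-d engine if the BIOS did not set up the IGD for VT
+  * properly; when force_iommu is set this is fatal instead. */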
+ if ( !is_igd_vt_enabled() && is_igd_drhd(drhd) )
+ {
+ if ( force_iommu )
+ panic("BIOS did not enable IGD for VT properly, crash Xen for security purpose!\n");
+ else
+ {
+ dprintk(XENLOG_WARNING VTDPREFIX,
+ "BIOS did not enable IGD for VT properly. Disabling IGD VT-d engine.\n");
+ return;
+ }
+ }
if ( iommu_verbose )
dprintk(VTDPREFIX,
static void intel_iommu_dom0_init(struct domain *d)
{
- struct iommu *iommu;
struct acpi_drhd_unit *drhd;
if ( !iommu_passthrough && !need_iommu(d) )
for_each_drhd_unit ( drhd )
{
- iommu = drhd->iommu;
- iommu_enable_translation(iommu);
+ iommu_enable_translation(drhd);
}
}
(u32) iommu_state[i][DMAR_FEUADDR_REG]);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- iommu_enable_translation(iommu);
+ iommu_enable_translation(drhd);
}
}