Introduce a new flag to disable IOMMU emulation on old IOMMU systems.
This patch is taken from my v4 patch queue, which is still pending, to
allow old or non-IOMMU systems to run cleanly without interference from
the IOMMUv2 code. This might also be helpful for isolating the IOMMUv2
code when debugging unstable regressions. The remaining parts of v4
will be re-based.
Signed-off-by: Wei Wang <wei.wang2@amd.com>
Committed-by: Keir Fraser <keir@xen.org>
p2m_type_t t;
struct guest_iommu *iommu = domain_iommu(d);
+ if ( !is_hvm_domain(d) || !iommu_enabled || !iommuv2_enabled )
+ return 0;
+
if ( !iommu )
return -EACCES;
struct guest_iommu *iommu;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- if ( !is_hvm_domain(d) )
+ if ( !is_hvm_domain(d) || !iommu_enabled || !iommuv2_enabled )
return 0;
iommu = xzalloc(struct guest_iommu);
{
struct guest_iommu *iommu;
- if ( !is_hvm_domain(d) )
+ if ( !is_hvm_domain(d) || !iommu_enabled || !iommuv2_enabled )
return;
iommu = domain_iommu(d);
static struct radix_tree_root ivrs_maps;
struct list_head amd_iommu_head;
struct table_struct device_table;
+bool_t iommuv2_enabled;
static int iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
{
get_iommu_features(iommu);
+ if ( iommu->features )
+ iommuv2_enabled = 1;
+
if ( allocate_cmd_buffer(iommu) == NULL )
goto error_out;
iommu_enabled = 0;
iommu_passthrough = 0;
iommu_intremap = 0;
+ iommuv2_enabled = 0;
}
/*
struct guest_iommu_msi msi;
};
+extern bool_t iommuv2_enabled;
+
#endif /* _ASM_X86_64_AMD_IOMMU_H */