             {
                 if ( !shared_intremap_table )
                     shared_intremap_table = amd_iommu_alloc_intremap_table(
-                        iommu, &shared_intremap_inuse);
+                        iommu, &shared_intremap_inuse, 0);
                 if ( !shared_intremap_table )
                     panic("No memory for shared IRT\n");
             {
                 ivrs_mappings[alias_id].intremap_table =
                     amd_iommu_alloc_intremap_table(
-                        iommu, &ivrs_mappings[alias_id].intremap_inuse);
+                        iommu, &ivrs_mappings[alias_id].intremap_inuse, 0);
                 if ( !ivrs_mappings[alias_id].intremap_table )
                     panic("No memory for %04x:%02x:%02x.%u's IRT\n",
             pcidevs_unlock();
         }
-            if ( pdev )
+            if ( pdev && (pdev->msix || pdev->msi_maxvec) )
             {
                 ivrs_mappings[bdf].intremap_table =
                     amd_iommu_alloc_intremap_table(
                         ivrs_mappings[bdf].iommu,
-                        &ivrs_mappings[bdf].intremap_inuse);
+                        &ivrs_mappings[bdf].intremap_inuse,
+                        pdev->msix ? pdev->msix->nr_entries
+                                   : pdev->msi_maxvec);
                 if ( !ivrs_mappings[bdf].intremap_table )
                     return -ENOMEM;
             }
             amd_iommu_set_intremap_table(
-                dte,
-                ivrs_mappings[bdf].intremap_table
-                ? virt_to_maddr(ivrs_mappings[bdf].intremap_table)
-                : 0,
-                iommu_intremap);
+                dte, ivrs_mappings[bdf].intremap_table,
+                ivrs_mappings[bdf].iommu, iommu_intremap);
         }
     }
     const union irte128 *ptr128;
 } __transparent__;
 
-#define INTREMAP_MAX_ENTRIES (1 << IOMMU_INTREMAP_ORDER)
+#define INTREMAP_MAX_ORDER 0xB
+#define INTREMAP_MAX_ENTRIES (1 << INTREMAP_MAX_ORDER)
 
 struct ioapic_sbdf ioapic_sbdf[MAX_IO_APICS];
 struct hpet_sbdf hpet_sbdf;
 static void dump_intremap_tables(unsigned char key);
 
-static unsigned int __init intremap_table_order(const struct amd_iommu *iommu)
-{
-    return iommu->ctrl.ga_en
-           ? get_order_from_bytes(INTREMAP_MAX_ENTRIES * sizeof(union irte128))
-           : get_order_from_bytes(INTREMAP_MAX_ENTRIES * sizeof(union irte32));
-}
+#define intremap_page_order(irt) PFN_ORDER(virt_to_page(irt))
 
 unsigned int amd_iommu_intremap_table_order(
     const void *irt, const struct amd_iommu *iommu)
 {
-    return IOMMU_INTREMAP_ORDER;
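+    /*
+     * order + PAGE_SHIFT is log2 of the table size in bytes; 4 resp. 2 is
+     * log2(sizeof(union irte128)) resp. log2(sizeof(union irte32)), i.e.
+     * the result is log2 of the number of entries.
+     */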
+    return intremap_page_order(irt) + PAGE_SHIFT -
+           (iommu->ctrl.ga_en ? 4 : 2);
 }
 static unsigned int intremap_table_entries(
     if ( *tblp )
     {
-        __free_amd_iommu_tables(*tblp, intremap_table_order(iommu));
+        unsigned int order = intremap_page_order(*tblp);
+
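+        /* Clear the stashed order before handing the pages back. */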
+        intremap_page_order(*tblp) = 0;
+        __free_amd_iommu_tables(*tblp, order);
         *tblp = NULL;
     }
 }
 void *amd_iommu_alloc_intremap_table(
-    const struct amd_iommu *iommu, unsigned long **inuse_map)
+    const struct amd_iommu *iommu, unsigned long **inuse_map, unsigned int nr)
 {
-    unsigned int order = intremap_table_order(iommu);
-    void *tb = __alloc_amd_iommu_tables(order);
+    unsigned int order;
+    void *tb;
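+
+    /* nr == 0 requests a full-size (INTREMAP_MAX_ENTRIES-entry) table. */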
+    if ( !nr )
+        nr = INTREMAP_MAX_ENTRIES;
+
+    order = iommu->ctrl.ga_en
+            ? get_order_from_bytes(nr * sizeof(union irte128))
+            : get_order_from_bytes(nr * sizeof(union irte32));
+
+    tb = __alloc_amd_iommu_tables(order);
 
     if ( tb )
     {
-        unsigned int nr = intremap_table_entries(tb, iommu);
-
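+        /* Stash the order in the page for later size queries. */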
+        intremap_page_order(tb) = order;
+        nr = intremap_table_entries(tb, iommu);
         *inuse_map = xzalloc_array(unsigned long, BITS_TO_LONGS(nr));
         if ( *inuse_map )
             memset(tb, 0, PAGE_SIZE << order);
     }
 void __init amd_iommu_set_intremap_table(
-    struct amd_iommu_dte *dte, uint64_t intremap_ptr, bool valid)
+    struct amd_iommu_dte *dte, const void *ptr,
+    const struct amd_iommu *iommu, bool valid)
 {
-    dte->it_root = intremap_ptr >> 6;
-    dte->int_tab_len = intremap_ptr ? IOMMU_INTREMAP_ORDER : 0;
-    dte->int_ctl = intremap_ptr ? IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED
-                                : IOMMU_DEV_TABLE_INT_CONTROL_ABORTED;
+    if ( ptr )
+    {
+        dte->it_root = virt_to_maddr(ptr) >> 6;
+        dte->int_tab_len = amd_iommu_intremap_table_order(ptr, iommu);
+        dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED;
+    }
+    else
+    {
+        dte->it_root = 0;
+        dte->int_tab_len = 0;
+        dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_ABORTED;
+    }
+
     dte->ig = false; /* unmapped interrupts result in i/o page faults */
     dte->iv = valid;
 }
     {
         unsigned long flags;
 
-        ivrs_mappings[bdf].intremap_table =
-            amd_iommu_alloc_intremap_table(
-                iommu, &ivrs_mappings[bdf].intremap_inuse);
-        if ( !ivrs_mappings[bdf].intremap_table )
-            return -ENOMEM;
+        if ( pdev->msix || pdev->msi_maxvec )
+        {
+            ivrs_mappings[bdf].intremap_table =
+                amd_iommu_alloc_intremap_table(
+                    iommu, &ivrs_mappings[bdf].intremap_inuse,
+                    pdev->msix ? pdev->msix->nr_entries
+                               : pdev->msi_maxvec);
+            if ( !ivrs_mappings[bdf].intremap_table )
+                return -ENOMEM;
+        }
 
         spin_lock_irqsave(&iommu->lock, flags);
 
         amd_iommu_set_intremap_table(
             iommu->dev_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE),
-            virt_to_maddr(ivrs_mappings[bdf].intremap_table),
-            iommu_intremap);
+            ivrs_mappings[bdf].intremap_table, iommu, iommu_intremap);
 
         amd_iommu_flush_device(iommu, bdf);
 #define IOMMU_DEV_TABLE_INT_CONTROL_FORWARDED 0x1
 #define IOMMU_DEV_TABLE_INT_CONTROL_TRANSLATED 0x2
 
-/* For now, we always allocate the maximum: 2048 entries. */
-#define IOMMU_INTREMAP_ORDER 0xB
-
 struct amd_iommu_dte {
     /* 0 - 63 */
     bool v:1;
 /* device table functions */
 int get_dma_requestor_id(uint16_t seg, uint16_t bdf);
 void amd_iommu_set_intremap_table(struct amd_iommu_dte *dte,
-                                  uint64_t intremap_ptr,
+                                  const void *ptr,
+                                  const struct amd_iommu *iommu,
                                   bool valid);
 void amd_iommu_set_root_page_table(struct amd_iommu_dte *dte,
                                    uint64_t root_ptr, uint16_t domain_id,
 bool iov_supports_xt(void);
 int amd_iommu_setup_ioapic_remapping(void);
 void *amd_iommu_alloc_intremap_table(
-    const struct amd_iommu *, unsigned long **);
+    const struct amd_iommu *, unsigned long **, unsigned int nr);
 int amd_iommu_free_intremap_table(
     const struct amd_iommu *, struct ivrs_mappings *, uint16_t);
 unsigned int amd_iommu_intremap_table_order(