amd iommu: add interrupt remapping support
authorKeir Fraser <keir.fraser@citrix.com>
Fri, 11 Jul 2008 11:48:45 +0000 (12:48 +0100)
committerKeir Fraser <keir.fraser@citrix.com>
Fri, 11 Jul 2008 11:48:45 +0000 (12:48 +0100)
Signed-off-by: Wei Wang <wei.wang2@amd.com>
xen/drivers/passthrough/amd/Makefile
xen/drivers/passthrough/amd/iommu_intr.c [new file with mode: 0644]
xen/drivers/passthrough/amd/iommu_map.c
xen/drivers/passthrough/amd/pci_amd_iommu.c
xen/include/asm-x86/hvm/svm/amd-iommu-defs.h
xen/include/asm-x86/hvm/svm/amd-iommu-proto.h

index bb67c933ceea5dc27d28320977b725abfeaf180f..48bd305c06f88387058e21afe36928342d2cf151 100644 (file)
@@ -3,3 +3,4 @@ obj-y += iommu_init.o
 obj-y += iommu_map.o
 obj-y += pci_amd_iommu.o
 obj-y += iommu_acpi.o
+obj-y += iommu_intr.o
diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
new file mode 100644 (file)
index 0000000..5dfdff0
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ * Author: Wei Wang <wei.wang2@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <xen/sched.h>
+#include <xen/hvm/iommu.h>
+#include <asm/amd-iommu.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
+
+DEFINE_SPINLOCK(int_remap_table_lock);
+void *int_remap_table = NULL;
+
+u8* get_intremap_entry(u8 vector, u8 dm)
+{
+    u8 *table;
+    int offset = 0;
+    table = (u8*)int_remap_table;
+
+    BUG_ON( !table );
+    offset = (dm << INT_REMAP_INDEX_DM_SHIFT) & INT_REMAP_INDEX_DM_MASK;
+    offset |= (vector << INT_REMAP_INDEX_VECTOR_SHIFT ) & 
+        INT_REMAP_INDEX_VECTOR_MASK;
+
+    return (u8*) (table + offset);
+}
+
+static void update_intremap_entry(u32* entry, u8 vector, u8 int_type,
+    u8 dest_mode, u8 dest)
+{
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
+                            INT_REMAP_ENTRY_REMAPEN_MASK,
+                            INT_REMAP_ENTRY_REMAPEN_SHIFT, entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
+                            INT_REMAP_ENTRY_SUPIOPF_MASK,
+                            INT_REMAP_ENTRY_SUPIOPF_SHIFT, entry);
+    set_field_in_reg_u32(int_type, *entry,
+                            INT_REMAP_ENTRY_INTTYPE_MASK,
+                            INT_REMAP_ENTRY_INTTYPE_SHIFT, entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
+                            INT_REMAP_ENTRY_REQEOI_MASK,
+                            INT_REMAP_ENTRY_REQEOI_SHIFT, entry);
+    set_field_in_reg_u32((u32)dest_mode, *entry,
+                            INT_REMAP_ENTRY_DM_MASK,
+                            INT_REMAP_ENTRY_DM_SHIFT, entry);
+    set_field_in_reg_u32((u32)dest, *entry,
+                            INT_REMAP_ENTRY_DEST_MAST,
+                            INT_REMAP_ENTRY_DEST_SHIFT, entry);
+    set_field_in_reg_u32((u32)vector, *entry,
+                            INT_REMAP_ENTRY_VECTOR_MASK,
+                            INT_REMAP_ENTRY_VECTOR_SHIFT, entry);
+}
+
+void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id)
+{
+    u32 cmd[4], entry;
+
+    cmd[3] = cmd[2] = 0;
+    set_field_in_reg_u32(device_id, 0,
+                         IOMMU_INV_INT_TABLE_DEVICE_ID_MASK,
+                         IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry);
+    cmd[0] = entry;
+    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0,
+                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
+                         &entry);
+    cmd[1] = entry;
+    send_iommu_command(iommu, cmd);
+}
+
+static void update_intremap_entry_from_ioapic(
+    struct IO_APIC_route_entry *ioapic_rte,
+    unsigned int rte_upper, unsigned int value)
+{
+    unsigned long flags;
+    u32* entry;
+    u8 delivery_mode, dest, vector, dest_mode;
+    struct IO_APIC_route_entry *rte = ioapic_rte;
+
+    spin_lock_irqsave(&int_remap_table_lock, flags);
+
+    if ( rte_upper )
+    {
+        dest = (value >> 24) & 0xFF;
+        delivery_mode = rte->delivery_mode;
+        vector = rte->vector;
+        dest_mode = rte->dest_mode;
+        entry = (u32*)get_intremap_entry((u8)rte->vector,
+                                        (u8)rte->delivery_mode);
+        update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    }
+
+    spin_unlock_irqrestore(&int_remap_table_lock, flags);
+    return;
+}
+
+int amd_iommu_setup_intremap_table(void)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&int_remap_table_lock, flags);
+    if ( int_remap_table == NULL )
+        int_remap_table = (void *)alloc_xenheap_pages(1);
+    if ( !int_remap_table )
+    {
+        spin_unlock_irqrestore(&int_remap_table_lock, flags);
+        return -ENOMEM;
+    }
+    memset((u8*)int_remap_table, 0, PAGE_SIZE*2);
+    spin_unlock_irqrestore(&int_remap_table_lock, flags);
+
+    return 0;
+}
+
+void amd_iommu_ioapic_update_ire(
+    unsigned int apic, unsigned int reg, unsigned int value)
+{
+    struct IO_APIC_route_entry ioapic_rte = { 0 };
+    unsigned int rte_upper = (reg & 1) ? 1 : 0;
+    int saved_mask;
+
+    *IO_APIC_BASE(apic) = reg;
+    *(IO_APIC_BASE(apic)+4) = value;
+
+    if ( int_remap_table == NULL )
+        return;
+    if ( !rte_upper )
+        return;
+
+    reg--;
+    /* read both lower and upper 32-bits of rte entry */
+    *IO_APIC_BASE(apic) = reg;
+    *(((u32 *)&ioapic_rte) + 0) = *(IO_APIC_BASE(apic)+4);
+    *IO_APIC_BASE(apic) = reg + 1;
+    *(((u32 *)&ioapic_rte) + 1) = *(IO_APIC_BASE(apic)+4);
+
+    /* mask the interrupt while we change the intremap table */
+    saved_mask = ioapic_rte.mask;
+    ioapic_rte.mask = 1;
+    *IO_APIC_BASE(apic) = reg;
+    *(IO_APIC_BASE(apic)+4) = *(((int *)&ioapic_rte)+0);
+    ioapic_rte.mask = saved_mask;
+
+    update_intremap_entry_from_ioapic(&ioapic_rte, rte_upper, value);
+
+    /* unmask the interrupt after we have updated the intremap table */
+    *IO_APIC_BASE(apic) = reg;
+    *(IO_APIC_BASE(apic)+4) = *(((u32 *)&ioapic_rte)+0);
+}
+
+static void update_intremap_entry_from_msi_msg(
+    struct amd_iommu *iommu, struct pci_dev *pdev, struct msi_msg *msg)
+{
+    unsigned long flags;
+    u32* entry;
+    u16 dev_id;
+
+    u8 delivery_mode, dest, vector, dest_mode;
+
+    dev_id = (pdev->bus << 8) | pdev->devfn;
+
+    spin_lock_irqsave(&int_remap_table_lock, flags);
+    dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+    delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
+    vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
+    dest = (msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff;
+
+    entry = (u32*)get_intremap_entry((u8)vector, (u8)delivery_mode);
+    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    spin_unlock_irqrestore(&int_remap_table_lock, flags);
+
+    spin_lock_irqsave(&iommu->lock, flags);
+    invalidate_interrupt_table(iommu, dev_id);
+    flush_command_buffer(iommu);
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+    return;
+}
+
+void amd_iommu_msi_msg_update_ire(
+    struct msi_desc *msi_desc, struct msi_msg *msg)
+{
+    struct pci_dev *pdev = msi_desc->dev;
+    struct amd_iommu *iommu = NULL;
+
+    iommu = find_iommu_for_device(pdev->bus, pdev->devfn);
+
+    if ( !iommu || !int_remap_table )
+        return;
+
+    update_intremap_entry_from_msi_msg(iommu, pdev, msg);
+}
index 50c9ef66a70e0feaf48450573421ba130b9ea4e1..75961558a3ee60a93a59a1f56c9a1062634d77cb 100644 (file)
@@ -235,13 +235,56 @@ static void amd_iommu_set_page_directory_entry(u32 *pde,
     pde[0] = entry;
 }
 
-void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id,
-                                   u8 sys_mgt, u8 dev_ex, u8 paging_mode)
+void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
+                                   u16 domain_id, u8 sys_mgt, u8 dev_ex,
+                                   u8 paging_mode)
 {
     u64 addr_hi, addr_lo;
     u32 entry;
 
-    dte[7] = dte[6] = dte[5] = dte[4] = 0;
+    dte[7] = dte[6] = 0;
+
+    addr_lo = intremap_ptr & DMA_32BIT_MASK;
+    addr_hi = intremap_ptr >> 32;
+
+    set_field_in_reg_u32((u32)addr_hi, 0,
+                        IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK,
+                        IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                        IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK,
+                        IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                        IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK,
+                        IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                        IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK,
+                        IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT, &entry);
+    /* Fixed and arbitrated interrupts remapped */
+    set_field_in_reg_u32(2, entry,
+                        IOMMU_DEV_TABLE_INT_CONTROL_MASK,
+                        IOMMU_DEV_TABLE_INT_CONTROL_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                        IOMMU_DEV_TABLE_LINT0_ENABLE_MASK,
+                        IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                        IOMMU_DEV_TABLE_LINT1_ENABLE_MASK,
+                        IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT, &entry);
+    dte[5] = entry;
+
+    set_field_in_reg_u32((u32)addr_lo >> 6, 0,
+                        IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK,
+                        IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT, &entry);
+    /* 2048 entries */
+    set_field_in_reg_u32(0xB, entry,
+                         IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK,
+                         IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_INT_VALID_MASK,
+                         IOMMU_DEV_TABLE_INT_VALID_SHIFT, &entry);
+    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
+                         IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry);
+    dte[4] = entry;
 
     set_field_in_reg_u32(sys_mgt, 0,
                          IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK,
index d908063b26d30b998b67ba155a8a3cd051b376fb..bc3f4748508d7d83404d0b8f6ce3b7155aea4edb 100644 (file)
@@ -34,6 +34,7 @@ int nr_amd_iommus;
 
 unsigned short ivrs_bdf_entries;
 struct ivrs_mappings *ivrs_mappings;
+extern void *int_remap_table;
 
 static void deallocate_domain_page_tables(struct hvm_iommu *hd)
 {
@@ -256,12 +257,13 @@ static void amd_iommu_setup_domain_device(
 {
     void *dte;
     u64 root_ptr;
+    u64 intremap_ptr;
     unsigned long flags;
     int req_id;
     u8 sys_mgt, dev_ex;
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
-    BUG_ON( !hd->root_table || !hd->paging_mode );
+    BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );
 
     root_ptr = (u64)virt_to_maddr(hd->root_table);
     /* get device-table entry */
@@ -269,6 +271,8 @@ static void amd_iommu_setup_domain_device(
     dte = iommu->dev_table.buffer +
         (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
 
+    intremap_ptr = (u64)virt_to_maddr(int_remap_table);
+
     if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
     {
         spin_lock_irqsave(&iommu->lock, flags); 
@@ -276,11 +280,12 @@ static void amd_iommu_setup_domain_device(
         /* bind DTE to domain page-tables */
         sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
         dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
-        amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr,
+        amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr, intremap_ptr,
                                       hd->domain_id, sys_mgt, dev_ex,
                                       hd->paging_mode);
 
         invalidate_dev_table_entry(iommu, req_id);
+        invalidate_interrupt_table(iommu, req_id);
         flush_command_buffer(iommu);
         amd_iov_info("Enable DTE:0x%x, "
                 "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
@@ -365,6 +370,12 @@ int amd_iov_detect(void)
     memset(ivrs_mappings, 0,
            ivrs_bdf_entries * sizeof(struct ivrs_mappings));
 
+    if ( amd_iommu_setup_intremap_table() != 0 )
+    {
+        amd_iov_error("Error allocating interrupt remapping table\n");
+        goto error_out;
+    }
+
     if ( amd_iommu_init() != 0 )
     {
         amd_iov_error("Error initialization\n");
index e4e58a727784dad1e199cbfc808ec10e93d8db36..be801b7ea9e9f6a575e0c8dff2a94c4404d182db 100644 (file)
 #define IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT       8
 
 /* DeviceTable Entry[159:128] */
-#define IOMMU_DEV_TABLE_INT_VALID_MASK                 0x00000001
-#define IOMMU_DEV_TABLE_INT_VALID_SHIFT                        0
-#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK          0x0000001E
-#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT         1
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK         0xFFFFFFC0
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT                6
+#define IOMMU_DEV_TABLE_INT_VALID_MASK          0x00000001
+#define IOMMU_DEV_TABLE_INT_VALID_SHIFT         0
+#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK       0x0000001E
+#define IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT      1
+#define IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK      0x0000000020
+#define IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT      5
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_MASK      0xFFFFFFC0
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_LOW_SHIFT     6
 
 /* DeviceTable Entry[191:160] */
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK                0x000FFFFF
-#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT       0
-#define IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK             0x01000000
-#define IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT            24
-#define IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK             0x02000000
-#define IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT            25
-#define IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK              0x04000000
-#define IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT             26
-#define IOMMU_DEV_TABLE_INT_CONTROL_MASK               0x30000000
-#define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT              28
-#define IOMMU_DEV_TABLE_LINT0_ENABLE_MASK              0x40000000
-#define IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT             30
-#define IOMMU_DEV_TABLE_LINT1_ENABLE_MASK              0x80000000
-#define IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT             31
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_MASK     0x000FFFFF
+#define IOMMU_DEV_TABLE_INT_TABLE_PTR_HIGH_SHIFT    0
+#define IOMMU_DEV_TABLE_INIT_PASSTHRU_MASK      0x01000000
+#define IOMMU_DEV_TABLE_INIT_PASSTHRU_SHIFT     24
+#define IOMMU_DEV_TABLE_EINT_PASSTHRU_MASK      0x02000000
+#define IOMMU_DEV_TABLE_EINT_PASSTHRU_SHIFT     25
+#define IOMMU_DEV_TABLE_NMI_PASSTHRU_MASK       0x04000000
+#define IOMMU_DEV_TABLE_NMI_PASSTHRU_SHIFT      26
+#define IOMMU_DEV_TABLE_INT_CONTROL_MASK        0x30000000
+#define IOMMU_DEV_TABLE_INT_CONTROL_SHIFT       28
+#define IOMMU_DEV_TABLE_LINT0_ENABLE_MASK       0x40000000
+#define IOMMU_DEV_TABLE_LINT0_ENABLE_SHIFT      30
+#define IOMMU_DEV_TABLE_LINT1_ENABLE_MASK       0x80000000
+#define IOMMU_DEV_TABLE_LINT1_ENABLE_SHIFT      31
 
 /* Command Buffer */
 #define IOMMU_CMD_BUFFER_BASE_LOW_OFFSET       0x08
 #define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK   0x0000FFFF
 #define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT  0
 
+/* INVALIDATE_INTERRUPT_TABLE command */
+#define IOMMU_INV_INT_TABLE_DEVICE_ID_MASK   0x0000FFFF
+#define IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT  0
+
 /* Event Log */
 #define IOMMU_EVENT_LOG_BASE_LOW_OFFSET                0x10
 #define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET       0x14
 #define IOMMU_IO_READ_ENABLED           1
 #define HACK_BIOS_SETTINGS                  0
 
+/* interrupt remapping table */
+#define INT_REMAP_INDEX_DM_MASK         0x1C00
+#define INT_REMAP_INDEX_DM_SHIFT        10
+#define INT_REMAP_INDEX_VECTOR_MASK     0x3FC
+#define INT_REMAP_INDEX_VECTOR_SHIFT    2
+#define INT_REMAP_ENTRY_REMAPEN_MASK    0x00000001
+#define INT_REMAP_ENTRY_REMAPEN_SHIFT   0
+#define INT_REMAP_ENTRY_SUPIOPF_MASK    0x00000002
+#define INT_REMAP_ENTRY_SUPIOPF_SHIFT   1
+#define INT_REMAP_ENTRY_INTTYPE_MASK    0x0000001C
+#define INT_REMAP_ENTRY_INTTYPE_SHIFT   2
+#define INT_REMAP_ENTRY_REQEOI_MASK     0x00000020
+#define INT_REMAP_ENTRY_REQEOI_SHIFT    5
+#define INT_REMAP_ENTRY_DM_MASK         0x00000040
+#define INT_REMAP_ENTRY_DM_SHIFT        6
+#define INT_REMAP_ENTRY_DEST_MAST       0x0000FF00
+#define INT_REMAP_ENTRY_DEST_SHIFT      8
+#define INT_REMAP_ENTRY_VECTOR_MASK     0x00FF0000
+#define INT_REMAP_ENTRY_VECTOR_SHIFT    16
+
 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
index b6fa94f49f58fdca795d9d521dac729f203983f5..81264b0d6141c2391dad18eb29ef41b6879caef4 100644 (file)
@@ -70,7 +70,7 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
 int amd_iommu_sync_p2m(struct domain *d);
 
 /* device table functions */
-void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr,
+void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
         u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
 int amd_iommu_is_dte_page_translation_valid(u32 *entry);
 void invalidate_dev_table_entry(struct amd_iommu *iommu,
@@ -86,6 +86,14 @@ struct amd_iommu *find_iommu_for_device(int bus, int devfn);
 /* amd-iommu-acpi functions */
 int __init parse_ivrs_table(struct acpi_table_header *table);
 
+/* interrupt remapping */
+int amd_iommu_setup_intremap_table(void);
+void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id);
+void amd_iommu_ioapic_update_ire(
+    unsigned int apic, unsigned int reg, unsigned int value);
+void amd_iommu_msi_msg_update_ire(
+    struct msi_desc *msi_desc, struct msi_msg *msg);
+
 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
 {
     u32 field;