static void __init deallocate_iommu_table_struct(
struct table_struct *table)
{
+ int order = 0;
if ( table->buffer )
{
- free_xenheap_pages(table->buffer,
- get_order_from_bytes(table->alloc_size));
+ order = get_order_from_bytes(table->alloc_size);
+ __free_amd_iommu_tables(table->buffer, order);
table->buffer = NULL;
}
}
static int __init allocate_iommu_table_struct(struct table_struct *table,
const char *name)
{
- table->buffer = (void *) alloc_xenheap_pages(
- get_order_from_bytes(table->alloc_size));
-
- if ( !table->buffer )
+ int order = 0;
+ if ( table->buffer == NULL )
{
- amd_iov_error("Error allocating %s\n", name);
- return -ENOMEM;
- }
+ order = get_order_from_bytes(table->alloc_size);
+ table->buffer = __alloc_amd_iommu_tables(order);
- memset(table->buffer, 0, table->alloc_size);
+ if ( table->buffer == NULL )
+ {
+ amd_iov_error("Error allocating %s\n", name);
+ return -ENOMEM;
+ }
+ memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
+ }
return 0;
}
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
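+/* The interrupt remapping table occupies 2^INTREMAP_TABLE_ORDER (two) pages. */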
+#define INTREMAP_TABLE_ORDER 1
DEFINE_SPINLOCK(int_remap_table_lock);
void *int_remap_table = NULL;
unsigned long flags;
spin_lock_irqsave(&int_remap_table_lock, flags);
+
if ( int_remap_table == NULL )
- int_remap_table = (void *)alloc_xenheap_pages(1);
- if ( !int_remap_table )
{
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
- return -ENOMEM;
+ int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
+ if ( int_remap_table == NULL )
+ {
+ spin_unlock_irqrestore(&int_remap_table_lock, flags);
+ return -ENOMEM;
+ }
+ memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
}
- memset((u8*)int_remap_table, 0, PAGE_SIZE*2);
spin_unlock_irqrestore(&int_remap_table_lock, flags);
return 0;
spin_lock_irqsave(&int_remap_table_lock, flags);
if ( int_remap_table )
{
- free_xenheap_pages(int_remap_table, 1);
+ __free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
int_remap_table = NULL;
}
spin_unlock_irqrestore(&int_remap_table_lock, flags);
}
}
-static void clear_page_table_entry_present(u32 *pte)
+static void clear_iommu_l1e_present(u64 l2e, unsigned long gfn)
{
- set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0],
- IOMMU_PTE_PRESENT_MASK,
- IOMMU_PTE_PRESENT_SHIFT, &pte[0]);
+ u32 *l1e;
+ int offset;
+ void *l1_table;
+
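+ /* l2e is the machine address of the level-1 table; map it temporarily. */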
+ l1_table = map_domain_page(l2e >> PAGE_SHIFT);
+
+ offset = gfn & (~PTE_PER_TABLE_MASK);
+ l1e = (u32*)(l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
+
+ /* clear l1 entry */
+ l1e[0] = l1e[1] = 0;
+
+ unmap_domain_page(l1_table);
}
-static void set_page_table_entry_present(u32 *pte, u64 page_addr,
- int iw, int ir)
+static void set_iommu_l1e_present(u64 l2e, unsigned long gfn,
+ u64 maddr, int iw, int ir)
{
u64 addr_lo, addr_hi;
u32 entry;
+ void *l1_table;
+ int offset;
+ u32 *l1e;
- addr_lo = page_addr & DMA_32BIT_MASK;
- addr_hi = page_addr >> 32;
+ l1_table = map_domain_page(l2e >> PAGE_SHIFT);
+
+ offset = gfn & (~PTE_PER_TABLE_MASK);
+ l1e = (u32*)((u8*)l1_table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE));
+
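+ /* Write the high word first; the low word carries the present bit. */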
+ addr_lo = maddr & DMA_32BIT_MASK;
+ addr_hi = maddr >> 32;
set_field_in_reg_u32((u32)addr_hi, 0,
IOMMU_PTE_ADDR_HIGH_MASK,
IOMMU_CONTROL_DISABLED, entry,
IOMMU_PTE_IO_READ_PERMISSION_MASK,
IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry);
- pte[1] = entry;
+ l1e[1] = entry;
set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
IOMMU_PTE_ADDR_LOW_MASK,
set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
IOMMU_PTE_PRESENT_MASK,
IOMMU_PTE_PRESENT_SHIFT, &entry);
- pte[0] = entry;
-}
+ l1e[0] = entry;
+ unmap_domain_page(l1_table);
+}
static void amd_iommu_set_page_directory_entry(u32 *pde,
u64 next_ptr, u8 next_level)
dte[0] = entry;
}
-void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry)
+u64 amd_iommu_get_next_table_from_pte(u32 *entry)
{
u64 addr_lo, addr_hi, ptr;
IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT);
ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
- return ptr ? maddr_to_virt((unsigned long)ptr) : NULL;
+ return ptr;
}
static int amd_iommu_is_pte_present(u32 *entry)
IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT));
}
-static void *get_pte_from_page_tables(void *table, int level,
- unsigned long io_pfn)
+static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
+ unsigned long io_pfn)
{
unsigned long offset;
void *pde = NULL;
+ void *table_vaddr;
+ u64 next_table_maddr = 0;
- BUG_ON(table == NULL);
+ BUG_ON( table == NULL || level == 0 );
- while ( level > 0 )
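+ /* Walk down to level 2, allocating missing tables from the domheap, and return the machine address of the level-1 table. */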
+ while ( level > 1 )
{
offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
(level - IOMMU_PAGING_MODE_LEVEL_1)));
offset &= ~PTE_PER_TABLE_MASK;
- pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
- if ( level == 1 )
- break;
- if ( !pde )
- return NULL;
+ table_vaddr = map_domain_page(page_to_mfn(table));
+ pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+ next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
+
if ( !amd_iommu_is_pte_present(pde) )
{
- void *next_table = alloc_xenheap_page();
- if ( next_table == NULL )
- return NULL;
- memset(next_table, 0, PAGE_SIZE);
- if ( *(u64 *)pde == 0 )
+ if ( next_table_maddr == 0 )
{
- unsigned long next_ptr = (u64)virt_to_maddr(next_table);
+ table = alloc_amd_iommu_pgtable();
+ if ( table == NULL )
+ {
+ unmap_domain_page(table_vaddr);
+ return 0;
+ }
+ next_table_maddr = page_to_maddr(table);
amd_iommu_set_page_directory_entry(
- (u32 *)pde, next_ptr, level - 1);
- }
- else
- {
- free_xenheap_page(next_table);
+ (u32 *)pde, next_table_maddr, level - 1);
}
+ else /* should never reach here */
+ {
+ unmap_domain_page(table_vaddr);
+ return 0;
+ }
}
- table = amd_iommu_get_vptr_from_page_table_entry(pde);
+
+ unmap_domain_page(table_vaddr);
+ table = maddr_to_page(next_table_maddr);
level--;
}
- return pde;
+ return next_table_maddr;
}
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
- void *pte;
+ u64 iommu_l2e;
unsigned long flags;
- u64 maddr;
struct hvm_iommu *hd = domain_hvm_iommu(d);
int iw = IOMMU_IO_WRITE_ENABLED;
int ir = IOMMU_IO_READ_ENABLED;
if ( is_hvm_domain(d) && !hd->p2m_synchronized )
goto out;
- maddr = (u64)mfn << PAGE_SHIFT;
- pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
- if ( pte == NULL )
+ iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
+ if ( iommu_l2e == 0 )
{
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return -EFAULT;
}
+ set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
- set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
out:
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return 0;
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
- void *pte;
+ u64 iommu_l2e;
unsigned long flags;
- u64 io_addr = gfn;
- int requestor_id;
struct amd_iommu *iommu;
struct hvm_iommu *hd = domain_hvm_iommu(d);
return 0;
}
- requestor_id = hd->domain_id;
- io_addr = (u64)gfn << PAGE_SHIFT;
+ iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
- pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
- if ( pte == NULL )
+ if ( iommu_l2e == 0 )
{
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
}
/* mark PTE as 'page not present' */
- clear_page_table_entry_present((u32 *)pte);
+ clear_iommu_l1e_present(iommu_l2e, gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
/* send INVALIDATE_IOMMU_PAGES command */
for_each_amd_iommu ( iommu )
{
spin_lock_irqsave(&iommu->lock, flags);
- invalidate_iommu_page(iommu, io_addr, requestor_id);
+ invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
flush_command_buffer(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
unsigned long phys_addr,
unsigned long size, int iw, int ir)
{
+ u64 iommu_l2e;
unsigned long flags, npages, i;
- void *pte;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
npages = region_to_pages(phys_addr, size);
spin_lock_irqsave(&hd->mapping_lock, flags);
for ( i = 0; i < npages; ++i )
{
- pte = get_pte_from_page_tables(
+ iommu_l2e = iommu_l2e_from_pfn(
hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
- if ( pte == NULL )
+
+ if ( iommu_l2e == 0 )
{
amd_iov_error(
"Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return -EFAULT;
}
- set_page_table_entry_present((u32 *)pte,
- phys_addr, iw, ir);
+
+ set_iommu_l1e_present(iommu_l2e,
+ (phys_addr >> PAGE_SHIFT), phys_addr, iw, ir);
+
phys_addr += PAGE_SIZE;
}
spin_unlock_irqrestore(&hd->mapping_lock, flags);
int amd_iommu_sync_p2m(struct domain *d)
{
unsigned long mfn, gfn, flags;
- void *pte;
- u64 maddr;
+ u64 iommu_l2e;
struct list_head *entry;
struct page_info *page;
struct hvm_iommu *hd;
if ( gfn == INVALID_M2P_ENTRY )
continue;
- maddr = (u64)mfn << PAGE_SHIFT;
- pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
- if ( pte == NULL )
+ iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
+
+ if ( iommu_l2e == 0 )
{
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
spin_unlock_irqrestore(&hd->mapping_lock, flags);
return -EFAULT;
}
- set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
+
+ set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
}
hd->p2m_synchronized = 1;
extern struct ivrs_mappings *ivrs_mappings;
extern void *int_remap_table;
-static void deallocate_domain_page_tables(struct hvm_iommu *hd)
-{
- if ( hd->root_table )
- free_xenheap_page(hd->root_table);
-}
-
-static void deallocate_domain_resources(struct hvm_iommu *hd)
-{
- deallocate_domain_page_tables(hd);
-}
-
int __init amd_iommu_init(void)
{
struct amd_iommu *iommu;
struct domain *domain, struct amd_iommu *iommu, int bdf)
{
void *dte;
- u64 root_ptr;
- u64 intremap_ptr;
unsigned long flags;
int req_id;
u8 sys_mgt, dev_ex;
BUG_ON( !hd->root_table || !hd->paging_mode || !int_remap_table );
- root_ptr = (u64)virt_to_maddr(hd->root_table);
/* get device-table entry */
req_id = ivrs_mappings[bdf].dte_requestor_id;
- dte = iommu->dev_table.buffer +
- (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
- intremap_ptr = (u64)virt_to_maddr(int_remap_table);
+ spin_lock_irqsave(&iommu->lock, flags);
if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
- spin_lock_irqsave(&iommu->lock, flags);
-
/* bind DTE to domain page-tables */
sys_mgt = ivrs_mappings[req_id].dte_sys_mgt_enable;
dev_ex = ivrs_mappings[req_id].dte_allow_exclusion;
- amd_iommu_set_dev_table_entry((u32 *)dte, root_ptr, intremap_ptr,
+
+ amd_iommu_set_dev_table_entry((u32 *)dte,
+ page_to_maddr(hd->root_table),
+ virt_to_maddr(int_remap_table),
hd->domain_id, sys_mgt, dev_ex,
hd->paging_mode);
invalidate_interrupt_table(iommu, req_id);
flush_command_buffer(iommu);
amd_iov_info("Enable DTE:0x%x, "
- "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n",
- req_id, root_ptr, hd->domain_id, hd->paging_mode);
-
- spin_unlock_irqrestore(&iommu->lock, flags);
+ "root_table:%"PRIx64", interrupt_table:%"PRIx64", "
+ "domain_id:%d, paging_mode:%d\n",
+ req_id, (u64)page_to_maddr(hd->root_table),
+ (u64)virt_to_maddr(int_remap_table), hd->domain_id,
+ hd->paging_mode);
}
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
static void amd_iommu_setup_dom0_devices(struct domain *d)
spin_lock_irqsave(&hd->mapping_lock, flags);
if ( !hd->root_table )
{
- hd->root_table = (void *)alloc_xenheap_page();
+ hd->root_table = alloc_amd_iommu_pgtable();
if ( !hd->root_table )
goto error_out;
- memset((u8*)hd->root_table, 0, PAGE_SIZE);
}
spin_unlock_irqrestore(&hd->mapping_lock, flags);
/* allocate page directory */
if ( allocate_domain_resources(hd) != 0 )
{
- deallocate_domain_resources(hd);
+ if ( hd->root_table )
+ {
+ free_domheap_page(hd->root_table);
+ hd->root_table = NULL;
+ }
return -ENOMEM;
}
int req_id;
req_id = ivrs_mappings[bdf].dte_requestor_id;
- dte = iommu->dev_table.buffer +
- (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
+ spin_lock_irqsave(&iommu->lock, flags);
if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) )
{
- spin_lock_irqsave(&iommu->lock, flags);
memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE);
invalidate_dev_table_entry(iommu, req_id);
flush_command_buffer(iommu);
" domain_id:%d, paging_mode:%d\n",
req_id, domain_hvm_iommu(domain)->domain_id,
domain_hvm_iommu(domain)->paging_mode);
- spin_unlock_irqrestore(&iommu->lock, flags);
}
+ spin_unlock_irqrestore(&iommu->lock, flags);
}
static int reassign_device( struct domain *source, struct domain *target,
return reassign_device(dom0, d, bus, devfn);
}
-static void deallocate_next_page_table(void *table, unsigned long index,
- int level)
+static void deallocate_next_page_table(struct page_info* pg, int level)
{
- unsigned long next_index;
- void *next_table, *pde;
- int next_level;
+ void *table_vaddr, *pde;
+ u64 next_table_maddr;
+ int index;
- pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
- next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde);
+ table_vaddr = map_domain_page(page_to_mfn(pg));
- if ( next_table )
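+ /* Free lower-level tables recursively before releasing this page. */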
+ if ( level > 1 )
{
- next_level = level - 1;
- if ( next_level > 1 )
+ for ( index = 0; index < PTE_PER_TABLE_SIZE; index++ )
{
- next_index = 0;
- do
+ pde = table_vaddr + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE);
+ next_table_maddr = amd_iommu_get_next_table_from_pte(pde);
+ if ( next_table_maddr != 0 )
{
- deallocate_next_page_table(next_table,
- next_index, next_level);
- next_index++;
- } while (next_index < PTE_PER_TABLE_SIZE);
+ deallocate_next_page_table(
+ maddr_to_page(next_table_maddr), level - 1);
+ }
}
-
- free_xenheap_page(next_table);
}
+
+ unmap_domain_page(table_vaddr);
+ free_amd_iommu_pgtable(pg);
}
static void deallocate_iommu_page_tables(struct domain *d)
{
- unsigned long index;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- if ( hd ->root_table )
+ if ( hd->root_table )
{
- index = 0;
-
- do
- {
- deallocate_next_page_table(hd->root_table,
- index, hd->paging_mode);
- index++;
- } while ( index < PTE_PER_TABLE_SIZE );
-
- free_xenheap_page(hd ->root_table);
+ deallocate_next_page_table(hd->root_table, hd->paging_mode);
+ hd->root_table = NULL;
}
-
- hd ->root_table = NULL;
}
+
static void amd_iommu_domain_destroy(struct domain *d)
{
deallocate_iommu_page_tables(d);
#include <xen/sched.h>
#include <asm/amd-iommu.h>
+#include <xen/domain_page.h>
#define for_each_amd_iommu(amd_iommu) \
list_for_each_entry(amd_iommu, \
/* mapping functions */
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
-void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
+u64 amd_iommu_get_next_table_from_pte(u32 *entry);
int amd_iommu_reserve_domain_unity_map(struct domain *domain,
unsigned long phys_addr, unsigned long size, int iw, int ir);
int amd_iommu_sync_p2m(struct domain *d);
void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u64 intremap_ptr,
u16 domain_id, u8 sys_mgt, u8 dev_ex, u8 paging_mode);
int amd_iommu_is_dte_page_translation_valid(u32 *entry);
-void invalidate_dev_table_entry(struct amd_iommu *iommu,
- u16 devic_id);
+void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
/* send cmd to iommu */
int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
return (PAGE_ALIGN(addr + size) - (addr & PAGE_MASK)) >> PAGE_SHIFT;
}
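+/* I/O page tables are allocated from the domheap and mapped on demand with map_domain_page(). */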
+static inline struct page_info* alloc_amd_iommu_pgtable(void)
+{
+ struct page_info *pg;
+ void *vaddr;
+
+ pg = alloc_domheap_page(NULL, 0);
+ if ( pg == NULL )
+ return NULL;
+
+ /* Zero the new page table through a transient mapping before use. */
+ vaddr = map_domain_page(page_to_mfn(pg));
+ memset(vaddr, 0, PAGE_SIZE);
+ unmap_domain_page(vaddr);
+ return pg;
+}
+
+static inline void free_amd_iommu_pgtable(struct page_info *pg)
+{
+ if ( pg != NULL )
+ free_domheap_page(pg);
+}
+
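+/* Fixed IOMMU tables (e.g. the device table) need a permanent mapping and stay on the xenheap. */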
+static inline void* __alloc_amd_iommu_tables(int order)
+{
+ void *buf;
+ buf = alloc_xenheap_pages(order);
+ return buf;
+}
+
+static inline void __free_amd_iommu_tables(void *table, int order)
+{
+ free_xenheap_pages(table, order);
+}
+
#endif /* _ASM_X86_64_AMD_IOMMU_PROTO_H */
/* amd iommu support */
int domain_id;
int paging_mode;
- void *root_table;
+ struct page_info *root_table;
bool_t p2m_synchronized;
/* iommu_ops */