pte.pt.ai = attributes;
pte.pt.xn = 1;
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_va(FIXMAP_ADDR(map));
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Remove a mapping from a fixmap entry */
{
lpae_t pte = {0};
write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
- flush_xen_data_tlb_va(FIXMAP_ADDR(map));
+ flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
/* Map a page of domheap memory */
* We may not have flushed this specific subpage at map time,
 * since we only flush the 4k page, not the superpage
*/
- flush_xen_data_tlb_va(va);
+ flush_xen_data_tlb_range_va(va, PAGE_SIZE);
return (void *)va;
}
dest_va = BOOT_MISC_VIRT_START;
pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT);
write_pte(xen_second + second_table_offset(dest_va), pte);
- flush_xen_data_tlb_va(dest_va);
+ flush_xen_data_tlb_range_va(dest_va, PAGE_SIZE);
/* Calculate virt-to-phys offset for the new location */
phys_offset = xen_paddr - (unsigned long) _start;
}
/*
- * Flush one VA's hypervisor mappings from the data TLB. This is not
+ * Flush a range of VA's hypervisor mappings from the data TLB. This is not
* sufficient when changing code mappings or for self modifying code.
*/
-static inline void flush_xen_data_tlb_va(unsigned long va)
+static inline void flush_xen_data_tlb_range_va(unsigned long va, unsigned long size)
{
- asm volatile("dsb;" /* Ensure preceding are visible */
- STORE_CP32(0, TLBIMVAH)
- "dsb;" /* Ensure completion of the TLB flush */
- "isb;"
- : : "r" (va) : "memory");
+ unsigned long end = va + size;
+ dsb(); /* Ensure preceding are visible */
+ while ( va < end ) {
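+ /* TLBIMVAH: invalidate the hypervisor TLB entry for this VA, one page per iteration */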
+ asm volatile(STORE_CP32(0, TLBIMVAH)
+ : : "r" (va) : "memory");
+ va += PAGE_SIZE;
+ }
+ dsb(); /* Ensure completion of the TLB flush */
+ isb();
}
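For illustration only (not part of the patch), a minimal sketch of how a caller might use the new range flush to replace a per-page flush loop. example_remap_pages() and its arguments are hypothetical; write_pte(), mfn_to_xen_entry(), third_table_offset() and PAGE_SIZE are the helpers already used in the hunks above.

/* Hypothetical caller, for illustration only: remap nr consecutive pages,
 * then issue a single ranged flush instead of nr per-page flushes. */
static void example_remap_pages(lpae_t *table, unsigned long va,
                                unsigned long mfn, unsigned int nr)
{
    unsigned int i;

    for ( i = 0; i < nr; i++ )
    {
        lpae_t pte = mfn_to_xen_entry(mfn + i);
        write_pte(table + third_table_offset(va + i * PAGE_SIZE), pte);
    }

    /* One call covers the whole range; the loop inside
     * flush_xen_data_tlb_range_va() invalidates each page. */
    flush_xen_data_tlb_range_va(va, nr * PAGE_SIZE);
}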
/* Flush all non-hypervisor mappings from the TLB */