The body of flush_xen_data_tlb_range_va_local() is almost identical on both sub-architectures, so consolidate it in the common header and leave only a single-VA flush primitive per sub-architecture.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Acked-by: Julien Grall <julien.grall@linaro.org>
[ ijc -- fixed coding style ]
: : "r" (r0) /* dummy */: "memory");
}
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
- unsigned long size)
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
{
- unsigned long end = va + size;
- dsb(sy); /* Ensure preceding are visible */
- while ( va < end ) {
- asm volatile(STORE_CP32(0, TLBIMVAH)
- : : "r" (va) : "memory");
- va += PAGE_SIZE;
- }
- dsb(sy); /* Ensure completion of the TLB flush */
- isb();
+ asm volatile(STORE_CP32(0, TLBIMVAH) : : "r" (va) : "memory");
}
/* Ask the MMU to translate a VA for us */
: : : "memory");
}
-/*
- * Flush a range of VA's hypervisor mappings from the data TLB of the
- * local processor. This is not sufficient when changing code mappings
- * or for self modifying code.
- */
-static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
- unsigned long size)
+/* Flush TLB of local processor for address va. */
+static inline void __flush_xen_data_tlb_one_local(vaddr_t va)
{
- unsigned long end = va + size;
- dsb(sy); /* Ensure preceding are visible */
- while ( va < end ) {
- asm volatile("tlbi vae2, %0;"
- : : "r" (va>>PAGE_SHIFT) : "memory");
- va += PAGE_SIZE;
- }
- dsb(sy); /* Ensure completion of the TLB flush */
- isb();
+ asm volatile("tlbi vae2, %0;" : : "r" (va>>PAGE_SHIFT) : "memory");
}
/* Ask the MMU to translate a VA for us */
: : "r" (_p), "m" (*_p)); \
} while (0)
+/*
+ * Flush a range of VA's hypervisor mappings from the data TLB of the
+ * local processor. This is not sufficient when changing code mappings
+ * or for self modifying code.
+ */
+static inline void flush_xen_data_tlb_range_va_local(unsigned long va,
+ unsigned long size)
+{
+ unsigned long end = va + size;
+ dsb(sy); /* Ensure preceding are visible */
+ while ( va < end )
+ {
+ __flush_xen_data_tlb_one_local(va);
+ va += PAGE_SIZE;
+ }
+ dsb(sy); /* Ensure completion of the TLB flush */
+ isb();
+}
+
/* Flush the dcache for an entire page. */
void flush_page_to_ram(unsigned long mfn);
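
For illustration, the consolidated helper would be used by callers roughly as sketched below. This is not part of the patch; update_xen_data_mapping() is a hypothetical stand-in for whatever rewrites the hypervisor PTEs for the range.

    /*
     * Illustrative only: after rewriting the hypervisor data mappings
     * covering [va, va + size), flush the now-stale entries from the
     * local CPU's TLB.  Not sufficient when changing code mappings
     * (see the comment on flush_xen_data_tlb_range_va_local above).
     */
    static void remap_xen_data_local(unsigned long va, unsigned long size)
    {
        update_xen_data_mapping(va, size);            /* hypothetical PTE rewrite */
        flush_xen_data_tlb_range_va_local(va, size);  /* flush local data TLB */
    }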