: : "r" (pte.bits), "r" (p) : "memory");
}
+/* Inline ASM to flush the dcache line for the VA in register R (R may be an inline asm operand) */
+#define __flush_xen_dcache_one(R) STORE_CP32(R, DCCMVAC)
+
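(Editorial aside, not part of the patch: assuming Xen's existing STORE_CP32() and DCCMVAC register definitions, the macro expands to an instruction string that can be embedded in an asm() statement, roughly:)

    /* __flush_xen_dcache_one(0) ~ "mcr p15, 0, %0, c7, c10, 1;" (DCCMVAC,
     * clean data cache line by MVA to PoC), with the virtual address to
     * flush passed as asm operand 0, e.g.:
     *   asm volatile (__flush_xen_dcache_one(0) : : "r" (va));
     */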
/*
* Flush all hypervisor mappings from the TLB and branch predictor.
* This is needed after changing Xen code mappings.
: : "r" (pte.bits), "r" (p) : "memory");
}
+/* Inline ASM to flush the dcache line for the VA in register R (R may be an inline asm operand) */
+#define __flush_xen_dcache_one(R) "dc cvac, %" #R ";"
+
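(Editorial aside, not part of the patch: on arm64 the # stringize plus string pasting produces the AArch64 cache-maintenance instruction directly, roughly:)

    /* __flush_xen_dcache_one(0) pastes to "dc cvac, %0;" (DC CVAC, clean
     * data cache line by VA to PoC), so the same
     *   asm volatile (__flush_xen_dcache_one(0) : : "r" (va));
     * form works on both arm32 and arm64. */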
/*
 * Flush all hypervisor mappings from the TLB.
* This is needed after changing Xen code mappings.
void *end;
dsb(); /* So the CPU issues all writes to the range */
for ( end = p + size; p < end; p += cacheline_bytes )
- WRITE_CP32((uint32_t) p, DCCMVAC);
+ asm volatile (__flush_xen_dcache_one(0) : : "r" (p));
dsb(); /* So we know the flushes happen before continuing */
}
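(Editorial aside, not part of the patch: a hypothetical caller of flush_xen_dcache_va_range(), cleaning a small shared structure out to the point of coherency before a cache-incoherent observer, such as a secondary CPU still running with its caches off, reads it. The structure and function names below are invented for illustration.)

    struct smp_up_arg {
        uint64_t entry_point;   /* address the waiting CPU should jump to */
        uint64_t context;       /* opaque argument for that CPU */
    };

    static void publish_up_arg(struct smp_up_arg *arg, uint64_t entry)
    {
        arg->entry_point = entry;
        arg->context = 0;
        /* Make the writes visible before the other CPU is released. */
        flush_xen_dcache_va_range(arg, sizeof(*arg));
    }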
flush_xen_dcache_va_range(_p, sizeof(x)); \
else \
asm volatile ( \
- "dsb;" /* Finish all earlier writes */ \
- STORE_CP32(0, DCCMVAC) \
- "dsb;" /* Finish flush before continuing */ \
+ "dsb sy;" /* Finish all earlier writes */ \
+ __flush_xen_dcache_one(0) \
+ "dsb sy;" /* Finish flush before continuing */ \
: : "r" (_p), "m" (*_p)); \
} while (0)
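(Editorial aside, not part of the patch: a hypothetical use of flush_xen_dcache() on a single small object. A naturally aligned 64-bit entry satisfies the macro's size check (not shown in this hunk), so the else-branch above, dsb sy / flush one line / dsb sy, is emitted rather than a call to flush_xen_dcache_va_range(). The table and helper below are invented for illustration.)

    static void set_boot_entry(uint64_t *table, unsigned int idx, uint64_t e)
    {
        table[idx] = e;
        /* Clean just this entry's cache line for MMU-off observers. */
        flush_xen_dcache(table[idx]);
    }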