static inline void mi_atomic_yield(void) {
std::this_thread::yield();
}
-#elif (defined(__GNUC__) || defined(__clang__)) && \
- (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))
-#if defined(__x86_64__) || defined(__i386__)
+#elif defined(__SSE2__) // x86 and x86-64
+ #include <emmintrin.h>
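+ // _mm_pause() emits the x86 PAUSE instruction: a spin-wait hint that saves
+ // power and avoids the pipeline-flush penalty when the spin loop exits.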
+ static inline void mi_atomic_yield(void) {
+ _mm_pause();
+ }
+#elif defined(__x86_64__) || defined(__i386__)
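+ // no SSE2 at compile time: emit PAUSE directly. It encodes as "rep; nop",
+ // so pre-SSE2 CPUs simply execute a nop.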
static inline void mi_atomic_yield(void) {
asm volatile ("pause" ::: "memory");
}
-#elif defined(__arm__) || defined(__aarch64__)
+#elif defined(__aarch64__)
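+ // "wfe" (wait-for-event) can drop the core into a low-power state until an
+ // event or interrupt arrives; a stronger hint than "yield" under contention.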
static inline void mi_atomic_yield(void) {
- asm volatile("yield");
+ asm volatile("wfe");
+ }
+#elif defined(__arm__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 7)
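+ // ARMv7 and later provide "yield", the spin-wait hint equivalent of PAUSE.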
+ static inline void mi_atomic_yield(void) {
+ __asm__ volatile("yield" ::: "memory");
+ }
+#elif defined(__armel__) || defined(__ARMEL__)
+ static inline void mi_atomic_yield(void) {
+ asm volatile ("nop" ::: "memory"); // default operation - does nothing => Might lead to passive spinning.
+ }
+#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) // PowerPC
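+ // "or 27,27,27" is the Power ISA "yield" hint: this hardware thread cedes
+ // execution resources to its SMT siblings while spinning.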
+ static inline void mi_atomic_yield(void) {
+ __asm__ __volatile__ ("or 27,27,27" ::: "memory");
+ }
+#elif defined(__sun)
+ // Solaris / illumos
+ #include <synch.h>
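+ // smt_pause(3C) issues the platform's spin-wait hint (e.g. PAUSE on x86).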
+ static inline void mi_atomic_yield(void) {
+ smt_pause();
}
-#endif
#elif defined(__wasi__)
#include <sched.h>
static inline void mi_atomic_yield(void) {