--- /dev/null
+#ifndef SSE2NEON_H
+#define SSE2NEON_H
+
+// This header file provides a simple API translation layer
+// from SSE intrinsics to their corresponding Arm/AArch64 NEON versions
+//
+// Contributors to this work are:
+// John W. Ratcliff <jratcliffscarab@gmail.com>
+// Brandon Rowlett <browlett@nvidia.com>
+// Ken Fast <kfast@gdeb.com>
+// Eric van Beurden <evanbeurden@nvidia.com>
+// Alexander Potylitsin <apotylitsin@nvidia.com>
+// Hasindu Gamaarachchi <hasindu2008@gmail.com>
+// Jim Huang <jserv@ccns.ncku.edu.tw>
+// Mark Cheng <marktwtn@gmail.com>
+// Malcolm James MacLeod <malcolm@gulden.com>
+// Devin Hussey (easyaspi314) <husseydevin@gmail.com>
+// Sebastian Pop <spop@amazon.com>
+// Developer Ecosystem Engineering <DeveloperEcosystemEngineering@apple.com>
+// Danila Kutenin <danilak@google.com>
+// François Turban (JishinMaster) <francois.turban@gmail.com>
+// Pei-Hsuan Hung <afcidk@gmail.com>
+// Yang-Hao Yuan <yuanyanghau@gmail.com>
+// Syoyo Fujita <syoyo@lighttransport.com>
+// Brecht Van Lommel <brecht@blender.org>
+// Jonathan Hue <jhue@adobe.com>
+// Cuda Chen <clh960524@gmail.com>
+// Aymen Qader <aymen.qader@arm.com>
+// Anthony Roberts <anthony.roberts@linaro.org>
+
+/*
+ * sse2neon is freely redistributable under the MIT License.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Tunable configurations */
+
+/* Enable precise implementations of math operations.
+ * This slows down the computation a bit, but gives results consistent with
+ * x86 SSE (e.g. it can fix a hole or NaN pixel in a rendering result).
+ */
+/* _mm_min|max_ps|ss|pd|sd */
+#ifndef SSE2NEON_PRECISE_MINMAX
+#define SSE2NEON_PRECISE_MINMAX (0)
+#endif
+/* _mm_rcp_ps and _mm_div_ps */
+#ifndef SSE2NEON_PRECISE_DIV
+#define SSE2NEON_PRECISE_DIV (0)
+#endif
+/* _mm_sqrt_ps and _mm_rsqrt_ps */
+#ifndef SSE2NEON_PRECISE_SQRT
+#define SSE2NEON_PRECISE_SQRT (0)
+#endif
+/* _mm_dp_pd */
+#ifndef SSE2NEON_PRECISE_DP
+#define SSE2NEON_PRECISE_DP (0)
+#endif
+
+/* Enable inclusion of windows.h on MSVC platforms.
+ * This makes _mm_clflush functional on Windows, as there is no builtin.
+ */
+#ifndef SSE2NEON_INCLUDE_WINDOWS_H
+#define SSE2NEON_INCLUDE_WINDOWS_H (0)
+#endif
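+
+/* All of the tunables above are guarded by #ifndef, so they may be overridden
+ * before this header is included. A minimal usage sketch (the values shown are
+ * illustrative, not recommendations):
+ *
+ *   #define SSE2NEON_PRECISE_MINMAX 1
+ *   #define SSE2NEON_PRECISE_SQRT 1
+ *   #include "sse2neon.h"
+ */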
+
+/* compiler specific definitions */
+#if defined(__GNUC__) || defined(__clang__)
+#pragma push_macro("FORCE_INLINE")
+#pragma push_macro("ALIGN_STRUCT")
+#define FORCE_INLINE static inline __attribute__((always_inline))
+#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
+#define _sse2neon_likely(x) __builtin_expect(!!(x), 1)
+#define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0)
+#elif defined(_MSC_VER)
+#if _MSVC_TRADITIONAL
+#error Using the traditional MSVC preprocessor is not supported! Use /Zc:preprocessor instead.
+#endif
+#ifndef FORCE_INLINE
+#define FORCE_INLINE static inline
+#endif
+#ifndef ALIGN_STRUCT
+#define ALIGN_STRUCT(x) __declspec(align(x))
+#endif
+#define _sse2neon_likely(x) (x)
+#define _sse2neon_unlikely(x) (x)
+#else
+#pragma message("Macro name collisions may happen with unsupported compilers.")
+#endif
+
+#if defined(__GNUC__) && __GNUC__ < 10
+#warning "GCC versions earlier than 10 are not supported."
+#endif
+
+/* C language does not allow initializing a variable with a function call. */
+#ifdef __cplusplus
+#define _sse2neon_const static const
+#else
+#define _sse2neon_const const
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#if defined(_WIN32)
+/* Definitions for _mm_{malloc,free} are provided by <malloc.h>
+ * from both MinGW-w64 and MSVC.
+ */
+#define SSE2NEON_ALLOC_DEFINED
+#endif
+
+/* If using MSVC */
+#ifdef _MSC_VER
+#include <intrin.h>
+#if SSE2NEON_INCLUDE_WINDOWS_H
+#include <processthreadsapi.h>
+#include <windows.h>
+#endif
+
+#if !defined(__cplusplus)
+#error SSE2NEON only supports C++ compilation with this compiler
+#endif
+
+#ifdef SSE2NEON_ALLOC_DEFINED
+#include <malloc.h>
+#endif
+
+#if (defined(_M_AMD64) || defined(__x86_64__)) || \
+ (defined(_M_ARM64) || defined(__arm64__))
+#define SSE2NEON_HAS_BITSCAN64
+#endif
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define _sse2neon_define0(type, s, body) \
+ __extension__({ \
+ type _a = (s); \
+ body \
+ })
+#define _sse2neon_define1(type, s, body) \
+ __extension__({ \
+ type _a = (s); \
+ body \
+ })
+#define _sse2neon_define2(type, a, b, body) \
+ __extension__({ \
+ type _a = (a), _b = (b); \
+ body \
+ })
+#define _sse2neon_return(ret) (ret)
+#else
+#define _sse2neon_define0(type, a, body) [=](type _a) { body }(a)
+#define _sse2neon_define1(type, a, body) [](type _a) { body }(a)
+#define _sse2neon_define2(type, a, b, body) \
+ [](type _a, type _b) { body }((a), (b))
+#define _sse2neon_return(ret) return ret
+#endif
+
+#define _sse2neon_init(...) \
+ { \
+ __VA_ARGS__ \
+ }
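+
+/* Illustrative sketch (not part of the API): a helper written with these
+ * macros expands to a GNU statement expression under GCC/Clang and to an
+ * immediately-invoked lambda under MSVC, e.g. a hypothetical negation helper:
+ *
+ *   #define _my_neg_epi32(a)                                  \
+ *       _sse2neon_define1(__m128i, a,                         \
+ *           _sse2neon_return(vreinterpretq_m128i_s32(         \
+ *               vnegq_s32(vreinterpretq_s32_m128i(_a))));)
+ */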
+
+/* Compiler barrier */
+#if defined(_MSC_VER)
+#define SSE2NEON_BARRIER() _ReadWriteBarrier()
+#else
+#define SSE2NEON_BARRIER() \
+ do { \
+ __asm__ __volatile__("" ::: "memory"); \
+ (void) 0; \
+ } while (0)
+#endif
+
+/* Memory barriers
+ * __atomic_thread_fence does not include a compiler barrier; instead,
+ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
+ * semantics.
+ */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+#include <stdatomic.h>
+#endif
+
+FORCE_INLINE void _sse2neon_smp_mb(void)
+{
+ SSE2NEON_BARRIER();
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
+ !defined(__STDC_NO_ATOMICS__)
+ atomic_thread_fence(memory_order_seq_cst);
+#elif defined(__GNUC__) || defined(__clang__)
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#else /* MSVC */
+ __dmb(_ARM64_BARRIER_ISH);
+#endif
+}
+
+/* Architecture-specific build options */
+/* FIXME: #pragma GCC push_options is only available on GCC */
+#if defined(__GNUC__)
+#if defined(__arm__) && __ARM_ARCH == 7
+/* According to the Arm C Language Extensions (ACLE) specification,
+ * __ARM_NEON is defined to a value indicating the level of Advanced SIMD
+ * (NEON) architecture support.
+ */
+#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
+#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
+#endif
+#if !defined(__clang__)
+#pragma GCC push_options
+#pragma GCC target("fpu=neon")
+#endif
+#elif defined(__aarch64__) || defined(_M_ARM64)
+#if !defined(__clang__) && !defined(_MSC_VER)
+#pragma GCC push_options
+#pragma GCC target("+simd")
+#endif
+#elif __ARM_ARCH == 8
+#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
+#error \
+ "You must enable NEON instructions (e.g. -mfpu=neon-fp-armv8) to use SSE2NEON."
+#endif
+#if !defined(__clang__) && !defined(_MSC_VER)
+#pragma GCC push_options
+#endif
+#else
+#error "Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A."
+#endif
+#endif
+
+#include <arm_neon.h>
+#if (!defined(__aarch64__) && !defined(_M_ARM64)) && (__ARM_ARCH == 8)
+#if defined __has_include && __has_include(<arm_acle.h>)
+#include <arm_acle.h>
+#endif
+#endif
+
+/* Apple Silicon cache lines are double the size of what is commonly used by
+ * Intel, AMD and other Arm microarchitectures.
+ * From sysctl -a on Apple M1:
+ * hw.cachelinesize: 128
+ */
+#if defined(__APPLE__) && (defined(__aarch64__) || defined(__arm64__))
+#define SSE2NEON_CACHELINE_SIZE 128
+#else
+#define SSE2NEON_CACHELINE_SIZE 64
+#endif
+
+/* Rounding functions require either Aarch64 instructions or libm fallback */
+#if !defined(__aarch64__) && !defined(_M_ARM64)
+#include <math.h>
+#endif
+
+/* On ARMv7, some registers, such as PMUSERENR and PMCCNTR, are read-only
+ * or not even accessible in user mode.
+ * To write to or access these registers in user mode,
+ * we have to perform a syscall instead.
+ */
+#if (!defined(__aarch64__) && !defined(_M_ARM64))
+#include <sys/time.h>
+#endif
+
+/* "__has_builtin" can be used to query support for built-in functions
+ * provided by gcc/clang and other compilers that support it.
+ */
+#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
+/* Compatibility with gcc <= 9 */
+#if defined(__GNUC__) && (__GNUC__ <= 9)
+#define __has_builtin(x) HAS##x
+#define HAS__builtin_popcount 1
+#define HAS__builtin_popcountll 1
+
+// __builtin_shuffle introduced in GCC 4.7.0
+#if (__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))
+#define HAS__builtin_shuffle 1
+#else
+#define HAS__builtin_shuffle 0
+#endif
+
+#define HAS__builtin_shufflevector 0
+#define HAS__builtin_nontemporal_store 0
+#else
+#define __has_builtin(x) 0
+#endif
+#endif
+
+/**
+ * MACRO for the shuffle parameter of _mm_shuffle_ps().
+ * Argument fp3 is a digit [0123] that represents the float from argument "b"
+ * of _mm_shuffle_ps that will be placed in fp3 of the result. fp2 is the same
+ * for fp2 of the result. fp1 is a digit [0123] that represents the float from
+ * argument "a" of _mm_shuffle_ps that will be placed in fp1 of the result.
+ * fp0 is the same for fp0 of the result.
+ */
+#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
+ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
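+
+// For example, _MM_SHUFFLE(3, 2, 1, 0) evaluates to 0xE4, and
+// _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0)) places a[0] and a[1] in the
+// two lower lanes of the result and b[2] and b[3] in the two upper lanes.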
+
+#if __has_builtin(__builtin_shufflevector)
+#define _sse2neon_shuffle(type, a, b, ...) \
+ __builtin_shufflevector(a, b, __VA_ARGS__)
+#elif __has_builtin(__builtin_shuffle)
+#define _sse2neon_shuffle(type, a, b, ...) \
+ __extension__({ \
+ type tmp = {__VA_ARGS__}; \
+ __builtin_shuffle(a, b, tmp); \
+ })
+#endif
+
+#ifdef _sse2neon_shuffle
+#define vshuffle_s16(a, b, ...) _sse2neon_shuffle(int16x4_t, a, b, __VA_ARGS__)
+#define vshuffleq_s16(a, b, ...) _sse2neon_shuffle(int16x8_t, a, b, __VA_ARGS__)
+#define vshuffle_s32(a, b, ...) _sse2neon_shuffle(int32x2_t, a, b, __VA_ARGS__)
+#define vshuffleq_s32(a, b, ...) _sse2neon_shuffle(int32x4_t, a, b, __VA_ARGS__)
+#define vshuffle_s64(a, b, ...) _sse2neon_shuffle(int64x1_t, a, b, __VA_ARGS__)
+#define vshuffleq_s64(a, b, ...) _sse2neon_shuffle(int64x2_t, a, b, __VA_ARGS__)
+#endif
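+
+/* When available, these helpers take one index per destination lane, where
+ * indices 0..N-1 select lanes of `a` and N..2N-1 select lanes of `b`. An
+ * illustrative sketch:
+ *
+ *   // interleave the low halves of a and b: {a0, b0, a1, b1}
+ *   int32x4_t r = vshuffleq_s32(a, b, 0, 4, 1, 5);
+ */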
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+#define _MM_FROUND_NO_EXC 0x08
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
+#define _MM_ROUND_NEAREST 0x0000
+#define _MM_ROUND_DOWN 0x2000
+#define _MM_ROUND_UP 0x4000
+#define _MM_ROUND_TOWARD_ZERO 0x6000
+/* Flush zero mode macros. */
+#define _MM_FLUSH_ZERO_MASK 0x8000
+#define _MM_FLUSH_ZERO_ON 0x8000
+#define _MM_FLUSH_ZERO_OFF 0x0000
+/* Denormals are zeros mode macros. */
+#define _MM_DENORMALS_ZERO_MASK 0x0040
+#define _MM_DENORMALS_ZERO_ON 0x0040
+#define _MM_DENORMALS_ZERO_OFF 0x0000
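+
+// For example, _mm_round_ps(x, _MM_FROUND_FLOOR) rounds every element of x
+// toward negative infinity, matching _mm_floor_ps(x), while the _MM_ROUND_*
+// values are the ones reported by _MM_GET_ROUNDING_MODE() and accepted by
+// _MM_SET_ROUNDING_MODE().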
+
+/* indicate immediate constant argument in a given range */
+#define __constrange(a, b) const
+
+/* A few intrinsics accept traditional data types like ints or floats, but
+ * most operate on data types that are specific to SSE.
+ * If a vector type ends in d, it contains doubles, and if it does not have
+ * a suffix, it contains floats. An integer vector type can contain any type
+ * of integer, from chars to shorts to unsigned long longs.
+ */
+typedef int64x1_t __m64;
+typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
+// On the 32-bit Arm architecture, the float64x2_t type is not supported.
+// The data type __m128d therefore has to be represented differently for the
+// related intrinsic conversions.
+#if defined(__aarch64__) || defined(_M_ARM64)
+typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
+#else
+typedef float32x4_t __m128d;
+#endif
+typedef int64x2_t __m128i; /* 128-bit vector containing integers */
+
+// __int64 is defined in the Intrinsics Guide, and maps to a different data
+// type in different data models.
+#if !(defined(_WIN32) || defined(_WIN64) || defined(__int64))
+#if (defined(__x86_64__) || defined(__i386__))
+#define __int64 long long
+#else
+#define __int64 int64_t
+#endif
+#endif
+
+/* type-safe casting between types */
+
+#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
+#define vreinterpretq_m128_f32(x) (x)
+#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
+
+#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
+#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
+#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
+#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
+
+#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
+#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
+#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
+#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
+
+#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
+#define vreinterpretq_f32_m128(x) (x)
+#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
+
+#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
+#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
+#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
+#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
+
+#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
+#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
+#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
+#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
+
+#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
+#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
+#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
+#define vreinterpretq_m128i_s64(x) (x)
+
+#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
+#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
+#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
+#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
+
+#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
+#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
+
+#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
+#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
+#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
+#define vreinterpretq_s64_m128i(x) (x)
+
+#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
+#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
+#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
+#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
+
+#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
+#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
+#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
+#define vreinterpret_m64_s64(x) (x)
+
+#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
+#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
+#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
+#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
+
+#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
+#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
+#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
+
+#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
+#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
+#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
+#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
+
+#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
+#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
+#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
+#define vreinterpret_s64_m64(x) (x)
+
+#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
+#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
+
+#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
+
+#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
+#define vreinterpretq_m128d_f64(x) (x)
+
+#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
+
+#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f64(x)
+#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
+
+#define vreinterpretq_f64_m128d(x) (x)
+#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
+#else
+#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
+#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
+
+#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
+#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
+
+#define vreinterpretq_m128d_f32(x) (x)
+
+#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
+
+#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
+#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
+
+#define vreinterpretq_f32_m128d(x) (x)
+#endif
+
+// This header defines a union called 'SIMDVec' which can be used by
+// applications that attempt to access the contents of an __m128 struct
+// directly. Note that accessing the __m128 struct directly is considered
+// bad coding practice by Microsoft: @see:
+// https://learn.microsoft.com/en-us/cpp/cpp/m128
+//
+// However, some legacy source code may try to access the contents of an __m128
+// struct directly, so the developer can use SIMDVec as an alias for it. Any
+// casting must be done manually by the developer, as you cannot cast or
+// otherwise alias the base NEON data type for intrinsic operations.
+//
+// The union is intended to allow direct access to an __m128 variable using the
+// names that the MSVC compiler provides. It should really only be used when
+// trying to access the members of the vector as integer values. GCC/Clang
+// allow native access to the float members through a simple array access
+// operator (in C since 4.6, in C++ since 4.8).
+//
+// Ideally, direct accesses to SIMD vectors should be avoided since they can
+// cause a performance hit. If they really are needed, however, the original
+// __m128 variable can be aliased with a pointer to this union and used to
+// access individual components. The use of this union should be hidden behind
+// a macro that is used throughout the codebase to access the members instead
+// of always declaring this type of variable.
+typedef union ALIGN_STRUCT(16) SIMDVec {
+ float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
+ int8_t m128_i8[16]; // as signed 8-bit integers.
+ int16_t m128_i16[8]; // as signed 16-bit integers.
+ int32_t m128_i32[4]; // as signed 32-bit integers.
+ int64_t m128_i64[2]; // as signed 64-bit integers.
+ uint8_t m128_u8[16]; // as unsigned 8-bit integers.
+ uint16_t m128_u16[8]; // as unsigned 16-bit integers.
+ uint32_t m128_u32[4]; // as unsigned 32-bit integers.
+ uint64_t m128_u64[2]; // as unsigned 64-bit integers.
+} SIMDVec;
+
+// casting using SIMDVec
+#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
+#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
+#define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
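+
+// A minimal sketch of how the aliasing macros above may be used:
+//   __m128i v = _mm_set_epi32(3, 2, 1, 0);
+//   uint32_t lane1 = vreinterpretq_nth_u32_m128i(v, 1);  // lane1 == 1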
+
+/* SSE macros */
+#define _MM_GET_FLUSH_ZERO_MODE _sse2neon_mm_get_flush_zero_mode
+#define _MM_SET_FLUSH_ZERO_MODE _sse2neon_mm_set_flush_zero_mode
+#define _MM_GET_DENORMALS_ZERO_MODE _sse2neon_mm_get_denormals_zero_mode
+#define _MM_SET_DENORMALS_ZERO_MODE _sse2neon_mm_set_denormals_zero_mode
+
+// Function declarations
+// SSE
+FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void);
+FORCE_INLINE __m128 _mm_move_ss(__m128, __m128);
+FORCE_INLINE __m128 _mm_or_ps(__m128, __m128);
+FORCE_INLINE __m128 _mm_set_ps1(float);
+FORCE_INLINE __m128 _mm_setzero_ps(void);
+// SSE2
+FORCE_INLINE __m128i _mm_and_si128(__m128i, __m128i);
+FORCE_INLINE __m128i _mm_castps_si128(__m128);
+FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i, __m128i);
+FORCE_INLINE __m128i _mm_cvtps_epi32(__m128);
+FORCE_INLINE __m128d _mm_move_sd(__m128d, __m128d);
+FORCE_INLINE __m128i _mm_or_si128(__m128i, __m128i);
+FORCE_INLINE __m128i _mm_set_epi32(int, int, int, int);
+FORCE_INLINE __m128i _mm_set_epi64x(int64_t, int64_t);
+FORCE_INLINE __m128d _mm_set_pd(double, double);
+FORCE_INLINE __m128i _mm_set1_epi32(int);
+FORCE_INLINE __m128i _mm_setzero_si128(void);
+// SSE4.1
+FORCE_INLINE __m128d _mm_ceil_pd(__m128d);
+FORCE_INLINE __m128 _mm_ceil_ps(__m128);
+FORCE_INLINE __m128d _mm_floor_pd(__m128d);
+FORCE_INLINE __m128 _mm_floor_ps(__m128);
+FORCE_INLINE __m128d _mm_round_pd(__m128d, int);
+FORCE_INLINE __m128 _mm_round_ps(__m128, int);
+// SSE4.2
+FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t, uint8_t);
+
+/* Backwards compatibility for compilers lacking support for specific types */
+
+// Older GCC versions do not define the vld1q_u8_x4 intrinsic
+#if defined(__GNUC__) && !defined(__clang__) && \
+ ((__GNUC__ <= 13 && defined(__arm__)) || \
+ (__GNUC__ == 10 && __GNUC_MINOR__ < 3 && defined(__aarch64__)) || \
+ (__GNUC__ <= 9 && defined(__aarch64__)))
+FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
+{
+ uint8x16x4_t ret;
+ ret.val[0] = vld1q_u8(p + 0);
+ ret.val[1] = vld1q_u8(p + 16);
+ ret.val[2] = vld1q_u8(p + 32);
+ ret.val[3] = vld1q_u8(p + 48);
+ return ret;
+}
+#else
+// Wraps vld1q_u8_x4
+FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
+{
+ return vld1q_u8_x4(p);
+}
+#endif
+
+#if !defined(__aarch64__) && !defined(_M_ARM64)
+/* emulate vaddv u8 variant */
+FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
+{
+ const uint64x1_t v1 = vpaddl_u32(vpaddl_u16(vpaddl_u8(v8)));
+ return vget_lane_u8(vreinterpret_u8_u64(v1), 0);
+}
+#else
+// Wraps vaddv_u8
+FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
+{
+ return vaddv_u8(v8);
+}
+#endif
+
+#if !defined(__aarch64__) && !defined(_M_ARM64)
+/* emulate vaddvq u8 variant */
+FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
+{
+ uint8x8_t tmp = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
+ uint8_t res = 0;
+ for (int i = 0; i < 8; ++i)
+ res += tmp[i];
+ return res;
+}
+#else
+// Wraps vaddvq_u8
+FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
+{
+ return vaddvq_u8(a);
+}
+#endif
+
+#if !defined(__aarch64__) && !defined(_M_ARM64)
+/* emulate vaddvq u16 variant */
+FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
+{
+ uint32x4_t m = vpaddlq_u16(a);
+ uint64x2_t n = vpaddlq_u32(m);
+ uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
+
+ return vget_lane_u32((uint32x2_t) o, 0);
+}
+#else
+// Wraps vaddvq_u16
+FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
+{
+ return vaddvq_u16(a);
+}
+#endif
+
+/* Function Naming Conventions
+ * The naming convention of SSE intrinsics is straightforward. A generic SSE
+ * intrinsic function is given as follows:
+ * _mm_<name>_<data_type>
+ *
+ * The parts of this format are given as follows:
+ * 1. <name> describes the operation performed by the intrinsic
+ * 2. <data_type> identifies the data type of the function's primary arguments
+ *
+ * This last part, <data_type>, is a little complicated. It identifies the
+ * content of the input values, and can be set to any of the following values:
+ * + ps - vectors contain floats (ps stands for packed single-precision)
+ * + pd - vectors contain doubles (pd stands for packed double-precision)
+ * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
+ * signed integers
+ * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
+ * unsigned integers
+ * + si128 - unspecified 128-bit vector or 256-bit vector
+ * + m128/m128i/m128d - identifies input vector types when they are different
+ * than the type of the returned vector
+ *
+ * For example, _mm_setzero_ps. The _mm implies that the function returns
+ * a 128-bit vector. The _ps at the end implies that the argument vectors
+ * contain floats.
+ *
+ * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
+ * // Set packed 16-bit integers: 128 bits, 8 shorts, 16 bits each
+ * __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
+ * // Set packed 8-bit integers:
+ * // 128 bits, 16 chars, 8 bits each
+ * __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
+ * 4, 5, 12, 13, 6, 7, 14, 15);
+ * // Shuffle packed 8-bit integers
+ * __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
+ */
+
+/* Constants for use with _mm_prefetch. */
+enum _mm_hint {
+ _MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */
+ _MM_HINT_T0 = 1, /* load data to L1 and L2 cache */
+ _MM_HINT_T1 = 2, /* load data to L2 cache only */
+ _MM_HINT_T2 = 3, /* load data to L2 cache only, mark it as NTA */
+};
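+
+// Typical usage of these hints (the _mm_prefetch implementation itself appears
+// later in this file), assuming `ptr` points at data about to be read:
+//   _mm_prefetch((const char *) ptr, _MM_HINT_T0);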
+
+// The bit field mapping to the FPCR (floating-point control register)
+typedef struct {
+ uint16_t res0;
+ uint8_t res1 : 6;
+ uint8_t bit22 : 1;
+ uint8_t bit23 : 1;
+ uint8_t bit24 : 1;
+ uint8_t res2 : 7;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint32_t res3;
+#endif
+} fpcr_bitfield;
+
+// Takes the upper 64 bits of a and places it in the low end of the result
+// Takes the lower 64 bits of b and places it into the high end of the result.
+FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
+{
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
+}
+
+// takes the lower two 32-bit values from a, swaps them, and places them in the
+// low end of the result; takes the higher two 32-bit values from b, swaps
+// them, and places them in the high end of the result.
+FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
+{
+ float32x2_t a21 = vget_high_f32(
+ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
+ float32x2_t b03 = vget_low_f32(
+ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
+ return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
+{
+ float32x2_t a03 = vget_low_f32(
+ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
+ float32x2_t b21 = vget_high_f32(
+ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
+ return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
+}
+
+// keeps the low 64 bits of a in the low end and puts the high 64 bits of b in
+// the high end
+FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
+{
+ float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
+{
+ float32x2_t a22 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
+{
+ float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
+ float32x2_t b22 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
+ return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
+{
+ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ float32x2_t a22 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
+ float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
+{
+ float32x2_t a33 =
+ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
+ float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
+{
+ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+ float32_t b2 = vgetq_lane_f32(b, 2);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
+}
+
+FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
+{
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32_t b2 = vgetq_lane_f32(b, 2);
+ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+ return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
+}
+
+// For MSVC, we check only if it is ARM64, as every single ARM64 processor
+// supported by WoA has crypto extensions. If this changes in the future,
+// this can be verified via the runtime-only method of:
+// IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)
+#if (defined(_M_ARM64) && !defined(__clang__)) || \
+ (defined(__ARM_FEATURE_CRYPTO) && \
+ (defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64)))
+// Wraps vmull_p64
+FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
+{
+ poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
+ poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
+#if defined(_MSC_VER)
+ __n64 a1 = {a}, b1 = {b};
+ return vreinterpretq_u64_p128(vmull_p64(a1, b1));
+#else
+ return vreinterpretq_u64_p128(vmull_p64(a, b));
+#endif
+}
+#else // ARMv7 polyfill
+// ARMv7 and some AArch64 targets lack vmull_p64, but they do have vmull_p8.
+//
+// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
+// 64-bit->128-bit polynomial multiply.
+//
+// It needs some work and is somewhat slow, but it is still faster than all
+// known scalar methods.
+//
+// Algorithm adapted to C from
+// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
+// from "Fast Software Polynomial Multiplication on ARM Processors Using the
+// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
+// (https://hal.inria.fr/hal-01506572)
+static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
+{
+ poly8x8_t a = vreinterpret_p8_u64(_a);
+ poly8x8_t b = vreinterpret_p8_u64(_b);
+
+ // Masks
+ uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
+ vcreate_u8(0x00000000ffffffff));
+ uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
+ vcreate_u8(0x0000000000000000));
+
+ // Do the multiplies, rotating with vext to get all combinations
+ uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0
+ uint8x16_t e =
+ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
+ uint8x16_t f =
+ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
+ uint8x16_t g =
+ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
+ uint8x16_t h =
+ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
+ uint8x16_t i =
+ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
+ uint8x16_t j =
+ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
+ uint8x16_t k =
+        vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // K = A0 * B4
+
+ // Add cross products
+ uint8x16_t l = veorq_u8(e, f); // L = E + F
+ uint8x16_t m = veorq_u8(g, h); // M = G + H
+ uint8x16_t n = veorq_u8(i, j); // N = I + J
+
+ // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
+ // instructions.
+#if defined(__aarch64__)
+ uint8x16_t lm_p0 = vreinterpretq_u8_u64(
+ vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
+ uint8x16_t lm_p1 = vreinterpretq_u8_u64(
+ vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
+ uint8x16_t nk_p0 = vreinterpretq_u8_u64(
+ vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
+ uint8x16_t nk_p1 = vreinterpretq_u8_u64(
+ vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
+#else
+ uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
+ uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
+ uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
+ uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
+#endif
+ // t0 = (L) (P0 + P1) << 8
+ // t1 = (M) (P2 + P3) << 16
+ uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
+ uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
+ uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);
+
+ // t2 = (N) (P4 + P5) << 24
+ // t3 = (K) (P6 + P7) << 32
+ uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
+ uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
+ uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);
+
+ // De-interleave
+#if defined(__aarch64__)
+ uint8x16_t t0 = vreinterpretq_u8_u64(
+ vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
+ uint8x16_t t1 = vreinterpretq_u8_u64(
+ vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
+ uint8x16_t t2 = vreinterpretq_u8_u64(
+ vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
+ uint8x16_t t3 = vreinterpretq_u8_u64(
+ vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
+#else
+ uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
+ uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
+ uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
+ uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
+#endif
+ // Shift the cross products
+ uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
+ uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
+ uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
+ uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32
+
+ // Accumulate the products
+ uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
+ uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
+ uint8x16_t mix = veorq_u8(d, cross1);
+ uint8x16_t r = veorq_u8(mix, cross2);
+ return vreinterpretq_u64_u8(r);
+}
+#endif // ARMv7 polyfill
+
+// C equivalent:
+// __m128i _mm_shuffle_epi32_default(__m128i a,
+// __constrange(0, 255) int imm) {
+// __m128i ret;
+// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
+// ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03];
+// return ret;
+// }
+#define _mm_shuffle_epi32_default(a, imm) \
+ vreinterpretq_m128i_s32(vsetq_lane_s32( \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \
+ vsetq_lane_s32( \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \
+ vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
+ ((imm) >> 2) & 0x3), \
+ vmovq_n_s32(vgetq_lane_s32( \
+ vreinterpretq_s32_m128i(a), (imm) & (0x3))), \
+ 1), \
+ 2), \
+ 3))
+
+// Takes the upper 64 bits of a and places it in the low end of the result
+// Takes the lower 64 bits of a and places it into the high end of the result.
+FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
+{
+ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
+}
+
+// takes the lower two 32-bit values from a and swaps them and places in low end
+// of result takes the higher two 32 bit values from a and swaps them and places
+// in high end of result.
+FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
+{
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
+ return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
+}
+
+// rotates the least significant 32 bits into the most significant 32 bits, and
+// shifts the rest down
+FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
+{
+ return vreinterpretq_m128i_s32(
+ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
+}
+
+// rotates the most significant 32 bits into the least significant 32 bits, and
+// shifts the rest up
+FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
+{
+ return vreinterpretq_m128i_s32(
+ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
+}
+
+// gets the lower 64 bits of a, and places it in the upper 64 bits
+// gets the lower 64 bits of a and places it in the lower 64 bits
+FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
+{
+ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
+}
+
+// gets the lower 64 bits of a, swaps the 0 and 1 elements, and places it in the
+// lower 64 bits gets the lower 64 bits of a, and places it in the upper 64 bits
+FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
+{
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
+}
+
+// gets the lower 64 bits of a, swaps the 0 and 1 elements and places it in the
+// upper 64 bits gets the lower 64 bits of a, swaps the 0 and 1 elements, and
+// places it in the lower 64 bits
+FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
+{
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
+}
+
+FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
+{
+ int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
+ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
+ return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
+}
+
+FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
+{
+ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
+ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+ return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
+}
+
+FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
+{
+ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
+ return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
+}
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+#define _mm_shuffle_epi32_splat(a, imm) \
+ vreinterpretq_m128i_s32(vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm)))
+#else
+#define _mm_shuffle_epi32_splat(a, imm) \
+ vreinterpretq_m128i_s32( \
+ vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))))
+#endif
+
+// NEON does not support a general purpose permute intrinsic.
+// Shuffle single-precision (32-bit) floating-point elements in a using the
+// control in imm8, and store the results in dst.
+//
+// C equivalent:
+// __m128 _mm_shuffle_ps_default(__m128 a, __m128 b,
+// __constrange(0, 255) int imm) {
+// __m128 ret;
+// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
+// ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03];
+// return ret;
+// }
+//
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_ps
+#define _mm_shuffle_ps_default(a, b, imm) \
+ vreinterpretq_m128_f32(vsetq_lane_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
+ vsetq_lane_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
+ vsetq_lane_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
+ vmovq_n_f32( \
+ vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))), \
+ 1), \
+ 2), \
+ 3))
+
+// Shuffle 16-bit integers in the low 64 bits of a using the control in imm8.
+// Store the results in the low 64 bits of dst, with the high 64 bits being
+// copied from a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16
+#define _mm_shufflelo_epi16_function(a, imm) \
+ _sse2neon_define1( \
+ __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \
+ int16x4_t lowBits = vget_low_s16(ret); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \
+ 1); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \
+ 2); \
+ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \
+ 3); \
+ _sse2neon_return(vreinterpretq_m128i_s16(ret));)
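+
+// For example, _mm_shufflelo_epi16_function(a, _MM_SHUFFLE(0, 1, 2, 3))
+// reverses the four low 16-bit lanes of a and leaves the four high lanes
+// untouched.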
+
+// Shuffle 16-bit integers in the high 64 bits of a using the control in imm8.
+// Store the results in the high 64 bits of dst, with the low 64 bits being
+// copied from a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16
+#define _mm_shufflehi_epi16_function(a, imm) \
+ _sse2neon_define1( \
+ __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \
+ int16x4_t highBits = vget_high_s16(ret); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \
+ 5); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \
+ 6); \
+ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \
+ 7); \
+ _sse2neon_return(vreinterpretq_m128i_s16(ret));)
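+
+// Similarly, _mm_shufflehi_epi16_function(a, _MM_SHUFFLE(0, 1, 2, 3)) reverses
+// the four high 16-bit lanes of a and leaves the four low lanes untouched.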
+
+/* MMX */
+
+// _mm_empty is a no-op on Arm
+FORCE_INLINE void _mm_empty(void) {}
+
+/* SSE */
+
+// Add packed single-precision (32-bit) floating-point elements in a and b, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ps
+FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Add the lower single-precision (32-bit) floating-point element in a and b,
+// store the result in the lower element of dst, and copy the upper 3 packed
+// elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ss
+FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
+{
+ float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
+ float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
+ // the upper values in the result must be the remnants of <a>.
+ return vreinterpretq_m128_f32(vaddq_f32(a, value));
+}
+
+// Compute the bitwise AND of packed single-precision (32-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_ps
+FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+}
+
+// Compute the bitwise NOT of packed single-precision (32-bit) floating-point
+// elements in a and then AND with b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_ps
+FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ vbicq_s32(vreinterpretq_s32_m128(b),
+ vreinterpretq_s32_m128(a))); // *NOTE* argument swap
+}
+
+// Average packed unsigned 16-bit integers in a and b, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu16
+FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u16(
+ vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)));
+}
+
+// Average packed unsigned 8-bit integers in a and b, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu8
+FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u8(
+ vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for equality, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ps
+FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for equality, store the result in the lower element of dst, and copy the
+// upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ss
+FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpeq_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for greater-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ps
+FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for greater-than-or-equal, store the result in the lower element of dst,
+// and copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ss
+FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpge_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for greater-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ps
+FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for greater-than, store the result in the lower element of dst, and copy
+// the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ss
+FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpgt_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for less-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ps
+FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for less-than-or-equal, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ss
+FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmple_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for less-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ps
+FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(
+ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for less-than, store the result in the lower element of dst, and copy the
+// upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ss
+FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmplt_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for not-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ps
+FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(vmvnq_u32(
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for not-equal, store the result in the lower element of dst, and copy the
+// upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ss
+FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpneq_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for not-greater-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ps
+FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(vmvnq_u32(
+ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for not-greater-than-or-equal, store the result in the lower element of
+// dst, and copy the upper 3 packed elements from a to the upper elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ss
+FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpnge_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for not-greater-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ps
+FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(vmvnq_u32(
+ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for not-greater-than, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ss
+FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpngt_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for not-less-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ps
+FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(vmvnq_u32(
+ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for not-less-than-or-equal, store the result in the lower element of dst,
+// and copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ss
+FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpnle_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// for not-less-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ps
+FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_u32(vmvnq_u32(
+ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b for not-less-than, store the result in the lower element of dst, and copy
+// the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ss
+FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpnlt_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// to see if neither is NaN, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ps
+//
+// See also:
+// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
+// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
+FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
+{
+    // Note: NEON does not have an ordered-compare builtin.
+    // Compare a == a and b == b to detect NaNs, then AND the results to get
+    // the final mask.
+ uint32x4_t ceqaa =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t ceqbb =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b to see if neither is NaN, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ss
+FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpord_ps(a, b));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b
+// to see if either is NaN, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ps
+FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b)
+{
+ uint32x4_t f32a =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+ uint32x4_t f32b =
+ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b to see if either is NaN, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ss
+FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_cmpunord_ps(a, b));
+}
+
+// Compare the lower single-precision (32-bit) floating-point element in a and b
+// for equality, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_ss
+FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
+{
+ uint32x4_t a_eq_b =
+ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
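+    // vceqq_f32 sets a lane to all ones (0xFFFFFFFF) when the comparison
+    // holds; masking lane 0 with 0x1 turns that into the 0/1 boolean result.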
+ return vgetq_lane_u32(a_eq_b, 0) & 0x1;
+}
+
+// Compare the lower single-precision (32-bit) floating-point element in a and b
+// for greater-than-or-equal, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_ss
+FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
+{
+ uint32x4_t a_ge_b =
+ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return vgetq_lane_u32(a_ge_b, 0) & 0x1;
+}
+
+// Compare the lower single-precision (32-bit) floating-point element in a and b
+// for greater-than, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_ss
+FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
+{
+ uint32x4_t a_gt_b =
+ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return vgetq_lane_u32(a_gt_b, 0) & 0x1;
+}
+
+// Compare the lower single-precision (32-bit) floating-point element in a and b
+// for less-than-or-equal, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_ss
+FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
+{
+ uint32x4_t a_le_b =
+ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return vgetq_lane_u32(a_le_b, 0) & 0x1;
+}
+
+// Compare the lower single-precision (32-bit) floating-point element in a and b
+// for less-than, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_ss
+FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
+{
+ uint32x4_t a_lt_b =
+ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+ return vgetq_lane_u32(a_lt_b, 0) & 0x1;
+}
+
+// Compare the lower single-precision (32-bit) floating-point element in a and b
+// for not-equal, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_ss
+FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
+{
+ return !_mm_comieq_ss(a, b);
+}
+
+// Convert packed signed 32-bit integers in b to packed single-precision
+// (32-bit) floating-point elements, store the results in the lower 2 elements
+// of dst, and copy the upper 2 packed elements from a to the upper elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_pi2ps
+FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
+ vget_high_f32(vreinterpretq_f32_m128(a))));
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ps2pi
+FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a)
+{
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
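+    // vrndiq_f32 rounds to an integral value using the current rounding mode
+    // (FPCR), so the following vcvtnq_s32_f32 conversion is exact.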
+ return vreinterpret_m64_s32(
+ vget_low_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a)))));
+#else
+ return vreinterpret_m64_s32(vcvt_s32_f32(vget_low_f32(
+ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)))));
+#endif
+}
+
+// Convert the signed 32-bit integer b to a single-precision (32-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_si2ss
+FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b)
+{
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ss2si
+FORCE_INLINE int _mm_cvt_ss2si(__m128 a)
+{
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+ return vgetq_lane_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))),
+ 0);
+#else
+ float32_t data = vgetq_lane_f32(
+ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
+ return (int32_t) data;
+#endif
+}
+
+// Convert packed 16-bit integers in a to packed single-precision (32-bit)
+// floating-point elements, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi16_ps
+FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(
+ vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
+}
+
+// Convert packed 32-bit integers in b to packed single-precision (32-bit)
+// floating-point elements, store the results in the lower 2 elements of dst,
+// and copy the upper 2 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_ps
+FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
+ vget_high_f32(vreinterpretq_f32_m128(a))));
+}
+
+// Convert packed signed 32-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, store the results in the lower 2 elements
+// of dst, then convert the packed signed 32-bit integers in b to
+// single-precision (32-bit) floating-point elements, and store the results in
+// the upper 2 elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32x2_ps
+FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_s32(
+ vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))));
+}
+
+// Convert the lower packed 8-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi8_ps
+FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_s32(
+ vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a))))));
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 16-bit integers, and store the results in dst. Note: this intrinsic
+// will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and
+// 0x7FFFFFFF.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi16
+FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a)
+{
+ return vreinterpret_m64_s16(
+ vqmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a))));
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi32
+#define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a)
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 8-bit integers, and store the results in lower 4 elements of dst.
+// Note: this intrinsic will generate 0x7F, rather than 0x80, for input values
+// between 0x7F and 0x7FFFFFFF.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi8
+FORCE_INLINE __m64 _mm_cvtps_pi8(__m128 a)
+{
+ return vreinterpret_m64_s8(vqmovn_s16(
+ vcombine_s16(vreinterpret_s16_m64(_mm_cvtps_pi16(a)), vdup_n_s16(0))));
+}
+
+// Convert packed unsigned 16-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu16_ps
+FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(
+ vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a))));
+}
+
+// Convert the lower packed unsigned 8-bit integers in a to packed
+// single-precision (32-bit) floating-point elements, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu8_ps
+FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_u32(
+ vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a))))));
+}
+
+// Convert the signed 32-bit integer b to a single-precision (32-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_ss
+#define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b)
+
+// Convert the signed 64-bit integer b to a single-precision (32-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_ss
+FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b)
+{
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
+}
+
+// Copy the lower single-precision (32-bit) floating-point element of a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_f32
+FORCE_INLINE float _mm_cvtss_f32(__m128 a)
+{
+ return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si32
+#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a)
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 64-bit integer, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si64
+FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a)
+{
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+ return (int64_t) vgetq_lane_f32(vrndiq_f32(vreinterpretq_f32_m128(a)), 0);
+#else
+ float32_t data = vgetq_lane_f32(
+ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
+ return (int64_t) data;
+#endif
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ps2pi
+FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a)
+{
+ return vreinterpret_m64_s32(
+ vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))));
+}
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer with truncation, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ss2si
+FORCE_INLINE int _mm_cvtt_ss2si(__m128 a)
+{
+ return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0);
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_pi32
+#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a)
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 32-bit integer with truncation, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si32
+#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a)
+
+// Convert the lower single-precision (32-bit) floating-point element in a to a
+// 64-bit integer with truncation, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si64
+FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a)
+{
+ return (int64_t) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+}
+
+// Divide packed single-precision (32-bit) floating-point elements in a by
+// packed elements in b, and store the results in dst.
+// Due to ARMv7-A NEON's lack of a precise division intrinsic, we implement
+// division by estimating b's reciprocal with vrecpeq_f32, refining it with
+// Newton-Raphson iterations, and multiplying a by the refined reciprocal.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ps
+FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
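+    // One Newton-Raphson refinement step for 1/b: x1 = x0 * (2 - b * x0);
+    // vrecpsq_f32(x0, b) computes the (2 - x0 * b) factor.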
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
+    // Additional Newton-Raphson iteration for accuracy
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
+ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip));
+#endif
+}
+
+// Divide the lower single-precision (32-bit) floating-point element in a by the
+// lower single-precision (32-bit) floating-point element in b, store the result
+// in the lower element of dst, and copy the upper 3 packed elements from a to
+// the upper elements of dst.
+// Warning: on ARMv7-A this does not produce the same result as Intel and is
+// not IEEE-compliant.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ss
+FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
+{
+ float32_t value =
+ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+}
+
+// Extract a 16-bit integer from a, selected with imm8, and store the result in
+// the lower element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_pi16
+#define _mm_extract_pi16(a, imm) \
+ (int32_t) vget_lane_u16(vreinterpret_u16_m64(a), (imm))
+
+// Free aligned memory that was allocated with _mm_malloc.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_free
+#if !defined(SSE2NEON_ALLOC_DEFINED)
+FORCE_INLINE void _mm_free(void *addr)
+{
+ free(addr);
+}
+#endif
+
+FORCE_INLINE uint64_t _sse2neon_get_fpcr(void)
+{
+ uint64_t value;
+#if defined(_MSC_VER)
+ value = _ReadStatusReg(ARM64_FPCR);
+#else
+ __asm__ __volatile__("mrs %0, FPCR" : "=r"(value)); /* read */
+#endif
+ return value;
+}
+
+FORCE_INLINE void _sse2neon_set_fpcr(uint64_t value)
+{
+#if defined(_MSC_VER)
+ _WriteStatusReg(ARM64_FPCR, value);
+#else
+ __asm__ __volatile__("msr FPCR, %0" ::"r"(value)); /* write */
+#endif
+}
+
+// Macro: Get the flush zero bits from the MXCSR control and status register.
+// The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or
+// _MM_FLUSH_ZERO_OFF
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_FLUSH_ZERO_MODE
+FORCE_INLINE unsigned int _sse2neon_mm_get_flush_zero_mode(void)
+{
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ r.value = _sse2neon_get_fpcr();
+#else
+ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
+ return r.field.bit24 ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF;
+}
+
+// Macro: Get the rounding mode bits from the MXCSR control and status register.
+// The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST,
+// _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_ROUNDING_MODE
+FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void)
+{
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ r.value = _sse2neon_get_fpcr();
+#else
+ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
+ if (r.field.bit22) {
+ return r.field.bit23 ? _MM_ROUND_TOWARD_ZERO : _MM_ROUND_UP;
+ } else {
+ return r.field.bit23 ? _MM_ROUND_DOWN : _MM_ROUND_NEAREST;
+ }
+}
+
+// Copy a to dst, and insert the 16-bit integer i into dst at the location
+// specified by imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_pi16
+#define _mm_insert_pi16(a, b, imm) \
+ vreinterpret_m64_s16(vset_lane_s16((b), vreinterpret_s16_m64(a), (imm)))
+
+// Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+// elements) from memory into dst. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps
+FORCE_INLINE __m128 _mm_load_ps(const float *p)
+{
+ return vreinterpretq_m128_f32(vld1q_f32(p));
+}
+
+// Load a single-precision (32-bit) floating-point element from memory into all
+// elements of dst.
+//
+// dst[31:0] := MEM[mem_addr+31:mem_addr]
+// dst[63:32] := MEM[mem_addr+31:mem_addr]
+// dst[95:64] := MEM[mem_addr+31:mem_addr]
+// dst[127:96] := MEM[mem_addr+31:mem_addr]
+//
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps1
+#define _mm_load_ps1 _mm_load1_ps
+
+// Load a single-precision (32-bit) floating-point element from memory into the
+// lower of dst, and zero the upper 3 elements. mem_addr does not need to be
+// aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ss
+FORCE_INLINE __m128 _mm_load_ss(const float *p)
+{
+ return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
+}
+
+// Load a single-precision (32-bit) floating-point element from memory into all
+// elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_ps
+FORCE_INLINE __m128 _mm_load1_ps(const float *p)
+{
+ return vreinterpretq_m128_f32(vld1q_dup_f32(p));
+}
+
+// Load 2 single-precision (32-bit) floating-point elements from memory into the
+// upper 2 elements of dst, and copy the lower 2 elements from a to dst.
+// mem_addr does not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pi
+FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p)));
+}
+
+// Load 2 single-precision (32-bit) floating-point elements from memory into the
+// lower 2 elements of dst, and copy the upper 2 elements from a to dst.
+// mem_addr does not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pi
+FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
+{
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a)));
+}
+
+// Load 4 single-precision (32-bit) floating-point elements from memory into dst
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_ps
+FORCE_INLINE __m128 _mm_loadr_ps(const float *p)
+{
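+    // vrev64q_f32 swaps the elements within each 64-bit half
+    // ({p0,p1,p2,p3} -> {p1,p0,p3,p2}); rotating the result by two lanes with
+    // vextq_f32 then yields the fully reversed vector {p3,p2,p1,p0}.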
+ float32x4_t v = vrev64q_f32(vld1q_f32(p));
+ return vreinterpretq_m128_f32(vextq_f32(v, v, 2));
+}
+
+// Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+// elements) from memory into dst. mem_addr does not need to be aligned on any
+// particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_ps
+FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
+{
+    // NEON loads do not require any particular alignment, so _mm_load_ps and
+    // _mm_loadu_ps are equivalent on this target
+ return vreinterpretq_m128_f32(vld1q_f32(p));
+}
+
+// Load unaligned 16-bit integer from memory into the first element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si16
+FORCE_INLINE __m128i _mm_loadu_si16(const void *p)
+{
+ return vreinterpretq_m128i_s16(
+ vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0));
+}
+
+// Load unaligned 64-bit integer from memory into the first element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si64
+FORCE_INLINE __m128i _mm_loadu_si64(const void *p)
+{
+ return vreinterpretq_m128i_s64(
+ vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0)));
+}
+
+// Allocate size bytes of memory, aligned to the alignment specified in align,
+// and return a pointer to the allocated memory. _mm_free should be used to free
+// memory that is allocated with _mm_malloc.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_malloc
+#if !defined(SSE2NEON_ALLOC_DEFINED)
+FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
+{
+ void *ptr;
+ if (align == 1)
+ return malloc(size);
+ if (align == 2 || (sizeof(void *) == 8 && align == 4))
+ align = sizeof(void *);
+ if (!posix_memalign(&ptr, align, size))
+ return ptr;
+ return NULL;
+}
+#endif
+
+// Conditionally store 8-bit integer elements from a into memory using mask
+// (elements are not stored when the highest bit is not set in the corresponding
+// element) and a non-temporal memory hint.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmove_si64
+FORCE_INLINE void _mm_maskmove_si64(__m64 a, __m64 mask, char *mem_addr)
+{
+ int8x8_t shr_mask = vshr_n_s8(vreinterpret_s8_m64(mask), 7);
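+    // The arithmetic shift replicates each mask byte's sign bit, giving 0xFF
+    // where the byte should be stored and 0x00 where it should be left alone;
+    // vbsl_s8 below then merges bytes of a with the bytes already in memory.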
+ __m128 b = _mm_load_ps((const float *) mem_addr);
+ int8x8_t masked =
+ vbsl_s8(vreinterpret_u8_s8(shr_mask), vreinterpret_s8_m64(a),
+ vreinterpret_s8_u64(vget_low_u64(vreinterpretq_u64_m128(b))));
+ vst1_s8((int8_t *) mem_addr, masked);
+}
+
+// Conditionally store 8-bit integer elements from a into memory using mask
+// (elements are not stored when the highest bit is not set in the corresponding
+// element) and a non-temporal memory hint.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_maskmovq
+#define _m_maskmovq(a, mask, mem_addr) _mm_maskmove_si64(a, mask, mem_addr)
+
+// Compare packed signed 16-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pi16
+FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s16(
+ vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b,
+// and store packed maximum values in dst. dst does not follow the IEEE Standard
+// for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or
+// signed-zero values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ps
+FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
+{
+#if SSE2NEON_PRECISE_MINMAX
+ float32x4_t _a = vreinterpretq_f32_m128(a);
+ float32x4_t _b = vreinterpretq_f32_m128(b);
+ return vreinterpretq_m128_f32(vbslq_f32(vcgtq_f32(_a, _b), _a, _b));
+#else
+ return vreinterpretq_m128_f32(
+ vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#endif
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pu8
+FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u8(
+ vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b, store the maximum value in the lower element of dst, and copy the upper 3
+// packed elements from a to the upper elements of dst. dst does not follow the
+// IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when
+// inputs are NaN or signed-zero values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ss
+FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
+{
+ float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+}
+
+// Compare packed signed 16-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pi16
+FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s16(
+ vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+}
+
+// Compare packed single-precision (32-bit) floating-point elements in a and b,
+// and store packed minimum values in dst. dst does not follow the IEEE Standard
+// for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or
+// signed-zero values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ps
+FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
+{
+#if SSE2NEON_PRECISE_MINMAX
+ float32x4_t _a = vreinterpretq_f32_m128(a);
+ float32x4_t _b = vreinterpretq_f32_m128(b);
+ return vreinterpretq_m128_f32(vbslq_f32(vcltq_f32(_a, _b), _a, _b));
+#else
+ return vreinterpretq_m128_f32(
+ vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#endif
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pu8
+FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u8(
+ vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+}
+
+// Compare the lower single-precision (32-bit) floating-point elements in a and
+// b, store the minimum value in the lower element of dst, and copy the upper 3
+// packed elements from a to the upper elements of dst. dst does not follow the
+// IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when
+// inputs are NaN or signed-zero values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ss
+FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
+{
+ float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+}
+
+// Move the lower single-precision (32-bit) floating-point element from b to the
+// lower element of dst, and copy the upper 3 packed elements from a to the
+// upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_ss
+FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0),
+ vreinterpretq_f32_m128(a), 0));
+}
+
+// Move the upper 2 single-precision (32-bit) floating-point elements from b to
+// the lower 2 elements of dst, and copy the upper 2 elements from a to the
+// upper 2 elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehl_ps
+FORCE_INLINE __m128 _mm_movehl_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_u64(
+ vzip2q_u64(vreinterpretq_u64_m128(b), vreinterpretq_u64_m128(a)));
+#else
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
+#endif
+}
+
+// Move the lower 2 single-precision (32-bit) floating-point elements from b to
+// the upper 2 elements of dst, and copy the lower 2 elements from a to the
+// lower 2 elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movelh_ps
+FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
+{
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
+ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+}
+
+// Create mask from the most significant bit of each 8-bit element in a, and
+// store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pi8
+FORCE_INLINE int _mm_movemask_pi8(__m64 a)
+{
+ uint8x8_t input = vreinterpret_u8_m64(a);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ static const int8_t shift[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+ uint8x8_t tmp = vshr_n_u8(input, 7);
+ return vaddv_u8(vshl_u8(tmp, vld1_s8(shift)));
+#else
+    // Refer to the implementation of `_mm_movemask_epi8`
+ uint16x4_t high_bits = vreinterpret_u16_u8(vshr_n_u8(input, 7));
+ uint32x2_t paired16 =
+ vreinterpret_u32_u16(vsra_n_u16(high_bits, high_bits, 7));
+ uint8x8_t paired32 =
+ vreinterpret_u8_u32(vsra_n_u32(paired16, paired16, 14));
+ return vget_lane_u8(paired32, 0) | ((int) vget_lane_u8(paired32, 4) << 4);
+#endif
+}
+
+// Set each bit of mask dst based on the most significant bit of the
+// corresponding packed single-precision (32-bit) floating-point element in a.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_ps
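+// For example (illustrative), _mm_movemask_ps(_mm_set_ps(4.0f, -3.0f, 2.0f,
+// -1.0f)) returns 0b0101, since only elements 0 and 2 have their sign bit set.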
+FORCE_INLINE int _mm_movemask_ps(__m128 a)
+{
+ uint32x4_t input = vreinterpretq_u32_m128(a);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ static const int32_t shift[4] = {0, 1, 2, 3};
+ uint32x4_t tmp = vshrq_n_u32(input, 31);
+ return vaddvq_u32(vshlq_u32(tmp, vld1q_s32(shift)));
+#else
+ // Uses the exact same method as _mm_movemask_epi8, see that for details.
+ // Shift out everything but the sign bits with a 32-bit unsigned shift
+ // right.
+ uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
+ // Merge the two pairs together with a 64-bit unsigned shift right + add.
+ uint8x16_t paired =
+ vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
+ // Extract the result.
+ return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
+#endif
+}
+
+// Multiply packed single-precision (32-bit) floating-point elements in a and b,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ps
+FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Multiply the lower single-precision (32-bit) floating-point element in a and
+// b, store the result in the lower element of dst, and copy the upper 3 packed
+// elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ss
+FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_mul_ps(a, b));
+}
+
+// Multiply the packed unsigned 16-bit integers in a and b, producing
+// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+// integers in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_pu16
+FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b)
+{
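+    // vmull_u16 widens the products to 32 bits; vshrn_n_u32(..., 16) keeps the
+    // high 16 bits of each product while narrowing back to 16-bit lanes.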
+ return vreinterpret_m64_u16(vshrn_n_u32(
+ vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16));
+}
+
+// Compute the bitwise OR of packed single-precision (32-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_ps
+FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+}
+
+// Average packed unsigned 8-bit integers in a and b, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgb
+#define _m_pavgb(a, b) _mm_avg_pu8(a, b)
+
+// Average packed unsigned 16-bit integers in a and b, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgw
+#define _m_pavgw(a, b) _mm_avg_pu16(a, b)
+
+// Extract a 16-bit integer from a, selected with imm8, and store the result in
+// the lower element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pextrw
+#define _m_pextrw(a, imm) _mm_extract_pi16(a, imm)
+
+// Copy a to dst, and insert the 16-bit integer i into dst at the location
+// specified by imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_pinsrw
+#define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm)
+
+// Compare packed signed 16-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxsw
+#define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxub
+#define _m_pmaxub(a, b) _mm_max_pu8(a, b)
+
+// Compare packed signed 16-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminsw
+#define _m_pminsw(a, b) _mm_min_pi16(a, b)
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminub
+#define _m_pminub(a, b) _mm_min_pu8(a, b)
+
+// Create mask from the most significant bit of each 8-bit element in a, and
+// store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmovmskb
+#define _m_pmovmskb(a) _mm_movemask_pi8(a)
+
+// Multiply the packed unsigned 16-bit integers in a and b, producing
+// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+// integers in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmulhuw
+#define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b)
+
+// Fetch the line of data from memory that contains address p to a location in
+// the cache hierarchy specified by the locality hint i.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_prefetch
+FORCE_INLINE void _mm_prefetch(char const *p, int i)
+{
+ (void) i;
+#if defined(_MSC_VER)
+ switch (i) {
+ case _MM_HINT_NTA:
+ __prefetch2(p, 1);
+ break;
+ case _MM_HINT_T0:
+ __prefetch2(p, 0);
+ break;
+ case _MM_HINT_T1:
+ __prefetch2(p, 2);
+ break;
+ case _MM_HINT_T2:
+ __prefetch2(p, 4);
+ break;
+ }
+#else
+ switch (i) {
+ case _MM_HINT_NTA:
+ __builtin_prefetch(p, 0, 0);
+ break;
+ case _MM_HINT_T0:
+ __builtin_prefetch(p, 0, 3);
+ break;
+ case _MM_HINT_T1:
+ __builtin_prefetch(p, 0, 2);
+ break;
+ case _MM_HINT_T2:
+ __builtin_prefetch(p, 0, 1);
+ break;
+ }
+#endif
+}
+
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce four
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_psadbw
+#define _m_psadbw(a, b) _mm_sad_pu8(a, b)
+
+// Shuffle 16-bit integers in a using the control in imm8, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pshufw
+#define _m_pshufw(a, imm) _mm_shuffle_pi16(a, imm)
+
+// Compute the approximate reciprocal of packed single-precision (32-bit)
+// floating-point elements in a, and store the results in dst. The maximum
+// relative error for this approximation is less than 1.5*2^-12.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ps
+FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
+{
+ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
+ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
+ return vreinterpretq_m128_f32(recip);
+}
+
+// Compute the approximate reciprocal of the lower single-precision (32-bit)
+// floating-point element in a, store the result in the lower element of dst,
+// and copy the upper 3 packed elements from a to the upper elements of dst. The
+// maximum relative error for this approximation is less than 1.5*2^-12.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ss
+FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
+{
+ return _mm_move_ss(a, _mm_rcp_ps(a));
+}
+
+// Compute the approximate reciprocal square root of packed single-precision
+// (32-bit) floating-point elements in a, and store the results in dst. The
+// maximum relative error for this approximation is less than 1.5*2^-12.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ps
+FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
+{
+ float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+
+ // Generate masks for detecting whether input has any 0.0f/-0.0f
+ // (which becomes positive/negative infinity by IEEE-754 arithmetic rules).
+ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
+ const uint32x4_t neg_inf = vdupq_n_u32(0xFF800000);
+ const uint32x4_t has_pos_zero =
+ vceqq_u32(pos_inf, vreinterpretq_u32_f32(out));
+ const uint32x4_t has_neg_zero =
+ vceqq_u32(neg_inf, vreinterpretq_u32_f32(out));
+
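+    // One Newton-Raphson step for 1/sqrt(in): x1 = x0 * (3 - in * x0^2) / 2;
+    // vrsqrtsq_f32(in * x0, x0) computes the (3 - in * x0^2) / 2 factor.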
+ out = vmulq_f32(
+ out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+
+ // Set output vector element to infinity/negative-infinity if
+ // the corresponding input vector element is 0.0f/-0.0f.
+ out = vbslq_f32(has_pos_zero, (float32x4_t) pos_inf, out);
+ out = vbslq_f32(has_neg_zero, (float32x4_t) neg_inf, out);
+
+ return vreinterpretq_m128_f32(out);
+}
+
+// Compute the approximate reciprocal square root of the lower single-precision
+// (32-bit) floating-point element in a, store the result in the lower element
+// of dst, and copy the upper 3 packed elements from a to the upper elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ss
+FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
+{
+ return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
+}
+
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce four
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_pu8
+FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b)
+{
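+    // vabd_u8 yields the eight absolute differences; the vpaddl chain widens
+    // and pairwise-adds them (8x8 -> 4x16 -> 2x32 -> 1x64) into a single sum,
+    // which is then placed in the lowest 16-bit lane of an otherwise zero dst.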
+ uint64x1_t t = vpaddl_u32(vpaddl_u16(
+ vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)))));
+ return vreinterpret_m64_u16(
+ vset_lane_u16((int) vget_lane_u64(t, 0), vdup_n_u16(0), 0));
+}
+
+// Macro: Set the flush zero bits of the MXCSR control and status register to
+// the value in unsigned 32-bit integer a. The flush zero may contain any of the
+// following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_FLUSH_ZERO_MODE
+FORCE_INLINE void _sse2neon_mm_set_flush_zero_mode(unsigned int flag)
+{
+ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
+ // regardless of the value of the FZ bit.
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ r.value = _sse2neon_get_fpcr();
+#else
+ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
+ r.field.bit24 = (flag & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_ON;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ _sse2neon_set_fpcr(r.value);
+#else
+ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
+#endif
+}
+
+// Set packed single-precision (32-bit) floating-point elements in dst with the
+// supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps
+FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
+{
+ float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+}
+
+// Broadcast single-precision (32-bit) floating-point value a to all elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps1
+FORCE_INLINE __m128 _mm_set_ps1(float _w)
+{
+ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
+}
+
+// Macro: Set the rounding mode bits of the MXCSR control and status register to
+// the value in unsigned 32-bit integer a. The rounding mode may contain any of
+// the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP,
+// _MM_ROUND_TOWARD_ZERO
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_ROUNDING_MODE
+FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding)
+{
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ r.value = _sse2neon_get_fpcr();
+#else
+ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
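+    // FPCR/FPSCR.RMode occupies bits [23:22]:
+    //   0b00 = round to nearest (even), 0b01 = round towards plus infinity,
+    //   0b10 = round towards minus infinity, 0b11 = round towards zero.
+    // Here bit23 is the high bit of RMode and bit22 the low bit.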
+ switch (rounding) {
+ case _MM_ROUND_TOWARD_ZERO:
+ r.field.bit22 = 1;
+ r.field.bit23 = 1;
+ break;
+ case _MM_ROUND_DOWN:
+ r.field.bit22 = 0;
+ r.field.bit23 = 1;
+ break;
+ case _MM_ROUND_UP:
+ r.field.bit22 = 1;
+ r.field.bit23 = 0;
+ break;
+ default: //_MM_ROUND_NEAREST
+ r.field.bit22 = 0;
+ r.field.bit23 = 0;
+ }
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ _sse2neon_set_fpcr(r.value);
+#else
+ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
+#endif
+}
+
+// Copy single-precision (32-bit) floating-point element a to the lower element
+// of dst, and zero the upper 3 elements.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ss
+FORCE_INLINE __m128 _mm_set_ss(float a)
+{
+ return vreinterpretq_m128_f32(vsetq_lane_f32(a, vdupq_n_f32(0), 0));
+}
+
+// Broadcast single-precision (32-bit) floating-point value a to all elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_ps
+FORCE_INLINE __m128 _mm_set1_ps(float _w)
+{
+ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
+}
+
+// Set the MXCSR control and status register with the value in unsigned 32-bit
+// integer a.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setcsr
+// FIXME: _mm_setcsr() implementation supports changing the rounding mode only.
+FORCE_INLINE void _mm_setcsr(unsigned int a)
+{
+ _MM_SET_ROUNDING_MODE(a);
+}
+
+// Get the unsigned 32-bit value of the MXCSR control and status register.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getcsr
+// FIXME: _mm_getcsr() implementation supports reading the rounding mode only.
+FORCE_INLINE unsigned int _mm_getcsr(void)
+{
+ return _MM_GET_ROUNDING_MODE();
+}
+
+// Set packed single-precision (32-bit) floating-point elements in dst with the
+// supplied values in reverse order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_ps
+FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
+{
+ float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+}
+
+// Return vector of type __m128 with all elements set to zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_ps
+FORCE_INLINE __m128 _mm_setzero_ps(void)
+{
+ return vreinterpretq_m128_f32(vdupq_n_f32(0));
+}
+
+// Shuffle 16-bit integers in a using the control in imm8, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi16
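+// For example (illustrative), lane n of dst takes the lane of a selected by
+// bits [2n+1:2n] of imm8, so _mm_shuffle_pi16(a, _MM_SHUFFLE(0, 1, 2, 3))
+// reverses the four 16-bit lanes and _MM_SHUFFLE(3, 2, 1, 0) is the identity.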
+#ifdef _sse2neon_shuffle
+#define _mm_shuffle_pi16(a, imm) \
+ vreinterpret_m64_s16(vshuffle_s16( \
+ vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \
+ ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3)))
+#else
+#define _mm_shuffle_pi16(a, imm) \
+ _sse2neon_define1( \
+ __m64, a, int16x4_t ret; \
+ ret = vmov_n_s16( \
+ vget_lane_s16(vreinterpret_s16_m64(_a), (imm) & (0x3))); \
+ ret = vset_lane_s16( \
+ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 2) & 0x3), ret, \
+ 1); \
+ ret = vset_lane_s16( \
+ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 4) & 0x3), ret, \
+ 2); \
+ ret = vset_lane_s16( \
+ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 6) & 0x3), ret, \
+ 3); \
+ _sse2neon_return(vreinterpret_m64_s16(ret));)
+#endif
+
+// Perform a serializing operation on all store-to-memory instructions that were
+// issued prior to this instruction. Guarantees that every store instruction
+// that precedes the fence in program order is globally visible before any
+// store instruction which follows the fence in program order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sfence
+FORCE_INLINE void _mm_sfence(void)
+{
+ _sse2neon_smp_mb();
+}
+
+// Perform a serializing operation on all load-from-memory and store-to-memory
+// instructions that were issued prior to this instruction. Guarantees that
+// every memory access that precedes, in program order, the memory fence
+// instruction is globally visible before any memory instruction which follows
+// the fence in program order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mfence
+FORCE_INLINE void _mm_mfence(void)
+{
+ _sse2neon_smp_mb();
+}
+
+// Perform a serializing operation on all load-from-memory instructions that
+// were issued prior to this instruction. Guarantees that every load instruction
+// that precedes the fence in program order is globally visible before any load
+// instruction which follows the fence in program order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lfence
+FORCE_INLINE void _mm_lfence(void)
+{
+ _sse2neon_smp_mb();
+}
+
+// Shuffle single-precision (32-bit) floating-point elements in a and b using
+// the control in imm8, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_ps
+// FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
+// int imm)
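+// The two low lanes of dst are selected from a and the two high lanes from b;
+// for example (illustrative), _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0))
+// yields {a0, a1, b2, b3} and _MM_SHUFFLE(0, 0, 0, 0) yields {a0, a0, b0, b0}.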
+#ifdef _sse2neon_shuffle
+#define _mm_shuffle_ps(a, b, imm) \
+ __extension__({ \
+ float32x4_t _input1 = vreinterpretq_f32_m128(a); \
+ float32x4_t _input2 = vreinterpretq_f32_m128(b); \
+ float32x4_t _shuf = \
+ vshuffleq_s32(_input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
+ (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
+ vreinterpretq_m128_f32(_shuf); \
+ })
+#else // generic
+#define _mm_shuffle_ps(a, b, imm) \
+ _sse2neon_define2( \
+ __m128, a, b, __m128 ret; switch (imm) { \
+ case _MM_SHUFFLE(1, 0, 3, 2): \
+ ret = _mm_shuffle_ps_1032(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(2, 3, 0, 1): \
+ ret = _mm_shuffle_ps_2301(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(0, 3, 2, 1): \
+ ret = _mm_shuffle_ps_0321(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(2, 1, 0, 3): \
+ ret = _mm_shuffle_ps_2103(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 1, 0): \
+ ret = _mm_movelh_ps(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 0, 1): \
+ ret = _mm_shuffle_ps_1001(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(0, 1, 0, 1): \
+ ret = _mm_shuffle_ps_0101(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(3, 2, 1, 0): \
+ ret = _mm_shuffle_ps_3210(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(0, 0, 1, 1): \
+ ret = _mm_shuffle_ps_0011(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(0, 0, 2, 2): \
+ ret = _mm_shuffle_ps_0022(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(2, 2, 0, 0): \
+ ret = _mm_shuffle_ps_2200(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(3, 2, 0, 2): \
+ ret = _mm_shuffle_ps_3202(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(3, 2, 3, 2): \
+ ret = _mm_movehl_ps(_b, _a); \
+ break; \
+ case _MM_SHUFFLE(1, 1, 3, 3): \
+ ret = _mm_shuffle_ps_1133(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(2, 0, 1, 0): \
+ ret = _mm_shuffle_ps_2010(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(2, 0, 0, 1): \
+ ret = _mm_shuffle_ps_2001(_a, _b); \
+ break; \
+ case _MM_SHUFFLE(2, 0, 3, 2): \
+ ret = _mm_shuffle_ps_2032(_a, _b); \
+ break; \
+ default: \
+ ret = _mm_shuffle_ps_default(_a, _b, (imm)); \
+ break; \
+ } _sse2neon_return(ret);)
+#endif
+
+// Compute the square root of packed single-precision (32-bit) floating-point
+// elements in a, and store the results in dst.
+// Due to ARMv7-A NEON's lack of a precise square root intrinsic, we estimate
+// the reciprocal square root, refine it with Newton-Raphson iterations, and
+// multiply the input by the refined estimate (sqrt(s) = s * 1/sqrt(s)).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ps
+FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
+#else
+ float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+
+ // Test for vrsqrteq_f32(0) -> positive infinity case.
+ // Change to zero, so that s * 1/sqrt(s) result is zero too.
+ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
+ const uint32x4_t div_by_zero =
+ vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
+ recip = vreinterpretq_f32_u32(
+ vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
+
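+    // Newton-Raphson step for 1/sqrt(in): x1 = x0 * (3 - in * x0^2) / 2, where
+    // vrsqrtsq_f32(x0 * x0, in) supplies the (3 - in * x0^2) / 2 factor.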
+ recip = vmulq_f32(
+ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+ recip);
+    // Additional Newton-Raphson iteration for accuracy
+ recip = vmulq_f32(
+ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+ recip);
+
+ // sqrt(s) = s * 1/sqrt(s)
+ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
+#endif
+}
+
+// Compute the square root of the lower single-precision (32-bit) floating-point
+// element in a, store the result in the lower element of dst, and copy the
+// upper 3 packed elements from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ss
+FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
+{
+ float32_t value =
+ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
+ return vreinterpretq_m128_f32(
+ vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
+}
+
+// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
+// or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps
+FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
+{
+ vst1q_f32(p, vreinterpretq_f32_m128(a));
+}
+
+// Store the lower single-precision (32-bit) floating-point element from a into
+// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps1
+FORCE_INLINE void _mm_store_ps1(float *p, __m128 a)
+{
+ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ vst1q_f32(p, vdupq_n_f32(a0));
+}
+
+// Store the lower single-precision (32-bit) floating-point element from a into
+// memory. mem_addr does not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ss
+FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
+{
+ vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
+}
+
+// Store the lower single-precision (32-bit) floating-point element from a into
+// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_ps
+#define _mm_store1_ps _mm_store_ps1
+
+// Store the upper 2 single-precision (32-bit) floating-point elements from a
+// into memory.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pi
+FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
+{
+ *p = vreinterpret_m64_f32(vget_high_f32(a));
+}
+
+// Store the lower 2 single-precision (32-bit) floating-point elements from a
+// into memory.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pi
+FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
+{
+ *p = vreinterpret_m64_f32(vget_low_f32(a));
+}
+
+// Store 4 single-precision (32-bit) floating-point elements from a into memory
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_ps
+FORCE_INLINE void _mm_storer_ps(float *p, __m128 a)
+{
+ float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a));
+ float32x4_t rev = vextq_f32(tmp, tmp, 2);
+ vst1q_f32(p, rev);
+}
+
+// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+// elements) from a into memory. mem_addr does not need to be aligned on any
+// particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_ps
+FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
+{
+ vst1q_f32(p, vreinterpretq_f32_m128(a));
+}
+
+// Stores 16-bits of integer data a at the address p.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si16
+FORCE_INLINE void _mm_storeu_si16(void *p, __m128i a)
+{
+ vst1q_lane_s16((int16_t *) p, vreinterpretq_s16_m128i(a), 0);
+}
+
+// Stores 64-bits of integer data a at the address p.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si64
+FORCE_INLINE void _mm_storeu_si64(void *p, __m128i a)
+{
+ vst1q_lane_s64((int64_t *) p, vreinterpretq_s64_m128i(a), 0);
+}
+
+// Store 64-bits of integer data from a into memory using a non-temporal memory
+// hint.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pi
+FORCE_INLINE void _mm_stream_pi(__m64 *p, __m64 a)
+{
+ vst1_s64((int64_t *) p, vreinterpret_s64_m64(a));
+}
+
+// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-
+// point elements) from a into memory using a non-temporal memory hint.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_ps
+FORCE_INLINE void _mm_stream_ps(float *p, __m128 a)
+{
+#if __has_builtin(__builtin_nontemporal_store)
+ __builtin_nontemporal_store(a, (float32x4_t *) p);
+#else
+ vst1q_f32(p, vreinterpretq_f32_m128(a));
+#endif
+}
+
+// Subtract packed single-precision (32-bit) floating-point elements in b from
+// packed single-precision (32-bit) floating-point elements in a, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ps
+FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_f32(
+ vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+}
+
+// Subtract the lower single-precision (32-bit) floating-point element in b from
+// the lower single-precision (32-bit) floating-point element in a, store the
+// result in the lower element of dst, and copy the upper 3 packed elements from
+// a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ss
+FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_sub_ps(a, b));
+}
+
+// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
+// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
+// transposed matrix in these vectors (row0 now contains column 0, etc.).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=MM_TRANSPOSE4_PS
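+// Implementation note: vtrnq_f32 transposes each 2x2 block of the row pairs
+// (row0,row1) and (row2,row3); combining the low halves of the results yields
+// columns 0 and 1, and combining the high halves yields columns 2 and 3.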
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+ do { \
+ float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
+ float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
+ row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
+ vget_low_f32(ROW23.val[0])); \
+ row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
+ vget_low_f32(ROW23.val[1])); \
+ row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
+ vget_high_f32(ROW23.val[0])); \
+ row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
+ vget_high_f32(ROW23.val[1])); \
+ } while (0)
+
+// According to the documentation, these intrinsics behave the same as the
+// non-'u' versions, so we simply alias them here.
+#define _mm_ucomieq_ss _mm_comieq_ss
+#define _mm_ucomige_ss _mm_comige_ss
+#define _mm_ucomigt_ss _mm_comigt_ss
+#define _mm_ucomile_ss _mm_comile_ss
+#define _mm_ucomilt_ss _mm_comilt_ss
+#define _mm_ucomineq_ss _mm_comineq_ss
+
+// Return vector of type __m128i with undefined elements.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_si128
+FORCE_INLINE __m128i _mm_undefined_si128(void)
+{
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+ __m128i a;
+#if defined(_MSC_VER)
+ a = _mm_setzero_si128();
+#endif
+ return a;
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+// Return vector of type __m128 with undefined elements.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_ps
+FORCE_INLINE __m128 _mm_undefined_ps(void)
+{
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+ __m128 a;
+#if defined(_MSC_VER)
+ a = _mm_setzero_ps();
+#endif
+ return a;
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+// Unpack and interleave single-precision (32-bit) floating-point elements from
+// the high half of a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_ps
+FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
+ float32x2x2_t result = vzip_f32(a1, b1);
+ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave single-precision (32-bit) floating-point elements from
+// the low half of a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_ps
+FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
+ float32x2x2_t result = vzip_f32(a1, b1);
+ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
+#endif
+}
+
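+// Illustrative example (values only, not part of the API): with
+// a = {a0, a1, a2, a3} and b = {b0, b1, b2, b3} listed from low to high lane,
+//   _mm_unpacklo_ps(a, b) -> {a0, b0, a1, b1}
+//   _mm_unpackhi_ps(a, b) -> {a2, b2, a3, b3}
+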
+// Compute the bitwise XOR of packed single-precision (32-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_ps
+FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
+{
+ return vreinterpretq_m128_s32(
+ veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+}
+
+/* SSE2 */
+
+// Add packed 16-bit integers in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi16
+FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Add packed 32-bit integers in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi32
+FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Add packed 64-bit integers in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi64
+FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s64(
+ vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+}
+
+// Add packed 8-bit integers in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi8
+FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Add packed double-precision (64-bit) floating-point elements in a and b, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_pd
+FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] + db[0];
+ c[1] = da[1] + db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Add the lower double-precision (64-bit) floating-point element in a and b,
+// store the result in the lower element of dst, and copy the upper element from
+// a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_sd
+FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_add_pd(a, b));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] + db[0];
+ c[1] = da[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Add 64-bit integers a and b, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_si64
+FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s64(
+ vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
+}
+
+// Add packed signed 16-bit integers in a and b using saturation, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi16
+FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Add packed signed 8-bit integers in a and b using saturation, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi8
+FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Add packed unsigned 16-bit integers in a and b using saturation, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu16
+FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Add packed unsigned 8-bit integers in a and b using saturation, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu8
+FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
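+// Illustrative example of the saturating behavior (not part of the API):
+//   __m128i x = _mm_set1_epi8((char) 200);
+//   __m128i y = _mm_set1_epi8((char) 100);
+//   __m128i r = _mm_adds_epu8(x, y); // every unsigned byte clamps to 255
+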
+// Compute the bitwise AND of packed double-precision (64-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_pd
+FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_s64(
+ vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+// and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_si128
+FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compute the bitwise NOT of packed double-precision (64-bit) floating-point
+// elements in a and then AND with b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_pd
+FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b)
+{
+ // *NOTE* argument swap
+ return vreinterpretq_m128d_s64(
+ vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a)));
+}
+
+// Compute the bitwise NOT of 128 bits (representing integer data) in a and then
+// AND with b, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_si128
+FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vbicq_s32(vreinterpretq_s32_m128i(b),
+ vreinterpretq_s32_m128i(a))); // *NOTE* argument swap
+}
+
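+// Illustrative example of the operand order (dst = (~a) & b, matching x86):
+//   __m128i mask  = _mm_set1_epi32(0x0000FFFF);
+//   __m128i value = _mm_set1_epi32(0x12345678);
+//   __m128i hi    = _mm_andnot_si128(mask, value); // 0x12340000 in each lane
+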
+// Average packed unsigned 16-bit integers in a and b, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu16
+FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
+{
+ return (__m128i) vrhaddq_u16(vreinterpretq_u16_m128i(a),
+ vreinterpretq_u16_m128i(b));
+}
+
+// Average packed unsigned 8-bit integers in a and b, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu8
+FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
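+// Illustrative example (not part of the API): the average rounds up, i.e.
+// dst = (a + b + 1) >> 1 per lane, so
+//   _mm_avg_epu8(_mm_set1_epi8(1), _mm_set1_epi8(2)) // yields 2 in every byte
+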
+// Shift a left by imm8 bytes while shifting in zeros, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bslli_si128
+#define _mm_bslli_si128(a, imm) _mm_slli_si128(a, imm)
+
+// Shift a right by imm8 bytes while shifting in zeros, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bsrli_si128
+#define _mm_bsrli_si128(a, imm) _mm_srli_si128(a, imm)
+
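+// Illustrative example (not part of the API): the byte shifts operate on the
+// whole 128-bit register, not per lane:
+//   __m128i v = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
+//                            7, 6, 5, 4, 3, 2, 1, 0);
+//   __m128i r = _mm_bsrli_si128(v, 4); // bytes {4..15, 0, 0, 0, 0}, low to high
+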
+// Cast vector of type __m128d to type __m128. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_ps
+FORCE_INLINE __m128 _mm_castpd_ps(__m128d a)
+{
+ return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a));
+}
+
+// Cast vector of type __m128d to type __m128i. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_si128
+FORCE_INLINE __m128i _mm_castpd_si128(__m128d a)
+{
+ return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a));
+}
+
+// Cast vector of type __m128 to type __m128d. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_pd
+FORCE_INLINE __m128d _mm_castps_pd(__m128 a)
+{
+ return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a));
+}
+
+// Cast vector of type __m128 to type __m128i. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_si128
+FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
+{
+ return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
+}
+
+// Cast vector of type __m128i to type __m128d. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_pd
+FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a));
+#else
+ return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a));
+#endif
+}
+
+// Cast vector of type __m128i to type __m128. This intrinsic is only used for
+// compilation and does not generate any instructions, thus it has zero latency.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_ps
+FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
+{
+ return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
+}
+
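+// Illustrative example (not part of the API): the cast intrinsics reinterpret
+// the bits, while the cvt intrinsics convert the values:
+//   __m128 f  = _mm_set1_ps(1.0f);
+//   __m128i i = _mm_castps_si128(f);  // 0x3F800000 in every 32-bit lane
+//   __m128i j = _mm_cvtps_epi32(f);   // 1 in every 32-bit lane
+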
+// Invalidate and flush the cache line that contains p from all levels of the
+// cache hierarchy.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clflush
+#if defined(__APPLE__)
+#include <libkern/OSCacheControl.h>
+#endif
+FORCE_INLINE void _mm_clflush(void const *p)
+{
+ (void) p;
+
+ /* sys_icache_invalidate is supported since macOS 10.5.
+ * However, it does not work on non-jailbroken iOS devices, although the
+ * compilation is successful.
+ */
+#if defined(__APPLE__)
+ sys_icache_invalidate((void *) (uintptr_t) p, SSE2NEON_CACHELINE_SIZE);
+#elif defined(__GNUC__) || defined(__clang__)
+ uintptr_t ptr = (uintptr_t) p;
+ __builtin___clear_cache((char *) ptr,
+ (char *) ptr + SSE2NEON_CACHELINE_SIZE);
+#elif defined(_MSC_VER) && SSE2NEON_INCLUDE_WINDOWS_H
+ FlushInstructionCache(GetCurrentProcess(), p, SSE2NEON_CACHELINE_SIZE);
+#endif
+}
+
+// Compare packed 16-bit integers in a and b for equality, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi16
+FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed 32-bit integers in a and b for equality, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi32
+FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed 8-bit integers in a and b for equality, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi8
+FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for equality, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_pd
+FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(
+ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+ uint32x4_t cmp =
+ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+ uint32x4_t swapped = vrev64q_u32(cmp);
+ return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for equality, store the result in the lower element of dst, and copy the
+// upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_sd
+FORCE_INLINE __m128d _mm_cmpeq_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_cmpeq_pd(a, b));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for greater-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_pd
+FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(
+ vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = (*(double *) &a1) >= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for greater-than-or-equal, store the result in the lower element of dst,
+// and copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_sd
+FORCE_INLINE __m128d _mm_cmpge_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_cmpge_pd(a, b));
+#else
+ // expand "_mm_cmpge_pd()" to reduce unnecessary operations
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = a1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare packed signed 16-bit integers in a and b for greater-than, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi16
+FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed signed 32-bit integers in a and b for greater-than, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi32
+FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed signed 8-bit integers in a and b for greater-than, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi8
+FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for greater-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_pd
+FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(
+ vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = (*(double *) &a1) > (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for greater-than, store the result in the lower element of dst, and copy
+// the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_sd
+FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_cmpgt_pd(a, b));
+#else
+ // expand "_mm_cmpgt_pd()" to reduce unnecessary operations
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = a1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for less-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_pd
+FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(
+ vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = (*(double *) &a1) <= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for less-than-or-equal, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_sd
+FORCE_INLINE __m128d _mm_cmple_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_cmple_pd(a, b));
+#else
+ // expand "_mm_cmple_pd()" to reduce unnecessary operations
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = a1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare packed signed 16-bit integers in a and b for less-than, and store the
+// results in dst. Note: This intrinsic emits the pcmpgtw instruction with the
+// order of the operands switched.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16
+FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed signed 32-bit integers in a and b for less-than, and store the
+// results in dst. Note: This intrinsic emits the pcmpgtd instruction with the
+// order of the operands switched.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi32
+FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed signed 8-bit integers in a and b for less-than, and store the
+// results in dst. Note: This intrinsic emits the pcmpgtb instruction with the
+// order of the operands switched.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi8
+FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
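+// Illustrative example (not part of the API): given two __m128i values a and
+// b, these comparisons produce all-ones in lanes where the predicate holds and
+// zero elsewhere, which makes the results convenient as blend masks:
+//   __m128i lt  = _mm_cmplt_epi8(a, b);
+//   __m128i min = _mm_or_si128(_mm_and_si128(lt, a), _mm_andnot_si128(lt, b));
+//   // min now holds the per-byte signed minimum of a and b
+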
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for less-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_pd
+FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(
+ vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = (*(double *) &a1) < (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for less-than, store the result in the lower element of dst, and copy the
+// upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_sd
+FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_cmplt_pd(a, b));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] = a1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for not-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_pd
+FORCE_INLINE __m128d _mm_cmpneq_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_s32(vmvnq_s32(vreinterpretq_s32_u64(
+ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)))));
+#else
+ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+ uint32x4_t cmp =
+ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+ uint32x4_t swapped = vrev64q_u32(cmp);
+ return vreinterpretq_m128d_u32(vmvnq_u32(vandq_u32(cmp, swapped)));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for not-equal, store the result in the lower element of dst, and copy the
+// upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_sd
+FORCE_INLINE __m128d _mm_cmpneq_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_cmpneq_pd(a, b));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for not-greater-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_pd
+FORCE_INLINE __m128d _mm_cmpnge_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(veorq_u64(
+ vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+ vdupq_n_u64(UINT64_MAX)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] =
+ !((*(double *) &a0) >= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] =
+ !((*(double *) &a1) >= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for not-greater-than-or-equal, store the result in the lower element of
+// dst, and copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_sd
+FORCE_INLINE __m128d _mm_cmpnge_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_cmpnge_pd(a, b));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for not-greater-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_pd
+FORCE_INLINE __m128d _mm_cmpngt_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(veorq_u64(
+ vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+ vdupq_n_u64(UINT64_MAX)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] =
+ !((*(double *) &a0) > (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] =
+ !((*(double *) &a1) > (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for not-greater-than, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_sd
+FORCE_INLINE __m128d _mm_cmpngt_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_cmpngt_pd(a, b));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for not-less-than-or-equal, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_pd
+FORCE_INLINE __m128d _mm_cmpnle_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(veorq_u64(
+ vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+ vdupq_n_u64(UINT64_MAX)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] =
+ !((*(double *) &a0) <= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] =
+ !((*(double *) &a1) <= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for not-less-than-or-equal, store the result in the lower element of dst,
+// and copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_sd
+FORCE_INLINE __m128d _mm_cmpnle_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_cmpnle_pd(a, b));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// for not-less-than, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_pd
+FORCE_INLINE __m128d _mm_cmpnlt_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_u64(veorq_u64(
+ vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+ vdupq_n_u64(UINT64_MAX)));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] =
+ !((*(double *) &a0) < (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+ d[1] =
+ !((*(double *) &a1) < (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b for not-less-than, store the result in the lower element of dst, and copy
+// the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_sd
+FORCE_INLINE __m128d _mm_cmpnlt_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_cmpnlt_pd(a, b));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// to see if neither is NaN, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_pd
+FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ // Excluding NaNs, any two floating-point numbers can be compared.
+ uint64x2_t not_nan_a =
+ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
+ uint64x2_t not_nan_b =
+ vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
+ return vreinterpretq_m128d_u64(vandq_u64(not_nan_a, not_nan_b));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+ (*(double *) &b0) == (*(double *) &b0))
+ ? ~UINT64_C(0)
+ : UINT64_C(0);
+ d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
+ (*(double *) &b1) == (*(double *) &b1))
+ ? ~UINT64_C(0)
+ : UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b to see if neither is NaN, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_sd
+FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_cmpord_pd(a, b));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t d[2];
+ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+ (*(double *) &b0) == (*(double *) &b0))
+ ? ~UINT64_C(0)
+ : UINT64_C(0);
+ d[1] = a1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b
+// to see if either is NaN, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_pd
+FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ // A NaN never compares equal to anything, including itself.
+ uint64x2_t not_nan_a =
+ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
+ uint64x2_t not_nan_b =
+ vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
+ return vreinterpretq_m128d_s32(
+ vmvnq_s32(vreinterpretq_s32_u64(vandq_u64(not_nan_a, not_nan_b))));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+ (*(double *) &b0) == (*(double *) &b0))
+ ? UINT64_C(0)
+ : ~UINT64_C(0);
+ d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
+ (*(double *) &b1) == (*(double *) &b1))
+ ? UINT64_C(0)
+ : ~UINT64_C(0);
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b to see if either is NaN, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_sd
+FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_cmpunord_pd(a, b));
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t d[2];
+ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+ (*(double *) &b0) == (*(double *) &b0))
+ ? UINT64_C(0)
+ : ~UINT64_C(0);
+ d[1] = a1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
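+// Illustrative example (not part of the API): with x = _mm_set_pd(NAN, 1.0),
+// i.e. NaN in the upper element, _mm_cmpord_pd(x, x) yields all-ones in the
+// lower lane and zero in the upper lane; _mm_cmpunord_pd(x, x) yields the
+// opposite.
+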
+// Compare the lower double-precision (64-bit) floating-point element in a and b
+// for greater-than-or-equal, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_sd
+FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vgetq_lane_u64(vcgeq_f64(a, b), 0) & 0x1;
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+
+ return (*(double *) &a0 >= *(double *) &b0);
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point element in a and b
+// for greater-than, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_sd
+FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vgetq_lane_u64(vcgtq_f64(a, b), 0) & 0x1;
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+
+ return (*(double *) &a0 > *(double *) &b0);
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point element in a and b
+// for less-than-or-equal, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_sd
+FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vgetq_lane_u64(vcleq_f64(a, b), 0) & 0x1;
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+
+ return (*(double *) &a0 <= *(double *) &b0);
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point element in a and b
+// for less-than, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_sd
+FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vgetq_lane_u64(vcltq_f64(a, b), 0) & 0x1;
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+
+ return (*(double *) &a0 < *(double *) &b0);
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point element in a and b
+// for equality, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_sd
+FORCE_INLINE int _mm_comieq_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vgetq_lane_u64(vceqq_f64(a, b), 0) & 0x1;
+#else
+ uint32x4_t a_not_nan =
+ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(a));
+ uint32x4_t b_not_nan =
+ vceqq_u32(vreinterpretq_u32_m128d(b), vreinterpretq_u32_m128d(b));
+ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+ uint32x4_t a_eq_b =
+ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+ uint64x2_t and_results = vandq_u64(vreinterpretq_u64_u32(a_and_b_not_nan),
+ vreinterpretq_u64_u32(a_eq_b));
+ return vgetq_lane_u64(and_results, 0) & 0x1;
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point element in a and b
+// for not-equal, and return the boolean result (0 or 1).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_sd
+FORCE_INLINE int _mm_comineq_sd(__m128d a, __m128d b)
+{
+ return !_mm_comieq_sd(a, b);
+}
+
+// Convert packed signed 32-bit integers in a to packed double-precision
+// (64-bit) floating-point elements, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_pd
+FORCE_INLINE __m128d _mm_cvtepi32_pd(__m128i a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vcvtq_f64_s64(vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a)))));
+#else
+ double a0 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
+ double a1 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 1);
+ return _mm_set_pd(a1, a0);
+#endif
+}
+
+// Convert packed signed 32-bit integers in a to packed single-precision
+// (32-bit) floating-point elements, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_ps
+FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
+{
+ return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
+}
+
+// Convert packed double-precision (64-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epi32
+FORCE_INLINE __m128i _mm_cvtpd_epi32(__m128d a)
+{
+// vrnd32xq_f64 not supported on clang
+#if defined(__ARM_FEATURE_FRINT) && !defined(__clang__)
+ float64x2_t rounded = vrnd32xq_f64(vreinterpretq_f64_m128d(a));
+ int64x2_t integers = vcvtq_s64_f64(rounded);
+ return vreinterpretq_m128i_s32(
+ vcombine_s32(vmovn_s64(integers), vdup_n_s32(0)));
+#else
+ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ double d0 = ((double *) &rnd)[0];
+ double d1 = ((double *) &rnd)[1];
+ return _mm_set_epi32(0, 0, (int32_t) d1, (int32_t) d0);
+#endif
+}
+
+// Convert packed double-precision (64-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_pi32
+FORCE_INLINE __m64 _mm_cvtpd_pi32(__m128d a)
+{
+ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ double d0 = ((double *) &rnd)[0];
+ double d1 = ((double *) &rnd)[1];
+ int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) d0, (int32_t) d1};
+ return vreinterpret_m64_s32(vld1_s32(data));
+}
+
+// Convert packed double-precision (64-bit) floating-point elements in a to
+// packed single-precision (32-bit) floating-point elements, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_ps
+FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a));
+ return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0)));
+#else
+ float a0 = (float) ((double *) &a)[0];
+ float a1 = (float) ((double *) &a)[1];
+ return _mm_set_ps(0, 0, a1, a0);
+#endif
+}
+
+// Convert packed signed 32-bit integers in a to packed double-precision
+// (64-bit) floating-point elements, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_pd
+FORCE_INLINE __m128d _mm_cvtpi32_pd(__m64 a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vcvtq_f64_s64(vmovl_s32(vreinterpret_s32_m64(a))));
+#else
+ double a0 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 0);
+ double a1 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 1);
+ return _mm_set_pd(a1, a0);
+#endif
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epi32
+// *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A
+// does not support! It is supported on ARMv8-A however.
+FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
+{
+#if defined(__ARM_FEATURE_FRINT)
+ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vrnd32xq_f32(a)));
+#elif (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+ switch (_MM_GET_ROUNDING_MODE()) {
+ case _MM_ROUND_NEAREST:
+ return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a));
+ case _MM_ROUND_DOWN:
+ return vreinterpretq_m128i_s32(vcvtmq_s32_f32(a));
+ case _MM_ROUND_UP:
+ return vreinterpretq_m128i_s32(vcvtpq_s32_f32(a));
+ default: // _MM_ROUND_TOWARD_ZERO
+ return vreinterpretq_m128i_s32(vcvtq_s32_f32(a));
+ }
+#else
+ float *f = (float *) &a;
+ switch (_MM_GET_ROUNDING_MODE()) {
+ case _MM_ROUND_NEAREST: {
+ uint32x4_t signmask = vdupq_n_u32(0x80000000);
+ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
+ vdupq_n_f32(0.5f)); /* +/- 0.5 */
+ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
+ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
+ int32x4_t r_trunc = vcvtq_s32_f32(
+ vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
+ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
+ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
+ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
+ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
+ float32x4_t delta = vsubq_f32(
+ vreinterpretq_f32_m128(a),
+ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
+ uint32x4_t is_delta_half =
+ vceqq_f32(delta, half); /* delta == +/- 0.5 */
+ return vreinterpretq_m128i_s32(
+ vbslq_s32(is_delta_half, r_even, r_normal));
+ }
+ case _MM_ROUND_DOWN:
+ return _mm_set_epi32(floorf(f[3]), floorf(f[2]), floorf(f[1]),
+ floorf(f[0]));
+ case _MM_ROUND_UP:
+ return _mm_set_epi32(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]),
+ ceilf(f[0]));
+ default: // _MM_ROUND_TOWARD_ZERO
+ return _mm_set_epi32((int32_t) f[3], (int32_t) f[2], (int32_t) f[1],
+ (int32_t) f[0]);
+ }
+#endif
+}
+
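+// Illustrative example of the default round-to-nearest-even behavior (not part
+// of the API):
+//   __m128 v  = _mm_set_ps(2.5f, 1.5f, -0.5f, 0.5f);
+//   __m128i r = _mm_cvtps_epi32(v); // {0, 0, 2, 2} low to high; ties go to even
+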
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed double-precision (64-bit) floating-point elements, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pd
+FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a))));
+#else
+ double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
+ return _mm_set_pd(a1, a0);
+#endif
+}
+
+// Copy the lower double-precision (64-bit) floating-point element of a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_f64
+FORCE_INLINE double _mm_cvtsd_f64(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0);
+#else
+ return ((double *) &a)[0];
+#endif
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 32-bit integer, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si32
+FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return (int32_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
+#else
+ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ double ret = ((double *) &rnd)[0];
+ return (int32_t) ret;
+#endif
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 64-bit integer, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64
+FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return (int64_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
+#else
+ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ double ret = ((double *) &rnd)[0];
+ return (int64_t) ret;
+#endif
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 64-bit integer, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64x
+#define _mm_cvtsd_si64x _mm_cvtsd_si64
+
+// Convert the lower double-precision (64-bit) floating-point element in b to a
+// single-precision (32-bit) floating-point element, store the result in the
+// lower element of dst, and copy the upper 3 packed elements from a to the
+// upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_ss
+FORCE_INLINE __m128 _mm_cvtsd_ss(__m128 a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(vsetq_lane_f32(
+ vget_lane_f32(vcvt_f32_f64(vreinterpretq_f64_m128d(b)), 0),
+ vreinterpretq_f32_m128(a), 0));
+#else
+ return vreinterpretq_m128_f32(vsetq_lane_f32((float) ((double *) &b)[0],
+ vreinterpretq_f32_m128(a), 0));
+#endif
+}
+
+// Copy the lower 32-bit integer in a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si32
+FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
+{
+ return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
+}
+
+// Copy the lower 64-bit integer in a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64
+FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
+{
+ return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
+}
+
+// Copy the lower 64-bit integer in a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64x
+#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a)
+
+// Convert the signed 32-bit integer b to a double-precision (64-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_sd
+FORCE_INLINE __m128d _mm_cvtsi32_sd(__m128d a, int32_t b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
+#else
+ double bf = (double) b;
+ return vreinterpretq_m128d_s64(
+ vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
+#endif
+}
+
+// Copy 32-bit integer a to the lower elements of dst, and zero the upper
+// elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_si128
+FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
+{
+ return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
+}
+
+// Convert the signed 64-bit integer b to a double-precision (64-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_sd
+FORCE_INLINE __m128d _mm_cvtsi64_sd(__m128d a, int64_t b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
+#else
+ double bf = (double) b;
+ return vreinterpretq_m128d_s64(
+ vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
+#endif
+}
+
+// Copy 64-bit integer a to the lower element of dst, and zero the upper
+// element.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_si128
+FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
+{
+ return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
+}
+
+// Copy 64-bit integer a to the lower element of dst, and zero the upper
+// element.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_si128
+#define _mm_cvtsi64x_si128(a) _mm_cvtsi64_si128(a)
+
+// Convert the signed 64-bit integer b to a double-precision (64-bit)
+// floating-point element, store the result in the lower element of dst, and
+// copy the upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_sd
+#define _mm_cvtsi64x_sd(a, b) _mm_cvtsi64_sd(a, b)
+
+// Convert the lower single-precision (32-bit) floating-point element in b to a
+// double-precision (64-bit) floating-point element, store the result in the
+// lower element of dst, and copy the upper element from a to the upper element
+// of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_sd
+FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b)
+{
+ double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0));
+#else
+ return vreinterpretq_m128d_s64(
+ vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0));
+#endif
+}
+
+// Convert packed double-precision (64-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epi32
+FORCE_INLINE __m128i _mm_cvttpd_epi32(__m128d a)
+{
+ double a0 = ((double *) &a)[0];
+ double a1 = ((double *) &a)[1];
+ return _mm_set_epi32(0, 0, (int32_t) a1, (int32_t) a0);
+}
+
+// Convert packed double-precision (64-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_pi32
+FORCE_INLINE __m64 _mm_cvttpd_pi32(__m128d a)
+{
+ double a0 = ((double *) &a)[0];
+ double a1 = ((double *) &a)[1];
+ int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) a0, (int32_t) a1};
+ return vreinterpret_m64_s32(vld1_s32(data));
+}
+
+// Convert packed single-precision (32-bit) floating-point elements in a to
+// packed 32-bit integers with truncation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_epi32
+FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
+{
+ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
+}
+
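+// Illustrative example (not part of the API): the "tt" variants truncate
+// toward zero regardless of the current rounding mode:
+//   __m128i r = _mm_cvttps_epi32(_mm_set_ps(2.9f, -2.9f, 1.5f, -1.5f));
+//   // r = {-1, 1, -2, 2}, low to high
+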
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 32-bit integer with truncation, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si32
+FORCE_INLINE int32_t _mm_cvttsd_si32(__m128d a)
+{
+ double ret = *((double *) &a);
+ return (int32_t) ret;
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 64-bit integer with truncation, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64
+FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0);
+#else
+ double ret = *((double *) &a);
+ return (int64_t) ret;
+#endif
+}
+
+// Convert the lower double-precision (64-bit) floating-point element in a to a
+// 64-bit integer with truncation, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64x
+#define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a)
+
+// Divide packed double-precision (64-bit) floating-point elements in a by
+// packed elements in b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_pd
+FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] / db[0];
+ c[1] = da[1] / db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Divide the lower double-precision (64-bit) floating-point element in a by the
+// lower double-precision (64-bit) floating-point element in b, store the result
+// in the lower element of dst, and copy the upper element from a to the upper
+// element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_sd
+FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ float64x2_t tmp =
+ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b));
+ return vreinterpretq_m128d_f64(
+ vsetq_lane_f64(vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1), tmp, 1));
+#else
+ return _mm_move_sd(a, _mm_div_pd(a, b));
+#endif
+}
+
+// Extract a 16-bit integer from a, selected with imm8, and store the result in
+// the lower element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi16
+// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
+#define _mm_extract_epi16(a, imm) \
+ vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))
+
+// Copy a to dst, and insert the 16-bit integer i into dst at the location
+// specified by imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi16
+// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
+// __constrange(0,8) int imm)
+#define _mm_insert_epi16(a, b, imm) \
+ vreinterpretq_m128i_s16( \
+ vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm)))
+
+// Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from memory into dst. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd
+FORCE_INLINE __m128d _mm_load_pd(const double *p)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vld1q_f64(p));
+#else
+ const float *fp = (const float *) p;
+ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]};
+ return vreinterpretq_m128d_f32(vld1q_f32(data));
+#endif
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into both
+// elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd1
+#define _mm_load_pd1 _mm_load1_pd
+
+// Load a double-precision (64-bit) floating-point element from memory into the
+// lower of dst, and zero the upper element. mem_addr does not need to be
+// aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_sd
+FORCE_INLINE __m128d _mm_load_sd(const double *p)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0));
+#else
+ const float *fp = (const float *) p;
+ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
+ return vreinterpretq_m128d_f32(vld1q_f32(data));
+#endif
+}
+
+// Load 128-bits of integer data from memory into dst. mem_addr must be aligned
+// on a 16-byte boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_si128
+FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
+{
+ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into both
+// elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_pd
+FORCE_INLINE __m128d _mm_load1_pd(const double *p)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vld1q_dup_f64(p));
+#else
+ return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p));
+#endif
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into the
+// upper element of dst, and copy the lower element from a to dst. mem_addr does
+// not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pd
+FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p)));
+#else
+ return vreinterpretq_m128d_f32(vcombine_f32(
+ vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p)));
+#endif
+}
+
+// Load 64-bit integer from memory into the first element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_epi64
+FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
+{
+ /* Load the lower 64 bits of the value pointed to by p into the
+ * lower 64 bits of the result, zeroing the upper 64 bits of the result.
+ */
+ return vreinterpretq_m128i_s32(
+ vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0)));
+}
+
+// Load a double-precision (64-bit) floating-point element from memory into the
+// lower element of dst, and copy the upper element from a to dst. mem_addr does
+// not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pd
+FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a))));
+#else
+ return vreinterpretq_m128d_f32(
+ vcombine_f32(vld1_f32((const float *) p),
+ vget_high_f32(vreinterpretq_f32_m128d(a))));
+#endif
+}
+
+// Load 2 double-precision (64-bit) floating-point elements from memory into dst
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_pd
+FORCE_INLINE __m128d _mm_loadr_pd(const double *p)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ float64x2_t v = vld1q_f64(p);
+ return vreinterpretq_m128d_f64(vextq_f64(v, v, 1));
+#else
+ int64x2_t v = vld1q_s64((const int64_t *) p);
+ return vreinterpretq_m128d_s64(vextq_s64(v, v, 1));
+#endif
+}
+
+// Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from memory into dst. mem_addr does not need to be aligned on any
+// particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_pd
+FORCE_INLINE __m128d _mm_loadu_pd(const double *p)
+{
+ return _mm_load_pd(p);
+}
+
+// Load 128-bits of integer data from memory into dst. mem_addr does not need to
+// be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si128
+FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
+{
+ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
+}
+
+// Load unaligned 32-bit integer from memory into the first element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si32
+FORCE_INLINE __m128i _mm_loadu_si32(const void *p)
+{
+ return vreinterpretq_m128i_s32(
+ vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0));
+}
+
+// Multiply packed signed 16-bit integers in a and b, producing intermediate
+// signed 32-bit integers. Horizontally add adjacent pairs of intermediate
+// 32-bit integers, and pack the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_madd_epi16
+FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
+{
+ int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
+ vget_low_s16(vreinterpretq_s16_m128i(b)));
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int32x4_t high =
+ vmull_high_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b));
+
+ return vreinterpretq_m128i_s32(vpaddq_s32(low, high));
+#else
+ int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
+ vget_high_s16(vreinterpretq_s16_m128i(b)));
+
+ int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
+ int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
+
+ return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
+#endif
+}
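+
+// A short worked example of the pairwise accumulation in _mm_madd_epi16 above
+// (illustrative values only): with 16-bit lanes a = {1, 2, 3, 4, ...} and
+// b = {10, 20, 30, 40, ...}, output 32-bit lane 0 is 1*10 + 2*20 = 50 and
+// lane 1 is 3*30 + 4*40 = 250.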
+
+// Conditionally store 8-bit integer elements from a into memory using mask
+// (elements are not stored when the highest bit is not set in the corresponding
+// element) and a non-temporal memory hint. mem_addr does not need to be aligned
+// on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128
+FORCE_INLINE void _mm_maskmoveu_si128(__m128i a, __m128i mask, char *mem_addr)
+{
+ int8x16_t shr_mask = vshrq_n_s8(vreinterpretq_s8_m128i(mask), 7);
+ __m128 b = _mm_load_ps((const float *) mem_addr);
+ int8x16_t masked =
+ vbslq_s8(vreinterpretq_u8_s8(shr_mask), vreinterpretq_s8_m128i(a),
+ vreinterpretq_s8_m128(b));
+ vst1q_s8((int8_t *) mem_addr, masked);
+}
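+
+// Usage sketch for _mm_maskmoveu_si128 (hypothetical buffer, illustrative
+// values only): only bytes whose mask element has its most significant bit set
+// are written; the remaining destination bytes keep their previous values.
+//   char buf[16] = {0};
+//   __m128i data = _mm_set1_epi8(0x55);
+//   __m128i mask = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+//                               (signed char) 0x80);
+//   _mm_maskmoveu_si128(data, mask, buf);  // only buf[0] becomes 0x55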
+
+// Compare packed signed 16-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi16
+FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu8
+FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b,
+// and store packed maximum values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pd
+FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+#if SSE2NEON_PRECISE_MINMAX
+ float64x2_t _a = vreinterpretq_f64_m128d(a);
+ float64x2_t _b = vreinterpretq_f64_m128d(b);
+ return vreinterpretq_m128d_f64(vbslq_f64(vcgtq_f64(_a, _b), _a, _b));
+#else
+ return vreinterpretq_m128d_f64(
+ vmaxq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#endif
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) > (*(double *) &b0) ? a0 : b0;
+ d[1] = (*(double *) &a1) > (*(double *) &b1) ? a1 : b1;
+
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
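+
+// Note on the SSE2NEON_PRECISE_MINMAX path above: selecting with (a > b)
+// mirrors x86 maxpd, which returns the second operand (b) whenever the
+// comparison is false, including when either input is NaN; vmaxq_f64 applies
+// Arm's own NaN handling instead, so results can differ for NaN inputs. The
+// same reasoning applies to _mm_min_pd below.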
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b, store the maximum value in the lower element of dst, and copy the upper
+// element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_sd
+FORCE_INLINE __m128d _mm_max_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_max_pd(a, b));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2] = {da[0] > db[0] ? da[0] : db[0], da[1]};
+ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
+#endif
+}
+
+// Compare packed signed 16-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi16
+FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu8
+FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
+
+// Compare packed double-precision (64-bit) floating-point elements in a and b,
+// and store packed minimum values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pd
+FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+#if SSE2NEON_PRECISE_MINMAX
+ float64x2_t _a = vreinterpretq_f64_m128d(a);
+ float64x2_t _b = vreinterpretq_f64_m128d(b);
+ return vreinterpretq_m128d_f64(vbslq_f64(vcltq_f64(_a, _b), _a, _b));
+#else
+ return vreinterpretq_m128d_f64(
+ vminq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#endif
+#else
+ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+ uint64_t d[2];
+ d[0] = (*(double *) &a0) < (*(double *) &b0) ? a0 : b0;
+ d[1] = (*(double *) &a1) < (*(double *) &b1) ? a1 : b1;
+ return vreinterpretq_m128d_u64(vld1q_u64(d));
+#endif
+}
+
+// Compare the lower double-precision (64-bit) floating-point elements in a and
+// b, store the minimum value in the lower element of dst, and copy the upper
+// element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_sd
+FORCE_INLINE __m128d _mm_min_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_min_pd(a, b));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2] = {da[0] < db[0] ? da[0] : db[0], da[1]};
+ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
+#endif
+}
+
+// Copy the lower 64-bit integer in a to the lower element of dst, and zero the
+// upper element.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_epi64
+FORCE_INLINE __m128i _mm_move_epi64(__m128i a)
+{
+ return vreinterpretq_m128i_s64(
+ vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1));
+}
+
+// Move the lower double-precision (64-bit) floating-point element from b to the
+// lower element of dst, and copy the upper element from a to the upper element
+// of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_sd
+FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_f32(
+ vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)),
+ vget_high_f32(vreinterpretq_f32_m128d(a))));
+}
+
+// Create mask from the most significant bit of each 8-bit element in a, and
+// store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_epi8
+FORCE_INLINE int _mm_movemask_epi8(__m128i a)
+{
+ // Use increasingly wide shifts+adds to collect the sign bits
+ // together.
+ // Since the widening shifts would be rather confusing to follow in little
+ // endian, everything will be illustrated in big endian order instead. This
+ // has a different result - the bits would actually be reversed on a big
+ // endian machine.
+
+ // Starting input (only half the elements are shown):
+ // 89 ff 1d c0 00 10 99 33
+ uint8x16_t input = vreinterpretq_u8_m128i(a);
+
+ // Shift out everything but the sign bits with an unsigned shift right.
+ //
+    // Bytes of the vector:
+ // 89 ff 1d c0 00 10 99 33
+ // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
+ // | | | | | | | |
+ // 01 01 00 01 00 00 01 00
+ //
+ // Bits of first important lane(s):
+ // 10001001 (89)
+ // \______
+ // |
+ // 00000001 (01)
+ uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
+
+ // Merge the even lanes together with a 16-bit unsigned shift right + add.
+ // 'xx' represents garbage data which will be ignored in the final result.
+ // In the important bytes, the add functions like a binary OR.
+ //
+ // 01 01 00 01 00 00 01 00
+ // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
+ // \| \| \| \|
+ // xx 03 xx 01 xx 00 xx 02
+ //
+ // 00000001 00000001 (01 01)
+ // \_______ |
+ // \|
+ // xxxxxxxx xxxxxx11 (xx 03)
+ uint32x4_t paired16 =
+ vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
+
+ // Repeat with a wider 32-bit shift + add.
+ // xx 03 xx 01 xx 00 xx 02
+ // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >>
+ // 14))
+ // \| \|
+ // xx xx xx 0d xx xx xx 02
+ //
+ // 00000011 00000001 (03 01)
+ // \\_____ ||
+ // '----.\||
+ // xxxxxxxx xxxx1101 (xx 0d)
+ uint64x2_t paired32 =
+ vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
+
+ // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
+ // lanes. xx xx xx 0d xx xx xx 02
+ // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >>
+ // 28))
+ // \|
+ // xx xx xx xx xx xx xx d2
+ //
+ // 00001101 00000010 (0d 02)
+ // \ \___ | |
+ // '---. \| |
+ // xxxxxxxx 11010010 (xx d2)
+ uint8x16_t paired64 =
+ vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
+
+ // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
+ // xx xx xx xx xx xx xx d2
+ // || return paired64[0]
+ // d2
+ // Note: Little endian would return the correct value 4b (01001011) instead.
+ return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
+}
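+
+// Usage sketch for _mm_movemask_epi8 (illustrative values only):
+//   __m128i v = _mm_set_epi8((signed char) 0x80, 0, 0, 0, 0, 0, 0, 0,
+//                            0, 0, 0, 0, 0, 0, 0, (signed char) 0xFF);
+//   int m = _mm_movemask_epi8(v);  // m == 0x8001; bit i is the sign bit of byte i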
+
+// Set each bit of mask dst based on the most significant bit of the
+// corresponding packed double-precision (64-bit) floating-point element in a.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pd
+FORCE_INLINE int _mm_movemask_pd(__m128d a)
+{
+ uint64x2_t input = vreinterpretq_u64_m128d(a);
+ uint64x2_t high_bits = vshrq_n_u64(input, 63);
+ return (int) (vgetq_lane_u64(high_bits, 0) |
+ (vgetq_lane_u64(high_bits, 1) << 1));
+}
+
+// Copy the lower 64-bit integer in a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi64_pi64
+FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a)
+{
+ return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a)));
+}
+
+// Copy the 64-bit integer a to the lower element of dst, and zero the upper
+// element.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movpi64_epi64
+FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a)
+{
+ return vreinterpretq_m128i_s64(
+ vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0)));
+}
+
+// Multiply the low unsigned 32-bit integers from each packed 64-bit element in
+// a and b, and store the unsigned 64-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epu32
+FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
+{
+ // vmull_u32 upcasts instead of masking, so we downcast.
+ uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
+ uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
+ return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
+}
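+
+// Only the low 32 bits of each 64-bit lane take part in _mm_mul_epu32. For
+// example (illustrative values only), with 64-bit lanes a = {0x100000002, ...}
+// and b = {0x100000003, ...}, the low result lane is 2 * 3 = 6; the upper
+// halves are discarded by the vmovn_u64 narrowing above.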
+
+// Multiply packed double-precision (64-bit) floating-point elements in a and b,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_pd
+FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] * db[0];
+ c[1] = da[1] * db[1];
+ return vld1q_f32((float32_t *) c);
+#endif
+}
+
+// Multiply the lower double-precision (64-bit) floating-point element in a and
+// b, store the result in the lower element of dst, and copy the upper element
+// from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_sd
+FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_mul_pd(a, b));
+}
+
+// Multiply the low unsigned 32-bit integers from a and b, and store the
+// unsigned 64-bit result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_su32
+FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_u64(vget_low_u64(
+ vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b))));
+}
+
+// Multiply the packed signed 16-bit integers in a and b, producing intermediate
+// 32-bit integers, and store the high 16 bits of the intermediate integers in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epi16
+FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
+{
+ /* FIXME: issue with large values because of result saturation */
+ // int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
+ // vreinterpretq_s16_m128i(b)); /* =2*a*b */ return
+ // vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
+ int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
+ int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
+ int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
+ int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
+ uint16x8x2_t r =
+ vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
+ return vreinterpretq_m128i_u16(r.val[1]);
+}
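+
+// Worked example for _mm_mulhi_epi16 (illustrative values only): with 0x4000
+// (16384) in a lane of both a and b, the full 32-bit product is 0x10000000,
+// so the corresponding result lane holds the high half, 0x1000.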
+
+// Multiply the packed unsigned 16-bit integers in a and b, producing
+// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+// integers in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epu16
+FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
+{
+ uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a));
+ uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b));
+ uint32x4_t ab3210 = vmull_u16(a3210, b3210);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint32x4_t ab7654 =
+ vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
+ uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
+ vreinterpretq_u16_u32(ab7654));
+ return vreinterpretq_m128i_u16(r);
+#else
+ uint16x4_t a7654 = vget_high_u16(vreinterpretq_u16_m128i(a));
+ uint16x4_t b7654 = vget_high_u16(vreinterpretq_u16_m128i(b));
+ uint32x4_t ab7654 = vmull_u16(a7654, b7654);
+ uint16x8x2_t r =
+ vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
+ return vreinterpretq_m128i_u16(r.val[1]);
+#endif
+}
+
+// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit
+// integers, and store the low 16 bits of the intermediate integers in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi16
+FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Compute the bitwise OR of packed double-precision (64-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_pd
+FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_s64(
+ vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+}
+
+// Compute the bitwise OR of 128 bits (representing integer data) in a and b,
+// and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_si128
+FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Convert packed signed 16-bit integers from a and b to packed 8-bit integers
+// using signed saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi16
+FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
+ vqmovn_s16(vreinterpretq_s16_m128i(b))));
+}
+
+// Convert packed signed 32-bit integers from a and b to packed 16-bit integers
+// using signed saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi32
+FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
+ vqmovn_s32(vreinterpretq_s32_m128i(b))));
+}
+
+// Convert packed signed 16-bit integers from a and b to packed 8-bit integers
+// using unsigned saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi16
+FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
+ vqmovun_s16(vreinterpretq_s16_m128i(b))));
+}
+
+// Pause the processor. This is typically used in spin-wait loops and, depending
+// on the x86 processor, typical delays are in the 40-100 cycle range. The
+// 'yield' instruction isn't a good fit because it's effectively a nop on most
+// Arm cores. Experience with several databases has shown that an 'isb' is a
+// reasonable approximation.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_pause
+FORCE_INLINE void _mm_pause(void)
+{
+#if defined(_MSC_VER)
+ __isb(_ARM64_BARRIER_SY);
+#else
+ __asm__ __volatile__("isb\n");
+#endif
+}
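+
+// Typical spin-wait usage of _mm_pause (sketch only; the atomic `flag` is
+// hypothetical and would come from <stdatomic.h>):
+//   while (!atomic_load_explicit(&flag, memory_order_acquire))
+//       _mm_pause();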
+
+// Compute the absolute differences of packed unsigned 8-bit integers in a and
+// b, then horizontally sum each consecutive 8 differences to produce two
+// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+// 16 bits of 64-bit elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_epu8
+FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
+{
+ uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b));
+ return vreinterpretq_m128i_u64(vpaddlq_u32(vpaddlq_u16(t)));
+}
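+
+// Worked example for _mm_sad_epu8 (illustrative values only): if the low eight
+// bytes of a are {0, 1, 2, 3, 4, 5, 6, 7} and the low eight bytes of b are all
+// 1, the absolute differences are {1, 0, 1, 2, 3, 4, 5, 6} and the low 64-bit
+// result lane holds their sum, 22.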
+
+// Set packed 16-bit integers in dst with the supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi16
+FORCE_INLINE __m128i _mm_set_epi16(short i7,
+ short i6,
+ short i5,
+ short i4,
+ short i3,
+ short i2,
+ short i1,
+ short i0)
+{
+ int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
+ return vreinterpretq_m128i_s16(vld1q_s16(data));
+}
+
+// Set packed 32-bit integers in dst with the supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi32
+FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
+{
+ int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
+ return vreinterpretq_m128i_s32(vld1q_s32(data));
+}
+
+// Set packed 64-bit integers in dst with the supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64
+FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2)
+{
+ return _mm_set_epi64x(vget_lane_s64(i1, 0), vget_lane_s64(i2, 0));
+}
+
+// Set packed 64-bit integers in dst with the supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64x
+FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
+{
+ return vreinterpretq_m128i_s64(
+ vcombine_s64(vcreate_s64(i2), vcreate_s64(i1)));
+}
+
+// Set packed 8-bit integers in dst with the supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi8
+FORCE_INLINE __m128i _mm_set_epi8(signed char b15,
+ signed char b14,
+ signed char b13,
+ signed char b12,
+ signed char b11,
+ signed char b10,
+ signed char b9,
+ signed char b8,
+ signed char b7,
+ signed char b6,
+ signed char b5,
+ signed char b4,
+ signed char b3,
+ signed char b2,
+ signed char b1,
+ signed char b0)
+{
+ int8_t ALIGN_STRUCT(16)
+ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
+ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
+ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
+ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
+ return (__m128i) vld1q_s8(data);
+}
+
+// Set packed double-precision (64-bit) floating-point elements in dst with the
+// supplied values.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd
+FORCE_INLINE __m128d _mm_set_pd(double e1, double e0)
+{
+ double ALIGN_STRUCT(16) data[2] = {e0, e1};
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data));
+#else
+ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data));
+#endif
+}
+
+// Broadcast double-precision (64-bit) floating-point value a to all elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd1
+#define _mm_set_pd1 _mm_set1_pd
+
+// Copy double-precision (64-bit) floating-point element a to the lower element
+// of dst, and zero the upper element.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_sd
+FORCE_INLINE __m128d _mm_set_sd(double a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vsetq_lane_f64(a, vdupq_n_f64(0), 0));
+#else
+ return _mm_set_pd(0, a);
+#endif
+}
+
+// Broadcast 16-bit integer a to all elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi16
+FORCE_INLINE __m128i _mm_set1_epi16(short w)
+{
+ return vreinterpretq_m128i_s16(vdupq_n_s16(w));
+}
+
+// Broadcast 32-bit integer a to all elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi32
+FORCE_INLINE __m128i _mm_set1_epi32(int _i)
+{
+ return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
+}
+
+// Broadcast 64-bit integer a to all elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64
+FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i)
+{
+ return vreinterpretq_m128i_s64(vdupq_lane_s64(_i, 0));
+}
+
+// Broadcast 64-bit integer a to all elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64x
+FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
+{
+ return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
+}
+
+// Broadcast 8-bit integer a to all elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi8
+FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
+{
+ return vreinterpretq_m128i_s8(vdupq_n_s8(w));
+}
+
+// Broadcast double-precision (64-bit) floating-point value a to all elements of
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_pd
+FORCE_INLINE __m128d _mm_set1_pd(double d)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vdupq_n_f64(d));
+#else
+ return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d));
+#endif
+}
+
+// Set packed 16-bit integers in dst with the supplied values in reverse order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi16
+FORCE_INLINE __m128i _mm_setr_epi16(short w0,
+ short w1,
+ short w2,
+ short w3,
+ short w4,
+ short w5,
+ short w6,
+ short w7)
+{
+ int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
+ return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data));
+}
+
+// Set packed 32-bit integers in dst with the supplied values in reverse order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi32
+FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
+{
+ int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
+ return vreinterpretq_m128i_s32(vld1q_s32(data));
+}
+
+// Set packed 64-bit integers in dst with the supplied values in reverse order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi64
+FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0)
+{
+ return vreinterpretq_m128i_s64(vcombine_s64(e1, e0));
+}
+
+// Set packed 8-bit integers in dst with the supplied values in reverse order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi8
+FORCE_INLINE __m128i _mm_setr_epi8(signed char b0,
+ signed char b1,
+ signed char b2,
+ signed char b3,
+ signed char b4,
+ signed char b5,
+ signed char b6,
+ signed char b7,
+ signed char b8,
+ signed char b9,
+ signed char b10,
+ signed char b11,
+ signed char b12,
+ signed char b13,
+ signed char b14,
+ signed char b15)
+{
+ int8_t ALIGN_STRUCT(16)
+ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
+ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
+ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
+ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
+ return (__m128i) vld1q_s8(data);
+}
+
+// Set packed double-precision (64-bit) floating-point elements in dst with the
+// supplied values in reverse order.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_pd
+FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0)
+{
+ return _mm_set_pd(e0, e1);
+}
+
+// Return vector of type __m128d with all elements set to zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_pd
+FORCE_INLINE __m128d _mm_setzero_pd(void)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vdupq_n_f64(0));
+#else
+ return vreinterpretq_m128d_f32(vdupq_n_f32(0));
+#endif
+}
+
+// Return vector of type __m128i with all elements set to zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_si128
+FORCE_INLINE __m128i _mm_setzero_si128(void)
+{
+ return vreinterpretq_m128i_s32(vdupq_n_s32(0));
+}
+
+// Shuffle 32-bit integers in a using the control in imm8, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi32
+// FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
+// __constrange(0,255) int imm)
+#if defined(_sse2neon_shuffle)
+#define _mm_shuffle_epi32(a, imm) \
+ __extension__({ \
+ int32x4_t _input = vreinterpretq_s32_m128i(a); \
+ int32x4_t _shuf = \
+ vshuffleq_s32(_input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
+ ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
+ vreinterpretq_m128i_s32(_shuf); \
+ })
+#else // generic
+#define _mm_shuffle_epi32(a, imm) \
+ _sse2neon_define1( \
+ __m128i, a, __m128i ret; switch (imm) { \
+ case _MM_SHUFFLE(1, 0, 3, 2): \
+ ret = _mm_shuffle_epi_1032(_a); \
+ break; \
+ case _MM_SHUFFLE(2, 3, 0, 1): \
+ ret = _mm_shuffle_epi_2301(_a); \
+ break; \
+ case _MM_SHUFFLE(0, 3, 2, 1): \
+ ret = _mm_shuffle_epi_0321(_a); \
+ break; \
+ case _MM_SHUFFLE(2, 1, 0, 3): \
+ ret = _mm_shuffle_epi_2103(_a); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 1, 0): \
+ ret = _mm_shuffle_epi_1010(_a); \
+ break; \
+ case _MM_SHUFFLE(1, 0, 0, 1): \
+ ret = _mm_shuffle_epi_1001(_a); \
+ break; \
+ case _MM_SHUFFLE(0, 1, 0, 1): \
+ ret = _mm_shuffle_epi_0101(_a); \
+ break; \
+ case _MM_SHUFFLE(2, 2, 1, 1): \
+ ret = _mm_shuffle_epi_2211(_a); \
+ break; \
+ case _MM_SHUFFLE(0, 1, 2, 2): \
+ ret = _mm_shuffle_epi_0122(_a); \
+ break; \
+ case _MM_SHUFFLE(3, 3, 3, 2): \
+ ret = _mm_shuffle_epi_3332(_a); \
+ break; \
+ case _MM_SHUFFLE(0, 0, 0, 0): \
+ ret = _mm_shuffle_epi32_splat(_a, 0); \
+ break; \
+ case _MM_SHUFFLE(1, 1, 1, 1): \
+ ret = _mm_shuffle_epi32_splat(_a, 1); \
+ break; \
+ case _MM_SHUFFLE(2, 2, 2, 2): \
+ ret = _mm_shuffle_epi32_splat(_a, 2); \
+ break; \
+ case _MM_SHUFFLE(3, 3, 3, 3): \
+ ret = _mm_shuffle_epi32_splat(_a, 3); \
+ break; \
+ default: \
+ ret = _mm_shuffle_epi32_default(_a, (imm)); \
+ break; \
+ } _sse2neon_return(ret);)
+#endif
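+
+// Usage sketch for _mm_shuffle_epi32 (illustrative values only):
+// _MM_SHUFFLE(z, y, x, w) picks the source lane for each destination lane,
+// from the highest (z) down to the lowest (w).
+//   __m128i v = _mm_set_epi32(3, 2, 1, 0);  // 32-bit lanes, low to high: {0, 1, 2, 3}
+//   __m128i r = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
+//   // r lanes, low to high: {3, 2, 1, 0}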
+
+// Shuffle double-precision (64-bit) floating-point elements using the control
+// in imm8, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pd
+#ifdef _sse2neon_shuffle
+#define _mm_shuffle_pd(a, b, imm8) \
+ vreinterpretq_m128d_s64( \
+ vshuffleq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), \
+ imm8 & 0x1, ((imm8 & 0x2) >> 1) + 2))
+#else
+#define _mm_shuffle_pd(a, b, imm8) \
+ _mm_castsi128_pd(_mm_set_epi64x( \
+ vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \
+ vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1)))
+#endif
+
+// Shuffle 16-bit integers in the high 64 bits of a using the control in imm8.
+// Store the results in the high 64 bits of dst, with the low 64 bits being
+// copied from a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16
+// FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
+//                                          __constrange(0,255) int imm)
+#if defined(_sse2neon_shuffle)
+#define _mm_shufflehi_epi16(a, imm) \
+ __extension__({ \
+ int16x8_t _input = vreinterpretq_s16_m128i(a); \
+ int16x8_t _shuf = \
+ vshuffleq_s16(_input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
+ (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
+ (((imm) >> 6) & 0x3) + 4); \
+ vreinterpretq_m128i_s16(_shuf); \
+ })
+#else // generic
+#define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
+#endif
+
+// Shuffle 16-bit integers in the low 64 bits of a using the control in imm8.
+// Store the results in the low 64 bits of dst, with the high 64 bits being
+// copied from a to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16
+// FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
+//                                          __constrange(0,255) int imm)
+#if defined(_sse2neon_shuffle)
+#define _mm_shufflelo_epi16(a, imm) \
+ __extension__({ \
+ int16x8_t _input = vreinterpretq_s16_m128i(a); \
+ int16x8_t _shuf = vshuffleq_s16( \
+ _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
+ (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \
+ vreinterpretq_m128i_s16(_shuf); \
+ })
+#else // generic
+#define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
+#endif
+
+// Shift packed 16-bit integers in a left by count while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi16
+FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (_sse2neon_unlikely(c & ~15))
+ return _mm_setzero_si128();
+
+ int16x8_t vc = vdupq_n_s16((int16_t) c);
+ return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc));
+}
+
+// Shift packed 32-bit integers in a left by count while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi32
+FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (_sse2neon_unlikely(c & ~31))
+ return _mm_setzero_si128();
+
+ int32x4_t vc = vdupq_n_s32((int32_t) c);
+ return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc));
+}
+
+// Shift packed 64-bit integers in a left by count while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi64
+FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (_sse2neon_unlikely(c & ~63))
+ return _mm_setzero_si128();
+
+ int64x2_t vc = vdupq_n_s64((int64_t) c);
+ return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc));
+}
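+
+// Note for the three _mm_sll_epi* forms above: SSE consumes the full 64-bit
+// count from the low half of the count vector and produces all zeros once the
+// count reaches the element width, so the early-out checks are required before
+// the count is narrowed into a per-lane NEON shift amount.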
+
+// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi16
+FORCE_INLINE __m128i _mm_slli_epi16(__m128i a, int imm)
+{
+ if (_sse2neon_unlikely(imm & ~15))
+ return _mm_setzero_si128();
+ return vreinterpretq_m128i_s16(
+ vshlq_s16(vreinterpretq_s16_m128i(a), vdupq_n_s16(imm)));
+}
+
+// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi32
+FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm)
+{
+ if (_sse2neon_unlikely(imm & ~31))
+ return _mm_setzero_si128();
+ return vreinterpretq_m128i_s32(
+ vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm)));
+}
+
+// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi64
+FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm)
+{
+ if (_sse2neon_unlikely(imm & ~63))
+ return _mm_setzero_si128();
+ return vreinterpretq_m128i_s64(
+ vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm)));
+}
+
+// Shift a left by imm8 bytes while shifting in zeros, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_si128
+#define _mm_slli_si128(a, imm) \
+ _sse2neon_define1( \
+ __m128i, a, int8x16_t ret; \
+ if (_sse2neon_unlikely(imm == 0)) ret = vreinterpretq_s8_m128i(_a); \
+ else if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \
+ else ret = vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(_a), \
+ ((imm <= 0 || imm > 15) ? 0 : (16 - imm))); \
+ _sse2neon_return(vreinterpretq_m128i_s8(ret));)
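+
+// Usage sketch for _mm_slli_si128 (illustrative values only): a byte shift to
+// the left moves data toward the more-significant end and fills with zeros.
+//   __m128i v = _mm_set_epi8(16, 15, 14, 13, 12, 11, 10, 9,
+//                            8, 7, 6, 5, 4, 3, 2, 1);  // bytes 1..16, low to high
+//   __m128i r = _mm_slli_si128(v, 4);  // bytes, low to high: {0, 0, 0, 0, 1, 2, ..., 12}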
+
+// Compute the square root of packed double-precision (64-bit) floating-point
+// elements in a, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_pd
+FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vsqrtq_f64(vreinterpretq_f64_m128d(a)));
+#else
+ double a0 = sqrt(((double *) &a)[0]);
+ double a1 = sqrt(((double *) &a)[1]);
+ return _mm_set_pd(a1, a0);
+#endif
+}
+
+// Compute the square root of the lower double-precision (64-bit) floating-point
+// element in b, store the result in the lower element of dst, and copy the
+// upper element from a to the upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_sd
+FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return _mm_move_sd(a, _mm_sqrt_pd(b));
+#else
+ return _mm_set_pd(((double *) &a)[1], sqrt(((double *) &b)[0]));
+#endif
+}
+
+// Shift packed 16-bit integers in a right by count while shifting in sign bits,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi16
+FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
+{
+ int64_t c = vgetq_lane_s64(count, 0);
+ if (_sse2neon_unlikely(c & ~15))
+ return _mm_cmplt_epi16(a, _mm_setzero_si128());
+ return vreinterpretq_m128i_s16(
+ vshlq_s16((int16x8_t) a, vdupq_n_s16((int) -c)));
+}
+
+// Shift packed 32-bit integers in a right by count while shifting in sign bits,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi32
+FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
+{
+ int64_t c = vgetq_lane_s64(count, 0);
+ if (_sse2neon_unlikely(c & ~31))
+ return _mm_cmplt_epi32(a, _mm_setzero_si128());
+ return vreinterpretq_m128i_s32(
+ vshlq_s32((int32x4_t) a, vdupq_n_s32((int) -c)));
+}
+
+// Shift packed 16-bit integers in a right by imm8 while shifting in sign
+// bits, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi16
+FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm)
+{
+ const int count = (imm & ~15) ? 15 : imm;
+ return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count));
+}
+
+// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi32
+// FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
+#define _mm_srai_epi32(a, imm) \
+ _sse2neon_define0( \
+ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) == 0)) { \
+ ret = _a; \
+ } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \
+ ret = vreinterpretq_m128i_s32( \
+ vshlq_s32(vreinterpretq_s32_m128i(_a), vdupq_n_s32(-(imm)))); \
+ } else { \
+ ret = vreinterpretq_m128i_s32( \
+ vshrq_n_s32(vreinterpretq_s32_m128i(_a), 31)); \
+ } _sse2neon_return(ret);)
+
+// Shift packed 16-bit integers in a right by count while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi16
+FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (_sse2neon_unlikely(c & ~15))
+ return _mm_setzero_si128();
+
+ int16x8_t vc = vdupq_n_s16(-(int16_t) c);
+ return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc));
+}
+
+// Shift packed 32-bit integers in a right by count while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi32
+FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (_sse2neon_unlikely(c & ~31))
+ return _mm_setzero_si128();
+
+ int32x4_t vc = vdupq_n_s32(-(int32_t) c);
+ return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc));
+}
+
+// Shift packed 64-bit integers in a right by count while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi64
+FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
+{
+ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+ if (_sse2neon_unlikely(c & ~63))
+ return _mm_setzero_si128();
+
+ int64x2_t vc = vdupq_n_s64(-(int64_t) c);
+ return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc));
+}
+
+// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi16
+#define _mm_srli_epi16(a, imm) \
+ _sse2neon_define0( \
+ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~15)) { \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ ret = vreinterpretq_m128i_u16( \
+ vshlq_u16(vreinterpretq_u16_m128i(_a), vdupq_n_s16(-(imm)))); \
+ } _sse2neon_return(ret);)
+
+// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi32
+// FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
+#define _mm_srli_epi32(a, imm) \
+ _sse2neon_define0( \
+ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~31)) { \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ ret = vreinterpretq_m128i_u32( \
+ vshlq_u32(vreinterpretq_u32_m128i(_a), vdupq_n_s32(-(imm)))); \
+ } _sse2neon_return(ret);)
+
+// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi64
+#define _mm_srli_epi64(a, imm) \
+ _sse2neon_define0( \
+ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~63)) { \
+ ret = _mm_setzero_si128(); \
+ } else { \
+ ret = vreinterpretq_m128i_u64( \
+ vshlq_u64(vreinterpretq_u64_m128i(_a), vdupq_n_s64(-(imm)))); \
+ } _sse2neon_return(ret);)
+
+// Shift a right by imm8 bytes while shifting in zeros, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_si128
+#define _mm_srli_si128(a, imm) \
+ _sse2neon_define1( \
+ __m128i, a, int8x16_t ret; \
+ if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \
+ else ret = vextq_s8(vreinterpretq_s8_m128i(_a), vdupq_n_s8(0), \
+ (imm > 15 ? 0 : imm)); \
+ _sse2neon_return(vreinterpretq_m128i_s8(ret));)
+
+// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
+// or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd
+FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a));
+#else
+ vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a));
+#endif
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd1
+FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a));
+ vst1q_f64((float64_t *) mem_addr,
+ vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low)));
+#else
+ float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a));
+ vst1q_f32((float32_t *) mem_addr,
+ vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low)));
+#endif
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// memory. mem_addr does not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_sd
+FORCE_INLINE void _mm_store_sd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
+#else
+ vst1_u64((uint64_t *) mem_addr, vget_low_u64(vreinterpretq_u64_m128d(a)));
+#endif
+}
+
+// Store 128-bits of integer data from a into memory. mem_addr must be aligned
+// on a 16-byte boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_si128
+FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
+{
+ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+// boundary or a general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_pd
+#define _mm_store1_pd _mm_store_pd1
+
+// Store the upper double-precision (64-bit) floating-point element from a into
+// memory.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pd
+FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a)));
+#else
+ vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a)));
+#endif
+}
+
+// Store 64-bit integer from the first element of a into memory.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_epi64
+FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
+{
+ vst1_u64((uint64_t *) a, vget_low_u64(vreinterpretq_u64_m128i(b)));
+}
+
+// Store the lower double-precision (64-bit) floating-point element from a into
+// memory.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pd
+FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
+#else
+ vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a)));
+#endif
+}
+
+// Store 2 double-precision (64-bit) floating-point elements from a into memory
+// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_pd
+FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a)
+{
+ float32x4_t f = vreinterpretq_f32_m128d(a);
+ _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2)));
+}
+
+// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from a into memory. mem_addr does not need to be aligned on any
+// particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_pd
+FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a)
+{
+ _mm_store_pd(mem_addr, a);
+}
+
+// Store 128-bits of integer data from a into memory. mem_addr does not need to
+// be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si128
+FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
+{
+ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
+}
+
+// Store 32-bit integer from the first element of a into memory. mem_addr does
+// not need to be aligned on any particular boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si32
+FORCE_INLINE void _mm_storeu_si32(void *p, __m128i a)
+{
+ vst1q_lane_s32((int32_t *) p, vreinterpretq_s32_m128i(a), 0);
+}
+
+// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+// elements) from a into memory using a non-temporal memory hint. mem_addr must
+// be aligned on a 16-byte boundary or a general-protection exception may be
+// generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pd
+FORCE_INLINE void _mm_stream_pd(double *p, __m128d a)
+{
+#if __has_builtin(__builtin_nontemporal_store)
+ __builtin_nontemporal_store(a, (__m128d *) p);
+#elif defined(__aarch64__) || defined(_M_ARM64)
+ vst1q_f64(p, vreinterpretq_f64_m128d(a));
+#else
+ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128d(a));
+#endif
+}
+
+// Store 128-bits of integer data from a into memory using a non-temporal memory
+// hint. mem_addr must be aligned on a 16-byte boundary or a general-protection
+// exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si128
+FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
+{
+#if __has_builtin(__builtin_nontemporal_store)
+ __builtin_nontemporal_store(a, p);
+#else
+ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a));
+#endif
+}
+
+// Store 32-bit integer a into memory using a non-temporal hint to minimize
+// cache pollution. If the cache line containing address mem_addr is already in
+// the cache, the cache will be updated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si32
+FORCE_INLINE void _mm_stream_si32(int *p, int a)
+{
+ vst1q_lane_s32((int32_t *) p, vdupq_n_s32(a), 0);
+}
+
+// Store 64-bit integer a into memory using a non-temporal hint to minimize
+// cache pollution. If the cache line containing address mem_addr is already in
+// the cache, the cache will be updated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si64
+FORCE_INLINE void _mm_stream_si64(__int64 *p, __int64 a)
+{
+ vst1_s64((int64_t *) p, vdup_n_s64((int64_t) a));
+}
+
+// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi16
+FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi32
+FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi64
+FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s64(
+ vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+}
+
+// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi8
+FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Subtract packed double-precision (64-bit) floating-point elements in b from
+// packed double-precision (64-bit) floating-point elements in a, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_pd
+FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[2];
+ c[0] = da[0] - db[0];
+ c[1] = da[1] - db[1];
+ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+#endif
+}
+
+// Subtract the lower double-precision (64-bit) floating-point element in b from
+// the lower double-precision (64-bit) floating-point element in a, store the
+// result in the lower element of dst, and copy the upper element from a to the
+// upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_sd
+FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_sub_pd(a, b));
+}
+
+// Subtract 64-bit integer b from 64-bit integer a, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_si64
+FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s64(
+ vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
+}
+
+// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a
+// using saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi16
+FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s16(
+ vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+}
+
+// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a
+// using saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi8
+FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit
+// integers in a using saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu16
+FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit
+// integers in a using saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu8
+FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(
+ vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+}
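+
+// Worked example of the unsigned saturation (illustrative): with byte lanes
+//   a = [10, 200, ...], b = [20, 100, ...]
+// _mm_subs_epu8(a, b) -> [0, 100, ...]   (10 - 20 clamps to 0 instead of wrapping)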
+
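+// The unordered (ucomi*) double-precision compares are aliased to the ordered
+// (comi*) versions: architecturally they differ only in exception signalling on
+// QNaN operands, which this translation layer does not model.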
+#define _mm_ucomieq_sd _mm_comieq_sd
+#define _mm_ucomige_sd _mm_comige_sd
+#define _mm_ucomigt_sd _mm_comigt_sd
+#define _mm_ucomile_sd _mm_comile_sd
+#define _mm_ucomilt_sd _mm_comilt_sd
+#define _mm_ucomineq_sd _mm_comineq_sd
+
+// Return vector of type __m128d with undefined elements.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_pd
+FORCE_INLINE __m128d _mm_undefined_pd(void)
+{
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#endif
+ __m128d a;
+#if defined(_MSC_VER)
+ a = _mm_setzero_pd();
+#endif
+ return a;
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+// Unpack and interleave 16-bit integers from the high half of a and b, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi16
+FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s16(
+ vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+#else
+ int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
+ int16x4x2_t result = vzip_s16(a1, b1);
+ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
+#endif
+}
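+
+// Worked example (illustrative): with 16-bit lanes a = [a0..a7] and
+// b = [b0..b7] (lane 0 listed first), the high halves interleave as
+//   _mm_unpackhi_epi16(a, b) -> [a4, b4, a5, b5, a6, b6, a7, b7]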
+
+// Unpack and interleave 32-bit integers from the high half of a and b, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi32
+FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s32(
+ vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+#else
+ int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
+ int32x2x2_t result = vzip_s32(a1, b1);
+ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave 64-bit integers from the high half of a and b, and
+// store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi64
+FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s64(
+ vzip2q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+#else
+ int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
+ int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
+ return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
+#endif
+}
+
+// Unpack and interleave 8-bit integers from the high half of a and b, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi8
+FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s8(
+ vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+#else
+ int8x8_t a1 =
+ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
+ int8x8_t b1 =
+ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
+ int8x8x2_t result = vzip_s8(a1, b1);
+ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave double-precision (64-bit) floating-point elements from
+// the high half of a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_pd
+FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ return vreinterpretq_m128d_s64(
+ vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)),
+ vget_high_s64(vreinterpretq_s64_m128d(b))));
+#endif
+}
+
+// Unpack and interleave 16-bit integers from the low half of a and b, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi16
+FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s16(
+ vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+#else
+ int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
+ int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
+ int16x4x2_t result = vzip_s16(a1, b1);
+ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave 32-bit integers from the low half of a and b, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi32
+FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s32(
+ vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+#else
+ int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
+ int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
+ int32x2x2_t result = vzip_s32(a1, b1);
+ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave 64-bit integers from the low half of a and b, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi64
+FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s64(
+ vzip1q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+#else
+ int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
+ int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
+ return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
+#endif
+}
+
+// Unpack and interleave 8-bit integers from the low half of a and b, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi8
+FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s8(
+ vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+#else
+ int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
+ int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
+ int8x8x2_t result = vzip_s8(a1, b1);
+ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
+#endif
+}
+
+// Unpack and interleave double-precision (64-bit) floating-point elements from
+// the low half of a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_pd
+FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ return vreinterpretq_m128d_s64(
+ vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)),
+ vget_low_s64(vreinterpretq_s64_m128d(b))));
+#endif
+}
+
+// Compute the bitwise XOR of packed double-precision (64-bit) floating-point
+// elements in a and b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_pd
+FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b)
+{
+ return vreinterpretq_m128d_s64(
+ veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+}
+
+// Compute the bitwise XOR of 128 bits (representing integer data) in a and b,
+// and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_si128
+FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+/* SSE3 */
+
+// Alternatively add and subtract packed double-precision (64-bit)
+// floating-point elements in a to/from packed elements in b, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_addsub_pd
+FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b)
+{
+ _sse2neon_const __m128d mask = _mm_set_pd(1.0, -1.0);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vfmaq_f64(vreinterpretq_f64_m128d(a),
+ vreinterpretq_f64_m128d(b),
+ vreinterpretq_f64_m128d(mask)));
+#else
+ return _mm_add_pd(_mm_mul_pd(b, mask), a);
+#endif
+}
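+
+// Worked example (illustrative): for a = [a0, a1] and b = [b0, b1] (lane 0
+// listed first), _mm_addsub_pd(a, b) -> [a0 - b0, a1 + b1].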
+
+// Alternatively add and subtract packed single-precision (32-bit)
+// floating-point elements in a to/from packed elements in b, and store the
+// results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=addsub_ps
+FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b)
+{
+ _sse2neon_const __m128 mask = _mm_setr_ps(-1.0f, 1.0f, -1.0f, 1.0f);
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_FMA) /* VFPv4+ */
+ return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(a),
+ vreinterpretq_f32_m128(mask),
+ vreinterpretq_f32_m128(b)));
+#else
+ return _mm_add_ps(_mm_mul_ps(b, mask), a);
+#endif
+}
+
+// Horizontally add adjacent pairs of double-precision (64-bit) floating-point
+// elements in a and b, and pack the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pd
+FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+#else
+ double *da = (double *) &a;
+ double *db = (double *) &b;
+ double c[] = {da[0] + da[1], db[0] + db[1]};
+ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+#endif
+}
+
+// Horizontally add adjacent pairs of single-precision (32-bit) floating-point
+// elements in a and b, and pack the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_ps
+FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+#else
+ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+ return vreinterpretq_m128_f32(
+ vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
+#endif
+}
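+
+// Worked example (illustrative): for a = [a0, a1, a2, a3] and
+// b = [b0, b1, b2, b3] (lane 0 listed first),
+//   _mm_hadd_ps(a, b) -> [a0 + a1, a2 + a3, b0 + b1, b2 + b3]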
+
+// Horizontally subtract adjacent pairs of double-precision (64-bit)
+// floating-point elements in a and b, and pack the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pd
+FORCE_INLINE __m128d _mm_hsub_pd(__m128d _a, __m128d _b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ float64x2_t a = vreinterpretq_f64_m128d(_a);
+ float64x2_t b = vreinterpretq_f64_m128d(_b);
+ return vreinterpretq_m128d_f64(
+ vsubq_f64(vuzp1q_f64(a, b), vuzp2q_f64(a, b)));
+#else
+ double *da = (double *) &_a;
+ double *db = (double *) &_b;
+ double c[] = {da[0] - da[1], db[0] - db[1]};
+ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of single-precision (32-bit)
+// floating-point elements in a and b, and pack the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_ps
+FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
+{
+ float32x4_t a = vreinterpretq_f32_m128(_a);
+ float32x4_t b = vreinterpretq_f32_m128(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vsubq_f32(vuzp1q_f32(a, b), vuzp2q_f32(a, b)));
+#else
+ float32x4x2_t c = vuzpq_f32(a, b);
+ return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
+#endif
+}
+
+// Load 128-bits of integer data from unaligned memory into dst. This intrinsic
+// may perform better than _mm_loadu_si128 when the data crosses a cache line
+// boundary.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lddqu_si128
+#define _mm_lddqu_si128 _mm_loadu_si128
+
+// Load a double-precision (64-bit) floating-point element from memory into both
+// elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loaddup_pd
+#define _mm_loaddup_pd _mm_load1_pd
+
+// Duplicate the low double-precision (64-bit) floating-point element from a,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movedup_pd
+FORCE_INLINE __m128d _mm_movedup_pd(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(
+ vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0));
+#else
+ return vreinterpretq_m128d_u64(
+ vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0)));
+#endif
+}
+
+// Duplicate odd-indexed single-precision (32-bit) floating-point elements
+// from a, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehdup_ps
+FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vtrn2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
+#elif defined(_sse2neon_shuffle)
+ return vreinterpretq_m128_f32(vshuffleq_s32(
+ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3));
+#else
+ float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
+ float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3);
+ float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+#endif
+}
+
+// Duplicate even-indexed single-precision (32-bit) floating-point elements
+// from a, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_moveldup_ps
+FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128_f32(
+ vtrn1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
+#elif defined(_sse2neon_shuffle)
+ return vreinterpretq_m128_f32(vshuffleq_s32(
+ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2));
+#else
+ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+ float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2);
+ float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2};
+ return vreinterpretq_m128_f32(vld1q_f32(data));
+#endif
+}
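+
+// Worked example (illustrative): for a = [a0, a1, a2, a3] (lane 0 first),
+//   _mm_moveldup_ps(a) -> [a0, a0, a2, a2]
+//   _mm_movehdup_ps(a) -> [a1, a1, a3, a3]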
+
+/* SSSE3 */
+
+// Compute the absolute value of packed signed 16-bit integers in a, and store
+// the unsigned results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi16
+FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
+{
+ return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
+}
+
+// Compute the absolute value of packed signed 32-bit integers in a, and store
+// the unsigned results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi32
+FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
+{
+ return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
+}
+
+// Compute the absolute value of packed signed 8-bit integers in a, and store
+// the unsigned results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi8
+FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
+{
+ return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
+}
+
+// Compute the absolute value of packed signed 16-bit integers in a, and store
+// the unsigned results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi16
+FORCE_INLINE __m64 _mm_abs_pi16(__m64 a)
+{
+ return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a)));
+}
+
+// Compute the absolute value of packed signed 32-bit integers in a, and store
+// the unsigned results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi32
+FORCE_INLINE __m64 _mm_abs_pi32(__m64 a)
+{
+ return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a)));
+}
+
+// Compute the absolute value of packed signed 8-bit integers in a, and store
+// the unsigned results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi8
+FORCE_INLINE __m64 _mm_abs_pi8(__m64 a)
+{
+ return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a)));
+}
+
+// Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift
+// the result right by imm8 bytes, and store the low 16 bytes in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi8
+#if defined(__GNUC__) && !defined(__clang__)
+#define _mm_alignr_epi8(a, b, imm) \
+ __extension__({ \
+ uint8x16_t _a = vreinterpretq_u8_m128i(a); \
+ uint8x16_t _b = vreinterpretq_u8_m128i(b); \
+ __m128i ret; \
+ if (_sse2neon_unlikely((imm) & ~31)) \
+ ret = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
+ else if (imm >= 16) \
+ ret = _mm_srli_si128(a, imm >= 16 ? imm - 16 : 0); \
+ else \
+ ret = \
+ vreinterpretq_m128i_u8(vextq_u8(_b, _a, imm < 16 ? imm : 0)); \
+ ret; \
+ })
+
+#else
+#define _mm_alignr_epi8(a, b, imm) \
+ _sse2neon_define2( \
+ __m128i, a, b, uint8x16_t __a = vreinterpretq_u8_m128i(_a); \
+ uint8x16_t __b = vreinterpretq_u8_m128i(_b); __m128i ret; \
+ if (_sse2neon_unlikely((imm) & ~31)) ret = \
+ vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
+ else if (imm >= 16) ret = \
+ _mm_srli_si128(_a, imm >= 16 ? imm - 16 : 0); \
+ else ret = \
+ vreinterpretq_m128i_u8(vextq_u8(__b, __a, imm < 16 ? imm : 0)); \
+ _sse2neon_return(ret);)
+
+#endif
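+
+// Worked example (illustrative): with byte lanes a = [a0..a15] and
+// b = [b0..b15] (byte 0 in the lowest lane), _mm_alignr_epi8(a, b, 4) forms the
+// 32-byte value a:b, shifts it right by 4 bytes and keeps the low 16 bytes:
+//   -> [b4..b15, a0, a1, a2, a3]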
+
+// Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift
+// the result right by imm8 bytes, and store the low 8 bytes in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_pi8
+#define _mm_alignr_pi8(a, b, imm) \
+ _sse2neon_define2( \
+ __m64, a, b, __m64 ret; if (_sse2neon_unlikely((imm) >= 16)) { \
+ ret = vreinterpret_m64_s8(vdup_n_s8(0)); \
+ } else { \
+ uint8x8_t tmp_low; \
+ uint8x8_t tmp_high; \
+ if ((imm) >= 8) { \
+ const int idx = (imm) - 8; \
+ tmp_low = vreinterpret_u8_m64(_a); \
+ tmp_high = vdup_n_u8(0); \
+ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
+ } else { \
+ const int idx = (imm); \
+ tmp_low = vreinterpret_u8_m64(_b); \
+ tmp_high = vreinterpret_u8_m64(_a); \
+ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
+ } \
+ } _sse2neon_return(ret);)
+
+// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
+// signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi16
+FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
+{
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
+#else
+ return vreinterpretq_m128i_s16(
+ vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
+ vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
+#endif
+}
+
+// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
+// signed 32-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi32
+FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
+{
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s32(vpaddq_s32(a, b));
+#else
+ return vreinterpretq_m128i_s32(
+ vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
+ vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
+#endif
+}
+
+// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
+// signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi16
+FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s16(
+ vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+}
+
+// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
+// signed 32-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi32
+FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b)
+{
+ return vreinterpret_m64_s32(
+ vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)));
+}
+
+// Horizontally add adjacent pairs of signed 16-bit integers in a and b using
+// saturation, and pack the signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_epi16
+FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+ return vreinterpretq_m128i_s16(
+ vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+#else
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+ // Interleave using vshrn/vmovn
+ // [a0|a2|a4|a6|b0|b2|b4|b6]
+ // [a1|a3|a5|a7|b1|b3|b5|b7]
+ int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
+ int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
+ // Saturated add
+ return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
+#endif
+}
+
+// Horizontally add adjacent pairs of signed 16-bit integers in a and b using
+// saturation, and pack the signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_pi16
+FORCE_INLINE __m64 _mm_hadds_pi16(__m64 _a, __m64 _b)
+{
+ int16x4_t a = vreinterpret_s16_m64(_a);
+ int16x4_t b = vreinterpret_s16_m64(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpret_m64_s16(vqadd_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
+#else
+ int16x4x2_t res = vuzp_s16(a, b);
+ return vreinterpret_m64_s16(vqadd_s16(res.val[0], res.val[1]));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
+// the signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi16
+FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
+{
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s16(
+ vsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+#else
+ int16x8x2_t c = vuzpq_s16(a, b);
+ return vreinterpretq_m128i_s16(vsubq_s16(c.val[0], c.val[1]));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
+// the signed 32-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi32
+FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
+{
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s32(
+ vsubq_s32(vuzp1q_s32(a, b), vuzp2q_s32(a, b)));
+#else
+ int32x4x2_t c = vuzpq_s32(a, b);
+ return vreinterpretq_m128i_s32(vsubq_s32(c.val[0], c.val[1]));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
+// the signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pi16
+FORCE_INLINE __m64 _mm_hsub_pi16(__m64 _a, __m64 _b)
+{
+ int16x4_t a = vreinterpret_s16_m64(_a);
+ int16x4_t b = vreinterpret_s16_m64(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpret_m64_s16(vsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
+#else
+ int16x4x2_t c = vuzp_s16(a, b);
+ return vreinterpret_m64_s16(vsub_s16(c.val[0], c.val[1]));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
+// the signed 32-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_hsub_pi32
+FORCE_INLINE __m64 _mm_hsub_pi32(__m64 _a, __m64 _b)
+{
+ int32x2_t a = vreinterpret_s32_m64(_a);
+ int32x2_t b = vreinterpret_s32_m64(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpret_m64_s32(vsub_s32(vuzp1_s32(a, b), vuzp2_s32(a, b)));
+#else
+ int32x2x2_t c = vuzp_s32(a, b);
+ return vreinterpret_m64_s32(vsub_s32(c.val[0], c.val[1]));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
+// using saturation, and pack the signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_epi16
+FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
+{
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s16(
+ vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+#else
+ int16x8x2_t c = vuzpq_s16(a, b);
+ return vreinterpretq_m128i_s16(vqsubq_s16(c.val[0], c.val[1]));
+#endif
+}
+
+// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
+// using saturation, and pack the signed 16-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_pi16
+FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b)
+{
+ int16x4_t a = vreinterpret_s16_m64(_a);
+ int16x4_t b = vreinterpret_s16_m64(_b);
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpret_m64_s16(vqsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
+#else
+ int16x4x2_t c = vuzp_s16(a, b);
+ return vreinterpret_m64_s16(vqsub_s16(c.val[0], c.val[1]));
+#endif
+}
+
+// Vertically multiply each unsigned 8-bit integer from a with the corresponding
+// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
+// Horizontally add adjacent pairs of intermediate signed 16-bit integers,
+// and pack the saturated results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16
+FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint8x16_t a = vreinterpretq_u8_m128i(_a);
+ int8x16_t b = vreinterpretq_s8_m128i(_b);
+ int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))),
+ vmovl_s8(vget_low_s8(b)));
+ int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))),
+ vmovl_s8(vget_high_s8(b)));
+ return vreinterpretq_m128i_s16(
+ vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)));
+#else
+ // This would be much simpler if x86 would choose to zero extend OR sign
+ // extend, not both. This could probably be optimized better.
+ uint16x8_t a = vreinterpretq_u16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+
+ // Zero extend a
+ int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
+ int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
+
+ // Sign extend by shifting left then shifting right.
+ int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
+ int16x8_t b_odd = vshrq_n_s16(b, 8);
+
+ // multiply
+ int16x8_t prod1 = vmulq_s16(a_even, b_even);
+ int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
+
+ // saturated add
+ return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
+#endif
+}
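+
+// Worked example (illustrative), with a treated as unsigned and b as signed
+// bytes: dst 16-bit lane i = saturate(a[2i] * b[2i] + a[2i+1] * b[2i+1]),
+// e.g. a = [255, 255, ...], b = [127, 127, ...] -> lane 0 = 32767
+// (saturated down from 64770).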
+
+// Vertically multiply each unsigned 8-bit integer from a with the corresponding
+// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
+// Horizontally add adjacent pairs of intermediate signed 16-bit integers, and
+// pack the saturated results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_pi16
+FORCE_INLINE __m64 _mm_maddubs_pi16(__m64 _a, __m64 _b)
+{
+ uint16x4_t a = vreinterpret_u16_m64(_a);
+ int16x4_t b = vreinterpret_s16_m64(_b);
+
+ // Zero extend a
+ int16x4_t a_odd = vreinterpret_s16_u16(vshr_n_u16(a, 8));
+ int16x4_t a_even = vreinterpret_s16_u16(vand_u16(a, vdup_n_u16(0xff)));
+
+ // Sign extend by shifting left then shifting right.
+ int16x4_t b_even = vshr_n_s16(vshl_n_s16(b, 8), 8);
+ int16x4_t b_odd = vshr_n_s16(b, 8);
+
+ // multiply
+ int16x4_t prod1 = vmul_s16(a_even, b_even);
+ int16x4_t prod2 = vmul_s16(a_odd, b_odd);
+
+ // saturated add
+ return vreinterpret_m64_s16(vqadd_s16(prod1, prod2));
+}
+
+// Multiply packed signed 16-bit integers in a and b, producing intermediate
+// signed 32-bit integers. Shift right by 15 bits while rounding up, and store
+// the packed 16-bit integers in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_epi16
+FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
+{
+ // A single instruction would do:
+ //   return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
+ // but vqrdmulh saturates (e.g. 0x8000 * 0x8000), giving a different result
+ // from x86 PMULHRSW, so compute the rounded product manually instead.
+
+ // Multiply
+ int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
+ vget_low_s16(vreinterpretq_s16_m128i(b)));
+ int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
+ vget_high_s16(vreinterpretq_s16_m128i(b)));
+
+ // Rounding narrowing shift right
+ // narrow = (int16_t)((mul + 16384) >> 15);
+ int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
+ int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
+
+ // Join together
+ return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
+}
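+
+// Worked example (illustrative): each 16-bit lane computes
+//   dst = (int16_t)((a * b + 0x4000) >> 15)
+// e.g. in Q15 fixed point, a = 0x4000 (0.5) and b = 0x2000 (0.25) give
+// dst = 0x1000 (0.125).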
+
+// Multiply packed signed 16-bit integers in a and b, producing intermediate
+// signed 32-bit integers. Truncate each intermediate integer to the 18 most
+// significant bits, round by adding 1, and store bits [16:1] to dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_pi16
+FORCE_INLINE __m64 _mm_mulhrs_pi16(__m64 a, __m64 b)
+{
+ int32x4_t mul_extend =
+ vmull_s16((vreinterpret_s16_m64(a)), (vreinterpret_s16_m64(b)));
+
+ // Rounding narrowing shift right
+ return vreinterpret_m64_s16(vrshrn_n_s32(mul_extend, 15));
+}
+
+// Shuffle packed 8-bit integers in a according to shuffle control mask in the
+// corresponding 8-bit element of b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi8
+FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
+{
+ int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
+ uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
+ uint8x16_t idx_masked =
+ vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
+#elif defined(__GNUC__)
+ int8x16_t ret;
+ // %e and %f represent the even and odd D registers
+ // respectively.
+ __asm__ __volatile__(
+ "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
+ "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
+ : [ret] "=&w"(ret)
+ : [tbl] "w"(tbl), [idx] "w"(idx_masked));
+ return vreinterpretq_m128i_s8(ret);
+#else
+ // ARMv7-A fallback: split the 128-bit table and use two vtbl2 lookups
+ int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
+ return vreinterpretq_m128i_s8(
+ vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
+ vtbl2_s8(a_split, vget_high_u8(idx_masked))));
+#endif
+}
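+
+// Worked example (illustrative): per byte lane,
+//   dst[i] = (b[i] & 0x80) ? 0 : a[b[i] & 0x0F]
+// so b = _mm_setzero_si128() broadcasts a's lowest byte into every lane.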
+
+// Shuffle packed 8-bit integers in a according to shuffle control mask in the
+// corresponding 8-bit element of b, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi8
+FORCE_INLINE __m64 _mm_shuffle_pi8(__m64 a, __m64 b)
+{
+ const int8x8_t controlMask =
+ vand_s8(vreinterpret_s8_m64(b), vdup_n_s8((int8_t) (0x1 << 7 | 0x07)));
+ int8x8_t res = vtbl1_s8(vreinterpret_s8_m64(a), controlMask);
+ return vreinterpret_m64_s8(res);
+}
+
+// Negate packed 16-bit integers in a when the corresponding signed
+// 16-bit integer in b is negative, and store the results in dst.
+// Elements in dst are zeroed out when the corresponding element
+// in b is zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi16
+FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
+{
+ int16x8_t a = vreinterpretq_s16_m128i(_a);
+ int16x8_t b = vreinterpretq_s16_m128i(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFF : 0
+ uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
+ // (b == 0) ? 0xFFFF : 0
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b));
+#else
+ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0)));
+#endif
+
+ // bitwise select either a or its negation (vnegq_s16(a)) based on ltMask
+ int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a);
+ // res = masked & (~zeroMask)
+ int16x8_t res = vbicq_s16(masked, zeroMask);
+ return vreinterpretq_m128i_s16(res);
+}
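+
+// Worked example (illustrative): per 16-bit lane,
+//   dst = (b < 0) ? -a : (b == 0) ? 0 : a
+// e.g. a = [5, 5, 5, ...], b = [-1, 0, 7, ...] -> dst = [-5, 0, 5, ...].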
+
+// Negate packed 32-bit integers in a when the corresponding signed
+// 32-bit integer in b is negative, and store the results in dst.
+// Elements in dst are zeroed out when the corresponding element
+// in b is zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi32
+FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
+{
+ int32x4_t a = vreinterpretq_s32_m128i(_a);
+ int32x4_t b = vreinterpretq_s32_m128i(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFFFFFF : 0
+ uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
+
+ // (b == 0) ? 0xFFFFFFFF : 0
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b));
+#else
+ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0)));
+#endif
+
+ // bitwise select either a or its negation (vnegq_s32(a)) based on ltMask
+ int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a);
+ // res = masked & (~zeroMask)
+ int32x4_t res = vbicq_s32(masked, zeroMask);
+ return vreinterpretq_m128i_s32(res);
+}
+
+// Negate packed 8-bit integers in a when the corresponding signed
+// 8-bit integer in b is negative, and store the results in dst.
+// Elements in dst are zeroed out when the corresponding element
+// in b is zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi8
+FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
+{
+ int8x16_t a = vreinterpretq_s8_m128i(_a);
+ int8x16_t b = vreinterpretq_s8_m128i(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFF : 0
+ uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
+
+ // (b == 0) ? 0xFF : 0
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b));
+#else
+ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0)));
+#endif
+
+ // bitwise select either a or its negation (vnegq_s8(a)) based on ltMask
+ int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a);
+ // res = masked & (~zeroMask)
+ int8x16_t res = vbicq_s8(masked, zeroMask);
+
+ return vreinterpretq_m128i_s8(res);
+}
+
+// Negate packed 16-bit integers in a when the corresponding signed 16-bit
+// integer in b is negative, and store the results in dst. Elements in dst are
+// zeroed out when the corresponding element in b is zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi16
+FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b)
+{
+ int16x4_t a = vreinterpret_s16_m64(_a);
+ int16x4_t b = vreinterpret_s16_m64(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFF : 0
+ uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15));
+
+ // (b == 0) ? 0xFFFF : 0
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));
+#else
+ int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));
+#endif
+
+ // bitwise select either a or its negation (vneg_s16(a)) based on ltMask
+ int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a);
+ // res = masked & (~zeroMask)
+ int16x4_t res = vbic_s16(masked, zeroMask);
+
+ return vreinterpret_m64_s16(res);
+}
+
+// Negate packed 32-bit integers in a when the corresponding signed 32-bit
+// integer in b is negative, and store the results in dst. Elements in dst are
+// zeroed out when the corresponding element in b is zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi32
+FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b)
+{
+ int32x2_t a = vreinterpret_s32_m64(_a);
+ int32x2_t b = vreinterpret_s32_m64(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFFFFFFFF : 0
+ uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31));
+
+ // (b == 0) ? 0xFFFFFFFF : 0
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b));
+#else
+ int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0)));
+#endif
+
+ // bitwise select either a or its negation (vneg_s32(a)) based on ltMask
+ int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a);
+ // res = masked & (~zeroMask)
+ int32x2_t res = vbic_s32(masked, zeroMask);
+
+ return vreinterpret_m64_s32(res);
+}
+
+// Negate packed 8-bit integers in a when the corresponding signed 8-bit integer
+// in b is negative, and store the results in dst. Elements in dst are zeroed out
+// when the corresponding element in b is zero.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi8
+FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b)
+{
+ int8x8_t a = vreinterpret_s8_m64(_a);
+ int8x8_t b = vreinterpret_s8_m64(_b);
+
+ // signed shift right: faster than vclt
+ // (b < 0) ? 0xFF : 0
+ uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7));
+
+ // (b == 0) ? 0xFF : 0
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b));
+#else
+ int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0)));
+#endif
+
+ // bitwise select either a or its negation (vneg_s8(a)) based on ltMask
+ int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a);
+ // res = masked & (~zeroMask)
+ int8x8_t res = vbic_s8(masked, zeroMask);
+
+ return vreinterpret_m64_s8(res);
+}
+
+/* SSE4.1 */
+
+// Blend packed 16-bit integers from a and b using control mask imm8, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi16
+// FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
+// __constrange(0,255) int imm)
+#define _mm_blend_epi16(a, b, imm) \
+ _sse2neon_define2( \
+ __m128i, a, b, \
+ const uint16_t _mask[8] = \
+ _sse2neon_init(((imm) & (1 << 0)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 1)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 2)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 3)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 4)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 5)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 6)) ? (uint16_t) -1 : 0x0, \
+ ((imm) & (1 << 7)) ? (uint16_t) -1 : 0x0); \
+ uint16x8_t _mask_vec = vld1q_u16(_mask); \
+ uint16x8_t __a = vreinterpretq_u16_m128i(_a); \
+ uint16x8_t __b = vreinterpretq_u16_m128i(_b); _sse2neon_return( \
+ vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, __b, __a)));)
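+
+// Worked example (illustrative): bit i of imm8 selects 16-bit lane i from b
+// (bit set) or from a (bit clear), e.g. with lane 0 listed first,
+//   _mm_blend_epi16(a, b, 0x0F) -> [b0, b1, b2, b3, a4, a5, a6, a7]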
+
+// Blend packed double-precision (64-bit) floating-point elements from a and b
+// using control mask imm8, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_pd
+#define _mm_blend_pd(a, b, imm) \
+ _sse2neon_define2( \
+ __m128d, a, b, \
+ const uint64_t _mask[2] = \
+ _sse2neon_init(((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \
+ ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)); \
+ uint64x2_t _mask_vec = vld1q_u64(_mask); \
+ uint64x2_t __a = vreinterpretq_u64_m128d(_a); \
+ uint64x2_t __b = vreinterpretq_u64_m128d(_b); _sse2neon_return( \
+ vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, __b, __a)));)
+
+// Blend packed single-precision (32-bit) floating-point elements from a and b
+// using mask, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_ps
+FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8)
+{
+ const uint32_t ALIGN_STRUCT(16)
+ data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0,
+ ((imm8) & (1 << 1)) ? UINT32_MAX : 0,
+ ((imm8) & (1 << 2)) ? UINT32_MAX : 0,
+ ((imm8) & (1 << 3)) ? UINT32_MAX : 0};
+ uint32x4_t mask = vld1q_u32(data);
+ float32x4_t a = vreinterpretq_f32_m128(_a);
+ float32x4_t b = vreinterpretq_f32_m128(_b);
+ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
+}
+
+// Blend packed 8-bit integers from a and b using mask, and store the results in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_epi8
+FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
+{
+ // Use a signed shift right to create a mask with the sign bit
+ uint8x16_t mask =
+ vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
+ uint8x16_t a = vreinterpretq_u8_m128i(_a);
+ uint8x16_t b = vreinterpretq_u8_m128i(_b);
+ return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
+}
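+
+// Worked example (illustrative): only the sign bit of each mask byte matters,
+//   dst[i] = (mask[i] & 0x80) ? b[i] : a[i]
+// so a mask produced by _mm_cmpgt_epi8(x, y) picks b wherever x > y.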
+
+// Blend packed double-precision (64-bit) floating-point elements from a and b
+// using mask, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_pd
+FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask)
+{
+ uint64x2_t mask =
+ vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63));
+#if defined(__aarch64__) || defined(_M_ARM64)
+ float64x2_t a = vreinterpretq_f64_m128d(_a);
+ float64x2_t b = vreinterpretq_f64_m128d(_b);
+ return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a));
+#else
+ uint64x2_t a = vreinterpretq_u64_m128d(_a);
+ uint64x2_t b = vreinterpretq_u64_m128d(_b);
+ return vreinterpretq_m128d_u64(vbslq_u64(mask, b, a));
+#endif
+}
+
+// Blend packed single-precision (32-bit) floating-point elements from a and b
+// using mask, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_ps
+FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask)
+{
+ // Use a signed shift right to create a mask with the sign bit
+ uint32x4_t mask =
+ vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31));
+ float32x4_t a = vreinterpretq_f32_m128(_a);
+ float32x4_t b = vreinterpretq_f32_m128(_b);
+ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
+}
+
+// Round the packed double-precision (64-bit) floating-point elements in a up
+// to an integer value, and store the results as packed double-precision
+// floating-point elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_pd
+FORCE_INLINE __m128d _mm_ceil_pd(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vrndpq_f64(vreinterpretq_f64_m128d(a)));
+#else
+ double *f = (double *) &a;
+ return _mm_set_pd(ceil(f[1]), ceil(f[0]));
+#endif
+}
+
+// Round the packed single-precision (32-bit) floating-point elements in a up to
+// an integer value, and store the results as packed single-precision
+// floating-point elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ps
+FORCE_INLINE __m128 _mm_ceil_ps(__m128 a)
+{
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+ return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a)));
+#else
+ float *f = (float *) &a;
+ return _mm_set_ps(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]), ceilf(f[0]));
+#endif
+}
+
+// Round the lower double-precision (64-bit) floating-point element in b up to
+// an integer value, store the result as a double-precision floating-point
+// element in the lower element of dst, and copy the upper element from a to the
+// upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_sd
+FORCE_INLINE __m128d _mm_ceil_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_ceil_pd(b));
+}
+
+// Round the lower single-precision (32-bit) floating-point element in b up to
+// an integer value, store the result as a single-precision floating-point
+// element in the lower element of dst, and copy the upper 3 packed elements
+// from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ss
+FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_ceil_ps(b));
+}
+
+// Compare packed 64-bit integers in a and b for equality, and store the results
+// in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi64
+FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_u64(
+ vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b)));
+#else
+ // ARMv7 lacks vceqq_u64
+ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+ uint32x4_t cmp =
+ vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b));
+ uint32x4_t swapped = vrev64q_u32(cmp);
+ return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
+#endif
+}
+
+// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi32
+FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
+{
+ return vreinterpretq_m128i_s32(
+ vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
+}
+
+// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi64
+FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
+{
+ int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
+ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
+ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_s64(s64x2);
+}
+
+// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi64
+FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
+{
+ return vreinterpretq_m128i_s64(
+ vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
+}
+
+// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi16
+FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
+{
+ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ return vreinterpretq_m128i_s16(s16x8);
+}
+
+// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi32
+FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
+{
+ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
+ return vreinterpretq_m128i_s32(s32x4);
+}
+
+// Sign extend packed 8-bit integers in the low 8 bytes of a to packed 64-bit
+// integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi64
+FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
+{
+ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
+ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
+ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
+ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_s64(s64x2);
+}
+
+// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi32
+FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
+{
+ return vreinterpretq_m128i_u32(
+ vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
+}
+
+// Zero extend packed unsigned 16-bit integers in a to packed 64-bit integers,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi64
+FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
+{
+ uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
+ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
+ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_u64(u64x2);
+}
+
+// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu32_epi64
+FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
+{
+ return vreinterpretq_m128i_u64(
+ vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
+}
+
+// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi16
+FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
+{
+ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx HGFE DCBA */
+ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0H0G 0F0E 0D0C 0B0A */
+ return vreinterpretq_m128i_u16(u16x8);
+}
+
+// Zero extend packed unsigned 8-bit integers in a to packed 32-bit integers,
+// and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi32
+FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
+{
+ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
+ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
+ return vreinterpretq_m128i_u32(u32x4);
+}
+
+// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed
+// 64-bit integers, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi64
+FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
+{
+ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
+ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
+ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
+ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
+ return vreinterpretq_m128i_u64(u64x2);
+}
+
+// Conditionally multiply the packed double-precision (64-bit) floating-point
+// elements in a and b using the high 4 bits in imm8, sum the four products, and
+// conditionally store the sum in dst using the low 4 bits of imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_pd
+FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm)
+{
+ // Generate mask value from constant immediate bit value
+ const int64_t bit0Mask = imm & 0x01 ? UINT64_MAX : 0;
+ const int64_t bit1Mask = imm & 0x02 ? UINT64_MAX : 0;
+#if !SSE2NEON_PRECISE_DP
+ const int64_t bit4Mask = imm & 0x10 ? UINT64_MAX : 0;
+ const int64_t bit5Mask = imm & 0x20 ? UINT64_MAX : 0;
+#endif
+ // Conditional multiplication
+#if !SSE2NEON_PRECISE_DP
+ __m128d mul = _mm_mul_pd(a, b);
+ const __m128d mulMask =
+ _mm_castsi128_pd(_mm_set_epi64x(bit5Mask, bit4Mask));
+ __m128d tmp = _mm_and_pd(mul, mulMask);
+#else
+#if defined(__aarch64__) || defined(_M_ARM64)
+ double d0 = (imm & 0x10) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0) *
+ vgetq_lane_f64(vreinterpretq_f64_m128d(b), 0)
+ : 0;
+ double d1 = (imm & 0x20) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1) *
+ vgetq_lane_f64(vreinterpretq_f64_m128d(b), 1)
+ : 0;
+#else
+ double d0 = (imm & 0x10) ? ((double *) &a)[0] * ((double *) &b)[0] : 0;
+ double d1 = (imm & 0x20) ? ((double *) &a)[1] * ((double *) &b)[1] : 0;
+#endif
+ __m128d tmp = _mm_set_pd(d1, d0);
+#endif
+ // Sum the products
+#if defined(__aarch64__) || defined(_M_ARM64)
+ double sum = vpaddd_f64(vreinterpretq_f64_m128d(tmp));
+#else
+ double sum = *((double *) &tmp) + *(((double *) &tmp) + 1);
+#endif
+ // Conditionally store the sum
+ const __m128d sumMask =
+ _mm_castsi128_pd(_mm_set_epi64x(bit1Mask, bit0Mask));
+ __m128d res = _mm_and_pd(_mm_set_pd1(sum), sumMask);
+ return res;
+}
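+
+// Illustrative example (not part of the API): with imm == 0x31, bits 4 and 5
+// select both products and bit 0 stores the sum in lane 0 only, so for
+// a = {1.0, 2.0} and b = {3.0, 4.0} the result is {1*3 + 2*4, 0} = {11.0, 0}.
+//
+//   __m128d a = _mm_set_pd(2.0, 1.0); /* lane1 = 2.0, lane0 = 1.0 */
+//   __m128d b = _mm_set_pd(4.0, 3.0);
+//   __m128d r = _mm_dp_pd(a, b, 0x31); /* r = {11.0, 0.0} */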
+
+// Conditionally multiply the packed single-precision (32-bit) floating-point
+// elements in a and b using the high 4 bits in imm8, sum the four products,
+// and conditionally store the sum in dst using the low 4 bits of imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_ps
+FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm)
+{
+ float32x4_t elementwise_prod = _mm_mul_ps(a, b);
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ /* shortcuts */
+ if (imm == 0xFF) {
+ return _mm_set1_ps(vaddvq_f32(elementwise_prod));
+ }
+
+ if ((imm & 0x0F) == 0x0F) {
+ if (!(imm & (1 << 4)))
+ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 0);
+ if (!(imm & (1 << 5)))
+ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 1);
+ if (!(imm & (1 << 6)))
+ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 2);
+ if (!(imm & (1 << 7)))
+ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 3);
+
+ return _mm_set1_ps(vaddvq_f32(elementwise_prod));
+ }
+#endif
+
+ float s = 0.0f;
+
+ if (imm & (1 << 4))
+ s += vgetq_lane_f32(elementwise_prod, 0);
+ if (imm & (1 << 5))
+ s += vgetq_lane_f32(elementwise_prod, 1);
+ if (imm & (1 << 6))
+ s += vgetq_lane_f32(elementwise_prod, 2);
+ if (imm & (1 << 7))
+ s += vgetq_lane_f32(elementwise_prod, 3);
+
+ const float32_t res[4] = {
+ (imm & 0x1) ? s : 0.0f,
+ (imm & 0x2) ? s : 0.0f,
+ (imm & 0x4) ? s : 0.0f,
+ (imm & 0x8) ? s : 0.0f,
+ };
+ return vreinterpretq_m128_f32(vld1q_f32(res));
+}
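+
+// Illustrative example: a three-component dot product whose result lands in
+// lane 0 only uses imm == 0x71 (bits 4-6 select the products of lanes 0-2,
+// bit 0 selects output lane 0):
+//
+//   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* lanes 3..0 */
+//   __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
+//   __m128 r = _mm_dp_ps(a, b, 0x71); /* r = {38.0f, 0, 0, 0} */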
+
+// Extract a 32-bit integer from a, selected with imm8, and store the result in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi32
+// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
+#define _mm_extract_epi32(a, imm) \
+ vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))
+
+// Extract a 64-bit integer from a, selected with imm8, and store the result in
+// dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi64
+// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
+#define _mm_extract_epi64(a, imm) \
+ vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))
+
+// Extract an 8-bit integer from a, selected with imm8, and store the result in
+// the lower element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi8
+// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
+#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))
+
+// Extract a single-precision (32-bit) floating-point element from a, selected
+// with imm8, and store the result (its raw bit pattern) in dst as an integer.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_ps
+// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
+#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))
+
+// Round the packed double-precision (64-bit) floating-point elements in a down
+// to an integer value, and store the results as packed double-precision
+// floating-point elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_pd
+FORCE_INLINE __m128d _mm_floor_pd(__m128d a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128d_f64(vrndmq_f64(vreinterpretq_f64_m128d(a)));
+#else
+ double *f = (double *) &a;
+ return _mm_set_pd(floor(f[1]), floor(f[0]));
+#endif
+}
+
+// Round the packed single-precision (32-bit) floating-point elements in a down
+// to an integer value, and store the results as packed single-precision
+// floating-point elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ps
+FORCE_INLINE __m128 _mm_floor_ps(__m128 a)
+{
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+ return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a)));
+#else
+ float *f = (float *) &a;
+ return _mm_set_ps(floorf(f[3]), floorf(f[2]), floorf(f[1]), floorf(f[0]));
+#endif
+}
+
+// Round the lower double-precision (64-bit) floating-point element in b down to
+// an integer value, store the result as a double-precision floating-point
+// element in the lower element of dst, and copy the upper element from a to the
+// upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_sd
+FORCE_INLINE __m128d _mm_floor_sd(__m128d a, __m128d b)
+{
+ return _mm_move_sd(a, _mm_floor_pd(b));
+}
+
+// Round the lower single-precision (32-bit) floating-point element in b down to
+// an integer value, store the result as a single-precision floating-point
+// element in the lower element of dst, and copy the upper 3 packed elements
+// from a to the upper elements of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ss
+FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b)
+{
+ return _mm_move_ss(a, _mm_floor_ps(b));
+}
+
+// Copy a to dst, and insert the 32-bit integer i into dst at the location
+// specified by imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi32
+// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
+// __constrange(0,4) int imm)
+#define _mm_insert_epi32(a, b, imm) \
+ vreinterpretq_m128i_s32( \
+ vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm)))
+
+// Copy a to dst, and insert the 64-bit integer i into dst at the location
+// specified by imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi64
+// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
+// __constrange(0,2) int imm)
+#define _mm_insert_epi64(a, b, imm) \
+ vreinterpretq_m128i_s64( \
+ vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm)))
+
+// Copy a to dst, and insert the lower 8-bit integer from i into dst at the
+// location specified by imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi8
+// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
+// __constrange(0,16) int imm)
+#define _mm_insert_epi8(a, b, imm) \
+ vreinterpretq_m128i_s8(vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm)))
+
+// Copy a to tmp, then insert a single-precision (32-bit) floating-point
+// element from b into tmp using the control in imm8. Store tmp to dst using
+// the mask in imm8 (elements are zeroed out when the corresponding bit is set).
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=insert_ps
+#define _mm_insert_ps(a, b, imm8) \
+ _sse2neon_define2( \
+ __m128, a, b, \
+ float32x4_t tmp1 = \
+ vsetq_lane_f32(vgetq_lane_f32(_b, (imm8 >> 6) & 0x3), \
+ vreinterpretq_f32_m128(_a), 0); \
+ float32x4_t tmp2 = \
+ vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), \
+ vreinterpretq_f32_m128(_a), ((imm8 >> 4) & 0x3)); \
+ const uint32_t data[4] = \
+ _sse2neon_init(((imm8) & (1 << 0)) ? UINT32_MAX : 0, \
+ ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \
+ ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \
+ ((imm8) & (1 << 3)) ? UINT32_MAX : 0); \
+ uint32x4_t mask = vld1q_u32(data); \
+ float32x4_t all_zeros = vdupq_n_f32(0); \
+ \
+ _sse2neon_return(vreinterpretq_m128_f32( \
+ vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))));)
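+
+// Note on the imm8 encoding (informal summary): bits [7:6] select the source
+// lane of b, bits [5:4] select the destination lane in a, and bits [3:0] zero
+// the corresponding result lanes. For example (illustrative only):
+//
+//   __m128 r0 = _mm_insert_ps(a, b, 0x30); /* lane 3 of r0 = lane 0 of b,
+//                                             other lanes copied from a */
+//   __m128 r1 = _mm_insert_ps(a, b, 0x0E); /* lane 0 of r1 = lane 0 of b,
+//                                             lanes 1-3 zeroed */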
+
+// Compare packed signed 32-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi32
+FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed signed 8-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi8
+FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed unsigned 16-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu16
+FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Compare packed unsigned 32-bit integers in a and b, and store packed maximum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu32
+FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
+}
+
+// Compare packed signed 32-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi32
+FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Compare packed signed 8-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi8
+FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s8(
+ vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+}
+
+// Compare packed unsigned 16-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu16
+FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+}
+
+// Compare packed unsigned 32-bit integers in a and b, and store packed minimum
+// values in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu32
+FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u32(
+ vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
+}
+
+// Horizontally compute the minimum amongst the packed unsigned 16-bit integers
+// in a, store the minimum and index in dst, and zero the remaining bits in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_minpos_epu16
+FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
+{
+ __m128i dst;
+ uint16_t min, idx = 0;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ // Find the minimum value
+ min = vminvq_u16(vreinterpretq_u16_m128i(a));
+
+ // Get the index of the minimum value
+ static const uint16_t idxv[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ uint16x8_t minv = vdupq_n_u16(min);
+ uint16x8_t cmeq = vceqq_u16(minv, vreinterpretq_u16_m128i(a));
+ idx = vminvq_u16(vornq_u16(vld1q_u16(idxv), cmeq));
+#else
+ // Find the minimum value
+ __m64 tmp;
+ tmp = vreinterpret_m64_u16(
+ vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
+ vget_high_u16(vreinterpretq_u16_m128i(a))));
+ tmp = vreinterpret_m64_u16(
+ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
+ tmp = vreinterpret_m64_u16(
+ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
+ min = vget_lane_u16(vreinterpret_u16_m64(tmp), 0);
+ // Get the index of the minimum value
+ int i;
+ for (i = 0; i < 8; i++) {
+ if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
+ idx = (uint16_t) i;
+ break;
+ }
+ a = _mm_srli_si128(a, 2);
+ }
+#endif
+ // Generate result
+ dst = _mm_setzero_si128();
+ dst = vreinterpretq_m128i_u16(
+ vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
+ dst = vreinterpretq_m128i_u16(
+ vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
+ return dst;
+}
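+
+// Illustrative example: for a = {7, 3, 9, 3, 8, 5, 6, 4} (lane 0 listed
+// first), the minimum value is 3 and its lowest index is 1, so dst holds
+// {3, 1, 0, 0, 0, 0, 0, 0}.
+//
+//   __m128i a = _mm_setr_epi16(7, 3, 9, 3, 8, 5, 6, 4);
+//   __m128i r = _mm_minpos_epu16(a); /* lane 0 = 3 (min), lane 1 = 1 (index) */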
+
+// Compute the sum of absolute differences (SADs) of quadruplets of unsigned
+// 8-bit integers in a compared to those in b, and store the 16-bit results in
+// dst. Eight SADs are performed using one quadruplet from b and eight
+// quadruplets from a. One quadruplet is selected from b starting at the
+// offset specified in imm8. Eight quadruplets are formed from sequential 8-bit
+// integers selected from a starting at the offset specified in imm8.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mpsadbw_epu8
+FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm)
+{
+ uint8x16_t _a, _b;
+
+ switch (imm & 0x4) {
+ case 0:
+ // do nothing
+ _a = vreinterpretq_u8_m128i(a);
+ break;
+ case 4:
+ _a = vreinterpretq_u8_u32(vextq_u32(vreinterpretq_u32_m128i(a),
+ vreinterpretq_u32_m128i(a), 1));
+ break;
+ default:
+#if defined(__GNUC__) || defined(__clang__)
+ __builtin_unreachable();
+#elif defined(_MSC_VER)
+ __assume(0);
+#endif
+ break;
+ }
+
+ switch (imm & 0x3) {
+ case 0:
+ _b = vreinterpretq_u8_u32(
+ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 0)));
+ break;
+ case 1:
+ _b = vreinterpretq_u8_u32(
+ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 1)));
+ break;
+ case 2:
+ _b = vreinterpretq_u8_u32(
+ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 2)));
+ break;
+ case 3:
+ _b = vreinterpretq_u8_u32(
+ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 3)));
+ break;
+ default:
+#if defined(__GNUC__) || defined(__clang__)
+ __builtin_unreachable();
+#elif defined(_MSC_VER)
+ __assume(0);
+#endif
+ break;
+ }
+
+ int16x8_t c04, c15, c26, c37;
+ uint8x8_t low_b = vget_low_u8(_b);
+ c04 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a), low_b));
+ uint8x16_t _a_1 = vextq_u8(_a, _a, 1);
+ c15 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_1), low_b));
+ uint8x16_t _a_2 = vextq_u8(_a, _a, 2);
+ c26 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_2), low_b));
+ uint8x16_t _a_3 = vextq_u8(_a, _a, 3);
+ c37 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_3), low_b));
+#if defined(__aarch64__) || defined(_M_ARM64)
+ // |0|4|2|6|
+ c04 = vpaddq_s16(c04, c26);
+ // |1|5|3|7|
+ c15 = vpaddq_s16(c15, c37);
+
+ int32x4_t trn1_c =
+ vtrn1q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
+ int32x4_t trn2_c =
+ vtrn2q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
+ return vreinterpretq_m128i_s16(vpaddq_s16(vreinterpretq_s16_s32(trn1_c),
+ vreinterpretq_s16_s32(trn2_c)));
+#else
+ int16x4_t c01, c23, c45, c67;
+ c01 = vpadd_s16(vget_low_s16(c04), vget_low_s16(c15));
+ c23 = vpadd_s16(vget_low_s16(c26), vget_low_s16(c37));
+ c45 = vpadd_s16(vget_high_s16(c04), vget_high_s16(c15));
+ c67 = vpadd_s16(vget_high_s16(c26), vget_high_s16(c37));
+
+ return vreinterpretq_m128i_s16(
+ vcombine_s16(vpadd_s16(c01, c23), vpadd_s16(c45, c67)));
+#endif
+}
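+
+// Worked example (informal): with imm == 0 the quadruplet b[3:0] is compared
+// against the eight overlapping quadruplets a[3:0], a[4:1], ..., a[10:7],
+// giving dst[k] = |a[k]-b[0]| + |a[k+1]-b[1]| + |a[k+2]-b[2]| + |a[k+3]-b[3]|
+// for k = 0..7. Bit 2 of imm shifts a's starting offset to byte 4, and bits
+// 0-1 select which 32-bit group of b supplies the quadruplet.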
+
+// Multiply the low signed 32-bit integers from each packed 64-bit element in
+// a and b, and store the signed 64-bit results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epi32
+FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
+{
+ // vmull_s32 upcasts instead of masking, so we downcast.
+ int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
+ int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
+ return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
+}
+
+// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit
+// integers, and store the low 32 bits of the intermediate integers in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi32
+FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_s32(
+ vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+}
+
+// Convert packed signed 32-bit integers from a and b to packed 16-bit integers
+// using unsigned saturation, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi32
+FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u16(
+ vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)),
+ vqmovun_s32(vreinterpretq_s32_m128i(b))));
+}
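+
+// Illustrative example of the unsigned saturation: 32-bit lanes below 0 clamp
+// to 0 and lanes above 65535 clamp to 65535, e.g.
+//
+//   __m128i a = _mm_setr_epi32(-1, 70000, 5, 65535);
+//   __m128i r = _mm_packus_epi32(a, a); /* 16-bit lanes: 0, 65535, 5, 65535,
+//                                          0, 65535, 5, 65535 */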
+
+// Round the packed double-precision (64-bit) floating-point elements in a using
+// the rounding parameter, and store the results as packed double-precision
+// floating-point elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_pd
+FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ switch (rounding) {
+ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128d_f64(vrndnq_f64(vreinterpretq_f64_m128d(a)));
+ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
+ return _mm_floor_pd(a);
+ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
+ return _mm_ceil_pd(a);
+ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128d_f64(vrndq_f64(vreinterpretq_f64_m128d(a)));
+ default: //_MM_FROUND_CUR_DIRECTION
+ return vreinterpretq_m128d_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)));
+ }
+#else
+ double *v_double = (double *) &a;
+
+ if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
+ (rounding == _MM_FROUND_CUR_DIRECTION &&
+ _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
+ double res[2], tmp;
+ for (int i = 0; i < 2; i++) {
+ tmp = (v_double[i] < 0) ? -v_double[i] : v_double[i];
+ double roundDown = floor(tmp); // Round down value
+ double roundUp = ceil(tmp); // Round up value
+ double diffDown = tmp - roundDown;
+ double diffUp = roundUp - tmp;
+ if (diffDown < diffUp) {
+ /* If it's closer to the round down value, then use it */
+ res[i] = roundDown;
+ } else if (diffDown > diffUp) {
+ /* If it's closer to the round up value, then use it */
+ res[i] = roundUp;
+ } else {
+ /* If it's equidistant between round up and round down value,
+ * pick the one which is an even number */
+ double half = roundDown / 2;
+ if (half != floor(half)) {
+ /* If the round down value is odd, return the round up value
+ */
+ res[i] = roundUp;
+ } else {
+ /* If the round up value is odd, return the round down value
+ */
+ res[i] = roundDown;
+ }
+ }
+ res[i] = (v_double[i] < 0) ? -res[i] : res[i];
+ }
+ return _mm_set_pd(res[1], res[0]);
+ } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
+ (rounding == _MM_FROUND_CUR_DIRECTION &&
+ _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
+ return _mm_floor_pd(a);
+ } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
+ (rounding == _MM_FROUND_CUR_DIRECTION &&
+ _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
+ return _mm_ceil_pd(a);
+ }
+ return _mm_set_pd(v_double[1] > 0 ? floor(v_double[1]) : ceil(v_double[1]),
+ v_double[0] > 0 ? floor(v_double[0]) : ceil(v_double[0]));
+#endif
+}
+
+// Round the packed single-precision (32-bit) floating-point elements in a using
+// the rounding parameter, and store the results as packed single-precision
+// floating-point elements in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ps
+FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding)
+{
+#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+ switch (rounding) {
+ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a)));
+ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
+ return _mm_floor_ps(a);
+ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
+ return _mm_ceil_ps(a);
+ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
+ return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a)));
+ default: //_MM_FROUND_CUR_DIRECTION
+ return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a)));
+ }
+#else
+ float *v_float = (float *) &a;
+
+ if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
+ (rounding == _MM_FROUND_CUR_DIRECTION &&
+ _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
+ uint32x4_t signmask = vdupq_n_u32(0x80000000);
+ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
+ vdupq_n_f32(0.5f)); /* +/- 0.5 */
+ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
+ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
+ int32x4_t r_trunc = vcvtq_s32_f32(
+ vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
+ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
+ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
+ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
+ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
+ float32x4_t delta = vsubq_f32(
+ vreinterpretq_f32_m128(a),
+ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
+ uint32x4_t is_delta_half =
+ vceqq_f32(delta, half); /* delta == +/- 0.5 */
+ return vreinterpretq_m128_f32(
+ vcvtq_f32_s32(vbslq_s32(is_delta_half, r_even, r_normal)));
+ } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
+ (rounding == _MM_FROUND_CUR_DIRECTION &&
+ _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
+ return _mm_floor_ps(a);
+ } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
+ (rounding == _MM_FROUND_CUR_DIRECTION &&
+ _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
+ return _mm_ceil_ps(a);
+ }
+ return _mm_set_ps(v_float[3] > 0 ? floorf(v_float[3]) : ceilf(v_float[3]),
+ v_float[2] > 0 ? floorf(v_float[2]) : ceilf(v_float[2]),
+ v_float[1] > 0 ? floorf(v_float[1]) : ceilf(v_float[1]),
+ v_float[0] > 0 ? floorf(v_float[0]) : ceilf(v_float[0]));
+#endif
+}
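+
+// Illustrative example: for a = {1.5f, 2.5f, -1.5f, -2.5f} (lane 0 first),
+// rounding to nearest ties to even and yields {2, 2, -2, -2}, while rounding
+// toward zero truncates to {1, 2, -1, -2}:
+//
+//   __m128 a = _mm_setr_ps(1.5f, 2.5f, -1.5f, -2.5f);
+//   __m128 n = _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+//   __m128 z = _mm_round_ps(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);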
+
+// Round the lower double-precision (64-bit) floating-point element in b using
+// the rounding parameter, store the result as a double-precision floating-point
+// element in the lower element of dst, and copy the upper element from a to the
+// upper element of dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_sd
+FORCE_INLINE __m128d _mm_round_sd(__m128d a, __m128d b, int rounding)
+{
+ return _mm_move_sd(a, _mm_round_pd(b, rounding));
+}
+
+// Round the lower single-precision (32-bit) floating-point element in b using
+// the rounding parameter, store the result as a single-precision floating-point
+// element in the lower element of dst, and copy the upper 3 packed elements
+// from a to the upper elements of dst. Rounding is done according to the
+// rounding[3:0] parameter, which can be one of:
+//     (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest,
+//                                                     // suppress exceptions
+//     (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)     // round down,
+//                                                     // suppress exceptions
+//     (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)     // round up,
+//                                                     // suppress exceptions
+//     (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)        // truncate,
+//                                                     // suppress exceptions
+//     _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ss
+FORCE_INLINE __m128 _mm_round_ss(__m128 a, __m128 b, int rounding)
+{
+ return _mm_move_ss(a, _mm_round_ps(b, rounding));
+}
+
+// Load 128-bits of integer data from memory into dst using a non-temporal
+// memory hint. mem_addr must be aligned on a 16-byte boundary or a
+// general-protection exception may be generated.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_load_si128
+FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p)
+{
+#if __has_builtin(__builtin_nontemporal_store)
+ return __builtin_nontemporal_load(p);
+#else
+ return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p));
+#endif
+}
+
+// Compute the bitwise NOT of a and then AND with a 128-bit vector containing
+// all 1's, and return 1 if the result is zero, otherwise return 0.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_ones
+FORCE_INLINE int _mm_test_all_ones(__m128i a)
+{
+ return (uint64_t) (vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) ==
+ ~(uint64_t) 0;
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and
+// mask, and return 1 if the result is zero, otherwise return 0.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_zeros
+FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
+{
+ int64x2_t a_and_mask =
+ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask));
+ return !(vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1));
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and
+// mask, and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute
+// the bitwise NOT of a and then AND with mask, and set CF to 1 if the result is
+// zero, otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
+// otherwise return 0.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_mix_ones_zeros
+// Note: Argument names may be wrong in the Intel intrinsics guide.
+FORCE_INLINE int _mm_test_mix_ones_zeros(__m128i a, __m128i mask)
+{
+ uint64x2_t v = vreinterpretq_u64_m128i(a);
+ uint64x2_t m = vreinterpretq_u64_m128i(mask);
+
+ // find ones (set-bits) and zeros (clear-bits) under clip mask
+ uint64x2_t ones = vandq_u64(m, v);
+ uint64x2_t zeros = vbicq_u64(m, v);
+
+ // If both 128-bit variables are populated (non-zero) then return 1.
+ // For comparison purposes, first compact each var down to 32-bits.
+ uint32x2_t reduced = vpmax_u32(vqmovn_u64(ones), vqmovn_u64(zeros));
+
+ // if folding minimum is non-zero then both vars must be non-zero
+ return (vget_lane_u32(vpmin_u32(reduced, reduced), 0) != 0);
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+// otherwise set CF to 0. Return the CF value.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_si128
+FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b)
+{
+ int64x2_t s64 =
+ vbicq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a));
+ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
+}
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+// otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
+// otherwise return 0.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_si128
+#define _mm_testnzc_si128(a, b) _mm_test_mix_ones_zeros(a, b)
+
+// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+// otherwise set CF to 0. Return the ZF value.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_si128
+FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b)
+{
+ int64x2_t s64 =
+ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b));
+ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
+}
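+
+// Usage note (illustrative): the three test intrinsics mirror the x86 ZF/CF
+// flags. For per-64-bit-lane masks m1 = {-1, 0} and m2 = {-1, -1}:
+//
+//   __m128i m1 = _mm_set_epi64x(0, -1);
+//   __m128i m2 = _mm_set_epi64x(-1, -1);
+//   _mm_testz_si128(m1, m2);   /* 0: m1 & m2 is non-zero (ZF clear) */
+//   _mm_testc_si128(m2, m1);   /* 1: ~m2 & m1 is zero (CF set)      */
+//   _mm_testnzc_si128(m1, m2); /* 1: both ZF and CF would be clear  */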
+
+/* SSE4.2 */
+
+static const uint16_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask16b[8] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+};
+static const uint8_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask8b[16] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+};
+
+/* specify the source data format */
+#define _SIDD_UBYTE_OPS 0x00 /* unsigned 8-bit characters */
+#define _SIDD_UWORD_OPS 0x01 /* unsigned 16-bit characters */
+#define _SIDD_SBYTE_OPS 0x02 /* signed 8-bit characters */
+#define _SIDD_SWORD_OPS 0x03 /* signed 16-bit characters */
+
+/* specify the comparison operation */
+#define _SIDD_CMP_EQUAL_ANY 0x00 /* compare equal any: strchr */
+#define _SIDD_CMP_RANGES 0x04 /* compare ranges */
+#define _SIDD_CMP_EQUAL_EACH 0x08 /* compare equal each: strcmp */
+#define _SIDD_CMP_EQUAL_ORDERED 0x0C /* compare equal ordered */
+
+/* specify the polarity */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_NEGATIVE_POLARITY 0x10 /* negate results */
+#define _SIDD_MASKED_NEGATIVE_POLARITY \
+ 0x30 /* negate results only before end of string */
+
+/* specify the output selection in _mm_cmpXstri */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* specify the output selection in _mm_cmpXstrm */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
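+
+// Example control words (illustrative): the imm8 passed to the _mm_cmp*str*
+// intrinsics is a bitwise OR of one value from each group above, e.g.
+//   _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH                /* strcmp-style */
+//   _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT
+//                                                         /* strchr-style */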
+
+/* Pattern Matching for C macros.
+ * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
+ */
+
+/* catenate */
+#define SSE2NEON_PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
+#define SSE2NEON_CAT(a, b) SSE2NEON_PRIMITIVE_CAT(a, b)
+
+#define SSE2NEON_IIF(c) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_IIF_, c)
+/* run the 2nd parameter */
+#define SSE2NEON_IIF_0(t, ...) __VA_ARGS__
+/* run the 1st parameter */
+#define SSE2NEON_IIF_1(t, ...) t
+
+#define SSE2NEON_COMPL(b) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_COMPL_, b)
+#define SSE2NEON_COMPL_0 1
+#define SSE2NEON_COMPL_1 0
+
+#define SSE2NEON_DEC(x) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_DEC_, x)
+#define SSE2NEON_DEC_1 0
+#define SSE2NEON_DEC_2 1
+#define SSE2NEON_DEC_3 2
+#define SSE2NEON_DEC_4 3
+#define SSE2NEON_DEC_5 4
+#define SSE2NEON_DEC_6 5
+#define SSE2NEON_DEC_7 6
+#define SSE2NEON_DEC_8 7
+#define SSE2NEON_DEC_9 8
+#define SSE2NEON_DEC_10 9
+#define SSE2NEON_DEC_11 10
+#define SSE2NEON_DEC_12 11
+#define SSE2NEON_DEC_13 12
+#define SSE2NEON_DEC_14 13
+#define SSE2NEON_DEC_15 14
+#define SSE2NEON_DEC_16 15
+
+/* detection */
+#define SSE2NEON_CHECK_N(x, n, ...) n
+#define SSE2NEON_CHECK(...) SSE2NEON_CHECK_N(__VA_ARGS__, 0, )
+#define SSE2NEON_PROBE(x) x, 1,
+
+#define SSE2NEON_NOT(x) SSE2NEON_CHECK(SSE2NEON_PRIMITIVE_CAT(SSE2NEON_NOT_, x))
+#define SSE2NEON_NOT_0 SSE2NEON_PROBE(~)
+
+#define SSE2NEON_BOOL(x) SSE2NEON_COMPL(SSE2NEON_NOT(x))
+#define SSE2NEON_IF(c) SSE2NEON_IIF(SSE2NEON_BOOL(c))
+
+#define SSE2NEON_EAT(...)
+#define SSE2NEON_EXPAND(...) __VA_ARGS__
+#define SSE2NEON_WHEN(c) SSE2NEON_IF(c)(SSE2NEON_EXPAND, SSE2NEON_EAT)
+
+/* recursion */
+/* deferred expression */
+#define SSE2NEON_EMPTY()
+#define SSE2NEON_DEFER(id) id SSE2NEON_EMPTY()
+#define SSE2NEON_OBSTRUCT(...) __VA_ARGS__ SSE2NEON_DEFER(SSE2NEON_EMPTY)()
+#define SSE2NEON_EXPAND(...) __VA_ARGS__
+
+#define SSE2NEON_EVAL(...) \
+ SSE2NEON_EVAL1(SSE2NEON_EVAL1(SSE2NEON_EVAL1(__VA_ARGS__)))
+#define SSE2NEON_EVAL1(...) \
+ SSE2NEON_EVAL2(SSE2NEON_EVAL2(SSE2NEON_EVAL2(__VA_ARGS__)))
+#define SSE2NEON_EVAL2(...) \
+ SSE2NEON_EVAL3(SSE2NEON_EVAL3(SSE2NEON_EVAL3(__VA_ARGS__)))
+#define SSE2NEON_EVAL3(...) __VA_ARGS__
+
+#define SSE2NEON_REPEAT(count, macro, ...) \
+ SSE2NEON_WHEN(count) \
+ (SSE2NEON_OBSTRUCT(SSE2NEON_REPEAT_INDIRECT)()( \
+ SSE2NEON_DEC(count), macro, \
+ __VA_ARGS__) SSE2NEON_OBSTRUCT(macro)(SSE2NEON_DEC(count), \
+ __VA_ARGS__))
+#define SSE2NEON_REPEAT_INDIRECT() SSE2NEON_REPEAT
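+
+// Illustrative expansion (informal): SSE2NEON_EVAL(SSE2NEON_REPEAT(3, M, t))
+// expands to "M(0, t) M(1, t) M(2, t)", which is how PCMPSTR_EQ and
+// PCMPSTR_RANGES below unroll one statement per vector lane.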
+
+#define SSE2NEON_SIZE_OF_byte 8
+#define SSE2NEON_NUMBER_OF_LANES_byte 16
+#define SSE2NEON_SIZE_OF_word 16
+#define SSE2NEON_NUMBER_OF_LANES_word 8
+
+#define SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE(i, type) \
+ mtx[i] = vreinterpretq_m128i_##type(vceqq_##type( \
+ vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i)), \
+ vreinterpretq_##type##_m128i(a)));
+
+#define SSE2NEON_FILL_LANE(i, type) \
+ vec_b[i] = \
+ vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i));
+
+#define PCMPSTR_RANGES(a, b, mtx, data_type_prefix, type_prefix, size, \
+ number_of_lanes, byte_or_word) \
+ do { \
+ SSE2NEON_CAT( \
+ data_type_prefix, \
+ SSE2NEON_CAT(size, \
+ SSE2NEON_CAT(x, SSE2NEON_CAT(number_of_lanes, _t)))) \
+ vec_b[number_of_lanes]; \
+ __m128i mask = SSE2NEON_IIF(byte_or_word)( \
+ vreinterpretq_m128i_u16(vdupq_n_u16(0xff)), \
+ vreinterpretq_m128i_u32(vdupq_n_u32(0xffff))); \
+ SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, SSE2NEON_FILL_LANE, \
+ SSE2NEON_CAT(type_prefix, size))) \
+ for (int i = 0; i < number_of_lanes; i++) { \
+ mtx[i] = SSE2NEON_CAT(vreinterpretq_m128i_u, \
+ size)(SSE2NEON_CAT(vbslq_u, size)( \
+ SSE2NEON_CAT(vreinterpretq_u, \
+ SSE2NEON_CAT(size, _m128i))(mask), \
+ SSE2NEON_CAT(vcgeq_, SSE2NEON_CAT(type_prefix, size))( \
+ vec_b[i], \
+ SSE2NEON_CAT( \
+ vreinterpretq_, \
+ SSE2NEON_CAT(type_prefix, \
+ SSE2NEON_CAT(size, _m128i(a))))), \
+ SSE2NEON_CAT(vcleq_, SSE2NEON_CAT(type_prefix, size))( \
+ vec_b[i], \
+ SSE2NEON_CAT( \
+ vreinterpretq_, \
+ SSE2NEON_CAT(type_prefix, \
+ SSE2NEON_CAT(size, _m128i(a))))))); \
+ } \
+ } while (0)
+
+#define PCMPSTR_EQ(a, b, mtx, size, number_of_lanes) \
+ do { \
+ SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, \
+ SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE, \
+ SSE2NEON_CAT(u, size))) \
+ } while (0)
+
+#define SSE2NEON_CMP_EQUAL_ANY_IMPL(type) \
+ static int _sse2neon_cmp_##type##_equal_any(__m128i a, int la, __m128i b, \
+ int lb) \
+ { \
+ __m128i mtx[16]; \
+ PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
+ return SSE2NEON_CAT( \
+ _sse2neon_aggregate_equal_any_, \
+ SSE2NEON_CAT( \
+ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+ SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
+ type))))(la, lb, mtx); \
+ }
+
+#define SSE2NEON_CMP_RANGES_IMPL(type, data_type, us, byte_or_word) \
+ static int _sse2neon_cmp_##us##type##_ranges(__m128i a, int la, __m128i b, \
+ int lb) \
+ { \
+ __m128i mtx[16]; \
+ PCMPSTR_RANGES( \
+ a, b, mtx, data_type, us, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), byte_or_word); \
+ return SSE2NEON_CAT( \
+ _sse2neon_aggregate_ranges_, \
+ SSE2NEON_CAT( \
+ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+ SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
+ type))))(la, lb, mtx); \
+ }
+
+#define SSE2NEON_CMP_EQUAL_ORDERED_IMPL(type) \
+ static int _sse2neon_cmp_##type##_equal_ordered(__m128i a, int la, \
+ __m128i b, int lb) \
+ { \
+ __m128i mtx[16]; \
+ PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
+ return SSE2NEON_CAT( \
+ _sse2neon_aggregate_equal_ordered_, \
+ SSE2NEON_CAT( \
+ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+ SSE2NEON_CAT(x, \
+ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type))))( \
+ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), la, lb, mtx); \
+ }
+
+static int _sse2neon_aggregate_equal_any_8x16(int la, int lb, __m128i mtx[16])
+{
+ int res = 0;
+ int m = (1 << la) - 1;
+ uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
+ uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
+ uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
+ uint8x16_t vec = vcombine_u8(t_lo, t_hi);
+ for (int j = 0; j < lb; j++) {
+ mtx[j] = vreinterpretq_m128i_u8(
+ vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
+ mtx[j] = vreinterpretq_m128i_u8(
+ vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
+ int tmp = _sse2neon_vaddvq_u8(vreinterpretq_u8_m128i(mtx[j])) ? 1 : 0;
+ res |= (tmp << j);
+ }
+ return res;
+}
+
+static int _sse2neon_aggregate_equal_any_16x8(int la, int lb, __m128i mtx[16])
+{
+ int res = 0;
+ int m = (1 << la) - 1;
+ uint16x8_t vec =
+ vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
+ for (int j = 0; j < lb; j++) {
+ mtx[j] = vreinterpretq_m128i_u16(
+ vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
+ mtx[j] = vreinterpretq_m128i_u16(
+ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
+ int tmp = _sse2neon_vaddvq_u16(vreinterpretq_u16_m128i(mtx[j])) ? 1 : 0;
+ res |= (tmp << j);
+ }
+ return res;
+}
+
+/* clang-format off */
+#define SSE2NEON_GENERATE_CMP_EQUAL_ANY(prefix) \
+ prefix##IMPL(byte) \
+ prefix##IMPL(word)
+/* clang-format on */
+
+SSE2NEON_GENERATE_CMP_EQUAL_ANY(SSE2NEON_CMP_EQUAL_ANY_)
+
+static int _sse2neon_aggregate_ranges_16x8(int la, int lb, __m128i mtx[16])
+{
+ int res = 0;
+ int m = (1 << la) - 1;
+ uint16x8_t vec =
+ vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
+ for (int j = 0; j < lb; j++) {
+ mtx[j] = vreinterpretq_m128i_u16(
+ vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
+ mtx[j] = vreinterpretq_m128i_u16(
+ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
+ __m128i tmp = vreinterpretq_m128i_u32(
+ vshrq_n_u32(vreinterpretq_u32_m128i(mtx[j]), 16));
+ uint32x4_t vec_res = vandq_u32(vreinterpretq_u32_m128i(mtx[j]),
+ vreinterpretq_u32_m128i(tmp));
+#if defined(__aarch64__) || defined(_M_ARM64)
+ int t = vaddvq_u32(vec_res) ? 1 : 0;
+#else
+ uint64x2_t sumh = vpaddlq_u32(vec_res);
+ int t = vgetq_lane_u64(sumh, 0) + vgetq_lane_u64(sumh, 1);
+#endif
+ res |= (t << j);
+ }
+ return res;
+}
+
+static int _sse2neon_aggregate_ranges_8x16(int la, int lb, __m128i mtx[16])
+{
+ int res = 0;
+ int m = (1 << la) - 1;
+ uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
+ uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
+ uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
+ uint8x16_t vec = vcombine_u8(t_lo, t_hi);
+ for (int j = 0; j < lb; j++) {
+ mtx[j] = vreinterpretq_m128i_u8(
+ vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
+ mtx[j] = vreinterpretq_m128i_u8(
+ vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
+ __m128i tmp = vreinterpretq_m128i_u16(
+ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 8));
+ uint16x8_t vec_res = vandq_u16(vreinterpretq_u16_m128i(mtx[j]),
+ vreinterpretq_u16_m128i(tmp));
+ int t = _sse2neon_vaddvq_u16(vec_res) ? 1 : 0;
+ res |= (t << j);
+ }
+ return res;
+}
+
+#define SSE2NEON_CMP_RANGES_IS_BYTE 1
+#define SSE2NEON_CMP_RANGES_IS_WORD 0
+
+/* clang-format off */
+#define SSE2NEON_GENERATE_CMP_RANGES(prefix) \
+ prefix##IMPL(byte, uint, u, prefix##IS_BYTE) \
+ prefix##IMPL(byte, int, s, prefix##IS_BYTE) \
+ prefix##IMPL(word, uint, u, prefix##IS_WORD) \
+ prefix##IMPL(word, int, s, prefix##IS_WORD)
+/* clang-format on */
+
+SSE2NEON_GENERATE_CMP_RANGES(SSE2NEON_CMP_RANGES_)
+
+#undef SSE2NEON_CMP_RANGES_IS_BYTE
+#undef SSE2NEON_CMP_RANGES_IS_WORD
+
+static int _sse2neon_cmp_byte_equal_each(__m128i a, int la, __m128i b, int lb)
+{
+ uint8x16_t mtx =
+ vceqq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b));
+ int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
+ int m1 = 0x10000 - (1 << la);
+ int tb = 0x10000 - (1 << lb);
+ uint8x8_t vec_mask, vec0_lo, vec0_hi, vec1_lo, vec1_hi;
+ uint8x8_t tmp_lo, tmp_hi, res_lo, res_hi;
+ vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
+ vec0_lo = vtst_u8(vdup_n_u8(m0), vec_mask);
+ vec0_hi = vtst_u8(vdup_n_u8(m0 >> 8), vec_mask);
+ vec1_lo = vtst_u8(vdup_n_u8(m1), vec_mask);
+ vec1_hi = vtst_u8(vdup_n_u8(m1 >> 8), vec_mask);
+ tmp_lo = vtst_u8(vdup_n_u8(tb), vec_mask);
+ tmp_hi = vtst_u8(vdup_n_u8(tb >> 8), vec_mask);
+
+ res_lo = vbsl_u8(vec0_lo, vdup_n_u8(0), vget_low_u8(mtx));
+ res_hi = vbsl_u8(vec0_hi, vdup_n_u8(0), vget_high_u8(mtx));
+ res_lo = vbsl_u8(vec1_lo, tmp_lo, res_lo);
+ res_hi = vbsl_u8(vec1_hi, tmp_hi, res_hi);
+ res_lo = vand_u8(res_lo, vec_mask);
+ res_hi = vand_u8(res_hi, vec_mask);
+
+ int res = _sse2neon_vaddv_u8(res_lo) + (_sse2neon_vaddv_u8(res_hi) << 8);
+ return res;
+}
+
+static int _sse2neon_cmp_word_equal_each(__m128i a, int la, __m128i b, int lb)
+{
+ uint16x8_t mtx =
+ vceqq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
+ int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
+ int m1 = 0x100 - (1 << la);
+ int tb = 0x100 - (1 << lb);
+ uint16x8_t vec_mask = vld1q_u16(_sse2neon_cmpestr_mask16b);
+ uint16x8_t vec0 = vtstq_u16(vdupq_n_u16(m0), vec_mask);
+ uint16x8_t vec1 = vtstq_u16(vdupq_n_u16(m1), vec_mask);
+ uint16x8_t tmp = vtstq_u16(vdupq_n_u16(tb), vec_mask);
+ mtx = vbslq_u16(vec0, vdupq_n_u16(0), mtx);
+ mtx = vbslq_u16(vec1, tmp, mtx);
+ mtx = vandq_u16(mtx, vec_mask);
+ return _sse2neon_vaddvq_u16(mtx);
+}
+
+#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE 1
+#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD 0
+
+#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IMPL(size, number_of_lanes, data_type) \
+ static int _sse2neon_aggregate_equal_ordered_##size##x##number_of_lanes( \
+ int bound, int la, int lb, __m128i mtx[16]) \
+ { \
+ int res = 0; \
+ int m1 = SSE2NEON_IIF(data_type)(0x10000, 0x100) - (1 << la); \
+ uint##size##x8_t vec_mask = SSE2NEON_IIF(data_type)( \
+ vld1_u##size(_sse2neon_cmpestr_mask##size##b), \
+ vld1q_u##size(_sse2neon_cmpestr_mask##size##b)); \
+ uint##size##x##number_of_lanes##_t vec1 = SSE2NEON_IIF(data_type)( \
+ vcombine_u##size(vtst_u##size(vdup_n_u##size(m1), vec_mask), \
+ vtst_u##size(vdup_n_u##size(m1 >> 8), vec_mask)), \
+ vtstq_u##size(vdupq_n_u##size(m1), vec_mask)); \
+ uint##size##x##number_of_lanes##_t vec_minusone = vdupq_n_u##size(-1); \
+ uint##size##x##number_of_lanes##_t vec_zero = vdupq_n_u##size(0); \
+ for (int j = 0; j < lb; j++) { \
+ mtx[j] = vreinterpretq_m128i_u##size(vbslq_u##size( \
+ vec1, vec_minusone, vreinterpretq_u##size##_m128i(mtx[j]))); \
+ } \
+ for (int j = lb; j < bound; j++) { \
+ mtx[j] = vreinterpretq_m128i_u##size( \
+ vbslq_u##size(vec1, vec_minusone, vec_zero)); \
+ } \
+ unsigned SSE2NEON_IIF(data_type)(char, short) *ptr = \
+ (unsigned SSE2NEON_IIF(data_type)(char, short) *) mtx; \
+ for (int i = 0; i < bound; i++) { \
+ int val = 1; \
+ for (int j = 0, k = i; j < bound - i && k < bound; j++, k++) \
+ val &= ptr[k * bound + j]; \
+ res += val << i; \
+ } \
+ return res; \
+ }
+
+/* clang-format off */
+#define SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(prefix) \
+ prefix##IMPL(8, 16, prefix##IS_UBYTE) \
+ prefix##IMPL(16, 8, prefix##IS_UWORD)
+/* clang-format on */
+
+SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(SSE2NEON_AGGREGATE_EQUAL_ORDER_)
+
+#undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE
+#undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD
+
+/* clang-format off */
+#define SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(prefix) \
+ prefix##IMPL(byte) \
+ prefix##IMPL(word)
+/* clang-format on */
+
+SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(SSE2NEON_CMP_EQUAL_ORDERED_)
+
+#define SSE2NEON_CMPESTR_LIST \
+ _(CMP_UBYTE_EQUAL_ANY, cmp_byte_equal_any) \
+ _(CMP_UWORD_EQUAL_ANY, cmp_word_equal_any) \
+ _(CMP_SBYTE_EQUAL_ANY, cmp_byte_equal_any) \
+ _(CMP_SWORD_EQUAL_ANY, cmp_word_equal_any) \
+ _(CMP_UBYTE_RANGES, cmp_ubyte_ranges) \
+ _(CMP_UWORD_RANGES, cmp_uword_ranges) \
+ _(CMP_SBYTE_RANGES, cmp_sbyte_ranges) \
+ _(CMP_SWORD_RANGES, cmp_sword_ranges) \
+ _(CMP_UBYTE_EQUAL_EACH, cmp_byte_equal_each) \
+ _(CMP_UWORD_EQUAL_EACH, cmp_word_equal_each) \
+ _(CMP_SBYTE_EQUAL_EACH, cmp_byte_equal_each) \
+ _(CMP_SWORD_EQUAL_EACH, cmp_word_equal_each) \
+ _(CMP_UBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
+ _(CMP_UWORD_EQUAL_ORDERED, cmp_word_equal_ordered) \
+ _(CMP_SBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
+ _(CMP_SWORD_EQUAL_ORDERED, cmp_word_equal_ordered)
+
+enum {
+#define _(name, func_suffix) name,
+ SSE2NEON_CMPESTR_LIST
+#undef _
+};
+typedef int (*cmpestr_func_t)(__m128i a, int la, __m128i b, int lb);
+static cmpestr_func_t _sse2neon_cmpfunc_table[] = {
+#define _(name, func_suffix) _sse2neon_##func_suffix,
+ SSE2NEON_CMPESTR_LIST
+#undef _
+};
+
+FORCE_INLINE int _sse2neon_sido_negative(int res, int lb, int imm8, int bound)
+{
+ switch (imm8 & 0x30) {
+ case _SIDD_NEGATIVE_POLARITY:
+ res ^= 0xffffffff;
+ break;
+ case _SIDD_MASKED_NEGATIVE_POLARITY:
+ res ^= (1 << lb) - 1;
+ break;
+ default:
+ break;
+ }
+
+ return res & ((bound == 8) ? 0xFF : 0xFFFF);
+}
+
+FORCE_INLINE int _sse2neon_clz(unsigned int x)
+{
+#ifdef _MSC_VER
+ unsigned long cnt = 0;
+ if (_BitScanReverse(&cnt, x))
+ return 31 - cnt;
+ return 32;
+#else
+ return x != 0 ? __builtin_clz(x) : 32;
+#endif
+}
+
+FORCE_INLINE int _sse2neon_ctz(unsigned int x)
+{
+#ifdef _MSC_VER
+ unsigned long cnt = 0;
+ if (_BitScanForward(&cnt, x))
+ return cnt;
+ return 32;
+#else
+ return x != 0 ? __builtin_ctz(x) : 32;
+#endif
+}
+
+FORCE_INLINE int _sse2neon_ctzll(unsigned long long x)
+{
+#ifdef _MSC_VER
+ unsigned long cnt;
+#if defined(SSE2NEON_HAS_BITSCAN64)
+ if (_BitScanForward64(&cnt, x))
+ return (int) (cnt);
+#else
+ if (_BitScanForward(&cnt, (unsigned long) (x)))
+ return (int) cnt;
+ if (_BitScanForward(&cnt, (unsigned long) (x >> 32)))
+ return (int) (cnt + 32);
+#endif /* SSE2NEON_HAS_BITSCAN64 */
+ return 64;
+#else /* assume GNU compatible compilers */
+ return x != 0 ? __builtin_ctzll(x) : 64;
+#endif
+}
+
+#define SSE2NEON_MIN(x, y) ((x) < (y) ? (x) : (y))
+
+#define SSE2NEON_CMPSTR_SET_UPPER(var, imm) \
+ const int var = (imm & 0x01) ? 8 : 16
+
+#define SSE2NEON_CMPESTRX_LEN_PAIR(a, b, la, lb) \
+ int tmp1 = la ^ (la >> 31); \
+ la = tmp1 - (la >> 31); \
+ int tmp2 = lb ^ (lb >> 31); \
+ lb = tmp2 - (lb >> 31); \
+ la = SSE2NEON_MIN(la, bound); \
+ lb = SSE2NEON_MIN(lb, bound)
+
+// Compare all pairs of characters in strings a and b,
+// then aggregate the result.
+// As the only difference between PCMPESTR* and PCMPISTR* is the way the string
+// lengths are obtained, we use SSE2NEON_CMP{E,I}STRX_LEN_PAIR to get the
+// lengths of strings a and b.
+#define SSE2NEON_COMP_AGG(a, b, la, lb, imm8, IE) \
+ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8); \
+ SSE2NEON_##IE##_LEN_PAIR(a, b, la, lb); \
+ int r2 = (_sse2neon_cmpfunc_table[imm8 & 0x0f])(a, la, b, lb); \
+ r2 = _sse2neon_sido_negative(r2, lb, imm8, bound)
+
+#define SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8) \
+ return (r2 == 0) ? bound \
+ : ((imm8 & 0x40) ? (31 - _sse2neon_clz(r2)) \
+ : _sse2neon_ctz(r2))
+
+#define SSE2NEON_CMPSTR_GENERATE_MASK(dst) \
+ __m128i dst = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
+ if (imm8 & 0x40) { \
+ if (bound == 8) { \
+ uint16x8_t tmp = vtstq_u16(vdupq_n_u16(r2), \
+ vld1q_u16(_sse2neon_cmpestr_mask16b)); \
+ dst = vreinterpretq_m128i_u16(vbslq_u16( \
+ tmp, vdupq_n_u16(-1), vreinterpretq_u16_m128i(dst))); \
+ } else { \
+ uint8x16_t vec_r2 = \
+ vcombine_u8(vdup_n_u8(r2), vdup_n_u8(r2 >> 8)); \
+ uint8x16_t tmp = \
+ vtstq_u8(vec_r2, vld1q_u8(_sse2neon_cmpestr_mask8b)); \
+ dst = vreinterpretq_m128i_u8( \
+ vbslq_u8(tmp, vdupq_n_u8(-1), vreinterpretq_u8_m128i(dst))); \
+ } \
+ } else { \
+ if (bound == 16) { \
+ dst = vreinterpretq_m128i_u16( \
+ vsetq_lane_u16(r2 & 0xffff, vreinterpretq_u16_m128i(dst), 0)); \
+ } else { \
+ dst = vreinterpretq_m128i_u8( \
+ vsetq_lane_u8(r2 & 0xff, vreinterpretq_u8_m128i(dst), 0)); \
+ } \
+ } \
+ return dst
+
+// Compare packed strings in a and b with lengths la and lb using the control
+// in imm8, and returns 1 if b did not contain a null character and the
+// resulting mask was zero, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestra
+FORCE_INLINE int _mm_cmpestra(__m128i a,
+ int la,
+ __m128i b,
+ int lb,
+ const int imm8)
+{
+ int lb_cpy = lb;
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+ return !r2 & (lb_cpy > bound);
+}
+
+// Compare packed strings in a and b with lengths la and lb using the control in
+// imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrc
+FORCE_INLINE int _mm_cmpestrc(__m128i a,
+ int la,
+ __m128i b,
+ int lb,
+ const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+ return r2 != 0;
+}
+
+// Compare packed strings in a and b with lengths la and lb using the control
+// in imm8, and store the generated index in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestri
+FORCE_INLINE int _mm_cmpestri(__m128i a,
+ int la,
+ __m128i b,
+ int lb,
+ const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+ SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
+}
+
+// Compare packed strings in a and b with lengths la and lb using the control
+// in imm8, and store the generated mask in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrm
+FORCE_INLINE __m128i
+_mm_cmpestrm(__m128i a, int la, __m128i b, int lb, const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+ SSE2NEON_CMPSTR_GENERATE_MASK(dst);
+}
+
+// Compare packed strings in a and b with lengths la and lb using the control in
+// imm8, and returns bit 0 of the resulting bit mask.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestro
+FORCE_INLINE int _mm_cmpestro(__m128i a,
+ int la,
+ __m128i b,
+ int lb,
+ const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+ return r2 & 1;
+}
+
+// Compare packed strings in a and b with lengths la and lb using the control in
+// imm8, and returns 1 if any character in a was null, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrs
+FORCE_INLINE int _mm_cmpestrs(__m128i a,
+ int la,
+ __m128i b,
+ int lb,
+ const int imm8)
+{
+ (void) a;
+ (void) b;
+ (void) lb;
+ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+ return la <= (bound - 1);
+}
+
+// Compare packed strings in a and b with lengths la and lb using the control in
+// imm8, and returns 1 if any character in b was null, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrz
+FORCE_INLINE int _mm_cmpestrz(__m128i a,
+ int la,
+ __m128i b,
+ int lb,
+ const int imm8)
+{
+ (void) a;
+ (void) b;
+ (void) la;
+ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+ return lb <= (bound - 1);
+}
+
+#define SSE2NEON_CMPISTRX_LENGTH(str, len, imm8) \
+ do { \
+ if (imm8 & 0x01) { \
+ uint16x8_t equal_mask_##str = \
+ vceqq_u16(vreinterpretq_u16_m128i(str), vdupq_n_u16(0)); \
+ uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
+ uint64_t matches_##str = \
+ vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
+ len = _sse2neon_ctzll(matches_##str) >> 3; \
+ } else { \
+ uint16x8_t equal_mask_##str = vreinterpretq_u16_u8( \
+ vceqq_u8(vreinterpretq_u8_m128i(str), vdupq_n_u8(0))); \
+ uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
+ uint64_t matches_##str = \
+ vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
+ len = _sse2neon_ctzll(matches_##str) >> 2; \
+ } \
+ } while (0)
+
+#define SSE2NEON_CMPISTRX_LEN_PAIR(a, b, la, lb) \
+ int la, lb; \
+ do { \
+ SSE2NEON_CMPISTRX_LENGTH(a, la, imm8); \
+ SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8); \
+ } while (0)
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and returns 1 if b did not contain a null character and the resulting
+// mask was zero, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistra
+FORCE_INLINE int _mm_cmpistra(__m128i a, __m128i b, const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+ return !r2 & (lb >= bound);
+}
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrc
+FORCE_INLINE int _mm_cmpistrc(__m128i a, __m128i b, const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+ return r2 != 0;
+}
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and store the generated index in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistri
+FORCE_INLINE int _mm_cmpistri(__m128i a, __m128i b, const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+ SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
+}
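+
+// Usage sketch (illustrative; `needle16` and `block16` are placeholder
+// pointers to 16-byte readable, NUL-padded buffers): find the index of the
+// first byte in block16 that matches any byte of needle16, or 16 if none
+// matches.
+//
+//   __m128i set   = _mm_loadu_si128((const __m128i *) needle16);
+//   __m128i block = _mm_loadu_si128((const __m128i *) block16);
+//   int idx = _mm_cmpistri(set, block,
+//                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
+//                              _SIDD_LEAST_SIGNIFICANT);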
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and store the generated mask in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrm
+FORCE_INLINE __m128i _mm_cmpistrm(__m128i a, __m128i b, const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+ SSE2NEON_CMPSTR_GENERATE_MASK(dst);
+}
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and returns bit 0 of the resulting bit mask.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistro
+FORCE_INLINE int _mm_cmpistro(__m128i a, __m128i b, const int imm8)
+{
+ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+ return r2 & 1;
+}
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and returns 1 if any character in a was null, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrs
+FORCE_INLINE int _mm_cmpistrs(__m128i a, __m128i b, const int imm8)
+{
+ (void) b;
+ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+ int la;
+ SSE2NEON_CMPISTRX_LENGTH(a, la, imm8);
+ return la <= (bound - 1);
+}
+
+// Compare packed strings with implicit lengths in a and b using the control in
+// imm8, and returns 1 if any character in b was null, and 0 otherwise.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrz
+FORCE_INLINE int _mm_cmpistrz(__m128i a, __m128i b, const int imm8)
+{
+ (void) a;
+ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+ int lb;
+ SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8);
+ return lb <= (bound - 1);
+}
+
+// Compare packed signed 64-bit integers in a and b for greater-than, and store
+// the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi64
+FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ return vreinterpretq_m128i_u64(
+ vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+#else
+ return vreinterpretq_m128i_s64(vshrq_n_s64(
+ vqsubq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a)),
+ 63));
+#endif
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 16-bit integer v, and stores the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u16
+FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
+ (defined(_M_ARM64) && !defined(__clang__))
+ crc = __crc32ch(crc, v);
+#else
+ crc = _mm_crc32_u8(crc, v & 0xff);
+ crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
+#endif
+ return crc;
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 32-bit integer v, and stores the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u32
+FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
+ (defined(_M_ARM64) && !defined(__clang__))
+ crc = __crc32cw(crc, v);
+#else
+ crc = _mm_crc32_u16(crc, v & 0xffff);
+ crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
+#endif
+ return crc;
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 64-bit integer v, and stores the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u64
+FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#elif (defined(_M_ARM64) && !defined(__clang__))
+ crc = __crc32cd((uint32_t) crc, v);
+#else
+ crc = _mm_crc32_u32((uint32_t) (crc), v & 0xffffffff);
+ crc = _mm_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff);
+#endif
+ return crc;
+}
+
+// Starting with the initial value in crc, accumulates a CRC32 value for
+// unsigned 8-bit integer v, and stores the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u8
+FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
+{
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+ __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
+ : [c] "+r"(crc)
+ : [v] "r"(v));
+#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
+ (defined(_M_ARM64) && !defined(__clang__))
+ crc = __crc32cb(crc, v);
+#else
+ crc ^= v;
+ for (int bit = 0; bit < 8; bit++) {
+ if (crc & 1)
+ crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
+ else
+ crc = (crc >> 1);
+ }
+#endif
+ return crc;
+}
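+
+// Usage sketch (illustrative; `buf` and `len` are placeholder names): the
+// conventional CRC-32C of a byte buffer starts from ~0 and inverts the final
+// value.
+//
+//   uint32_t crc = ~(uint32_t) 0;
+//   for (size_t i = 0; i < len; i++)
+//       crc = _mm_crc32_u8(crc, buf[i]);
+//   crc = ~crc;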
+
+/* AES */
+
+#if !defined(__ARM_FEATURE_CRYPTO) && (!defined(_M_ARM64) || defined(__clang__))
+/* clang-format off */
+#define SSE2NEON_AES_SBOX(w) \
+ { \
+ w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), \
+ w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), \
+ w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), \
+ w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), \
+ w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), \
+ w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), \
+ w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), \
+ w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), \
+ w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), \
+ w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), \
+ w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), \
+ w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), \
+ w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), \
+ w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), \
+ w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), \
+ w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), \
+ w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), \
+ w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), \
+ w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), \
+ w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), \
+ w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), \
+ w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), \
+ w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), \
+ w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), \
+ w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), \
+ w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), \
+ w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), \
+ w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), \
+ w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), \
+ w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), \
+ w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), \
+ w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e), \
+ w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), \
+ w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), \
+ w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), \
+ w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), \
+ w(0xb0), w(0x54), w(0xbb), w(0x16) \
+ }
+#define SSE2NEON_AES_RSBOX(w) \
+ { \
+ w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), \
+ w(0x38), w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), \
+ w(0xd7), w(0xfb), w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), \
+ w(0x2f), w(0xff), w(0x87), w(0x34), w(0x8e), w(0x43), w(0x44), \
+ w(0xc4), w(0xde), w(0xe9), w(0xcb), w(0x54), w(0x7b), w(0x94), \
+ w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d), w(0xee), w(0x4c), \
+ w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e), w(0x08), \
+ w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2), \
+ w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), \
+ w(0x25), w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), \
+ w(0x98), w(0x16), w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), \
+ w(0x65), w(0xb6), w(0x92), w(0x6c), w(0x70), w(0x48), w(0x50), \
+ w(0xfd), w(0xed), w(0xb9), w(0xda), w(0x5e), w(0x15), w(0x46), \
+ w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84), w(0x90), w(0xd8), \
+ w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a), w(0xf7), \
+ w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06), \
+ w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), \
+ w(0x02), w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), \
+ w(0x8a), w(0x6b), w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), \
+ w(0x67), w(0xdc), w(0xea), w(0x97), w(0xf2), w(0xcf), w(0xce), \
+ w(0xf0), w(0xb4), w(0xe6), w(0x73), w(0x96), w(0xac), w(0x74), \
+ w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85), w(0xe2), w(0xf9), \
+ w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e), w(0x47), \
+ w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89), \
+ w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), \
+ w(0x1b), w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), \
+ w(0x79), w(0x20), w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), \
+ w(0xcd), w(0x5a), w(0xf4), w(0x1f), w(0xdd), w(0xa8), w(0x33), \
+ w(0x88), w(0x07), w(0xc7), w(0x31), w(0xb1), w(0x12), w(0x10), \
+ w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f), w(0x60), w(0x51), \
+ w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d), w(0x2d), \
+ w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef), \
+ w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), \
+ w(0xb0), w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), \
+ w(0x99), w(0x61), w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), \
+ w(0x77), w(0xd6), w(0x26), w(0xe1), w(0x69), w(0x14), w(0x63), \
+ w(0x55), w(0x21), w(0x0c), w(0x7d) \
+ }
+/* clang-format on */
+
+/* X Macro trick. See https://en.wikipedia.org/wiki/X_Macro */
+#define SSE2NEON_AES_H0(x) (x)
+static const uint8_t _sse2neon_sbox[256] = SSE2NEON_AES_SBOX(SSE2NEON_AES_H0);
+static const uint8_t _sse2neon_rsbox[256] = SSE2NEON_AES_RSBOX(SSE2NEON_AES_H0);
+#undef SSE2NEON_AES_H0
+
+/* x_time function and matrix multiply function */
+#if !defined(__aarch64__) && !defined(_M_ARM64)
+#define SSE2NEON_XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b))
+#define SSE2NEON_MULTIPLY(x, y) \
+ (((y & 1) * x) ^ ((y >> 1 & 1) * SSE2NEON_XT(x)) ^ \
+ ((y >> 2 & 1) * SSE2NEON_XT(SSE2NEON_XT(x))) ^ \
+ ((y >> 3 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x)))) ^ \
+ ((y >> 4 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x))))))
+#endif
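+// For example, SSE2NEON_MULTIPLY(0x57, 0x13) yields 0xfe when truncated to a
+// byte, matching the {57} * {13} = {fe} GF(2^8) example in FIPS-197.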
+
+// In the absence of crypto extensions, implement aesenc using regular NEON
+// intrinsics instead. See:
+// https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
+// and https://www.workofard.com/2017/07/ghash-for-low-end-cores/
+// for more information.
+FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i RoundKey)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ static const uint8_t shift_rows[] = {
+ 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
+ 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
+ };
+ static const uint8_t ror32by8[] = {
+ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
+ };
+
+ uint8x16_t v;
+ uint8x16_t w = vreinterpretq_u8_m128i(a);
+
+ /* shift rows */
+ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
+
+ /* sub bytes */
+ // The 256-byte S-box is split into four 64-byte tables which are looked
+ // up in turn. Each subsequent lookup uses the table that starts 64 bytes
+ // further into the S-box, so the index arguments of `vqtbx4q_u8()` must
+ // be reduced by the same offset (0x40, 0x80, 0xc0) as the table loaded.
+ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
+ // 'w - 0x40' is equivalent to 'vsubq_u8(w, vdupq_n_u8(0x40))'
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
+
+ /* mix columns */
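+ // multiplying 'v' by 2 in GF(2^8), then folding in the rotated words,
+ // completes the MixColumns matrix multiplication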
+ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+
+ /* add round key */
+ return vreinterpretq_m128i_u8(w) ^ RoundKey;
+
+#else /* ARMv7-A implementation for a table-based AES */
+#define SSE2NEON_AES_B2W(b0, b1, b2, b3) \
+ (((uint32_t) (b3) << 24) | ((uint32_t) (b2) << 16) | \
+ ((uint32_t) (b1) << 8) | (uint32_t) (b0))
+// multiplying 'x' by 2 in GF(2^8)
+#define SSE2NEON_AES_F2(x) ((x << 1) ^ (((x >> 7) & 1) * 0x011b /* WPOLY */))
+// multiplying 'x' by 3 in GF(2^8)
+#define SSE2NEON_AES_F3(x) (SSE2NEON_AES_F2(x) ^ x)
+#define SSE2NEON_AES_U0(p) \
+ SSE2NEON_AES_B2W(SSE2NEON_AES_F2(p), p, p, SSE2NEON_AES_F3(p))
+#define SSE2NEON_AES_U1(p) \
+ SSE2NEON_AES_B2W(SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p, p)
+#define SSE2NEON_AES_U2(p) \
+ SSE2NEON_AES_B2W(p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p)
+#define SSE2NEON_AES_U3(p) \
+ SSE2NEON_AES_B2W(p, p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p))
+
+ // These are the classic AES T-tables: for every byte value they combine
+ // sub_bytes() with its mix_columns() contribution, while shift_rows() is
+ // handled by the byte selection in the lookups below.
+ static const uint32_t ALIGN_STRUCT(16) aes_table[4][256] = {
+ SSE2NEON_AES_SBOX(SSE2NEON_AES_U0),
+ SSE2NEON_AES_SBOX(SSE2NEON_AES_U1),
+ SSE2NEON_AES_SBOX(SSE2NEON_AES_U2),
+ SSE2NEON_AES_SBOX(SSE2NEON_AES_U3),
+ };
+#undef SSE2NEON_AES_B2W
+#undef SSE2NEON_AES_F2
+#undef SSE2NEON_AES_F3
+#undef SSE2NEON_AES_U0
+#undef SSE2NEON_AES_U1
+#undef SSE2NEON_AES_U2
+#undef SSE2NEON_AES_U3
+
+ uint32_t x0 = _mm_cvtsi128_si32(a); // get a[31:0]
+ uint32_t x1 =
+ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55)); // get a[63:32]
+ uint32_t x2 =
+ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xAA)); // get a[95:64]
+ uint32_t x3 =
+ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF)); // get a[127:96]
+
+ // finish the modulo addition step in mix_columns()
+ __m128i out = _mm_set_epi32(
+ (aes_table[0][x3 & 0xff] ^ aes_table[1][(x0 >> 8) & 0xff] ^
+ aes_table[2][(x1 >> 16) & 0xff] ^ aes_table[3][x2 >> 24]),
+ (aes_table[0][x2 & 0xff] ^ aes_table[1][(x3 >> 8) & 0xff] ^
+ aes_table[2][(x0 >> 16) & 0xff] ^ aes_table[3][x1 >> 24]),
+ (aes_table[0][x1 & 0xff] ^ aes_table[1][(x2 >> 8) & 0xff] ^
+ aes_table[2][(x3 >> 16) & 0xff] ^ aes_table[3][x0 >> 24]),
+ (aes_table[0][x0 & 0xff] ^ aes_table[1][(x1 >> 8) & 0xff] ^
+ aes_table[2][(x2 >> 16) & 0xff] ^ aes_table[3][x3 >> 24]));
+
+ return _mm_xor_si128(out, RoundKey);
+#endif
+}
+
+// Perform one round of an AES decryption flow on data (state) in a using the
+// round key in RoundKey, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
+FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
+{
+#if defined(__aarch64__)
+ static const uint8_t inv_shift_rows[] = {
+ 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
+ 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
+ };
+ static const uint8_t ror32by8[] = {
+ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
+ };
+
+ uint8x16_t v;
+ uint8x16_t w = vreinterpretq_u8_m128i(a);
+
+ // inverse shift rows
+ w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
+
+ // inverse sub bytes
+ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
+
+ // inverse mix columns
+ // multiplying 'v' by 4 in GF(2^8)
+ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+ w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
+ v ^= w;
+ v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
+
+ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) &
+ 0x1b); // multiplying 'v' by 2 in GF(2^8)
+ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+
+ // add round key
+ return vreinterpretq_m128i_u8(w) ^ RoundKey;
+
+#else /* ARMv7-A NEON implementation */
+ /* FIXME: optimize for NEON */
+ uint8_t i, e, f, g, h, v[4][4];
+ uint8_t *_a = (uint8_t *) &a;
+ for (i = 0; i < 16; ++i) {
+ v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
+ }
+
+ // inverse mix columns
+ for (i = 0; i < 4; ++i) {
+ e = v[i][0];
+ f = v[i][1];
+ g = v[i][2];
+ h = v[i][3];
+
+ v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
+ SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
+ v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
+ SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
+ v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
+ SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
+ v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
+ SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
+ }
+
+ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
+#endif
+}
+
+// Perform the last round of an AES encryption flow on data (state) in a using
+// the round key in RoundKey, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
+FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
+{
+#if defined(__aarch64__)
+ static const uint8_t shift_rows[] = {
+ 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
+ 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
+ };
+
+ uint8x16_t v;
+ uint8x16_t w = vreinterpretq_u8_m128i(a);
+
+ // shift rows
+ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
+
+ // sub bytes
+ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
+
+ // add round key
+ return vreinterpretq_m128i_u8(v) ^ RoundKey;
+
+#else /* ARMv7-A implementation */
+ uint8_t v[16] = {
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 0)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 5)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 10)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 15)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 4)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 9)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 14)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 3)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 8)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 13)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 2)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 7)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 12)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 1)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 6)],
+ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 11)],
+ };
+
+ return vreinterpretq_m128i_u8(vld1q_u8(v)) ^ RoundKey;
+#endif
+}
+
+// Perform the last round of an AES decryption flow on data (state) in a using
+// the round key in RoundKey, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
+FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
+{
+#if defined(__aarch64__)
+ static const uint8_t inv_shift_rows[] = {
+ 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
+ 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
+ };
+
+ uint8x16_t v;
+ uint8x16_t w = vreinterpretq_u8_m128i(a);
+
+ // inverse shift rows
+ w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
+
+ // inverse sub bytes
+ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
+
+ // add round key
+ return vreinterpretq_m128i_u8(v) ^ RoundKey;
+
+#else /* ARMv7-A NEON implementation */
+ /* FIXME: optimize for NEON */
+ uint8_t v[4][4];
+ uint8_t *_a = (uint8_t *) &a;
+ for (int i = 0; i < 16; ++i) {
+ v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
+ }
+
+ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
+#endif
+}
+
+// Perform the InvMixColumns transformation on a and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
+FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
+{
+#if defined(__aarch64__)
+ static const uint8_t ror32by8[] = {
+ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
+ };
+ uint8x16_t v = vreinterpretq_u8_m128i(a);
+ uint8x16_t w;
+
+ // multiplying 'v' by 4 in GF(2^8)
+ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+ w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
+ v ^= w;
+ v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
+
+ // multiplying 'v' by 2 in GF(2^8)
+ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+ return vreinterpretq_m128i_u8(w);
+
+#else /* ARMv7-A NEON implementation */
+ uint8_t i, e, f, g, h, v[4][4];
+ vst1q_u8((uint8_t *) v, vreinterpretq_u8_m128i(a));
+ for (i = 0; i < 4; ++i) {
+ e = v[i][0];
+ f = v[i][1];
+ g = v[i][2];
+ h = v[i][3];
+
+ v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
+ SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
+ v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
+ SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
+ v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
+ SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
+ v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
+ SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
+ }
+
+ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v));
+#endif
+}
+
+// Assist in expanding the AES cipher key by computing steps towards generating
+// a round key for encryption cipher using data from a and an 8-bit round
+// constant specified in imm8, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
+//
+// Emits the Advanced Encryption Standard (AES) instruction aeskeygenassist.
+// This instruction generates a round key for AES encryption. See
+// https://kazakov.life/2017/11/01/cryptocurrency-mining-on-ios-devices/
+// for details.
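+//
+// With X1 = a[63:32] and X3 = a[127:96], the result is laid out as:
+//   dst[31:0]   = SubWord(X1)
+//   dst[63:32]  = RotWord(SubWord(X1)) ^ rcon
+//   dst[95:64]  = SubWord(X3)
+//   dst[127:96] = RotWord(SubWord(X3)) ^ rcon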
+FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
+{
+#if defined(__aarch64__)
+ uint8x16_t _a = vreinterpretq_u8_m128i(a);
+ uint8x16_t v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), _a);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), _a - 0x40);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), _a - 0x80);
+ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), _a - 0xc0);
+
+ uint32x4_t v_u32 = vreinterpretq_u32_u8(v);
+ uint32x4_t ror_v = vorrq_u32(vshrq_n_u32(v_u32, 8), vshlq_n_u32(v_u32, 24));
+ uint32x4_t ror_xor_v = veorq_u32(ror_v, vdupq_n_u32(rcon));
+
+ return vreinterpretq_m128i_u32(vtrn2q_u32(v_u32, ror_xor_v));
+
+#else /* ARMv7-A NEON implementation */
+ uint32_t X1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55));
+ uint32_t X3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF));
+ for (int i = 0; i < 4; ++i) {
+ ((uint8_t *) &X1)[i] = _sse2neon_sbox[((uint8_t *) &X1)[i]];
+ ((uint8_t *) &X3)[i] = _sse2neon_sbox[((uint8_t *) &X3)[i]];
+ }
+ return _mm_set_epi32(((X3 >> 8) | (X3 << 24)) ^ rcon, X3,
+ ((X1 >> 8) | (X1 << 24)) ^ rcon, X1);
+#endif
+}
+#undef SSE2NEON_AES_SBOX
+#undef SSE2NEON_AES_RSBOX
+
+#if !defined(__aarch64__) && !defined(_M_ARM64)
+#undef SSE2NEON_XT
+#undef SSE2NEON_MULTIPLY
+#endif
+
+#else /* __ARM_FEATURE_CRYPTO */
+// Implements equivalent of 'aesenc' by combining AESE (with an empty key) and
+// AESMC and then manually applying the real key as an xor operation. This
+// unfortunately means an additional xor op; the compiler should be able to
+// optimize this away for repeated calls however. See
+// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
+// for more details.
+FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
+{
+ return vreinterpretq_m128i_u8(veorq_u8(
+ vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+ vreinterpretq_u8_m128i(b)));
+}
+
+// Perform one round of an AES decryption flow on data (state) in a using the
+// round key in RoundKey, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
+FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
+{
+ return vreinterpretq_m128i_u8(veorq_u8(
+ vaesimcq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+ vreinterpretq_u8_m128i(RoundKey)));
+}
+
+// Perform the last round of an AES encryption flow on data (state) in a using
+// the round key in RoundKey, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
+FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
+{
+ return _mm_xor_si128(vreinterpretq_m128i_u8(vaeseq_u8(
+ vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+ RoundKey);
+}
+
+// Perform the last round of an AES decryption flow on data (state) in a using
+// the round key in RoundKey, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
+FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
+{
+ return vreinterpretq_m128i_u8(
+ veorq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)),
+ vreinterpretq_u8_m128i(RoundKey)));
+}
+
+// Perform the InvMixColumns transformation on a and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
+FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
+{
+ return vreinterpretq_m128i_u8(vaesimcq_u8(vreinterpretq_u8_m128i(a)));
+}
+
+// Assist in expanding the AES cipher key by computing steps towards generating
+// a round key for encryption cipher using data from a and an 8-bit round
+// constant specified in imm8, and store the result in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
+FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
+{
+ // AESE does ShiftRows and SubBytes on A
+ uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0));
+
+#ifndef _MSC_VER
+ uint8x16_t dest = {
+ // Undo ShiftRows step from AESE and extract X1 and X3
+ u8[0x4], u8[0x1], u8[0xE], u8[0xB], // SubBytes(X1)
+ u8[0x1], u8[0xE], u8[0xB], u8[0x4], // ROT(SubBytes(X1))
+ u8[0xC], u8[0x9], u8[0x6], u8[0x3], // SubBytes(X3)
+ u8[0x9], u8[0x6], u8[0x3], u8[0xC], // ROT(SubBytes(X3))
+ };
+ uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon};
+ return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r);
+#else
+ // We have to do this hack because MSVC is strictly adhering to the CPP
+ // standard, in particular C++03 8.5.1 sub-section 15, which states that
+ // unions must be initialized by their first member type.
+
+ // As per the Windows ARM64 ABI, it is always little endian, so this works
+ __n128 dest{
+ ((uint64_t) u8.n128_u8[0x4] << 0) | ((uint64_t) u8.n128_u8[0x1] << 8) |
+ ((uint64_t) u8.n128_u8[0xE] << 16) |
+ ((uint64_t) u8.n128_u8[0xB] << 24) |
+ ((uint64_t) u8.n128_u8[0x1] << 32) |
+ ((uint64_t) u8.n128_u8[0xE] << 40) |
+ ((uint64_t) u8.n128_u8[0xB] << 48) |
+ ((uint64_t) u8.n128_u8[0x4] << 56),
+ ((uint64_t) u8.n128_u8[0xC] << 0) | ((uint64_t) u8.n128_u8[0x9] << 8) |
+ ((uint64_t) u8.n128_u8[0x6] << 16) |
+ ((uint64_t) u8.n128_u8[0x3] << 24) |
+ ((uint64_t) u8.n128_u8[0x9] << 32) |
+ ((uint64_t) u8.n128_u8[0x6] << 40) |
+ ((uint64_t) u8.n128_u8[0x3] << 48) |
+ ((uint64_t) u8.n128_u8[0xC] << 56)};
+
+ dest.n128_u32[1] = dest.n128_u32[1] ^ rcon;
+ dest.n128_u32[3] = dest.n128_u32[3] ^ rcon;
+
+ return dest;
+#endif
+}
+#endif
+
+/* Others */
+
+// Perform a carry-less multiplication of two 64-bit integers, selected from a
+// and b according to imm8, and store the results in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clmulepi64_si128
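+// Bit 0 of imm8 selects the low (0) or high (1) 64 bits of a, and bit 4
+// selects the half of b; all other bits of imm8 are ignored.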
+FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
+{
+ uint64x2_t a = vreinterpretq_u64_m128i(_a);
+ uint64x2_t b = vreinterpretq_u64_m128i(_b);
+ switch (imm & 0x11) {
+ case 0x00:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
+ case 0x01:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
+ case 0x10:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
+ case 0x11:
+ return vreinterpretq_m128i_u64(
+ _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b)));
+ default:
+ abort();
+ }
+}
+
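+// Return whether the denormals-are-zero mode is enabled, by reading bit 24
+// (the FZ, flush-to-zero, bit) of FPCR/FPSCR.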
+FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode(void)
+{
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ r.value = _sse2neon_get_fpcr();
+#else
+ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
+ return r.field.bit24 ? _MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF;
+}
+
+// Count the number of bits set to 1 in unsigned 32-bit integer a, and
+// return that count in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u32
+FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+#if __has_builtin(__builtin_popcount)
+ return __builtin_popcount(a);
+#elif defined(_MSC_VER)
+ return _CountOneBits(a);
+#else
+ return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a)));
+#endif
+#else
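+ // vcnt_u8 gives a per-byte bit count; the vpaddl steps pairwise widen
+ // and accumulate those counts, and lane 0 of the final vector holds the
+ // popcount of the four bytes of 'a'.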
+ uint32_t count = 0;
+ uint8x8_t input_val, count8x8_val;
+ uint16x4_t count16x4_val;
+ uint32x2_t count32x2_val;
+
+ input_val = vld1_u8((uint8_t *) &a);
+ count8x8_val = vcnt_u8(input_val);
+ count16x4_val = vpaddl_u8(count8x8_val);
+ count32x2_val = vpaddl_u16(count16x4_val);
+
+ vst1_u32(&count, count32x2_val);
+ return count;
+#endif
+}
+
+// Count the number of bits set to 1 in unsigned 64-bit integer a, and
+// return that count in dst.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u64
+FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+#if __has_builtin(__builtin_popcountll)
+ return __builtin_popcountll(a);
+#elif defined(_MSC_VER)
+ return _CountOneBits64(a);
+#else
+ return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a)));
+#endif
+#else
+ uint64_t count = 0;
+ uint8x8_t input_val, count8x8_val;
+ uint16x4_t count16x4_val;
+ uint32x2_t count32x2_val;
+ uint64x1_t count64x1_val;
+
+ input_val = vld1_u8((uint8_t *) &a);
+ count8x8_val = vcnt_u8(input_val);
+ count16x4_val = vpaddl_u8(count8x8_val);
+ count32x2_val = vpaddl_u16(count16x4_val);
+ count64x1_val = vpaddl_u32(count32x2_val);
+ vst1_u64(&count, count64x1_val);
+ return count;
+#endif
+}
+
+FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag)
+{
+ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
+ // regardless of the value of the FZ bit.
+ union {
+ fpcr_bitfield field;
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t value;
+#else
+ uint32_t value;
+#endif
+ } r;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ r.value = _sse2neon_get_fpcr();
+#else
+ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+#endif
+
+ r.field.bit24 = (flag & _MM_DENORMALS_ZERO_MASK) == _MM_DENORMALS_ZERO_ON;
+
+#if defined(__aarch64__) || defined(_M_ARM64)
+ _sse2neon_set_fpcr(r.value);
+#else
+ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
+#endif
+}
+
+// Return the current 64-bit value of the processor's time-stamp counter.
+// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=rdtsc
+FORCE_INLINE uint64_t _rdtsc(void)
+{
+#if defined(__aarch64__) || defined(_M_ARM64)
+ uint64_t val;
+
+ /* According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the
+ * system counter is at least 56 bits wide; from Armv8.6, the counter
+ * must be 64 bits wide. So the system counter may be narrower than 64
+ * bits, in which case the kernel reports it through the
+ * 'cap_user_time_short' flag.
+ */
+#if defined(_MSC_VER)
+ val = _ReadStatusReg(ARM64_SYSREG(3, 3, 14, 0, 2));
+#else
+ __asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(val));
+#endif
+
+ return val;
+#else
+ uint32_t pmccntr, pmuseren, pmcntenset;
+ // Read the user mode Performance Monitoring Unit (PMU)
+ // User Enable Register (PMUSERENR) access permissions.
+ __asm__ __volatile__("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
+ if (pmuseren & 1) { // Allows reading PMUSERENR for user mode code.
+ __asm__ __volatile__("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
+ if (pmcntenset & 0x80000000UL) { // Is it counting?
+ __asm__ __volatile__("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
+ // The counter is set up to count every 64th cycle
+ return (uint64_t) (pmccntr) << 6;
+ }
+ }
+
+ // Fallback to syscall as we can't enable PMUSERENR in user mode.
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return (uint64_t) (tv.tv_sec) * 1000000 + tv.tv_usec;
+#endif
+}
+
+#if defined(__GNUC__) || defined(__clang__)
+#pragma pop_macro("ALIGN_STRUCT")
+#pragma pop_macro("FORCE_INLINE")
+#endif
+
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC pop_options
+#endif
+
+#endif
--- /dev/null
+#include <assert.h>
+#include <float.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdalign.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <utility>
+
+#include "binding.h"
+#include "impl.h"
+
+// Try 10,000 random floating point values for each test we run
+#define MAX_TEST_VALUE 10000
+
+/* Pattern Matching for C macros.
+ * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
+ */
+
+/* catenate */
+#define PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
+
+#define IIF(c) PRIMITIVE_CAT(IIF_, c)
+/* run the 2nd parameter */
+#define IIF_0(t, ...) __VA_ARGS__
+/* run the 1st parameter */
+#define IIF_1(t, ...) t
+
+// This program is a set of unit tests ensuring that each SSE call provides
+// the output we expect. If an assert fires, something did not match up.
+//
+// Functions with "test_" prefix will be called in runSingleTest.
+namespace SSE2NEON
+{
+// Forward declaration
+class SSE2NEONTestImpl : public SSE2NEONTest
+{
+public:
+ SSE2NEONTestImpl(void);
+ result_t loadTestFloatPointers(uint32_t i);
+ result_t loadTestIntPointers(uint32_t i);
+ result_t runSingleTest(InstructionTest test, uint32_t i);
+
+ float *mTestFloatPointer1;
+ float *mTestFloatPointer2;
+ int32_t *mTestIntPointer1;
+ int32_t *mTestIntPointer2;
+ float mTestFloats[MAX_TEST_VALUE];
+ int32_t mTestInts[MAX_TEST_VALUE];
+
+ virtual ~SSE2NEONTestImpl(void)
+ {
+ platformAlignedFree(mTestFloatPointer1);
+ platformAlignedFree(mTestFloatPointer2);
+ platformAlignedFree(mTestIntPointer1);
+ platformAlignedFree(mTestIntPointer2);
+ }
+ virtual void release(void) { delete this; }
+ virtual result_t runTest(InstructionTest test)
+ {
+ result_t ret = TEST_SUCCESS;
+
+ // Test a whole bunch of values
+ for (uint32_t i = 0; i < (MAX_TEST_VALUE - 8); i++) {
+ ret = loadTestFloatPointers(i); // Load some random float values
+ if (ret == TEST_FAIL)
+ break; // load test float failed??
+ ret = loadTestIntPointers(i); // load some random int values
+ if (ret == TEST_FAIL)
+ break; // load test int failed??
+ // If we are testing the reciprocal, then invert the input data
+ // (easier for debugging)
+ if (test == it_mm_rcp_ps) {
+ mTestFloatPointer1[0] = 1.0f / mTestFloatPointer1[0];
+ mTestFloatPointer1[1] = 1.0f / mTestFloatPointer1[1];
+ mTestFloatPointer1[2] = 1.0f / mTestFloatPointer1[2];
+ mTestFloatPointer1[3] = 1.0f / mTestFloatPointer1[3];
+ }
+ if (test == it_mm_rcp_ps || test == it_mm_rcp_ss ||
+ test == it_mm_rsqrt_ps || test == it_mm_rsqrt_ss) {
+ if ((rand() & 3) == 0) {
+ uint32_t r1 = rand() & 3;
+ uint32_t r2 = rand() & 3;
+ uint32_t r3 = rand() & 3;
+ uint32_t r4 = rand() & 3;
+ uint32_t r5 = rand() & 3;
+ uint32_t r6 = rand() & 3;
+ uint32_t r7 = rand() & 3;
+ uint32_t r8 = rand() & 3;
+ mTestFloatPointer1[r1] = 0.0f;
+ mTestFloatPointer1[r2] = 0.0f;
+ mTestFloatPointer1[r3] = 0.0f;
+ mTestFloatPointer1[r4] = 0.0f;
+ mTestFloatPointer1[r5] = -0.0f;
+ mTestFloatPointer1[r6] = -0.0f;
+ mTestFloatPointer1[r7] = -0.0f;
+ mTestFloatPointer1[r8] = -0.0f;
+ }
+ }
+ if (test == it_mm_cmpge_ps || test == it_mm_cmpge_ss ||
+ test == it_mm_cmple_ps || test == it_mm_cmple_ss ||
+ test == it_mm_cmpeq_ps || test == it_mm_cmpeq_ss) {
+ // Make sure at least one value is the same.
+ mTestFloatPointer1[3] = mTestFloatPointer2[3];
+ }
+
+ if (test == it_mm_cmpord_ps || test == it_mm_cmpord_ss ||
+ test == it_mm_cmpunord_ps || test == it_mm_cmpunord_ss ||
+ test == it_mm_cmpeq_ps || test == it_mm_cmpeq_ss ||
+ test == it_mm_cmpge_ps || test == it_mm_cmpge_ss ||
+ test == it_mm_cmpgt_ps || test == it_mm_cmpgt_ss ||
+ test == it_mm_cmple_ps || test == it_mm_cmple_ss ||
+ test == it_mm_cmplt_ps || test == it_mm_cmplt_ss ||
+ test == it_mm_cmpneq_ps || test == it_mm_cmpneq_ss ||
+ test == it_mm_cmpnge_ps || test == it_mm_cmpnge_ss ||
+ test == it_mm_cmpngt_ps || test == it_mm_cmpngt_ss ||
+ test == it_mm_cmpnle_ps || test == it_mm_cmpnle_ss ||
+ test == it_mm_cmpnlt_ps || test == it_mm_cmpnlt_ss ||
+ test == it_mm_comieq_ss || test == it_mm_ucomieq_ss ||
+ test == it_mm_comige_ss || test == it_mm_ucomige_ss ||
+ test == it_mm_comigt_ss || test == it_mm_ucomigt_ss ||
+ test == it_mm_comile_ss || test == it_mm_ucomile_ss ||
+ test == it_mm_comilt_ss || test == it_mm_ucomilt_ss ||
+ test == it_mm_comineq_ss || test == it_mm_ucomineq_ss) {
+ // Make sure the NaN values are included in the testing
+ // one out of four times.
+ if ((rand() & 3) == 0) {
+ uint32_t r1 = rand() & 3;
+ uint32_t r2 = rand() & 3;
+ mTestFloatPointer1[r1] = nanf("");
+ mTestFloatPointer2[r2] = nanf("");
+ }
+ }
+
+ if (test == it_mm_cmpord_pd || test == it_mm_cmpord_sd ||
+ test == it_mm_cmpunord_pd || test == it_mm_cmpunord_sd ||
+ test == it_mm_cmpeq_pd || test == it_mm_cmpeq_sd ||
+ test == it_mm_cmpge_pd || test == it_mm_cmpge_sd ||
+ test == it_mm_cmpgt_pd || test == it_mm_cmpgt_sd ||
+ test == it_mm_cmple_pd || test == it_mm_cmple_sd ||
+ test == it_mm_cmplt_pd || test == it_mm_cmplt_sd ||
+ test == it_mm_cmpneq_pd || test == it_mm_cmpneq_sd ||
+ test == it_mm_cmpnge_pd || test == it_mm_cmpnge_sd ||
+ test == it_mm_cmpngt_pd || test == it_mm_cmpngt_sd ||
+ test == it_mm_cmpnle_pd || test == it_mm_cmpnle_sd ||
+ test == it_mm_cmpnlt_pd || test == it_mm_cmpnlt_sd ||
+ test == it_mm_comieq_sd || test == it_mm_ucomieq_sd ||
+ test == it_mm_comige_sd || test == it_mm_ucomige_sd ||
+ test == it_mm_comigt_sd || test == it_mm_ucomigt_sd ||
+ test == it_mm_comile_sd || test == it_mm_ucomile_sd ||
+ test == it_mm_comilt_sd || test == it_mm_ucomilt_sd ||
+ test == it_mm_comineq_sd || test == it_mm_ucomineq_sd) {
+ // Make sure the NaN values are included in the testing
+ // one out of four times.
+ if ((rand() & 3) == 0) {
+ // FIXME:
+ // The argument "0xFFFFFFFFFFFF" is a tricky workaround to
+ // set the NaN value for doubles. The code is not intuitive
+ // and should be fixed in the future.
+ uint32_t r1 = ((rand() & 1) << 1) + 1;
+ uint32_t r2 = ((rand() & 1) << 1) + 1;
+ mTestFloatPointer1[r1] = nanf("0xFFFFFFFFFFFF");
+ mTestFloatPointer2[r2] = nanf("0xFFFFFFFFFFFF");
+ }
+ }
+
+ if (test == it_mm_max_pd || test == it_mm_max_sd ||
+ test == it_mm_min_pd || test == it_mm_min_sd) {
+ // Make sure the positive/negative infinity values are included
+ // in the testing one out of four times.
+ if ((rand() & 3) == 0) {
+ uint32_t r1 = ((rand() & 1) << 1) + 1;
+ uint32_t r2 = ((rand() & 1) << 1) + 1;
+ uint32_t r3 = ((rand() & 1) << 1) + 1;
+ uint32_t r4 = ((rand() & 1) << 1) + 1;
+ mTestFloatPointer1[r1] = INFINITY;
+ mTestFloatPointer2[r2] = INFINITY;
+ mTestFloatPointer1[r3] = -INFINITY;
+ mTestFloatPointer1[r4] = -INFINITY;
+ }
+ }
+
+#if SSE2NEON_PRECISE_MINMAX
+ if (test == it_mm_max_ps || test == it_mm_max_ss ||
+ test == it_mm_min_ps || test == it_mm_min_ss) {
+ // Make sure the NaN values are included in the testing
+ // one out of four times.
+ if ((rand() & 3) == 0) {
+ uint32_t r1 = rand() & 3;
+ uint32_t r2 = rand() & 3;
+ mTestFloatPointer1[r1] = nanf("");
+ mTestFloatPointer2[r2] = nanf("");
+ }
+ }
+
+ if (test == it_mm_max_pd || test == it_mm_max_sd ||
+ test == it_mm_min_pd || test == it_mm_min_sd) {
+ // Make sure the NaN values are included in the testing
+ // one out of four times.
+ if ((rand() & 3) == 0) {
+ // FIXME:
+ // The argument "0xFFFFFFFFFFFF" is a tricky workaround to
+ // set the NaN value for doubles. The code is not intuitive
+ // and should be fixed in the future.
+ uint32_t r1 = ((rand() & 1) << 1) + 1;
+ uint32_t r2 = ((rand() & 1) << 1) + 1;
+ mTestFloatPointer1[r1] = nanf("0xFFFFFFFFFFFF");
+ mTestFloatPointer2[r2] = nanf("0xFFFFFFFFFFFF");
+ }
+ }
+#endif
+
+ // Roughly one out of every 64 iterations, mix up the test floats to
+ // contain some integer values
+ if ((rand() & 63) == 0) {
+ uint32_t option = rand() & 3;
+ switch (option) {
+ // All integers..
+ case 0:
+ mTestFloatPointer1[0] = float(mTestIntPointer1[0]);
+ mTestFloatPointer1[1] = float(mTestIntPointer1[1]);
+ mTestFloatPointer1[2] = float(mTestIntPointer1[2]);
+ mTestFloatPointer1[3] = float(mTestIntPointer1[3]);
+
+ mTestFloatPointer2[0] = float(mTestIntPointer2[0]);
+ mTestFloatPointer2[1] = float(mTestIntPointer2[1]);
+ mTestFloatPointer2[2] = float(mTestIntPointer2[2]);
+ mTestFloatPointer2[3] = float(mTestIntPointer2[3]);
+
+ break;
+ case 1: {
+ uint32_t index = rand() & 3;
+ mTestFloatPointer1[index] = float(mTestIntPointer1[index]);
+ index = rand() & 3;
+ mTestFloatPointer2[index] = float(mTestIntPointer2[index]);
+ } break;
+ case 2: {
+ uint32_t index1 = rand() & 3;
+ uint32_t index2 = rand() & 3;
+ mTestFloatPointer1[index1] =
+ float(mTestIntPointer1[index1]);
+ mTestFloatPointer1[index2] =
+ float(mTestIntPointer1[index2]);
+ index1 = rand() & 3;
+ index2 = rand() & 3;
+ mTestFloatPointer1[index1] =
+ float(mTestIntPointer1[index1]);
+ mTestFloatPointer1[index2] =
+ float(mTestIntPointer1[index2]);
+ } break;
+ case 3:
+ mTestFloatPointer1[0] = float(mTestIntPointer1[0]);
+ mTestFloatPointer1[1] = float(mTestIntPointer1[1]);
+ mTestFloatPointer1[2] = float(mTestIntPointer1[2]);
+ mTestFloatPointer1[3] = float(mTestIntPointer1[3]);
+ break;
+ }
+ if ((rand() & 3) == 0) { // one out of 4 times, make halves
+ for (uint32_t j = 0; j < 4; j++) {
+ mTestFloatPointer1[j] *= 0.5f;
+ mTestFloatPointer2[j] *= 0.5f;
+ }
+ }
+ }
+
+ ret = runSingleTest(test, i);
+ if (ret == TEST_FAIL) // the test failed...
+ {
+ // Set a breakpoint here if you want to step through the failure
+ // case in the debugger
+ ret = runSingleTest(test, i);
+ break;
+ }
+ }
+ return ret;
+ }
+};
+
+const char *instructionString[] = {
+#define _(x) #x,
+ INTRIN_LIST
+#undef _
+};
+
+// Produce rounding that matches SSE instructions with the _MM_ROUND_NEAREST
+// rounding mode
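+// (round half to even, e.g. 0.5 -> 0, 1.5 -> 2, 2.5 -> 2)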
+static inline float bankersRounding(float val)
+{
+ if (val < 0)
+ return -bankersRounding(-val);
+
+ float ret;
+ float roundDown = floorf(val); // Round down value
+ float roundUp = ceilf(val); // Round up value
+ float diffDown = val - roundDown;
+ float diffUp = roundUp - val;
+
+ if (diffDown < diffUp) {
+ /* If it's closer to the round down value, then use it */
+ ret = roundDown;
+ } else if (diffDown > diffUp) {
+ /* If it's closer to the round up value, then use it */
+ ret = roundUp;
+ } else {
+ /* If it's equidistant between round up and round down value, pick the
+ * one which is an even number */
+ float half = roundDown / 2;
+ if (half != floorf(half)) {
+ /* If the round down value is odd, return the round up value */
+ ret = roundUp;
+ } else {
+ /* Otherwise the round down value is even, so return it */
+ ret = roundDown;
+ }
+ }
+ return ret;
+}
+
+static inline double bankersRounding(double val)
+{
+ if (val < 0)
+ return -bankersRounding(-val);
+
+ double ret;
+ double roundDown = floor(val); // Round down value
+ double roundUp = ceil(val); // Round up value
+ double diffDown = val - roundDown;
+ double diffUp = roundUp - val;
+
+ if (diffDown < diffUp) {
+ /* If it's closer to the round down value, then use it */
+ ret = roundDown;
+ } else if (diffDown > diffUp) {
+ /* If it's closer to the round up value, then use it */
+ ret = roundUp;
+ } else {
+ /* If it's equidistant between round up and round down value, pick the
+ * one which is an even number */
+ double half = roundDown / 2;
+ if (half != floor(half)) {
+ /* If the round down value is odd, return the round up value */
+ ret = roundUp;
+ } else {
+ /* Otherwise the round down value is even, so return it */
+ ret = roundDown;
+ }
+ }
+ return ret;
+}
+
+// SplitMix64 PRNG by Sebastiano Vigna, see:
+// <https://xoshiro.di.unimi.it/splitmix64.c>
+static uint64_t state; // the state of SplitMix64 PRNG
+const double TWOPOWER64 = pow(2, 64);
+
+#define SSE2NEON_INIT_RNG(seed) \
+ do { \
+ state = seed; \
+ } while (0)
+
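+// Return the next SplitMix64 output, converted to a double in [0, 2^64).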
+static double next()
+{
+ uint64_t z = (state += 0x9e3779b97f4a7c15);
+ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+ return (double) (z ^ (z >> 31));
+}
+
+static float ranf()
+{
+ return (float) (next() / TWOPOWER64);
+}
+
+static float ranf(float low, float high)
+{
+ return ranf() * (high - low) + low;
+}
+
+// Forward declarations for tests that are invoked by the macros of other
+// tests
+result_t test_mm_slli_si128(const SSE2NEONTestImpl &impl, uint32_t iter);
+result_t test_mm_srli_si128(const SSE2NEONTestImpl &impl, uint32_t iter);
+result_t test_mm_shuffle_pi16(const SSE2NEONTestImpl &impl, uint32_t iter);
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to call "_mm_set_epi32".
+__m128i do_mm_set_epi32(int32_t x, int32_t y, int32_t z, int32_t w)
+{
+ __m128i a = _mm_set_epi32(x, y, z, w);
+ validateInt32(a, w, z, y, x);
+ return a;
+}
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to load __m64 data.
+template <class T>
+__m64 load_m64(const T *p)
+{
+ return *((const __m64 *) p);
+}
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to call "_mm_load_ps".
+template <class T>
+__m128 load_m128(const T *p)
+{
+ return _mm_loadu_ps((const float *) p);
+}
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to call "_mm_load_ps".
+template <class T>
+__m128i load_m128i(const T *p)
+{
+ __m128 a = _mm_loadu_ps((const float *) p);
+ __m128i ia = *(const __m128i *) &a;
+ return ia;
+}
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to call "_mm_load_pd".
+template <class T>
+__m128d load_m128d(const T *p)
+{
+ return _mm_loadu_pd((const double *) p);
+}
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to call "_mm_store_ps".
+result_t do_mm_store_ps(float *p, float x, float y, float z, float w)
+{
+ __m128 a = _mm_set_ps(x, y, z, w);
+ _mm_store_ps(p, a);
+ ASSERT_RETURN(p[0] == w);
+ ASSERT_RETURN(p[1] == z);
+ ASSERT_RETURN(p[2] == y);
+ ASSERT_RETURN(p[3] == x);
+ return TEST_SUCCESS;
+}
+
+// This function is not called from "runSingleTest", but for other intrinsic
+// tests that might need to call "_mm_store_ps".
+result_t do_mm_store_ps(int32_t *p, int32_t x, int32_t y, int32_t z, int32_t w)
+{
+ __m128i a = _mm_set_epi32(x, y, z, w);
+ _mm_store_ps((float *) p, *(const __m128 *) &a);
+ ASSERT_RETURN(p[0] == w);
+ ASSERT_RETURN(p[1] == z);
+ ASSERT_RETURN(p[2] == y);
+ ASSERT_RETURN(p[3] == x);
+ return TEST_SUCCESS;
+}
+
+float cmp_noNaN(float a, float b)
+{
+ return (!isnan(a) && !isnan(b)) ? ALL_BIT_1_32 : 0.0f;
+}
+
+double cmp_noNaN(double a, double b)
+{
+ return (!isnan(a) && !isnan(b)) ? ALL_BIT_1_64 : 0.0f;
+}
+
+float cmp_hasNaN(float a, float b)
+{
+ return (isnan(a) || isnan(b)) ? ALL_BIT_1_32 : 0.0f;
+}
+
+double cmp_hasNaN(double a, double b)
+{
+ return (isnan(a) || isnan(b)) ? ALL_BIT_1_64 : 0.0f;
+}
+
+int32_t comilt_ss(float a, float b)
+{
+ if (isnan(a) || isnan(b))
+ return 0;
+ return (a < b);
+}
+
+int32_t comigt_ss(float a, float b)
+{
+ if (isnan(a) || isnan(b))
+ return 0;
+ return (a > b);
+}
+
+int32_t comile_ss(float a, float b)
+{
+ if (isnan(a) || isnan(b))
+ return 0;
+ return (a <= b);
+}
+
+int32_t comige_ss(float a, float b)
+{
+ if (isnan(a) || isnan(b))
+ return 0;
+ return (a >= b);
+}
+
+int32_t comieq_ss(float a, float b)
+{
+ if (isnan(a) || isnan(b))
+ return 0;
+ return (a == b);
+}
+
+int32_t comineq_ss(float a, float b)
+{
+ if (isnan(a) || isnan(b))
+ return 1;
+ return (a != b);
+}
+
+static inline int16_t saturate_16(int32_t a)
+{
+ int32_t max = (1 << 15) - 1;
+ int32_t min = -(1 << 15);
+ if (a > max)
+ return max;
+ if (a < min)
+ return min;
+ return a;
+}
+
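+// Bit-at-a-time reference implementation of CRC-32C (Castagnoli, reflected
+// polynomial 0x82f63b78), used to validate the _mm_crc32_* intrinsics.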
+uint32_t canonical_crc32_u8(uint32_t crc, uint8_t v)
+{
+ crc ^= v;
+ for (int bit = 0; bit < 8; bit++) {
+ if (crc & 1)
+ crc = (crc >> 1) ^ uint32_t(0x82f63b78);
+ else
+ crc = (crc >> 1);
+ }
+ return crc;
+}
+
+uint32_t canonical_crc32_u16(uint32_t crc, uint16_t v)
+{
+ crc = canonical_crc32_u8(crc, v & 0xff);
+ crc = canonical_crc32_u8(crc, (v >> 8) & 0xff);
+ return crc;
+}
+
+uint32_t canonical_crc32_u32(uint32_t crc, uint32_t v)
+{
+ crc = canonical_crc32_u16(crc, v & 0xffff);
+ crc = canonical_crc32_u16(crc, (v >> 16) & 0xffff);
+ return crc;
+}
+
+uint64_t canonical_crc32_u64(uint64_t crc, uint64_t v)
+{
+ crc = canonical_crc32_u32((uint32_t) (crc), v & 0xffffffff);
+ crc = canonical_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff);
+ return crc;
+}
+
+static const uint8_t crypto_aes_sbox[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+ 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+ 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+ 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+ 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+ 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+ 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+ 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+ 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+ 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+ 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+ 0xb0, 0x54, 0xbb, 0x16,
+};
+
+static const uint8_t crypto_aes_rsbox[256] = {
+ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e,
+ 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32,
+ 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49,
+ 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50,
+ 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05,
+ 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
+ 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,
+ 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b,
+ 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59,
+ 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d,
+ 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63,
+ 0x55, 0x21, 0x0c, 0x7d,
+};
+
+// XT is the x_time function that multiplies 'x' by 2 in GF(2^8)
+#define XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b))
+
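+// Reference implementation of one AES encryption round (SubBytes with
+// ShiftRows applied via the byte placement, then MixColumns and
+// AddRoundKey), used to validate _mm_aesenc_si128.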
+inline __m128i aesenc_128_reference(__m128i a, __m128i b)
+{
+ uint8_t i, t, u, v[4][4];
+ for (i = 0; i < 16; ++i) {
+ v[((i / 4) + 4 - (i % 4)) % 4][i % 4] =
+ crypto_aes_sbox[((SIMDVec *) &a)->m128_u8[i]];
+ }
+ for (i = 0; i < 4; ++i) {
+ t = v[i][0];
+ u = v[i][0] ^ v[i][1] ^ v[i][2] ^ v[i][3];
+ v[i][0] ^= u ^ XT(v[i][0] ^ v[i][1]);
+ v[i][1] ^= u ^ XT(v[i][1] ^ v[i][2]);
+ v[i][2] ^= u ^ XT(v[i][2] ^ v[i][3]);
+ v[i][3] ^= u ^ XT(v[i][3] ^ t);
+ }
+
+ for (i = 0; i < 16; ++i) {
+ ((SIMDVec *) &a)->m128_u8[i] =
+ v[i / 4][i % 4] ^ ((SIMDVec *) &b)->m128_u8[i];
+ }
+
+ return a;
+}
+
+#define MULTIPLY(x, y) \
+ (((y & 1) * x) ^ ((y >> 1 & 1) * XT(x)) ^ ((y >> 2 & 1) * XT(XT(x))) ^ \
+ ((y >> 3 & 1) * XT(XT(XT(x)))) ^ ((y >> 4 & 1) * XT(XT(XT(XT(x))))))
+
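+// Reference implementation of one AES decryption round (InvShiftRows and
+// InvSubBytes applied via the byte placement, then InvMixColumns and
+// AddRoundKey), used to validate _mm_aesdec_si128.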
+inline __m128i aesdec_128_reference(__m128i a, __m128i b)
+{
+ uint8_t i, e, f, g, h, v[4][4];
+ for (i = 0; i < 16; ++i) {
+ v[((i / 4) + (i % 4)) % 4][i % 4] =
+ crypto_aes_rsbox[((SIMDVec *) &a)->m128_u8[i]];
+ }
+
+ for (i = 0; i < 4; ++i) {
+ e = v[i][0];
+ f = v[i][1];
+ g = v[i][2];
+ h = v[i][3];
+
+ v[i][0] = MULTIPLY(e, 0x0e) ^ MULTIPLY(f, 0x0b) ^ MULTIPLY(g, 0x0d) ^
+ MULTIPLY(h, 0x09);
+ v[i][1] = MULTIPLY(e, 0x09) ^ MULTIPLY(f, 0x0e) ^ MULTIPLY(g, 0x0b) ^
+ MULTIPLY(h, 0x0d);
+ v[i][2] = MULTIPLY(e, 0x0d) ^ MULTIPLY(f, 0x09) ^ MULTIPLY(g, 0x0e) ^
+ MULTIPLY(h, 0x0b);
+ v[i][3] = MULTIPLY(e, 0x0b) ^ MULTIPLY(f, 0x0d) ^ MULTIPLY(g, 0x09) ^
+ MULTIPLY(h, 0x0e);
+ }
+
+ for (i = 0; i < 16; ++i) {
+ ((SIMDVec *) &a)->m128_u8[i] =
+ v[i / 4][i % 4] ^ ((SIMDVec *) &b)->m128_u8[i];
+ }
+ return a;
+}
+
+inline __m128i aesenclast_128_reference(__m128i s, __m128i rk)
+{
+ uint8_t i, v[4][4];
+ for (i = 0; i < 16; ++i)
+ v[((i / 4) + 4 - (i % 4)) % 4][i % 4] =
+ crypto_aes_sbox[((SIMDVec *) &s)->m128_u8[i]];
+ for (i = 0; i < 16; ++i)
+ ((SIMDVec *) &s)->m128_u8[i] =
+ v[i / 4][i % 4] ^ ((SIMDVec *) &rk)->m128_u8[i];
+ return s;
+}
+
+// Rotates right (circular right shift) value by "amount" positions
+static inline uint32_t rotr(uint32_t value, uint32_t amount)
+{
+ return (value >> amount) | (value << ((32 - amount) & 31));
+}
+
+static inline uint64_t MUL(uint32_t a, uint32_t b)
+{
+ return (uint64_t) a * (uint64_t) b;
+}
+
+// From BearSSL. Performs a 32-bit->64-bit carryless/polynomial
+// long multiply.
+//
+// This implementation was chosen because it is reasonably fast
+// without a lookup table or branching.
+//
+// This does it by splitting up the bits so that the partial
+// products cannot carry into each other, then combining them
+// with xor (a carryless add).
+//
+// https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/hash/ghash_ctmul.c;h=3623202;hb=5f045c7#l164
+static uint64_t clmul_32(uint32_t x, uint32_t y)
+{
+ uint32_t x0, x1, x2, x3;
+ uint32_t y0, y1, y2, y3;
+ uint64_t z0, z1, z2, z3;
+
+ x0 = x & (uint32_t) 0x11111111;
+ x1 = x & (uint32_t) 0x22222222;
+ x2 = x & (uint32_t) 0x44444444;
+ x3 = x & (uint32_t) 0x88888888;
+ y0 = y & (uint32_t) 0x11111111;
+ y1 = y & (uint32_t) 0x22222222;
+ y2 = y & (uint32_t) 0x44444444;
+ y3 = y & (uint32_t) 0x88888888;
+ z0 = MUL(x0, y0) ^ MUL(x1, y3) ^ MUL(x2, y2) ^ MUL(x3, y1);
+ z1 = MUL(x0, y1) ^ MUL(x1, y0) ^ MUL(x2, y3) ^ MUL(x3, y2);
+ z2 = MUL(x0, y2) ^ MUL(x1, y1) ^ MUL(x2, y0) ^ MUL(x3, y3);
+ z3 = MUL(x0, y3) ^ MUL(x1, y2) ^ MUL(x2, y1) ^ MUL(x3, y0);
+ z0 &= (uint64_t) 0x1111111111111111;
+ z1 &= (uint64_t) 0x2222222222222222;
+ z2 &= (uint64_t) 0x4444444444444444;
+ z3 &= (uint64_t) 0x8888888888888888;
+ return z0 | z1 | z2 | z3;
+}
+
+// Performs a 64x64->128-bit carryless/polynomial long
+// multiply, using the above routine to calculate the
+// subproducts needed for the full-size multiply.
+//
+// This uses the Karatsuba algorithm.
+//
+// Normally, the Karatsuba algorithm only pays off for very
+// large numbers, because carry tracking adds overhead and
+// multiplication is relatively cheap.
+//
+// Here, however, there are no carries and multiplication is
+// definitely not cheap, so the Karatsuba algorithm is a
+// low-cost and easy optimization.
+//
+// https://en.m.wikipedia.org/wiki/Karatsuba_algorithm
+//
+// Note that addition and subtraction are both
+// performed with xor, since all operations are
+// carryless.
+//
+// The comments represent the actual mathematical
+// operations being performed (instead of the bitwise
+// operations) and to reflect the linked Wikipedia article.
+static std::pair<uint64_t, uint64_t> clmul_64(uint64_t x, uint64_t y)
+{
+ // B = 2
+ // m = 32
+ // x = (x1 * B^m) + x0
+ uint32_t x0 = x & 0xffffffff;
+ uint32_t x1 = x >> 32;
+ // y = (y1 * B^m) + y0
+ uint32_t y0 = y & 0xffffffff;
+ uint32_t y1 = y >> 32;
+
+ // z0 = x0 * y0
+ uint64_t z0 = clmul_32(x0, y0);
+ // z2 = x1 * y1
+ uint64_t z2 = clmul_32(x1, y1);
+ // z1 = (x0 + x1) * (y0 + y1) - z0 - z2
+ uint64_t z1 = clmul_32(x0 ^ x1, y0 ^ y1) ^ z0 ^ z2;
+
+ // xy = z0 + (z1 * B^m) + (z2 * B^2m)
+ // note: z1 is split between the low and high halves
+ uint64_t xy0 = z0 ^ (z1 << 32);
+ uint64_t xy1 = z2 ^ (z1 >> 32);
+
+ return std::make_pair(xy0, xy1);
+}
+
+/* MMX */
+result_t test_mm_empty(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return TEST_SUCCESS;
+}
+
+/* SSE */
+result_t test_mm_add_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float dx = _a[0] + _b[0];
+ float dy = _a[1] + _b[1];
+ float dz = _a[2] + _b[2];
+ float dw = _a[3] + _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_add_ps(a, b);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_add_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer1;
+
+ float f0 = _a[0] + _b[0];
+ float f1 = _a[1];
+ float f2 = _a[2];
+ float f3 = _a[3];
+
+ __m128 a = _mm_load_ps(_a);
+ __m128 b = _mm_load_ps(_b);
+ __m128 c = _mm_add_ss(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_and_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_and_ps(a, b);
+ // now for the assertion...
+ const uint32_t *ia = (const uint32_t *) &a;
+ const uint32_t *ib = (const uint32_t *) &b;
+ uint32_t r[4];
+ r[0] = ia[0] & ib[0];
+ r[1] = ia[1] & ib[1];
+ r[2] = ia[2] & ib[2];
+ r[3] = ia[3] & ib[3];
+ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+ result_t res = VALIDATE_INT32_M128(*(const __m128i *) &c, r);
+ if (res) {
+ res = VALIDATE_INT32_M128(ret, r);
+ }
+ return res;
+}
+
+// r0 := ~a0 & b0
+// r1 := ~a1 & b1
+// r2 := ~a2 & b2
+// r3 := ~a3 & b3
+result_t test_mm_andnot_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_andnot_ps(a, b);
+ // now for the assertion...
+ const uint32_t *ia = (const uint32_t *) &a;
+ const uint32_t *ib = (const uint32_t *) &b;
+ uint32_t r[4];
+ r[0] = ~ia[0] & ib[0];
+ r[1] = ~ia[1] & ib[1];
+ r[2] = ~ia[2] & ib[2];
+ r[3] = ~ia[3] & ib[3];
+ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+ result_t res = TEST_FAIL;
+ res = VALIDATE_INT32_M128(*(const __m128i *) &c, r);
+ if (res) {
+ res = VALIDATE_INT32_M128(ret, r);
+ }
+ return res;
+}
+
+result_t test_mm_avg_pu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+ uint16_t d[4];
+ d[0] = (_a[0] + _b[0] + 1) >> 1;
+ d[1] = (_a[1] + _b[1] + 1) >> 1;
+ d[2] = (_a[2] + _b[2] + 1) >> 1;
+ d[3] = (_a[3] + _b[3] + 1) >> 1;
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_avg_pu16(a, b);
+
+ return VALIDATE_UINT16_M64(c, d);
+}
+
+result_t test_mm_avg_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ uint8_t d[8];
+ d[0] = (_a[0] + _b[0] + 1) >> 1;
+ d[1] = (_a[1] + _b[1] + 1) >> 1;
+ d[2] = (_a[2] + _b[2] + 1) >> 1;
+ d[3] = (_a[3] + _b[3] + 1) >> 1;
+ d[4] = (_a[4] + _b[4] + 1) >> 1;
+ d[5] = (_a[5] + _b[5] + 1) >> 1;
+ d[6] = (_a[6] + _b[6] + 1) >> 1;
+ d[7] = (_a[7] + _b[7] + 1) >> 1;
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_avg_pu8(a, b);
+
+ return VALIDATE_UINT8_M64(c, d);
+}
+
+result_t test_mm_cmpeq_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] == _b[0] ? -1 : 0;
+ result[1] = _a[1] == _b[1] ? -1 : 0;
+ result[2] = _a[2] == _b[2] ? -1 : 0;
+ result[3] = _a[3] == _b[3] ? -1 : 0;
+
+ __m128 ret = _mm_cmpeq_ps(a, b);
+ __m128i iret = *(const __m128i *) &ret;
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmpeq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _a[0] == _b[0] ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpeq_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpge_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] >= _b[0] ? -1 : 0;
+ result[1] = _a[1] >= _b[1] ? -1 : 0;
+ result[2] = _a[2] >= _b[2] ? -1 : 0;
+ result[3] = _a[3] >= _b[3] ? -1 : 0;
+
+ __m128 ret = _mm_cmpge_ps(a, b);
+ __m128i iret = *(const __m128i *) &ret;
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmpge_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _a[0] >= _b[0] ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpge_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpgt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] > _b[0] ? -1 : 0;
+ result[1] = _a[1] > _b[1] ? -1 : 0;
+ result[2] = _a[2] > _b[2] ? -1 : 0;
+ result[3] = _a[3] > _b[3] ? -1 : 0;
+
+ __m128 ret = _mm_cmpgt_ps(a, b);
+ __m128i iret = *(const __m128i *) &ret;
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmpgt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _a[0] > _b[0] ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpgt_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmple_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] <= _b[0] ? -1 : 0;
+ result[1] = _a[1] <= _b[1] ? -1 : 0;
+ result[2] = _a[2] <= _b[2] ? -1 : 0;
+ result[3] = _a[3] <= _b[3] ? -1 : 0;
+
+ __m128 ret = _mm_cmple_ps(a, b);
+ __m128i iret = *(const __m128i *) &ret;
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmple_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _a[0] <= _b[0] ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmple_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmplt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] < _b[0] ? -1 : 0;
+ result[1] = _a[1] < _b[1] ? -1 : 0;
+ result[2] = _a[2] < _b[2] ? -1 : 0;
+ result[3] = _a[3] < _b[3] ? -1 : 0;
+
+ __m128 ret = _mm_cmplt_ps(a, b);
+ __m128i iret = *(const __m128i *) &ret;
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmplt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _a[0] < _b[0] ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmplt_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpneq_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] != _b[0] ? -1 : 0;
+ result[1] = _a[1] != _b[1] ? -1 : 0;
+ result[2] = _a[2] != _b[2] ? -1 : 0;
+ result[3] = _a[3] != _b[3] ? -1 : 0;
+
+ __m128 ret = _mm_cmpneq_ps(a, b);
+ __m128i iret = *(const __m128i *) &ret;
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmpneq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _a[0] != _b[0] ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpneq_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpnge_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] >= _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = !(_a[1] >= _b[1]) ? ALL_BIT_1_32 : 0;
+ result[2] = !(_a[2] >= _b[2]) ? ALL_BIT_1_32 : 0;
+ result[3] = !(_a[3] >= _b[3]) ? ALL_BIT_1_32 : 0;
+
+ __m128 ret = _mm_cmpnge_ps(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpnge_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] >= _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpnge_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpngt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] > _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = !(_a[1] > _b[1]) ? ALL_BIT_1_32 : 0;
+ result[2] = !(_a[2] > _b[2]) ? ALL_BIT_1_32 : 0;
+ result[3] = !(_a[3] > _b[3]) ? ALL_BIT_1_32 : 0;
+
+ __m128 ret = _mm_cmpngt_ps(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpngt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] > _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpngt_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpnle_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] <= _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = !(_a[1] <= _b[1]) ? ALL_BIT_1_32 : 0;
+ result[2] = !(_a[2] <= _b[2]) ? ALL_BIT_1_32 : 0;
+ result[3] = !(_a[3] <= _b[3]) ? ALL_BIT_1_32 : 0;
+
+ __m128 ret = _mm_cmpnle_ps(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpnle_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] <= _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpnle_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpnlt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] < _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = !(_a[1] < _b[1]) ? ALL_BIT_1_32 : 0;
+ result[2] = !(_a[2] < _b[2]) ? ALL_BIT_1_32 : 0;
+ result[3] = !(_a[3] < _b[3]) ? ALL_BIT_1_32 : 0;
+
+ __m128 ret = _mm_cmpnlt_ps(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpnlt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = !(_a[0] < _b[0]) ? ALL_BIT_1_32 : 0;
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpnlt_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpord_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+
+ for (uint32_t i = 0; i < 4; i++) {
+ result[i] = cmp_noNaN(_a[i], _b[i]);
+ }
+
+ __m128 ret = _mm_cmpord_ps(a, b);
+
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpord_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = cmp_noNaN(_a[0], _b[0]);
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpord_ss(a, b);
+
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpunord_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+
+ for (uint32_t i = 0; i < 4; i++) {
+ result[i] = cmp_hasNaN(_a[i], _b[i]);
+ }
+
+ __m128 ret = _mm_cmpunord_ps(a, b);
+
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_cmpunord_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = cmp_hasNaN(_a[0], _b[0]);
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_cmpunord_ss(a, b);
+
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_comieq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comieq_ss correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result = comieq_ss(_a[0], _b[0]);
+ int32_t ret = _mm_comieq_ss(a, b);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+#endif
+}
+
+result_t test_mm_comige_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result = comige_ss(_a[0], _b[0]);
+ int32_t ret = _mm_comige_ss(a, b);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_comigt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result = comigt_ss(_a[0], _b[0]);
+ int32_t ret = _mm_comigt_ss(a, b);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_comile_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comile_ss correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result = comile_ss(_a[0], _b[0]);
+ int32_t ret = _mm_comile_ss(a, b);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+#endif
+}
+
+result_t test_mm_comilt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comilt_ss correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result = comilt_ss(_a[0], _b[0]);
+
+ int32_t ret = _mm_comilt_ss(a, b);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+#endif
+}
+
+result_t test_mm_comineq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comineq_ss correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ int32_t result = comineq_ss(_a[0], _b[0]);
+ int32_t ret = _mm_comineq_ss(a, b);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+#endif
+}
+
+result_t test_mm_cvt_pi2ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+
+ float dx = (float) _b[0];
+ float dy = (float) _b[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m64 b = load_m64(_b);
+ __m128 c = _mm_cvt_pi2ps(a, b);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvt_ps2pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int32_t d[2];
+
+ for (int idx = 0; idx < 2; idx++) {
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d[idx] = (int32_t) (bankersRounding(_a[idx]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d[idx] = (int32_t) (floorf(_a[idx]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d[idx] = (int32_t) (ceilf(_a[idx]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d[idx] = (int32_t) (_a[idx]);
+ break;
+ }
+ }
+
+ __m128 a = load_m128(_a);
+ __m64 ret = _mm_cvt_ps2pi(a);
+
+ return VALIDATE_INT32_M64(ret, d);
+}
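+
+// Note on the reference computation above: x86's _MM_ROUND_NEAREST mode is
+// round-to-nearest-even, which is why the bankersRounding helper is used
+// instead of roundf() (which rounds halfway cases away from zero). For
+// example, 0.5 rounds to 0, 1.5 rounds to 2, and 2.5 rounds to 2.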
+
+result_t test_mm_cvt_si2ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const int32_t b = *impl.mTestIntPointer2;
+
+ float dx = (float) b;
+ float dy = _a[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_cvt_si2ss(a, b);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvt_ss2si(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int32_t d0;
+
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d0 = (int32_t) (bankersRounding(_a[0]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d0 = (int32_t) (floorf(_a[0]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d0 = (int32_t) (ceilf(_a[0]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d0 = (int32_t) (_a[0]);
+ break;
+ }
+
+ __m128 a = load_m128(_a);
+ int32_t ret = _mm_cvt_ss2si(a);
+ return ret == d0 ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtpi16_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+
+ float dx = (float) _a[0];
+ float dy = (float) _a[1];
+ float dz = (float) _a[2];
+ float dw = (float) _a[3];
+
+ __m64 a = load_m64(_a);
+ __m128 c = _mm_cvtpi16_ps(a);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtpi32_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ float dx = (float) _b[0];
+ float dy = (float) _b[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m64 b = load_m64(_b);
+ __m128 c = _mm_cvtpi32_ps(a, b);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtpi32x2_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ float dx = (float) _a[0];
+ float dy = (float) _a[1];
+ float dz = (float) _b[0];
+ float dw = (float) _b[1];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m128 c = _mm_cvtpi32x2_ps(a, b);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtpi8_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+
+ float dx = (float) _a[0];
+ float dy = (float) _a[1];
+ float dz = (float) _a[2];
+ float dw = (float) _a[3];
+
+ __m64 a = load_m64(_a);
+ __m128 c = _mm_cvtpi8_ps(a);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtps_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int16_t rnd[4];
+
+ for (int i = 0; i < 4; i++) {
+ if ((float) INT16_MAX <= _a[i] && _a[i] <= (float) INT32_MAX) {
+ rnd[i] = INT16_MAX;
+ } else if (INT16_MIN < _a[i] && _a[i] < INT16_MAX) {
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ rnd[i] = (int16_t) bankersRounding(_a[i]);
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ rnd[i] = (int16_t) floorf(_a[i]);
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ rnd[i] = (int16_t) ceilf(_a[i]);
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ rnd[i] = (int16_t) _a[i];
+ break;
+ }
+ } else {
+ rnd[i] = INT16_MIN;
+ }
+ }
+
+ __m128 a = load_m128(_a);
+ __m64 ret = _mm_cvtps_pi16(a);
+ return VALIDATE_INT16_M64(ret, rnd);
+}
+
+result_t test_mm_cvtps_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int32_t d[2];
+
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d[0] = (int32_t) bankersRounding(_a[0]);
+ d[1] = (int32_t) bankersRounding(_a[1]);
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d[0] = (int32_t) floorf(_a[0]);
+ d[1] = (int32_t) floorf(_a[1]);
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d[0] = (int32_t) ceilf(_a[0]);
+ d[1] = (int32_t) ceilf(_a[1]);
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+ break;
+ }
+
+ __m128 a = load_m128(_a);
+ __m64 ret = _mm_cvtps_pi32(a);
+
+ return VALIDATE_INT32_M64(ret, d);
+}
+
+result_t test_mm_cvtps_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int8_t rnd[8] = {};
+
+ for (int i = 0; i < 4; i++) {
+ if ((float) INT8_MAX <= _a[i] && _a[i] <= (float) INT32_MAX) {
+ rnd[i] = INT8_MAX;
+ } else if (INT8_MIN < _a[i] && _a[i] < INT8_MAX) {
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ rnd[i] = (int8_t) bankersRounding(_a[i]);
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ rnd[i] = (int8_t) floorf(_a[i]);
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ rnd[i] = (int8_t) ceilf(_a[i]);
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ rnd[i] = (int8_t) _a[i];
+ break;
+ }
+ } else {
+ rnd[i] = INT8_MIN;
+ }
+ }
+
+ __m128 a = load_m128(_a);
+ __m64 ret = _mm_cvtps_pi8(a);
+ return VALIDATE_INT8_M64(ret, rnd);
+}
+
+result_t test_mm_cvtpu16_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+
+ float dx = (float) _a[0];
+ float dy = (float) _a[1];
+ float dz = (float) _a[2];
+ float dw = (float) _a[3];
+
+ __m64 a = load_m64(_a);
+ __m128 c = _mm_cvtpu16_ps(a);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtpu8_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+
+ float dx = (float) _a[0];
+ float dy = (float) _a[1];
+ float dz = (float) _a[2];
+ float dw = (float) _a[3];
+
+ __m64 a = load_m64(_a);
+ __m128 c = _mm_cvtpu8_ps(a);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtsi32_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const int32_t b = *impl.mTestIntPointer2;
+
+ float dx = (float) b;
+ float dy = _a[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_cvtsi32_ss(a, b);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtsi64_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const int64_t b = *(int64_t *) impl.mTestIntPointer2;
+
+ float dx = (float) b;
+ float dy = _a[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_cvtsi64_ss(a, b);
+
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_cvtss_f32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ float f = _a[0];
+
+ __m128 a = load_m128(_a);
+ float c = _mm_cvtss_f32(a);
+
+ return f == c ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtss_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ int32_t d0;
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d0 = (int32_t) (bankersRounding(_a[0]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d0 = (int32_t) (floorf(_a[0]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d0 = (int32_t) (ceilf(_a[0]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d0 = (int32_t) (_a[0]);
+ break;
+ }
+
+ __m128 a = load_m128(_a);
+ int32_t ret = _mm_cvtss_si32(a);
+
+ return ret == d0 ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtss_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ int64_t d0;
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d0 = (int64_t) (bankersRounding(_a[0]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d0 = (int64_t) (floorf(_a[0]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d0 = (int64_t) (ceilf(_a[0]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d0 = (int64_t) (_a[0]);
+ break;
+ }
+
+ __m128 a = load_m128(_a);
+ int64_t ret = _mm_cvtss_si64(a);
+
+ return ret == d0 ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtt_ps2pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int32_t d[2];
+
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+
+ __m128 a = load_m128(_a);
+ __m64 ret = _mm_cvtt_ps2pi(a);
+
+ return VALIDATE_INT32_M64(ret, d);
+}
+
+result_t test_mm_cvtt_ss2si(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ __m128 a = load_m128(_a);
+ int ret = _mm_cvtt_ss2si(a);
+
+ return ret == (int32_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvttps_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ int32_t d[2];
+
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+
+ __m128 a = load_m128(_a);
+ __m64 ret = _mm_cvttps_pi32(a);
+
+ return VALIDATE_INT32_M64(ret, d);
+}
+
+result_t test_mm_cvttss_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ __m128 a = load_m128(_a);
+ int ret = _mm_cvttss_si32(a);
+
+ return ret == (int32_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvttss_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ __m128 a = load_m128(_a);
+ int64_t ret = _mm_cvttss_si64(a);
+
+ return ret == (int64_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_div_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float f0 = _a[0] / _b[0];
+ float f1 = _a[1] / _b[1];
+ float f2 = _a[2] / _b[2];
+ float f3 = _a[3] / _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_div_ps(a, b);
+
+#if defined(__arm__) && !defined(__aarch64__) && !defined(_M_ARM64)
+ // On 32-bit ARM the division is not performed with a DIV instruction;
+ // it is approximated with the FRECPE (reciprocal estimate) instruction,
+ // so the error is larger than on other architectures.
+ return validateFloatError(c, f0, f1, f2, f3, 0.00001f);
+#else
+ return validateFloat(c, f0, f1, f2, f3);
+#endif
+}
+
+result_t test_mm_div_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float d0 = _a[0] / _b[0];
+ float d1 = _a[1];
+ float d2 = _a[2];
+ float d3 = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_div_ss(a, b);
+
+#if defined(__arm__) && !defined(__aarch64__) && !defined(_M_ARM64)
+ // On 32-bit ARM the division is not performed with a DIV instruction;
+ // it is approximated with the FRECPE (reciprocal estimate) instruction,
+ // so the error is larger than on other architectures.
+ return validateFloatError(c, d0, d1, d2, d3, 0.00001f);
+#else
+ return validateFloat(c, d0, d1, d2, d3);
+#endif
+}
+
+result_t test_mm_extract_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME: GCC has a bug in the "_mm_extract_pi16" intrinsic. We will enable
+ // this test once GCC fixes it.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98495 for more
+ // information.
+#if defined(__clang__) || defined(_MSC_VER)
+ uint64_t *_a = (uint64_t *) impl.mTestIntPointer1;
+ const int idx = iter & 0x3;
+
+ __m64 a = load_m64(_a);
+ int c;
+ switch (idx) {
+ case 0:
+ c = _mm_extract_pi16(a, 0);
+ break;
+ case 1:
+ c = _mm_extract_pi16(a, 1);
+ break;
+ case 2:
+ c = _mm_extract_pi16(a, 2);
+ break;
+ case 3:
+ c = _mm_extract_pi16(a, 3);
+ break;
+ }
+
+ ASSERT_RETURN((uint64_t) c == ((*_a >> (idx * 16)) & 0xFFFF));
+ ASSERT_RETURN(0 == ((uint64_t) c & 0xFFFF0000));
+ return TEST_SUCCESS;
+#else
+ return TEST_UNIMPL;
+#endif
+}
+
+result_t test_mm_malloc(const SSE2NEONTestImpl &impl, uint32_t iter);
+result_t test_mm_free(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ /* _mm_free is exercised inside test_mm_malloc, so delegating to it is
+ * sufficient; there is no separate check for _mm_free. */
+ return test_mm_malloc(impl, iter);
+}
+
+result_t test_mm_get_flush_zero_mode(const SSE2NEONTestImpl &impl,
+ uint32_t iter)
+{
+ int res_flush_zero_on, res_flush_zero_off;
+ _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+ res_flush_zero_on = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_ON;
+ _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF);
+ res_flush_zero_off = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_OFF;
+
+ return (res_flush_zero_on && res_flush_zero_off) ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_get_rounding_mode(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int res_toward_zero, res_to_neg_inf, res_to_pos_inf, res_nearest;
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ res_toward_zero = _MM_GET_ROUNDING_MODE() == _MM_ROUND_TOWARD_ZERO ? 1 : 0;
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ res_to_neg_inf = _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN ? 1 : 0;
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ res_to_pos_inf = _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP ? 1 : 0;
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ res_nearest = _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST ? 1 : 0;
+
+ if (res_toward_zero && res_to_neg_inf && res_to_pos_inf && res_nearest) {
+ return TEST_SUCCESS;
+ } else {
+ return TEST_FAIL;
+ }
+}
+
+result_t test_mm_getcsr(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // Save the original CSR value so it can be restored after the test.
+ unsigned int originalCsr = _mm_getcsr();
+
+ unsigned int roundings[] = {_MM_ROUND_TOWARD_ZERO, _MM_ROUND_DOWN,
+ _MM_ROUND_UP, _MM_ROUND_NEAREST};
+ for (size_t i = 0; i < sizeof(roundings) / sizeof(roundings[0]); i++) {
+ _mm_setcsr(_mm_getcsr() | roundings[i]);
+ if ((_mm_getcsr() & roundings[i]) != roundings[i]) {
+ return TEST_FAIL;
+ }
+ }
+
+ // restore original csr value for remaining tests
+ _mm_setcsr(originalCsr);
+
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_insert_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t insert = (int16_t) impl.mTestInts[iter];
+ __m64 a;
+ __m64 b;
+
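+ // The lane index of _mm_insert_pi16 must be a compile-time immediate, so
+ // the check is written as a macro that is instantiated once per index;
+ // IMM_4_ITER (defined elsewhere in this test suite) is assumed to expand
+ // TEST_IMPL for IDX = 0, 1, 2 and 3.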
+#define TEST_IMPL(IDX) \
+ int16_t d##IDX[4]; \
+ for (int i = 0; i < 4; i++) { \
+ d##IDX[i] = _a[i]; \
+ } \
+ d##IDX[IDX] = insert; \
+ \
+ a = load_m64(_a); \
+ b = _mm_insert_pi16(a, insert, IDX); \
+ CHECK_RESULT(VALIDATE_INT16_M64(b, d##IDX))
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_load_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *addr = impl.mTestFloatPointer1;
+
+ __m128 ret = _mm_load_ps(addr);
+
+ return validateFloat(ret, addr[0], addr[1], addr[2], addr[3]);
+}
+
+result_t test_mm_load_ps1(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *addr = impl.mTestFloatPointer1;
+
+ __m128 ret = _mm_load_ps1(addr);
+
+ return validateFloat(ret, addr[0], addr[0], addr[0], addr[0]);
+}
+
+result_t test_mm_load_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *addr = impl.mTestFloatPointer1;
+
+ __m128 ret = _mm_load_ss(addr);
+
+ return validateFloat(ret, addr[0], 0, 0, 0);
+}
+
+result_t test_mm_load1_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p = impl.mTestFloatPointer1;
+ __m128 a = _mm_load1_ps(p);
+ return validateFloat(a, p[0], p[0], p[0], p[0]);
+}
+
+result_t test_mm_loadh_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p1 = impl.mTestFloatPointer1;
+ const float *p2 = impl.mTestFloatPointer2;
+ const __m64 *b = (const __m64 *) p2;
+ __m128 a = _mm_load_ps(p1);
+ __m128 c = _mm_loadh_pi(a, b);
+
+ return validateFloat(c, p1[0], p1[1], p2[0], p2[1]);
+}
+
+result_t test_mm_loadl_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p1 = impl.mTestFloatPointer1;
+ const float *p2 = impl.mTestFloatPointer2;
+ __m128 a = _mm_load_ps(p1);
+ const __m64 *b = (const __m64 *) p2;
+ __m128 c = _mm_loadl_pi(a, b);
+
+ return validateFloat(c, p2[0], p2[1], p1[2], p1[3]);
+}
+
+result_t test_mm_loadr_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *addr = impl.mTestFloatPointer1;
+
+ __m128 ret = _mm_loadr_ps(addr);
+
+ return validateFloat(ret, addr[3], addr[2], addr[1], addr[0]);
+}
+
+result_t test_mm_loadu_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *addr = impl.mTestFloatPointer1;
+
+ __m128 ret = _mm_loadu_ps(addr);
+
+ return validateFloat(ret, addr[0], addr[1], addr[2], addr[3]);
+}
+
+result_t test_mm_loadu_si16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // GCC versions before 11 do not implement the intrinsic function
+ // _mm_loadu_si16. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483
+ // for more information.
+#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+ return TEST_UNIMPL;
+#else
+ const int16_t *addr = (const int16_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_loadu_si16((const void *) addr);
+
+ return validateInt16(ret, addr[0], 0, 0, 0, 0, 0, 0, 0);
+#endif
+}
+
+result_t test_mm_loadu_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // Versions of GCC prior to 9 do not implement the intrinsic function
+ // _mm_loadu_si64. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78782
+ // for more information.
+#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 9)
+ return TEST_UNIMPL;
+#else
+ const int64_t *addr = (const int64_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_loadu_si64((const void *) addr);
+
+ return validateInt64(ret, addr[0], 0);
+#endif
+}
+
+result_t test_mm_malloc(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const size_t *a = (const size_t *) impl.mTestIntPointer1;
+ const size_t *b = (const size_t *) impl.mTestIntPointer2;
+ size_t size = *a % (1024 * 16) + 1;
+ size_t align = 2 << (*b % 5);
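+ // (*b % 5) lies in [0, 4], so align takes one of {2, 4, 8, 16, 32}.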
+
+ void *p = _mm_malloc(size, align);
+ if (!p)
+ return TEST_FAIL;
+ result_t res = (((uintptr_t) p % align) == 0) ? TEST_SUCCESS : TEST_FAIL;
+ _mm_free(p);
+ return res;
+}
+
+result_t test_mm_maskmove_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_mask = (const uint8_t *) impl.mTestIntPointer2;
+ char mem_addr[16];
+
+ const __m64 *a = (const __m64 *) _a;
+ const __m64 *mask = (const __m64 *) _mask;
+ _mm_maskmove_si64(*a, *mask, (char *) mem_addr);
+
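+ // Only bytes whose corresponding mask byte has its most significant bit set
+ // are written; the remaining bytes of mem_addr stay uninitialized, so only
+ // the written bytes are validated below.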
+ for (int i = 0; i < 8; i++) {
+ if (_mask[i] >> 7) {
+ ASSERT_RETURN(_a[i] == (uint8_t) mem_addr[i]);
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
+result_t test_m_maskmovq(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_maskmove_si64(impl, iter);
+}
+
+result_t test_mm_max_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t c[4];
+
+ c[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ c[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ c[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ c[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_max_pi16(a, b);
+ return VALIDATE_INT16_M64(ret, c);
+}
+
+result_t test_mm_max_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float c[4];
+
+ c[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ c[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ c[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ c[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 ret = _mm_max_ps(a, b);
+ return validateFloat(ret, c[0], c[1], c[2], c[3]);
+}
+
+result_t test_mm_max_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ uint8_t c[8];
+
+ c[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ c[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ c[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ c[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+ c[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+ c[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+ c[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+ c[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_max_pu8(a, b);
+ return VALIDATE_UINT8_M64(ret, c);
+}
+
+result_t test_mm_max_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[0] > _b[0] ? _a[0] : _b[0];
+ float f1 = _a[1];
+ float f2 = _a[2];
+ float f3 = _a[3];
+
+ __m128 a = _mm_load_ps(_a);
+ __m128 b = _mm_load_ps(_b);
+ __m128 c = _mm_max_ss(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_min_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t c[4];
+
+ c[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ c[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ c[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ c[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_min_pi16(a, b);
+ return VALIDATE_INT16_M64(ret, c);
+}
+
+result_t test_mm_min_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float c[4];
+
+ c[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ c[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ c[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ c[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 ret = _mm_min_ps(a, b);
+ return validateFloat(ret, c[0], c[1], c[2], c[3]);
+}
+
+result_t test_mm_min_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ uint8_t c[8];
+
+ c[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ c[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ c[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ c[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+ c[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+ c[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+ c[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+ c[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_min_pu8(a, b);
+ return VALIDATE_UINT8_M64(ret, c);
+}
+
+result_t test_mm_min_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float c;
+
+ c = _a[0] < _b[0] ? _a[0] : _b[0];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 ret = _mm_min_ss(a, b);
+
+ return validateFloat(ret, c, _a[1], _a[2], _a[3]);
+}
+
+result_t test_mm_move_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+
+ float result[4];
+ result[0] = _b[0];
+ result[1] = _a[1];
+ result[2] = _a[2];
+ result[3] = _a[3];
+
+ __m128 ret = _mm_move_ss(a, b);
+ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+}
+
+result_t test_mm_movehl_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _b[2];
+ float f1 = _b[3];
+ float f2 = _a[2];
+ float f3 = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 ret = _mm_movehl_ps(a, b);
+
+ return validateFloat(ret, f0, f1, f2, f3);
+}
+
+result_t test_mm_movelh_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[0];
+ float f1 = _a[1];
+ float f2 = _b[0];
+ float f3 = _b[1];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 ret = _mm_movelh_ps(a, b);
+
+ return validateFloat(ret, f0, f1, f2, f3);
+}
+
+result_t test_mm_movemask_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ unsigned int _c = 0;
+ for (int i = 0; i < 8; i++) {
+ if (_a[i] & 0x80) {
+ _c |= (1 << i);
+ }
+ }
+
+ const __m64 *a = (const __m64 *) _a;
+ int c = _mm_movemask_pi8(*a);
+
+ ASSERT_RETURN((unsigned int) c == _c);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_movemask_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p = impl.mTestFloatPointer1;
+ int ret = 0;
+
+ const uint32_t *ip = (const uint32_t *) p;
+ if (ip[0] & 0x80000000) {
+ ret |= 1;
+ }
+ if (ip[1] & 0x80000000) {
+ ret |= 2;
+ }
+ if (ip[2] & 0x80000000) {
+ ret |= 4;
+ }
+ if (ip[3] & 0x80000000) {
+ ret |= 8;
+ }
+ __m128 a = load_m128(p);
+ int val = _mm_movemask_ps(a);
+ return val == ret ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_mul_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float dx = _a[0] * _b[0];
+ float dy = _a[1] * _b[1];
+ float dz = _a[2] * _b[2];
+ float dw = _a[3] * _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_mul_ps(a, b);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_mul_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float dx = _a[0] * _b[0];
+ float dy = _a[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_mul_ss(a, b);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_mulhi_pu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+ uint16_t d[4];
+ for (uint32_t i = 0; i < 4; i++) {
+ uint32_t m = (uint32_t) _a[i] * (uint32_t) _b[i];
+ d[i] = (uint16_t) (m >> 16);
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_mulhi_pu16(a, b);
+ return VALIDATE_UINT16_M64(c, d);
+}
+
+result_t test_mm_or_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_or_ps(a, b);
+ // now for the assertion...
+ const uint32_t *ia = (const uint32_t *) &a;
+ const uint32_t *ib = (const uint32_t *) &b;
+ uint32_t r[4];
+ r[0] = ia[0] | ib[0];
+ r[1] = ia[1] | ib[1];
+ r[2] = ia[2] | ib[2];
+ r[3] = ia[3] | ib[3];
+ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+ result_t res = VALIDATE_INT32_M128(*(const __m128i *) &c, r);
+ if (res) {
+ res = VALIDATE_INT32_M128(ret, r);
+ }
+
+ return res;
+}
+
+result_t test_m_pavgb(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_avg_pu8(impl, iter);
+}
+
+result_t test_m_pavgw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_avg_pu16(impl, iter);
+}
+
+result_t test_m_pextrw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_extract_pi16(impl, iter);
+}
+
+result_t test_m_pinsrw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_insert_pi16(impl, iter);
+}
+
+result_t test_m_pmaxsw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_max_pi16(impl, iter);
+}
+
+result_t test_m_pmaxub(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_max_pu8(impl, iter);
+}
+
+result_t test_m_pminsw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_min_pi16(impl, iter);
+}
+
+result_t test_m_pminub(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_min_pu8(impl, iter);
+}
+
+result_t test_m_pmovmskb(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_movemask_pi8(impl, iter);
+}
+
+result_t test_m_pmulhuw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_mulhi_pu16(impl, iter);
+}
+
+result_t test_mm_prefetch(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ typedef struct {
+ __m128 a;
+ float r[4];
+ } prefetch_test_t;
+ prefetch_test_t test_vec[8] = {
+ {
+ _mm_set_ps(-0.1f, 0.2f, 0.3f, 0.4f),
+ {0.4f, 0.3f, 0.2f, -0.1f},
+ },
+ {
+ _mm_set_ps(0.5f, 0.6f, -0.7f, -0.8f),
+ {-0.8f, -0.7f, 0.6f, 0.5f},
+ },
+ {
+ _mm_set_ps(0.9f, 0.10f, -0.11f, 0.12f),
+ {0.12f, -0.11f, 0.10f, 0.9f},
+ },
+ {
+ _mm_set_ps(-1.1f, -2.1f, -3.1f, -4.1f),
+ {-4.1f, -3.1f, -2.1f, -1.1f},
+ },
+ {
+ _mm_set_ps(100.0f, -110.0f, 120.0f, -130.0f),
+ {-130.0f, 120.0f, -110.0f, 100.0f},
+ },
+ {
+ _mm_set_ps(200.5f, 210.5f, -220.5f, 230.5f),
+ {995.74f, -93.04f, 144.03f, 902.50f},
+ },
+ {
+ _mm_set_ps(10.11f, -11.12f, -12.13f, 13.14f),
+ {13.14f, -12.13f, -11.12f, 10.11f},
+ },
+ {
+ _mm_set_ps(10.1f, -20.2f, 30.3f, 40.4f),
+ {40.4f, 30.3f, -20.2f, 10.1f},
+ },
+ };
+
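+ // _mm_prefetch is only a hint with no architecturally visible result, so the
+ // test simply issues every hint level for each vector; the r[] reference
+ // values are not inspected.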
+ for (size_t i = 0; i < (sizeof(test_vec) / (sizeof(test_vec[0]))); i++) {
+ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_T0);
+ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_T1);
+ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_T2);
+ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_NTA);
+ }
+
+ return TEST_SUCCESS;
+}
+
+result_t test_m_psadbw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
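+ // Reference result: the sum of absolute byte differences lands in the low
+ // 16-bit lane, and the three upper lanes are zero.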
+ uint16_t d = 0;
+ for (int i = 0; i < 8; i++) {
+ d += abs(_a[i] - _b[i]);
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _m_psadbw(a, b);
+ return validateUInt16(c, d, 0, 0, 0);
+}
+
+result_t test_m_pshufw(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_shuffle_pi16(impl, iter);
+}
+
+result_t test_mm_rcp_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ float dx = 1.0f / _a[0];
+ float dy = 1.0f / _a[1];
+ float dz = 1.0f / _a[2];
+ float dw = 1.0f / _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_rcp_ps(a);
+ return validateFloatError(c, dx, dy, dz, dw, 0.001f);
+}
+
+result_t test_mm_rcp_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ float dx = 1.0f / _a[0];
+ float dy = _a[1];
+ float dz = _a[2];
+ float dw = _a[3];
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_rcp_ss(a);
+ return validateFloatError(c, dx, dy, dz, dw, 0.001f);
+}
+
+result_t test_mm_rsqrt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = (const float *) impl.mTestFloatPointer1;
+
+ float f0 = 1 / sqrtf(_a[0]);
+ float f1 = 1 / sqrtf(_a[1]);
+ float f2 = 1 / sqrtf(_a[2]);
+ float f3 = 1 / sqrtf(_a[3]);
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_rsqrt_ps(a);
+
+ // Here, we ensure the error rate of "_mm_rsqrt_ps()" is under 0.1% compared
+ // to the C implementation.
+ return validateFloatError(c, f0, f1, f2, f3, 0.001f);
+}
+
+result_t test_mm_rsqrt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = (const float *) impl.mTestFloatPointer1;
+
+ float f0 = 1 / sqrtf(_a[0]);
+ float f1 = _a[1];
+ float f2 = _a[2];
+ float f3 = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_rsqrt_ss(a);
+
+ // Here, we ensure the error rate of "_mm_rsqrt_ss()" is under 0.1% compared
+ // to the C implementation.
+ return validateFloatError(c, f0, f1, f2, f3, 0.001f);
+}
+
+result_t test_mm_sad_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ uint16_t d = 0;
+ for (int i = 0; i < 8; i++) {
+ d += abs(_a[i] - _b[i]);
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_sad_pu8(a, b);
+ return validateUInt16(c, d, 0, 0, 0);
+}
+
+result_t test_mm_set_flush_zero_mode(const SSE2NEONTestImpl &impl,
+ uint32_t iter)
+{
+ // TODO:
+ // Add tests once the behavior of denormal numbers and flush-to-zero mode
+ // has been fully investigated.
+ return TEST_UNIMPL;
+}
+
+result_t test_mm_set_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float x = impl.mTestFloats[iter];
+ float y = impl.mTestFloats[iter + 1];
+ float z = impl.mTestFloats[iter + 2];
+ float w = impl.mTestFloats[iter + 3];
+ __m128 a = _mm_set_ps(x, y, z, w);
+ return validateFloat(a, w, z, y, x);
+}
+
+result_t test_mm_set_ps1(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float a = impl.mTestFloats[iter];
+
+ __m128 ret = _mm_set_ps1(a);
+
+ return validateFloat(ret, a, a, a, a);
+}
+
+result_t test_mm_set_rounding_mode(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ result_t res_toward_zero, res_to_neg_inf, res_to_pos_inf, res_nearest;
+
+ __m128 a = load_m128(_a);
+ __m128 b, c;
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ c = _mm_round_ps(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ res_toward_zero = validate128(c, b);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ c = _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ res_to_neg_inf = validate128(c, b);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ c = _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ res_to_pos_inf = validate128(c, b);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ c = _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ res_nearest = validate128(c, b);
+
+ if (res_toward_zero == TEST_SUCCESS && res_to_neg_inf == TEST_SUCCESS &&
+ res_to_pos_inf == TEST_SUCCESS && res_nearest == TEST_SUCCESS) {
+ return TEST_SUCCESS;
+ } else {
+ return TEST_FAIL;
+ }
+}
+
+result_t test_mm_set_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float a = impl.mTestFloats[iter];
+ __m128 c = _mm_set_ss(a);
+ return validateFloat(c, a, 0, 0, 0);
+}
+
+result_t test_mm_set1_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float w = impl.mTestFloats[iter];
+ __m128 a = _mm_set1_ps(w);
+ return validateFloat(a, w, w, w, w);
+}
+
+result_t test_mm_setcsr(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_set_rounding_mode(impl, iter);
+}
+
+result_t test_mm_setr_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float x = impl.mTestFloats[iter];
+ float y = impl.mTestFloats[iter + 1];
+ float z = impl.mTestFloats[iter + 2];
+ float w = impl.mTestFloats[iter + 3];
+
+ __m128 ret = _mm_setr_ps(w, z, y, x);
+
+ return validateFloat(ret, w, z, y, x);
+}
+
+result_t test_mm_setzero_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ __m128 a = _mm_setzero_ps();
+ return validateFloat(a, 0, 0, 0, 0);
+}
+
+result_t test_mm_sfence(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ /* FIXME: Assume that memory barriers always function as intended. */
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_shuffle_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m64 a;
+ __m64 d;
+
+#define TEST_IMPL(IDX) \
+ a = load_m64(_a); \
+ d = _mm_shuffle_pi16(a, IDX); \
+ \
+ int16_t _d##IDX[4]; \
+ _d##IDX[0] = _a[IDX & 0x3]; \
+ _d##IDX[1] = _a[(IDX >> 2) & 0x3]; \
+ _d##IDX[2] = _a[(IDX >> 4) & 0x3]; \
+ _d##IDX[3] = _a[(IDX >> 6) & 0x3]; \
+ if (VALIDATE_INT16_M64(d, _d##IDX) != TEST_SUCCESS) { \
+ return TEST_FAIL; \
+ }
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+// Note: NEON does not have a general-purpose shuffle instruction like SSE.
+// The implementation therefore special-cases a number of the most common
+// shuffle permutations.
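+// As a reminder of the encoding (consistent with the expectations below):
+// _MM_SHUFFLE(fp3, fp2, fp1, fp0) packs the four 2-bit indices as
+// (fp3 << 6) | (fp2 << 4) | (fp1 << 2) | fp0, and _mm_shuffle_ps(a, b, imm)
+// returns {a[imm & 3], a[(imm >> 2) & 3], b[(imm >> 4) & 3], b[(imm >> 6) & 3]};
+// e.g. _MM_SHUFFLE(0, 1, 2, 3) yields {a[3], a[2], b[1], b[0]}.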
+result_t test_mm_shuffle_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ result_t isValid = TEST_SUCCESS;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ // Test many permutations of the shuffle operation, including all
+ // permutations which have an optimized/customized implementation
+ __m128 ret;
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(0, 1, 2, 3));
+ if (!validateFloat(ret, _a[3], _a[2], _b[1], _b[0])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
+ if (!validateFloat(ret, _a[0], _a[1], _b[2], _b[3])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(0, 0, 1, 1));
+ if (!validateFloat(ret, _a[1], _a[1], _b[0], _b[0])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 1, 0, 2));
+ if (!validateFloat(ret, _a[2], _a[0], _b[1], _b[3])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 3, 2));
+ if (!validateFloat(ret, _a[2], _a[3], _b[0], _b[1])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 3, 0, 1));
+ if (!validateFloat(ret, _a[1], _a[0], _b[3], _b[2])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(0, 0, 2, 2));
+ if (!validateFloat(ret, _a[2], _a[2], _b[0], _b[0])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 2, 0, 0));
+ if (!validateFloat(ret, _a[0], _a[0], _b[2], _b[2])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 0, 2));
+ if (!validateFloat(ret, _a[2], _a[0], _b[2], _b[3])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 1, 3, 3));
+ if (!validateFloat(ret, _a[3], _a[3], _b[1], _b[1])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 1, 0));
+ if (!validateFloat(ret, _a[0], _a[1], _b[0], _b[2])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 0, 1));
+ if (!validateFloat(ret, _a[1], _a[0], _b[0], _b[2])) {
+ isValid = TEST_FAIL;
+ }
+ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 3, 2));
+ if (!validateFloat(ret, _a[2], _a[3], _b[0], _b[2])) {
+ isValid = TEST_FAIL;
+ }
+
+ return isValid;
+}
+
+result_t test_mm_sqrt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = (const float *) impl.mTestFloatPointer1;
+
+ float f0 = sqrtf(_a[0]);
+ float f1 = sqrtf(_a[1]);
+ float f2 = sqrtf(_a[2]);
+ float f3 = sqrtf(_a[3]);
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_sqrt_ps(a);
+
+#if defined(__arm__) && !defined(__arm64__) && !defined(_M_ARM64)
+ // Here, we ensure the ARMv7-A implementation of _mm_sqrt_ps() stays within
+ // a 1e-4 error tolerance relative to the C reference.
+ return validateFloatError(c, f0, f1, f2, f3, 0.0001f);
+#else
+ // Here, we ensure _mm_sqrt_ps() stays within a 1e-6 error tolerance
+ // relative to the C reference.
+ return validateFloatError(c, f0, f1, f2, f3, 0.000001f);
+#endif
+}
+
+result_t test_mm_sqrt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = (const float *) impl.mTestFloatPointer1;
+
+ float f0 = sqrtf(_a[0]);
+ float f1 = _a[1];
+ float f2 = _a[2];
+ float f3 = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_sqrt_ss(a);
+
+#if defined(__arm__) && !defined(__arm64__) && !defined(_M_ARM64)
+ // Here, we ensure the ARMv7-A implementation of _mm_sqrt_ss() stays within
+ // a 1e-4 error tolerance relative to the C reference.
+ return validateFloatError(c, f0, f1, f2, f3, 0.0001f);
+#else
+ // Here, we ensure _mm_sqrt_ss() stays within a 1e-6 error tolerance
+ // relative to the C reference.
+ return validateFloatError(c, f0, f1, f2, f3, 0.000001f);
+#endif
+}
+
+result_t test_mm_store_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int32_t *p = impl.mTestIntPointer1;
+ int32_t x = impl.mTestInts[iter];
+ int32_t y = impl.mTestInts[iter + 1];
+ int32_t z = impl.mTestInts[iter + 2];
+ int32_t w = impl.mTestInts[iter + 3];
+ __m128i a = _mm_set_epi32(x, y, z, w);
+ _mm_store_ps((float *) p, *(const __m128 *) &a);
+ ASSERT_RETURN(p[0] == w);
+ ASSERT_RETURN(p[1] == z);
+ ASSERT_RETURN(p[2] == y);
+ ASSERT_RETURN(p[3] == x);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_store_ps1(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float *p = impl.mTestFloatPointer1;
+ float d[4];
+
+ __m128 a = load_m128(p);
+ _mm_store_ps1(d, a);
+
+ ASSERT_RETURN(d[0] == *p);
+ ASSERT_RETURN(d[1] == *p);
+ ASSERT_RETURN(d[2] == *p);
+ ASSERT_RETURN(d[3] == *p);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_store_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float x = impl.mTestFloats[iter];
+ float p[4];
+
+ __m128 a = _mm_set_ss(x);
+ _mm_store_ss(p, a);
+ ASSERT_RETURN(p[0] == x);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_store1_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float *p = impl.mTestFloatPointer1;
+ float d[4];
+
+ __m128 a = load_m128(p);
+ _mm_store1_ps(d, a);
+
+ ASSERT_RETURN(d[0] == *p);
+ ASSERT_RETURN(d[1] == *p);
+ ASSERT_RETURN(d[2] == *p);
+ ASSERT_RETURN(d[3] == *p);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storeh_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p = impl.mTestFloatPointer1;
+ float d[4] = {1.0f, 2.0f, 3.0f, 4.0f};
+ __m128 a = _mm_load_ps(p);
+ __m64 *b = (__m64 *) d;
+
+ _mm_storeh_pi(b, a);
+ ASSERT_RETURN(d[0] == p[2]);
+ ASSERT_RETURN(d[1] == p[3]);
+ ASSERT_RETURN(d[2] == 3.0f);
+ ASSERT_RETURN(d[3] == 4.0f);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storel_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p = impl.mTestFloatPointer1;
+ float d[4] = {1.0f, 2.0f, 3.0f, 4.0f};
+ __m128 a = _mm_load_ps(p);
+ __m64 *b = (__m64 *) d;
+
+ _mm_storel_pi(b, a);
+ ASSERT_RETURN(d[0] == p[0]);
+ ASSERT_RETURN(d[1] == p[1]);
+ ASSERT_RETURN(d[2] == 3.0f);
+ ASSERT_RETURN(d[3] == 4.0f);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storer_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float *p = impl.mTestFloatPointer1;
+ float d[4];
+
+ __m128 a = load_m128(p);
+ _mm_storer_ps(d, a);
+
+ ASSERT_RETURN(d[0] == p[3]);
+ ASSERT_RETURN(d[1] == p[2]);
+ ASSERT_RETURN(d[2] == p[1]);
+ ASSERT_RETURN(d[3] == p[0]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storeu_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float *_a = impl.mTestFloatPointer1;
+ float f[4];
+ __m128 a = _mm_load_ps(_a);
+
+ _mm_storeu_ps(f, a);
+ return validateFloat(a, f[0], f[1], f[2], f[3]);
+}
+
+result_t test_mm_storeu_si16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // GCC versions before 11 do not implement the intrinsic function
+ // _mm_storeu_si16. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483
+ // for more information.
+#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+ return TEST_UNIMPL;
+#else
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i b;
+ __m128i a = load_m128i(_a);
+ _mm_storeu_si16(&b, a);
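+ // Only the lowest 16 bits of 'a' are stored; the remaining lanes of 'b'
+ // are compared against themselves below, so only lane 0 is effectively
+ // checked.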
+ int16_t *_b = (int16_t *) &b;
+ int16_t *_c = (int16_t *) &a;
+ return validateInt16(b, _c[0], _b[1], _b[2], _b[3], _b[4], _b[5], _b[6],
+ _b[7]);
+#endif
+}
+
+result_t test_mm_storeu_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // Versions of GCC prior to 9 do not implement the intrinsic function
+ // _mm_storeu_si64. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87558
+ // for more information.
+#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 9)
+ return TEST_UNIMPL;
+#else
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i b;
+ __m128i a = load_m128i(_a);
+ _mm_storeu_si64(&b, a);
+ int64_t *_b = (int64_t *) &b;
+ int64_t *_c = (int64_t *) &a;
+ return validateInt64(b, _c[0], _b[1]);
+#endif
+}
+
+result_t test_mm_stream_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ __m64 a = load_m64(_a);
+ __m64 p;
+
+ _mm_stream_pi(&p, a);
+ return validateInt64(p, _a[0]);
+}
+
+result_t test_mm_stream_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ __m128 a = load_m128(_a);
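+ // _mm_stream_ps is a non-temporal store that requires a 16-byte-aligned
+ // destination, hence the alignas(16) buffer.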
+ alignas(16) float p[4];
+
+ _mm_stream_ps(p, a);
+ ASSERT_RETURN(p[0] == _a[0]);
+ ASSERT_RETURN(p[1] == _a[1]);
+ ASSERT_RETURN(p[2] == _a[2]);
+ ASSERT_RETURN(p[3] == _a[3]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_sub_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float dx = _a[0] - _b[0];
+ float dy = _a[1] - _b[1];
+ float dz = _a[2] - _b[2];
+ float dw = _a[3] - _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_sub_ps(a, b);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_sub_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float dx = _a[0] - _b[0];
+ float dy = _a[1];
+ float dz = _a[2];
+ float dw = _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_sub_ss(a, b);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_ucomieq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // _mm_ucomieq_ss is equivalent to _mm_comieq_ss
+ return test_mm_comieq_ss(impl, iter);
+}
+
+result_t test_mm_ucomige_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // _mm_ucomige_ss is equivalent to _mm_comige_ss
+ return test_mm_comige_ss(impl, iter);
+}
+
+result_t test_mm_ucomigt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // _mm_ucomigt_ss is equivalent to _mm_comigt_ss
+ return test_mm_comigt_ss(impl, iter);
+}
+
+result_t test_mm_ucomile_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // _mm_ucomile_ss is equivalent to _mm_comile_ss
+ return test_mm_comile_ss(impl, iter);
+}
+
+result_t test_mm_ucomilt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // _mm_ucomilt_ss is equivalent to _mm_comilt_ss
+ return test_mm_comilt_ss(impl, iter);
+}
+
+result_t test_mm_ucomineq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // _mm_ucomineq_ss is equivalent to _mm_comineq_ss
+ return test_mm_comineq_ss(impl, iter);
+}
+
+result_t test_mm_undefined_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ __m128 a = _mm_undefined_ps();
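+ // The contents of an undefined vector cannot be validated directly;
+ // XORing it with itself yields a well-defined all-zero result.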
+ a = _mm_xor_ps(a, a);
+ return validateFloat(a, 0, 0, 0, 0);
+}
+
+result_t test_mm_unpackhi_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float *_a = impl.mTestFloatPointer1;
+ float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[2];
+ float f1 = _b[2];
+ float f2 = _a[3];
+ float f3 = _b[3];
+
+ __m128 a = _mm_load_ps(_a);
+ __m128 b = _mm_load_ps(_b);
+ __m128 c = _mm_unpackhi_ps(a, b);
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_unpacklo_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ float *_a = impl.mTestFloatPointer1;
+ float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[0];
+ float f1 = _b[0];
+ float f2 = _a[1];
+ float f3 = _b[1];
+
+ __m128 a = _mm_load_ps(_a);
+ __m128 b = _mm_load_ps(_b);
+ __m128 c = _mm_unpacklo_ps(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_xor_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestFloatPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestFloatPointer2;
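+ // _mm_xor_ps operates on raw bit patterns, so the reference result is
+ // computed on the int32_t views of the inputs and reinterpreted as float
+ // for validation.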
+
+ int32_t d0 = _a[0] ^ _b[0];
+ int32_t d1 = _a[1] ^ _b[1];
+ int32_t d2 = _a[2] ^ _b[2];
+ int32_t d3 = _a[3] ^ _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_xor_ps(a, b);
+
+ return validateFloat(c, *((float *) &d0), *((float *) &d1),
+ *((float *) &d2), *((float *) &d3));
+}
+
+/* SSE2 */
+result_t test_mm_add_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ d[0] = _a[0] + _b[0];
+ d[1] = _a[1] + _b[1];
+ d[2] = _a[2] + _b[2];
+ d[3] = _a[3] + _b[3];
+ d[4] = _a[4] + _b[4];
+ d[5] = _a[5] + _b[5];
+ d[6] = _a[6] + _b[6];
+ d[7] = _a[7] + _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_add_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_add_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ int32_t d[4];
+ d[0] = _a[0] + _b[0];
+ d[1] = _a[1] + _b[1];
+ d[2] = _a[2] + _b[2];
+ d[3] = _a[3] + _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_add_epi32(a, b);
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_add_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t d0 = _a[0] + _b[0];
+ int64_t d1 = _a[1] + _b[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_add_epi64(a, b);
+
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_add_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t d[16];
+ d[0] = _a[0] + _b[0];
+ d[1] = _a[1] + _b[1];
+ d[2] = _a[2] + _b[2];
+ d[3] = _a[3] + _b[3];
+ d[4] = _a[4] + _b[4];
+ d[5] = _a[5] + _b[5];
+ d[6] = _a[6] + _b[6];
+ d[7] = _a[7] + _b[7];
+ d[8] = _a[8] + _b[8];
+ d[9] = _a[9] + _b[9];
+ d[10] = _a[10] + _b[10];
+ d[11] = _a[11] + _b[11];
+ d[12] = _a[12] + _b[12];
+ d[13] = _a[13] + _b[13];
+ d[14] = _a[14] + _b[14];
+ d[15] = _a[15] + _b[15];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_add_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_add_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] + _b[0];
+ double d1 = _a[1] + _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_add_pd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_add_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] + _b[0];
+ double d1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_add_sd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_add_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t d0 = _a[0] + _b[0];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_add_si64(a, b);
+
+ return validateInt64(c, d0);
+}
+
+result_t test_mm_adds_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int32_t d[8];
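+ // Reference model for _mm_adds_epi16: add in 32-bit precision, then clamp
+ // to the signed 16-bit range [-32768, 32767].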
+ d[0] = (int32_t) _a[0] + (int32_t) _b[0];
+ if (d[0] > 32767)
+ d[0] = 32767;
+ if (d[0] < -32768)
+ d[0] = -32768;
+ d[1] = (int32_t) _a[1] + (int32_t) _b[1];
+ if (d[1] > 32767)
+ d[1] = 32767;
+ if (d[1] < -32768)
+ d[1] = -32768;
+ d[2] = (int32_t) _a[2] + (int32_t) _b[2];
+ if (d[2] > 32767)
+ d[2] = 32767;
+ if (d[2] < -32768)
+ d[2] = -32768;
+ d[3] = (int32_t) _a[3] + (int32_t) _b[3];
+ if (d[3] > 32767)
+ d[3] = 32767;
+ if (d[3] < -32768)
+ d[3] = -32768;
+ d[4] = (int32_t) _a[4] + (int32_t) _b[4];
+ if (d[4] > 32767)
+ d[4] = 32767;
+ if (d[4] < -32768)
+ d[4] = -32768;
+ d[5] = (int32_t) _a[5] + (int32_t) _b[5];
+ if (d[5] > 32767)
+ d[5] = 32767;
+ if (d[5] < -32768)
+ d[5] = -32768;
+ d[6] = (int32_t) _a[6] + (int32_t) _b[6];
+ if (d[6] > 32767)
+ d[6] = 32767;
+ if (d[6] < -32768)
+ d[6] = -32768;
+ d[7] = (int32_t) _a[7] + (int32_t) _b[7];
+ if (d[7] > 32767)
+ d[7] = 32767;
+ if (d[7] < -32768)
+ d[7] = -32768;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ __m128i c = _mm_adds_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_adds_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int16_t d[16];
+ for (int i = 0; i < 16; i++) {
+ d[i] = (int16_t) _a[i] + (int16_t) _b[i];
+ if (d[i] > 127)
+ d[i] = 127;
+ if (d[i] < -128)
+ d[i] = -128;
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_adds_epi8(a, b);
+
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_adds_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint32_t max = 0xFFFF;
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+
+ uint16_t d[8];
+ d[0] = (uint32_t) _a[0] + (uint32_t) _b[0] > max ? max : _a[0] + _b[0];
+ d[1] = (uint32_t) _a[1] + (uint32_t) _b[1] > max ? max : _a[1] + _b[1];
+ d[2] = (uint32_t) _a[2] + (uint32_t) _b[2] > max ? max : _a[2] + _b[2];
+ d[3] = (uint32_t) _a[3] + (uint32_t) _b[3] > max ? max : _a[3] + _b[3];
+ d[4] = (uint32_t) _a[4] + (uint32_t) _b[4] > max ? max : _a[4] + _b[4];
+ d[5] = (uint32_t) _a[5] + (uint32_t) _b[5] > max ? max : _a[5] + _b[5];
+ d[6] = (uint32_t) _a[6] + (uint32_t) _b[6] > max ? max : _a[6] + _b[6];
+ d[7] = (uint32_t) _a[7] + (uint32_t) _b[7] > max ? max : _a[7] + _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_adds_epu16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_adds_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ uint8_t d[16];
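+ // Reference model for _mm_adds_epu8: perform the 8-bit addition and treat
+ // unsigned wrap-around (d < a) as overflow, saturating such lanes to 255.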
+ d[0] = (uint8_t) _a[0] + (uint8_t) _b[0];
+ if (d[0] < (uint8_t) _a[0])
+ d[0] = 255;
+ d[1] = (uint8_t) _a[1] + (uint8_t) _b[1];
+ if (d[1] < (uint8_t) _a[1])
+ d[1] = 255;
+ d[2] = (uint8_t) _a[2] + (uint8_t) _b[2];
+ if (d[2] < (uint8_t) _a[2])
+ d[2] = 255;
+ d[3] = (uint8_t) _a[3] + (uint8_t) _b[3];
+ if (d[3] < (uint8_t) _a[3])
+ d[3] = 255;
+ d[4] = (uint8_t) _a[4] + (uint8_t) _b[4];
+ if (d[4] < (uint8_t) _a[4])
+ d[4] = 255;
+ d[5] = (uint8_t) _a[5] + (uint8_t) _b[5];
+ if (d[5] < (uint8_t) _a[5])
+ d[5] = 255;
+ d[6] = (uint8_t) _a[6] + (uint8_t) _b[6];
+ if (d[6] < (uint8_t) _a[6])
+ d[6] = 255;
+ d[7] = (uint8_t) _a[7] + (uint8_t) _b[7];
+ if (d[7] < (uint8_t) _a[7])
+ d[7] = 255;
+ d[8] = (uint8_t) _a[8] + (uint8_t) _b[8];
+ if (d[8] < (uint8_t) _a[8])
+ d[8] = 255;
+ d[9] = (uint8_t) _a[9] + (uint8_t) _b[9];
+ if (d[9] < (uint8_t) _a[9])
+ d[9] = 255;
+ d[10] = (uint8_t) _a[10] + (uint8_t) _b[10];
+ if (d[10] < (uint8_t) _a[10])
+ d[10] = 255;
+ d[11] = (uint8_t) _a[11] + (uint8_t) _b[11];
+ if (d[11] < (uint8_t) _a[11])
+ d[11] = 255;
+ d[12] = (uint8_t) _a[12] + (uint8_t) _b[12];
+ if (d[12] < (uint8_t) _a[12])
+ d[12] = 255;
+ d[13] = (uint8_t) _a[13] + (uint8_t) _b[13];
+ if (d[13] < (uint8_t) _a[13])
+ d[13] = 255;
+ d[14] = (uint8_t) _a[14] + (uint8_t) _b[14];
+ if (d[14] < (uint8_t) _a[14])
+ d[14] = 255;
+ d[15] = (uint8_t) _a[15] + (uint8_t) _b[15];
+ if (d[15] < (uint8_t) _a[15])
+ d[15] = 255;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_adds_epu8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_and_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestFloatPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestFloatPointer2;
+
+ int64_t d0 = _a[0] & _b[0];
+ int64_t d1 = _a[1] & _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_and_pd(a, b);
+
+ return validateDouble(c, *((double *) &d0), *((double *) &d1));
+}
+
+result_t test_mm_and_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_and_si128(a, b);
+ // now for the assertion...
+ const uint32_t *ia = (const uint32_t *) &a;
+ const uint32_t *ib = (const uint32_t *) &b;
+ uint32_t r[4];
+ r[0] = ia[0] & ib[0];
+ r[1] = ia[1] & ib[1];
+ r[2] = ia[2] & ib[2];
+ r[3] = ia[3] & ib[3];
+ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+ result_t res = VALIDATE_INT32_M128(c, r);
+ if (res) {
+ res = VALIDATE_INT32_M128(ret, r);
+ }
+ return res;
+}
+
+result_t test_mm_andnot_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_andnot_pd(a, b);
+
+ // ANDNOT computes the bitwise AND of the complement of 'a' with 'b'.
+ // Bitwise operations are not defined for the float/double types, so the
+ // reference result is computed on the uint64_t bit patterns of 'a' and 'b'.
+ const uint64_t *ia = (const uint64_t *) &a;
+ const uint64_t *ib = (const uint64_t *) &b;
+ uint64_t r0 = ~ia[0] & ib[0];
+ uint64_t r1 = ~ia[1] & ib[1];
+ return validateUInt64(*(const __m128i *) &c, r0, r1);
+}
+
+result_t test_mm_andnot_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_andnot_si128(a, b);
+ // now for the assertion...
+ const uint32_t *ia = (const uint32_t *) &a;
+ const uint32_t *ib = (const uint32_t *) &b;
+ uint32_t r[4];
+ r[0] = ~ia[0] & ib[0];
+ r[1] = ~ia[1] & ib[1];
+ r[2] = ~ia[2] & ib[2];
+ r[3] = ~ia[3] & ib[3];
+ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+ result_t res = VALIDATE_INT32_M128(c, r);
+ if (res) {
+ res = VALIDATE_INT32_M128(ret, r);
+ }
+ return res;
+}
+
+result_t test_mm_avg_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ uint16_t d[8];
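+ // Reference model for _mm_avg_epu16: the rounded unsigned average
+ // (a + b + 1) >> 1, computed with integer promotion so the +1 cannot
+ // overflow 16 bits.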
+ d[0] = ((uint16_t) _a[0] + (uint16_t) _b[0] + 1) >> 1;
+ d[1] = ((uint16_t) _a[1] + (uint16_t) _b[1] + 1) >> 1;
+ d[2] = ((uint16_t) _a[2] + (uint16_t) _b[2] + 1) >> 1;
+ d[3] = ((uint16_t) _a[3] + (uint16_t) _b[3] + 1) >> 1;
+ d[4] = ((uint16_t) _a[4] + (uint16_t) _b[4] + 1) >> 1;
+ d[5] = ((uint16_t) _a[5] + (uint16_t) _b[5] + 1) >> 1;
+ d[6] = ((uint16_t) _a[6] + (uint16_t) _b[6] + 1) >> 1;
+ d[7] = ((uint16_t) _a[7] + (uint16_t) _b[7] + 1) >> 1;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_avg_epu16(a, b);
+ return VALIDATE_UINT16_M128(c, d);
+}
+
+result_t test_mm_avg_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ uint8_t d[16];
+ d[0] = ((uint8_t) _a[0] + (uint8_t) _b[0] + 1) >> 1;
+ d[1] = ((uint8_t) _a[1] + (uint8_t) _b[1] + 1) >> 1;
+ d[2] = ((uint8_t) _a[2] + (uint8_t) _b[2] + 1) >> 1;
+ d[3] = ((uint8_t) _a[3] + (uint8_t) _b[3] + 1) >> 1;
+ d[4] = ((uint8_t) _a[4] + (uint8_t) _b[4] + 1) >> 1;
+ d[5] = ((uint8_t) _a[5] + (uint8_t) _b[5] + 1) >> 1;
+ d[6] = ((uint8_t) _a[6] + (uint8_t) _b[6] + 1) >> 1;
+ d[7] = ((uint8_t) _a[7] + (uint8_t) _b[7] + 1) >> 1;
+ d[8] = ((uint8_t) _a[8] + (uint8_t) _b[8] + 1) >> 1;
+ d[9] = ((uint8_t) _a[9] + (uint8_t) _b[9] + 1) >> 1;
+ d[10] = ((uint8_t) _a[10] + (uint8_t) _b[10] + 1) >> 1;
+ d[11] = ((uint8_t) _a[11] + (uint8_t) _b[11] + 1) >> 1;
+ d[12] = ((uint8_t) _a[12] + (uint8_t) _b[12] + 1) >> 1;
+ d[13] = ((uint8_t) _a[13] + (uint8_t) _b[13] + 1) >> 1;
+ d[14] = ((uint8_t) _a[14] + (uint8_t) _b[14] + 1) >> 1;
+ d[15] = ((uint8_t) _a[15] + (uint8_t) _b[15] + 1) >> 1;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_avg_epu8(a, b);
+ return VALIDATE_UINT8_M128(c, d);
+}
+
+result_t test_mm_bslli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_slli_si128(impl, iter);
+}
+
+result_t test_mm_bsrli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_srli_si128(impl, iter);
+}
+
+result_t test_mm_castpd_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const __m128d a = load_m128d(_a);
+ const __m128 _c = load_m128(_a);
+
+ __m128 r = _mm_castpd_ps(a);
+
+ return validate128(r, _c);
+}
+
+result_t test_mm_castpd_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const __m128d a = load_m128d(_a);
+ const __m128i *_c = (const __m128i *) _a;
+
+ __m128i r = _mm_castpd_si128(a);
+
+ return validate128(r, *_c);
+}
+
+result_t test_mm_castps_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const __m128 a = load_m128(_a);
+ const __m128d *_c = (const __m128d *) _a;
+
+ __m128d r = _mm_castps_pd(a);
+
+ return validate128(r, *_c);
+}
+
+result_t test_mm_castps_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+
+ const __m128i *_c = (const __m128i *) _a;
+
+ const __m128 a = load_m128(_a);
+ __m128i r = _mm_castps_si128(a);
+
+ return validate128(r, *_c);
+}
+
+result_t test_mm_castsi128_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+
+ const __m128d *_c = (const __m128d *) _a;
+
+ const __m128i a = load_m128i(_a);
+ __m128d r = _mm_castsi128_pd(a);
+
+ return validate128(r, *_c);
+}
+
+result_t test_mm_castsi128_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+
+ const __m128 *_c = (const __m128 *) _a;
+
+ const __m128i a = load_m128i(_a);
+ __m128 r = _mm_castsi128_ps(a);
+
+ return validate128(r, *_c);
+}
+
+result_t test_mm_clflush(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ /* FIXME: there is no portable way to verify that a cache line was
+ * flushed, so assume _mm_clflush behaves as intended. */
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_cmpeq_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[8];
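+ // Integer compare intrinsics produce per-lane masks: all ones when the
+ // predicate holds and all zeros otherwise; ~UINT16_C(0) spells the
+ // all-ones 16-bit pattern.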
+ d[0] = (_a[0] == _b[0]) ? ~UINT16_C(0) : 0x0;
+ d[1] = (_a[1] == _b[1]) ? ~UINT16_C(0) : 0x0;
+ d[2] = (_a[2] == _b[2]) ? ~UINT16_C(0) : 0x0;
+ d[3] = (_a[3] == _b[3]) ? ~UINT16_C(0) : 0x0;
+ d[4] = (_a[4] == _b[4]) ? ~UINT16_C(0) : 0x0;
+ d[5] = (_a[5] == _b[5]) ? ~UINT16_C(0) : 0x0;
+ d[6] = (_a[6] == _b[6]) ? ~UINT16_C(0) : 0x0;
+ d[7] = (_a[7] == _b[7]) ? ~UINT16_C(0) : 0x0;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmpeq_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_cmpeq_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+
+ int32_t d[4];
+ d[0] = (_a[0] == _b[0]) ? ~UINT32_C(0) : 0x0;
+ d[1] = (_a[1] == _b[1]) ? ~UINT32_C(0) : 0x0;
+ d[2] = (_a[2] == _b[2]) ? ~UINT32_C(0) : 0x0;
+ d[3] = (_a[3] == _b[3]) ? ~UINT32_C(0) : 0x0;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmpeq_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_cmpeq_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t d[16];
+ d[0] = (_a[0] == _b[0]) ? ~UINT8_C(0) : 0x00;
+ d[1] = (_a[1] == _b[1]) ? ~UINT8_C(0) : 0x00;
+ d[2] = (_a[2] == _b[2]) ? ~UINT8_C(0) : 0x00;
+ d[3] = (_a[3] == _b[3]) ? ~UINT8_C(0) : 0x00;
+ d[4] = (_a[4] == _b[4]) ? ~UINT8_C(0) : 0x00;
+ d[5] = (_a[5] == _b[5]) ? ~UINT8_C(0) : 0x00;
+ d[6] = (_a[6] == _b[6]) ? ~UINT8_C(0) : 0x00;
+ d[7] = (_a[7] == _b[7]) ? ~UINT8_C(0) : 0x00;
+ d[8] = (_a[8] == _b[8]) ? ~UINT8_C(0) : 0x00;
+ d[9] = (_a[9] == _b[9]) ? ~UINT8_C(0) : 0x00;
+ d[10] = (_a[10] == _b[10]) ? ~UINT8_C(0) : 0x00;
+ d[11] = (_a[11] == _b[11]) ? ~UINT8_C(0) : 0x00;
+ d[12] = (_a[12] == _b[12]) ? ~UINT8_C(0) : 0x00;
+ d[13] = (_a[13] == _b[13]) ? ~UINT8_C(0) : 0x00;
+ d[14] = (_a[14] == _b[14]) ? ~UINT8_C(0) : 0x00;
+ d[15] = (_a[15] == _b[15]) ? ~UINT8_C(0) : 0x00;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmpeq_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_cmpeq_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] == _b[0]) ? 0xffffffffffffffff : 0;
+ uint64_t d1 = (_a[1] == _b[1]) ? 0xffffffffffffffff : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpeq_pd(a, b);
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpeq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ const uint64_t d0 = (_a[0] == _b[0]) ? ~UINT64_C(0) : 0;
+ const uint64_t d1 = ((const uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpeq_sd(a, b);
+
+ return validateDouble(c, *(const double *) &d0, *(const double *) &d1);
+}
+
+result_t test_mm_cmpge_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = (_a[1] >= _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpge_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpge_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpge_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpgt_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ uint16_t d[8];
+ d[0] = _a[0] > _b[0] ? ~UINT16_C(0) : 0;
+ d[1] = _a[1] > _b[1] ? ~UINT16_C(0) : 0;
+ d[2] = _a[2] > _b[2] ? ~UINT16_C(0) : 0;
+ d[3] = _a[3] > _b[3] ? ~UINT16_C(0) : 0;
+ d[4] = _a[4] > _b[4] ? ~UINT16_C(0) : 0;
+ d[5] = _a[5] > _b[5] ? ~UINT16_C(0) : 0;
+ d[6] = _a[6] > _b[6] ? ~UINT16_C(0) : 0;
+ d[7] = _a[7] > _b[7] ? ~UINT16_C(0) : 0;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmpgt_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_cmpgt_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ int32_t result[4];
+
+ result[0] = _a[0] > _b[0] ? -1 : 0;
+ result[1] = _a[1] > _b[1] ? -1 : 0;
+ result[2] = _a[2] > _b[2] ? -1 : 0;
+ result[3] = _a[3] > _b[3] ? -1 : 0;
+
+ __m128i iret = _mm_cmpgt_epi32(a, b);
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmpgt_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t d[16];
+ d[0] = (_a[0] > _b[0]) ? ~UINT8_C(0) : 0x00;
+ d[1] = (_a[1] > _b[1]) ? ~UINT8_C(0) : 0x00;
+ d[2] = (_a[2] > _b[2]) ? ~UINT8_C(0) : 0x00;
+ d[3] = (_a[3] > _b[3]) ? ~UINT8_C(0) : 0x00;
+ d[4] = (_a[4] > _b[4]) ? ~UINT8_C(0) : 0x00;
+ d[5] = (_a[5] > _b[5]) ? ~UINT8_C(0) : 0x00;
+ d[6] = (_a[6] > _b[6]) ? ~UINT8_C(0) : 0x00;
+ d[7] = (_a[7] > _b[7]) ? ~UINT8_C(0) : 0x00;
+ d[8] = (_a[8] > _b[8]) ? ~UINT8_C(0) : 0x00;
+ d[9] = (_a[9] > _b[9]) ? ~UINT8_C(0) : 0x00;
+ d[10] = (_a[10] > _b[10]) ? ~UINT8_C(0) : 0x00;
+ d[11] = (_a[11] > _b[11]) ? ~UINT8_C(0) : 0x00;
+ d[12] = (_a[12] > _b[12]) ? ~UINT8_C(0) : 0x00;
+ d[13] = (_a[13] > _b[13]) ? ~UINT8_C(0) : 0x00;
+ d[14] = (_a[14] > _b[14]) ? ~UINT8_C(0) : 0x00;
+ d[15] = (_a[15] > _b[15]) ? ~UINT8_C(0) : 0x00;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmpgt_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_cmpgt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = (_a[1] > _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpgt_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpgt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpgt_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmple_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = (_a[1] <= _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmple_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmple_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmple_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmplt_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ uint16_t d[8];
+ d[0] = _a[0] < _b[0] ? ~UINT16_C(0) : 0;
+ d[1] = _a[1] < _b[1] ? ~UINT16_C(0) : 0;
+ d[2] = _a[2] < _b[2] ? ~UINT16_C(0) : 0;
+ d[3] = _a[3] < _b[3] ? ~UINT16_C(0) : 0;
+ d[4] = _a[4] < _b[4] ? ~UINT16_C(0) : 0;
+ d[5] = _a[5] < _b[5] ? ~UINT16_C(0) : 0;
+ d[6] = _a[6] < _b[6] ? ~UINT16_C(0) : 0;
+ d[7] = _a[7] < _b[7] ? ~UINT16_C(0) : 0;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmplt_epi16(a, b);
+
+ return VALIDATE_UINT16_M128(c, d);
+}
+
+result_t test_mm_cmplt_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ int32_t result[4];
+ result[0] = _a[0] < _b[0] ? -1 : 0;
+ result[1] = _a[1] < _b[1] ? -1 : 0;
+ result[2] = _a[2] < _b[2] ? -1 : 0;
+ result[3] = _a[3] < _b[3] ? -1 : 0;
+
+ __m128i iret = _mm_cmplt_epi32(a, b);
+ return VALIDATE_INT32_M128(iret, result);
+}
+
+result_t test_mm_cmplt_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t d[16];
+ d[0] = (_a[0] < _b[0]) ? ~UINT8_C(0) : 0x00;
+ d[1] = (_a[1] < _b[1]) ? ~UINT8_C(0) : 0x00;
+ d[2] = (_a[2] < _b[2]) ? ~UINT8_C(0) : 0x00;
+ d[3] = (_a[3] < _b[3]) ? ~UINT8_C(0) : 0x00;
+ d[4] = (_a[4] < _b[4]) ? ~UINT8_C(0) : 0x00;
+ d[5] = (_a[5] < _b[5]) ? ~UINT8_C(0) : 0x00;
+ d[6] = (_a[6] < _b[6]) ? ~UINT8_C(0) : 0x00;
+ d[7] = (_a[7] < _b[7]) ? ~UINT8_C(0) : 0x00;
+ d[8] = (_a[8] < _b[8]) ? ~UINT8_C(0) : 0x00;
+ d[9] = (_a[9] < _b[9]) ? ~UINT8_C(0) : 0x00;
+ d[10] = (_a[10] < _b[10]) ? ~UINT8_C(0) : 0x00;
+ d[11] = (_a[11] < _b[11]) ? ~UINT8_C(0) : 0x00;
+ d[12] = (_a[12] < _b[12]) ? ~UINT8_C(0) : 0x00;
+ d[13] = (_a[13] < _b[13]) ? ~UINT8_C(0) : 0x00;
+ d[14] = (_a[14] < _b[14]) ? ~UINT8_C(0) : 0x00;
+ d[15] = (_a[15] < _b[15]) ? ~UINT8_C(0) : 0x00;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmplt_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_cmplt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ int64_t f0 = (_a[0] < _b[0]) ? ~UINT64_C(0) : UINT64_C(0);
+ int64_t f1 = (_a[1] < _b[1]) ? ~UINT64_C(0) : UINT64_C(0);
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmplt_pd(a, b);
+
+ return validateDouble(c, *(double *) &f0, *(double *) &f1);
+}
+
+result_t test_mm_cmplt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = (_a[0] < _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmplt_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpneq_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ int64_t f0 = (_a[0] != _b[0]) ? ~UINT64_C(0) : UINT64_C(0);
+ int64_t f1 = (_a[1] != _b[1]) ? ~UINT64_C(0) : UINT64_C(0);
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpneq_pd(a, b);
+
+ return validateDouble(c, *(double *) &f0, *(double *) &f1);
+}
+
+result_t test_mm_cmpneq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+
+ int64_t f0 = (_a[0] != _b[0]) ? ~UINT64_C(0) : UINT64_C(0);
+ int64_t f1 = ((int64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpneq_sd(a, b);
+
+ return validateDouble(c, *(double *) &f0, *(double *) &f1);
+}
+
+result_t test_mm_cmpnge_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = !(_a[1] >= _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpnge_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpnge_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpnge_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpngt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = !(_a[1] > _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpngt_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpngt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpngt_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpnle_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = !(_a[1] <= _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpnle_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpnle_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpnle_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpnlt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] < _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = !(_a[1] < _b[1]) ? ~UINT64_C(0) : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpnlt_pd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpnlt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ uint64_t d0 = !(_a[0] < _b[0]) ? ~UINT64_C(0) : 0;
+ uint64_t d1 = ((uint64_t *) _a)[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_cmpnlt_sd(a, b);
+
+ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+}
+
+result_t test_mm_cmpord_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ __m128d a = _mm_load_pd(_a);
+ __m128d b = _mm_load_pd(_b);
+
+ double result[2];
+
+ for (uint32_t i = 0; i < 2; i++) {
+ result[i] = cmp_noNaN(_a[i], _b[i]);
+ }
+
+ __m128d ret = _mm_cmpord_pd(a, b);
+
+ return validateDouble(ret, result[0], result[1]);
+}
+
+result_t test_mm_cmpord_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ __m128d a = _mm_load_pd(_a);
+ __m128d b = _mm_load_pd(_b);
+
+ double c0 = cmp_noNaN(_a[0], _b[0]);
+ double c1 = _a[1];
+
+ __m128d ret = _mm_cmpord_sd(a, b);
+ return validateDouble(ret, c0, c1);
+}
+
+result_t test_mm_cmpunord_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ __m128d a = _mm_load_pd(_a);
+ __m128d b = _mm_load_pd(_b);
+
+ double result[2];
+ result[0] = cmp_hasNaN(_a[0], _b[0]);
+ result[1] = cmp_hasNaN(_a[1], _b[1]);
+
+ __m128d ret = _mm_cmpunord_pd(a, b);
+ return validateDouble(ret, result[0], result[1]);
+}
+
+result_t test_mm_cmpunord_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *_a = (double *) impl.mTestFloatPointer1;
+ double *_b = (double *) impl.mTestFloatPointer2;
+ __m128d a = _mm_load_pd(_a);
+ __m128d b = _mm_load_pd(_b);
+
+ double result[2];
+ result[0] = cmp_hasNaN(_a[0], _b[0]);
+ result[1] = _a[1];
+
+ __m128d ret = _mm_cmpunord_sd(a, b);
+ return validateDouble(ret, result[0], result[1]);
+}
+
+result_t test_mm_comieq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comieq_sd correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ int32_t _c = (_a[0] == _b[0]) ? 1 : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ int32_t c = _mm_comieq_sd(a, b);
+
+ ASSERT_RETURN(c == _c);
+ return TEST_SUCCESS;
+#endif
+}
+
+result_t test_mm_comige_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ int32_t _c = (_a[0] >= _b[0]) ? 1 : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ int32_t c = _mm_comige_sd(a, b);
+
+ ASSERT_RETURN(c == _c);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_comigt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ int32_t _c = (_a[0] > _b[0]) ? 1 : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ int32_t c = _mm_comigt_sd(a, b);
+
+ ASSERT_RETURN(c == _c);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_comile_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comile_sd correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ int32_t _c = (_a[0] <= _b[0]) ? 1 : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ int32_t c = _mm_comile_sd(a, b);
+
+ ASSERT_RETURN(c == _c);
+ return TEST_SUCCESS;
+#endif
+}
+
+result_t test_mm_comilt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comilt_sd correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ int32_t _c = (_a[0] < _b[0]) ? 1 : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ int32_t c = _mm_comilt_sd(a, b);
+
+ ASSERT_RETURN(c == _c);
+ return TEST_SUCCESS;
+#endif
+}
+
+result_t test_mm_comineq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME:
+ // GCC does not implement _mm_comineq_sd correctly.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+ // information.
+#if defined(__GNUC__) && !defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ int32_t _c = (_a[0] != _b[0]) ? 1 : 0;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ int32_t c = _mm_comineq_sd(a, b);
+
+ ASSERT_RETURN(c == _c);
+ return TEST_SUCCESS;
+#endif
+}
+
+result_t test_mm_cvtepi32_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ double trun[2] = {(double) _a[0], (double) _a[1]};
+
+ __m128d ret = _mm_cvtepi32_pd(a);
+ return validateDouble(ret, trun[0], trun[1]);
+}
+
+result_t test_mm_cvtepi32_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ float trun[4];
+ for (uint32_t i = 0; i < 4; i++) {
+ trun[i] = (float) _a[i];
+ }
+
+ __m128 ret = _mm_cvtepi32_ps(a);
+ return validateFloat(ret, trun[0], trun[1], trun[2], trun[3]);
+}
+
+result_t test_mm_cvtpd_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ int32_t d[2];
+
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
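+ // _MM_ROUND_NEAREST rounds ties to even (banker's rounding), e.g.
+ // 2.5 -> 2 and 3.5 -> 4, which is what bankersRounding() models here.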
+ d[0] = (int32_t) (bankersRounding(_a[0]));
+ d[1] = (int32_t) (bankersRounding(_a[1]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d[0] = (int32_t) (floor(_a[0]));
+ d[1] = (int32_t) (floor(_a[1]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d[0] = (int32_t) (ceil(_a[0]));
+ d[1] = (int32_t) (ceil(_a[1]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d[0] = (int32_t) (_a[0]);
+ d[1] = (int32_t) (_a[1]);
+ break;
+ }
+
+#if defined(__ARM_FEATURE_FRINT) && !defined(__clang__)
+ /* Values that cannot fit into a 32-bit integer should instead return
+ * the indefinite integer value (INT32_MIN). This behaviour is
+ * currently only emulated when using the round-to-integral
+ * instructions. */
+ for (int i = 0; i < 2; i++) {
+ if (_a[i] > (float) INT32_MAX || _a[i] < (float) INT32_MIN)
+ d[i] = INT32_MIN;
+ }
+#endif
+
+ __m128d a = load_m128d(_a);
+ __m128i ret = _mm_cvtpd_epi32(a);
+
+ return validateInt32(ret, d[0], d[1], 0, 0);
+}
+
+result_t test_mm_cvtpd_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ int32_t d[2];
+
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d[0] = (int32_t) (bankersRounding(_a[0]));
+ d[1] = (int32_t) (bankersRounding(_a[1]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d[0] = (int32_t) (floor(_a[0]));
+ d[1] = (int32_t) (floor(_a[1]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d[0] = (int32_t) (ceil(_a[0]));
+ d[1] = (int32_t) (ceil(_a[1]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d[0] = (int32_t) (_a[0]);
+ d[1] = (int32_t) (_a[1]);
+ break;
+ }
+
+ __m128d a = load_m128d(_a);
+ __m64 ret = _mm_cvtpd_pi32(a);
+
+ return VALIDATE_INT32_M64(ret, d);
+}
+
+result_t test_mm_cvtpd_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ float f0 = (float) _a[0];
+ float f1 = (float) _a[1];
+ const __m128d a = load_m128d(_a);
+
+ __m128 r = _mm_cvtpd_ps(a);
+
+ return validateFloat(r, f0, f1, 0, 0);
+}
+
+result_t test_mm_cvtpi32_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ __m64 a = load_m64(_a);
+
+ double trun[2] = {(double) _a[0], (double) _a[1]};
+
+ __m128d ret = _mm_cvtpi32_pd(a);
+
+ return validateDouble(ret, trun[0], trun[1]);
+}
+
+result_t test_mm_cvtps_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ __m128 a = load_m128(_a);
+ int32_t d[4];
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ for (uint32_t i = 0; i < 4; i++) {
+ d[i] = (int32_t) (bankersRounding(_a[i]));
+ }
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ for (uint32_t i = 0; i < 4; i++) {
+ d[i] = (int32_t) (floorf(_a[i]));
+ }
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ for (uint32_t i = 0; i < 4; i++) {
+ d[i] = (int32_t) (ceilf(_a[i]));
+ }
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ for (uint32_t i = 0; i < 4; i++) {
+ d[i] = (int32_t) (_a[i]);
+ }
+ break;
+ }
+
+ __m128i ret = _mm_cvtps_epi32(a);
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_cvtps_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ double d0 = (double) _a[0];
+ double d1 = (double) _a[1];
+ const __m128 a = load_m128(_a);
+
+ __m128d r = _mm_cvtps_pd(a);
+
+ return validateDouble(r, d0, d1);
+}
+
+result_t test_mm_cvtsd_f64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ double d = _a[0];
+
+ const __m128d *a = (const __m128d *) _a;
+ double r = _mm_cvtsd_f64(*a);
+
+ return r == d ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtsd_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ int32_t d;
+
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d = (int32_t) (bankersRounding(_a[0]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d = (int32_t) (floor(_a[0]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d = (int32_t) (ceil(_a[0]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d = (int32_t) (_a[0]);
+ break;
+ }
+
+ __m128d a = load_m128d(_a);
+ int32_t ret = _mm_cvtsd_si32(a);
+
+ return ret == d ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtsd_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ int64_t d;
+
+ switch (iter & 0x3) {
+ case 0:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ d = (int64_t) (bankersRounding(_a[0]));
+ break;
+ case 1:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ d = (int64_t) (floor(_a[0]));
+ break;
+ case 2:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ d = (int64_t) (ceil(_a[0]));
+ break;
+ case 3:
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ d = (int64_t) (_a[0]);
+ break;
+ }
+
+ __m128d a = load_m128d(_a);
+ int64_t ret = _mm_cvtsd_si64(a);
+
+ return ret == d ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtsd_si64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_cvtsd_si64(impl, iter);
+}
+
+result_t test_mm_cvtsd_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ float f0 = (float) _b[0];
+ float f1 = (float) _a[1];
+ float f2 = (float) _a[2];
+ float f3 = (float) _a[3];
+
+ __m128 a = load_m128(_a);
+ __m128d b = load_m128d(_b);
+ __m128 c = _mm_cvtsd_ss(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_cvtsi128_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+
+ int32_t d = _a[0];
+
+ __m128i a = load_m128i(_a);
+ int c = _mm_cvtsi128_si32(a);
+
+ return d == c ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtsi128_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ int64_t d = _a[0];
+
+ __m128i a = load_m128i(_a);
+ int64_t c = _mm_cvtsi128_si64(a);
+
+ return d == c ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvtsi128_si64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_cvtsi128_si64(impl, iter);
+}
+
+result_t test_mm_cvtsi32_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const int32_t b = (const int32_t) impl.mTestInts[iter];
+
+ __m128d a = load_m128d(_a);
+ __m128d c = _mm_cvtsi32_sd(a, b);
+
+ return validateDouble(c, b, _a[1]);
+}
+
+result_t test_mm_cvtsi32_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+
+ int32_t d = _a[0];
+
+ __m128i c = _mm_cvtsi32_si128(*_a);
+
+ return validateInt32(c, d, 0, 0, 0);
+}
+
+result_t test_mm_cvtsi64_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const int64_t b = (const int64_t) impl.mTestInts[iter];
+
+ __m128d a = load_m128d(_a);
+ __m128d c = _mm_cvtsi64_sd(a, b);
+
+ return validateDouble(c, (double) b, _a[1]);
+}
+
+result_t test_mm_cvtsi64_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ int64_t d = _a[0];
+
+ __m128i c = _mm_cvtsi64_si128(*_a);
+
+ return validateInt64(c, d, 0);
+}
+
+result_t test_mm_cvtsi64x_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_cvtsi64_sd(impl, iter);
+}
+
+result_t test_mm_cvtsi64x_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_cvtsi64_si128(impl, iter);
+}
+
+result_t test_mm_cvtss_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ double d0 = double(_b[0]);
+ double d1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128 b = load_m128(_b);
+ __m128d c = _mm_cvtss_sd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_cvttpd_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ __m128d a = load_m128d(_a);
+ int32_t d0 = (int32_t) (_a[0]);
+ int32_t d1 = (int32_t) (_a[1]);
+
+ __m128i ret = _mm_cvttpd_epi32(a);
+ return validateInt32(ret, d0, d1, 0, 0);
+}
+
+result_t test_mm_cvttpd_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ __m128d a = load_m128d(_a);
+ int32_t d0 = (int32_t) (_a[0]);
+ int32_t d1 = (int32_t) (_a[1]);
+
+ __m64 ret = _mm_cvttpd_pi32(a);
+ return validateInt32(ret, d0, d1);
+}
+
+result_t test_mm_cvttps_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ __m128 a = load_m128(_a);
+ int32_t trun[4];
+ for (uint32_t i = 0; i < 4; i++) {
+ trun[i] = (int32_t) _a[i];
+ }
+
+ __m128i ret = _mm_cvttps_epi32(a);
+ return VALIDATE_INT32_M128(ret, trun);
+}
+
+result_t test_mm_cvttsd_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ __m128d a = _mm_load_sd(_a);
+ int32_t ret = _mm_cvttsd_si32(a);
+
+ return ret == (int32_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvttsd_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ __m128d a = _mm_load_sd(_a);
+ int64_t ret = _mm_cvttsd_si64(a);
+
+ return ret == (int64_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_cvttsd_si64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+#if defined(__clang__)
+ // The intrinsic _mm_cvttsd_si64x() does not exist in Clang
+ return TEST_UNIMPL;
+#else
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ __m128d a = _mm_load_sd(_a);
+ int64_t ret = _mm_cvttsd_si64x(a);
+
+ return ret == (int64_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+#endif
+}
+
+result_t test_mm_div_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = 0.0, d1 = 0.0;
+
+ if (_b[0] != 0.0)
+ d0 = _a[0] / _b[0];
+ if (_b[1] != 0.0)
+ d1 = _a[1] / _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_div_pd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_div_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double d0 = _a[0] / _b[0];
+ double d1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+
+ __m128d c = _mm_div_sd(a, b);
+
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_extract_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint16_t *_a = (uint16_t *) impl.mTestIntPointer1;
+ const int idx = iter & 0x7;
+ __m128i a = load_m128i(_a);
+ int c;
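+ // The lane index of _mm_extract_epi16 must be a compile-time constant,
+ // so every possible index is instantiated explicitly and selected at run
+ // time.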
+ switch (idx) {
+ case 0:
+ c = _mm_extract_epi16(a, 0);
+ break;
+ case 1:
+ c = _mm_extract_epi16(a, 1);
+ break;
+ case 2:
+ c = _mm_extract_epi16(a, 2);
+ break;
+ case 3:
+ c = _mm_extract_epi16(a, 3);
+ break;
+ case 4:
+ c = _mm_extract_epi16(a, 4);
+ break;
+ case 5:
+ c = _mm_extract_epi16(a, 5);
+ break;
+ case 6:
+ c = _mm_extract_epi16(a, 6);
+ break;
+ case 7:
+ c = _mm_extract_epi16(a, 7);
+ break;
+ }
+
+ ASSERT_RETURN(c == *(_a + idx));
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_insert_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t insert = (int16_t) *impl.mTestIntPointer2;
+
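+ // _mm_insert_epi16 also requires an immediate lane index, so the
+ // TEST_IMPL macro below is expanded once per constant index by
+ // IMM_8_ITER.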
+#define TEST_IMPL(IDX) \
+ int16_t d##IDX[8]; \
+ for (int i = 0; i < 8; i++) { \
+ d##IDX[i] = _a[i]; \
+ } \
+ d##IDX[IDX] = insert; \
+ \
+ __m128i a##IDX = load_m128i(_a); \
+ __m128i b##IDX = _mm_insert_epi16(a##IDX, insert, IDX); \
+ CHECK_RESULT(VALIDATE_INT16_M128(b##IDX, d##IDX))
+
+ IMM_8_ITER
+#undef TEST_IMPL
+
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_lfence(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ /* FIXME: Assume that memory barriers always function as intended. */
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_load_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+ __m128d a = _mm_load_pd(p);
+ return validateDouble(a, p[0], p[1]);
+}
+
+result_t test_mm_load_pd1(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+ __m128d a = _mm_load_pd1(p);
+ return validateDouble(a, p[0], p[0]);
+}
+
+result_t test_mm_load_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+ __m128d a = _mm_load_sd(p);
+ return validateDouble(a, p[0], 0);
+}
+
+result_t test_mm_load_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *addr = impl.mTestIntPointer1;
+
+ __m128i ret = _mm_load_si128((const __m128i *) addr);
+
+ return VALIDATE_INT32_M128(ret, addr);
+}
+
+result_t test_mm_load1_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *addr = (const double *) impl.mTestFloatPointer1;
+
+ __m128d ret = _mm_load1_pd(addr);
+
+ return validateDouble(ret, addr[0], addr[0]);
+}
+
+result_t test_mm_loadh_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *addr = (const double *) impl.mTestFloatPointer2;
+
+ __m128d a = load_m128d(_a);
+ __m128d ret = _mm_loadh_pd(a, addr);
+
+ return validateDouble(ret, _a[0], addr[0]);
+}
+
+result_t test_mm_loadl_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *addr = (const int64_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_loadl_epi64((const __m128i *) addr);
+
+ return validateInt64(ret, addr[0], 0);
+}
+
+result_t test_mm_loadl_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *addr = (const double *) impl.mTestFloatPointer2;
+
+ __m128d a = load_m128d(_a);
+ __m128d ret = _mm_loadl_pd(a, addr);
+
+ return validateDouble(ret, addr[0], _a[1]);
+}
+
+result_t test_mm_loadr_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *addr = (const double *) impl.mTestFloatPointer1;
+
+ __m128d ret = _mm_loadr_pd(addr);
+
+ return validateDouble(ret, addr[1], addr[0]);
+}
+
+result_t test_mm_loadu_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+ __m128d a = _mm_loadu_pd(p);
+ return validateDouble(a, p[0], p[1]);
+}
+
+result_t test_mm_loadu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i c = _mm_loadu_si128((const __m128i *) _a);
+ return VALIDATE_INT32_M128(c, _a);
+}
+
+result_t test_mm_loadu_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // GCC versions before 11 do not implement the intrinsic _mm_loadu_si32.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483 for more
+ // information.
+#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+ return TEST_UNIMPL;
+#else
+ const int32_t *addr = (const int32_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_loadu_si32((const void *) addr);
+
+ return validateInt32(ret, addr[0], 0, 0, 0);
+#endif
+}
+
+result_t test_mm_madd_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
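+ // Reference result: widen each pair of 16-bit products to 32 bits, then
+ // horizontally add adjacent products into four 32-bit lanes.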
+ int32_t d0 = (int32_t) _a[0] * _b[0];
+ int32_t d1 = (int32_t) _a[1] * _b[1];
+ int32_t d2 = (int32_t) _a[2] * _b[2];
+ int32_t d3 = (int32_t) _a[3] * _b[3];
+ int32_t d4 = (int32_t) _a[4] * _b[4];
+ int32_t d5 = (int32_t) _a[5] * _b[5];
+ int32_t d6 = (int32_t) _a[6] * _b[6];
+ int32_t d7 = (int32_t) _a[7] * _b[7];
+
+ int32_t e[4];
+ e[0] = d0 + d1;
+ e[1] = d2 + d3;
+ e[2] = d4 + d5;
+ e[3] = d6 + d7;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_madd_epi16(a, b);
+ return VALIDATE_INT32_M128(c, e);
+}
+
+result_t test_mm_maskmoveu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_mask = (const uint8_t *) impl.mTestIntPointer2;
+ char mem_addr[16];
+
+ __m128i a = load_m128i(_a);
+ __m128i mask = load_m128i(_mask);
+ _mm_maskmoveu_si128(a, mask, mem_addr);
+
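+ // _mm_maskmoveu_si128 stores only the bytes whose mask has the most
+ // significant bit set, so only those bytes of mem_addr are defined.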
+ for (int i = 0; i < 16; i++) {
+ if (_mask[i] >> 7) {
+ ASSERT_RETURN(_a[i] == (uint8_t) mem_addr[i]);
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_max_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[8];
+ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+ d[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+ d[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+ d[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+ d[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ __m128i c = _mm_max_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_max_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ uint8_t d[16];
+ for (int i = 0; i < 16; i++) {
+ d[i] = ((uint8_t) _a[i] > (uint8_t) _b[i]) ? (uint8_t) _a[i]
+ : (uint8_t) _b[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_max_epu8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_max_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double f0 = _a[0] > _b[0] ? _a[0] : _b[0];
+ double f1 = _a[1] > _b[1] ? _a[1] : _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_max_pd(a, b);
+
+ return validateDouble(c, f0, f1);
+}
+
+result_t test_mm_max_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] > _b[0] ? _a[0] : _b[0];
+ double d1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_max_sd(a, b);
+
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_mfence(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ /* FIXME: Assume that memory barriers always function as intended. */
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_min_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[8];
+ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+ d[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+ d[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+ d[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+ d[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_min_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_min_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ uint8_t d[16];
+ for (int i = 0; i < 16; i++) {
+ d[i] = ((uint8_t) _a[i] < (uint8_t) _b[i]) ? (uint8_t) _a[i]
+ : (uint8_t) _b[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_min_epu8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_min_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double f0 = _a[0] < _b[0] ? _a[0] : _b[0];
+ double f1 = _a[1] < _b[1] ? _a[1] : _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+
+ __m128d c = _mm_min_pd(a, b);
+ return validateDouble(c, f0, f1);
+}
+
+result_t test_mm_min_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] < _b[0] ? _a[0] : _b[0];
+ double d1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_min_sd(a, b);
+
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_move_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ int64_t d0 = _a[0];
+ int64_t d1 = 0;
+
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_move_epi64(a);
+
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_move_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+
+ double result[2];
+ result[0] = _b[0];
+ result[1] = _a[1];
+
+ __m128d ret = _mm_move_sd(a, b);
+ return validateDouble(ret, result[0], result[1]);
+}
+
+result_t test_mm_movemask_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+
+ const uint8_t *ip = (const uint8_t *) _a;
+ int ret = 0;
+ uint32_t mask = 1;
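+ // Reference: gather the most significant bit of each of the 16 bytes into
+ // the low 16 bits of the result.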
+ for (uint32_t i = 0; i < 16; i++) {
+ if (ip[i] & 0x80) {
+ ret |= mask;
+ }
+ mask = mask << 1;
+ }
+ int test = _mm_movemask_epi8(a);
+ ASSERT_RETURN(test == ret);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_movemask_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ unsigned int _c = 0;
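+ // Reference: bit 0 holds the sign bit of the low double, bit 1 the sign
+ // bit of the high double.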
+ _c |= ((*(const uint64_t *) _a) >> 63) & 0x1;
+ _c |= (((*(const uint64_t *) (_a + 1)) >> 62) & 0x2);
+
+ __m128d a = load_m128d(_a);
+ int c = _mm_movemask_pd(a);
+
+ ASSERT_RETURN((unsigned int) c == _c);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_movepi64_pi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ int64_t d0 = _a[0];
+
+ __m128i a = load_m128i(_a);
+ __m64 c = _mm_movepi64_pi64(a);
+
+ return validateInt64(c, d0);
+}
+
+result_t test_mm_movpi64_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ int64_t d0 = _a[0];
+
+ __m64 a = load_m64(_a);
+ __m128i c = _mm_movpi64_epi64(a);
+
+ return validateInt64(c, d0, 0);
+}
+
+result_t test_mm_mul_epu32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
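+ // _mm_mul_epu32 multiplies only the even-indexed 32-bit lanes (0 and 2),
+ // producing two unsigned 64-bit products.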
+ uint64_t dx = (uint64_t) (_a[0]) * (uint64_t) (_b[0]);
+ uint64_t dy = (uint64_t) (_a[2]) * (uint64_t) (_b[2]);
+
+ __m128i a = _mm_loadu_si128((const __m128i *) _a);
+ __m128i b = _mm_loadu_si128((const __m128i *) _b);
+ __m128i r = _mm_mul_epu32(a, b);
+ return validateUInt64(r, dx, dy);
+}
+
+result_t test_mm_mul_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] * _b[0];
+ double d1 = _a[1] * _b[1];
+
+ __m128d a = _mm_load_pd(_a);
+ __m128d b = _mm_load_pd(_b);
+ __m128d c = _mm_mul_pd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_mul_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double dx = _a[0] * _b[0];
+ double dy = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_mul_sd(a, b);
+ return validateDouble(c, dx, dy);
+}
+
+result_t test_mm_mul_su32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+
+ uint64_t u = (uint64_t) (_a[0]) * (uint64_t) (_b[0]);
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 r = _mm_mul_su32(a, b);
+
+ return validateUInt64(r, u);
+}
+
+result_t test_mm_mulhi_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
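+ // Reference: keep the upper 16 bits of each signed 32-bit product.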
+ int16_t d[8];
+ for (uint32_t i = 0; i < 8; i++) {
+ int32_t m = (int32_t) _a[i] * (int32_t) _b[i];
+ d[i] = (int16_t) (m >> 16);
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_mulhi_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_mulhi_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+ uint16_t d[8];
+ for (uint32_t i = 0; i < 8; i++) {
+ uint32_t m = (uint32_t) _a[i] * (uint32_t) _b[i];
+ d[i] = (uint16_t) (m >> 16);
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_mulhi_epu16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_mullo_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[8];
+ d[0] = _a[0] * _b[0];
+ d[1] = _a[1] * _b[1];
+ d[2] = _a[2] * _b[2];
+ d[3] = _a[3] * _b[3];
+ d[4] = _a[4] * _b[4];
+ d[5] = _a[5] * _b[5];
+ d[6] = _a[6] * _b[6];
+ d[7] = _a[7] * _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_mullo_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_or_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestFloatPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestFloatPointer2;
+
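+ // The OR is verified on the raw 64-bit patterns; the expected values are
+ // reinterpreted as doubles for validateDouble().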
+ int64_t d0 = _a[0] | _b[0];
+ int64_t d1 = _a[1] | _b[1];
+
+ __m128d a = load_m128d((const double *) _a);
+ __m128d b = load_m128d((const double *) _b);
+ __m128d c = _mm_or_pd(a, b);
+
+ return validateDouble(c, *((double *) &d0), *((double *) &d1));
+}
+
+result_t test_mm_or_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_or_si128(a, b);
+ // now for the assertion...
+ const uint32_t *ia = (const uint32_t *) &a;
+ const uint32_t *ib = (const uint32_t *) &b;
+ uint32_t r[4];
+ r[0] = ia[0] | ib[0];
+ r[1] = ia[1] | ib[1];
+ r[2] = ia[2] | ib[2];
+ r[3] = ia[3] | ib[3];
+ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+ result_t res = VALIDATE_INT32_M128(c, r);
+ if (res) {
+ res = VALIDATE_INT32_M128(ret, r);
+ }
+ return res;
+}
+
+result_t test_mm_packs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int8_t max = INT8_MAX;
+ int8_t min = INT8_MIN;
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int8_t d[16];
+ for (int i = 0; i < 8; i++) {
+ if (_a[i] > max)
+ d[i] = max;
+ else if (_a[i] < min)
+ d[i] = min;
+ else
+ d[i] = (int8_t) _a[i];
+ }
+ for (int i = 0; i < 8; i++) {
+ if (_b[i] > max)
+ d[i + 8] = max;
+ else if (_b[i] < min)
+ d[i + 8] = min;
+ else
+ d[i + 8] = (int8_t) _b[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_packs_epi16(a, b);
+
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_packs_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int16_t max = INT16_MAX;
+ int16_t min = INT16_MIN;
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ for (int i = 0; i < 4; i++) {
+ if (_a[i] > max)
+ d[i] = max;
+ else if (_a[i] < min)
+ d[i] = min;
+ else
+ d[i] = (int16_t) _a[i];
+ }
+ for (int i = 0; i < 4; i++) {
+ if (_b[i] > max)
+ d[i + 4] = max;
+ else if (_b[i] < min)
+ d[i + 4] = min;
+ else
+ d[i + 4] = (int16_t) _b[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_packs_epi32(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_packus_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint8_t max = UINT8_MAX;
+ uint8_t min = 0;
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ uint8_t d[16];
+ for (int i = 0; i < 8; i++) {
+ if (_a[i] > (int16_t) max)
+ d[i] = max;
+ else if (_a[i] < (int16_t) min)
+ d[i] = min;
+ else
+ d[i] = (uint8_t) _a[i];
+ }
+ for (int i = 0; i < 8; i++) {
+ if (_b[i] > (int16_t) max)
+ d[i + 8] = max;
+ else if (_b[i] < (int16_t) min)
+ d[i + 8] = min;
+ else
+ d[i + 8] = (uint8_t) _b[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_packus_epi16(a, b);
+
+ return VALIDATE_UINT8_M128(c, d);
+}
+
+result_t test_mm_pause(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ _mm_pause();
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_sad_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ uint16_t d0 = 0;
+ uint16_t d1 = 0;
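+ // Reference: sum of absolute byte differences, accumulated separately for
+ // the low and high 8-byte halves.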
+ for (int i = 0; i < 8; i++) {
+ d0 += abs(_a[i] - _b[i]);
+ }
+ for (int i = 8; i < 16; i++) {
+ d1 += abs(_a[i] - _b[i]);
+ }
+
+ const __m128i a = load_m128i(_a);
+ const __m128i b = load_m128i(_b);
+ __m128i c = _mm_sad_epu8(a, b);
+ return validateUInt16(c, d0, 0, 0, 0, d1, 0, 0, 0);
+}
+
+result_t test_mm_set_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ int16_t d[8];
+ d[0] = _a[0];
+ d[1] = _a[1];
+ d[2] = _a[2];
+ d[3] = _a[3];
+ d[4] = _a[4];
+ d[5] = _a[5];
+ d[6] = _a[6];
+ d[7] = _a[7];
+
+ __m128i c = _mm_set_epi16(d[7], d[6], d[5], d[4], d[3], d[2], d[1], d[0]);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_set_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int32_t d[4];
+ d[3] = impl.mTestInts[iter];
+ d[2] = impl.mTestInts[iter + 1];
+ d[1] = impl.mTestInts[iter + 2];
+ d[0] = impl.mTestInts[iter + 3];
+ __m128i a = _mm_set_epi32(d[3], d[2], d[1], d[0]);
+ return VALIDATE_INT32_M128(a, d);
+}
+
+result_t test_mm_set_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_set_epi64(load_m64(&_a[1]), load_m64(&_a[0]));
+
+ return validateInt64(ret, _a[0], _a[1]);
+}
+
+result_t test_mm_set_epi64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_set_epi64x(_a[1], _a[0]);
+
+ return validateInt64(ret, _a[0], _a[1]);
+}
+
+result_t test_mm_set_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ int8_t d[16];
+ d[0] = _a[0];
+ d[1] = _a[1];
+ d[2] = _a[2];
+ d[3] = _a[3];
+ d[4] = _a[4];
+ d[5] = _a[5];
+ d[6] = _a[6];
+ d[7] = _a[7];
+ d[8] = _a[8];
+ d[9] = _a[9];
+ d[10] = _a[10];
+ d[11] = _a[11];
+ d[12] = _a[12];
+ d[13] = _a[13];
+ d[14] = _a[14];
+ d[15] = _a[15];
+
+ __m128i c =
+ _mm_set_epi8(d[15], d[14], d[13], d[12], d[11], d[10], d[9], d[8], d[7],
+ d[6], d[5], d[4], d[3], d[2], d[1], d[0]);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_set_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+ double x = p[0];
+ double y = p[1];
+ __m128d a = _mm_set_pd(x, y);
+ return validateDouble(a, y, x);
+}
+
+result_t test_mm_set_pd1(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double _a = impl.mTestFloats[iter];
+
+ __m128d a = _mm_set_pd1(_a);
+
+ return validateDouble(a, _a, _a);
+}
+
+result_t test_mm_set_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ double f0 = _a[0];
+ double f1 = 0.0;
+
+ __m128d a = _mm_set_sd(_a[0]);
+ return validateDouble(a, f0, f1);
+}
+
+result_t test_mm_set1_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ int16_t d0 = _a[0];
+
+ __m128i c = _mm_set1_epi16(d0);
+ return validateInt16(c, d0, d0, d0, d0, d0, d0, d0, d0);
+}
+
+result_t test_mm_set1_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int32_t x = impl.mTestInts[iter];
+ __m128i a = _mm_set1_epi32(x);
+ return validateInt32(a, x, x, x, x);
+}
+
+result_t test_mm_set1_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_set1_epi64(load_m64(&_a[0]));
+
+ return validateInt64(ret, _a[0], _a[0]);
+}
+
+result_t test_mm_set1_epi64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+
+ __m128i ret = _mm_set1_epi64x(_a[0]);
+
+ return validateInt64(ret, _a[0], _a[0]);
+}
+
+result_t test_mm_set1_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ int8_t d0 = _a[0];
+ __m128i c = _mm_set1_epi8(d0);
+ return validateInt8(c, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0,
+ d0, d0, d0);
+}
+
+result_t test_mm_set1_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ double d0 = _a[0];
+ __m128d c = _mm_set1_pd(d0);
+ return validateDouble(c, d0, d0);
+}
+
+result_t test_mm_setr_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+
+ __m128i c =
+ _mm_setr_epi16(_a[0], _a[1], _a[2], _a[3], _a[4], _a[5], _a[6], _a[7]);
+
+ return VALIDATE_INT16_M128(c, _a);
+}
+
+result_t test_mm_setr_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i c = _mm_setr_epi32(_a[0], _a[1], _a[2], _a[3]);
+ return VALIDATE_INT32_M128(c, _a);
+}
+
+result_t test_mm_setr_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ __m128i c = _mm_setr_epi64(load_m64(&_a[0]), load_m64(&_a[1]));
+ return validateInt64(c, _a[0], _a[1]);
+}
+
+result_t test_mm_setr_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+
+ __m128i c = _mm_setr_epi8(_a[0], _a[1], _a[2], _a[3], _a[4], _a[5], _a[6],
+ _a[7], _a[8], _a[9], _a[10], _a[11], _a[12],
+ _a[13], _a[14], _a[15]);
+
+ return VALIDATE_INT8_M128(c, _a);
+}
+
+result_t test_mm_setr_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+
+ double x = p[0];
+ double y = p[1];
+
+ __m128d a = _mm_setr_pd(x, y);
+
+ return validateDouble(a, x, y);
+}
+
+result_t test_mm_setzero_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ __m128d a = _mm_setzero_pd();
+ return validateDouble(a, 0, 0);
+}
+
+result_t test_mm_setzero_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ __m128i a = _mm_setzero_si128();
+ return validateInt32(a, 0, 0, 0, 0);
+}
+
+result_t test_mm_shuffle_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ __m128i a, c;
+
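+ // Each 2-bit field of the immediate selects the source lane for the
+ // corresponding result lane.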
+#define TEST_IMPL(IDX) \
+ int32_t d##IDX[4]; \
+ d##IDX[0] = _a[((IDX) &0x3)]; \
+ d##IDX[1] = _a[((IDX >> 2) & 0x3)]; \
+ d##IDX[2] = _a[((IDX >> 4) & 0x3)]; \
+ d##IDX[3] = _a[((IDX >> 6) & 0x3)]; \
+ \
+ a = load_m128i(_a); \
+ c = _mm_shuffle_epi32(a, IDX); \
+ CHECK_RESULT(VALIDATE_INT32_M128(c, d##IDX))
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_shuffle_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ __m128d a, b, c;
+
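+ // Bit 0 of the immediate selects the lane taken from a (low result
+ // element); bit 1 selects the lane taken from b (high result element).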
+#define TEST_IMPL(IDX) \
+ a = load_m128d(_a); \
+ b = load_m128d(_b); \
+ c = _mm_shuffle_pd(a, b, IDX); \
+ \
+ double d0##IDX = _a[IDX & 0x1]; \
+ double d1##IDX = _b[(IDX & 0x2) >> 1]; \
+ CHECK_RESULT(validateDouble(c, d0##IDX, d1##IDX))
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_shufflehi_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m128i a, c;
+
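+ // The lower four lanes pass through unchanged; each upper result lane is
+ // selected from the upper 64 bits of a by a 2-bit field of the immediate.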
+#define TEST_IMPL(IDX) \
+ int16_t d##IDX[8]; \
+ d##IDX[0] = _a[0]; \
+ d##IDX[1] = _a[1]; \
+ d##IDX[2] = _a[2]; \
+ d##IDX[3] = _a[3]; \
+ d##IDX[4] = (int16_t) (((const int64_t *) _a)[1] >> ((IDX & 0x3) * 16)); \
+ d##IDX[5] = \
+ (int16_t) (((const int64_t *) _a)[1] >> (((IDX >> 2) & 0x3) * 16)); \
+ d##IDX[6] = \
+ (int16_t) (((const int64_t *) _a)[1] >> (((IDX >> 4) & 0x3) * 16)); \
+ d##IDX[7] = \
+ (int16_t) (((const int64_t *) _a)[1] >> (((IDX >> 6) & 0x3) * 16)); \
+ \
+ a = load_m128i(_a); \
+ c = _mm_shufflehi_epi16(a, IDX); \
+ \
+ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_shufflelo_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m128i a, c;
+
+#define TEST_IMPL(IDX) \
+ int16_t d##IDX[8]; \
+ d##IDX[0] = (int16_t) (((const int64_t *) _a)[0] >> ((IDX & 0x3) * 16)); \
+ d##IDX[1] = \
+ (int16_t) (((const int64_t *) _a)[0] >> (((IDX >> 2) & 0x3) * 16)); \
+ d##IDX[2] = \
+ (int16_t) (((const int64_t *) _a)[0] >> (((IDX >> 4) & 0x3) * 16)); \
+ d##IDX[3] = \
+ (int16_t) (((const int64_t *) _a)[0] >> (((IDX >> 6) & 0x3) * 16)); \
+ d##IDX[4] = _a[4]; \
+ d##IDX[5] = _a[5]; \
+ d##IDX[6] = _a[6]; \
+ d##IDX[7] = _a[7]; \
+ \
+ a = load_m128i(_a); \
+ c = _mm_shufflelo_epi16(a, IDX); \
+ \
+ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_sll_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m128i a, b, c;
+ uint8_t idx;
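+ // _mm_sll_epi16 takes the shift count from the low 64 bits of b; counts
+ // greater than 15 zero every element.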
+#define TEST_IMPL(IDX) \
+ uint16_t d##IDX[8]; \
+ idx = IDX; \
+ d##IDX[0] = (idx > 15) ? 0 : _a[0] << idx; \
+ d##IDX[1] = (idx > 15) ? 0 : _a[1] << idx; \
+ d##IDX[2] = (idx > 15) ? 0 : _a[2] << idx; \
+ d##IDX[3] = (idx > 15) ? 0 : _a[3] << idx; \
+ d##IDX[4] = (idx > 15) ? 0 : _a[4] << idx; \
+ d##IDX[5] = (idx > 15) ? 0 : _a[5] << idx; \
+ d##IDX[6] = (idx > 15) ? 0 : _a[6] << idx; \
+ d##IDX[7] = (idx > 15) ? 0 : _a[7] << idx; \
+ \
+ a = load_m128i(_a); \
+ b = _mm_set1_epi64x(IDX); \
+ c = _mm_sll_epi16(a, b); \
+ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+
+ IMM_64_ITER
+#undef TEST_IMPL
+
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_sll_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i a, b, c;
+ uint8_t idx;
+
+#define TEST_IMPL(IDX) \
+ uint32_t d##IDX[4]; \
+ idx = IDX; \
+ d##IDX[0] = (idx > 31) ? 0 : _a[0] << idx; \
+ d##IDX[1] = (idx > 31) ? 0 : _a[1] << idx; \
+ d##IDX[2] = (idx > 31) ? 0 : _a[2] << idx; \
+ d##IDX[3] = (idx > 31) ? 0 : _a[3] << idx; \
+ \
+ a = load_m128i(_a); \
+ b = _mm_set1_epi64x(IDX); \
+ c = _mm_sll_epi32(a, b); \
+ CHECK_RESULT(VALIDATE_INT32_M128(c, d##IDX))
+
+ IMM_64_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_sll_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ __m128i a, b, c;
+
+#define TEST_IMPL(IDX) \
+ uint64_t d0##IDX = (IDX & ~63) ? 0 : _a[0] << IDX; \
+ uint64_t d1##IDX = (IDX & ~63) ? 0 : _a[1] << IDX; \
+ \
+ a = load_m128i(_a); \
+ b = _mm_set1_epi64x(IDX); \
+ c = _mm_sll_epi64(a, b); \
+ \
+ CHECK_RESULT(validateInt64(c, d0##IDX, d1##IDX))
+
+ IMM_64_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_slli_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m128i a, c;
+ uint8_t idx;
+#define TEST_IMPL(IDX) \
+ int16_t d##IDX[8]; \
+ idx = IDX; \
+ d##IDX[0] = (idx > 15) ? 0 : _a[0] << idx; \
+ d##IDX[1] = (idx > 15) ? 0 : _a[1] << idx; \
+ d##IDX[2] = (idx > 15) ? 0 : _a[2] << idx; \
+ d##IDX[3] = (idx > 15) ? 0 : _a[3] << idx; \
+ d##IDX[4] = (idx > 15) ? 0 : _a[4] << idx; \
+ d##IDX[5] = (idx > 15) ? 0 : _a[5] << idx; \
+ d##IDX[6] = (idx > 15) ? 0 : _a[6] << idx; \
+ d##IDX[7] = (idx > 15) ? 0 : _a[7] << idx; \
+ \
+ a = load_m128i(_a); \
+ c = _mm_slli_epi16(a, IDX); \
+ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+
+ IMM_64_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_slli_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+#if defined(__clang__)
+ // The Clang compiler does not allow the second argument of
+ // _mm_slli_epi32() to be greater than 31.
+ const int count = (int) (iter % 33 - 1); // range: -1 ~ 31
+#else
+ const int count = (int) (iter % 34 - 1); // range: -1 ~ 32
+#endif
+
+ int32_t d[4];
+ d[0] = (count & ~31) ? 0 : _a[0] << count;
+ d[1] = (count & ~31) ? 0 : _a[1] << count;
+ d[2] = (count & ~31) ? 0 : _a[2] << count;
+ d[3] = (count & ~31) ? 0 : _a[3] << count;
+
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_slli_epi32(a, count);
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_slli_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+#if defined(__clang__)
+ // The Clang compiler does not allow the second argument of
+ // _mm_slli_epi64() to be greater than 63.
+ const int count = (int) (iter % 65 - 1); // range: -1 ~ 63
+#else
+ const int count = (int) (iter % 66 - 1); // range: -1 ~ 64
+#endif
+ int64_t d0 = (count & ~63) ? 0 : _a[0] << count;
+ int64_t d1 = (count & ~63) ? 0 : _a[1] << count;
+
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_slli_epi64(a, count);
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_slli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+
+ int8_t d[16];
+ int count = (iter % 5) << 2;
+ for (int i = 0; i < 16; i++) {
+ if (i < count)
+ d[i] = 0;
+ else
+ d[i] = ((const int8_t *) _a)[i - count];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i ret;
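+ // The byte-shift count must be a compile-time constant, so each tested
+ // count is dispatched explicitly.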
+ switch (iter % 5) {
+ case 0:
+ ret = _mm_slli_si128(a, 0);
+ break;
+ case 1:
+ ret = _mm_slli_si128(a, 4);
+ break;
+ case 2:
+ ret = _mm_slli_si128(a, 8);
+ break;
+ case 3:
+ ret = _mm_slli_si128(a, 12);
+ break;
+ case 4:
+ ret = _mm_slli_si128(a, 16);
+ break;
+ }
+
+ return VALIDATE_INT8_M128(ret, d);
+}
+
+result_t test_mm_sqrt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ double f0 = sqrt(_a[0]);
+ double f1 = sqrt(_a[1]);
+
+ __m128d a = load_m128d(_a);
+ __m128d c = _mm_sqrt_pd(a);
+
+ return validateFloatError(c, f0, f1, 1.0e-15);
+}
+
+result_t test_mm_sqrt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double f0 = sqrt(_b[0]);
+ double f1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_sqrt_sd(a, b);
+
+ return validateFloatError(c, f0, f1, 1.0e-15);
+}
+
+result_t test_mm_sra_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int64_t count = (int64_t) (iter % 18 - 1); // range: -1 ~ 16
+
+ int16_t d[8];
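+ // Arithmetic shifts with a count outside 0-15 fill each element with its
+ // sign bit.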
+ d[0] = (count & ~15) ? (_a[0] < 0 ? ~UINT16_C(0) : 0) : (_a[0] >> count);
+ d[1] = (count & ~15) ? (_a[1] < 0 ? ~UINT16_C(0) : 0) : (_a[1] >> count);
+ d[2] = (count & ~15) ? (_a[2] < 0 ? ~UINT16_C(0) : 0) : (_a[2] >> count);
+ d[3] = (count & ~15) ? (_a[3] < 0 ? ~UINT16_C(0) : 0) : (_a[3] >> count);
+ d[4] = (count & ~15) ? (_a[4] < 0 ? ~UINT16_C(0) : 0) : (_a[4] >> count);
+ d[5] = (count & ~15) ? (_a[5] < 0 ? ~UINT16_C(0) : 0) : (_a[5] >> count);
+ d[6] = (count & ~15) ? (_a[6] < 0 ? ~UINT16_C(0) : 0) : (_a[6] >> count);
+ d[7] = (count & ~15) ? (_a[7] < 0 ? ~UINT16_C(0) : 0) : (_a[7] >> count);
+
+ __m128i a = _mm_load_si128((const __m128i *) _a);
+ __m128i b = _mm_set1_epi64x(count);
+ __m128i c = _mm_sra_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_sra_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int64_t count = (int64_t) (iter % 34 - 1); // range: -1 ~ 32
+
+ int32_t d[4];
+ d[0] = (count & ~31) ? (_a[0] < 0 ? ~UINT32_C(0) : 0) : _a[0] >> count;
+ d[1] = (count & ~31) ? (_a[1] < 0 ? ~UINT32_C(0) : 0) : _a[1] >> count;
+ d[2] = (count & ~31) ? (_a[2] < 0 ? ~UINT32_C(0) : 0) : _a[2] >> count;
+ d[3] = (count & ~31) ? (_a[3] < 0 ? ~UINT32_C(0) : 0) : _a[3] >> count;
+
+ __m128i a = _mm_load_si128((const __m128i *) _a);
+ __m128i b = _mm_set1_epi64x(count);
+ __m128i c = _mm_sra_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_srai_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int32_t b = (int32_t) (iter % 18 - 1); // range: -1 ~ 16
+ int16_t d[8];
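+ // Counts outside 0-15 behave like a shift by 15, replicating the sign bit.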
+ int count = (b & ~15) ? 15 : b;
+
+ for (int i = 0; i < 8; i++) {
+ d[i] = _a[i] >> count;
+ }
+
+ __m128i a = _mm_load_si128((const __m128i *) _a);
+ __m128i c = _mm_srai_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_srai_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t b = (int32_t) (iter % 34 - 1); // range: -1 ~ 32
+
+ int32_t d[4];
+ int count = (b & ~31) ? 31 : b;
+ for (int i = 0; i < 4; i++) {
+ d[i] = _a[i] >> count;
+ }
+
+ __m128i a = _mm_load_si128((const __m128i *) _a);
+ __m128i c = _mm_srai_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_srl_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int64_t count = (int64_t) (iter % 18 - 1); // range: -1 ~ 16
+
+ uint16_t d[8];
+ d[0] = (count & ~15) ? 0 : (uint16_t) (_a[0]) >> count;
+ d[1] = (count & ~15) ? 0 : (uint16_t) (_a[1]) >> count;
+ d[2] = (count & ~15) ? 0 : (uint16_t) (_a[2]) >> count;
+ d[3] = (count & ~15) ? 0 : (uint16_t) (_a[3]) >> count;
+ d[4] = (count & ~15) ? 0 : (uint16_t) (_a[4]) >> count;
+ d[5] = (count & ~15) ? 0 : (uint16_t) (_a[5]) >> count;
+ d[6] = (count & ~15) ? 0 : (uint16_t) (_a[6]) >> count;
+ d[7] = (count & ~15) ? 0 : (uint16_t) (_a[7]) >> count;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = _mm_set1_epi64x(count);
+ __m128i c = _mm_srl_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_srl_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int64_t count = (int64_t) (iter % 34 - 1); // range: -1 ~ 32
+
+ uint32_t d[4];
+ d[0] = (count & ~31) ? 0 : (uint32_t) (_a[0]) >> count;
+ d[1] = (count & ~31) ? 0 : (uint32_t) (_a[1]) >> count;
+ d[2] = (count & ~31) ? 0 : (uint32_t) (_a[2]) >> count;
+ d[3] = (count & ~31) ? 0 : (uint32_t) (_a[3]) >> count;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = _mm_set1_epi64x(count);
+ __m128i c = _mm_srl_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_srl_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t count = (int64_t) (iter % 66 - 1); // range: -1 ~ 64
+
+ uint64_t d0 = (count & ~63) ? 0 : (uint64_t) (_a[0]) >> count;
+ uint64_t d1 = (count & ~63) ? 0 : (uint64_t) (_a[1]) >> count;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = _mm_set1_epi64x(count);
+ __m128i c = _mm_srl_epi64(a, b);
+
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_srli_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int count = (int) (iter % 18 - 1); // range: -1 ~ 16
+
+ int16_t d[8];
+ d[0] = count & (~15) ? 0 : (uint16_t) (_a[0]) >> count;
+ d[1] = count & (~15) ? 0 : (uint16_t) (_a[1]) >> count;
+ d[2] = count & (~15) ? 0 : (uint16_t) (_a[2]) >> count;
+ d[3] = count & (~15) ? 0 : (uint16_t) (_a[3]) >> count;
+ d[4] = count & (~15) ? 0 : (uint16_t) (_a[4]) >> count;
+ d[5] = count & (~15) ? 0 : (uint16_t) (_a[5]) >> count;
+ d[6] = count & (~15) ? 0 : (uint16_t) (_a[6]) >> count;
+ d[7] = count & (~15) ? 0 : (uint16_t) (_a[7]) >> count;
+
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_srli_epi16(a, count);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_srli_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int count = (int) (iter % 34 - 1); // range: -1 ~ 32
+
+ int32_t d[4];
+ d[0] = count & (~31) ? 0 : (uint32_t) (_a[0]) >> count;
+ d[1] = count & (~31) ? 0 : (uint32_t) (_a[1]) >> count;
+ d[2] = count & (~31) ? 0 : (uint32_t) (_a[2]) >> count;
+ d[3] = count & (~31) ? 0 : (uint32_t) (_a[3]) >> count;
+
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_srli_epi32(a, count);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_srli_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int count = (int) (iter % 66 - 1); // range: -1 ~ 64
+
+ int64_t d0 = count & (~63) ? 0 : (uint64_t) (_a[0]) >> count;
+ int64_t d1 = count & (~63) ? 0 : (uint64_t) (_a[1]) >> count;
+
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_srli_epi64(a, count);
+
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_srli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int count = (iter % 5) << 2;
+
+ int8_t d[16];
+ for (int i = 0; i < 16; i++) {
+ if (i >= (16 - count))
+ d[i] = 0;
+ else
+ d[i] = _a[i + count];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i ret;
+ switch (iter % 5) {
+ case 0:
+ ret = _mm_srli_si128(a, 0);
+ break;
+ case 1:
+ ret = _mm_srli_si128(a, 4);
+ break;
+ case 2:
+ ret = _mm_srli_si128(a, 8);
+ break;
+ case 3:
+ ret = _mm_srli_si128(a, 12);
+ break;
+ case 4:
+ ret = _mm_srli_si128(a, 16);
+ break;
+ }
+
+ return VALIDATE_INT8_M128(ret, d);
+}
+
+result_t test_mm_store_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ double x = impl.mTestFloats[iter + 4];
+ double y = impl.mTestFloats[iter + 6];
+
+ __m128d a = _mm_set_pd(x, y);
+ _mm_store_pd(p, a);
+ ASSERT_RETURN(p[0] == y);
+ ASSERT_RETURN(p[1] == x);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_store_pd1(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ double _a[2] = {(double) impl.mTestFloats[iter],
+ (double) impl.mTestFloats[iter + 1]};
+
+ __m128d a = load_m128d(_a);
+ _mm_store_pd1(p, a);
+ ASSERT_RETURN(p[0] == impl.mTestFloats[iter]);
+ ASSERT_RETURN(p[1] == impl.mTestFloats[iter]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_store_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ double _a[2] = {(double) impl.mTestFloats[iter],
+ (double) impl.mTestFloats[iter + 1]};
+
+ __m128d a = load_m128d(_a);
+ _mm_store_sd(p, a);
+ ASSERT_RETURN(p[0] == impl.mTestFloats[iter]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_store_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ alignas(16) int32_t p[4];
+
+ __m128i a = load_m128i(_a);
+ _mm_store_si128((__m128i *) p, a);
+
+ return VALIDATE_INT32_M128(a, p);
+}
+
+result_t test_mm_store1_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_store_pd1(impl, iter);
+}
+
+result_t test_mm_storeh_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ double mem;
+
+ __m128d a = load_m128d(p);
+ _mm_storeh_pd(&mem, a);
+
+ ASSERT_RETURN(mem == p[1]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storel_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int64_t *p = (int64_t *) impl.mTestIntPointer1;
+ __m128i mem;
+
+ __m128i a = load_m128i(p);
+ _mm_storel_epi64(&mem, a);
+
+ ASSERT_RETURN(((SIMDVec *) &mem)->m128_u64[0] == (uint64_t) p[0]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storel_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ double mem;
+
+ __m128d a = load_m128d(p);
+ _mm_storel_pd(&mem, a);
+
+ ASSERT_RETURN(mem == p[0]);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storer_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ alignas(16) double mem[2];
+
+ __m128d a = load_m128d(p);
+ _mm_storer_pd(mem, a);
+
+ __m128d res = load_m128d(mem);
+ return validateDouble(res, p[1], p[0]);
+}
+
+result_t test_mm_storeu_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ double *p = (double *) impl.mTestFloatPointer1;
+ double x = impl.mTestFloats[iter + 4];
+ double y = impl.mTestFloats[iter + 6];
+
+ __m128d a = _mm_set_pd(x, y);
+ _mm_storeu_pd(p, a);
+ ASSERT_RETURN(p[0] == y);
+ ASSERT_RETURN(p[1] == x);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_storeu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i b;
+ __m128i a = load_m128i(_a);
+ _mm_storeu_si128(&b, a);
+ int32_t *_b = (int32_t *) &b;
+ return VALIDATE_INT32_M128(a, _b);
+}
+
+result_t test_mm_storeu_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // GCC versions before 11 do not implement the intrinsic _mm_storeu_si32.
+ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483 for more
+ // information.
+#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+ return TEST_UNIMPL;
+#else
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i b;
+ __m128i a = load_m128i(_a);
+ _mm_storeu_si32(&b, a);
+ int32_t *_b = (int32_t *) &b;
+ return validateInt32(b, _a[0], _b[1], _b[2], _b[3]);
+#endif
+}
+
+result_t test_mm_stream_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ alignas(16) double p[2];
+
+ __m128d a = load_m128d(_a);
+ _mm_stream_pd(p, a);
+
+ return validateDouble(a, p[0], p[1]);
+}
+
+result_t test_mm_stream_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ alignas(16) int32_t p[4];
+
+ __m128i a = load_m128i(_a);
+ _mm_stream_si128((__m128i *) p, a);
+
+ return VALIDATE_INT32_M128(a, p);
+}
+
+result_t test_mm_stream_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t a = (int32_t) impl.mTestInts[iter];
+ int32_t p;
+
+ _mm_stream_si32(&p, a);
+
+ ASSERT_RETURN(a == p)
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_stream_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t a = (int64_t) impl.mTestInts[iter];
+ __int64 p[1];
+ _mm_stream_si64(p, a);
+ ASSERT_RETURN(p[0] == a);
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_sub_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[8];
+ d[0] = _a[0] - _b[0];
+ d[1] = _a[1] - _b[1];
+ d[2] = _a[2] - _b[2];
+ d[3] = _a[3] - _b[3];
+ d[4] = _a[4] - _b[4];
+ d[5] = _a[5] - _b[5];
+ d[6] = _a[6] - _b[6];
+ d[7] = _a[7] - _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sub_epi16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_sub_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ int32_t d[4];
+ d[0] = _a[0] - _b[0];
+ d[1] = _a[1] - _b[1];
+ d[2] = _a[2] - _b[2];
+ d[3] = _a[3] - _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sub_epi32(a, b);
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_sub_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (int64_t *) impl.mTestIntPointer2;
+ int64_t d0 = _a[0] - _b[0];
+ int64_t d1 = _a[1] - _b[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sub_epi64(a, b);
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_sub_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t d[16];
+ d[0] = _a[0] - _b[0];
+ d[1] = _a[1] - _b[1];
+ d[2] = _a[2] - _b[2];
+ d[3] = _a[3] - _b[3];
+ d[4] = _a[4] - _b[4];
+ d[5] = _a[5] - _b[5];
+ d[6] = _a[6] - _b[6];
+ d[7] = _a[7] - _b[7];
+ d[8] = _a[8] - _b[8];
+ d[9] = _a[9] - _b[9];
+ d[10] = _a[10] - _b[10];
+ d[11] = _a[11] - _b[11];
+ d[12] = _a[12] - _b[12];
+ d[13] = _a[13] - _b[13];
+ d[14] = _a[14] - _b[14];
+ d[15] = _a[15] - _b[15];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sub_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_sub_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] - _b[0];
+ double d1 = _a[1] - _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_sub_pd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_sub_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d0 = _a[0] - _b[0];
+ double d1 = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_sub_sd(a, b);
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_sub_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t d = _a[0] - _b[0];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_sub_si64(a, b);
+
+ return validateInt64(c, d);
+}
+
+result_t test_mm_subs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int32_t max = 32767;
+ int32_t min = -32768;
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ for (int i = 0; i < 8; i++) {
+ int32_t res = (int32_t) _a[i] - (int32_t) _b[i];
+ if (res > max)
+ d[i] = max;
+ else if (res < min)
+ d[i] = min;
+ else
+ d[i] = (int16_t) res;
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_subs_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_subs_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int16_t max = 127;
+ int16_t min = -128;
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int8_t d[16];
+ for (int i = 0; i < 16; i++) {
+ int16_t res = (int16_t) _a[i] - (int16_t) _b[i];
+ if (res > max)
+ d[i] = (int8_t) max;
+ else if (res < min)
+ d[i] = (int8_t) min;
+ else
+ d[i] = (int8_t) res;
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_subs_epi8(a, b);
+
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_subs_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ uint16_t d[8];
+ // Unsigned saturating subtraction: if the result wraps around (becomes
+ // larger than the minuend), clamp it to 0.
+ for (int i = 0; i < 8; i++) {
+ d[i] = (uint16_t) _a[i] - (uint16_t) _b[i];
+ if (d[i] > (uint16_t) _a[i])
+ d[i] = 0;
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ __m128i c = _mm_subs_epu16(a, b);
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_subs_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ uint8_t d[16];
+ // Unsigned saturating subtraction, as in test_mm_subs_epu16 above.
+ for (int i = 0; i < 16; i++) {
+ d[i] = (uint8_t) _a[i] - (uint8_t) _b[i];
+ if (d[i] > (uint8_t) _a[i])
+ d[i] = 0;
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_subs_epu8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_ucomieq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_comieq_sd(impl, iter);
+}
+
+result_t test_mm_ucomige_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_comige_sd(impl, iter);
+}
+
+result_t test_mm_ucomigt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_comigt_sd(impl, iter);
+}
+
+result_t test_mm_ucomile_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_comile_sd(impl, iter);
+}
+
+result_t test_mm_ucomilt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_comilt_sd(impl, iter);
+}
+
+result_t test_mm_ucomineq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_comineq_sd(impl, iter);
+}
+
+result_t test_mm_undefined_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ __m128d a = _mm_undefined_pd();
+ a = _mm_xor_pd(a, a);
+ return validateDouble(a, 0, 0);
+}
+
+result_t test_mm_undefined_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ __m128i a = _mm_undefined_si128();
+ a = _mm_xor_si128(a, a);
+ return validateInt64(a, 0, 0);
+}
+
+result_t test_mm_unpackhi_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ d[0] = _a[4];
+ d[1] = _b[4];
+ d[2] = _a[5];
+ d[3] = _b[5];
+ d[4] = _a[6];
+ d[5] = _b[6];
+ d[6] = _a[7];
+ d[7] = _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpackhi_epi16(a, b);
+
+ return VALIDATE_INT16_M128(ret, d);
+}
+
+result_t test_mm_unpackhi_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int32_t d[4];
+ d[0] = _a[2];
+ d[1] = _b[2];
+ d[2] = _a[3];
+ d[3] = _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpackhi_epi32(a, b);
+
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_unpackhi_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t i0 = _a[1];
+ int64_t i1 = _b[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpackhi_epi64(a, b);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_unpackhi_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int8_t d[16];
+ d[0] = _a[8];
+ d[1] = _b[8];
+ d[2] = _a[9];
+ d[3] = _b[9];
+ d[4] = _a[10];
+ d[5] = _b[10];
+ d[6] = _a[11];
+ d[7] = _b[11];
+ d[8] = _a[12];
+ d[9] = _b[12];
+ d[10] = _a[13];
+ d[11] = _b[13];
+ d[12] = _a[14];
+ d[13] = _b[14];
+ d[14] = _a[15];
+ d[15] = _b[15];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpackhi_epi8(a, b);
+
+ return VALIDATE_INT8_M128(ret, d);
+}
+
+result_t test_mm_unpackhi_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d ret = _mm_unpackhi_pd(a, b);
+
+ return validateDouble(ret, _a[1], _b[1]);
+}
+
+result_t test_mm_unpacklo_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ d[0] = _a[0];
+ d[1] = _b[0];
+ d[2] = _a[1];
+ d[3] = _b[1];
+ d[4] = _a[2];
+ d[5] = _b[2];
+ d[6] = _a[3];
+ d[7] = _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpacklo_epi16(a, b);
+
+ return VALIDATE_INT16_M128(ret, d);
+}
+
+result_t test_mm_unpacklo_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int32_t d[4];
+ d[0] = _a[0];
+ d[1] = _b[0];
+ d[2] = _a[1];
+ d[3] = _b[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpacklo_epi32(a, b);
+
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_unpacklo_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t i0 = _a[0];
+ int64_t i1 = _b[0];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpacklo_epi64(a, b);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_unpacklo_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int8_t d[16];
+ d[0] = _a[0];
+ d[1] = _b[0];
+ d[2] = _a[1];
+ d[3] = _b[1];
+ d[4] = _a[2];
+ d[5] = _b[2];
+ d[6] = _a[3];
+ d[7] = _b[3];
+ d[8] = _a[4];
+ d[9] = _b[4];
+ d[10] = _a[5];
+ d[11] = _b[5];
+ d[12] = _a[6];
+ d[13] = _b[6];
+ d[14] = _a[7];
+ d[15] = _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_unpacklo_epi8(a, b);
+
+ return VALIDATE_INT8_M128(ret, d);
+}
+
+result_t test_mm_unpacklo_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d ret = _mm_unpacklo_pd(a, b);
+
+ return validateDouble(ret, _a[0], _b[0]);
+}
+
+result_t test_mm_xor_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestFloatPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestFloatPointer2;
+
+ int64_t d0 = _a[0] ^ _b[0];
+ int64_t d1 = _a[1] ^ _b[1];
+
+ __m128d a = load_m128d((const double *) _a);
+ __m128d b = load_m128d((const double *) _b);
+ __m128d c = _mm_xor_pd(a, b);
+
+ return validateDouble(c, *((double *) &d0), *((double *) &d1));
+}
+
+result_t test_mm_xor_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t d0 = _a[0] ^ _b[0];
+ int64_t d1 = _a[1] ^ _b[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_xor_si128(a, b);
+
+ return validateInt64(c, d0, d1);
+}
+
+/* SSE3 */
+result_t test_mm_addsub_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double d0 = _a[0] - _b[0];
+ double d1 = _a[1] + _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_addsub_pd(a, b);
+
+ return validateDouble(c, d0, d1);
+}
+
+result_t test_mm_addsub_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME: the rounding mode may affect the test result on Arm platforms.
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[0] - _b[0];
+ float f1 = _a[1] + _b[1];
+ float f2 = _a[2] - _b[2];
+ float f3 = _a[3] + _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_addsub_ps(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_hadd_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double f0 = _a[0] + _a[1];
+ double f1 = _b[0] + _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_hadd_pd(a, b);
+
+ return validateDouble(c, f0, f1);
+}
+
+result_t test_mm_hadd_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME: the rounding mode may affect the test result on Arm platforms.
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[0] + _a[1];
+ float f1 = _a[2] + _a[3];
+ float f2 = _b[0] + _b[1];
+ float f3 = _b[2] + _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_hadd_ps(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_hsub_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double f0 = _a[0] - _a[1];
+ double f1 = _b[0] - _b[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d c = _mm_hsub_pd(a, b);
+
+ return validateDouble(c, f0, f1);
+}
+
+result_t test_mm_hsub_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ // FIXME: the rounding mode may affect the test result on Arm platforms.
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = _a[0] - _a[1];
+ float f1 = _a[2] - _a[3];
+ float f2 = _b[0] - _b[1];
+ float f3 = _b[2] - _b[3];
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_hsub_ps(a, b);
+
+ return validateFloat(c, f0, f1, f2, f3);
+}
+
+result_t test_mm_lddqu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_loadu_si128(impl, iter);
+}
+
+result_t test_mm_loaddup_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *addr = (const double *) impl.mTestFloatPointer1;
+
+ __m128d ret = _mm_loaddup_pd(addr);
+
+ return validateDouble(ret, addr[0], addr[0]);
+}
+
+result_t test_mm_movedup_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *p = (const double *) impl.mTestFloatPointer1;
+ __m128d a = load_m128d(p);
+ __m128d b = _mm_movedup_pd(a);
+
+ return validateDouble(b, p[0], p[0]);
+}
+
+result_t test_mm_movehdup_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p = impl.mTestFloatPointer1;
+ __m128 a = load_m128(p);
+ return validateFloat(_mm_movehdup_ps(a), p[1], p[1], p[3], p[3]);
+}
+
+result_t test_mm_moveldup_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *p = impl.mTestFloatPointer1;
+ __m128 a = load_m128(p);
+ return validateFloat(_mm_moveldup_ps(a), p[0], p[0], p[2], p[2]);
+}
+
+/* SSSE3 */
+result_t test_mm_abs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_abs_epi16(a);
+
+ uint32_t d[8];
+ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+ d[4] = (_a[4] < 0) ? -_a[4] : _a[4];
+ d[5] = (_a[5] < 0) ? -_a[5] : _a[5];
+ d[6] = (_a[6] < 0) ? -_a[6] : _a[6];
+ d[7] = (_a[7] < 0) ? -_a[7] : _a[7];
+
+ return VALIDATE_UINT16_M128(c, d);
+}
+
+result_t test_mm_abs_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_abs_epi32(a);
+
+ uint32_t d[4];
+ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+
+ return VALIDATE_UINT32_M128(c, d);
+}
+
+result_t test_mm_abs_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ __m128i c = _mm_abs_epi8(a);
+
+ uint32_t d[16];
+ for (int i = 0; i < 16; i++) {
+ d[i] = (_a[i] < 0) ? -_a[i] : _a[i];
+ }
+
+ return VALIDATE_UINT8_M128(c, d);
+}
+
+result_t test_mm_abs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ __m64 a = load_m64(_a);
+ __m64 c = _mm_abs_pi16(a);
+
+ uint32_t d[4];
+ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+
+ return VALIDATE_UINT16_M64(c, d);
+}
+
+result_t test_mm_abs_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m64 a = load_m64(_a);
+ __m64 c = _mm_abs_pi32(a);
+
+ uint32_t d[2];
+ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+
+ return VALIDATE_UINT32_M64(c, d);
+}
+
+result_t test_mm_abs_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ __m64 a = load_m64(_a);
+ __m64 c = _mm_abs_pi8(a);
+
+ uint32_t d[8];
+ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+ d[4] = (_a[4] < 0) ? -_a[4] : _a[4];
+ d[5] = (_a[5] < 0) ? -_a[5] : _a[5];
+ d[6] = (_a[6] < 0) ? -_a[6] : _a[6];
+ d[7] = (_a[7] < 0) ? -_a[7] : _a[7];
+
+ return VALIDATE_UINT8_M64(c, d);
+}
+
+result_t test_mm_alignr_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+#if defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ unsigned int shift = (iter % 5) << 3;
+ uint8_t d[32];
+
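+ // Build the reference for _mm_alignr_epi8: concatenate a (high 16 bytes)
+ // and b (low 16 bytes) into a 32-byte buffer, shift it right by `shift`
+ // bytes, and keep the low 16 bytes; a shift of 32 or more yields all zeros.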
+ if (shift >= 32) {
+ memset((void *) d, 0, sizeof(d));
+ } else {
+ memcpy((void *) d, (const void *) _b, 16);
+ memcpy((void *) (d + 16), (const void *) _a, 16);
+ // shifting
+ for (size_t x = 0; x < sizeof(d); x++) {
+ if (x + shift >= sizeof(d))
+ d[x] = 0;
+ else
+ d[x] = d[x + shift];
+ }
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret;
+ switch (iter % 5) {
+ case 0:
+ ret = _mm_alignr_epi8(a, b, 0);
+ break;
+ case 1:
+ ret = _mm_alignr_epi8(a, b, 8);
+ break;
+ case 2:
+ ret = _mm_alignr_epi8(a, b, 16);
+ break;
+ case 3:
+ ret = _mm_alignr_epi8(a, b, 24);
+ break;
+ case 4:
+ ret = _mm_alignr_epi8(a, b, 32);
+ break;
+ }
+
+ return VALIDATE_UINT8_M128(ret, d);
+#endif
+}
+
+result_t test_mm_alignr_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+#if defined(__clang__)
+ return TEST_UNIMPL;
+#else
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+ unsigned int shift = (iter % 3) << 3;
+ uint8_t d[16];
+
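+ // Same construction as the 128-bit variant: concatenate a (high 8 bytes)
+ // and b (low 8 bytes), shift right by `shift` bytes, keep the low 8 bytes.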
+ if (shift >= 16) {
+ memset((void *) d, 0, sizeof(d));
+ } else {
+ memcpy((void *) d, (const void *) _b, 8);
+ memcpy((void *) (d + 8), (const void *) _a, 8);
+ // shifting
+ for (size_t x = 0; x < sizeof(d); x++) {
+ if (x + shift >= sizeof(d))
+ d[x] = 0;
+ else
+ d[x] = d[x + shift];
+ }
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret;
+ switch (iter % 3) {
+ case 0:
+ ret = _mm_alignr_pi8(a, b, 0);
+ break;
+ case 1:
+ ret = _mm_alignr_pi8(a, b, 8);
+ break;
+ case 2:
+ ret = _mm_alignr_pi8(a, b, 16);
+ break;
+ }
+
+ return VALIDATE_UINT8_M64(ret, d);
+#endif
+}
+
+result_t test_mm_hadd_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[8];
+ d[0] = _a[0] + _a[1];
+ d[1] = _a[2] + _a[3];
+ d[2] = _a[4] + _a[5];
+ d[3] = _a[6] + _a[7];
+ d[4] = _b[0] + _b[1];
+ d[5] = _b[2] + _b[3];
+ d[6] = _b[4] + _b[5];
+ d[7] = _b[6] + _b[7];
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_hadd_epi16(a, b);
+ return VALIDATE_INT16_M128(ret, d);
+}
+
+result_t test_mm_hadd_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+ int32_t d[4];
+ d[0] = _a[0] + _a[1];
+ d[1] = _a[2] + _a[3];
+ d[2] = _b[0] + _b[1];
+ d[3] = _b[2] + _b[3];
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_hadd_epi32(a, b);
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_hadd_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t d[4];
+ d[0] = _a[0] + _a[1];
+ d[1] = _a[2] + _a[3];
+ d[2] = _b[0] + _b[1];
+ d[3] = _b[2] + _b[3];
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_hadd_pi16(a, b);
+ return VALIDATE_INT16_M64(ret, d);
+}
+
+result_t test_mm_hadd_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+ int32_t d[2];
+ d[0] = _a[0] + _a[1];
+ d[1] = _b[0] + _b[1];
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_hadd_pi32(a, b);
+ return VALIDATE_INT32_M64(ret, d);
+}
+
+result_t test_mm_hadds_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d16[8];
+ int32_t d32[8];
+ d32[0] = (int32_t) _a[0] + (int32_t) _a[1];
+ d32[1] = (int32_t) _a[2] + (int32_t) _a[3];
+ d32[2] = (int32_t) _a[4] + (int32_t) _a[5];
+ d32[3] = (int32_t) _a[6] + (int32_t) _a[7];
+ d32[4] = (int32_t) _b[0] + (int32_t) _b[1];
+ d32[5] = (int32_t) _b[2] + (int32_t) _b[3];
+ d32[6] = (int32_t) _b[4] + (int32_t) _b[5];
+ d32[7] = (int32_t) _b[6] + (int32_t) _b[7];
+ for (int i = 0; i < 8; i++) {
+ if (d32[i] > (int32_t) INT16_MAX)
+ d16[i] = INT16_MAX;
+ else if (d32[i] < (int32_t) INT16_MIN)
+ d16[i] = INT16_MIN;
+ else
+ d16[i] = (int16_t) d32[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_hadds_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d16);
+}
+
+result_t test_mm_hadds_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d16[8];
+ int32_t d32[8];
+ d32[0] = (int32_t) _a[0] + (int32_t) _a[1];
+ d32[1] = (int32_t) _a[2] + (int32_t) _a[3];
+ d32[2] = (int32_t) _b[0] + (int32_t) _b[1];
+ d32[3] = (int32_t) _b[2] + (int32_t) _b[3];
+ for (int i = 0; i < 4; i++) {
+ if (d32[i] > (int32_t) INT16_MAX)
+ d16[i] = INT16_MAX;
+ else if (d32[i] < (int32_t) INT16_MIN)
+ d16[i] = INT16_MIN;
+ else
+ d16[i] = (int16_t) d32[i];
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_hadds_pi16(a, b);
+
+ return VALIDATE_INT16_M64(c, d16);
+}
+
+result_t test_mm_hsub_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ d[0] = _a[0] - _a[1];
+ d[1] = _a[2] - _a[3];
+ d[2] = _a[4] - _a[5];
+ d[3] = _a[6] - _a[7];
+ d[4] = _b[0] - _b[1];
+ d[5] = _b[2] - _b[3];
+ d[6] = _b[4] - _b[5];
+ d[7] = _b[6] - _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_hsub_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_hsub_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+
+ int32_t d[4];
+ d[0] = _a[0] - _a[1];
+ d[1] = _a[2] - _a[3];
+ d[2] = _b[0] - _b[1];
+ d[3] = _b[2] - _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_hsub_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_hsub_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[4];
+ d[0] = _a[0] - _a[1];
+ d[1] = _a[2] - _a[3];
+ d[2] = _b[0] - _b[1];
+ d[3] = _b[2] - _b[3];
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_hsub_pi16(a, b);
+
+ return VALIDATE_INT16_M64(c, d);
+}
+
+result_t test_mm_hsub_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+
+ int32_t d[2];
+ d[0] = _a[0] - _a[1];
+ d[1] = _b[0] - _b[1];
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_hsub_pi32(a, b);
+
+ return VALIDATE_INT32_M64(c, d);
+}
+
+result_t test_mm_hsubs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d16[8];
+ int32_t d32[8];
+ d32[0] = (int32_t) _a[0] - (int32_t) _a[1];
+ d32[1] = (int32_t) _a[2] - (int32_t) _a[3];
+ d32[2] = (int32_t) _a[4] - (int32_t) _a[5];
+ d32[3] = (int32_t) _a[6] - (int32_t) _a[7];
+ d32[4] = (int32_t) _b[0] - (int32_t) _b[1];
+ d32[5] = (int32_t) _b[2] - (int32_t) _b[3];
+ d32[6] = (int32_t) _b[4] - (int32_t) _b[5];
+ d32[7] = (int32_t) _b[6] - (int32_t) _b[7];
+ for (int i = 0; i < 8; i++) {
+ if (d32[i] > (int32_t) INT16_MAX)
+ d16[i] = INT16_MAX;
+ else if (d32[i] < (int32_t) INT16_MIN)
+ d16[i] = INT16_MIN;
+ else
+ d16[i] = (int16_t) d32[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_hsubs_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d16);
+}
+
+result_t test_mm_hsubs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int32_t _d[4];
+ _d[0] = (int32_t) _a[0] - (int32_t) _a[1];
+ _d[1] = (int32_t) _a[2] - (int32_t) _a[3];
+ _d[2] = (int32_t) _b[0] - (int32_t) _b[1];
+ _d[3] = (int32_t) _b[2] - (int32_t) _b[3];
+
+ for (int i = 0; i < 4; i++) {
+ if (_d[i] > (int32_t) INT16_MAX) {
+ _d[i] = INT16_MAX;
+ } else if (_d[i] < (int32_t) INT16_MIN) {
+ _d[i] = INT16_MIN;
+ }
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_hsubs_pi16(a, b);
+
+ return VALIDATE_INT16_M64(c, _d);
+}
+
+result_t test_mm_maddubs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
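+ // _mm_maddubs_epi16 multiplies each unsigned byte of a by the corresponding
+ // signed byte of b and sums adjacent products with signed 16-bit saturation.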
+ int32_t d0 = (int32_t) (_a[0] * _b[0]);
+ int32_t d1 = (int32_t) (_a[1] * _b[1]);
+ int32_t d2 = (int32_t) (_a[2] * _b[2]);
+ int32_t d3 = (int32_t) (_a[3] * _b[3]);
+ int32_t d4 = (int32_t) (_a[4] * _b[4]);
+ int32_t d5 = (int32_t) (_a[5] * _b[5]);
+ int32_t d6 = (int32_t) (_a[6] * _b[6]);
+ int32_t d7 = (int32_t) (_a[7] * _b[7]);
+ int32_t d8 = (int32_t) (_a[8] * _b[8]);
+ int32_t d9 = (int32_t) (_a[9] * _b[9]);
+ int32_t d10 = (int32_t) (_a[10] * _b[10]);
+ int32_t d11 = (int32_t) (_a[11] * _b[11]);
+ int32_t d12 = (int32_t) (_a[12] * _b[12]);
+ int32_t d13 = (int32_t) (_a[13] * _b[13]);
+ int32_t d14 = (int32_t) (_a[14] * _b[14]);
+ int32_t d15 = (int32_t) (_a[15] * _b[15]);
+
+ int16_t e[8];
+ e[0] = saturate_16(d0 + d1);
+ e[1] = saturate_16(d2 + d3);
+ e[2] = saturate_16(d4 + d5);
+ e[3] = saturate_16(d6 + d7);
+ e[4] = saturate_16(d8 + d9);
+ e[5] = saturate_16(d10 + d11);
+ e[6] = saturate_16(d12 + d13);
+ e[7] = saturate_16(d14 + d15);
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_maddubs_epi16(a, b);
+ return VALIDATE_INT16_M128(c, e);
+}
+
+result_t test_mm_maddubs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int16_t d0 = (int16_t) (_a[0] * _b[0]);
+ int16_t d1 = (int16_t) (_a[1] * _b[1]);
+ int16_t d2 = (int16_t) (_a[2] * _b[2]);
+ int16_t d3 = (int16_t) (_a[3] * _b[3]);
+ int16_t d4 = (int16_t) (_a[4] * _b[4]);
+ int16_t d5 = (int16_t) (_a[5] * _b[5]);
+ int16_t d6 = (int16_t) (_a[6] * _b[6]);
+ int16_t d7 = (int16_t) (_a[7] * _b[7]);
+
+ int16_t e[4];
+ e[0] = saturate_16(d0 + d1);
+ e[1] = saturate_16(d2 + d3);
+ e[2] = saturate_16(d4 + d5);
+ e[3] = saturate_16(d6 + d7);
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_maddubs_pi16(a, b);
+
+ return VALIDATE_INT16_M64(c, e);
+}
+
+result_t test_mm_mulhrs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ int32_t _c[8];
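+ // Reference for _mm_mulhrs_epi16: ((a * b) + 0x4000) >> 15, i.e. the rounded
+ // high half of the 32-bit product; the expression below is an equivalent
+ // formulation that keeps only the low 16 bits of that result.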
+ for (int i = 0; i < 8; i++) {
+ _c[i] =
+ (((((int32_t) _a[i] * (int32_t) _b[i]) >> 14) + 1) & 0x1FFFE) >> 1;
+ }
+ __m128i c = _mm_mulhrs_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, _c);
+}
+
+result_t test_mm_mulhrs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ int32_t _c[4];
+ for (int i = 0; i < 4; i++) {
+ _c[i] =
+ (((((int32_t) _a[i] * (int32_t) _b[i]) >> 14) + 1) & 0x1FFFE) >> 1;
+ }
+ __m64 c = _mm_mulhrs_pi16(a, b);
+
+ return VALIDATE_INT16_M64(c, _c);
+}
+
+result_t test_mm_shuffle_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t dst[16];
+
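+ // A selector byte with its most significant bit set zeroes the lane;
+ // otherwise its low four bits index a byte of a.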
+ for (int i = 0; i < 16; i++) {
+ if (_b[i] & 0x80) {
+ dst[i] = 0;
+ } else {
+ dst[i] = _a[_b[i] & 0x0F];
+ }
+ }
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i ret = _mm_shuffle_epi8(a, b);
+
+ return VALIDATE_INT8_M128(ret, dst);
+}
+
+result_t test_mm_shuffle_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t dst[8];
+
+ for (int i = 0; i < 8; i++) {
+ if (_b[i] & 0x80) {
+ dst[i] = 0;
+ } else {
+ dst[i] = _a[_b[i] & 0x07];
+ }
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 ret = _mm_shuffle_pi8(a, b);
+
+ return VALIDATE_INT8_M64(ret, dst);
+}
+
+result_t test_mm_sign_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[8];
+ for (int i = 0; i < 8; i++) {
+ if (_b[i] < 0) {
+ d[i] = -_a[i];
+ } else if (_b[i] == 0) {
+ d[i] = 0;
+ } else {
+ d[i] = _a[i];
+ }
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sign_epi16(a, b);
+
+ return VALIDATE_INT16_M128(c, d);
+}
+
+result_t test_mm_sign_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int32_t d[4];
+ for (int i = 0; i < 4; i++) {
+ if (_b[i] < 0) {
+ d[i] = -_a[i];
+ } else if (_b[i] == 0) {
+ d[i] = 0;
+ } else {
+ d[i] = _a[i];
+ }
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sign_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_sign_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int8_t d[16];
+ for (int i = 0; i < 16; i++) {
+ if (_b[i] < 0) {
+ d[i] = -_a[i];
+ } else if (_b[i] == 0) {
+ d[i] = 0;
+ } else {
+ d[i] = _a[i];
+ }
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_sign_epi8(a, b);
+
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_sign_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+
+ int16_t d[4];
+ for (int i = 0; i < 4; i++) {
+ if (_b[i] < 0) {
+ d[i] = -_a[i];
+ } else if (_b[i] == 0) {
+ d[i] = 0;
+ } else {
+ d[i] = _a[i];
+ }
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_sign_pi16(a, b);
+
+ return VALIDATE_INT16_M64(c, d);
+}
+
+result_t test_mm_sign_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int32_t d[2];
+ for (int i = 0; i < 2; i++) {
+ if (_b[i] < 0) {
+ d[i] = -_a[i];
+ } else if (_b[i] == 0) {
+ d[i] = 0;
+ } else {
+ d[i] = _a[i];
+ }
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_sign_pi32(a, b);
+
+ return VALIDATE_INT32_M64(c, d);
+}
+
+result_t test_mm_sign_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int8_t d[8];
+ for (int i = 0; i < 8; i++) {
+ if (_b[i] < 0) {
+ d[i] = -_a[i];
+ } else if (_b[i] == 0) {
+ d[i] = 0;
+ } else {
+ d[i] = _a[i];
+ }
+ }
+
+ __m64 a = load_m64(_a);
+ __m64 b = load_m64(_b);
+ __m64 c = _mm_sign_pi8(a, b);
+
+ return VALIDATE_INT8_M64(c, d);
+}
+
+/* SSE4.1 */
+result_t test_mm_blend_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+ int16_t _c[8];
+ __m128i a, b, c;
+
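+// Each bit i of the 8-bit immediate selects lane i from b (bit set) or from a
+// (bit clear).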
+#define TEST_IMPL(IDX) \
+ for (int j = 0; j < 8; j++) { \
+ if ((IDX >> j) & 0x1) { \
+ _c[j] = _b[j]; \
+ } else { \
+ _c[j] = _a[j]; \
+ } \
+ } \
+ a = load_m128i(_a); \
+ b = load_m128i(_b); \
+ c = _mm_blend_epi16(a, b, IDX); \
+ CHECK_RESULT(VALIDATE_INT16_M128(c, _c));
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_blend_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ __m128d a, b, c;
+
+#define TEST_IMPL(IDX) \
+ double _c##IDX[2]; \
+ for (int j = 0; j < 2; j++) { \
+ if ((IDX >> j) & 0x1) { \
+ _c##IDX[j] = _b[j]; \
+ } else { \
+ _c##IDX[j] = _a[j]; \
+ } \
+ } \
+ \
+ a = load_m128d(_a); \
+ b = load_m128d(_b); \
+ c = _mm_blend_pd(a, b, IDX); \
+ CHECK_RESULT(validateDouble(c, _c##IDX[0], _c##IDX[1]))
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_blend_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c;
+
+ // gcc and clang cannot compile a call to _mm_blend_ps whose third argument
+ // is a runtime integer: the immediate is limited to 4 bits and must be a
+ // compile-time constant, so every value is expanded explicitly below.
+#define TEST_IMPL(IDX) \
+ float _c##IDX[4]; \
+ for (int i = 0; i < 4; i++) { \
+ if (IDX & (1 << i)) { \
+ _c##IDX[i] = _b[i]; \
+ } else { \
+ _c##IDX[i] = _a[i]; \
+ } \
+ } \
+ \
+ c = _mm_blend_ps(a, b, IDX); \
+ CHECK_RESULT( \
+ validateFloat(c, _c##IDX[0], _c##IDX[1], _c##IDX[2], _c##IDX[3]))
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_blendv_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ const int8_t _mask[16] = {(const int8_t) impl.mTestInts[iter],
+ (const int8_t) impl.mTestInts[iter + 1],
+ (const int8_t) impl.mTestInts[iter + 2],
+ (const int8_t) impl.mTestInts[iter + 3],
+ (const int8_t) impl.mTestInts[iter + 4],
+ (const int8_t) impl.mTestInts[iter + 5],
+ (const int8_t) impl.mTestInts[iter + 6],
+ (const int8_t) impl.mTestInts[iter + 7]};
+
+ int8_t _c[16];
+ for (int i = 0; i < 16; i++) {
+ if (_mask[i] >> 7) {
+ _c[i] = _b[i];
+ } else {
+ _c[i] = _a[i];
+ }
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i mask = load_m128i(_mask);
+ __m128i c = _mm_blendv_epi8(a, b, mask);
+
+ return VALIDATE_INT8_M128(c, _c);
+}
+
+result_t test_mm_blendv_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ const double _mask[] = {(double) impl.mTestFloats[iter],
+ (double) impl.mTestFloats[iter + 1]};
+
+ double _c[2];
+ for (int i = 0; i < 2; i++) {
+ // an arithmetic shift right by 63 yields all 1's for negative values and
+ // all 0's otherwise, i.e. it broadcasts the sign bit of the mask element
+ if ((*(const int64_t *) (_mask + i)) >> 63) {
+ _c[i] = _b[i];
+ } else {
+ _c[i] = _a[i];
+ }
+ }
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d mask = load_m128d(_mask);
+
+ __m128d c = _mm_blendv_pd(a, b, mask);
+
+ return validateDouble(c, _c[0], _c[1]);
+}
+
+result_t test_mm_blendv_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ const float _mask[] = {impl.mTestFloats[iter], impl.mTestFloats[iter + 1],
+ impl.mTestFloats[iter + 2],
+ impl.mTestFloats[iter + 3]};
+
+ float _c[4];
+ for (int i = 0; i < 4; i++) {
+ // an arithmetic shift right by 31 yields all 1's for negative values and
+ // all 0's otherwise, i.e. it broadcasts the sign bit of the mask element
+ if ((*(const int32_t *) (_mask + i)) >> 31) {
+ _c[i] = _b[i];
+ } else {
+ _c[i] = _a[i];
+ }
+ }
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 mask = load_m128(_mask);
+
+ __m128 c = _mm_blendv_ps(a, b, mask);
+
+ return validateFloat(c, _c[0], _c[1], _c[2], _c[3]);
+}
+
+result_t test_mm_ceil_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ double dx = ceil(_a[0]);
+ double dy = ceil(_a[1]);
+
+ __m128d a = load_m128d(_a);
+ __m128d ret = _mm_ceil_pd(a);
+
+ return validateDouble(ret, dx, dy);
+}
+
+result_t test_mm_ceil_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ float dx = ceilf(_a[0]);
+ float dy = ceilf(_a[1]);
+ float dz = ceilf(_a[2]);
+ float dw = ceilf(_a[3]);
+
+ __m128 a = _mm_load_ps(_a);
+ __m128 c = _mm_ceil_ps(a);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_ceil_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double dx = ceil(_b[0]);
+ double dy = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d ret = _mm_ceil_sd(a, b);
+
+ return validateDouble(ret, dx, dy);
+}
+
+result_t test_mm_ceil_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = ceilf(_b[0]);
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_ceil_ss(a, b);
+
+ return validateFloat(c, f0, _a[1], _a[2], _a[3]);
+}
+
+result_t test_mm_cmpeq_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+ int64_t d0 = (_a[0] == _b[0]) ? 0xffffffffffffffff : 0x0;
+ int64_t d1 = (_a[1] == _b[1]) ? 0xffffffffffffffff : 0x0;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_cmpeq_epi64(a, b);
+ return validateInt64(c, d0, d1);
+}
+
+result_t test_mm_cvtepi16_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+
+ int32_t d[4];
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+ d[2] = (int32_t) _a[2];
+ d[3] = (int32_t) _a[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepi16_epi32(a);
+
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_cvtepi16_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+
+ int64_t i0 = (int64_t) _a[0];
+ int64_t i1 = (int64_t) _a[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepi16_epi64(a);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_cvtepi32_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+
+ int64_t i0 = (int64_t) _a[0];
+ int64_t i1 = (int64_t) _a[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepi32_epi64(a);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_cvtepi8_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+
+ int16_t d[8];
+ d[0] = (int16_t) _a[0];
+ d[1] = (int16_t) _a[1];
+ d[2] = (int16_t) _a[2];
+ d[3] = (int16_t) _a[3];
+ d[4] = (int16_t) _a[4];
+ d[5] = (int16_t) _a[5];
+ d[6] = (int16_t) _a[6];
+ d[7] = (int16_t) _a[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepi8_epi16(a);
+
+ return VALIDATE_INT16_M128(ret, d);
+}
+
+result_t test_mm_cvtepi8_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+
+ int32_t d[4];
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+ d[2] = (int32_t) _a[2];
+ d[3] = (int32_t) _a[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepi8_epi32(a);
+
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_cvtepi8_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+
+ int64_t i0 = (int64_t) _a[0];
+ int64_t i1 = (int64_t) _a[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepi8_epi64(a);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_cvtepu16_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+
+ int32_t d[4];
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+ d[2] = (int32_t) _a[2];
+ d[3] = (int32_t) _a[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepu16_epi32(a);
+
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_cvtepu16_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+
+ int64_t i0 = (int64_t) _a[0];
+ int64_t i1 = (int64_t) _a[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepu16_epi64(a);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_cvtepu32_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+
+ int64_t i0 = (int64_t) _a[0];
+ int64_t i1 = (int64_t) _a[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepu32_epi64(a);
+
+ return validateInt64(ret, i0, i1);
+}
+
+result_t test_mm_cvtepu8_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+
+ int16_t d[8];
+ d[0] = (int16_t) _a[0];
+ d[1] = (int16_t) _a[1];
+ d[2] = (int16_t) _a[2];
+ d[3] = (int16_t) _a[3];
+ d[4] = (int16_t) _a[4];
+ d[5] = (int16_t) _a[5];
+ d[6] = (int16_t) _a[6];
+ d[7] = (int16_t) _a[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepu8_epi16(a);
+
+ return VALIDATE_INT16_M128(ret, d);
+}
+
+result_t test_mm_cvtepu8_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+
+ int32_t d[4];
+ d[0] = (int32_t) _a[0];
+ d[1] = (int32_t) _a[1];
+ d[2] = (int32_t) _a[2];
+ d[3] = (int32_t) _a[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepu8_epi32(a);
+
+ return VALIDATE_INT32_M128(ret, d);
+}
+
+result_t test_mm_cvtepu8_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+
+ int64_t i0 = (int64_t) _a[0];
+ int64_t i1 = (int64_t) _a[1];
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_cvtepu8_epi64(a);
+
+ return validateInt64(ret, i0, i1);
+}
+
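+// _mm_dp_pd immediate: bits 5:4 select which products _a[i] * _b[i] are summed
+// and bits 1:0 select which output lanes receive the sum (other lanes are 0).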
+#define MM_DP_PD_TEST_CASE_WITH(imm8) \
+ do { \
+ const double *_a = (const double *) impl.mTestFloatPointer1; \
+ const double *_b = (const double *) impl.mTestFloatPointer2; \
+ const int imm = imm8; \
+ double d[2]; \
+ double sum = 0; \
+ for (size_t i = 0; i < 2; i++) \
+ sum += ((imm) & (1 << (i + 4))) ? _a[i] * _b[i] : 0; \
+ for (size_t i = 0; i < 2; i++) \
+ d[i] = (imm & (1 << i)) ? sum : 0; \
+ __m128d a = load_m128d(_a); \
+ __m128d b = load_m128d(_b); \
+ __m128d ret = _mm_dp_pd(a, b, imm); \
+ if (validateDouble(ret, d[0], d[1]) != TEST_SUCCESS) \
+ return TEST_FAIL; \
+ } while (0)
+
+#define GENERATE_MM_DP_PD_TEST_CASES \
+ MM_DP_PD_TEST_CASE_WITH(0xF0); \
+ MM_DP_PD_TEST_CASE_WITH(0xF1); \
+ MM_DP_PD_TEST_CASE_WITH(0xF2); \
+ MM_DP_PD_TEST_CASE_WITH(0xFF); \
+ MM_DP_PD_TEST_CASE_WITH(0x10); \
+ MM_DP_PD_TEST_CASE_WITH(0x11); \
+ MM_DP_PD_TEST_CASE_WITH(0x12); \
+ MM_DP_PD_TEST_CASE_WITH(0x13); \
+ MM_DP_PD_TEST_CASE_WITH(0x00); \
+ MM_DP_PD_TEST_CASE_WITH(0x01); \
+ MM_DP_PD_TEST_CASE_WITH(0x02); \
+ MM_DP_PD_TEST_CASE_WITH(0x03); \
+ MM_DP_PD_TEST_CASE_WITH(0x20); \
+ MM_DP_PD_TEST_CASE_WITH(0x21); \
+ MM_DP_PD_TEST_CASE_WITH(0x22); \
+ MM_DP_PD_TEST_CASE_WITH(0x23);
+
+result_t test_mm_dp_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_DP_PD_TEST_CASES
+ return TEST_SUCCESS;
+}
+
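+// _mm_dp_ps immediate: bits 7:4 select which products enter the sum and bits
+// 3:0 select which output lanes receive it (other lanes are 0).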
+#define MM_DP_PS_TEST_CASE_WITH(IMM) \
+ do { \
+ const float *_a = impl.mTestFloatPointer1; \
+ const float *_b = impl.mTestFloatPointer2; \
+ const int imm = IMM; \
+ __m128 a = load_m128(_a); \
+ __m128 b = load_m128(_b); \
+ __m128 out = _mm_dp_ps(a, b, imm); \
+ float r[4]; /* the reference */ \
+ float sum = 0; \
+ for (size_t i = 0; i < 4; i++) \
+ sum += ((imm) & (1 << (i + 4))) ? _a[i] * _b[i] : 0; \
+ for (size_t i = 0; i < 4; i++) \
+ r[i] = (imm & (1 << i)) ? sum : 0; \
+ /* the epsilon must be large enough, otherwise the test suite fails. */ \
+ if (validateFloatEpsilon(out, r[0], r[1], r[2], r[3], 2050.0f) != \
+ TEST_SUCCESS) \
+ return TEST_FAIL; \
+ } while (0)
+
+#define GENERATE_MM_DP_PS_TEST_CASES \
+ MM_DP_PS_TEST_CASE_WITH(0xFF); \
+ MM_DP_PS_TEST_CASE_WITH(0x7F); \
+ MM_DP_PS_TEST_CASE_WITH(0x9F); \
+ MM_DP_PS_TEST_CASE_WITH(0x2F); \
+ MM_DP_PS_TEST_CASE_WITH(0x0F); \
+ MM_DP_PS_TEST_CASE_WITH(0x23); \
+ MM_DP_PS_TEST_CASE_WITH(0xB5);
+
+result_t test_mm_dp_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_DP_PS_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_extract_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int32_t *_a = (int32_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ int c;
+
+#define TEST_IMPL(IDX) \
+ c = _mm_extract_epi32(a, IDX); \
+ ASSERT_RETURN(c == *(_a + IDX));
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_extract_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int64_t *_a = (int64_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ __int64 c;
+
+#define TEST_IMPL(IDX) \
+ c = _mm_extract_epi64(a, IDX); \
+ ASSERT_RETURN(c == *(_a + IDX));
+
+ IMM_2_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_extract_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint8_t *_a = (uint8_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+ int c;
+
+#define TEST_IMPL(IDX) \
+ c = _mm_extract_epi8(a, IDX); \
+ ASSERT_RETURN(c == *(_a + IDX));
+
+ IMM_8_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_extract_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = (const float *) impl.mTestFloatPointer1;
+
+ __m128 a = _mm_load_ps(_a);
+ int32_t c;
+
+#define TEST_IMPL(IDX) \
+ c = _mm_extract_ps(a, IDX); \
+ ASSERT_RETURN(c == *(const int32_t *) (_a + IDX));
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_floor_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+
+ double dx = floor(_a[0]);
+ double dy = floor(_a[1]);
+
+ __m128d a = load_m128d(_a);
+ __m128d ret = _mm_floor_pd(a);
+
+ return validateDouble(ret, dx, dy);
+}
+
+result_t test_mm_floor_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ float dx = floorf(_a[0]);
+ float dy = floorf(_a[1]);
+ float dz = floorf(_a[2]);
+ float dw = floorf(_a[3]);
+
+ __m128 a = load_m128(_a);
+ __m128 c = _mm_floor_ps(a);
+ return validateFloat(c, dx, dy, dz, dw);
+}
+
+result_t test_mm_floor_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+
+ double dx = floor(_b[0]);
+ double dy = _a[1];
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ __m128d ret = _mm_floor_sd(a, b);
+
+ return validateDouble(ret, dx, dy);
+}
+
+result_t test_mm_floor_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ float f0 = floorf(_b[0]);
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ __m128 c = _mm_floor_ss(a, b);
+
+ return validateFloat(c, f0, _a[1], _a[2], _a[3]);
+}
+
+result_t test_mm_insert_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t insert = (int32_t) *impl.mTestIntPointer2;
+ __m128i a, b;
+
+#define TEST_IMPL(IDX) \
+ int32_t d##IDX[4]; \
+ for (int i = 0; i < 4; i++) { \
+ d##IDX[i] = _a[i]; \
+ } \
+ d##IDX[IDX] = insert; \
+ \
+ a = load_m128i(_a); \
+ b = _mm_insert_epi32(a, (int) insert, IDX); \
+ CHECK_RESULT(VALIDATE_INT32_M128(b, d##IDX));
+
+ IMM_4_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_insert_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ int64_t insert = (int64_t) *impl.mTestIntPointer2;
+
+ __m128i a, b;
+ int64_t d[2];
+#define TEST_IMPL(IDX) \
+ d[0] = _a[0]; \
+ d[1] = _a[1]; \
+ d[IDX] = insert; \
+ a = load_m128i(_a); \
+ b = _mm_insert_epi64(a, insert, IDX); \
+ CHECK_RESULT(validateInt64(b, d[0], d[1]));
+
+ IMM_2_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_insert_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t insert = (int8_t) *impl.mTestIntPointer2;
+ __m128i a, b;
+ int8_t d[16];
+
+#define TEST_IMPL(IDX) \
+ for (int i = 0; i < 16; i++) { \
+ d[i] = _a[i]; \
+ } \
+ d[IDX] = insert; \
+ a = load_m128i(_a); \
+ b = _mm_insert_epi8(a, insert, IDX); \
+ CHECK_RESULT(VALIDATE_INT8_M128(b, d));
+
+ IMM_16_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_insert_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+
+ __m128 a, b, c;
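+// _mm_insert_ps immediate: bits 7:6 pick the source lane of b, bits 5:4 pick
+// the destination lane of a, and bits 3:0 are a zero mask applied afterwards.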
+#define TEST_IMPL(IDX) \
+ float d##IDX[4] = {_a[0], _a[1], _a[2], _a[3]}; \
+ d##IDX[(IDX >> 4) & 0x3] = _b[(IDX >> 6) & 0x3]; \
+ \
+ for (int j = 0; j < 4; j++) { \
+ if (IDX & (1 << j)) { \
+ d##IDX[j] = 0; \
+ } \
+ } \
+ \
+ a = _mm_load_ps(_a); \
+ b = _mm_load_ps(_b); \
+ c = _mm_insert_ps(a, b, IDX); \
+ CHECK_RESULT(validateFloat(c, d##IDX[0], d##IDX[1], d##IDX[2], d##IDX[3]));
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_max_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int32_t d[4];
+ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_max_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_max_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+ int8_t d[16];
+ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+ d[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+ d[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+ d[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+ d[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+ d[8] = _a[8] > _b[8] ? _a[8] : _b[8];
+ d[9] = _a[9] > _b[9] ? _a[9] : _b[9];
+ d[10] = _a[10] > _b[10] ? _a[10] : _b[10];
+ d[11] = _a[11] > _b[11] ? _a[11] : _b[11];
+ d[12] = _a[12] > _b[12] ? _a[12] : _b[12];
+ d[13] = _a[13] > _b[13] ? _a[13] : _b[13];
+ d[14] = _a[14] > _b[14] ? _a[14] : _b[14];
+ d[15] = _a[15] > _b[15] ? _a[15] : _b[15];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ __m128i c = _mm_max_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_max_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+
+ uint16_t d[8];
+ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+ d[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+ d[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+ d[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+ d[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_max_epu16(a, b);
+
+ return VALIDATE_UINT16_M128(c, d);
+}
+
+result_t test_mm_max_epu32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+
+ uint32_t d[4];
+ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_max_epu32(a, b);
+
+ return VALIDATE_UINT32_M128(c, d);
+}
+
+result_t test_mm_min_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ int32_t d[4];
+ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_min_epi32(a, b);
+
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_min_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+
+ int8_t d[16];
+ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+ d[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+ d[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+ d[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+ d[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+ d[8] = _a[8] < _b[8] ? _a[8] : _b[8];
+ d[9] = _a[9] < _b[9] ? _a[9] : _b[9];
+ d[10] = _a[10] < _b[10] ? _a[10] : _b[10];
+ d[11] = _a[11] < _b[11] ? _a[11] : _b[11];
+ d[12] = _a[12] < _b[12] ? _a[12] : _b[12];
+ d[13] = _a[13] < _b[13] ? _a[13] : _b[13];
+ d[14] = _a[14] < _b[14] ? _a[14] : _b[14];
+ d[15] = _a[15] < _b[15] ? _a[15] : _b[15];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+
+ __m128i c = _mm_min_epi8(a, b);
+ return VALIDATE_INT8_M128(c, d);
+}
+
+result_t test_mm_min_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+
+ uint16_t d[8];
+ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+ d[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+ d[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+ d[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+ d[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_min_epu16(a, b);
+
+ return VALIDATE_UINT16_M128(c, d);
+}
+
+result_t test_mm_min_epu32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+
+ uint32_t d[4];
+ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_min_epu32(a, b);
+
+ return VALIDATE_UINT32_M128(c, d);
+}
+
+result_t test_mm_minpos_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
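+ // _mm_minpos_epu16 returns the minimum unsigned word in lane 0, its index
+ // in lane 1, and zeros in the remaining lanes.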
+ uint16_t index = 0, min = (uint16_t) _a[0];
+ for (int i = 0; i < 8; i++) {
+ if ((uint16_t) _a[i] < min) {
+ index = (uint16_t) i;
+ min = (uint16_t) _a[i];
+ }
+ }
+
+ uint16_t d[8] = {min, index, 0, 0, 0, 0, 0, 0};
+
+ __m128i a = load_m128i(_a);
+ __m128i ret = _mm_minpos_epu16(a);
+ return VALIDATE_UINT16_M128(ret, d);
+}
+
+result_t test_mm_mpsadbw_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c;
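+// _mm_mpsadbw_epu8 immediate: bit 2 selects a 4-byte offset into a and bits
+// 1:0 select a 4-byte block of b; output word i is the sum of absolute
+// differences between a[offset + i .. offset + i + 3] and that block of b.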
+#define TEST_IMPL(IDX) \
+ uint8_t a_offset##IDX = ((IDX >> 2) & 0x1) * 4; \
+ uint8_t b_offset##IDX = (IDX & 0x3) * 4; \
+ \
+ uint16_t d##IDX[8] = {}; \
+ for (int i = 0; i < 8; i++) { \
+ for (int j = 0; j < 4; j++) { \
+ d##IDX[i] += \
+ abs(_a[(a_offset##IDX + i) + j] - _b[b_offset##IDX + j]); \
+ } \
+ } \
+ c = _mm_mpsadbw_epu8(a, b, IDX); \
+ CHECK_RESULT(VALIDATE_UINT16_M128(c, d##IDX));
+
+ IMM_8_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_mul_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
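+ // _mm_mul_epi32 multiplies only the even-indexed (0 and 2) signed 32-bit
+ // lanes, producing two signed 64-bit products.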
+ int64_t dx = (int64_t) (_a[0]) * (int64_t) (_b[0]);
+ int64_t dy = (int64_t) (_a[2]) * (int64_t) (_b[2]);
+
+ __m128i a = _mm_loadu_si128((const __m128i *) _a);
+ __m128i b = _mm_loadu_si128((const __m128i *) _b);
+ __m128i r = _mm_mul_epi32(a, b);
+
+ return validateInt64(r, dx, dy);
+}
+
+result_t test_mm_mullo_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ int32_t d[4];
+
+ for (int i = 0; i < 4; i++) {
+ d[i] = (int32_t) ((int64_t) _a[i] * (int64_t) _b[i]);
+ }
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_mullo_epi32(a, b);
+ return VALIDATE_INT32_M128(c, d);
+}
+
+result_t test_mm_packus_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint16_t max = UINT16_MAX;
+ uint16_t min = 0;
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+
+ uint16_t d[8];
+ for (int i = 0; i < 4; i++) {
+ if (_a[i] > (int32_t) max)
+ d[i] = max;
+ else if (_a[i] < (int32_t) min)
+ d[i] = min;
+ else
+ d[i] = (uint16_t) _a[i];
+ }
+ for (int i = 0; i < 4; i++) {
+ if (_b[i] > (int32_t) max)
+ d[i + 4] = max;
+ else if (_b[i] < (int32_t) min)
+ d[i + 4] = min;
+ else
+ d[i + 4] = (uint16_t) _b[i];
+ }
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i c = _mm_packus_epi32(a, b);
+
+ return VALIDATE_UINT16_M128(c, d);
+}
+
+result_t test_mm_round_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ double d[2];
+ __m128d ret;
+
+ __m128d a = load_m128d(_a);
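+ // _MM_FROUND_TO_NEAREST_INT rounds halfway cases to even, which the
+ // bankersRounding helper models; cases 4-7 exercise _MM_FROUND_CUR_DIRECTION
+ // after setting the rounding mode with _MM_SET_ROUNDING_MODE.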
+ switch (iter & 0x7) {
+ case 0:
+ d[0] = bankersRounding(_a[0]);
+ d[1] = bankersRounding(_a[1]);
+
+ ret = _mm_round_pd(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ break;
+ case 1:
+ d[0] = floor(_a[0]);
+ d[1] = floor(_a[1]);
+
+ ret = _mm_round_pd(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 2:
+ d[0] = ceil(_a[0]);
+ d[1] = ceil(_a[1]);
+
+ ret = _mm_round_pd(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 3:
+ d[0] = _a[0] > 0 ? floor(_a[0]) : ceil(_a[0]);
+ d[1] = _a[1] > 0 ? floor(_a[1]) : ceil(_a[1]);
+
+ ret = _mm_round_pd(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ break;
+ case 4:
+ d[0] = bankersRounding(_a[0]);
+ d[1] = bankersRounding(_a[1]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 5:
+ d[0] = floor(_a[0]);
+ d[1] = floor(_a[1]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 6:
+ d[0] = ceil(_a[0]);
+ d[1] = ceil(_a[1]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 7:
+ d[0] = _a[0] > 0 ? floor(_a[0]) : ceil(_a[0]);
+ d[1] = _a[1] > 0 ? floor(_a[1]) : ceil(_a[1]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ }
+
+ return validateDouble(ret, d[0], d[1]);
+}
+
+result_t test_mm_round_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ float f[4];
+ __m128 ret;
+
+ __m128 a = load_m128(_a);
+ switch (iter & 0x7) {
+ case 0:
+ f[0] = bankersRounding(_a[0]);
+ f[1] = bankersRounding(_a[1]);
+ f[2] = bankersRounding(_a[2]);
+ f[3] = bankersRounding(_a[3]);
+
+ ret = _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ break;
+ case 1:
+ f[0] = floorf(_a[0]);
+ f[1] = floorf(_a[1]);
+ f[2] = floorf(_a[2]);
+ f[3] = floorf(_a[3]);
+
+ ret = _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 2:
+ f[0] = ceilf(_a[0]);
+ f[1] = ceilf(_a[1]);
+ f[2] = ceilf(_a[2]);
+ f[3] = ceilf(_a[3]);
+
+ ret = _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 3:
+ f[0] = _a[0] > 0 ? floorf(_a[0]) : ceilf(_a[0]);
+ f[1] = _a[1] > 0 ? floorf(_a[1]) : ceilf(_a[1]);
+ f[2] = _a[2] > 0 ? floorf(_a[2]) : ceilf(_a[2]);
+ f[3] = _a[3] > 0 ? floorf(_a[3]) : ceilf(_a[3]);
+
+ ret = _mm_round_ps(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ break;
+ case 4:
+ f[0] = bankersRounding(_a[0]);
+ f[1] = bankersRounding(_a[1]);
+ f[2] = bankersRounding(_a[2]);
+ f[3] = bankersRounding(_a[3]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 5:
+ f[0] = floorf(_a[0]);
+ f[1] = floorf(_a[1]);
+ f[2] = floorf(_a[2]);
+ f[3] = floorf(_a[3]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 6:
+ f[0] = ceilf(_a[0]);
+ f[1] = ceilf(_a[1]);
+ f[2] = ceilf(_a[2]);
+ f[3] = ceilf(_a[3]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 7:
+ f[0] = _a[0] > 0 ? floorf(_a[0]) : ceilf(_a[0]);
+ f[1] = _a[1] > 0 ? floorf(_a[1]) : ceilf(_a[1]);
+ f[2] = _a[2] > 0 ? floorf(_a[2]) : ceilf(_a[2]);
+ f[3] = _a[3] > 0 ? floorf(_a[3]) : ceilf(_a[3]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+ break;
+ }
+
+ return validateFloat(ret, f[0], f[1], f[2], f[3]);
+}
+
+result_t test_mm_round_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const double *_a = (const double *) impl.mTestFloatPointer1;
+ const double *_b = (const double *) impl.mTestFloatPointer2;
+ double d[2];
+ __m128d ret;
+
+ __m128d a = load_m128d(_a);
+ __m128d b = load_m128d(_b);
+ d[1] = _a[1];
+ switch (iter & 0x7) {
+ case 0:
+ d[0] = bankersRounding(_b[0]);
+
+ ret = _mm_round_sd(a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ break;
+ case 1:
+ d[0] = floor(_b[0]);
+
+ ret = _mm_round_sd(a, b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 2:
+ d[0] = ceil(_b[0]);
+
+ ret = _mm_round_sd(a, b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 3:
+ d[0] = _b[0] > 0 ? floor(_b[0]) : ceil(_b[0]);
+
+ ret = _mm_round_sd(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ break;
+ case 4:
+ d[0] = bankersRounding(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 5:
+ d[0] = floor(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 6:
+ d[0] = ceil(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 7:
+ d[0] = _b[0] > 0 ? floor(_b[0]) : ceil(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ }
+
+ return validateDouble(ret, d[0], d[1]);
+}
+
+result_t test_mm_round_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const float *_a = impl.mTestFloatPointer1;
+ const float *_b = impl.mTestFloatPointer2;
+ float f[4];
+ __m128 ret;
+
+ __m128 a = load_m128(_a);
+ __m128 b = load_m128(_b);
+ switch (iter & 0x7) {
+ case 0:
+ f[0] = bankersRounding(_b[0]);
+
+ ret = _mm_round_ss(a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ break;
+ case 1:
+ f[0] = floorf(_b[0]);
+
+ ret = _mm_round_ss(a, b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 2:
+ f[0] = ceilf(_b[0]);
+
+ ret = _mm_round_ss(a, b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+ break;
+ case 3:
+ f[0] = _b[0] > 0 ? floorf(_b[0]) : ceilf(_b[0]);
+
+ ret = _mm_round_ss(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+ break;
+ case 4:
+ f[0] = bankersRounding(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 5:
+ f[0] = floorf(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 6:
+ f[0] = ceilf(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ case 7:
+ f[0] = _b[0] > 0 ? floorf(_b[0]) : ceilf(_b[0]);
+
+ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+ break;
+ }
+ f[1] = _a[1];
+ f[2] = _a[2];
+ f[3] = _a[3];
+
+ return validateFloat(ret, f[0], f[1], f[2], f[3]);
+}
+
+result_t test_mm_stream_load_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ int32_t *addr = impl.mTestIntPointer1;
+
+ __m128i ret = _mm_stream_load_si128((__m128i *) addr);
+
+ return VALIDATE_INT32_M128(ret, addr);
+}
+
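+// _mm_test_all_ones returns 1 only when every bit of the argument is set; the
+// reference below checks that the complement of each 32-bit lane is zero.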
+result_t test_mm_test_all_ones(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ __m128i a = load_m128i(_a);
+
+ int32_t d0 = ~_a[0] & (~(uint32_t) 0);
+ int32_t d1 = ~_a[1] & (~(uint32_t) 0);
+ int32_t d2 = ~_a[2] & (~(uint32_t) 0);
+ int32_t d3 = ~_a[3] & (~(uint32_t) 0);
+ int32_t result = ((d0 | d1 | d2 | d3) == 0) ? 1 : 0;
+
+ int32_t ret = _mm_test_all_ones(a);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_test_all_zeros(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_mask = (const int32_t *) impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i mask = load_m128i(_mask);
+
+ int32_t d0 = _a[0] & _mask[0];
+ int32_t d1 = _a[1] & _mask[1];
+ int32_t d2 = _a[2] & _mask[2];
+ int32_t d3 = _a[3] & _mask[3];
+ int32_t result = ((d0 | d1 | d2 | d3) == 0) ? 1 : 0;
+
+ int32_t ret = _mm_test_all_zeros(a, mask);
+
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+}
+
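+// _mm_test_mix_ones_zeros returns 1 when the mask selects both set and clear
+// bits of a, i.e. when neither the ZF nor the CF condition of PTEST holds.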
+result_t test_mm_test_mix_ones_zeros(const SSE2NEONTestImpl &impl,
+ uint32_t iter)
+{
+ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *_mask = (const int32_t *) impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i mask = load_m128i(_mask);
+
+ int32_t ZF = 1;
+ int32_t CF = 1;
+ for (int i = 0; i < 4; i++) {
+ ZF &= ((_a[i] & _mask[i]) == 0);
+ CF &= ((~_a[i] & _mask[i]) == 0);
+ }
+ int32_t result = (ZF == 0 && CF == 0);
+
+ int32_t ret = _mm_test_mix_ones_zeros(a, mask);
+ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+}
+
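+// _mm_testc_si128 reports the PTEST carry flag: 1 when (~a & b) is all zeros.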
+result_t test_mm_testc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = _mm_load_si128((const __m128i *) _a);
+ __m128i b = _mm_load_si128((const __m128i *) _b);
+ int testc = 1;
+ for (int i = 0; i < 2; i++) {
+ if ((~(((SIMDVec *) &a)->m128_u64[i]) &
+ ((SIMDVec *) &b)->m128_u64[i])) {
+ testc = 0;
+ break;
+ }
+ }
+ return _mm_testc_si128(a, b) == testc ? TEST_SUCCESS : TEST_FAIL;
+}
+
+result_t test_mm_testnzc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return test_mm_test_mix_ones_zeros(impl, iter);
+}
+
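+// _mm_testz_si128 reports the PTEST zero flag: 1 when (a & b) is all zeros.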
+result_t test_mm_testz_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *_a = impl.mTestIntPointer1;
+ const int32_t *_b = impl.mTestIntPointer2;
+ __m128i a = _mm_load_si128((const __m128i *) _a);
+ __m128i b = _mm_load_si128((const __m128i *) _b);
+ int testz = 1;
+ for (int i = 0; i < 2; i++) {
+ if ((((SIMDVec *) &a)->m128_u64[i] & ((SIMDVec *) &b)->m128_u64[i])) {
+ testz = 0;
+ break;
+ }
+ }
+ return _mm_testz_si128(a, b) == testz ? TEST_SUCCESS : TEST_FAIL;
+}
+
+/* SSE4.2 */
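+// IS_CMPESTRI selects, through IIF(), which branch of the evaluation macro is
+// expanded: 1 for the variants returning an int (index or flag), 0 for the
+// mask-returning _mm_cmpestrm.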
+#define IS_CMPESTRI 1
+
+#define DEF_ENUM_MM_CMPESTRX_VARIANT(c, ...) c,
+
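+// Expand one test case: load the operands and lengths for case `c`, invoke
+// the intrinsic with the matching IMM_* immediate, and compare the result
+// against the stored expectation (an int or an __m128i, depending on IM).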
+#define EVAL_MM_CMPESTRX_TEST_CASE(c, type, data_type, im, IM) \
+ do { \
+ data_type *a = test_mm_##im##_##type##_data[c].a, \
+ *b = test_mm_##im##_##type##_data[c].b; \
+ int la = test_mm_##im##_##type##_data[c].la, \
+ lb = test_mm_##im##_##type##_data[c].lb; \
+ const int imm8 = IMM_##c; \
+ IIF(IM) \
+ (int expect = test_mm_##im##_##type##_data[c].expect, \
+ data_type *expect = test_mm_##im##_##type##_data[c].expect); \
+ __m128i ma, mb; \
+ memcpy(&ma, a, sizeof(ma)); \
+ memcpy(&mb, b, sizeof(mb)); \
+ IIF(IM) \
+ (int res = _mm_##im(ma, la, mb, lb, imm8), \
+ __m128i res = _mm_##im(ma, la, mb, lb, imm8)); \
+ if (IIF(IM)(res != expect, memcmp(expect, &res, sizeof(__m128i)))) \
+ return TEST_FAIL; \
+ } while (0);
+
+#define ENUM_MM_CMPESTRX_TEST_CASES(type, type_lower, data_type, func, FUNC, \
+ IM) \
+ enum { MM_##FUNC##_##type##_TEST_CASES(DEF_ENUM_MM_CMPESTRX_VARIANT) }; \
+ MM_##FUNC##_##type##_TEST_CASES(EVAL_MM_CMPESTRX_TEST_CASE, type_lower, \
+ data_type, func, IM)
+
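+// Immediate control bytes, named IMM_<element>_<aggregation>_<significance>
+// with optional _NEGATIVE / _MASKED_NEGATIVE polarity suffixes, so the test
+// tables below can refer to _SIDD_* flag combinations by a compact name.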
+#define IMM_UBYTE_EACH_LEAST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UBYTE_EACH_LEAST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_EACH_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UBYTE_EACH_MOST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UBYTE_EACH_MOST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_EACH_MOST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ANY_LEAST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UBYTE_ANY_LEAST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ANY_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ANY_MOST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UBYTE_ANY_MOST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ANY_MOST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UBYTE_RANGES_LEAST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UBYTE_RANGES_MOST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UBYTE_RANGES_LEAST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_RANGES_MOST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_RANGES_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UBYTE_RANGES_MOST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ORDERED_LEAST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UBYTE_ORDERED_LEAST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ORDERED_MOST \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UBYTE_ORDERED_MOST_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ORDERED_MOST_MASKED_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+
+#define IMM_SBYTE_EACH_LEAST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SBYTE_EACH_LEAST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_EACH_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_EACH_MOST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SBYTE_EACH_MOST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_EACH_MOST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ANY_LEAST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SBYTE_ANY_LEAST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ANY_MOST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SBYTE_ANY_MOST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_RANGES_LEAST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SBYTE_RANGES_LEAST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_RANGES_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_RANGES_MOST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SBYTE_RANGES_MOST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_RANGES_MOST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ORDERED_LEAST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SBYTE_ORDERED_LEAST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ORDERED_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ORDERED_MOST_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ORDERED_MOST \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SBYTE_ORDERED_MOST_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+
+#define IMM_UWORD_RANGES_LEAST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UWORD_RANGES_LEAST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_RANGES_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UWORD_RANGES_MOST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UWORD_RANGES_MOST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_RANGES_MOST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UWORD_EACH_LEAST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UWORD_EACH_MOST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UWORD_EACH_LEAST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_EACH_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UWORD_EACH_MOST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UWORD_ANY_LEAST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UWORD_ANY_MOST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UWORD_ANY_MOST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_ANY_LEAST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_ANY_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UWORD_ORDERED_LEAST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_UWORD_ORDERED_LEAST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_ORDERED_LEAST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_UWORD_ORDERED_MOST \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+#define IMM_UWORD_ORDERED_MOST_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UWORD_ORDERED_MOST_MASKED_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+
+#define IMM_SWORD_RANGES_LEAST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SWORD_RANGES_MOST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SWORD_RANGES_LEAST_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SWORD_RANGES_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_RANGES_MOST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_EACH_LEAST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SWORD_EACH_MOST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SWORD_EACH_LEAST_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SWORD_EACH_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_EACH_MOST_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SWORD_EACH_MOST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_ANY_LEAST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SWORD_ANY_LEAST_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SWORD_ANY_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_ANY_MOST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SWORD_ANY_MOST_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SWORD_ANY_MOST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_ORDERED_LEAST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+#define IMM_SWORD_ORDERED_LEAST_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_SWORD_ORDERED_LEAST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SWORD_ORDERED_MOST \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+#define IMM_SWORD_ORDERED_MOST_MASKED_NEGATIVE \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+
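+// Explicit-length test vectors: la/lb are the lengths handed to the
+// intrinsic (values larger than the element count exercise length clamping),
+// imm8 records the control byte the case was written for, and expect holds
+// the reference result.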
+typedef struct {
+ uint8_t a[16], b[16];
+ int la, lb;
+ const int imm8;
+ int expect;
+} test_mm_cmpestri_ubyte_data_t;
+typedef struct {
+ int8_t a[16], b[16];
+ int la, lb;
+ const int imm8;
+ int expect;
+} test_mm_cmpestri_sbyte_data_t;
+typedef struct {
+ uint16_t a[8], b[8];
+ int la, lb;
+ const int imm8;
+ int expect;
+} test_mm_cmpestri_uword_data_t;
+typedef struct {
+ int16_t a[8], b[8];
+ int la, lb;
+ const int imm8;
+ int expect;
+} test_mm_cmpestri_sword_data_t;
+
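+// _mm_cmpestra returns 1 only when the aggregated result is all zero (CF = 0)
+// and lb covers the whole register (ZF = 0).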
+#define TEST_MM_CMPESTRA_UBYTE_DATA_LEN 3
+static test_mm_cmpestri_ubyte_data_t
+ test_mm_cmpestra_ubyte_data[TEST_MM_CMPESTRA_UBYTE_DATA_LEN] = {
+ {{20, 10, 33, 56, 78},
+ {20, 10, 34, 98, 127, 20, 10, 32, 20, 10, 32, 11, 3, 20, 10, 31},
+ 3,
+ 17,
+ IMM_UBYTE_ORDERED_MOST,
+ 1},
+ {{20, 127, 0, 45, 77, 1, 34, 43, 109},
+ {2, 127, 0, 54, 6, 43, 12, 110, 100},
+ 9,
+ 20,
+ IMM_UBYTE_EACH_LEAST_NEGATIVE,
+ 0},
+ {{22, 33, 90, 1},
+ {22, 33, 90, 1, 1, 5, 4, 7, 98, 34, 1, 12, 13, 14, 15, 16},
+ 4,
+ 11,
+ IMM_UBYTE_ANY_LEAST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPESTRA_SBYTE_DATA_LEN 3
+static test_mm_cmpestri_sbyte_data_t
+ test_mm_cmpestra_sbyte_data[TEST_MM_CMPESTRA_SBYTE_DATA_LEN] = {
+ {{45, -94, 38, -11, 84, -123, -43, -49, 25, -55, -121, -6, 57, 108, -55,
+ 69},
+ {-26, -61, -21, -96, 48, -112, 95, -56, 29, -55, -121, -6, 57, 108,
+ -55, 69},
+ 23,
+ 28,
+ IMM_SBYTE_RANGES_LEAST,
+ 0},
+ {{-12, 8},
+ {-12, 7, -12, 8, -13, 45, -12, 8},
+ 2,
+ 8,
+ IMM_SBYTE_ORDERED_MOST_NEGATIVE,
+ 0},
+ {{-100, -127, 56, 78, 21, -1, 9, 127, 45},
+ {100, 126, 30, 65, 87, 54, 80, 81, -98, -101, 90, 1, 5, 60, -77, -65},
+ 10,
+ 20,
+ IMM_SBYTE_ANY_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPESTRA_UWORD_DATA_LEN 3
+static test_mm_cmpestri_uword_data_t
+ test_mm_cmpestra_uword_data[TEST_MM_CMPESTRA_UWORD_DATA_LEN] = {
+ {{10000, 20000, 30000, 40000, 50000},
+ {40001, 50002, 10000, 20000, 30000, 40000, 50000},
+ 5,
+ 10,
+ IMM_UWORD_ORDERED_LEAST,
+ 0},
+ {{1001, 9487, 9487, 8000},
+ {1001, 1002, 1003, 8709, 100, 1, 1000, 999},
+ 4,
+ 6,
+ IMM_UWORD_RANGES_LEAST_MASKED_NEGATIVE,
+ 0},
+ {{12, 21, 0, 45, 88, 10001, 10002, 65535},
+ {22, 13, 3, 54, 888, 10003, 10000, 65530},
+ 13,
+ 13,
+ IMM_UWORD_EACH_MOST,
+ 1},
+};
+
+#define TEST_MM_CMPESTRA_SWORD_DATA_LEN 3
+static test_mm_cmpestri_sword_data_t
+ test_mm_cmpestra_sword_data[TEST_MM_CMPESTRA_SWORD_DATA_LEN] = {
+ {{-100, -80, -5, -1, 10, 1000},
+ {-100, -99, -80, -2, 11, 789, 889, 999},
+ 6,
+ 12,
+ IMM_SWORD_RANGES_LEAST_NEGATIVE,
+ 1},
+ {{-30000, -90, -32766, 1200, 5},
+ {-30001, 21, 10000, 1201, 888},
+ 5,
+ 5,
+ IMM_SWORD_EACH_MOST,
+ 0},
+ {{2001, -1928},
+ {2000, 1928, 3000, 2289, 4000, 111, 2002, -1928},
+ 2,
+ 9,
+ IMM_SWORD_ANY_LEAST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define MM_CMPESTRA_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ORDERED_MOST, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRA_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_MOST_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__)
+
+#define MM_CMPESTRA_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_ORDERED_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_EACH_MOST, __VA_ARGS__)
+
+#define MM_CMPESTRA_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_EACH_MOST, __VA_ARGS__) \
+ _(SWORD_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRA_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestra, CMPESTRA, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestra, CMPESTRA, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestra, CMPESTRA, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestra, CMPESTRA, \
+ IS_CMPESTRI)
+
+result_t test_mm_cmpestra(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRA_TEST_CASES
+ return TEST_SUCCESS;
+}
+
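+// _mm_cmpestrc returns the carry flag: 1 when the aggregated result (after
+// polarity is applied) has at least one bit set.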
+#define TEST_MM_CMPESTRC_UBYTE_DATA_LEN 4
+static test_mm_cmpestri_ubyte_data_t
+ test_mm_cmpestrc_ubyte_data[TEST_MM_CMPESTRC_UBYTE_DATA_LEN] = {
+ {{66, 3, 3, 65},
+ {66, 3, 3, 65, 67, 2, 2, 67, 56, 11, 1, 23, 66, 3, 3, 65},
+ 4,
+ 16,
+ IMM_UBYTE_ORDERED_MOST_MASKED_NEGATIVE,
+ 1},
+ {{1, 11, 2, 22, 3, 33, 4, 44, 5, 55, 6, 66, 7, 77, 8, 88},
+ {2, 22, 3, 23, 5, 66, 255, 43, 6, 66, 7, 77, 9, 99, 10, 100},
+ 16,
+ 16,
+ IMM_UBYTE_EACH_MOST,
+ 0},
+ {{36, 72, 108}, {12, 24, 48, 96, 77, 84}, 3, 6, IMM_UBYTE_ANY_LEAST, 0},
+ {{12, 24, 36, 48},
+ {11, 49, 50, 56, 77, 15, 10},
+ 4,
+ 7,
+ IMM_UBYTE_RANGES_LEAST_NEGATIVE,
+ 1},
+};
+
+#define TEST_MM_CMPESTRC_SBYTE_DATA_LEN 4
+static test_mm_cmpestri_sbyte_data_t
+ test_mm_cmpestrc_sbyte_data[TEST_MM_CMPESTRC_SBYTE_DATA_LEN] = {
+ {{-22, -30, 40, 45},
+ {-31, -32, 46, 77},
+ 4,
+ 4,
+ IMM_SBYTE_RANGES_MOST,
+ 0},
+ {{-12, -7, 33, 100, 12},
+ {-12, -7, 33, 100, 11, -11, -7, 33, 100, 12},
+ 5,
+ 10,
+ IMM_SBYTE_ORDERED_MOST_MASKED_NEGATIVE,
+ 1},
+ {{1, 2, 3, 4, 5, -1, -2, -3, -4, -5},
+ {1, 2, 3, 4, 5, -1, -2, -3, -5},
+ 10,
+ 9,
+ IMM_SBYTE_ANY_MOST_MASKED_NEGATIVE,
+ 0},
+ {{101, -128, -88, -76, 89, 109, 44, -12, -45, -100, 22, 1, 91},
+ {102, -120, 88, -76, 98, 107, 33, 12, 45, -100, 22, 10, 19},
+ 13,
+ 13,
+ IMM_SBYTE_EACH_MOST,
+ 1},
+};
+
+#define TEST_MM_CMPESTRC_UWORD_DATA_LEN 4
+static test_mm_cmpestri_uword_data_t
+ test_mm_cmpestrc_uword_data[TEST_MM_CMPESTRC_UWORD_DATA_LEN] = {
+ {{1000, 2000, 4000, 8000, 16000},
+ {40001, 1000, 2000, 40000, 8000, 16000},
+ 5,
+ 6,
+ IMM_UWORD_ORDERED_LEAST_NEGATIVE,
+ 1},
+ {{1111, 1212},
+ {1110, 1213, 1110, 1214, 1100, 1220, 1000, 1233},
+ 2,
+ 8,
+ IMM_UWORD_RANGES_MOST,
+ 0},
+ {{10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000},
+ {9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000},
+ 13,
+ 13,
+ IMM_UWORD_EACH_LEAST_MASKED_NEGATIVE,
+ 1},
+ {{12}, {11, 13, 14, 15, 10}, 1, 5, IMM_UWORD_ANY_MOST, 0},
+};
+
+#define TEST_MM_CMPESTRC_SWORD_DATA_LEN 4
+static test_mm_cmpestri_sword_data_t
+ test_mm_cmpestrc_sword_data[TEST_MM_CMPESTRC_SWORD_DATA_LEN] = {
+ {{-100, -90, -80, -66, 1},
+ {-101, -102, -1000, 2, 67, 10000},
+ 5,
+ 6,
+ IMM_SWORD_RANGES_LEAST,
+ 0},
+ {{12, 13, -700, 888, 44, -987, 19},
+ {12, 13, -700, 888, 44, -987, 19},
+ 7,
+ 7,
+ IMM_SWORD_EACH_MOST_NEGATIVE,
+ 0},
+ {{2001, -1992, 1995, 10007, 2000},
+ {2000, 1928, 3000, 9822, 5000, 1111, 2002, -1928},
+ 5,
+ 9,
+ IMM_SWORD_ANY_LEAST_NEGATIVE,
+ 1},
+ {{13, -26, 39},
+ {12, -25, 33, 13, -26, 39},
+ 3,
+ 6,
+ IMM_SWORD_ORDERED_MOST,
+ 1},
+};
+
+#define MM_CMPESTRC_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_EACH_MOST, __VA_ARGS__) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRC_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_RANGES_MOST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_EACH_MOST, __VA_ARGS__)
+
+#define MM_CMPESTRC_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_ORDERED_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_RANGES_MOST, __VA_ARGS__) \
+ _(UWORD_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_ANY_MOST, __VA_ARGS__)
+
+#define MM_CMPESTRC_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(SWORD_EACH_MOST_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_ORDERED_MOST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRC_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrc, CMPESTRC, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrc, CMPESTRC, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrc, CMPESTRC, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrc, CMPESTRC, \
+ IS_CMPESTRI)
+
+result_t test_mm_cmpestrc(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRC_TEST_CASES
+ return TEST_SUCCESS;
+}
+
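+// _mm_cmpestri returns the index of the least (or most) significant set bit
+// of the aggregated result, or the element count when no bit is set.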
+#define TEST_MM_CMPESTRI_UBYTE_DATA_LEN 4
+static test_mm_cmpestri_ubyte_data_t
+ test_mm_cmpestri_ubyte_data[TEST_MM_CMPESTRI_UBYTE_DATA_LEN] = {
+ {{23, 89, 255, 0, 90, 45, 67, 12, 1, 56, 200, 141, 3, 4, 2, 76},
+ {32, 89, 255, 128, 9, 54, 78, 12, 1, 56, 100, 41, 42, 68, 32, 5},
+ 16,
+ 16,
+ IMM_UBYTE_ANY_LEAST_NEGATIVE,
+ 0},
+ {{0, 83, 112, 12, 221, 54, 76, 83, 112, 10},
+ {0, 83, 112, 83, 122, 45, 67, 83, 112, 9},
+ 10,
+ 10,
+ IMM_UBYTE_EACH_LEAST,
+ 0},
+ {{34, 78, 12},
+ {56, 100, 11, 67, 35, 79, 67, 255, 0, 43, 121, 234, 225, 91, 31, 23},
+ 3,
+ 16,
+ IMM_UBYTE_RANGES_LEAST,
+ 0},
+ {{13, 10, 9, 32, 105, 103, 110, 111, 114, 101, 32, 116, 104, 105, 115,
+ 32},
+ {83, 112, 108, 105, 116, 32, 13, 10, 9, 32, 108, 105, 110, 101, 32,
+ 32},
+ 3,
+ 15,
+ IMM_UBYTE_ORDERED_LEAST,
+ 6},
+};
+
+#define TEST_MM_CMPESTRI_SBYTE_DATA_LEN 4
+static test_mm_cmpestri_sbyte_data_t
+ test_mm_cmpestri_sbyte_data[TEST_MM_CMPESTRI_SBYTE_DATA_LEN] = {
+ {{-12, -1, 90, -128, 43, 6, 87, 127},
+ {-1, -1, 9, -127, 126, 6, 78, 23},
+ 8,
+ 8,
+ IMM_SBYTE_EACH_LEAST,
+ 1},
+ {{34, 67, -90, 33, 123, -100, 43, 56},
+ {43, 76, -90, 44, 20, -100, 54, 56},
+ 8,
+ 8,
+ IMM_SBYTE_ANY_LEAST,
+ 0},
+ {{-43, 67, 89},
+ {-44, -54, -30, -128, 127, 34, 10, -62},
+ 3,
+ 7,
+ IMM_SBYTE_RANGES_LEAST,
+ 2},
+ {{90, 34, -32, 0, 5},
+ {19, 34, -32, 90, 34, -32, 45, 0, 5, 90, 34, -32, 0, 5, 19, 87},
+ 3,
+ 16,
+ IMM_SBYTE_ORDERED_LEAST,
+ 3},
+};
+
+#define TEST_MM_CMPESTRI_UWORD_DATA_LEN 4
+static test_mm_cmpestri_uword_data_t
+ test_mm_cmpestri_uword_data[TEST_MM_CMPESTRI_UWORD_DATA_LEN] = {
+ {{45, 65535, 0, 87, 1000, 10, 45, 26},
+ {65534, 0, 0, 78, 1000, 10, 32, 26},
+ 8,
+ 8,
+ IMM_UWORD_EACH_LEAST,
+ 2},
+ {{45, 23, 10, 54, 88, 10000, 20000, 100},
+ {544, 10000, 20000, 1, 0, 2897, 2330, 2892},
+ 8,
+ 8,
+ IMM_UWORD_ANY_LEAST,
+ 1},
+ {{10000, 15000},
+ {12, 45, 67, 899, 10001, 32, 15001, 15000},
+ 2,
+ 8,
+ IMM_UWORD_RANGES_LEAST,
+ 4},
+ {{0, 1, 54, 89, 100},
+ {101, 102, 65535, 0, 1, 54, 89, 100},
+ 5,
+ 8,
+ IMM_UWORD_ORDERED_LEAST,
+ 3},
+};
+
+#define TEST_MM_CMPESTRI_SWORD_DATA_LEN 4
+static test_mm_cmpestri_sword_data_t
+ test_mm_cmpestri_sword_data[TEST_MM_CMPESTRI_SWORD_DATA_LEN] = {
+ {{13, 6, 5, 4, 3, 2, 1, 3},
+ {-7, 16, 5, 4, -1, 6, 1, 3},
+ 10,
+ 10,
+ IMM_SWORD_RANGES_MOST,
+ 7},
+ {{13, 6, 5, 4, 3, 2, 1, 3},
+ {-7, 16, 5, 4, -1, 6, 1, 3},
+ 8,
+ 8,
+ IMM_SWORD_EACH_LEAST,
+ 2},
+ {{-32768, 90, 455, 67, -1000, -10000, 21, 12},
+ {-7, 61, 455, 67, -32768, 32767, 11, 888},
+ 8,
+ 8,
+ IMM_SWORD_ANY_LEAST,
+ 2},
+ {{-12, -56},
+ {-7, 16, 555, 554, -12, 61, -16, 3},
+ 2,
+ 8,
+ IMM_SWORD_ORDERED_LEAST,
+ 8},
+};
+
+#define MM_CMPESTRI_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPESTRI_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPESTRI_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPESTRI_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_MOST, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRI_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestri, CMPESTRI, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestri, CMPESTRI, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestri, CMPESTRI, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestri, CMPESTRI, \
+ IS_CMPESTRI)
+
+result_t test_mm_cmpestri(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRI_TEST_CASES
+ return TEST_SUCCESS;
+}
+
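+// _mm_cmpestrm returns an __m128i mask (a bit mask or per-element unit mask,
+// depending on imm8), so expect widens to a full vector and IS_CMPESTRM = 0
+// switches the evaluation macro to its memcmp branch.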
+#define IS_CMPESTRM 0
+
+typedef struct {
+ uint8_t a[16], b[16];
+ int la, lb;
+ const int imm8;
+ uint8_t expect[16];
+} test_mm_cmpestrm_ubyte_data_t;
+typedef struct {
+ int8_t a[16], b[16];
+ int la, lb;
+ const int imm8;
+ int8_t expect[16];
+} test_mm_cmpestrm_sbyte_data_t;
+typedef struct {
+ uint16_t a[8], b[8];
+ int la, lb;
+ const int imm8;
+ uint16_t expect[8];
+} test_mm_cmpestrm_uword_data_t;
+typedef struct {
+ int16_t a[8], b[8];
+ int la, lb;
+ const int imm8;
+ int16_t expect[8];
+} test_mm_cmpestrm_sword_data_t;
+
+#define IMM_UBYTE_EACH_UNIT \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+#define IMM_UBYTE_EACH_UNIT_NEGATIVE \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK | \
+ _SIDD_NEGATIVE_POLARITY)
+#define IMM_UBYTE_ANY_UNIT \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+#define IMM_UBYTE_ANY_BIT \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK)
+#define IMM_UBYTE_RANGES_UNIT \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+#define IMM_UBYTE_ORDERED_UNIT \
+ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+
+#define IMM_SBYTE_EACH_UNIT \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+#define IMM_SBYTE_EACH_BIT_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_BIT_MASK | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_ANY_UNIT \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+#define IMM_SBYTE_ANY_UNIT_MASKED_NEGATIVE \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK | \
+ _SIDD_MASKED_NEGATIVE_POLARITY)
+#define IMM_SBYTE_RANGES_UNIT \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+#define IMM_SBYTE_ORDERED_UNIT \
+ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+
+#define IMM_UWORD_RANGES_UNIT \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+#define IMM_UWORD_EACH_UNIT \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+#define IMM_UWORD_ANY_UNIT \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+#define IMM_UWORD_ANY_BIT \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK)
+#define IMM_UWORD_ORDERED_UNIT \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+#define IMM_UWORD_ORDERED_UNIT_NEGATIVE \
+ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK | \
+ _SIDD_NEGATIVE_POLARITY)
+
+#define IMM_SWORD_RANGES_UNIT \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+#define IMM_SWORD_RANGES_BIT \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_BIT_MASK)
+#define IMM_SWORD_EACH_UNIT \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+#define IMM_SWORD_ANY_UNIT \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+#define IMM_SWORD_ORDERED_UNIT \
+ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+
+#define TEST_MM_CMPESTRM_UBYTE_DATA_LEN 4
+static test_mm_cmpestrm_ubyte_data_t
+ test_mm_cmpestrm_ubyte_data[TEST_MM_CMPESTRM_UBYTE_DATA_LEN] = {
+ {{85, 115, 101, 70, 108, 97, 116, 65, 115, 115, 101, 109, 98, 108, 101,
+ 114},
+ {85, 115, 105, 110, 103, 65, 110, 65, 115, 115, 101, 109, 98, 108, 101,
+ 114},
+ 16,
+ 16,
+ IMM_UBYTE_EACH_UNIT_NEGATIVE,
+ {0, 0, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{97, 101, 105, 111, 117, 121},
+ {89, 111, 117, 32, 68, 114, 105, 118, 101, 32, 77, 101, 32, 77, 97,
+ 100},
+ 6,
+ 16,
+ IMM_UBYTE_ANY_UNIT,
+ {0, 255, 255, 0, 0, 0, 255, 0, 255, 0, 0, 255, 0, 0, 255, 0}},
+ {{97, 122, 65, 90},
+ {73, 39, 109, 32, 104, 101, 114, 101, 32, 98, 101, 99, 97, 117, 115,
+ 101},
+ 4,
+ 16,
+ IMM_UBYTE_RANGES_UNIT,
+ {255, 0, 255, 0, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255,
+ 255}},
+ {{87, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {87, 104, 101, 110, 87, 101, 87, 105, 108, 108, 66, 101, 87, 101, 100,
+ 33},
+ 2,
+ 16,
+ IMM_UBYTE_ORDERED_UNIT,
+ {0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0}},
+};
+
+#define TEST_MM_CMPESTRM_SBYTE_DATA_LEN 4
+static test_mm_cmpestrm_sbyte_data_t
+ test_mm_cmpestrm_sbyte_data[TEST_MM_CMPESTRM_SBYTE_DATA_LEN] = {
+ {{-127, -127, 34, 88, 0, 1, -1, 78, 90, 9, 23, 34, 3, -128, 127, 0},
+ {0, -127, 34, 88, 12, 43, -128, 78, 8, 9, 43, 32, 7, 126, 115, 0},
+ 16,
+ 16,
+ IMM_SBYTE_EACH_UNIT,
+ {0, -1, -1, -1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, 0, -1}},
+ {{0, 32, 7, 115, -128, 44, 33},
+ {0, -127, 34, 88, 12, 43, -128, 78, 8, 9, 43, 32, 7, 126, 115, 0},
+ 7,
+ 10,
+ IMM_SBYTE_ANY_UNIT_MASKED_NEGATIVE,
+ {0, -1, -1, -1, -1, -1, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0}},
+ {{-128, -80, -90, 10, 33},
+ {-126, -93, -80, -77, -56, -23, -10, -1, 0, 3, 10, 12, 13, 33, 34, 56},
+ 5,
+ 16,
+ IMM_SBYTE_RANGES_UNIT,
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0}},
+ {{104, 9, -12},
+ {0, 0, 87, 104, 9, -12, 89, -117, 9, 10, -11, 87, -114, 104, 9, -61},
+ 3,
+ 16,
+ IMM_SBYTE_ORDERED_UNIT,
+ {0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+};
+
+#define TEST_MM_CMPESTRM_UWORD_DATA_LEN 4
+static test_mm_cmpestrm_uword_data_t
+ test_mm_cmpestrm_uword_data[TEST_MM_CMPESTRM_UWORD_DATA_LEN] = {
+ {{1, 5, 13, 19, 22},
+ {12, 60000, 5, 1, 100, 1000, 34, 20},
+ 5,
+ 8,
+ IMM_UWORD_RANGES_UNIT,
+ {0, 0, 65535, 65535, 0, 0, 0, 0}},
+ {{65535, 12, 7, 9876, 3456, 12345, 10, 98},
+ {65535, 0, 10, 9876, 3456, 0, 13, 32},
+ 8,
+ 8,
+ IMM_UWORD_EACH_UNIT,
+ {65535, 0, 0, 65535, 65535, 0, 0, 0}},
+ {{100, 0},
+ {12345, 6766, 234, 0, 1, 34, 89, 100},
+ 2,
+ 8,
+ IMM_UWORD_ANY_BIT,
+ {136, 0, 0, 0, 0, 0, 0, 0}},
+ {{123, 67, 890},
+ {123, 67, 890, 8900, 4, 0, 123, 67},
+ 3,
+ 8,
+ IMM_UWORD_ORDERED_UNIT,
+ {65535, 0, 0, 0, 0, 0, 65535, 0}},
+};
+
+#define TEST_MM_CMPESTRM_SWORD_DATA_LEN 4
+static test_mm_cmpestrm_sword_data_t
+ test_mm_cmpestrm_sword_data[TEST_MM_CMPESTRM_SWORD_DATA_LEN] = {
+ {{13, 6, 5, 4, 3, 2, 1, 3},
+ {-7, 16, 5, 4, -1, 6, 1, 3},
+ 10,
+ 10,
+ IMM_SWORD_RANGES_UNIT,
+ {0, 0, 0, 0, 0, 0, -1, -1}},
+ {{85, 115, 101, 70, 108, 97, 116, 65},
+ {85, 115, 105, 110, 103, 65, 110, 65},
+ 8,
+ 8,
+ IMM_SWORD_EACH_UNIT,
+ {-1, -1, 0, 0, 0, 0, 0, -1}},
+ {{-32768, 10000, 10, -13},
+ {-32767, 32767, -32768, 90, 0, -13, 23, 45},
+ 4,
+ 8,
+ IMM_SWORD_ANY_UNIT,
+ {0, 0, -1, 0, 0, -1, 0, 0}},
+ {{10, 20, -10, 60},
+ {0, 0, 0, 10, 20, -10, 60, 10},
+ 4,
+ 8,
+ IMM_SWORD_ORDERED_UNIT,
+ {0, 0, 0, -1, 0, 0, 0, -1}},
+};
+
+#define MM_CMPESTRM_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_EACH_UNIT_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_ANY_UNIT, __VA_ARGS__) \
+ _(UBYTE_RANGES_UNIT, __VA_ARGS__) \
+ _(UBYTE_ORDERED_UNIT, __VA_ARGS__)
+
+#define MM_CMPESTRM_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_UNIT, __VA_ARGS__) \
+ _(SBYTE_ANY_UNIT_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_RANGES_UNIT, __VA_ARGS__) \
+ _(SBYTE_ORDERED_UNIT, __VA_ARGS__)
+
+#define MM_CMPESTRM_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_RANGES_UNIT, __VA_ARGS__) \
+ _(UWORD_EACH_UNIT, __VA_ARGS__) \
+ _(UWORD_ANY_BIT, __VA_ARGS__) \
+ _(UWORD_ORDERED_UNIT, __VA_ARGS__)
+
+#define MM_CMPESTRM_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_UNIT, __VA_ARGS__) \
+ _(SWORD_EACH_UNIT, __VA_ARGS__) \
+ _(SWORD_ANY_UNIT, __VA_ARGS__) \
+ _(SWORD_ORDERED_UNIT, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRM_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrm, CMPESTRM, \
+ IS_CMPESTRM) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrm, CMPESTRM, \
+ IS_CMPESTRM) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrm, CMPESTRM, \
+ IS_CMPESTRM) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrm, CMPESTRM, \
+ IS_CMPESTRM)
+
+result_t test_mm_cmpestrm(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRM_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+#undef IS_CMPESTRM
+
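+// _mm_cmpestro returns bit 0 of the aggregated comparison result.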
+#define TEST_MM_CMPESTRO_UBYTE_DATA_LEN 4
+static test_mm_cmpestri_ubyte_data_t
+ test_mm_cmpestro_ubyte_data[TEST_MM_CMPESTRO_UBYTE_DATA_LEN] = {
+ {{56, 78, 255, 1, 9},
+ {56, 78, 43, 255, 1, 6, 9},
+ 5,
+ 7,
+ IMM_UBYTE_ANY_MOST_NEGATIVE,
+ 0},
+ {{33, 44, 100, 24, 3, 89, 127, 254, 33, 45, 250},
+ {33, 44, 100, 22, 3, 98, 125, 254, 33, 4, 243},
+ 11,
+ 11,
+ IMM_UBYTE_EACH_LEAST_MASKED_NEGATIVE,
+ 0},
+ {{34, 27, 18, 9}, {}, 4, 16, IMM_UBYTE_RANGES_LEAST_MASKED_NEGATIVE, 1},
+ {{3, 18, 216},
+ {3, 18, 222, 3, 17, 216, 3, 18, 216},
+ 3,
+ 9,
+ IMM_UBYTE_ORDERED_LEAST_NEGATIVE,
+ 1},
+};
+
+#define TEST_MM_CMPESTRO_SBYTE_DATA_LEN 4
+static test_mm_cmpestri_sbyte_data_t
+ test_mm_cmpestro_sbyte_data[TEST_MM_CMPESTRO_SBYTE_DATA_LEN] = {
+ {{23, -23, 24, -24, 25, -25, 26, -26, 27, -27, 28, -28, -29, 29, 30,
+ 31},
+ {24, -23, 25, -24, 25, -25, 26, -26, 27, -27, 28, -28, -29, 29, 30,
+ 31},
+ 16,
+ 16,
+ IMM_SBYTE_EACH_MOST_NEGATIVE,
+ 1},
+ {{34, 33, 67, 72, -90, 127, 33, -128, 123, -90, -100, 34, 43, 15, 56,
+ 3},
+ {3, 14, 15, 65, 90, -127, 100, 100},
+ 16,
+ 8,
+ IMM_SBYTE_ANY_MOST,
+ 1},
+ {{-13, 0, 34},
+ {-12, -11, 1, 12, 56, 57, 3, 2, -17},
+ 6,
+ 9,
+ IMM_SBYTE_RANGES_MOST_MASKED_NEGATIVE,
+ 0},
+ {{1, 2, 3, 4, 5, 6, 7, 8},
+ {-1, -2, -3, -4, -5, -6, -7, -8, 1, 2, 3, 4, 5, 6, 7, 8},
+ 8,
+ 16,
+ IMM_SBYTE_ORDERED_MOST,
+ 0},
+};
+
+#define TEST_MM_CMPESTRO_UWORD_DATA_LEN 4
+static test_mm_cmpestri_uword_data_t
+ test_mm_cmpestro_uword_data[TEST_MM_CMPESTRO_UWORD_DATA_LEN] = {
+ {{0, 0, 0, 4, 4, 4, 8, 8},
+ {0, 0, 0, 3, 3, 16653, 3333, 222},
+ 8,
+ 8,
+ IMM_UWORD_EACH_MOST_MASKED_NEGATIVE,
+ 0},
+ {{12, 666, 9456, 10000, 32, 444, 57, 0},
+ {11, 777, 9999, 32767, 23},
+ 8,
+ 5,
+ IMM_UWORD_ANY_LEAST_MASKED_NEGATIVE,
+ 1},
+ {{23, 32, 45, 67},
+ {10022, 23, 32, 44, 66, 67, 12, 22},
+ 4,
+ 8,
+ IMM_UWORD_RANGES_LEAST_NEGATIVE,
+ 1},
+ {{222, 45, 8989},
+ {221, 222, 45, 8989, 222, 45, 8989},
+ 3,
+ 7,
+ IMM_UWORD_ORDERED_MOST,
+ 0},
+};
+
+#define TEST_MM_CMPESTRO_SWORD_DATA_LEN 4
+static test_mm_cmpestri_sword_data_t
+ test_mm_cmpestro_sword_data[TEST_MM_CMPESTRO_SWORD_DATA_LEN] = {
+ {{-9999, -9487, -5000, -4433, -3000, -2999, -2000, -1087},
+ {-32767, -30000, -4998},
+ 100,
+ 3,
+ IMM_SWORD_RANGES_MOST_MASKED_NEGATIVE,
+ 1},
+ {{-30, 89, 7777},
+ {-30, 89, 7777},
+ 3,
+ 3,
+ IMM_SWORD_EACH_MOST_MASKED_NEGATIVE,
+ 0},
+ {{8, 9, -100, 1000, -5000, -32000, 32000, 7},
+ {29999, 32001, 5, 555},
+ 8,
+ 4,
+ IMM_SWORD_ANY_MOST_MASKED_NEGATIVE,
+ 1},
+ {{-1, 56, -888, 9000, -23, 12, -1, -1},
+ {-1, 56, -888, 9000, -23, 12, -1, -1},
+ 8,
+ 8,
+ IMM_SWORD_ORDERED_MOST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define MM_CMPESTRO_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_MOST_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRO_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_MOST_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ANY_MOST, __VA_ARGS__) \
+ _(SBYTE_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ORDERED_MOST, __VA_ARGS__)
+
+#define MM_CMPESTRO_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_ORDERED_MOST, __VA_ARGS__)
+
+#define MM_CMPESTRO_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRO_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestro, CMPESTRO, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestro, CMPESTRO, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestro, CMPESTRO, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestro, CMPESTRO, \
+ IS_CMPESTRI)
+
+result_t test_mm_cmpestro(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRO_TEST_CASES
+ return TEST_SUCCESS;
+}
+
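+// _mm_cmpestrs returns 1 when |la| is smaller than the number of elements in
+// the register, i.e. when string a does not fill the vector.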
+#define TEST_MM_CMPESTRS_UBYTE_DATA_LEN 2
+static test_mm_cmpestri_ubyte_data_t
+ test_mm_cmpestrs_ubyte_data[TEST_MM_CMPESTRS_UBYTE_DATA_LEN] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ {0},
+ 16,
+ 0,
+ IMM_UBYTE_ANY_MOST,
+ 0},
+ {{1, 2, 3}, {1, 2, 3}, 3, 8, IMM_UBYTE_RANGES_MOST, 1},
+};
+
+#define TEST_MM_CMPESTRS_SBYTE_DATA_LEN 2
+static test_mm_cmpestri_sbyte_data_t
+ test_mm_cmpestrs_sbyte_data[TEST_MM_CMPESTRS_SBYTE_DATA_LEN] = {
+ {{-1, -2, -3, -4, -100, 100, 1, 2, 3, 4},
+ {-90, -80, 111, 67, 88},
+ 10,
+ 5,
+ IMM_SBYTE_EACH_LEAST_MASKED_NEGATIVE,
+ 1},
+ {{99, 100, 101, -99, -100, -101, 56, 7},
+ {-128, -126, 100, 127},
+ 23,
+ 4,
+ IMM_SBYTE_ORDERED_LEAST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPESTRS_UWORD_DATA_LEN 2
+static test_mm_cmpestri_uword_data_t
+ test_mm_cmpestrs_uword_data[TEST_MM_CMPESTRS_UWORD_DATA_LEN] = {
+ {{1},
+ {90, 65535, 63355, 12, 8, 5, 34, 10000},
+ 100,
+ 7,
+ IMM_UWORD_ANY_MOST_NEGATIVE,
+ 0},
+ {{}, {0}, 0, 28, IMM_UWORD_RANGES_MOST_MASKED_NEGATIVE, 1},
+};
+
+#define TEST_MM_CMPESTRS_SWORD_DATA_LEN 2
+static test_mm_cmpestri_sword_data_t
+ test_mm_cmpestrs_sword_data[TEST_MM_CMPESTRS_SWORD_DATA_LEN] = {
+ {{-30000, 2897, 1111, -4455},
+ {30, 40, 500, 6000, 20, -10, -789, -29999},
+ 4,
+ 8,
+ IMM_SWORD_ORDERED_LEAST_MASKED_NEGATIVE,
+ 1},
+ {{34, 56, 789, 1024, 2048, 4096, 8192, -16384},
+ {3, 9, -27, 81, -216, 1011},
+ 9,
+ 6,
+ IMM_SWORD_EACH_LEAST_NEGATIVE,
+ 0},
+};
+
+#define MM_CMPESTRS_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_MOST, __VA_ARGS__) \
+ _(UBYTE_RANGES_MOST, __VA_ARGS__)
+
+#define MM_CMPESTRS_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRS_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_ANY_MOST_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRS_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_ORDERED_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST_NEGATIVE, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRS_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrs, CMPESTRS, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrs, CMPESTRS, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrs, CMPESTRS, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrs, CMPESTRS, \
+ IS_CMPESTRI)
+
+result_t test_mm_cmpestrs(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRS_TEST_CASES
+ return TEST_SUCCESS;
+}
+
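+// _mm_cmpestrz returns 1 when |lb| is smaller than the number of elements in
+// the register, i.e. when string b does not fill the vector.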
+#define TEST_MM_CMPESTRZ_UBYTE_DATA_LEN 2
+static test_mm_cmpestri_ubyte_data_t
+ test_mm_cmpestrz_ubyte_data[TEST_MM_CMPESTRZ_UBYTE_DATA_LEN] = {
+ {{0, 1, 2, 3, 4, 5, 6, 7},
+ {12, 67, 0, 3},
+ 8,
+ 4,
+ IMM_UBYTE_ANY_MOST_MASKED_NEGATIVE,
+ 1},
+ {{255, 0, 127, 88},
+ {1, 2, 4, 8, 16, 32, 64, 128, 254, 233, 209, 41, 66, 77, 90, 100},
+ 4,
+ 16,
+ IMM_UBYTE_RANGES_MOST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPESTRZ_SBYTE_DATA_LEN 2
+static test_mm_cmpestri_sbyte_data_t
+ test_mm_cmpestrz_sbyte_data[TEST_MM_CMPESTRZ_SBYTE_DATA_LEN] = {
+ {{}, {-90, -80, 111, 67, 88}, 0, 18, IMM_SBYTE_EACH_LEAST_NEGATIVE, 0},
+ {{9, 10, 10, -99, -100, -101, 56, 76},
+ {-127, 127, -100, -120, 13, 108, 1, -66, -34, 89, -89, 123, 22, -19,
+ -8},
+ 7,
+ 15,
+ IMM_SBYTE_ORDERED_LEAST_NEGATIVE,
+ 1},
+};
+
+#define TEST_MM_CMPESTRZ_UWORD_DATA_LEN 2
+static test_mm_cmpestri_uword_data_t
+ test_mm_cmpestrz_uword_data[TEST_MM_CMPESTRZ_UWORD_DATA_LEN] = {
+ {{1},
+ {9000, 33333, 63333, 120, 8, 55, 34, 100},
+ 100,
+ 7,
+ IMM_UWORD_ANY_LEAST_NEGATIVE,
+ 1},
+ {{1, 2, 3},
+ {1, 10000, 65535, 8964, 9487, 32, 451, 666},
+ 3,
+ 8,
+ IMM_UWORD_RANGES_MOST_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPESTRZ_SWORD_DATA_LEN 2
+static test_mm_cmpestri_sword_data_t
+ test_mm_cmpestrz_sword_data[TEST_MM_CMPESTRZ_SWORD_DATA_LEN] = {
+ {{30000, 28997, 11111, 4455},
+ {30, 40, 500, 6000, 20, -10, -789, -29999},
+ 4,
+ 8,
+ IMM_SWORD_ORDERED_LEAST_MASKED_NEGATIVE,
+ 0},
+ {{789, 1024, 2048, 4096, 8192},
+ {-3, 9, -27, 18, -217, 10111, 22222},
+ 5,
+ 7,
+ IMM_SWORD_EACH_LEAST_MASKED_NEGATIVE,
+ 1},
+};
+
+#define MM_CMPESTRZ_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRZ_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRZ_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_RANGES_MOST_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPESTRZ_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_ORDERED_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define GENERATE_MM_CMPESTRZ_TEST_CASES \
+ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrz, CMPESTRZ, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrz, CMPESTRZ, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrz, CMPESTRZ, \
+ IS_CMPESTRI) \
+ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrz, CMPESTRZ, \
+ IS_CMPESTRI)
+
+result_t test_mm_cmpestrz(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPESTRZ_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+#undef IS_CMPESTRI
+
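+// _mm_cmpgt_epi64 (SSE4.2) performs a signed 64-bit greater-than compare per
+// lane, yielding all-ones or all-zeros in each 64-bit element.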
+result_t test_mm_cmpgt_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+
+ int64_t result[2];
+ result[0] = _a[0] > _b[0] ? -1 : 0;
+ result[1] = _a[1] > _b[1] ? -1 : 0;
+
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ __m128i iret = _mm_cmpgt_epi64(a, b);
+
+ return validateInt64(iret, result[0], result[1]);
+}
+
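+// Implicit-length variants: _mm_cmpistr* derive the string lengths from a
+// terminating zero element inside the operands, so the test data carries no
+// la/lb fields.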
+#define IS_CMPISTRI 1
+
+#define DEF_ENUM_MM_CMPISTRX_VARIANT(c, ...) c,
+
+#define EVAL_MM_CMPISTRX_TEST_CASE(c, type, data_type, im, IM) \
+ do { \
+ data_type *a = test_mm_##im##_##type##_data[c].a, \
+ *b = test_mm_##im##_##type##_data[c].b; \
+ const int imm8 = IMM_##c; \
+ IIF(IM) \
+ (int expect = test_mm_##im##_##type##_data[c].expect, \
+ data_type *expect = test_mm_##im##_##type##_data[c].expect); \
+ __m128i ma, mb; \
+ memcpy(&ma, a, sizeof(ma)); \
+ memcpy(&mb, b, sizeof(mb)); \
+ IIF(IM) \
+ (int res = _mm_##im(ma, mb, imm8), \
+ __m128i res = _mm_##im(ma, mb, imm8)); \
+ if (IIF(IM)(res != expect, memcmp(expect, &res, sizeof(__m128i)))) \
+ return TEST_FAIL; \
+ } while (0);
+
+#define ENUM_MM_CMPISTRX_TEST_CASES(type, type_lower, data_type, func, FUNC, \
+ IM) \
+ enum { MM_##FUNC##_##type##_TEST_CASES(DEF_ENUM_MM_CMPISTRX_VARIANT) }; \
+ MM_##FUNC##_##type##_TEST_CASES(EVAL_MM_CMPISTRX_TEST_CASE, type_lower, \
+ data_type, func, IM)
+
+typedef struct {
+ uint8_t a[16], b[16];
+ const int imm8;
+ int expect;
+} test_mm_cmpistri_ubyte_data_t;
+typedef struct {
+ int8_t a[16], b[16];
+ const int imm8;
+ int expect;
+} test_mm_cmpistri_sbyte_data_t;
+typedef struct {
+ uint16_t a[8], b[8];
+ const int imm8;
+ int expect;
+} test_mm_cmpistri_uword_data_t;
+typedef struct {
+ int16_t a[8], b[8];
+ const int imm8;
+ int expect;
+} test_mm_cmpistri_sword_data_t;
+
+#define TEST_MM_CMPISTRA_UBYTE_DATA_LEN 4
+static test_mm_cmpistri_ubyte_data_t
+ test_mm_cmpistra_ubyte_data[TEST_MM_CMPISTRA_UBYTE_DATA_LEN] = {
+ {{10, 11, 12, 13, 14, 15, 16, 17, 18, 9, 20, 98, 97, 96, 95, 127},
+ {1, 2, 3, 4, 5, 6, 7, 8, 99, 100, 101, 102, 103, 104, 105, 106},
+ IMM_UBYTE_ANY_LEAST,
+ 1},
+ {{1, 22, 33, 44, 5, 66, 7, 88, 9, 10, 111, 0},
+ {2, 23, 34, 21, 6, 65, 8, 84, 99, 100, 11, 112, 123, 14, 15, 6},
+ IMM_UBYTE_EACH_LEAST,
+ 1},
+ {{5, 15, 25, 35, 45, 55, 65, 75, 0},
+ {4, 6, 14, 16, 24, 26, 34, 36, 44, 46, 54, 56, 74, 76},
+ IMM_UBYTE_RANGES_LEAST,
+ 0},
+ {{4, 14, 64, 84, 0},
+ {4, 14, 64, 84, 0, 4, 14, 65, 84, 0, 4, 14, 64, 84, 0, 1},
+ IMM_UBYTE_ORDERED_MOST_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPISTRA_SBYTE_DATA_LEN 4
+static test_mm_cmpistri_sbyte_data_t
+ test_mm_cmpistra_sbyte_data[TEST_MM_CMPISTRA_SBYTE_DATA_LEN] = {
+ {{-11, -13, -43, -50, 66, 77, 87, 98, -128, 127, 126, 99, 1, 2, 3, -5},
+ {-12, -13, -43, -56, 66, 78, 88, 98, -125, 127, 120, 9, 100, 22, 54,
+ -10},
+ IMM_SBYTE_EACH_LEAST,
+ 0},
+ {{10, 11, 100, -90, 0},
+ {8, 9, 10, 11, 0, 8, 9, 10, -90, 0},
+ IMM_SBYTE_ANY_LEAST_NEGATIVE,
+ 0},
+ {{-90, -60, -34, -25, 34, 56, 70, 79, 0},
+ {-100, -59, -35, -24, -101, 33, 57, 69, 80, 81, -128, 100, 101, 102,
+ -101, -102},
+ IMM_SBYTE_RANGES_LEAST,
+ 1},
+ {{1, 1, 1, 1, -1, -1, -1, -1, -10, 10, -10, 10, 44, -44, 44, -44},
+ {1, 1, -1, 1, -1, -1, -1, -1, -10, 10, -10, 10, 44, -44, 44, -44},
+ IMM_SBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRA_UWORD_DATA_LEN 4
+static test_mm_cmpistri_uword_data_t
+ test_mm_cmpistra_uword_data[TEST_MM_CMPISTRA_UWORD_DATA_LEN] = {
+ {{88, 888, 8888, 31888, 10888, 18088, 10880, 28888},
+ {888, 88, 8888, 32000, 10888, 18000, 10888, 28888},
+ IMM_UWORD_EACH_LEAST_NEGATIVE,
+ 0},
+ {{3, 4, 555, 6666, 7777, 888, 9, 100},
+ {1, 2, 333, 4444, 5555, 666, 7, 8},
+ IMM_UWORD_ANY_LEAST,
+ 1},
+ {{1000, 2000, 2002, 3000, 3002, 4000, 5000, 5999},
+ {999, 2001, 3001, 4001, 4002, 4999, 6000, 6001},
+ IMM_UWORD_RANGES_LEAST,
+ 1},
+ {{55, 66, 77, 888, 0},
+ {55, 66, 77, 888, 0, 33, 2, 10000},
+ IMM_UWORD_ORDERED_LEAST,
+ 0},
+};
+
+#define TEST_MM_CMPISTRA_SWORD_DATA_LEN 4
+static test_mm_cmpistri_sword_data_t
+ test_mm_cmpistra_sword_data[TEST_MM_CMPISTRA_SWORD_DATA_LEN] = {
+ {{-32000, -28000, 0},
+ {-32001, -29999, -28001, -28000, -27999, -26000, -32768},
+ IMM_SWORD_RANGES_LEAST_MASKED_NEGATIVE,
+ 0},
+ {{-12, -11, -10, -9, -8, -7, 90, 1000},
+ {-13, -10, 9, -8, -7, 1000, 1000, 90},
+ IMM_SWORD_EACH_LEAST,
+ 1},
+ {{33, 44, 787, 23, 0},
+ {32, 43, 788, 0, 32, 0, 43, 0},
+ IMM_SWORD_ANY_LEAST,
+ 0},
+ {{18, 78, 999, -56, 0},
+ {18, 78, 999, 56, 18, 78, 999, 4},
+ IMM_SWORD_ORDERED_LEAST,
+ 1},
+};
+
+#define MM_CMPISTRA_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(UBYTE_ORDERED_MOST_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPISTRA_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRA_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRA_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRA_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistra, CMPISTRA, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistra, CMPISTRA, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistra, CMPISTRA, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistra, CMPISTRA, \
+ IS_CMPISTRI)
+
+result_t test_mm_cmpistra(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRA_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+#define TEST_MM_CMPISTRC_UBYTE_DATA_LEN 4
+static test_mm_cmpistri_ubyte_data_t
+ test_mm_cmpistrc_ubyte_data[TEST_MM_CMPISTRC_UBYTE_DATA_LEN] = {
+ {{89, 64, 88, 23, 11, 109, 34, 55, 0},
+ {2, 64, 87, 32, 1, 110, 43, 66, 0},
+ IMM_UBYTE_ANY_LEAST,
+ 1},
+ {{99, 67, 2, 127, 125, 3, 24, 77, 32, 68, 96, 74, 70, 110, 111, 5},
+ {98, 88, 67, 125, 111, 4, 56, 88, 33, 69, 99, 79, 123, 11, 10, 6},
+ IMM_UBYTE_EACH_LEAST,
+ 0},
+ {{2, 3, 74, 78, 81, 83, 85, 87, 89, 90, 0},
+ {86, 90, 74, 85, 87, 81, 2, 3, 3, 3, 75, 76, 77, 78, 82, 85},
+ IMM_UBYTE_RANGES_MOST_NEGATIVE,
+ 0},
+ {{45, 67, 8, 9, 0},
+ {67, 45, 67, 8, 9, 45, 67, 8, 9, 45, 67, 8, 9, 45, 67, 8},
+ IMM_UBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRC_SBYTE_DATA_LEN 4
+static test_mm_cmpistri_sbyte_data_t
+ test_mm_cmpistrc_sbyte_data[TEST_MM_CMPISTRC_SBYTE_DATA_LEN] = {
+ {{35, -35, 67, -66, 34, 55, 12, -100, 34, -34, 66, -67, 52, 100, 127,
+ -128},
+ {35, -35, 67, -66, 0, 55, 12, -100, 0, -34, 66, -67, 0, 100, 127,
+ -128},
+ IMM_SBYTE_EACH_MOST_MASKED_NEGATIVE,
+ 0},
+ {{-119, 112, 105, 104, 0},
+ {119, -112, 105, -104, 104, -34, 112, -119, 0},
+ IMM_SBYTE_ANY_LEAST,
+ 1},
+ {{-79, -69, -40, -35, 34, 45, 67, 88, 0},
+ {1, 2, 3, 4, 5, 6, 7, 8, 0},
+ IMM_SBYTE_RANGES_LEAST,
+ 0},
+ {{22, -109, 123, 115, -12, 0},
+ {22, -109, 12, 115, 22, -109, 123, 115, -12, 0},
+ IMM_SBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRC_UWORD_DATA_LEN 4
+static test_mm_cmpistri_uword_data_t
+ test_mm_cmpistrc_uword_data[TEST_MM_CMPISTRC_UWORD_DATA_LEN] = {
+ {{23, 45, 67, 89, 102, 121, 23, 45},
+ {23, 45, 67, 89, 102, 121, 23, 44},
+ IMM_UWORD_EACH_LEAST,
+ 1},
+ {{1, 11, 55, 75}, {13, 14, 56, 77, 0}, IMM_UWORD_ANY_LEAST, 0},
+ {{1, 9, 11, 19, 21, 29, 91, 99},
+ {10, 29, 30, 40, 50, 60, 70, 80},
+ IMM_UWORD_RANGES_LEAST,
+ 1},
+ {{3, 4, 5, 0},
+ {0, 3, 4, 5, 3, 4, 5, 0},
+ IMM_UWORD_ORDERED_LEAST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPISTRC_SWORD_DATA_LEN 4
+static test_mm_cmpistri_sword_data_t
+ test_mm_cmpistrc_sword_data[TEST_MM_CMPISTRC_SWORD_DATA_LEN] = {
+ {{-78, -56, 1000, 1002},
+ {-79, -55, -12, -13, 999, 1003, -80, 10000},
+ IMM_SWORD_RANGES_LEAST,
+ 0},
+ {{45, 32767, -30000, 2345, -23450, 0},
+ {45, 32767, -30000, 2346, -23456, 0, 45, 333},
+ IMM_SWORD_EACH_LEAST,
+ 1},
+ {{-10000, -20000, -30000, 10000, 20000, 30000, 0},
+ {10000, 20000, 30000, -10000, -20000, 20000, -30000, 12},
+ IMM_SWORD_ANY_MOST_NEGATIVE,
+ 1},
+ {{1, 2, -3, -55, -666, -7777, 8888},
+ {2, -3, -55, -666, -7777, 8888, 1},
+ IMM_SWORD_ORDERED_LEAST,
+ 0},
+};
+
+#define MM_CMPISTRC_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(UBYTE_RANGES_MOST_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRC_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRC_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPISTRC_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_MOST_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRC_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrc, CMPISTRC, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrc, CMPISTRC, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrc, CMPISTRC, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrc, CMPISTRC, \
+ IS_CMPISTRI)
+
+result_t test_mm_cmpistrc(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRC_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+#define TEST_MM_CMPISTRI_UBYTE_DATA_LEN 4
+static test_mm_cmpistri_ubyte_data_t
+ test_mm_cmpistri_ubyte_data[TEST_MM_CMPISTRI_UBYTE_DATA_LEN] = {
+ {{104, 117, 110, 116, 114, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {33, 64, 35, 36, 37, 94, 38, 42, 40, 41, 91, 93, 58, 59, 60, 62},
+ IMM_UBYTE_ANY_LEAST,
+ 16},
+ {{4, 5, 6, 7, 8, 111, 34, 21, 0, 0, 0, 0, 0, 0, 0, 0},
+ {5, 6, 7, 8, 8, 111, 43, 12, 0, 0, 0, 0, 0, 0, 0, 0},
+ IMM_UBYTE_EACH_MOST_MASKED_NEGATIVE,
+ 15},
+ {{65, 90, 97, 122, 48, 57, 0},
+ {47, 46, 43, 44, 42, 43, 45, 41, 40, 123, 124, 125, 126, 127, 1, 2},
+ IMM_UBYTE_RANGES_LEAST,
+ 16},
+ {{111, 222, 22, 0},
+ {33, 44, 55, 66, 77, 88, 99, 111, 222, 22, 11, 0},
+ IMM_UBYTE_ORDERED_LEAST,
+ 7},
+};
+
+#define TEST_MM_CMPISTRI_SBYTE_DATA_LEN 4
+static test_mm_cmpistri_sbyte_data_t
+ test_mm_cmpistri_sbyte_data[TEST_MM_CMPISTRI_SBYTE_DATA_LEN] = {
+ {{1, 2, 3, 4, 5, -99, -128, -100, -1, 49, 0},
+ {2, 3, 3, 4, 5, -100, -128, -99, 1, 44, 0},
+ IMM_SBYTE_EACH_LEAST,
+ 2},
+ {{99, 100, 23, -90, 0},
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 99, 100, 23, -90, -90, 100},
+ IMM_SBYTE_ANY_LEAST,
+ 10},
+ {{-10, -2, 89, 97, 0},
+ {-11, -12, -3, 1, 97, 0},
+ IMM_SBYTE_RANGES_LEAST_NEGATIVE,
+ 0},
+ {{-10, -90, -22, 30, 87, 127, 0}, {0}, IMM_SBYTE_ORDERED_LEAST, 16},
+};
+
+#define TEST_MM_CMPISTRI_UWORD_DATA_LEN 4
+static test_mm_cmpistri_uword_data_t
+ test_mm_cmpistri_uword_data[TEST_MM_CMPISTRI_UWORD_DATA_LEN] = {
+ {{38767, 99, 1234, 65535, 2222, 1, 34456, 11},
+ {38768, 999, 1235, 4444, 2222, 1, 34456, 12},
+ IMM_UWORD_EACH_LEAST,
+ 4},
+ {{22222, 33333, 44444, 55555, 6000, 600, 60, 6},
+ {0},
+ IMM_UWORD_ANY_LEAST,
+ 8},
+ {{34, 777, 1000, 1004, 0},
+ {33, 32, 889, 1003, 0},
+ IMM_UWORD_RANGES_LEAST,
+ 3},
+ {{44, 555, 44, 0},
+ {44, 555, 44, 555, 44, 555, 44, 0},
+ IMM_UWORD_ORDERED_MOST_NEGATIVE,
+ 7},
+};
+
+#define TEST_MM_CMPISTRI_SWORD_DATA_LEN 4
+static test_mm_cmpistri_sword_data_t
+ test_mm_cmpistri_sword_data[TEST_MM_CMPISTRI_SWORD_DATA_LEN] = {
+ {{-1, -5, 10, 30, 40, 0},
+ {13, -2, 7, 80, 11, 0},
+ IMM_SWORD_RANGES_LEAST,
+ 0},
+ {{-12, 12, 6666, 777, 0},
+ {11, 12, 6666, 777, 0},
+ IMM_SWORD_EACH_LEAST,
+ 1},
+ {{23, 22, 33, 567, 9999, 12345, 0},
+ {23, 22, 23, 22, 23, 22, 23, 12222},
+ IMM_SWORD_ANY_MOST,
+ 6},
+ {{12, -234, -567, 8888, 0},
+ {13, -234, -567, 8888, 12, -234, -567, 8889},
+ IMM_SWORD_ORDERED_LEAST,
+ 8},
+};
+
+#define MM_CMPISTRI_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRI_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(SBYTE_RANGES_LEAST_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRI_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_MOST_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPISTRI_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_MOST, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRI_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistri, CMPISTRI, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistri, CMPISTRI, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistri, CMPISTRI, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistri, CMPISTRI, \
+ IS_CMPISTRI)
+
+result_t test_mm_cmpistri(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRI_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+#define IS_CMPISTRM 0
+
+typedef struct {
+ uint8_t a[16], b[16];
+ const int imm8;
+ uint8_t expect[16];
+} test_mm_cmpistrm_ubyte_data_t;
+typedef struct {
+ int8_t a[16], b[16];
+ const int imm8;
+ int8_t expect[16];
+} test_mm_cmpistrm_sbyte_data_t;
+typedef struct {
+ uint16_t a[8], b[8];
+ const int imm8;
+ uint16_t expect[8];
+} test_mm_cmpistrm_uword_data_t;
+typedef struct {
+ int16_t a[8], b[8];
+ const int imm8;
+ int16_t expect[8];
+} test_mm_cmpistrm_sword_data_t;
+
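+// For _mm_cmpistrm the expect[] array holds the mask the intrinsic should
+// return: either a per-element mask, or a bit mask packed into the low bits
+// when _SIDD_BIT_MASK is selected.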
+#define TEST_MM_CMPISTRM_UBYTE_DATA_LEN 4
+static test_mm_cmpistrm_ubyte_data_t
+ test_mm_cmpistrm_ubyte_data[TEST_MM_CMPISTRM_UBYTE_DATA_LEN] = {
+ {{88, 89, 90, 91, 92, 93, 0},
+ {78, 88, 99, 127, 92, 93, 0},
+ IMM_UBYTE_EACH_UNIT,
+ {0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255}},
+ {{30, 41, 52, 63, 74, 85, 0},
+ {30, 42, 51, 63, 74, 85, 0},
+ IMM_UBYTE_ANY_BIT,
+ {57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{34, 32, 21, 16, 7, 0},
+ {34, 33, 32, 31, 30, 29, 10, 6, 0},
+ IMM_UBYTE_RANGES_UNIT,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{33, 21, 123, 89, 76, 56, 0},
+ {33, 21, 124, 33, 21, 123, 89, 76, 56, 33, 21, 123, 89, 76, 56, 22},
+ IMM_UBYTE_ORDERED_UNIT,
+ {0, 0, 0, 255, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0}},
+};
+
+#define TEST_MM_CMPISTRM_SBYTE_DATA_LEN 4
+static test_mm_cmpistrm_sbyte_data_t
+ test_mm_cmpistrm_sbyte_data[TEST_MM_CMPISTRM_SBYTE_DATA_LEN] = {
+ {{-11, -90, -128, 127, 66, 45, 23, 32, 99, 10, 0},
+ {-10, -90, -124, 33, 66, 45, 23, 22, 99, 100, 0},
+ IMM_SBYTE_EACH_BIT_MASKED_NEGATIVE,
+ {-115, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{13, 14, 55, 1, 32, 100, 101, 102, 103, 97, 23, 21, 45, 54, 55, 56},
+ {22, 109, 87, 45, 1, 103, 22, 102, 43, 87, 78, 56, 65, 55, 44, 33},
+ IMM_SBYTE_ANY_UNIT,
+ {0, 0, 0, -1, -1, -1, 0, -1, 0, 0, 0, -1, 0, -1, 0, 0}},
+ {{-31, -28, -9, 10, 45, 67, 88, 0},
+ {-30, -32, -33, -44, 93, 44, 9, 89, 0},
+ IMM_SBYTE_RANGES_UNIT,
+ {-1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {{34, -10, 78, -99, -100, 100, 0},
+ {34, 123, 88, 4, 34, -10, 78, -99, -100, 100, 34, -10, 78, -99, -100,
+ -100},
+ IMM_SBYTE_ORDERED_UNIT,
+ {0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+};
+
+#define TEST_MM_CMPISTRM_UWORD_DATA_LEN 4
+static test_mm_cmpistrm_uword_data_t
+ test_mm_cmpistrm_uword_data[TEST_MM_CMPISTRM_UWORD_DATA_LEN] = {
+ {{1024, 2048, 4096, 5000, 0},
+ {1023, 1000, 2047, 1596, 5566, 5666, 4477, 9487},
+ IMM_UWORD_RANGES_UNIT,
+ {0, 0, 65535, 65535, 0, 0, 65535, 0}},
+ {{1, 2, 345, 7788, 10000, 0},
+ {2, 1, 345, 7788, 10000, 0},
+ IMM_UWORD_EACH_UNIT,
+ {0, 0, 65535, 65535, 65535, 65535, 65535, 65535}},
+ {{100, 0},
+ {12345, 6766, 234, 0, 1, 34, 89, 100},
+ IMM_UWORD_ANY_UNIT,
+ {0, 0, 0, 0, 0, 0, 0, 0}},
+ {{34, 122, 9000, 0},
+ {34, 122, 9000, 34, 122, 9000, 34, 122},
+ IMM_UWORD_ORDERED_UNIT_NEGATIVE,
+ {0, 65535, 65535, 0, 65535, 65535, 0, 65535}},
+};
+
+#define TEST_MM_CMPISTRM_SWORD_DATA_LEN 4
+static test_mm_cmpistrm_sword_data_t
+ test_mm_cmpistrm_sword_data[TEST_MM_CMPISTRM_SWORD_DATA_LEN] = {
+ {{-39, -10, 17, 89, 998, 1000, 1234, 4566},
+ {-40, -52, -39, -29, 100, 1024, 4565, 4600},
+ IMM_SWORD_RANGES_BIT,
+ {0, 0, -1, -1, 0, 0, -1, 0}},
+ {{345, -1900, -10000, -30000, 50, 6789, 0},
+ {103, -1901, -10000, 32767, 50, 6780, 0},
+ IMM_SWORD_EACH_UNIT,
+ {0, 0, -1, 0, -1, 0, -1, -1}},
+ {{677, 10001, 1001, 23, 0},
+ {345, 677, 10001, 1003, 1001, 32, 23, 677},
+ IMM_SWORD_ANY_UNIT,
+ {0, -1, -1, 0, -1, 0, -1, -1}},
+ {{1024, -2288, 3752, -4096, 0},
+ {1024, 1024, -2288, 3752, -4096, 1024, -2288, 3752},
+ IMM_SWORD_ORDERED_UNIT,
+ {0, -1, 0, 0, 0, -1, 0, 0}},
+};
+
+#define MM_CMPISTRM_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_EACH_UNIT, __VA_ARGS__) \
+ _(UBYTE_ANY_BIT, __VA_ARGS__) \
+ _(UBYTE_RANGES_UNIT, __VA_ARGS__) \
+ _(UBYTE_ORDERED_UNIT, __VA_ARGS__)
+
+#define MM_CMPISTRM_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_BIT_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ANY_UNIT, __VA_ARGS__) \
+ _(SBYTE_RANGES_UNIT, __VA_ARGS__) \
+ _(SBYTE_ORDERED_UNIT, __VA_ARGS__)
+
+#define MM_CMPISTRM_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_RANGES_UNIT, __VA_ARGS__) \
+ _(UWORD_EACH_UNIT, __VA_ARGS__) \
+ _(UWORD_ANY_UNIT, __VA_ARGS__) \
+ _(UWORD_ORDERED_UNIT_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPISTRM_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_UNIT, __VA_ARGS__) \
+ _(SWORD_EACH_UNIT, __VA_ARGS__) \
+ _(SWORD_ANY_UNIT, __VA_ARGS__) \
+ _(SWORD_ORDERED_UNIT, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRM_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrm, CMPISTRM, \
+ IS_CMPISTRM) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrm, CMPISTRM, \
+ IS_CMPISTRM) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrm, CMPISTRM, \
+ IS_CMPISTRM) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrm, CMPISTRM, \
+ IS_CMPISTRM)
+
+result_t test_mm_cmpistrm(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRM_TEST_CASES
+ return TEST_SUCCESS;
+}
+
+#undef IS_CMPISTRM
+
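+// _mm_cmpistro returns bit 0 of the intermediate result mask (IntRes2); the
+// last field of each entry below is the expected 0/1 value.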
+#define TEST_MM_CMPISTRO_UBYTE_DATA_LEN 4
+static test_mm_cmpistri_ubyte_data_t
+ test_mm_cmpistro_ubyte_data[TEST_MM_CMPISTRO_UBYTE_DATA_LEN] = {
+ {{3, 4, 5, 0}, {5, 5, 5, 4, 3, 0}, IMM_UBYTE_ANY_LEAST, 1},
+ {{23, 127, 88, 3, 45, 6, 7, 2, 0},
+ {32, 127, 87, 2, 44, 32, 1, 2, 0},
+ IMM_UBYTE_EACH_MOST_NEGATIVE,
+ 1},
+ {{3, 4, 55, 56, 0},
+ {2, 3, 4, 5, 43, 54, 55, 56, 0},
+ IMM_UBYTE_RANGES_LEAST,
+ 0},
+ {{55, 66, 77, 11, 23, 0},
+ {55, 55, 66, 77, 11, 23, 55, 66, 77, 11, 23, 33, 123, 18, 0},
+ IMM_UBYTE_ORDERED_LEAST,
+ 0},
+};
+
+#define TEST_MM_CMPISTRO_SBYTE_DATA_LEN 4
+static test_mm_cmpistri_sbyte_data_t
+ test_mm_cmpistro_sbyte_data[TEST_MM_CMPISTRO_SBYTE_DATA_LEN] = {
+ {{33, -33, 23, -32, -1, -1, 23, 46, 78, 34, 54, 100, 90, 91, 92, 101},
+ {32, 33, 23, -33, -2, -3, 23, 46, -78, 43, 56, 10, 9, 91, 90, 126},
+ IMM_SBYTE_EACH_LEAST,
+ 0},
+ {{-1, -2, -3, -4, -5, -6, -7, -8, 87, 86, 85, 84, 83, 82, 81, 80},
+ {87, 79, 0},
+ IMM_SBYTE_ANY_LEAST,
+ 1},
+ {{3, 4, 2, 0},
+ {3, 3, 4, 5, 6, 2, 0},
+ IMM_SBYTE_RANGES_MOST_NEGATIVE,
+ 0},
+ {{23, 66, 1, 13, 17, 1, 13, 17, 0},
+ {23, 66, 1, 13, 17, 1, 13, 17, 32, 23, 66, 1, 13, 17, 1, 13},
+ IMM_SBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRO_UWORD_DATA_LEN 4
+static test_mm_cmpistri_uword_data_t
+ test_mm_cmpistro_uword_data[TEST_MM_CMPISTRO_UWORD_DATA_LEN] = {
+ {{3333, 4444, 10000, 20000, 40000, 50000, 65535, 0},
+ {3332, 4443, 10000, 20001, 40000, 50000, 65534, 0},
+ IMM_UWORD_EACH_LEAST,
+ 0},
+ {{1, 2, 333, 4444, 55555, 7777, 23, 347},
+ {4444, 7777, 55555, 23, 347, 2, 1, 0},
+ IMM_UWORD_ANY_LEAST,
+ 1},
+ {{356, 380, 320, 456, 0},
+ {455, 379, 333, 319, 300, 299, 0},
+ IMM_UWORD_RANGES_LEAST,
+ 1},
+ {{3, 1001, 235, 0},
+ {3, 1001, 235, 0, 3, 1001, 235, 0},
+ IMM_UWORD_ORDERED_MOST_MASKED_NEGATIVE,
+ 0},
+};
+
+#define TEST_MM_CMPISTRO_SWORD_DATA_LEN 4
+static test_mm_cmpistri_sword_data_t
+ test_mm_cmpistro_sword_data[TEST_MM_CMPISTRO_SWORD_DATA_LEN] = {
+ {{-10, -5, -100, -90, 45, 56, 1000, 1009},
+ {54, -1, -5, -6, 1001, 10001, 1009, 1009},
+ IMM_SWORD_RANGES_LEAST,
+ 1},
+ {{456, -32768, 32767, 13, 0},
+ {455, -32768, 32767, 31, 0},
+ IMM_SWORD_EACH_LEAST,
+ 0},
+ {{23, 46, -44, 32000, 0},
+ {23, 66, -44, 678, 32000, 0},
+ IMM_SWORD_ANY_MOST_MASKED_NEGATIVE,
+ 0},
+ {{-7900, -101, -34, 666, 345, 0},
+ {-7900, -101, -34, 666, 345, -7900, -191, -34},
+ IMM_SWORD_ORDERED_LEAST,
+ 1},
+};
+
+#define MM_CMPISTRO_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_EACH_MOST_NEGATIVE, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRO_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(SBYTE_RANGES_MOST_NEGATIVE, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRO_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+
+#define MM_CMPISTRO_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRO_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistro, CMPISTRO, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistro, CMPISTRO, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistro, CMPISTRO, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistro, CMPISTRO, \
+ IS_CMPISTRI)
+
+result_t test_mm_cmpistro(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRO_TEST_CASES
+ return TEST_SUCCESS;
+}
+
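+// _mm_cmpistrs returns 1 when string a contains a null terminator (i.e. a is
+// shorter than a full 128-bit register), and 0 otherwise.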
+#define TEST_MM_CMPISTRS_UBYTE_DATA_LEN 4
+static test_mm_cmpistri_ubyte_data_t
+ test_mm_cmpistrs_ubyte_data[TEST_MM_CMPISTRS_UBYTE_DATA_LEN] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ {1, 2, 3, 4, 5, 0},
+ IMM_UBYTE_ANY_LEAST,
+ 0},
+ {{127, 126, 125, 124, 0},
+ {127, 1, 34, 43, 54, 0},
+ IMM_UBYTE_EACH_LEAST,
+ 1},
+ {{127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+ 127, 127},
+ {56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 0},
+ IMM_UBYTE_RANGES_LEAST,
+ 0},
+ {{33, 44, 55, 78, 99, 100, 101, 102, 0},
+ {0},
+ IMM_UBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRS_SBYTE_DATA_LEN 4
+static test_mm_cmpistri_sbyte_data_t
+ test_mm_cmpistrs_sbyte_data[TEST_MM_CMPISTRS_SBYTE_DATA_LEN] = {
+ {{100, 99, 98, 97, -67, -4, -5, -6, -7, -1, -2, -3, -128, -128, -128,
+ -128},
+ {0},
+ IMM_SBYTE_EACH_LEAST,
+ 0},
+ {{-128, -128, -128, -128, 127, 127, 127, 127, -128, -128, -128, -128,
+ 127, 127, 127, 127},
+ {-1, -2, -11, -98, -12, 0},
+ IMM_SBYTE_ANY_LEAST,
+ 0},
+ {{0, 1, 2, 3, 4, 5, -6, -7},
+ {0, 1, 2, 3, 4, 5, 6, 7},
+ IMM_SBYTE_RANGES_LEAST,
+ 1},
+ {{0, 1, 0, -1, 0, -2, 0, 0, -3, 4, 0, 0, 5, 6, 7, 8},
+ {0},
+ IMM_SBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRS_UWORD_DATA_LEN 4
+static test_mm_cmpistri_uword_data_t
+ test_mm_cmpistrs_uword_data[TEST_MM_CMPISTRS_UWORD_DATA_LEN] = {
+ {{0, 1, 2, 3, 65535, 0, 0, 0},
+ {9, 8, 7, 6, 5, 4, 3, 2},
+ IMM_UWORD_EACH_LEAST,
+ 1},
+ {{4, 567, 65535, 32, 34, 0}, {0}, IMM_UWORD_ANY_LEAST, 1},
+ {{65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535},
+ {1, 2, 3, 4, 900, 7890, 6767, 0},
+ IMM_UWORD_RANGES_LEAST,
+ 0},
+ {{1, 2, 3, 4, 5, 6, 7, 8}, {1, 2, 3, 4, 0}, IMM_UWORD_ORDERED_LEAST, 0},
+};
+
+#define TEST_MM_CMPISTRS_SWORD_DATA_LEN 4
+static test_mm_cmpistri_sword_data_t
+ test_mm_cmpistrs_sword_data[TEST_MM_CMPISTRS_SWORD_DATA_LEN] = {
+ {{-32768, -32768, -32768, -32768, -32768, -32768, -32768, -3276},
+ {34, 45, 6, 7, 9, 8, 7, 6},
+ IMM_SWORD_RANGES_LEAST,
+ 0},
+ {{1000, 2000, 4000, 8000, 16000, 32000, 32767, 0},
+ {3, 4, 56, 23, 0},
+ IMM_SWORD_EACH_LEAST,
+ 1},
+ {{0, 1, 3, 4, -32768, 9, 0, 1},
+ {56, 47, 43, 999, 1111, 0},
+ IMM_SWORD_ANY_LEAST,
+ 1},
+ {{1111, 1212, 831, 2345, 32767, 32767, -32768, 32767},
+ {0},
+ IMM_SWORD_ORDERED_LEAST,
+ 0},
+};
+
+#define MM_CMPISTRS_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRS_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRS_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRS_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRS_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrs, CMPISTRS, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrs, CMPISTRS, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrs, CMPISTRS, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrs, CMPISTRS, \
+ IS_CMPISTRI)
+
+result_t test_mm_cmpistrs(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRS_TEST_CASES
+ return TEST_SUCCESS;
+}
+
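+// _mm_cmpistrz returns 1 when string b contains a null terminator, and 0
+// otherwise.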
+#define TEST_MM_CMPISTRZ_UBYTE_DATA_LEN 4
+static test_mm_cmpistri_ubyte_data_t
+ test_mm_cmpistrz_ubyte_data[TEST_MM_CMPISTRZ_UBYTE_DATA_LEN] = {
+ {{0},
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255},
+ IMM_UBYTE_ANY_LEAST,
+ 0},
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ {1, 1, 1, 1, 2, 2, 2, 2, 4, 5, 6, 7, 89, 89, 89, 89},
+ IMM_UBYTE_EACH_LEAST,
+ 0},
+ {{1, 2, 3, 4, 0}, {0}, IMM_UBYTE_RANGES_LEAST, 1},
+ {{127, 126, 125, 124, 124, 0},
+ {100, 101, 123, 100, 111, 122, 0},
+ IMM_UBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRZ_SBYTE_DATA_LEN 4
+static test_mm_cmpistri_sbyte_data_t
+ test_mm_cmpistrz_sbyte_data[TEST_MM_CMPISTRZ_SBYTE_DATA_LEN] = {
+ {{127, 126, 99, -100, 0},
+ {-128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+ -128, -128, -128, -128, -128},
+ IMM_SBYTE_EACH_LEAST,
+ 0},
+ {{120, 66, 54, 0}, {3, 4, 5, -99, -6, 0}, IMM_SBYTE_ANY_LEAST, 1},
+ {{0},
+ {127, 127, 127, 127, 126, 126, 126, 126, -127, -127, -127, -127, -1,
+ -1, -1, -1},
+ IMM_SBYTE_RANGES_LEAST,
+ 0},
+ {{12, 3, 4, 5, 6, 7, 8, 0},
+ {-1, -2, -3, -4, -6, 75, 0},
+ IMM_SBYTE_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRZ_UWORD_DATA_LEN 4
+static test_mm_cmpistri_uword_data_t
+ test_mm_cmpistrz_uword_data[TEST_MM_CMPISTRZ_UWORD_DATA_LEN] = {
+ {{10000, 20000, 50000, 40000, 0},
+ {65535, 65533, 60000, 60000, 50000, 123, 1, 2},
+ IMM_UWORD_EACH_LEAST,
+ 0},
+ {{0},
+ {65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535},
+ IMM_UWORD_ANY_LEAST,
+ 0},
+ {{3, 333, 3333, 33333, 0}, {0}, IMM_UWORD_RANGES_LEAST, 1},
+ {{123, 456, 7, 890, 0},
+ {123, 456, 7, 900, 0},
+ IMM_UWORD_ORDERED_LEAST,
+ 1},
+};
+
+#define TEST_MM_CMPISTRZ_SWORD_DATA_LEN 4
+static test_mm_cmpistri_sword_data_t
+ test_mm_cmpistrz_sword_data[TEST_MM_CMPISTRZ_SWORD_DATA_LEN] = {
+ {{2, 22, 222, 2222, 22222, -2222, -222, -22},
+ {-32768, 32767, -32767, 32766, -32766, 32765, -32768, 32767},
+ IMM_SWORD_RANGES_LEAST,
+ 0},
+ {{345, 10000, -10000, -30000, 0},
+ {1, 2, 3, 4, 5, 6, 7, 0},
+ IMM_SWORD_EACH_LEAST,
+ 1},
+ {{0}, {0}, IMM_SWORD_ANY_LEAST, 1},
+ {{1, 2, -789, -1, -90, 0},
+ {1, 10, 100, 1000, 10000, -10000, -1000, 1000},
+ IMM_SWORD_ORDERED_LEAST,
+ 0},
+};
+
+#define MM_CMPISTRZ_UBYTE_TEST_CASES(_, ...) \
+ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRZ_SBYTE_TEST_CASES(_, ...) \
+ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRZ_UWORD_TEST_CASES(_, ...) \
+ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define MM_CMPISTRZ_SWORD_TEST_CASES(_, ...) \
+ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+
+#define GENERATE_MM_CMPISTRZ_TEST_CASES \
+ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrz, CMPISTRZ, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrz, CMPISTRZ, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrz, CMPISTRZ, \
+ IS_CMPISTRI) \
+ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrz, CMPISTRZ, \
+ IS_CMPISTRI)
+
+result_t test_mm_cmpistrz(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ GENERATE_MM_CMPISTRZ_TEST_CASES
+ return TEST_SUCCESS;
+}
+
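+// CRC32: the SSE4.2 CRC32 instructions use the CRC-32C (Castagnoli)
+// polynomial; each test checks the intrinsic against the canonical_crc32_*
+// reference implementation.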
+result_t test_mm_crc32_u16(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint32_t crc = *(const uint32_t *) impl.mTestIntPointer1;
+ uint16_t v = (uint16_t) iter;
+ uint32_t result = _mm_crc32_u16(crc, v);
+ ASSERT_RETURN(result == canonical_crc32_u16(crc, v));
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_crc32_u32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint32_t crc = *(const uint32_t *) impl.mTestIntPointer1;
+ uint32_t v = *(const uint32_t *) impl.mTestIntPointer2;
+ uint32_t result = _mm_crc32_u32(crc, v);
+ ASSERT_RETURN(result == canonical_crc32_u32(crc, v));
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_crc32_u64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint64_t crc = *(const uint64_t *) impl.mTestIntPointer1;
+ uint64_t v = *(const uint64_t *) impl.mTestIntPointer2;
+ uint64_t result = _mm_crc32_u64(crc, v);
+ ASSERT_RETURN(result == canonical_crc32_u64(crc, v));
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_crc32_u8(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint32_t crc = *(const uint32_t *) impl.mTestIntPointer1;
+ uint8_t v = (uint8_t) iter;
+ uint32_t result = _mm_crc32_u8(crc, v);
+ ASSERT_RETURN(result == canonical_crc32_u8(crc, v));
+ return TEST_SUCCESS;
+}
+
+/* AES */
+result_t test_mm_aesenc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *b = (const int32_t *) impl.mTestIntPointer2;
+ __m128i data = _mm_loadu_si128((const __m128i *) a);
+ __m128i rk = _mm_loadu_si128((const __m128i *) b);
+
+ __m128i resultReference = aesenc_128_reference(data, rk);
+ __m128i resultIntrinsic = _mm_aesenc_si128(data, rk);
+
+ return validate128(resultReference, resultIntrinsic);
+}
+
+result_t test_mm_aesdec_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *b = (const int32_t *) impl.mTestIntPointer2;
+ __m128i data = _mm_loadu_si128((const __m128i *) a);
+ __m128i rk = _mm_loadu_si128((const __m128i *) b);
+
+ __m128i resultReference = aesdec_128_reference(data, rk);
+ __m128i resultIntrinsic = _mm_aesdec_si128(data, rk);
+
+ return validate128(resultReference, resultIntrinsic);
+}
+
+result_t test_mm_aesenclast_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const int32_t *a = (const int32_t *) impl.mTestIntPointer1;
+ const int32_t *b = (const int32_t *) impl.mTestIntPointer2;
+ __m128i data = _mm_loadu_si128((const __m128i *) a);
+ __m128i rk = _mm_loadu_si128((const __m128i *) b);
+
+ __m128i resultReference = aesenclast_128_reference(data, rk);
+ __m128i resultIntrinsic = _mm_aesenclast_si128(data, rk);
+
+ return validate128(resultReference, resultIntrinsic);
+}
+
+result_t test_mm_aesdeclast_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *a = (const uint8_t *) impl.mTestIntPointer1;
+ const uint8_t *rk = (const uint8_t *) impl.mTestIntPointer2;
+ __m128i _a = _mm_loadu_si128((const __m128i *) a);
+ __m128i _rk = _mm_loadu_si128((const __m128i *) rk);
+ uint8_t c[16] = {};
+
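+ // Reference AESDECLAST: apply InvShiftRows and InvSubBytes (inverse S-box),
+ // then XOR with the round key; the last round has no InvMixColumns step.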
+ uint8_t v[4][4];
+ for (int i = 0; i < 16; ++i) {
+ v[((i / 4) + (i % 4)) % 4][i % 4] = crypto_aes_rsbox[a[i]];
+ }
+ for (int i = 0; i < 16; ++i) {
+ c[i] = v[i / 4][i % 4] ^ rk[i];
+ }
+
+ __m128i result_reference = _mm_loadu_si128((const __m128i *) c);
+ __m128i result_intrinsic = _mm_aesdeclast_si128(_a, _rk);
+
+ return validate128(result_reference, result_intrinsic);
+}
+
+result_t test_mm_aesimc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint8_t *a = (const uint8_t *) impl.mTestIntPointer1;
+ __m128i _a = _mm_loadu_si128((const __m128i *) a);
+
+ uint8_t e, f, g, h, v[4][4];
+ for (int i = 0; i < 16; ++i) {
+ ((uint8_t *) v)[i] = a[i];
+ }
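+ // InvMixColumns: multiply each 4-byte column by the inverse MDS matrix
+ // {0e, 0b, 0d, 09} over GF(2^8); MULTIPLY performs the GF(2^8) product.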
+ for (int i = 0; i < 4; ++i) {
+ e = v[i][0];
+ f = v[i][1];
+ g = v[i][2];
+ h = v[i][3];
+
+ v[i][0] = MULTIPLY(e, 0x0e) ^ MULTIPLY(f, 0x0b) ^ MULTIPLY(g, 0x0d) ^
+ MULTIPLY(h, 0x09);
+ v[i][1] = MULTIPLY(e, 0x09) ^ MULTIPLY(f, 0x0e) ^ MULTIPLY(g, 0x0b) ^
+ MULTIPLY(h, 0x0d);
+ v[i][2] = MULTIPLY(e, 0x0d) ^ MULTIPLY(f, 0x09) ^ MULTIPLY(g, 0x0e) ^
+ MULTIPLY(h, 0x0b);
+ v[i][3] = MULTIPLY(e, 0x0b) ^ MULTIPLY(f, 0x0d) ^ MULTIPLY(g, 0x09) ^
+ MULTIPLY(h, 0x0e);
+ }
+
+ __m128i result_reference = _mm_loadu_si128((const __m128i *) v);
+ __m128i result_intrinsic = _mm_aesimc_si128(_a);
+
+ return validate128(result_reference, result_intrinsic);
+}
+
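+// SubWord step of the AES key schedule: apply the forward S-box to each byte
+// of a 32-bit word. Used by the AESKEYGENASSIST reference below.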
+static inline uint32_t sub_word(uint32_t in)
+{
+ return (crypto_aes_sbox[(in >> 24) & 0xff] << 24) |
+ (crypto_aes_sbox[(in >> 16) & 0xff] << 16) |
+ (crypto_aes_sbox[(in >> 8) & 0xff] << 8) |
+ (crypto_aes_sbox[in & 0xff]);
+}
+
+// FIXME: improve the test case for AES-256 key expansion.
+// Reference:
+// https://github.com/randombit/botan/blob/master/src/lib/block/aes/aes_ni/aes_ni.cpp
+result_t test_mm_aeskeygenassist_si128(const SSE2NEONTestImpl &impl,
+ uint32_t iter)
+{
+ const uint32_t *a = (const uint32_t *) impl.mTestIntPointer1;
+ __m128i data = load_m128i(a);
+ uint32_t sub_x1 = sub_word(a[1]);
+ uint32_t sub_x3 = sub_word(a[3]);
+ __m128i result_reference;
+ __m128i result_intrinsic;
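+ // Expected result layout: {SubWord(X1), RotWord(SubWord(X1)) ^ rcon,
+ // SubWord(X3), RotWord(SubWord(X3)) ^ rcon}, where X1 and X3 are the second
+ // and fourth 32-bit lanes of the input and rotr(x, 8) serves as RotWord.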
+#define TEST_IMPL(IDX) \
+ uint32_t res##IDX[4] = { \
+ sub_x1, \
+ rotr(sub_x1, 8) ^ IDX, \
+ sub_x3, \
+ rotr(sub_x3, 8) ^ IDX, \
+ }; \
+ result_reference = load_m128i(res##IDX); \
+ result_intrinsic = _mm_aeskeygenassist_si128(data, IDX); \
+ CHECK_RESULT(validate128(result_reference, result_intrinsic));
+
+ IMM_256_ITER
+#undef TEST_IMPL
+ return TEST_SUCCESS;
+}
+
+/* Others */
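+// _mm_clmulepi64_si128: imm8 bit 0 selects the 64-bit half of a and bit 4
+// selects the half of b; all four combinations are checked against clmul_64.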
+result_t test_mm_clmulepi64_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint64_t *_a = (const uint64_t *) impl.mTestIntPointer1;
+ const uint64_t *_b = (const uint64_t *) impl.mTestIntPointer2;
+ __m128i a = load_m128i(_a);
+ __m128i b = load_m128i(_b);
+ auto result = clmul_64(_a[0], _b[0]);
+ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x00), result.first,
+ result.second))
+ return TEST_FAIL;
+ result = clmul_64(_a[1], _b[0]);
+ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x01), result.first,
+ result.second))
+ return TEST_FAIL;
+ result = clmul_64(_a[0], _b[1]);
+ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x10), result.first,
+ result.second))
+ return TEST_FAIL;
+ result = clmul_64(_a[1], _b[1]);
+ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x11), result.first,
+ result.second))
+ return TEST_FAIL;
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_get_denormals_zero_mode(const SSE2NEONTestImpl &impl,
+ uint32_t iter)
+{
+ int res_denormals_zero_on, res_denormals_zero_off;
+
+ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+ res_denormals_zero_on =
+ _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON;
+
+ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_OFF);
+ res_denormals_zero_off =
+ _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_OFF;
+
+ return (res_denormals_zero_on && res_denormals_zero_off) ? TEST_SUCCESS
+ : TEST_FAIL;
+}
+
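+// Bit-by-bit population count used as the reference for _mm_popcnt_u32/u64.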
+static int popcnt_reference(uint64_t a)
+{
+ int count = 0;
+ while (a != 0) {
+ count += a & 1;
+ a >>= 1;
+ }
+ return count;
+}
+
+result_t test_mm_popcnt_u32(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint64_t *a = (const uint64_t *) impl.mTestIntPointer1;
+ ASSERT_RETURN(popcnt_reference((uint32_t) a[0]) ==
+ _mm_popcnt_u32((unsigned int) a[0]));
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_popcnt_u64(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ const uint64_t *a = (const uint64_t *) impl.mTestIntPointer1;
+ ASSERT_RETURN(popcnt_reference(a[0]) == _mm_popcnt_u64(a[0]));
+ return TEST_SUCCESS;
+}
+
+result_t test_mm_set_denormals_zero_mode(const SSE2NEONTestImpl &impl,
+ uint32_t iter)
+{
+ result_t res_set_denormals_zero_on, res_set_denormals_zero_off;
+ float factor = 2;
+ float denormal = FLT_MIN / factor;
+ float denormals[4] = {denormal, denormal, denormal, denormal};
+ float factors[4] = {factor, factor, factor, factor};
+ __m128 ret;
+
+ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+ ret = _mm_mul_ps(load_m128(denormals), load_m128(factors));
+ res_set_denormals_zero_on = validateFloat(ret, 0, 0, 0, 0);
+
+ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_OFF);
+ ret = _mm_mul_ps(load_m128(denormals), load_m128(factors));
+#if defined(__arm__)
+ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
+ // regardless of the value of the FZ bit.
+ res_set_denormals_zero_off = validateFloat(ret, 0, 0, 0, 0);
+#else
+ res_set_denormals_zero_off =
+ validateFloat(ret, FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
+#endif
+
+ if (res_set_denormals_zero_on == TEST_FAIL ||
+ res_set_denormals_zero_off == TEST_FAIL)
+ return TEST_FAIL;
+ return TEST_SUCCESS;
+}
+
+result_t test_rdtsc(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ uint64_t start = _rdtsc();
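+ // Busy loop with a compiler barrier so it is not optimized away, giving the
+ // time-stamp counter a chance to advance between the two reads.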
+ for (int i = 0; i < 100000; i++) {
+#if defined(_MSC_VER)
+ _ReadWriteBarrier();
+#else
+ __asm__ __volatile__("" ::: "memory");
+#endif
+ }
+ uint64_t end = _rdtsc();
+ return end > start ? TEST_SUCCESS : TEST_FAIL;
+}
+
+SSE2NEONTestImpl::SSE2NEONTestImpl(void)
+{
+ mTestFloatPointer1 = (float *) platformAlignedAlloc(sizeof(__m128));
+ mTestFloatPointer2 = (float *) platformAlignedAlloc(sizeof(__m128));
+ mTestIntPointer1 = (int32_t *) platformAlignedAlloc(sizeof(__m128i));
+ mTestIntPointer2 = (int32_t *) platformAlignedAlloc(sizeof(__m128i));
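+ // Seed the RNG with a fixed value so the generated test data is
+ // reproducible across runs.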
+ SSE2NEON_INIT_RNG(123456);
+ for (uint32_t i = 0; i < MAX_TEST_VALUE; i++) {
+ mTestFloats[i] = ranf(-100000, 100000);
+ mTestInts[i] = (int32_t) ranf(-100000, 100000);
+ }
+}
+
+// Dummy function to match the case label in runSingleTest.
+result_t test_last(const SSE2NEONTestImpl &impl, uint32_t iter)
+{
+ return TEST_SUCCESS;
+}
+
+result_t SSE2NEONTestImpl::loadTestFloatPointers(uint32_t i)
+{
+ result_t ret =
+ do_mm_store_ps(mTestFloatPointer1, mTestFloats[i], mTestFloats[i + 1],
+ mTestFloats[i + 2], mTestFloats[i + 3]);
+ if (ret == TEST_SUCCESS) {
+ ret = do_mm_store_ps(mTestFloatPointer2, mTestFloats[i + 4],
+ mTestFloats[i + 5], mTestFloats[i + 6],
+ mTestFloats[i + 7]);
+ }
+ return ret;
+}
+
+result_t SSE2NEONTestImpl::loadTestIntPointers(uint32_t i)
+{
+ result_t ret =
+ do_mm_store_ps(mTestIntPointer1, mTestInts[i], mTestInts[i + 1],
+ mTestInts[i + 2], mTestInts[i + 3]);
+ if (ret == TEST_SUCCESS) {
+ ret =
+ do_mm_store_ps(mTestIntPointer2, mTestInts[i + 4], mTestInts[i + 5],
+ mTestInts[i + 6], mTestInts[i + 7]);
+ }
+
+ return ret;
+}
+
+result_t SSE2NEONTestImpl::runSingleTest(InstructionTest test, uint32_t i)
+{
+ result_t ret = TEST_SUCCESS;
+
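+ // INTRIN_LIST is an X-macro list: each entry expands to a case label that
+ // dispatches to the corresponding test_* function.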
+ switch (test) {
+#define _(x) \
+ case it_##x: \
+ ret = test_##x(*this, i); \
+ break;
+ INTRIN_LIST
+#undef _
+ }
+
+ return ret;
+}
+
+SSE2NEONTest *SSE2NEONTest::create(void)
+{
+ SSE2NEONTestImpl *st = new SSE2NEONTestImpl;
+ return static_cast<SSE2NEONTest *>(st);
+}
+
+} // namespace SSE2NEON