--- /dev/null
+++volk (3.1.1-4) unstable; urgency=medium
+++
+++ * fix clean rule to keep sse2neon.h in place
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 30 Jan 2024 19:00:06 -0500
+++
+++volk (3.1.1-3) unstable; urgency=medium
+++
+++ * Just patch in sse2neon.h where it should be.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 30 Jan 2024 13:36:17 -0500
+++
+++volk (3.1.1-2) unstable; urgency=medium
+++
+++ * arm64 build uses sse2neon.h
+++ update debian/watch and debian/rules to provide it
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 30 Jan 2024 13:36:08 -0500
+++
+++volk (3.1.1-1) unstable; urgency=medium
+++
+++ * New upstream release
+++ This is a maintenance release to fix subtle bugs in many areas and to
+++ improve our tests where possible. All in all, our CI is more stable now
+++ and catches more errors.
+++ CI fixes
+++ - Allow for rounding error in float-to-int conversions
+++ - Allow for rounding error in `volk_32fc_s32f_magnitude_16i`
+++ - Allow for rounding error in float-to-int interleave
+++ - Add missing `volk_16_byteswap_u_orc` to puppet
+++ - Fix 64-bit integer testing
+++ - Build and test neonv7 protokernels on armv7
+++
+++ kernels
+++ - Remove broken sse32 kernels
+++ - Fix flaky `fm_detect` test
+++ - Fix flaky `mod_range` test
+++ - Remove unnecessary volatiles from `volk_32fc_s32f_magnitude_16i`
+++ - Remove SSE protokernels written in assembly
+++ - Remove inline assembler from `volk_32fc_convert_16ic_neon`
+++ - Use bit shifts in generic and `byte_shuffle` reverse
+++ - Remove disabled SSE4.1 dot product
+++ - Fix `conv_k7_r2` kernel and puppet
+++ - Remove unused argument from renormalize
+++ - Align types in ORC function signatures
+++ - Uncomment AVX2 implementation
+++ - Renormalize in every iteration on AVX2
+++ - Remove extraneous permutations
+++ - Compute the minimum over both register lanes
+++ - `volk_32fc_s32f_atan2_32f`: Add NaN tests for avx2 and avx2fma code
+++
+++ fixes
+++ - Express version information in decimal
+++ - Remove `__VOLK_VOLATILE`
+++ - Remove references to simdmath library
+++ - cmake: Switch to GNUInstallDirs
+++ - fprintf: Remove fprintf statements from `volk_malloc`
+++ - release: Prepare release with updated files
+++ - Move the sse2neon.h file to a git submodule to avoid random copies.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 30 Jan 2024 00:13:23 -0500
+++
+++volk (3.1.0-3) unstable; urgency=medium
+++
+++ * add powerpc to cpu-features dependency
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 16 Dec 2023 09:31:28 -0500
+++
+++volk (3.1.0-2) unstable; urgency=medium
+++
+++ * Avoid ORC on x32; it led to test failures
+++ * include argilo-volk/all-i386-patches
+++ * update available cpu-features architecture list
+++ * Express version information in decimal
+++ * add debian/upstream/metadata
+++ * clean __pycache__ (Closes: #1048046)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 15 Dec 2023 17:57:14 -0500
+++
+++volk (3.1.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ Improved CMake build
+++ Better tests
+++ New kernel API (old API still available)
+++ New and Updated kernels
+++ * Upload to experimental for soversion bump
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 05 Dec 2023 18:12:20 -0500
+++
+++volk (3.0.0-2) unstable; urgency=medium
+++
+++ * upload to unstable
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 15 Jul 2023 21:58:53 -0400
+++
+++volk (3.0.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ - License switch to LGPLv3+
+++ - Fix build for 32 bit arm with neon
+++ - Add experimental support for MIPS and RISC-V
+++ * Upload to experimental for package renames and soversion bump
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 14 Jan 2023 14:01:06 -0500
+++
+++volk (2.5.2-3) unstable; urgency=medium
+++
+++ * orc 1:0.4.33-1 dropped building static library,
+++ so now volk will drop building its static library too. (Closes: #1026593)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 20 Dec 2022 20:03:23 -0500
+++
+++volk (2.5.2-2) unstable; urgency=medium
+++
+++ * revert changes to kernels/volk/volk_8u_x2_encodeframepolar_8u.h
+++ made by make-acc-happy patch since version 1.3-1 (Closes: #1021856)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 15 Oct 2022 23:41:48 -0400
+++
+++volk (2.5.2-1) unstable; urgency=medium
+++
+++ * New upstream release.
+++ * volk_8u_x4_conv_k7_r2_8u
+++ - Add NEON implementation `neonspiral` via `sse2neon.h`
+++ * Fixes
+++ - Fix out-of-bounds reads
+++ - Fix broken neon kernels
+++ - Fix float to int conversion
+++ * CMake
+++ - Suppress superfluous warning
+++ - Fix Python install path calculation and documentation
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 04 Sep 2022 12:00:56 -0400
+++
+++volk (2.5.1-2) unstable; urgency=medium
+++
+++ * VolkPython use posix prefix scheme (Closes: #1009394)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 12 Apr 2022 18:39:33 -0400
+++
+++volk (2.5.1-1) unstable; urgency=medium
+++
+++ * New upstream release.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 13 Feb 2022 00:18:58 -0500
+++
+++volk (2.5.0-2) unstable; urgency=medium
+++
+++ * upload to unstable
+++ * with some upstream bugfixes
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 21 Oct 2021 23:30:05 -0400
+++
+++volk (2.5.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ * Use libcpu-features-dev on powerpc and x32 (Closes: #978602)
+++ * Mention volk-config-info and volk_modtool in description (Closes: #989263)
+++ * Upload to experimental for soversion bump
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 10 Jun 2021 18:29:47 -0400
+++
+++volk (2.4.1-2) unstable; urgency=medium
+++
+++ [ Shengjing Zhu ]
+++ * Use system cpu_features package
+++
+++ [ A. Maitland Bottoms ]
+++ * Adopt "Use system cpu_features package" patch (Closes: #978096)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 27 Dec 2020 15:16:07 -0500
+++
+++volk (2.4.1-1) unstable; urgency=medium
+++
+++ * New upstream release
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 17 Dec 2020 23:53:21 -0500
+++
+++volk (2.4.0-4) unstable; urgency=medium
+++
+++ * skip cpu_features on "Unsupported OS" kFreeBSD
+++ * bump Standards-Version - no other changes.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 15 Dec 2020 19:53:16 -0500
+++
+++volk (2.4.0-3) unstable; urgency=medium
+++
+++ * Fix binary-indep build (Closes: #976300)
+++ * Upload to unstable
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 03 Dec 2020 20:43:29 -0500
+++
+++volk (2.4.0-2) experimental; urgency=medium
+++
+++ * Make use of cpu_features a CMake option with sensible defaults per arch
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Mon, 30 Nov 2020 16:19:19 -0500
+++
+++volk (2.4.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ * cpu_features git submodule packaged as cpu-features source component.
+++ * Upload to experimental for soversion bump
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 22 Nov 2020 12:35:43 -0500
+++
+++volk (2.3.0-3) unstable; urgency=medium
+++
+++ * update to v2.3.0-14-g91e5d07
+++ emit an emms instruction after using the mmx extension
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 30 Jun 2020 19:48:20 -0400
+++
+++volk (2.3.0-2) unstable; urgency=medium
+++
+++ * Upload to unstable
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Mon, 11 May 2020 07:26:03 -0400
+++
+++volk (2.3.0-1) experimental; urgency=medium
+++
+++ * New upstream release, to experimental for soversion bump
+++ * Kernels
+++ - volk: accurate exp kernel
+++ - exp: Rename SSE4.1 to SSE2 kernel
+++ - Add 32f_s32f_add_32f kernel
+++ - This kernel adds in vector + scalar functionality
+++ - Fix the broken index max kernels
+++ - Treat the mod_range puppet as such
+++ - Add puppet for power spectral density kernel
+++ - Updated log10 calcs to use faster log2 approach
+++ - fix: Use unaligned load
+++ - divide: Optimize complexmultiplyconjugate
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 09 May 2020 15:42:23 -0400
+++
+++volk (2.2.1-3) unstable; urgency=medium
+++
+++ * update to v2.2.1-34-gd4756c5
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 05 Apr 2020 10:37:46 -0400
+++
+++volk (2.2.1-2) unstable; urgency=medium
+++
+++ * update to v2.2.1-11-gfaf230e
+++ * cmake: Remove the ORC from the VOLK public link interface
+++ * Fix the broken index max kernels
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 27 Mar 2020 21:48:10 -0400
+++
+++volk (2.2.1-1) unstable; urgency=high
+++
+++ * New upstream bugfix release
+++ reason for high urgency:
+++ - Fix loop bound in AVX rotator (only one fixed in 2.2.0-3)
+++ - Fix out-of-bounds read in AVX2 square dist kernel
+++ - Fix length checks in AVX2 index max kernels
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Mon, 24 Feb 2020 18:08:05 -0500
+++
+++volk (2.2.0-3) unstable; urgency=high
+++
+++ * Update to v2.2.0-6-g5701f8f
+++ reason for high urgency:
+++ - Fix loop bound in AVX rotator
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 23 Feb 2020 23:49:18 -0500
+++
+++volk (2.2.0-2) unstable; urgency=medium
+++
+++ * Upload to unstable
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 18 Feb 2020 17:56:58 -0500
+++
+++volk (2.2.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ - Remove build dependency on python six
+++ - Fixup VolkConfigVersion
+++ - add volk_version.h
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 16 Feb 2020 18:25:20 -0500
+++
+++volk (2.1.0-2) unstable; urgency=medium
+++
+++ * Upload to unstable
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 05 Jan 2020 23:17:57 -0500
+++
+++volk (2.1.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ - The AVX FMA rotator bug is fixed
+++ - VOLK offers `volk::vector<>` for C++ to follow RAII
+++ - Use C++17 `std::filesystem`
+++ - This enables VOLK to be built without Boost when C++17 `std::filesystem` is available
+++ - lots of bugfixes
+++ - more optimized kernels, especially more NEON versions
+++ * Upload to experimental for new ABI library package libvolk2.1
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 22 Dec 2019 10:27:36 -0500
+++
+++volk (2.0.0-3) unstable; urgency=medium
+++
+++ * update to v2.0.0-4-gf04a46f
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 14 Nov 2019 22:47:23 -0500
+++
+++volk (2.0.0-2) unstable; urgency=medium
+++
+++ * Upload to unstable
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Mon, 12 Aug 2019 22:49:11 -0400
+++
+++volk (2.0.0-1) experimental; urgency=medium
+++
+++ * New upstream release
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Wed, 07 Aug 2019 23:31:20 -0400
+++
+++volk (1.4-4) unstable; urgency=medium
+++
+++ * working volk_modtool with Python 3
+++ * build and install libvolk.a
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Mon, 29 Oct 2018 01:32:05 -0400
+++
+++volk (1.4-3) unstable; urgency=medium
+++
+++ * update to v1.4-9-g297fefd
+++ Added an AVX protokernel for volk_32fc_x2_32f_square_dist_scalar_mult_32f
+++ Fixed a buffer over-read and over-write in
+++ volk_32fc_x2_s32f_square_dist_scalar_mult_32f_a_avx
+++ Fixed 32u_reverse_32u for ARM
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 12 May 2018 15:25:04 -0400
+++
+++volk (1.4-2) unstable; urgency=medium
+++
+++ * Upload to unstable, needed by gnuradio (>= 3.7.12.0)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 03 Apr 2018 01:03:19 -0400
+++
+++volk (1.4-1) experimental; urgency=medium
+++
+++ * New upstream release
+++ upstream changelog http://libvolk.org/release-v14.html
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 27 Mar 2018 22:57:42 -0400
+++
+++volk (1.3.1-1) unstable; urgency=medium
+++
+++ * New upstream bugfix release
+++ * Refresh all debian patches for use with git am
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 27 Mar 2018 21:54:29 -0400
+++
+++volk (1.3-3) unstable; urgency=medium
+++
+++ * update to v1.3-23-g0109b2e
+++ * update debian/libvolk1-dev.abi.tar.gz.amd64
+++ * Add breaks/replaces gnuradio (<=3.7.2.1) (LP: #1614235)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 04 Feb 2018 13:12:21 -0500
+++
+++volk (1.3-2) unstable; urgency=medium
+++
+++ * update to v1.3-16-g28b03a9
+++ apps: fix profile update reading end of lines
+++ qa: lower tolerance for 32fc_mag to fix issue #96
+++ * include upstream master patch to sort input files
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 27 Aug 2017 13:44:55 -0400
+++
+++volk (1.3-1) unstable; urgency=medium
+++
+++ * New upstream release
+++ * The index_max kernels were named with the wrong output datatype. To
+++ fix this, there are new kernels that return a 32u (uint32_t) and the
+++ existing kernels had their signatures changed to return 16u (uint16_t).
+++ * The output to stdout and stderr has been shuffled around. There is no
+++ longer a message that prints what VOLK machine is being used and the
+++ warning messages go to stderr rather than stdout.
+++ * The 32fc_index_max kernels previously were only accurate to the SSE
+++ register width (4 points). This was a pretty serious and long-lived
+++ bug that's been fixed and the QA updated appropriately.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 02 Jul 2016 16:30:47 -0400
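+++
+++ As a hedged illustration of the renamed kernels above (not part of this
+++ package), the C sketch below assumes the upstream 1.3 dispatcher signature
+++ of `volk_32f_index_max_32u`, which writes an unsigned 32-bit index:
+++
+++     #include <stdint.h>
+++     #include <volk/volk.h>
+++
+++     /* Return the position of the largest sample; since VOLK 1.3 the
+++      * full-width variant is volk_32f_index_max_32u (the older name now
+++      * returns a uint16_t index). */
+++     static uint32_t example_index_max(const float* samples, uint32_t num_points)
+++     {
+++         uint32_t index = 0;
+++         volk_32f_index_max_32u(&index, samples, num_points);
+++         return index;
+++     }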
+++
+++volk (1.2.2-2) unstable; urgency=medium
+++
+++ * update to v1.2.2-11-g78c8bc4 (to follow gnuradio maint branch)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 19 Jun 2016 14:44:15 -0400
+++
+++volk (1.2.2-1) unstable; urgency=medium
+++
+++ * New upstream release
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 08 Apr 2016 00:12:10 -0400
+++
+++volk (1.2.1-2) unstable; urgency=medium
+++
+++ * Upstream patches:
+++ Fix some CMake complaints
+++ Fix compilation with CMake 3.5
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Wed, 23 Mar 2016 17:47:54 -0400
+++
+++volk (1.2.1-1) unstable; urgency=medium
+++
+++ * New upstream release
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 07 Feb 2016 19:38:32 -0500
+++
+++volk (1.2-1) unstable; urgency=medium
+++
+++ * New upstream release
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 24 Dec 2015 20:28:13 -0500
+++
+++volk (1.1.1-5) experimental; urgency=medium
+++
+++ * update to v1.1.1-22-gef53547 to support gnuradio 3.7.9
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 11 Dec 2015 13:12:55 -0500
+++
+++volk (1.1.1-4) unstable; urgency=medium
+++
+++ * more lintian fixes
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Wed, 25 Nov 2015 21:49:58 -0500
+++
+++volk (1.1.1-3) unstable; urgency=medium
+++
+++ * Lintian fixes Pre-Depends
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 19 Nov 2015 21:24:27 -0500
+++
+++volk (1.1.1-2) unstable; urgency=medium
+++
+++ * Note that libvolk1-dev replaces files in gnuradio-dev versions <<3.7.8
+++ (Closes: #802646) again. Thanks Andreas Beckmann.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 13 Nov 2015 18:45:49 -0500
+++
+++volk (1.1.1-1) unstable; urgency=medium
+++
+++ * New upstream release
+++ * New architectures exist for the AVX2 and FMA ISAs.
+++ * The profiler now generates buffers that are vlen + a tiny amount and
+++ generates random data to fill buffers. This is intended to catch bugs
+++ in protokernels that write beyond num_points.
+++ * Note that libvolk1-dev replaces files in earlier gnuradio-dev versions
+++ (Closes: #802646)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 01 Nov 2015 18:45:43 -0500
+++
+++volk (1.1-4) unstable; urgency=medium
+++
+++ * update to v1.1-12-g264addc
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Tue, 29 Sep 2015 23:41:50 -0400
+++
+++volk (1.1-3) unstable; urgency=low
+++
+++ * drop dh_acc to get reproducible builds
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 11 Sep 2015 22:57:06 -0400
+++
+++volk (1.1-2) unstable; urgency=low
+++
+++ * use dh-acc
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Mon, 07 Sep 2015 15:45:20 -0400
+++
+++volk (1.1-1) unstable; urgency=medium
+++
+++ * re-organize package naming convention
+++ * New upstream release tag v1.1
+++ New architectures exist for the AVX2 and FMA ISAs. Along
+++ with the build-system support, the following kernels now have
+++ proto-kernels taking advantage of these architectures:
+++
+++ * 32f_x2_dot_prod_32f
+++ * 32fc_x2_multiply_32fc
+++ * 64_byteswap
+++ * 32f_binary_slicer_8i
+++ * 16u_byteswap
+++ * 32u_byteswap
+++
+++ QA/profiler
+++ -----------
+++
+++ The profiler now generates buffers that are vlen + a tiny
+++ amount and generates random data to fill buffers. This is
+++ intended to catch bugs in protokernels that write beyond
+++ num_points.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Wed, 26 Aug 2015 09:22:48 -0400
+++
+++volk (1.0.2-2) unstable; urgency=low
+++
+++ * Use SOURCE_DATE_EPOCH from the environment, if defined,
+++ rather than current date and time to implement volk_build_date()
+++ (embedding build date in a library does not help reproducible builds)
+++ * add watch file
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 15 Aug 2015 17:43:15 -0400
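+++
+++ For illustration only, a minimal C sketch of the SOURCE_DATE_EPOCH pattern
+++ described in the entry above; it is not the packaged volk_build_date()
+++ code, and the helper name is made up:
+++
+++     #include <stdlib.h>
+++     #include <time.h>
+++
+++     /* Prefer SOURCE_DATE_EPOCH (seconds since the Unix epoch, as a decimal
+++      * string) over the current time, so the stamped build date is
+++      * reproducible. */
+++     static time_t build_timestamp(void)
+++     {
+++         const char* epoch = getenv("SOURCE_DATE_EPOCH");
+++         if (epoch != NULL)
+++             return (time_t)strtoll(epoch, NULL, 10);
+++         return time(NULL); /* fall back to the current date and time */
+++     }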
+++
+++volk (1.0.2-1) unstable; urgency=medium
+++
+++ * Maintenance release 24 Jul 2015 by Nathan West
+++ * The major change is the CMake logic to add ASM protokernels. Rather
+++ than depending on CFLAGS and ASMFLAGS we use the results of VOLK's
+++ built in has_ARCH tests. All configurations should work the same as
+++ before, but manually specifying CFLAGS and ASMFLAGS on the cmake call
+++ for ARM native builds should no longer be necessary.
+++ * The 32fc_s32fc_x2_rotator_32fc generic protokernel now includes a
+++ previously implied header.
+++ * Finally, there is a fix to return the "best" protokernel to the
+++ dispatcher when no volk_config exists. Thanks to Alexandre Raymond for
+++ pointing this out.
+++ * with maint branch patch:
+++ kernels-add-missing-include-arm_neon.h
+++ * removed unused build-dependency on liboil0.3-dev (closes: #793626)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Wed, 05 Aug 2015 00:43:40 -0400
+++
+++volk (1.0.1-1) unstable; urgency=low
+++
+++ * Maintenance Release v1.0.1 08 Jul 2015 by Nathan West
+++ This is a maintenance release with bug fixes since the initial release of
+++ v1.0 in April.
+++
+++ * Contributors
+++
+++ The following authors have contributed code to this release:
+++
+++ Doug Geiger doug.geiger@bioradiation.net
+++ Elliot Briggs elliot.briggs@gmail.com
+++ Marcus Mueller marcus@hostalia.de
+++ Nathan West nathan.west@okstate.edu
+++ Tom Rondeau tom@trondeau.com
+++
+++ * Kernels
+++
+++ Several bug fixes in different kernels. The NEON implementations of the
+++ following kernels have been fixed:
+++
+++ 32f_x2_add_32f
+++ 32f_x2_dot_prod_32f
+++ 32fc_s32fc_multiply_32fc
+++ 32fc_x2_multiply_32fc
+++
+++ Additionally the NEON asm based 32f_x2_add_32f protokernels were not being
+++ used and are now included and available for use via the dispatcher.
+++
+++ The 32f_s32f_x2_fm_detect_32f kernel now has a puppet. This solves QA seg
+++ faults on 32-bit machines and provides a better test for this kernel.
+++
+++ The 32fc_s32fc_x2_rotator_32fc generic protokernel replaced cabsf with
+++ hypotf for better Android support.
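+++
+++ A small illustrative sketch (not upstream code) of why that swap is safe:
+++ hypotf() on the real and imaginary parts computes the same magnitude as
+++ cabsf(), which the entry above says was replaced for better Android support:
+++
+++     #include <complex.h>
+++     #include <math.h>
+++
+++     /* |z| for a single-precision complex value; equivalent to cabsf(z). */
+++     static float magnitude_32fc(float complex z)
+++     {
+++         return hypotf(crealf(z), cimagf(z));
+++     }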
+++
+++ * Building
+++
+++ Static builds now cause the applications (volk_profile and
+++ volk-config-info) to be statically linked.
+++
+++ The file gcc_x86_cpuid.h has been removed since it was no longer being
+++ used. Previously it provided cpuid functionality for ancient compilers
+++ that we do not support.
+++
+++ All build types now use -Wall.
+++
+++ * QA and Testing
+++
+++ The documentation around the --update option to volk_profile now makes it
+++ clear that the option will only profile kernels without entries in
+++ volk_profile. The signature of run_volk_tests with expanded args changed
+++ signed types to unsigned types to reflect the actual input.
+++
+++ The remaining changes are all non-functional changes to address issues
+++ from Coverity.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Fri, 10 Jul 2015 17:57:42 -0400
+++
+++volk (1.0-5) unstable; urgency=medium
+++
+++ * native-armv7-build-support skips neon on Debian armel (Closes: #789972)
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sat, 04 Jul 2015 12:36:36 -0400
+++
+++volk (1.0-4) unstable; urgency=low
+++
+++ * update native-armv7-build-support patch from gnuradio volk package
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 25 Jun 2015 16:38:49 -0400
+++
+++volk (1.0-3) unstable; urgency=medium
+++
+++ * Add Breaks/Replaces (Closes: #789893, #789894)
+++ * Allow failing tests
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Thu, 25 Jun 2015 12:46:06 -0400
+++
+++volk (1.0-2) unstable; urgency=medium
+++
+++ * kernels-add-missing-math.h-include-to-rotator
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Wed, 24 Jun 2015 21:09:32 -0400
+++
+++volk (1.0-1) unstable; urgency=low
+++
+++ * Initial package (Closes: #782417)
+++ Initial Release 11 Apr 2015 by Nathan West
+++
+++ VOLK 1.0 is available. This is the first release of VOLK as an independently
+++ tracked sub-project of GNU Radio.
+++
+++ * Contributors
+++
+++ VOLK has been tracked separately from GNU Radio since 2014 Dec 23.
+++ Contributors between the split and the initial release are
+++
+++ Albert Holguin aholguin_77@yahoo.com
+++ Doug Geiger doug.geiger@bioradiation.net
+++ Elliot Briggs elliot.briggs@gmail.com
+++ Julien Olivain julien.olivain@lsv.ens-cachan.fr
+++ Michael Dickens michael.dickens@ettus.com
+++ Nathan West nathan.west@okstate.edu
+++ Tom Rondeau tom@trondeau.com
+++
+++ * QA
+++
+++ The test and profiler have significantly changed. The profiler supports
+++ run-time changes to vlen and iters to help kernel development and provide
+++ more flexibility on embedded systems. Additionally, there is a new option
+++ to update an existing volk_profile results file with only new kernels, which
+++ will save time when updating to newer versions of VOLK.
+++
+++ The QA system creates a static list of kernels and test cases. The QA
+++ testing and profiler iterate over this static list rather than each source
+++ file keeping its own list. The QA also emits XML results to
+++ lib/.unittest/kernels.xml which is formatted similarly to JUnit results.
+++
+++ * Modtool
+++
+++ Modtool was updated to support the QA and profiler changes.
+++
+++ * Kernels
+++
+++ New proto-kernels:
+++
+++ 16ic_deinterleave_real_8i_neon
+++ 16ic_s32f_deinterleave_32f_neon
+++
+++ Fixed preprocessor errors for some compilers on the byteswap and popcount puppets.
+++
+++ ORC was moved to the asm kernels directory.
+++
+++ * volk_malloc
+++
+++ The posix_memalign implementation of volk_malloc now falls back to a standard
+++ malloc if alignment is 1.
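+++
+++ A minimal sketch of that fallback (not the library's exact implementation;
+++ the helper name is hypothetical), assuming a POSIX posix_memalign():
+++
+++     #include <stdlib.h>
+++
+++     /* posix_memalign() rejects an alignment of 1 (it must be a power of two
+++      * multiple of sizeof(void*)), so alignment 1 falls back to malloc(). */
+++     static void* aligned_malloc_sketch(size_t size, size_t alignment)
+++     {
+++         if (alignment == 1)
+++             return malloc(size);
+++         void* ptr = NULL;
+++         if (posix_memalign(&ptr, alignment, size) != 0)
+++             return NULL;
+++         return ptr;
+++     }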
+++
+++ * Miscellaneous
+++
+++ Several build system and cmake changes have made it possible to build VOLK
+++ both independently with proper soname versions and in-tree for projects
+++ such as GNU Radio.
+++
+++ The static builds take advantage of cmake object libraries to speed up builds.
+++
+++ Finally, there are a number of changes to satisfy compiler warnings and make
+++ QA work on multiple machines.
+++
+++ -- A. Maitland Bottoms <bottoms@debian.org> Sun, 12 Apr 2015 23:20:41 -0400
--- /dev/null
+++Source: volk
+++Section: libdevel
+++Priority: optional
+++Maintainer: A. Maitland Bottoms <bottoms@debian.org>
+++Build-Depends: cmake,
+++ debhelper-compat (= 13),
+++ dh-python,
+++ libcpu-features-dev [amd64 arm64 armel armhf i386 mips64el ppc64 ppc64el riscv64 s390x powerpc x32],
+++ liborc-0.4-dev [!x32],
+++ python3-dev,
+++ python3-mako
+++Build-Depends-Indep: doxygen, graphviz
+++Standards-Version: 4.6.2
+++Rules-Requires-Root: no
+++Homepage: https://libvolk.org
+++Vcs-Browser: https://salsa.debian.org/bottoms/pkg-volk
+++Vcs-Git: https://salsa.debian.org/bottoms/pkg-volk.git
+++
+++Package: libvolk3.1
+++Section: libs
+++Architecture: any
+++Pre-Depends: ${misc:Pre-Depends}
+++Depends: ${misc:Depends}, ${shlibs:Depends}
+++Multi-Arch: same
+++Recommends: libvolk-bin
+++Suggests: libvolk-dev
+++Description: vector optimized functions
+++ Vector-Optimized Library of Kernels is designed to help applications
+++ work with the processor's SIMD instruction sets. These are very
+++ powerful vector operations that can give signal processing a huge
+++ boost in performance.
+++
+++Package: libvolk-dev
+++Architecture: any
+++Pre-Depends: ${misc:Pre-Depends}
+++Depends: libvolk3.1 (=${binary:Version}), ${misc:Depends}
+++Breaks: libvolk1-dev, libvolk1.0-dev, libvolk2-dev
+++Replaces: libvolk1-dev, libvolk1.0-dev, libvolk2-dev
+++Suggests: libvolk-doc
+++Multi-Arch: same
+++Description: vector optimized function headers
+++ Vector-Optimized Library of Kernels is designed to help applications
+++ work with the processor's SIMD instruction sets. These are very
+++ powerful vector operations that can give signal processing a huge
+++ boost in performance.
+++ .
+++ This package contains the header files.
+++ For documentation, see libvolk-doc.
+++
+++Package: libvolk-bin
+++Section: libs
+++Architecture: any
+++Pre-Depends: ${misc:Pre-Depends}
+++Depends: libvolk3.1 (=${binary:Version}),
+++ ${misc:Depends},
+++ ${python3:Depends},
+++ ${shlibs:Depends}
+++Breaks: libvolk1-bin, libvolk1.0-bin, libvolk2-bin
+++Replaces: libvolk1-bin, libvolk1.0-bin, libvolk2-bin
+++Description: vector optimized runtime tools
+++ Vector-Optimized Library of Kernels is designed to help applications
+++ work with the processor's SIMD instruction sets. These are very
+++ powerful vector operations that can give signal processing a huge
+++ boost in performance.
+++ .
+++ This package includes: the volk_profile tool to customize settings for
+++ the system; volk_modtool to create new optimized modules; and
+++ volk-config-info to show settings.
+++
+++Package: libvolk-doc
+++Section: doc
+++Architecture: all
+++Multi-Arch: foreign
+++Depends: ${misc:Depends}
+++Recommends: www-browser
+++Description: vector optimized library documentation
+++ Vector-Optimized Library of Kernels is designed to help applications
+++ work with the processor's SIMD instruction sets. These are very
+++ powerful vector operations that can give signal processing a huge
+++ boost in performance.
+++ .
+++ This package includes the Doxygen-generated documentation in
+++ /usr/share/doc/libvolk-dev/html/index.html
--- /dev/null
+++Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+++Upstream-Name: volk
+++Upstream-Contact: http://libvolk.org/
+++Source:
+++ https://github.com/gnuradio/volk
+++Comment:
+++ Debian packages by A. Maitland Bottoms <bottoms@debian.org>
+++ .
+++ Upstream Maintainers:
+++ Johannes Demel <demel@uni-bremen.de>
+++ Michael Dickens <michael.dickens@ettus.com>
+++Copyright: 2014-2023 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: *
+++Copyright: 2006, 2009-2023, Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: apps/volk_profile.h
+++Copyright: 2014-2020 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: appveyor.yml
+++Copyright: 2016 Paul Cercueil <paul.cercueil@analog.com>
+++License: LGPL-3+
+++
+++Files: cmake/*
+++Copyright: 2014-2020 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: cmake/Modules/*
+++Copyright: 2006, 2009-2020, Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: cmake/Modules/CMakeParseArgumentsCopy.cmake
+++Copyright: 2010 Alexander Neundorf <neundorf@kde.org>
+++License: Kitware-BSD
+++ All rights reserved.
+++ .
+++ Redistribution and use in source and binary forms, with or without
+++ modification, are permitted provided that the following conditions
+++ are met:
+++ .
+++ * Redistributions of source code must retain the above copyright
+++ notice, this list of conditions and the following disclaimer.
+++ .
+++ * Redistributions in binary form must reproduce the above copyright
+++ notice, this list of conditions and the following disclaimer in the
+++ documentation and/or other materials provided with the distribution.
+++ .
+++ * Neither the names of Kitware, Inc., the Insight Software Consortium,
+++ nor the names of their contributors may be used to endorse or promote
+++ products derived from this software without specific prior written
+++ permission.
+++ .
+++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+++ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+++
+++Files: cmake/Modules/FindORC.cmake
+++ cmake/Modules/VolkConfig.cmake.in
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: cmake/msvc/*
+++Copyright: 2006-2008, Alexander Chemeris
+++License: BSD-2-clause
+++ Redistribution and use in source and binary forms, with or without
+++ modification, are permitted provided that the following conditions are met:
+++ .
+++ 1. Redistributions of source code must retain the above copyright notice,
+++ this list of conditions and the following disclaimer.
+++ .
+++ 2. Redistributions in binary form must reproduce the above copyright
+++ notice, this list of conditions and the following disclaimer in the
+++ documentation and/or other materials provided with the distribution.
+++ .
+++ 3. The name of the author may be used to endorse or promote products
+++ derived from this software without specific prior written permission.
+++ .
+++ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+++ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+++ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+++ EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+++ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+++ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+++ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+++ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+++ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+++
+++Files: debian/*
+++Copyright: 2015-2020 Free Software Foundation, Inc
+++License: LGPL-3+
+++Comment: assigned by A. Maitland Bottoms <bottoms@debian.org>
+++
+++Files: docs/*
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: gen/archs.xml
+++ gen/machines.xml
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: include/volk/volk_common.h
+++ include/volk/volk_complex.h
+++ include/volk/volk_prefs.h
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: kernels/volk/asm/*
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: kernels/volk/volk_16u_byteswappuppet_16u.h
+++ kernels/volk/volk_32u_byteswappuppet_32u.h
+++ kernels/volk/volk_64u_byteswappuppet_64u.h
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: lib/kernel_tests.h
+++ lib/qa_utils.cc
+++ lib/qa_utils.h
+++ lib/volk_prefs.c
+++Copyright: 2014-2015 Free Software Foundation, Inc.
+++License: LGPL-3+
+++
+++Files: sse2neon/*
+++Copyright: 2015-2023 SSE2NEON Contributors
+++License: MIT
+++ Permission is hereby granted, free of charge, to any person obtaining a copy
+++ of this software and associated documentation files (the "Software"), to deal
+++ in the Software without restriction, including without limitation the rights
+++ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+++ copies of the Software, and to permit persons to whom the Software is
+++ furnished to do so, subject to the following conditions:
+++ .
+++ The above copyright notice and this permission notice shall be included in all
+++ copies or substantial portions of the Software.
+++ .
+++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+++ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+++ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+++ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+++ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+++ SOFTWARE.
+++
+++License: LGPL-3+
+++ This program is free software: you can redistribute it and/or modify
+++ it under the terms of the GNU Lesser General Public License as published by
+++ the Free Software Foundation; either version 3 of the License, or
+++ (at your option) any later version.
+++ .
+++ This program is distributed in the hope that it will be useful,
+++ but WITHOUT ANY WARRANTY; without even the implied warranty of
+++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+++ GNU General Public License for more details.
+++ .
+++ You should have received a copy of the GNU Lesser General Public License
+++ along with this program. If not, see <http://www.gnu.org/licenses/>.
+++ .
+++ On Debian systems, the complete text of the GNU Lesser General
+++ Public License version 3 can be found in "/usr/share/common-licenses/LGPL-3".
--- /dev/null
+++usr/bin/volk*
+++usr/lib/python3*/site-packages/* usr/lib/python3/dist-packages/
--- /dev/null
+++debian/volk-config-info.1
+++debian/volk_modtool.1
+++debian/volk_profile.1
--- /dev/null
+++usr/include/*
+++usr/lib/*/*volk*so
+++usr/lib/*/cmake/volk
+++usr/lib/*/pkgconfig/*volk*
--- /dev/null
+++Document: libvolk-doc
+++Title: Vector-Optimized Library of Kernels Reference Manual
+++Author: GNU Radio Developers
+++Abstract: VOLK is the Vector-Optimized Library of Kernels.
+++ It is a library that contains kernels of hand-written SIMD code for
+++ different mathematical operations. Since each SIMD architecture can
+++ be very different and no compiler has yet come along to handle
+++ vectorization properly or highly efficiently, VOLK approaches the
+++ problem differently. For each architecture or platform that a
+++ developer wishes to vectorize for, a new proto-kernel is added to
+++ VOLK. At runtime, VOLK will select the correct proto-kernel. In this
+++ way, the users of VOLK call a kernel for performing the operation
+++ that is platform/architecture agnostic. This allows us to write
+++ portable SIMD code.
+++Section: Programming/C++
+++
+++Format: HTML
+++Index: /usr/share/doc/libvolk-dev/html/index.html
+++Files: /usr/share/doc/libvolk-dev/html/*.html
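+++
+++ As a hedged example of the runtime dispatch described in the abstract
+++ (illustrative only, not shipped in any of these packages), a caller invokes
+++ the generic kernel name and VOLK picks the best proto-kernel for the host
+++ CPU at runtime:
+++
+++     #include <stdio.h>
+++     #include <volk/volk.h>
+++
+++     int main(void)
+++     {
+++         unsigned int num_points = 1024;
+++         size_t alignment = volk_get_alignment();
+++
+++         /* volk_malloc() returns memory aligned for the widest SIMD unit */
+++         float* a = (float*)volk_malloc(num_points * sizeof(float), alignment);
+++         float* b = (float*)volk_malloc(num_points * sizeof(float), alignment);
+++         float* c = (float*)volk_malloc(num_points * sizeof(float), alignment);
+++
+++         for (unsigned int i = 0; i < num_points; i++) {
+++             a[i] = (float)i;
+++             b[i] = 2.0f;
+++         }
+++
+++         /* Generic entry point; the dispatcher selects an SSE, AVX, NEON,
+++          * generic, ... proto-kernel depending on the CPU it runs on. */
+++         volk_32f_x2_add_32f(c, a, b, num_points);
+++
+++         printf("c[10] = %f\n", c[10]);
+++
+++         volk_free(a);
+++         volk_free(b);
+++         volk_free(c);
+++         return 0;
+++     }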
--- /dev/null
+++obj-*/html
--- /dev/null
+++usr/lib/*/libvolk.so.*
--- /dev/null
+++usr/bin/list_cpu_features
+++usr/lib/*/cmake/CpuFeatures/CpuFeaturesConfig.cmake
+++usr/lib/*/cmake/CpuFeatures/CpuFeaturesConfigVersion.cmake
+++usr/lib/*/cmake/CpuFeatures/CpuFeaturesTargets-relwithdebinfo.cmake
+++usr/lib/*/cmake/CpuFeatures/CpuFeaturesTargets.cmake
+++usr/lib/*/libcpu_features.a
--- /dev/null
+++Author: A. Maitland Bottoms <bottoms@debian.org>
+++Forwarded: not-needed
+++Description: doxygen pdf
+++ Work-In-Progress Debian inclusion of PDF format documentation.
+++
+++--- a/docs/CMakeLists.txt
++++++ b/docs/CMakeLists.txt
+++@@ -5,10 +5,18 @@
+++ # SPDX-License-Identifier: LGPL-3.0-or-later
+++ #
+++
++++option(ENABLE_DOXYGEN_PDF "Build Doxygen PDF" OFF)
+++ find_package(Doxygen)
+++ if(DOXYGEN_FOUND)
+++-
+++ message(STATUS "Doxygen found. Building docs ...")
++++find_package(LATEX COMPONENTS PDFLATEX)
++++
++++if(ENABLE_DOXYGEN_PDF AND LATEX_FOUND)
++++ set(enable_pdf_docs YES)
++++ message(STATUS "latex found. Building PDF docs ...")
++++else()
++++ set(enable_pdf_docs NO)
++++endif()
+++
+++ configure_file(
+++ ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
+++@@ -21,4 +29,11 @@
+++ COMMENT "Generating documentation with Doxygen" VERBATIM
+++ )
+++
++++add_custom_target(volk_pdf_doc
++++ make
++++ DEPENDS volk_doc
++++ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/latex
++++ COMMENT "Generating PDF documentation with Doxygen" VERBATIM
++++)
++++
+++ endif(DOXYGEN_FOUND)
+++--- a/docs/Doxyfile.in
++++++ b/docs/Doxyfile.in
+++@@ -1894,7 +1894,7 @@
+++ # If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+++ # The default value is: YES.
+++
+++-GENERATE_LATEX = NO
++++GENERATE_LATEX = @enable_pdf_docs@
+++
+++ # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+++ # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+++@@ -1914,7 +1914,7 @@
+++ # the output language.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++-LATEX_CMD_NAME = latex
++++LATEX_CMD_NAME = xelatex
+++
+++ # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+++ # index for LaTeX.
+++@@ -1951,7 +1951,7 @@
+++ # The default value is: a4.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++-PAPER_TYPE = a4
++++PAPER_TYPE = letter
+++
+++ # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+++ # that should be included in the LaTeX output. The package can be specified just
+++@@ -1963,7 +1963,7 @@
+++ # If left blank no extra packages will be included.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++-EXTRA_PACKAGES =
++++EXTRA_PACKAGES = {amsmath} {pmboxdraw} {unicode-math}
+++
+++ # The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
+++ # the generated LaTeX document. The header should contain everything until the
+++@@ -2429,7 +2429,7 @@
+++ # set to NO
+++ # The default value is: YES.
+++
+++-HAVE_DOT = NO
++++HAVE_DOT = YES
+++
+++ # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+++ # to run in parallel. When set to 0 doxygen will base this on the number of
--- /dev/null
+++Author: A. Maitland Bottoms <bottoms@debian.org>
+++Forwarded: not-needed
+++Description: doxygen without sse2neon
+++ Skip processing external sse2neon files.
+++ This improves privacy by not fetching sse2neon image URLs from GitHub.
+++
+++--- a/docs/Doxyfile.in
++++++ b/docs/Doxyfile.in
+++@@ -995,6 +995,7 @@
+++
+++ EXCLUDE = @CMAKE_BINARY_DIR@ \
+++ @CMAKE_SOURCE_DIR@/cpu_features \
++++ @CMAKE_SOURCE_DIR@/sse2neon \
+++ @CMAKE_SOURCE_DIR@/README.md \
+++ @CMAKE_SOURCE_DIR@/cmake \
+++ @CMAKE_SOURCE_DIR@/docs/AUTHORS_RESUBMITTING_UNDER_LGPL_LICENSE.md \
--- /dev/null
+++From 58cc2b105211f0e5beab4dc228b478ebe105be06 Mon Sep 17 00:00:00 2001
+++From: "A. Maitland Bottoms" <bottoms@debian.org>
+++Date: Sun, 4 Sep 2022 21:37:45 -0400
+++Subject: [PATCH] omit doxygen build paths
+++
+++Use reproducible-builds friendly configuration settings.
+++
+++Signed-off-by: A. Maitland Bottoms <bottoms@debian.org>
+++---
+++ docs/Doxyfile.in | 14 +++++++-------
+++ 1 file changed, 7 insertions(+), 7 deletions(-)
+++
+++--- a/docs/Doxyfile.in
++++++ b/docs/Doxyfile.in
+++@@ -157,7 +157,7 @@
+++ # will be relative from the directory where doxygen is started.
+++ # This tag requires that the tag FULL_PATH_NAMES is set to YES.
+++
+++-STRIP_FROM_PATH =
++++STRIP_FROM_PATH = @CMAKE_BINARY_DIR@ @CMAKE_SOURCE_DIR@
+++
+++ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+++ # path mentioned in the documentation of a class, which tells the reader which
+++@@ -166,7 +166,7 @@
+++ # specify the list of include paths that are normally passed to the compiler
+++ # using the -I flag.
+++
+++-STRIP_FROM_INC_PATH =
++++STRIP_FROM_INC_PATH = @CMAKE_SOURCE_DIR@ @CMAKE_BINARY_DIR@
+++
+++ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+++ # less readable) file names. This can be useful is your file systems doesn't
+++@@ -637,7 +637,7 @@
+++ # will mention the files that were used to generate the documentation.
+++ # The default value is: YES.
+++
+++-SHOW_USED_FILES = YES
++++SHOW_USED_FILES = NO
+++
+++ # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+++ # will remove the Files entry from the Quick Index and from the Folder Tree View
+++@@ -832,7 +832,7 @@
+++ # Note that relative paths are relative to the directory from which doxygen is
+++ # run.
+++
+++-EXCLUDE = @CMAKE_BINARY_DIR@ @CMAKE_SOURCE_DIR@/cpu_features @CMAKE_SOURCE_DIR@/README.md @CMAKE_SOURCE_DIR@/docs/AUTHORS_RESUBMITTING_UNDER_LGPL_LICENSE.md
++++EXCLUDE = @CMAKE_BINARY_DIR@ @CMAKE_SOURCE_DIR@/cpu_features @CMAKE_SOURCE_DIR@/README.md @CMAKE_SOURCE_DIR@/cmake @CMAKE_SOURCE_DIR@/docs/AUTHORS_RESUBMITTING_UNDER_LGPL_LICENSE.md @CMAKE_SOURCE_DIR@/apps @CMAKE_SOURCE_DIR@/lib/*qa* @CMAKE_SOURCE_DIR@/tmpl
+++
+++ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+++ # directories that are symbolic links (a Unix file system feature) are excluded
+++@@ -979,7 +979,7 @@
+++ # link to the documentation.
+++ # The default value is: YES.
+++
+++-REFERENCES_LINK_SOURCE = YES
++++REFERENCES_LINK_SOURCE = NO
+++
+++ # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+++ # source code will show a tooltip with additional information such as prototype,
+++@@ -989,7 +989,7 @@
+++ # The default value is: YES.
+++ # This tag requires that the tag SOURCE_BROWSER is set to YES.
+++
+++-SOURCE_TOOLTIPS = YES
++++SOURCE_TOOLTIPS = NO
+++
+++ # If the USE_HTAGS tag is set to YES then the references to source code will
+++ # point to the HTML generated by the htags(1) tool instead of doxygen built-in
+++@@ -1099,7 +1099,7 @@
+++ # that doxygen normally uses.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++-HTML_FOOTER =
++++HTML_FOOTER = ""
+++
+++ # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+++ # sheet that is used by each HTML page. It can be used to fine-tune the look of
--- /dev/null
+++Author: A. Maitland Bottoms <bottoms@debian.org>
+++Forwarded: not-needed
+++Description: optional static apps
+++ For Debian, build apps with static libs if ENABLE_STATIC_APPS.
+++
+++--- a/apps/CMakeLists.txt
++++++ b/apps/CMakeLists.txt
+++@@ -44,7 +44,7 @@
+++ endif()
+++ target_link_libraries(volk_profile PRIVATE std::filesystem)
+++
+++-if(ENABLE_STATIC_LIBS)
++++if(ENABLE_STATIC_LIBS AND ENABLE_STATIC_APPS)
+++ target_link_libraries(volk_profile PRIVATE volk_static)
+++ set_target_properties(volk_profile PROPERTIES LINK_FLAGS "-static")
+++ else()
+++@@ -61,7 +61,7 @@
+++ add_executable(volk-config-info volk-config-info.cc ${CMAKE_CURRENT_SOURCE_DIR}/volk_option_helpers.cc
+++ )
+++
+++-if(ENABLE_STATIC_LIBS)
++++if(ENABLE_STATIC_LIBS AND ENABLE_STATIC_APPS)
+++ target_link_libraries(volk-config-info volk_static)
+++ set_target_properties(volk-config-info PROPERTIES LINK_FLAGS "-static")
+++ else()
--- /dev/null
+++Author: A. Maitland Bottoms <bottoms@debian.org>
+++Forwarded: not-needed
+++Description: patch sse2neon here
+++ Simply manage sse2neon.h in debian/patches.
+++
+++--- /dev/null
++++++ b/include/volk/sse2neon/sse2neon.h
+++@@ -0,0 +1,9236 @@
++++#ifndef SSE2NEON_H
++++#define SSE2NEON_H
++++
++++// This header file provides a simple API translation layer
++++// between SSE intrinsics to their corresponding Arm/Aarch64 NEON versions
++++//
++++// Contributors to this work are:
++++// John W. Ratcliff <jratcliffscarab@gmail.com>
++++// Brandon Rowlett <browlett@nvidia.com>
++++// Ken Fast <kfast@gdeb.com>
++++// Eric van Beurden <evanbeurden@nvidia.com>
++++// Alexander Potylitsin <apotylitsin@nvidia.com>
++++// Hasindu Gamaarachchi <hasindu2008@gmail.com>
++++// Jim Huang <jserv@ccns.ncku.edu.tw>
++++// Mark Cheng <marktwtn@gmail.com>
++++// Malcolm James MacLeod <malcolm@gulden.com>
++++// Devin Hussey (easyaspi314) <husseydevin@gmail.com>
++++// Sebastian Pop <spop@amazon.com>
++++// Developer Ecosystem Engineering <DeveloperEcosystemEngineering@apple.com>
++++// Danila Kutenin <danilak@google.com>
++++// François Turban (JishinMaster) <francois.turban@gmail.com>
++++// Pei-Hsuan Hung <afcidk@gmail.com>
++++// Yang-Hao Yuan <yuanyanghau@gmail.com>
++++// Syoyo Fujita <syoyo@lighttransport.com>
++++// Brecht Van Lommel <brecht@blender.org>
++++// Jonathan Hue <jhue@adobe.com>
++++// Cuda Chen <clh960524@gmail.com>
++++// Aymen Qader <aymen.qader@arm.com>
++++// Anthony Roberts <anthony.roberts@linaro.org>
++++
++++/*
++++ * sse2neon is freely redistributable under the MIT License.
++++ *
++++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++++ * of this software and associated documentation files (the "Software"), to deal
++++ * in the Software without restriction, including without limitation the rights
++++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++++ * copies of the Software, and to permit persons to whom the Software is
++++ * furnished to do so, subject to the following conditions:
++++ *
++++ * The above copyright notice and this permission notice shall be included in
++++ * all copies or substantial portions of the Software.
++++ *
++++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++++ * SOFTWARE.
++++ */
++++
++++/* Tunable configurations */
++++
++++/* Enable precise implementation of math operations
++++ * This would slow down the computation a bit, but gives consistent result with
++++ * x86 SSE. (e.g. would solve a hole or NaN pixel in the rendering result)
++++ */
++++/* _mm_min|max_ps|ss|pd|sd */
++++#ifndef SSE2NEON_PRECISE_MINMAX
++++#define SSE2NEON_PRECISE_MINMAX (0)
++++#endif
++++/* _mm_rcp_ps and _mm_div_ps */
++++#ifndef SSE2NEON_PRECISE_DIV
++++#define SSE2NEON_PRECISE_DIV (0)
++++#endif
++++/* _mm_sqrt_ps and _mm_rsqrt_ps */
++++#ifndef SSE2NEON_PRECISE_SQRT
++++#define SSE2NEON_PRECISE_SQRT (0)
++++#endif
++++/* _mm_dp_pd */
++++#ifndef SSE2NEON_PRECISE_DP
++++#define SSE2NEON_PRECISE_DP (0)
++++#endif
++++
++++/* Enable inclusion of windows.h on MSVC platforms
++++ * This makes _mm_clflush functional on windows, as there is no builtin.
++++ */
++++#ifndef SSE2NEON_INCLUDE_WINDOWS_H
++++#define SSE2NEON_INCLUDE_WINDOWS_H (0)
++++#endif
++++
++++/* compiler specific definitions */
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma push_macro("FORCE_INLINE")
++++#pragma push_macro("ALIGN_STRUCT")
++++#define FORCE_INLINE static inline __attribute__((always_inline))
++++#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
++++#define _sse2neon_likely(x) __builtin_expect(!!(x), 1)
++++#define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0)
++++#elif defined(_MSC_VER)
++++#if _MSVC_TRADITIONAL
++++#error Using the traditional MSVC preprocessor is not supported! Use /Zc:preprocessor instead.
++++#endif
++++#ifndef FORCE_INLINE
++++#define FORCE_INLINE static inline
++++#endif
++++#ifndef ALIGN_STRUCT
++++#define ALIGN_STRUCT(x) __declspec(align(x))
++++#endif
++++#define _sse2neon_likely(x) (x)
++++#define _sse2neon_unlikely(x) (x)
++++#else
++++#pragma message("Macro name collisions may happen with unsupported compilers.")
++++#endif
++++
++++#if defined(__GNUC__) && __GNUC__ < 10
++++#warning "GCC versions earlier than 10 are not supported."
++++#endif
++++
++++/* C language does not allow initializing a variable with a function call. */
++++#ifdef __cplusplus
++++#define _sse2neon_const static const
++++#else
++++#define _sse2neon_const const
++++#endif
++++
++++#include <stdint.h>
++++#include <stdlib.h>
++++
++++#if defined(_WIN32)
++++/* Definitions for _mm_{malloc,free} are provided by <malloc.h>
++++ * from both MinGW-w64 and MSVC.
++++ */
++++#define SSE2NEON_ALLOC_DEFINED
++++#endif
++++
++++/* If using MSVC */
++++#ifdef _MSC_VER
++++#include <intrin.h>
++++#if SSE2NEON_INCLUDE_WINDOWS_H
++++#include <processthreadsapi.h>
++++#include <windows.h>
++++#endif
++++
++++#if !defined(__cplusplus)
++++#error SSE2NEON only supports C++ compilation with this compiler
++++#endif
++++
++++#ifdef SSE2NEON_ALLOC_DEFINED
++++#include <malloc.h>
++++#endif
++++
++++#if (defined(_M_AMD64) || defined(__x86_64__)) || \
++++ (defined(_M_ARM64) || defined(__arm64__))
++++#define SSE2NEON_HAS_BITSCAN64
++++#endif
++++#endif
++++
++++#if defined(__GNUC__) || defined(__clang__)
++++#define _sse2neon_define0(type, s, body) \
++++ __extension__({ \
++++ type _a = (s); \
++++ body \
++++ })
++++#define _sse2neon_define1(type, s, body) \
++++ __extension__({ \
++++ type _a = (s); \
++++ body \
++++ })
++++#define _sse2neon_define2(type, a, b, body) \
++++ __extension__({ \
++++ type _a = (a), _b = (b); \
++++ body \
++++ })
++++#define _sse2neon_return(ret) (ret)
++++#else
++++#define _sse2neon_define0(type, a, body) [=](type _a) { body }(a)
++++#define _sse2neon_define1(type, a, body) [](type _a) { body }(a)
++++#define _sse2neon_define2(type, a, b, body) \
++++ [](type _a, type _b) { body }((a), (b))
++++#define _sse2neon_return(ret) return ret
++++#endif
++++
++++#define _sse2neon_init(...) \
++++ { \
++++ __VA_ARGS__ \
++++ }
++++
++++/* Compiler barrier */
++++#if defined(_MSC_VER)
++++#define SSE2NEON_BARRIER() _ReadWriteBarrier()
++++#else
++++#define SSE2NEON_BARRIER() \
++++ do { \
++++ __asm__ __volatile__("" ::: "memory"); \
++++ (void) 0; \
++++ } while (0)
++++#endif
++++
++++/* Memory barriers
++++ * __atomic_thread_fence does not include a compiler barrier; instead,
++++ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
++++ * semantics.
++++ */
++++#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
++++#include <stdatomic.h>
++++#endif
++++
++++FORCE_INLINE void _sse2neon_smp_mb(void)
++++{
++++ SSE2NEON_BARRIER();
++++#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
++++ !defined(__STDC_NO_ATOMICS__)
++++ atomic_thread_fence(memory_order_seq_cst);
++++#elif defined(__GNUC__) || defined(__clang__)
++++ __atomic_thread_fence(__ATOMIC_SEQ_CST);
++++#else /* MSVC */
++++ __dmb(_ARM64_BARRIER_ISH);
++++#endif
++++}
++++
++++/* Architecture-specific build options */
++++/* FIXME: #pragma GCC push_options is only available on GCC */
++++#if defined(__GNUC__)
++++#if defined(__arm__) && __ARM_ARCH == 7
++++/* According to ARM C Language Extensions Architecture specification,
++++ * __ARM_NEON is defined to a value indicating the Advanced SIMD (NEON)
++++ * architecture supported.
++++ */
++++#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
++++#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
++++#endif
++++#if !defined(__clang__)
++++#pragma GCC push_options
++++#pragma GCC target("fpu=neon")
++++#endif
++++#elif defined(__aarch64__) || defined(_M_ARM64)
++++#if !defined(__clang__) && !defined(_MSC_VER)
++++#pragma GCC push_options
++++#pragma GCC target("+simd")
++++#endif
++++#elif __ARM_ARCH == 8
++++#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
++++#error \
++++ "You must enable NEON instructions (e.g. -mfpu=neon-fp-armv8) to use SSE2NEON."
++++#endif
++++#if !defined(__clang__) && !defined(_MSC_VER)
++++#pragma GCC push_options
++++#endif
++++#else
++++#error "Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A."
++++#endif
++++#endif
++++
++++#include <arm_neon.h>
++++#if (!defined(__aarch64__) && !defined(_M_ARM64)) && (__ARM_ARCH == 8)
++++#if defined __has_include && __has_include(<arm_acle.h>)
++++#include <arm_acle.h>
++++#endif
++++#endif
++++
++++/* Apple Silicon cache lines are double of what is commonly used by Intel, AMD
++++ * and other Arm microarchitectures use.
++++ * From sysctl -a on Apple M1:
++++ * hw.cachelinesize: 128
++++ */
++++#if defined(__APPLE__) && (defined(__aarch64__) || defined(__arm64__))
++++#define SSE2NEON_CACHELINE_SIZE 128
++++#else
++++#define SSE2NEON_CACHELINE_SIZE 64
++++#endif
++++
++++/* Rounding functions require either Aarch64 instructions or libm fallback */
++++#if !defined(__aarch64__) && !defined(_M_ARM64)
++++#include <math.h>
++++#endif
++++
++++/* On ARMv7, some registers, such as PMUSERENR and PMCCNTR, are read-only
++++ * or even not accessible in user mode.
++++ * To write or access to these registers in user mode,
++++ * we have to perform syscall instead.
++++ */
++++#if (!defined(__aarch64__) && !defined(_M_ARM64))
++++#include <sys/time.h>
++++#endif
++++
++++/* "__has_builtin" can be used to query support for built-in functions
++++ * provided by gcc/clang and other compilers that support it.
++++ */
++++#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
++++/* Compatibility with gcc <= 9 */
++++#if defined(__GNUC__) && (__GNUC__ <= 9)
++++#define __has_builtin(x) HAS##x
++++#define HAS__builtin_popcount 1
++++#define HAS__builtin_popcountll 1
++++
++++// __builtin_shuffle introduced in GCC 4.7.0
++++#if (__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))
++++#define HAS__builtin_shuffle 1
++++#else
++++#define HAS__builtin_shuffle 0
++++#endif
++++
++++#define HAS__builtin_shufflevector 0
++++#define HAS__builtin_nontemporal_store 0
++++#else
++++#define __has_builtin(x) 0
++++#endif
++++#endif
++++
++++/**
++++ * MACRO for shuffle parameter for _mm_shuffle_ps().
++++ * Argument fp3 is a digit[0123] that represents the fp from argument "b"
++++ * of mm_shuffle_ps that will be placed in fp3 of result. fp2 is the same
++++ * for fp2 in result. fp1 is a digit[0123] that represents the fp from
++++ * argument "a" of mm_shuffle_ps that will be places in fp1 of result.
++++ * fp0 is the same for fp0 of result.
++++ */
++++#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
++++ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
++++
++++#if __has_builtin(__builtin_shufflevector)
++++#define _sse2neon_shuffle(type, a, b, ...) \
++++ __builtin_shufflevector(a, b, __VA_ARGS__)
++++#elif __has_builtin(__builtin_shuffle)
++++#define _sse2neon_shuffle(type, a, b, ...) \
++++ __extension__({ \
++++ type tmp = {__VA_ARGS__}; \
++++ __builtin_shuffle(a, b, tmp); \
++++ })
++++#endif
++++
++++#ifdef _sse2neon_shuffle
++++#define vshuffle_s16(a, b, ...) _sse2neon_shuffle(int16x4_t, a, b, __VA_ARGS__)
++++#define vshuffleq_s16(a, b, ...) _sse2neon_shuffle(int16x8_t, a, b, __VA_ARGS__)
++++#define vshuffle_s32(a, b, ...) _sse2neon_shuffle(int32x2_t, a, b, __VA_ARGS__)
++++#define vshuffleq_s32(a, b, ...) _sse2neon_shuffle(int32x4_t, a, b, __VA_ARGS__)
++++#define vshuffle_s64(a, b, ...) _sse2neon_shuffle(int64x1_t, a, b, __VA_ARGS__)
++++#define vshuffleq_s64(a, b, ...) _sse2neon_shuffle(int64x2_t, a, b, __VA_ARGS__)
++++#endif
++++
++++/* Rounding mode macros. */
++++#define _MM_FROUND_TO_NEAREST_INT 0x00
++++#define _MM_FROUND_TO_NEG_INF 0x01
++++#define _MM_FROUND_TO_POS_INF 0x02
++++#define _MM_FROUND_TO_ZERO 0x03
++++#define _MM_FROUND_CUR_DIRECTION 0x04
++++#define _MM_FROUND_NO_EXC 0x08
++++#define _MM_FROUND_RAISE_EXC 0x00
++++#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
++++#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
++++#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
++++#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
++++#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
++++#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
++++#define _MM_ROUND_NEAREST 0x0000
++++#define _MM_ROUND_DOWN 0x2000
++++#define _MM_ROUND_UP 0x4000
++++#define _MM_ROUND_TOWARD_ZERO 0x6000
++++/* Flush zero mode macros. */
++++#define _MM_FLUSH_ZERO_MASK 0x8000
++++#define _MM_FLUSH_ZERO_ON 0x8000
++++#define _MM_FLUSH_ZERO_OFF 0x0000
++++/* Denormals are zeros mode macros. */
++++#define _MM_DENORMALS_ZERO_MASK 0x0040
++++#define _MM_DENORMALS_ZERO_ON 0x0040
++++#define _MM_DENORMALS_ZERO_OFF 0x0000
++++
++++/* Indicates an immediate constant argument in a given range */
++++#define __constrange(a, b) const
++++
++++/* A few intrinsics accept traditional data types like ints or floats, but
++++ * most operate on data types that are specific to SSE.
++++ * If a vector type ends in d, it contains doubles, and if it does not have
++++ * a suffix, it contains floats. An integer vector type can contain any type
++++ * of integer, from chars to shorts to unsigned long longs.
++++ */
++++typedef int64x1_t __m64;
++++typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
++++// On ARM 32-bit architecture, the float64x2_t is not supported.
++++// The data type __m128d should be represented in a different way for related
++++// intrinsic conversion.
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
++++#else
++++typedef float32x4_t __m128d;
++++#endif
++++typedef int64x2_t __m128i; /* 128-bit vector containing integers */
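++++
++++// Illustrative example of how the SSE vector types are used (all three
++++// constructors are declared later in this header):
++++//     __m128  vf = _mm_set_ps1(1.0f);    // four packed floats
++++//     __m128i vi = _mm_set1_epi32(42);   // four packed 32-bit integers
++++//     __m128d vd = _mm_set_pd(2.0, 1.0); // two doubles (f64x2 on AArch64,
++++//                                        // stored in a f32x4 register on ARMv7)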
++++
++++// __int64 is defined in the Intrinsics Guide; it maps to a different data type
++++// depending on the data model
++++#if !(defined(_WIN32) || defined(_WIN64) || defined(__int64))
++++#if (defined(__x86_64__) || defined(__i386__))
++++#define __int64 long long
++++#else
++++#define __int64 int64_t
++++#endif
++++#endif
++++
++++/* type-safe casting between types */
++++
++++#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
++++#define vreinterpretq_m128_f32(x) (x)
++++#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
++++
++++#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
++++#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
++++#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
++++#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
++++
++++#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
++++#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
++++#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
++++#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
++++
++++#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
++++#define vreinterpretq_f32_m128(x) (x)
++++#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
++++
++++#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
++++#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
++++#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
++++#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
++++
++++#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
++++#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
++++#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
++++#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
++++
++++#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
++++#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
++++#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
++++#define vreinterpretq_m128i_s64(x) (x)
++++
++++#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
++++#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
++++#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
++++#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
++++
++++#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
++++#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
++++
++++#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
++++#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
++++#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
++++#define vreinterpretq_s64_m128i(x) (x)
++++
++++#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
++++#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
++++#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
++++#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
++++
++++#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
++++#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
++++#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
++++#define vreinterpret_m64_s64(x) (x)
++++
++++#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
++++#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
++++#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
++++#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
++++
++++#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
++++#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
++++#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
++++
++++#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
++++#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
++++#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
++++#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
++++
++++#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
++++#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
++++#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
++++#define vreinterpret_s64_m64(x) (x)
++++
++++#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
++++#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
++++
++++#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
++++
++++#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
++++#define vreinterpretq_m128d_f64(x) (x)
++++
++++#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
++++
++++#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f64(x)
++++#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
++++
++++#define vreinterpretq_f64_m128d(x) (x)
++++#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
++++#else
++++#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
++++#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
++++
++++#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
++++#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
++++
++++#define vreinterpretq_m128d_f32(x) (x)
++++
++++#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
++++
++++#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
++++#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
++++
++++#define vreinterpretq_f32_m128d(x) (x)
++++#endif
++++
++++// A struct called 'SIMDVec' is defined in this header file and can be used by
++++// applications that attempt to access the contents of an __m128 struct
++++// directly. Note that accessing the __m128 struct directly is considered bad
++++// coding practice by Microsoft: @see:
++++// https://learn.microsoft.com/en-us/cpp/cpp/m128
++++//
++++// However, some legacy source code may try to access the contents of an __m128
++++// struct directly, so the developer can use SIMDVec as an alias for it. Any
++++// casting must be done manually by the developer, as you cannot cast or
++++// otherwise alias the base NEON data type for intrinsic operations.
++++//
++++// This union is intended to allow direct access to an __m128 variable using
++++// the names that the MSVC compiler provides. It should really only be used
++++// when trying to access the members of the vector as integer values. GCC/clang
++++// allow native access to the float members through a simple array access
++++// operator (in C since 4.6, in C++ since 4.8).
++++//
++++// Ideally, direct accesses to SIMD vectors should be avoided since they can
++++// cause a performance hit. If such access really is needed, the original
++++// __m128 variable can be aliased with a pointer to this union and used to
++++// access individual components. The use of this union should be hidden behind
++++// a macro that is used throughout the codebase to access the members instead
++++// of always declaring this type of variable.
++++typedef union ALIGN_STRUCT(16) SIMDVec {
++++ float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
++++ int8_t m128_i8[16]; // as signed 8-bit integers.
++++ int16_t m128_i16[8]; // as signed 16-bit integers.
++++ int32_t m128_i32[4]; // as signed 32-bit integers.
++++ int64_t m128_i64[2]; // as signed 64-bit integers.
++++ uint8_t m128_u8[16]; // as unsigned 8-bit integers.
++++ uint16_t m128_u16[8]; // as unsigned 16-bit integers.
++++ uint32_t m128_u32[4]; // as unsigned 32-bit integers.
++++ uint64_t m128_u64[2]; // as unsigned 64-bit integers.
++++} SIMDVec;
++++
++++// casting using SIMDVec
++++#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
++++#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
++++#define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
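++++
++++// Illustrative example (lane access through these macros is a convenience,
++++// not a fast path):
++++//     __m128i v = _mm_set_epi32(3, 2, 1, 0);
++++//     uint32_t lane2 = vreinterpretq_nth_u32_m128i(v, 2); // == 2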
++++
++++/* SSE macros */
++++#define _MM_GET_FLUSH_ZERO_MODE _sse2neon_mm_get_flush_zero_mode
++++#define _MM_SET_FLUSH_ZERO_MODE _sse2neon_mm_set_flush_zero_mode
++++#define _MM_GET_DENORMALS_ZERO_MODE _sse2neon_mm_get_denormals_zero_mode
++++#define _MM_SET_DENORMALS_ZERO_MODE _sse2neon_mm_set_denormals_zero_mode
++++
++++// Function declaration
++++// SSE
++++FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void);
++++FORCE_INLINE __m128 _mm_move_ss(__m128, __m128);
++++FORCE_INLINE __m128 _mm_or_ps(__m128, __m128);
++++FORCE_INLINE __m128 _mm_set_ps1(float);
++++FORCE_INLINE __m128 _mm_setzero_ps(void);
++++// SSE2
++++FORCE_INLINE __m128i _mm_and_si128(__m128i, __m128i);
++++FORCE_INLINE __m128i _mm_castps_si128(__m128);
++++FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i, __m128i);
++++FORCE_INLINE __m128i _mm_cvtps_epi32(__m128);
++++FORCE_INLINE __m128d _mm_move_sd(__m128d, __m128d);
++++FORCE_INLINE __m128i _mm_or_si128(__m128i, __m128i);
++++FORCE_INLINE __m128i _mm_set_epi32(int, int, int, int);
++++FORCE_INLINE __m128i _mm_set_epi64x(int64_t, int64_t);
++++FORCE_INLINE __m128d _mm_set_pd(double, double);
++++FORCE_INLINE __m128i _mm_set1_epi32(int);
++++FORCE_INLINE __m128i _mm_setzero_si128(void);
++++// SSE4.1
++++FORCE_INLINE __m128d _mm_ceil_pd(__m128d);
++++FORCE_INLINE __m128 _mm_ceil_ps(__m128);
++++FORCE_INLINE __m128d _mm_floor_pd(__m128d);
++++FORCE_INLINE __m128 _mm_floor_ps(__m128);
++++FORCE_INLINE __m128d _mm_round_pd(__m128d, int);
++++FORCE_INLINE __m128 _mm_round_ps(__m128, int);
++++// SSE4.2
++++FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t, uint8_t);
++++
++++/* Backwards compatibility for compilers that lack support for specific types */
++++
++++// Older gcc does not define the vld1q_u8_x4 intrinsic
++++#if defined(__GNUC__) && !defined(__clang__) && \
++++ ((__GNUC__ <= 13 && defined(__arm__)) || \
++++ (__GNUC__ == 10 && __GNUC_MINOR__ < 3 && defined(__aarch64__)) || \
++++ (__GNUC__ <= 9 && defined(__aarch64__)))
++++FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
++++{
++++ uint8x16x4_t ret;
++++ ret.val[0] = vld1q_u8(p + 0);
++++ ret.val[1] = vld1q_u8(p + 16);
++++ ret.val[2] = vld1q_u8(p + 32);
++++ ret.val[3] = vld1q_u8(p + 48);
++++ return ret;
++++}
++++#else
++++// Wraps vld1q_u8_x4
++++FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
++++{
++++ return vld1q_u8_x4(p);
++++}
++++#endif
++++
++++#if !defined(__aarch64__) && !defined(_M_ARM64)
++++/* emulate vaddv u8 variant */
++++FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
++++{
++++ const uint64x1_t v1 = vpaddl_u32(vpaddl_u16(vpaddl_u8(v8)));
++++ return vget_lane_u8(vreinterpret_u8_u64(v1), 0);
++++}
++++#else
++++// Wraps vaddv_u8
++++FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
++++{
++++ return vaddv_u8(v8);
++++}
++++#endif
++++
++++#if !defined(__aarch64__) && !defined(_M_ARM64)
++++/* emulate vaddvq u8 variant */
++++FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
++++{
++++ uint8x8_t tmp = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
++++ uint8_t res = 0;
++++ for (int i = 0; i < 8; ++i)
++++ res += tmp[i];
++++ return res;
++++}
++++#else
++++// Wraps vaddvq_u8
++++FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
++++{
++++ return vaddvq_u8(a);
++++}
++++#endif
++++
++++#if !defined(__aarch64__) && !defined(_M_ARM64)
++++/* emulate vaddvq u16 variant */
++++FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
++++{
++++ uint32x4_t m = vpaddlq_u16(a);
++++ uint64x2_t n = vpaddlq_u32(m);
++++ uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
++++
++++ return vget_lane_u32((uint32x2_t) o, 0);
++++}
++++#else
++++// Wraps vaddvq_u16
++++FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
++++{
++++ return vaddvq_u16(a);
++++}
++++#endif
++++
++++/* Function Naming Conventions
++++ * The naming convention of SSE intrinsics is straightforward. A generic SSE
++++ * intrinsic function is given as follows:
++++ * _mm_<name>_<data_type>
++++ *
++++ * The parts of this format are given as follows:
++++ * 1. <name> describes the operation performed by the intrinsic
++++ * 2. <data_type> identifies the data type of the function's primary arguments
++++ *
++++ * This last part, <data_type>, is a little complicated. It identifies the
++++ * content of the input values, and can be set to any of the following values:
++++ * + ps - vectors contain floats (ps stands for packed single-precision)
++++ * + pd - vectors contain doubles (pd stands for packed double-precision)
++++ * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
++++ * signed integers
++++ * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
++++ * unsigned integers
++++ * + si128 - unspecified 128-bit vector or 256-bit vector
++++ * + m128/m128i/m128d - identifies input vector types when they are different
++++ * than the type of the returned vector
++++ *
++++ * For example, _mm_setzero_ps. The _mm implies that the function returns
++++ * a 128-bit vector. The _ps at the end implies that the argument vectors
++++ * contain floats.
++++ *
++++ * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
++++ * // Set packed 16-bit integers: 128 bits, 8 shorts, 16 bits each
++++ * __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
++++ * // Set packed 8-bit integers:
++++ * // 128 bits, 16 chars, 8 bits each
++++ * __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
++++ * 4, 5, 12, 13, 6, 7, 14, 15);
++++ * // Shuffle packed 8-bit integers
++++ * __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
++++ */
++++
++++/* Constants for use with _mm_prefetch. */
++++enum _mm_hint {
++++ _MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */
++++ _MM_HINT_T0 = 1, /* load data to L1 and L2 cache */
++++ _MM_HINT_T1 = 2, /* load data to L2 cache only */
++++ _MM_HINT_T2 = 3, /* load data to L2 cache only, mark it as NTA */
++++};
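++++
++++// Typical usage (assuming _mm_prefetch is provided further down in this
++++// header, as in upstream sse2neon):
++++//     _mm_prefetch((const char *) p, _MM_HINT_T0);  // warm both cache levels
++++//     _mm_prefetch((const char *) p, _MM_HINT_NTA); // minimize cache pollution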
++++
++++// The bit field mapping to the FPCR (floating-point control register)
++++typedef struct {
++++ uint16_t res0;
++++ uint8_t res1 : 6;
++++ uint8_t bit22 : 1;
++++ uint8_t bit23 : 1;
++++ uint8_t bit24 : 1;
++++ uint8_t res2 : 7;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint32_t res3;
++++#endif
++++} fpcr_bitfield;
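++++
++++// Usage sketch (how this bit field is typically employed): the FPCR/FPSCR
++++// value is read into a union sharing storage with fpcr_bitfield, the relevant
++++// bit is inspected or toggled (e.g. bit24 for flush-to-zero), and the
++++// register is written back; see _sse2neon_mm_set_flush_zero_mode further down
++++// in this header.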
++++
++++// Takes the upper 64 bits of a and places it in the low end of the result
++++// Takes the lower 64 bits of b and places it into the high end of the result.
++++FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
++++{
++++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
++++}
++++
++++// Takes the lower two 32-bit values from a, swaps them, and places them in the
++++// low end of the result; takes the upper two 32-bit values from b, swaps them,
++++// and places them in the high end of the result.
++++FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
++++{
++++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
++++ float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
++++ return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
++++{
++++ float32x2_t a21 = vget_high_f32(
++++ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
++++ float32x2_t b03 = vget_low_f32(
++++ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
++++ return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
++++{
++++ float32x2_t a03 = vget_low_f32(
++++ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
++++ float32x2_t b21 = vget_high_f32(
++++ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
++++ return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
++++{
++++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
++++{
++++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
++++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
++++{
++++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
++++ float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
++++ return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
++++}
++++
++++// Keeps the low 64 bits of a in the low end of the result and puts the high
++++// 64 bits of b in the high end.
++++FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
++++{
++++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
++++{
++++ float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
++++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
++++ return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
++++{
++++ float32x2_t a22 =
++++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
++++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
++++ return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
++++{
++++ float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
++++ float32x2_t b22 =
++++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
++++ return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
++++{
++++ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
++++ float32x2_t a22 =
++++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
++++ float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
++++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
++++{
++++ float32x2_t a33 =
++++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
++++ float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
++++ return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
++++{
++++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
++++ float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
++++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
++++ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
++++ return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
++++{
++++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
++++ float32_t b2 = vgetq_lane_f32(b, 2);
++++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
++++ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
++++ return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
++++}
++++
++++FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
++++{
++++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
++++ float32_t b2 = vgetq_lane_f32(b, 2);
++++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
++++ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
++++ return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
++++}
++++
++++// For MSVC, we check only if it is ARM64, as every single ARM64 processor
++++// supported by WoA has crypto extensions. If this changes in the future,
++++// this can be verified via the runtime-only method of:
++++// IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)
++++#if (defined(_M_ARM64) && !defined(__clang__)) || \
++++ (defined(__ARM_FEATURE_CRYPTO) && \
++++ (defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64)))
++++// Wraps vmull_p64
++++FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
++++{
++++ poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
++++ poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
++++#if defined(_MSC_VER)
++++ __n64 a1 = {a}, b1 = {b};
++++ return vreinterpretq_u64_p128(vmull_p64(a1, b1));
++++#else
++++ return vreinterpretq_u64_p128(vmull_p64(a, b));
++++#endif
++++}
++++#else // ARMv7 polyfill
++++// ARMv7/some A64 lacks vmull_p64, but it has vmull_p8.
++++//
++++// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
++++// 64-bit->128-bit polynomial multiply.
++++//
++++// It needs some work and is somewhat slow, but it is still faster than all
++++// known scalar methods.
++++//
++++// Algorithm adapted to C from
++++// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
++++// from "Fast Software Polynomial Multiplication on ARM Processors Using the
++++// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
++++// (https://hal.inria.fr/hal-01506572)
++++static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
++++{
++++ poly8x8_t a = vreinterpret_p8_u64(_a);
++++ poly8x8_t b = vreinterpret_p8_u64(_b);
++++
++++ // Masks
++++ uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
++++ vcreate_u8(0x00000000ffffffff));
++++ uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
++++ vcreate_u8(0x0000000000000000));
++++
++++ // Do the multiplies, rotating with vext to get all combinations
++++ uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0
++++ uint8x16_t e =
++++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
++++ uint8x16_t f =
++++ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
++++ uint8x16_t g =
++++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
++++ uint8x16_t h =
++++ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
++++ uint8x16_t i =
++++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
++++ uint8x16_t j =
++++ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
++++ uint8x16_t k =
++++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // L = A0 * B4
++++
++++ // Add cross products
++++ uint8x16_t l = veorq_u8(e, f); // L = E + F
++++ uint8x16_t m = veorq_u8(g, h); // M = G + H
++++ uint8x16_t n = veorq_u8(i, j); // N = I + J
++++
++++ // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
++++ // instructions.
++++#if defined(__aarch64__)
++++ uint8x16_t lm_p0 = vreinterpretq_u8_u64(
++++ vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
++++ uint8x16_t lm_p1 = vreinterpretq_u8_u64(
++++ vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
++++ uint8x16_t nk_p0 = vreinterpretq_u8_u64(
++++ vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
++++ uint8x16_t nk_p1 = vreinterpretq_u8_u64(
++++ vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
++++#else
++++ uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
++++ uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
++++ uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
++++ uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
++++#endif
++++ // t0 = (L) (P0 + P1) << 8
++++ // t1 = (M) (P2 + P3) << 16
++++ uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
++++ uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
++++ uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);
++++
++++ // t2 = (N) (P4 + P5) << 24
++++ // t3 = (K) (P6 + P7) << 32
++++ uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
++++ uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
++++ uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);
++++
++++ // De-interleave
++++#if defined(__aarch64__)
++++ uint8x16_t t0 = vreinterpretq_u8_u64(
++++ vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
++++ uint8x16_t t1 = vreinterpretq_u8_u64(
++++ vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
++++ uint8x16_t t2 = vreinterpretq_u8_u64(
++++ vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
++++ uint8x16_t t3 = vreinterpretq_u8_u64(
++++ vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
++++#else
++++ uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
++++ uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
++++ uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
++++ uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
++++#endif
++++ // Shift the cross products
++++ uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
++++ uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
++++ uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
++++ uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32
++++
++++ // Accumulate the products
++++ uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
++++ uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
++++ uint8x16_t mix = veorq_u8(d, cross1);
++++ uint8x16_t r = veorq_u8(mix, cross2);
++++ return vreinterpretq_u64_u8(r);
++++}
++++#endif // ARMv7 polyfill
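++++
++++// Illustrative example: _sse2neon_vmull_p64 performs a 64x64 -> 128-bit
++++// carry-less (polynomial) multiplication (in upstream sse2neon it backs
++++// _mm_clmulepi64_si128). For instance:
++++//     uint64x1_t x = vcreate_u64(0x3); // x + 1
++++//     uint64x1_t y = vcreate_u64(0x5); // x^2 + 1
++++//     uint64x2_t r = _sse2neon_vmull_p64(x, y); // low lane == 0xF
++++// since (x + 1)(x^2 + 1) = x^3 + x^2 + x + 1 over GF(2).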
++++
++++// C equivalent:
++++// __m128i _mm_shuffle_epi32_default(__m128i a,
++++// __constrange(0, 255) int imm) {
++++// __m128i ret;
++++// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
++++// ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03];
++++// return ret;
++++// }
++++#define _mm_shuffle_epi32_default(a, imm) \
++++ vreinterpretq_m128i_s32(vsetq_lane_s32( \
++++ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \
++++ vsetq_lane_s32( \
++++ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \
++++ vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
++++ ((imm) >> 2) & 0x3), \
++++ vmovq_n_s32(vgetq_lane_s32( \
++++ vreinterpretq_s32_m128i(a), (imm) & (0x3))), \
++++ 1), \
++++ 2), \
++++ 3))
++++
++++// Takes the upper 64 bits of a and places it in the low end of the result
++++// Takes the lower 64 bits of a and places it into the high end of the result.
++++FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
++++{
++++ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
++++ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
++++ return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
++++}
++++
++++// Takes the lower two 32-bit values from a, swaps them, and places them in the
++++// low end of the result; takes the upper two 32-bit values from a, swaps them,
++++// and places them in the high end of the result.
++++FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
++++{
++++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
++++ int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
++++ return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
++++}
++++
++++// rotates the least significant 32 bits into the most significant 32 bits, and
++++// shifts the rest down
++++FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
++++}
++++
++++// rotates the most significant 32 bits into the least significant 32 bits, and
++++// shifts the rest up
++++FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
++++}
++++
++++// gets the lower 64 bits of a, and places it in the upper 64 bits
++++// gets the lower 64 bits of a and places it in the lower 64 bits
++++FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
++++{
++++ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
++++ return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
++++}
++++
++++// Gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
++++// the lower 64 bits; gets the lower 64 bits of a and places them in the upper
++++// 64 bits.
++++FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
++++{
++++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
++++ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
++++ return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
++++}
++++
++++// Gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
++++// the upper 64 bits; gets the lower 64 bits of a, swaps the 0 and 1 elements,
++++// and places them in the lower 64 bits.
++++FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
++++{
++++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
++++ return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
++++}
++++
++++FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
++++{
++++ int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
++++ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
++++ return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
++++}
++++
++++FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
++++{
++++ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
++++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
++++ return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
++++}
++++
++++FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
++++{
++++ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
++++ int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
++++ return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
++++}
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++#define _mm_shuffle_epi32_splat(a, imm) \
++++ vreinterpretq_m128i_s32(vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm)))
++++#else
++++#define _mm_shuffle_epi32_splat(a, imm) \
++++ vreinterpretq_m128i_s32( \
++++ vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))))
++++#endif
++++
++++// NEON does not support a general purpose permute intrinsic.
++++// Shuffle single-precision (32-bit) floating-point elements in a using the
++++// control in imm8, and store the results in dst.
++++//
++++// C equivalent:
++++// __m128 _mm_shuffle_ps_default(__m128 a, __m128 b,
++++// __constrange(0, 255) int imm) {
++++// __m128 ret;
++++// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
++++// ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03];
++++// return ret;
++++// }
++++//
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_ps
++++#define _mm_shuffle_ps_default(a, b, imm) \
++++ vreinterpretq_m128_f32(vsetq_lane_f32( \
++++ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
++++ vsetq_lane_f32( \
++++ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
++++ vsetq_lane_f32( \
++++ vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
++++ vmovq_n_f32( \
++++ vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))), \
++++ 1), \
++++ 2), \
++++ 3))
++++
++++// Shuffle 16-bit integers in the low 64 bits of a using the control in imm8.
++++// Store the results in the low 64 bits of dst, with the high 64 bits being
++++// copied from a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16
++++#define _mm_shufflelo_epi16_function(a, imm) \
++++ _sse2neon_define1( \
++++ __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \
++++ int16x4_t lowBits = vget_low_s16(ret); \
++++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \
++++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \
++++ 1); \
++++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \
++++ 2); \
++++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \
++++ 3); \
++++ _sse2neon_return(vreinterpretq_m128i_s16(ret));)
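++++
++++// Worked example: with a = { 0, 1, 2, 3, 4, 5, 6, 7 } (16-bit lanes 0..7) and
++++// imm = _MM_SHUFFLE(0, 1, 2, 3) == 0x1B, the low half is reversed and the
++++// high half is copied unchanged:
++++//     dst = { 3, 2, 1, 0, 4, 5, 6, 7 }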
++++
++++// Shuffle 16-bit integers in the high 64 bits of a using the control in imm8.
++++// Store the results in the high 64 bits of dst, with the low 64 bits being
++++// copied from a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16
++++#define _mm_shufflehi_epi16_function(a, imm) \
++++ _sse2neon_define1( \
++++ __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \
++++ int16x4_t highBits = vget_high_s16(ret); \
++++ ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \
++++ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \
++++ 5); \
++++ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \
++++ 6); \
++++ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \
++++ 7); \
++++ _sse2neon_return(vreinterpretq_m128i_s16(ret));)
++++
++++/* MMX */
++++
++++// _mm_empty is a no-op on ARM
++++FORCE_INLINE void _mm_empty(void) {}
++++
++++/* SSE */
++++
++++// Add packed single-precision (32-bit) floating-point elements in a and b, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ps
++++FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Add the lower single-precision (32-bit) floating-point element in a and b,
++++// store the result in the lower element of dst, and copy the upper 3 packed
++++// elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ss
++++FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
++++{
++++ float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
++++ float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
++++ // the upper values in the result must be the remnants of <a>.
++++ return vreinterpretq_m128_f32(vaddq_f32(a, value));
++++}
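++++
++++// Worked example: with a = { 1, 2, 3, 4 } and b = { 10, 20, 30, 40 },
++++// _mm_add_ss(a, b) yields { 11, 2, 3, 4 }: only the lowest lane is summed;
++++// since "value" is { b0, 0, 0, 0 }, the full-width add leaves the upper
++++// lanes of a unchanged.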
++++
++++// Compute the bitwise AND of packed single-precision (32-bit) floating-point
++++// elements in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_ps
++++FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_s32(
++++ vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
++++}
++++
++++// Compute the bitwise NOT of packed single-precision (32-bit) floating-point
++++// elements in a and then AND with b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_ps
++++FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_s32(
++++ vbicq_s32(vreinterpretq_s32_m128(b),
++++ vreinterpretq_s32_m128(a))); // *NOTE* argument swap
++++}
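++++
++++// Worked example of the argument swap: _mm_andnot_ps computes (~a) & b, while
++++// NEON's vbicq_s32(x, y) computes x & ~y, so the operands are passed as
++++// vbicq_s32(b, a). Bitwise, a lane with a = 0b1100 and b = 0b1010 yields
++++// 0b0010.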
++++
++++// Average packed unsigned 16-bit integers in a and b, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu16
++++FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_u16(
++++ vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)));
++++}
++++
++++// Average packed unsigned 8-bit integers in a and b, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu8
++++FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_u8(
++++ vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for equality, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ps
++++FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(
++++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for equality, store the result in the lower element of dst, and copy the
++++// upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ss
++++FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpeq_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for greater-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ps
++++FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(
++++ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for greater-than-or-equal, store the result in the lower element of dst,
++++// and copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ss
++++FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpge_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for greater-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ps
++++FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(
++++ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for greater-than, store the result in the lower element of dst, and copy
++++// the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ss
++++FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpgt_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for less-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ps
++++FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(
++++ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for less-than-or-equal, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ss
++++FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmple_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for less-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ps
++++FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(
++++ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for less-than, store the result in the lower element of dst, and copy the
++++// upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ss
++++FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmplt_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for not-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ps
++++FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(vmvnq_u32(
++++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for not-equal, store the result in the lower element of dst, and copy the
++++// upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ss
++++FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpneq_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for not-greater-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ps
++++FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(vmvnq_u32(
++++ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for not-greater-than-or-equal, store the result in the lower element of
++++// dst, and copy the upper 3 packed elements from a to the upper elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ss
++++FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpnge_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for not-greater-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ps
++++FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(vmvnq_u32(
++++ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for not-greater-than, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ss
++++FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpngt_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for not-less-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ps
++++FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(vmvnq_u32(
++++ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for not-less-than-or-equal, store the result in the lower element of dst,
++++// and copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ss
++++FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpnle_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// for not-less-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ps
++++FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_u32(vmvnq_u32(
++++ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b for not-less-than, store the result in the lower element of dst, and copy
++++// the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ss
++++FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpnlt_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// to see if neither is NaN, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ps
++++//
++++// See also:
++++// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
++++// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
++++FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
++++{
++++ // Note: NEON does not have an ordered-compare builtin.
++++ // Compare a == a and b == b to detect NaN lanes, then AND the results.
++++ uint32x4_t ceqaa =
++++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
++++ uint32x4_t ceqbb =
++++ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
++++}
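++++
++++// Worked example: with a = { 1.0f, NAN, 3.0f, 4.0f } and
++++// b = { 1.0f, 2.0f, NAN, 4.0f }, _mm_cmpord_ps(a, b) sets lanes 0 and 3 to
++++// all ones (0xFFFFFFFF) and lanes 1 and 2 to zero, since a NaN in either
++++// input makes the pair "unordered".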
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b to see if neither is NaN, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ss
++++FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpord_ps(a, b));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b
++++// to see if either is NaN, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ps
++++FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b)
++++{
++++ uint32x4_t f32a =
++++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
++++ uint32x4_t f32b =
++++ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b to see if either is NaN, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ss
++++FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_cmpunord_ps(a, b));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point element in a and b
++++// for equality, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_ss
++++FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
++++{
++++ uint32x4_t a_eq_b =
++++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
++++ return vgetq_lane_u32(a_eq_b, 0) & 0x1;
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point element in a and b
++++// for greater-than-or-equal, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_ss
++++FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
++++{
++++ uint32x4_t a_ge_b =
++++ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
++++ return vgetq_lane_u32(a_ge_b, 0) & 0x1;
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point element in a and b
++++// for greater-than, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_ss
++++FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
++++{
++++ uint32x4_t a_gt_b =
++++ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
++++ return vgetq_lane_u32(a_gt_b, 0) & 0x1;
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point element in a and b
++++// for less-than-or-equal, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_ss
++++FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
++++{
++++ uint32x4_t a_le_b =
++++ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
++++ return vgetq_lane_u32(a_le_b, 0) & 0x1;
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point element in a and b
++++// for less-than, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_ss
++++FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
++++{
++++ uint32x4_t a_lt_b =
++++ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
++++ return vgetq_lane_u32(a_lt_b, 0) & 0x1;
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point element in a and b
++++// for not-equal, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_ss
++++FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
++++{
++++ return !_mm_comieq_ss(a, b);
++++}
++++
++++// Convert packed signed 32-bit integers in b to packed single-precision
++++// (32-bit) floating-point elements, store the results in the lower 2 elements
++++// of dst, and copy the upper 2 packed elements from a to the upper elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_pi2ps
++++FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
++++ vget_high_f32(vreinterpretq_f32_m128(a))));
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 32-bit integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ps2pi
++++FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a)
++++{
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ return vreinterpret_m64_s32(
++++ vget_low_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a)))));
++++#else
++++ return vreinterpret_m64_s32(vcvt_s32_f32(vget_low_f32(
++++ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)))));
++++#endif
++++}
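++++
++++// Worked example (assuming the default round-to-nearest-even mode): an input
++++// of { 1.4f, 1.5f, x, y } converts its two low lanes to { 1, 2 }; a tie such
++++// as 2.5f rounds to 2 rather than 3. Only the low two lanes of a are returned
++++// in the __m64 result.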
++++
++++// Convert the signed 32-bit integer b to a single-precision (32-bit)
++++// floating-point element, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_si2ss
++++FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
++++}
++++
++++// Convert the lower single-precision (32-bit) floating-point element in a to a
++++// 32-bit integer, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ss2si
++++FORCE_INLINE int _mm_cvt_ss2si(__m128 a)
++++{
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ return vgetq_lane_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))),
++++ 0);
++++#else
++++ float32_t data = vgetq_lane_f32(
++++ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
++++ return (int32_t) data;
++++#endif
++++}
++++
++++// Convert packed 16-bit integers in a to packed single-precision (32-bit)
++++// floating-point elements, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi16_ps
++++FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
++++{
++++ return vreinterpretq_m128_f32(
++++ vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
++++}
++++
++++// Convert packed 32-bit integers in b to packed single-precision (32-bit)
++++// floating-point elements, store the results in the lower 2 elements of dst,
++++// and copy the upper 2 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_ps
++++FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
++++ vget_high_f32(vreinterpretq_f32_m128(a))));
++++}
++++
++++// Convert packed signed 32-bit integers in a to packed single-precision
++++// (32-bit) floating-point elements, store the results in the lower 2 elements
++++// of dst, then convert the packed signed 32-bit integers in b to
++++// single-precision (32-bit) floating-point element, and store the results in
++++// the upper 2 elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32x2_ps
++++FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b)
++++{
++++ return vreinterpretq_m128_f32(vcvtq_f32_s32(
++++ vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))));
++++}
++++
++++// Convert the lower packed 8-bit integers in a to packed single-precision
++++// (32-bit) floating-point elements, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi8_ps
++++FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a)
++++{
++++ return vreinterpretq_m128_f32(vcvtq_f32_s32(
++++ vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a))))));
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 16-bit integers, and store the results in dst. Note: this intrinsic
++++// will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and
++++// 0x7FFFFFFF.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi16
++++FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a)
++++{
++++ return vreinterpret_m64_s16(
++++ vqmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a))));
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 32-bit integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi32
++++#define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a)
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 8-bit integers, and store the results in lower 4 elements of dst.
++++// Note: this intrinsic will generate 0x7F, rather than 0x80, for input values
++++// between 0x7F and 0x7FFFFFFF.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi8
++++FORCE_INLINE __m64 _mm_cvtps_pi8(__m128 a)
++++{
++++ return vreinterpret_m64_s8(vqmovn_s16(
++++ vcombine_s16(vreinterpret_s16_m64(_mm_cvtps_pi16(a)), vdup_n_s16(0))));
++++}
++++
++++// Convert packed unsigned 16-bit integers in a to packed single-precision
++++// (32-bit) floating-point elements, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu16_ps
++++FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a)
++++{
++++ return vreinterpretq_m128_f32(
++++ vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a))));
++++}
++++
++++// Convert the lower packed unsigned 8-bit integers in a to packed
++++// single-precision (32-bit) floating-point elements, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu8_ps
++++FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a)
++++{
++++ return vreinterpretq_m128_f32(vcvtq_f32_u32(
++++ vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a))))));
++++}
++++
++++// Convert the signed 32-bit integer b to a single-precision (32-bit)
++++// floating-point element, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_ss
++++#define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b)
++++
++++// Convert the signed 64-bit integer b to a single-precision (32-bit)
++++// floating-point element, store the result in the lower element of dst, and
++++// copy the upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_ss
++++FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
++++}
++++
++++// Copy the lower single-precision (32-bit) floating-point element of a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_f32
++++FORCE_INLINE float _mm_cvtss_f32(__m128 a)
++++{
++++ return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
++++}
++++
++++// Convert the lower single-precision (32-bit) floating-point element in a to a
++++// 32-bit integer, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si32
++++#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a)
++++
++++// Convert the lower single-precision (32-bit) floating-point element in a to a
++++// 64-bit integer, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si64
++++FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a)
++++{
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ return (int64_t) vgetq_lane_f32(vrndiq_f32(vreinterpretq_f32_m128(a)), 0);
++++#else
++++ float32_t data = vgetq_lane_f32(
++++ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
++++ return (int64_t) data;
++++#endif
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 32-bit integers with truncation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ps2pi
++++FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a)
++++{
++++ return vreinterpret_m64_s32(
++++ vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))));
++++}
++++
++++// Convert the lower single-precision (32-bit) floating-point element in a to a
++++// 32-bit integer with truncation, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ss2si
++++FORCE_INLINE int _mm_cvtt_ss2si(__m128 a)
++++{
++++ return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0);
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 32-bit integers with truncation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_pi32
++++#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a)
++++
++++// Convert the lower single-precision (32-bit) floating-point element in a to a
++++// 32-bit integer with truncation, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si32
++++#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a)
++++
++++// Convert the lower single-precision (32-bit) floating-point element in a to a
++++// 64-bit integer with truncation, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si64
++++FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a)
++++{
++++ return (int64_t) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
++++}
++++
++++// Divide packed single-precision (32-bit) floating-point elements in a by
++++// packed elements in b, and store the results in dst.
++++// ARMv7-A NEON has no precise division instruction, so division is
++++// implemented by estimating b's reciprocal, refining the estimate with
++++// Newton-Raphson iterations, and multiplying a by the refined reciprocal.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ps
++++FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++#else
++++ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
++++ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
++++ // Additional Newton-Raphson iteration for accuracy
++++ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
++++ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip));
++++#endif
++++}
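++++
++++// Illustrative usage sketch (the helper name below is made up for the
++++// example): on AArch64 the division is exact, while on ARMv7-A the two
++++// Newton-Raphson steps above give a close but not bit-exact approximation.
++++FORCE_INLINE float _sse2neon_example_div_lane0(void)
++++{
++++ float n[4] = {1.0f, 2.0f, 3.0f, 4.0f};
++++ float d[4] = {2.0f, 4.0f, 8.0f, 16.0f};
++++ __m128 q = _mm_div_ps(vreinterpretq_m128_f32(vld1q_f32(n)),
++++ vreinterpretq_m128_f32(vld1q_f32(d)));
++++ return _mm_cvtss_f32(q); // 0.5f (lanes: 0.5, 0.5, 0.375, 0.25)
++++}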
++++
++++// Divide the lower single-precision (32-bit) floating-point element in a by the
++++// lower single-precision (32-bit) floating-point element in b, store the result
++++// in the lower element of dst, and copy the upper 3 packed elements from a to
++++// the upper elements of dst.
++++// Warning: on ARMv7-A the result is not bit-identical to the Intel
++++// instruction and is not IEEE-754 compliant.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ss
++++FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
++++{
++++ float32_t value =
++++ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
++++}
++++
++++// Extract a 16-bit integer from a, selected with imm8, and store the result in
++++// the lower element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_pi16
++++#define _mm_extract_pi16(a, imm) \
++++ (int32_t) vget_lane_u16(vreinterpret_u16_m64(a), (imm))
++++
++++// Free aligned memory that was allocated with _mm_malloc.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_free
++++#if !defined(SSE2NEON_ALLOC_DEFINED)
++++FORCE_INLINE void _mm_free(void *addr)
++++{
++++ free(addr);
++++}
++++#endif
++++
++++FORCE_INLINE uint64_t _sse2neon_get_fpcr(void)
++++{
++++ uint64_t value;
++++#if defined(_MSC_VER)
++++ value = _ReadStatusReg(ARM64_FPCR);
++++#else
++++ __asm__ __volatile__("mrs %0, FPCR" : "=r"(value)); /* read */
++++#endif
++++ return value;
++++}
++++
++++FORCE_INLINE void _sse2neon_set_fpcr(uint64_t value)
++++{
++++#if defined(_MSC_VER)
++++ _WriteStatusReg(ARM64_FPCR, value);
++++#else
++++ __asm__ __volatile__("msr FPCR, %0" ::"r"(value)); /* write */
++++#endif
++++}
++++
++++// Macro: Get the flush zero bits from the MXCSR control and status register.
++++// The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or
++++// _MM_FLUSH_ZERO_OFF
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_FLUSH_ZERO_MODE
++++FORCE_INLINE unsigned int _sse2neon_mm_get_flush_zero_mode(void)
++++{
++++ union {
++++ fpcr_bitfield field;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t value;
++++#else
++++ uint32_t value;
++++#endif
++++ } r;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ r.value = _sse2neon_get_fpcr();
++++#else
++++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
++++#endif
++++
++++ return r.field.bit24 ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF;
++++}
++++
++++// Macro: Get the rounding mode bits from the MXCSR control and status register.
++++// The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST,
++++// _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_ROUNDING_MODE
++++FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void)
++++{
++++ union {
++++ fpcr_bitfield field;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t value;
++++#else
++++ uint32_t value;
++++#endif
++++ } r;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ r.value = _sse2neon_get_fpcr();
++++#else
++++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
++++#endif
++++
++++ if (r.field.bit22) {
++++ return r.field.bit23 ? _MM_ROUND_TOWARD_ZERO : _MM_ROUND_UP;
++++ } else {
++++ return r.field.bit23 ? _MM_ROUND_DOWN : _MM_ROUND_NEAREST;
++++ }
++++}
++++
++++// Copy a to dst, and insert the 16-bit integer i into dst at the location
++++// specified by imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_pi16
++++#define _mm_insert_pi16(a, b, imm) \
++++ vreinterpret_m64_s16(vset_lane_s16((b), vreinterpret_s16_m64(a), (imm)))
++++
++++// Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point
++++// elements) from memory into dst. mem_addr must be aligned on a 16-byte
++++// boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps
++++FORCE_INLINE __m128 _mm_load_ps(const float *p)
++++{
++++ return vreinterpretq_m128_f32(vld1q_f32(p));
++++}
++++
++++// Load a single-precision (32-bit) floating-point element from memory into all
++++// elements of dst.
++++//
++++// dst[31:0] := MEM[mem_addr+31:mem_addr]
++++// dst[63:32] := MEM[mem_addr+31:mem_addr]
++++// dst[95:64] := MEM[mem_addr+31:mem_addr]
++++// dst[127:96] := MEM[mem_addr+31:mem_addr]
++++//
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps1
++++#define _mm_load_ps1 _mm_load1_ps
++++
++++// Load a single-precision (32-bit) floating-point element from memory into the
++++// lower of dst, and zero the upper 3 elements. mem_addr does not need to be
++++// aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ss
++++FORCE_INLINE __m128 _mm_load_ss(const float *p)
++++{
++++ return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
++++}
++++
++++// Load a single-precision (32-bit) floating-point element from memory into all
++++// elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_ps
++++FORCE_INLINE __m128 _mm_load1_ps(const float *p)
++++{
++++ return vreinterpretq_m128_f32(vld1q_dup_f32(p));
++++}
++++
++++// Load 2 single-precision (32-bit) floating-point elements from memory into the
++++// upper 2 elements of dst, and copy the lower 2 elements from a to dst.
++++// mem_addr does not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pi
++++FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
++++{
++++ return vreinterpretq_m128_f32(
++++ vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p)));
++++}
++++
++++// Load 2 single-precision (32-bit) floating-point elements from memory into the
++++// lower 2 elements of dst, and copy the upper 2 elements from a to dst.
++++// mem_addr does not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pi
++++FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
++++{
++++ return vreinterpretq_m128_f32(
++++ vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a)));
++++}
++++
++++// Load 4 single-precision (32-bit) floating-point elements from memory into dst
++++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
++++// general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_ps
++++FORCE_INLINE __m128 _mm_loadr_ps(const float *p)
++++{
++++ float32x4_t v = vrev64q_f32(vld1q_f32(p));
++++ return vreinterpretq_m128_f32(vextq_f32(v, v, 2));
++++}
++++
++++// Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point
++++// elements) from memory into dst. mem_addr does not need to be aligned on any
++++// particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_ps
++++FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
++++{
++++ // On NEON, alignment does not matter here, so _mm_load_ps and
++++ // _mm_loadu_ps are equivalent.
++++ return vreinterpretq_m128_f32(vld1q_f32(p));
++++}
++++
++++// Load unaligned 16-bit integer from memory into the first element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si16
++++FORCE_INLINE __m128i _mm_loadu_si16(const void *p)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0));
++++}
++++
++++// Load unaligned 64-bit integer from memory into the first element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si64
++++FORCE_INLINE __m128i _mm_loadu_si64(const void *p)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0)));
++++}
++++
++++// Allocate size bytes of memory, aligned to the alignment specified in align,
++++// and return a pointer to the allocated memory. _mm_free should be used to free
++++// memory that is allocated with _mm_malloc.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_malloc
++++#if !defined(SSE2NEON_ALLOC_DEFINED)
++++FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
++++{
++++ void *ptr;
++++ if (align == 1)
++++ return malloc(size);
++++ if (align == 2 || (sizeof(void *) == 8 && align == 4))
++++ align = sizeof(void *);
++++ if (!posix_memalign(&ptr, align, size))
++++ return ptr;
++++ return NULL;
++++}
++++#endif
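++++
++++// Illustrative usage sketch (the helper is an example, not an intrinsic):
++++// memory from _mm_malloc is released with _mm_free, never with free().
++++FORCE_INLINE int _sse2neon_example_aligned_alloc(void)
++++{
++++ // 64 floats, 16-byte aligned, suitable for the aligned load/store
++++ // intrinsics in this file.
++++ float *buf = (float *) _mm_malloc(64 * sizeof(float), 16);
++++ if (!buf)
++++ return 0;
++++ vst1q_f32(buf, vdupq_n_f32(0.0f)); // touch the first four lanes
++++ _mm_free(buf);
++++ return 1;
++++}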
++++
++++// Conditionally store 8-bit integer elements from a into memory using mask
++++// (elements are not stored when the highest bit is not set in the corresponding
++++// element) and a non-temporal memory hint.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmove_si64
++++FORCE_INLINE void _mm_maskmove_si64(__m64 a, __m64 mask, char *mem_addr)
++++{
++++ int8x8_t shr_mask = vshr_n_s8(vreinterpret_s8_m64(mask), 7);
++++ __m128 b = _mm_load_ps((const float *) mem_addr);
++++ int8x8_t masked =
++++ vbsl_s8(vreinterpret_u8_s8(shr_mask), vreinterpret_s8_m64(a),
++++ vreinterpret_s8_u64(vget_low_u64(vreinterpretq_u64_m128(b))));
++++ vst1_s8((int8_t *) mem_addr, masked);
++++}
++++
++++// Conditionally store 8-bit integer elements from a into memory using mask
++++// (elements are not stored when the highest bit is not set in the corresponding
++++// element) and a non-temporal memory hint.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_maskmovq
++++#define _m_maskmovq(a, mask, mem_addr) _mm_maskmove_si64(a, mask, mem_addr)
++++
++++// Compare packed signed 16-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pi16
++++FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_s16(
++++ vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b,
++++// and store packed maximum values in dst. dst does not follow the IEEE Standard
++++// for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or
++++// signed-zero values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ps
++++FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
++++{
++++#if SSE2NEON_PRECISE_MINMAX
++++ float32x4_t _a = vreinterpretq_f32_m128(a);
++++ float32x4_t _b = vreinterpretq_f32_m128(b);
++++ return vreinterpretq_m128_f32(vbslq_f32(vcgtq_f32(_a, _b), _a, _b));
++++#else
++++ return vreinterpretq_m128_f32(
++++ vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++#endif
++++}
++++
++++// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pu8
++++FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_u8(
++++ vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b, store the maximum value in the lower element of dst, and copy the upper 3
++++// packed elements from a to the upper element of dst. dst does not follow the
++++// IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when
++++// inputs are NaN or signed-zero values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ss
++++FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
++++{
++++ float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0);
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
++++}
++++
++++// Compare packed signed 16-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pi16
++++FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_s16(
++++ vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
++++}
++++
++++// Compare packed single-precision (32-bit) floating-point elements in a and b,
++++// and store packed minimum values in dst. dst does not follow the IEEE Standard
++++// for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or
++++// signed-zero values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ps
++++FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
++++{
++++#if SSE2NEON_PRECISE_MINMAX
++++ float32x4_t _a = vreinterpretq_f32_m128(a);
++++ float32x4_t _b = vreinterpretq_f32_m128(b);
++++ return vreinterpretq_m128_f32(vbslq_f32(vcltq_f32(_a, _b), _a, _b));
++++#else
++++ return vreinterpretq_m128_f32(
++++ vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++#endif
++++}
++++
++++// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pu8
++++FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_u8(
++++ vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
++++}
++++
++++// Compare the lower single-precision (32-bit) floating-point elements in a and
++++// b, store the minimum value in the lower element of dst, and copy the upper 3
++++// packed elements from a to the upper element of dst. dst does not follow the
++++// IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when
++++// inputs are NaN or signed-zero values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ss
++++FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
++++{
++++ float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0);
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
++++}
++++
++++// Move the lower single-precision (32-bit) floating-point element from b to the
++++// lower element of dst, and copy the upper 3 packed elements from a to the
++++// upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_ss
++++FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0),
++++ vreinterpretq_f32_m128(a), 0));
++++}
++++
++++// Move the upper 2 single-precision (32-bit) floating-point elements from b to
++++// the lower 2 elements of dst, and copy the upper 2 elements from a to the
++++// upper 2 elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehl_ps
++++FORCE_INLINE __m128 _mm_movehl_ps(__m128 a, __m128 b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_u64(
++++ vzip2q_u64(vreinterpretq_u64_m128(b), vreinterpretq_u64_m128(a)));
++++#else
++++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
++++#endif
++++}
++++
++++// Move the lower 2 single-precision (32-bit) floating-point elements from b to
++++// the upper 2 elements of dst, and copy the lower 2 elements from a to the
++++// lower 2 elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movelh_ps
++++FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
++++{
++++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
++++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
++++ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
++++}
++++
++++// Create mask from the most significant bit of each 8-bit element in a, and
++++// store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pi8
++++FORCE_INLINE int _mm_movemask_pi8(__m64 a)
++++{
++++ uint8x8_t input = vreinterpret_u8_m64(a);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ static const int8_t shift[8] = {0, 1, 2, 3, 4, 5, 6, 7};
++++ uint8x8_t tmp = vshr_n_u8(input, 7);
++++ return vaddv_u8(vshl_u8(tmp, vld1_s8(shift)));
++++#else
++++ // Refer to the implementation of `_mm_movemask_epi8`
++++ uint16x4_t high_bits = vreinterpret_u16_u8(vshr_n_u8(input, 7));
++++ uint32x2_t paired16 =
++++ vreinterpret_u32_u16(vsra_n_u16(high_bits, high_bits, 7));
++++ uint8x8_t paired32 =
++++ vreinterpret_u8_u32(vsra_n_u32(paired16, paired16, 14));
++++ return vget_lane_u8(paired32, 0) | ((int) vget_lane_u8(paired32, 4) << 4);
++++#endif
++++}
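++++
++++// Worked example (the helper is illustrative only): the mask collects the
++++// most significant bit of each byte, with byte lane 0 landing in bit 0.
++++// vcreate_u8 takes the bytes in little-endian order, so lane 0 is the least
++++// significant byte of the constant below.
++++FORCE_INLINE int _sse2neon_example_movemask_pi8(void)
++++{
++++ // lanes 0..7 = 80 01 80 00 FF 7F 80 00 -> MSBs 1,0,1,0,1,0,1,0
++++ __m64 a = vreinterpret_m64_u8(vcreate_u8(0x00807FFF00800180ULL));
++++ return _mm_movemask_pi8(a); // 0x55
++++}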
++++
++++// Set each bit of mask dst based on the most significant bit of the
++++// corresponding packed single-precision (32-bit) floating-point element in a.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_ps
++++FORCE_INLINE int _mm_movemask_ps(__m128 a)
++++{
++++ uint32x4_t input = vreinterpretq_u32_m128(a);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ static const int32_t shift[4] = {0, 1, 2, 3};
++++ uint32x4_t tmp = vshrq_n_u32(input, 31);
++++ return vaddvq_u32(vshlq_u32(tmp, vld1q_s32(shift)));
++++#else
++++ // Uses the exact same method as _mm_movemask_epi8, see that for details.
++++ // Shift out everything but the sign bits with a 32-bit unsigned shift
++++ // right.
++++ uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
++++ // Merge the two pairs together with a 64-bit unsigned shift right + add.
++++ uint8x16_t paired =
++++ vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
++++ // Extract the result.
++++ return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
++++#endif
++++}
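++++
++++// Worked example (illustrative helper): bit i of the result is the sign bit
++++// of lane i, so negative values in lanes 1 and 3 give 0b1010.
++++FORCE_INLINE int _sse2neon_example_movemask_ps(void)
++++{
++++ float v[4] = {4.0f, -3.0f, 2.0f, -1.0f}; // lanes 0..3
++++ return _mm_movemask_ps(vreinterpretq_m128_f32(vld1q_f32(v))); // 0xA
++++}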
++++
++++// Multiply packed single-precision (32-bit) floating-point elements in a and b,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ps
++++FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Multiply the lower single-precision (32-bit) floating-point element in a and
++++// b, store the result in the lower element of dst, and copy the upper 3 packed
++++// elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ss
++++FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_mul_ps(a, b));
++++}
++++
++++// Multiply the packed unsigned 16-bit integers in a and b, producing
++++// intermediate 32-bit integers, and store the high 16 bits of the intermediate
++++// integers in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_pu16
++++FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_u16(vshrn_n_u32(
++++ vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16));
++++}
++++
++++// Compute the bitwise OR of packed single-precision (32-bit) floating-point
++++// elements in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_ps
++++FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_s32(
++++ vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
++++}
++++
++++// Average packed unsigned 8-bit integers in a and b, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgb
++++#define _m_pavgb(a, b) _mm_avg_pu8(a, b)
++++
++++// Average packed unsigned 16-bit integers in a and b, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgw
++++#define _m_pavgw(a, b) _mm_avg_pu16(a, b)
++++
++++// Extract a 16-bit integer from a, selected with imm8, and store the result in
++++// the lower element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pextrw
++++#define _m_pextrw(a, imm) _mm_extract_pi16(a, imm)
++++
++++// Copy a to dst, and insert the 16-bit integer i into dst at the location
++++// specified by imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_pinsrw
++++#define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm)
++++
++++// Compare packed signed 16-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxsw
++++#define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
++++
++++// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxub
++++#define _m_pmaxub(a, b) _mm_max_pu8(a, b)
++++
++++// Compare packed signed 16-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminsw
++++#define _m_pminsw(a, b) _mm_min_pi16(a, b)
++++
++++// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminub
++++#define _m_pminub(a, b) _mm_min_pu8(a, b)
++++
++++// Create mask from the most significant bit of each 8-bit element in a, and
++++// store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmovmskb
++++#define _m_pmovmskb(a) _mm_movemask_pi8(a)
++++
++++// Multiply the packed unsigned 16-bit integers in a and b, producing
++++// intermediate 32-bit integers, and store the high 16 bits of the intermediate
++++// integers in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmulhuw
++++#define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b)
++++
++++// Fetch the line of data from memory that contains address p to a location in
++++// the cache hierarchy specified by the locality hint i.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_prefetch
++++FORCE_INLINE void _mm_prefetch(char const *p, int i)
++++{
++++ (void) i;
++++#if defined(_MSC_VER)
++++ switch (i) {
++++ case _MM_HINT_NTA:
++++ __prefetch2(p, 1);
++++ break;
++++ case _MM_HINT_T0:
++++ __prefetch2(p, 0);
++++ break;
++++ case _MM_HINT_T1:
++++ __prefetch2(p, 2);
++++ break;
++++ case _MM_HINT_T2:
++++ __prefetch2(p, 4);
++++ break;
++++ }
++++#else
++++ switch (i) {
++++ case _MM_HINT_NTA:
++++ __builtin_prefetch(p, 0, 0);
++++ break;
++++ case _MM_HINT_T0:
++++ __builtin_prefetch(p, 0, 3);
++++ break;
++++ case _MM_HINT_T1:
++++ __builtin_prefetch(p, 0, 2);
++++ break;
++++ case _MM_HINT_T2:
++++ __builtin_prefetch(p, 0, 1);
++++ break;
++++ }
++++#endif
++++}
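++++
++++// Usage sketch (illustrative helper, arbitrary prefetch distance): prefetch
++++// a fixed distance ahead while streaming through an array. The hint affects
++++// performance only, never correctness.
++++FORCE_INLINE float _sse2neon_example_prefetch_sum(const float *p, int n)
++++{
++++ float acc = 0.0f;
++++ int i;
++++ for (i = 0; i < n; i++) {
++++ if (i + 16 < n)
++++ _mm_prefetch((const char *) (p + i + 16), _MM_HINT_T0);
++++ acc += p[i];
++++ }
++++ return acc;
++++}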
++++
++++// Compute the absolute differences of packed unsigned 8-bit integers in a and
++++// b, then horizontally sum each consecutive 8 differences to produce four
++++// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
++++// 16 bits of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_psadbw
++++#define _m_psadbw(a, b) _mm_sad_pu8(a, b)
++++
++++// Shuffle 16-bit integers in a using the control in imm8, and store the results
++++// in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pshufw
++++#define _m_pshufw(a, imm) _mm_shuffle_pi16(a, imm)
++++
++++// Compute the approximate reciprocal of packed single-precision (32-bit)
++++// floating-point elements in a, and store the results in dst. The maximum
++++// relative error for this approximation is less than 1.5*2^-12.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ps
++++FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
++++{
++++ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
++++ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
++++ return vreinterpretq_m128_f32(recip);
++++}
++++
++++// Compute the approximate reciprocal of the lower single-precision (32-bit)
++++// floating-point element in a, store the result in the lower element of dst,
++++// and copy the upper 3 packed elements from a to the upper elements of dst. The
++++// maximum relative error for this approximation is less than 1.5*2^-12.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ss
++++FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
++++{
++++ return _mm_move_ss(a, _mm_rcp_ps(a));
++++}
++++
++++// Compute the approximate reciprocal square root of packed single-precision
++++// (32-bit) floating-point elements in a, and store the results in dst. The
++++// maximum relative error for this approximation is less than 1.5*2^-12.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ps
++++FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
++++{
++++ float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
++++
++++ // Generate masks for detecting whether input has any 0.0f/-0.0f
++++ // (which becomes positive/negative infinity by IEEE-754 arithmetic rules).
++++ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
++++ const uint32x4_t neg_inf = vdupq_n_u32(0xFF800000);
++++ const uint32x4_t has_pos_zero =
++++ vceqq_u32(pos_inf, vreinterpretq_u32_f32(out));
++++ const uint32x4_t has_neg_zero =
++++ vceqq_u32(neg_inf, vreinterpretq_u32_f32(out));
++++
++++ out = vmulq_f32(
++++ out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
++++
++++ // Set output vector element to infinity/negative-infinity if
++++ // the corresponding input vector element is 0.0f/-0.0f.
++++ out = vbslq_f32(has_pos_zero, (float32x4_t) pos_inf, out);
++++ out = vbslq_f32(has_neg_zero, (float32x4_t) neg_inf, out);
++++
++++ return vreinterpretq_m128_f32(out);
++++}
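++++
++++// Worked example (illustrative helper): the estimate is accurate to roughly
++++// 12 bits, and the zero handling above makes rsqrt(+0.0f) return +infinity,
++++// matching the x86 instruction.
++++FORCE_INLINE __m128 _sse2neon_example_rsqrt(void)
++++{
++++ float v[4] = {4.0f, 16.0f, 64.0f, 0.0f};
++++ // lanes ~= 0.5, 0.25, 0.125, +inf (approximate, except the infinity)
++++ return _mm_rsqrt_ps(vreinterpretq_m128_f32(vld1q_f32(v)));
++++}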
++++
++++// Compute the approximate reciprocal square root of the lower single-precision
++++// (32-bit) floating-point element in a, store the result in the lower element
++++// of dst, and copy the upper 3 packed elements from a to the upper elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ss
++++FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
++++{
++++ return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
++++}
++++
++++// Compute the absolute differences of packed unsigned 8-bit integers in a and
++++// b, then horizontally sum each consecutive 8 differences to produce four
++++// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
++++// 16 bits of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_pu8
++++FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b)
++++{
++++ uint64x1_t t = vpaddl_u32(vpaddl_u16(
++++ vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)))));
++++ return vreinterpret_m64_u16(
++++ vset_lane_u16((int) vget_lane_u64(t, 0), vdup_n_u16(0), 0));
++++}
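++++
++++// Worked example (illustrative helper): eight absolute byte differences of 2
++++// sum to 16, which lands in the low 16-bit lane of the result.
++++FORCE_INLINE int _sse2neon_example_sad_pu8(void)
++++{
++++ __m64 a = vreinterpret_m64_u8(vdup_n_u8(5));
++++ __m64 b = vreinterpret_m64_u8(vdup_n_u8(3));
++++ return _mm_extract_pi16(_mm_sad_pu8(a, b), 0); // 16
++++}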
++++
++++// Macro: Set the flush zero bits of the MXCSR control and status register to
++++// the value in unsigned 32-bit integer a. The flush zero may contain any of the
++++// following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_FLUSH_ZERO_MODE
++++FORCE_INLINE void _sse2neon_mm_set_flush_zero_mode(unsigned int flag)
++++{
++++ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
++++ // regardless of the value of the FZ bit.
++++ union {
++++ fpcr_bitfield field;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t value;
++++#else
++++ uint32_t value;
++++#endif
++++ } r;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ r.value = _sse2neon_get_fpcr();
++++#else
++++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
++++#endif
++++
++++ r.field.bit24 = (flag & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_ON;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ _sse2neon_set_fpcr(r.value);
++++#else
++++ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
++++#endif
++++}
++++
++++// Set packed single-precision (32-bit) floating-point elements in dst with the
++++// supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps
++++FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
++++{
++++ float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
++++ return vreinterpretq_m128_f32(vld1q_f32(data));
++++}
++++
++++// Broadcast single-precision (32-bit) floating-point value a to all elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps1
++++FORCE_INLINE __m128 _mm_set_ps1(float _w)
++++{
++++ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
++++}
++++
++++// Macro: Set the rounding mode bits of the MXCSR control and status register to
++++// the value in unsigned 32-bit integer a. The rounding mode may contain any of
++++// the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP,
++++// _MM_ROUND_TOWARD_ZERO
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_ROUNDING_MODE
++++FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding)
++++{
++++ union {
++++ fpcr_bitfield field;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t value;
++++#else
++++ uint32_t value;
++++#endif
++++ } r;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ r.value = _sse2neon_get_fpcr();
++++#else
++++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
++++#endif
++++
++++ switch (rounding) {
++++ case _MM_ROUND_TOWARD_ZERO:
++++ r.field.bit22 = 1;
++++ r.field.bit23 = 1;
++++ break;
++++ case _MM_ROUND_DOWN:
++++ r.field.bit22 = 0;
++++ r.field.bit23 = 1;
++++ break;
++++ case _MM_ROUND_UP:
++++ r.field.bit22 = 1;
++++ r.field.bit23 = 0;
++++ break;
++++ default: //_MM_ROUND_NEAREST
++++ r.field.bit22 = 0;
++++ r.field.bit23 = 0;
++++ }
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ _sse2neon_set_fpcr(r.value);
++++#else
++++ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
++++#endif
++++}
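++++
++++// Usage sketch (illustrative helper): the getter and setter above operate on
++++// the real FPCR/FPSCR, so a mode change affects _mm_cvt_ss2si-style
++++// conversions in the current thread until the previous mode is restored.
++++FORCE_INLINE int _sse2neon_example_round_toward_zero(float x)
++++{
++++ unsigned int saved = _MM_GET_ROUNDING_MODE();
++++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
++++ // e.g. x = 2.7f yields 2 here instead of 3
++++ int r = _mm_cvtss_si32(vreinterpretq_m128_f32(vdupq_n_f32(x)));
++++ _MM_SET_ROUNDING_MODE((int) saved);
++++ return r;
++++}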
++++
++++// Copy single-precision (32-bit) floating-point element a to the lower element
++++// of dst, and zero the upper 3 elements.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ss
++++FORCE_INLINE __m128 _mm_set_ss(float a)
++++{
++++ return vreinterpretq_m128_f32(vsetq_lane_f32(a, vdupq_n_f32(0), 0));
++++}
++++
++++// Broadcast single-precision (32-bit) floating-point value a to all elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_ps
++++FORCE_INLINE __m128 _mm_set1_ps(float _w)
++++{
++++ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
++++}
++++
++++// Set the MXCSR control and status register with the value in unsigned 32-bit
++++// integer a.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setcsr
++++// FIXME: _mm_setcsr() implementation supports changing the rounding mode only.
++++FORCE_INLINE void _mm_setcsr(unsigned int a)
++++{
++++ _MM_SET_ROUNDING_MODE(a);
++++}
++++
++++// Get the unsigned 32-bit value of the MXCSR control and status register.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getcsr
++++// FIXME: _mm_getcsr() implementation supports reading the rounding mode only.
++++FORCE_INLINE unsigned int _mm_getcsr(void)
++++{
++++ return _MM_GET_ROUNDING_MODE();
++++}
++++
++++// Set packed single-precision (32-bit) floating-point elements in dst with the
++++// supplied values in reverse order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_ps
++++FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
++++{
++++ float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
++++ return vreinterpretq_m128_f32(vld1q_f32(data));
++++}
++++
++++// Return vector of type __m128 with all elements set to zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_ps
++++FORCE_INLINE __m128 _mm_setzero_ps(void)
++++{
++++ return vreinterpretq_m128_f32(vdupq_n_f32(0));
++++}
++++
++++// Shuffle 16-bit integers in a using the control in imm8, and store the results
++++// in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi16
++++#ifdef _sse2neon_shuffle
++++#define _mm_shuffle_pi16(a, imm) \
++++ vreinterpret_m64_s16(vshuffle_s16( \
++++ vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \
++++ ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3)))
++++#else
++++#define _mm_shuffle_pi16(a, imm) \
++++ _sse2neon_define1( \
++++ __m64, a, int16x4_t ret; \
++++ ret = vmov_n_s16( \
++++ vget_lane_s16(vreinterpret_s16_m64(_a), (imm) & (0x3))); \
++++ ret = vset_lane_s16( \
++++ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 2) & 0x3), ret, \
++++ 1); \
++++ ret = vset_lane_s16( \
++++ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 4) & 0x3), ret, \
++++ 2); \
++++ ret = vset_lane_s16( \
++++ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 6) & 0x3), ret, \
++++ 3); \
++++ _sse2neon_return(vreinterpret_m64_s16(ret));)
++++#endif
++++
++++// Perform a serializing operation on all store-to-memory instructions that were
++++// issued prior to this instruction. Guarantees that every store instruction
++++// that precedes, in program order, is globally visible before any store
++++// instruction which follows the fence in program order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sfence
++++FORCE_INLINE void _mm_sfence(void)
++++{
++++ _sse2neon_smp_mb();
++++}
++++
++++// Perform a serializing operation on all load-from-memory and store-to-memory
++++// instructions that were issued prior to this instruction. Guarantees that
++++// every memory access that precedes, in program order, the memory fence
++++// instruction is globally visible before any memory instruction which follows
++++// the fence in program order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mfence
++++FORCE_INLINE void _mm_mfence(void)
++++{
++++ _sse2neon_smp_mb();
++++}
++++
++++// Perform a serializing operation on all load-from-memory instructions that
++++// were issued prior to this instruction. Guarantees that every load instruction
++++// that precedes, in program order, is globally visible before any load
++++// instruction which follows the fence in program order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lfence
++++FORCE_INLINE void _mm_lfence(void)
++++{
++++ _sse2neon_smp_mb();
++++}
++++
++++// FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
++++// int imm)
++++#ifdef _sse2neon_shuffle
++++#define _mm_shuffle_ps(a, b, imm) \
++++ __extension__({ \
++++ float32x4_t _input1 = vreinterpretq_f32_m128(a); \
++++ float32x4_t _input2 = vreinterpretq_f32_m128(b); \
++++ float32x4_t _shuf = \
++++ vshuffleq_s32(_input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
++++ (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
++++ vreinterpretq_m128_f32(_shuf); \
++++ })
++++#else // generic
++++#define _mm_shuffle_ps(a, b, imm) \
++++ _sse2neon_define2( \
++++ __m128, a, b, __m128 ret; switch (imm) { \
++++ case _MM_SHUFFLE(1, 0, 3, 2): \
++++ ret = _mm_shuffle_ps_1032(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(2, 3, 0, 1): \
++++ ret = _mm_shuffle_ps_2301(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(0, 3, 2, 1): \
++++ ret = _mm_shuffle_ps_0321(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(2, 1, 0, 3): \
++++ ret = _mm_shuffle_ps_2103(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(1, 0, 1, 0): \
++++ ret = _mm_movelh_ps(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(1, 0, 0, 1): \
++++ ret = _mm_shuffle_ps_1001(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(0, 1, 0, 1): \
++++ ret = _mm_shuffle_ps_0101(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(3, 2, 1, 0): \
++++ ret = _mm_shuffle_ps_3210(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(0, 0, 1, 1): \
++++ ret = _mm_shuffle_ps_0011(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(0, 0, 2, 2): \
++++ ret = _mm_shuffle_ps_0022(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(2, 2, 0, 0): \
++++ ret = _mm_shuffle_ps_2200(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(3, 2, 0, 2): \
++++ ret = _mm_shuffle_ps_3202(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(3, 2, 3, 2): \
++++ ret = _mm_movehl_ps(_b, _a); \
++++ break; \
++++ case _MM_SHUFFLE(1, 1, 3, 3): \
++++ ret = _mm_shuffle_ps_1133(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(2, 0, 1, 0): \
++++ ret = _mm_shuffle_ps_2010(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(2, 0, 0, 1): \
++++ ret = _mm_shuffle_ps_2001(_a, _b); \
++++ break; \
++++ case _MM_SHUFFLE(2, 0, 3, 2): \
++++ ret = _mm_shuffle_ps_2032(_a, _b); \
++++ break; \
++++ default: \
++++ ret = _mm_shuffle_ps_default(_a, _b, (imm)); \
++++ break; \
++++ } _sse2neon_return(ret);)
++++#endif
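++++
++++// Worked example (illustrative helper): the two low result lanes are selected
++++// from the first operand and the two high result lanes from the second, with
++++// the _MM_SHUFFLE arguments read from right to left as lane selectors.
++++FORCE_INLINE __m128 _sse2neon_example_shuffle_ps(void)
++++{
++++ float va[4] = {10.0f, 11.0f, 12.0f, 13.0f};
++++ float vb[4] = {20.0f, 21.0f, 22.0f, 23.0f};
++++ __m128 a = vreinterpretq_m128_f32(vld1q_f32(va));
++++ __m128 b = vreinterpretq_m128_f32(vld1q_f32(vb));
++++ // lanes: a1, a3, b0, b2 -> 11, 13, 20, 22
++++ return _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 3, 1));
++++}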
++++
++++// Compute the square root of packed single-precision (32-bit) floating-point
++++// elements in a, and store the results in dst.
++++// ARMv7-A NEON has no precise square-root instruction, so the square root is
++++// computed by estimating 1/sqrt(in), refining the estimate with Newton-Raphson
++++// iterations, and multiplying in by it (sqrt(s) = s * 1/sqrt(s)).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ps
++++FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
++++#else
++++ float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
++++
++++ // Test for vrsqrteq_f32(0) -> positive infinity case.
++++ // Change to zero, so that s * 1/sqrt(s) result is zero too.
++++ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
++++ const uint32x4_t div_by_zero =
++++ vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
++++ recip = vreinterpretq_f32_u32(
++++ vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
++++
++++ recip = vmulq_f32(
++++ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
++++ recip);
++++ // Additional Newton-Raphson iteration for accuracy
++++ recip = vmulq_f32(
++++ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
++++ recip);
++++
++++ // sqrt(s) = s * 1/sqrt(s)
++++ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
++++#endif
++++}
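++++
++++// Worked example (illustrative helper): the result is exact on AArch64 and a
++++// close approximation on ARMv7-A, where the zero handling above also keeps
++++// sqrt(0) equal to 0.
++++FORCE_INLINE __m128 _sse2neon_example_sqrt(void)
++++{
++++ float v[4] = {0.0f, 1.0f, 4.0f, 9.0f};
++++ // lanes: 0, 1, 2, 3 (approximate on ARMv7-A)
++++ return _mm_sqrt_ps(vreinterpretq_m128_f32(vld1q_f32(v)));
++++}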
++++
++++// Compute the square root of the lower single-precision (32-bit) floating-point
++++// element in a, store the result in the lower element of dst, and copy the
++++// upper 3 packed elements from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ss
++++FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
++++{
++++ float32_t value =
++++ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
++++ return vreinterpretq_m128_f32(
++++ vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
++++}
++++
++++// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point
++++// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
++++// or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps
++++FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
++++{
++++ vst1q_f32(p, vreinterpretq_f32_m128(a));
++++}
++++
++++// Store the lower single-precision (32-bit) floating-point element from a into
++++// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
++++// boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps1
++++FORCE_INLINE void _mm_store_ps1(float *p, __m128 a)
++++{
++++ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
++++ vst1q_f32(p, vdupq_n_f32(a0));
++++}
++++
++++// Store the lower single-precision (32-bit) floating-point element from a into
++++// memory. mem_addr does not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ss
++++FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
++++{
++++ vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
++++}
++++
++++// Store the lower single-precision (32-bit) floating-point element from a into
++++// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
++++// boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_ps
++++#define _mm_store1_ps _mm_store_ps1
++++
++++// Store the upper 2 single-precision (32-bit) floating-point elements from a
++++// into memory.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pi
++++FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
++++{
++++ *p = vreinterpret_m64_f32(vget_high_f32(a));
++++}
++++
++++// Store the lower 2 single-precision (32-bit) floating-point elements from a
++++// into memory.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pi
++++FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
++++{
++++ *p = vreinterpret_m64_f32(vget_low_f32(a));
++++}
++++
++++// Store 4 single-precision (32-bit) floating-point elements from a into memory
++++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
++++// general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_ps
++++FORCE_INLINE void _mm_storer_ps(float *p, __m128 a)
++++{
++++ float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a));
++++ float32x4_t rev = vextq_f32(tmp, tmp, 2);
++++ vst1q_f32(p, rev);
++++}
++++
++++// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point
++++// elements) from a into memory. mem_addr does not need to be aligned on any
++++// particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_ps
++++FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
++++{
++++ vst1q_f32(p, vreinterpretq_f32_m128(a));
++++}
++++
++++// Stores 16-bits of integer data a at the address p.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si16
++++FORCE_INLINE void _mm_storeu_si16(void *p, __m128i a)
++++{
++++ vst1q_lane_s16((int16_t *) p, vreinterpretq_s16_m128i(a), 0);
++++}
++++
++++// Stores 64-bits of integer data a at the address p.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si64
++++FORCE_INLINE void _mm_storeu_si64(void *p, __m128i a)
++++{
++++ vst1q_lane_s64((int64_t *) p, vreinterpretq_s64_m128i(a), 0);
++++}
++++
++++// Store 64-bits of integer data from a into memory using a non-temporal memory
++++// hint.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pi
++++FORCE_INLINE void _mm_stream_pi(__m64 *p, __m64 a)
++++{
++++ vst1_s64((int64_t *) p, vreinterpret_s64_m64(a));
++++}
++++
++++// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-
++++// point elements) from a into memory using a non-temporal memory hint.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_ps
++++FORCE_INLINE void _mm_stream_ps(float *p, __m128 a)
++++{
++++#if __has_builtin(__builtin_nontemporal_store)
++++ __builtin_nontemporal_store(a, (float32x4_t *) p);
++++#else
++++ vst1q_f32(p, vreinterpretq_f32_m128(a));
++++#endif
++++}
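++++
++++// Usage sketch (illustrative helper; dst is assumed 16-byte aligned and n a
++++// multiple of 4): non-temporal stores are typically followed by _mm_sfence
++++// before another agent reads the destination buffer.
++++FORCE_INLINE void _sse2neon_example_stream_copy(float *dst,
++++ const float *src,
++++ int n)
++++{
++++ int i;
++++ for (i = 0; i + 4 <= n; i += 4)
++++ _mm_stream_ps(dst + i, vreinterpretq_m128_f32(vld1q_f32(src + i)));
++++ _mm_sfence();
++++}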
++++
++++// Subtract packed single-precision (32-bit) floating-point elements in b from
++++// packed single-precision (32-bit) floating-point elements in a, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ps
++++FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_f32(
++++ vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++}
++++
++++// Subtract the lower single-precision (32-bit) floating-point element in b from
++++// the lower single-precision (32-bit) floating-point element in a, store the
++++// result in the lower element of dst, and copy the upper 3 packed elements from
++++// a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ss
++++FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_sub_ps(a, b));
++++}
++++
++++// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
++++// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
++++// transposed matrix in these vectors (row0 now contains column 0, etc.).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=MM_TRANSPOSE4_PS
++++#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
++++ do { \
++++ float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
++++ float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
++++ row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
++++ vget_low_f32(ROW23.val[0])); \
++++ row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
++++ vget_low_f32(ROW23.val[1])); \
++++ row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
++++ vget_high_f32(ROW23.val[0])); \
++++ row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
++++ vget_high_f32(ROW23.val[1])); \
++++ } while (0)
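++++
++++// Usage sketch (illustrative helper): the macro transposes four row vectors
++++// in place, so after the call row0 holds the first column of the original
++++// 4x4 matrix stored row-major in m.
++++FORCE_INLINE void _sse2neon_example_transpose(float m[16])
++++{
++++ __m128 row0 = vreinterpretq_m128_f32(vld1q_f32(m + 0));
++++ __m128 row1 = vreinterpretq_m128_f32(vld1q_f32(m + 4));
++++ __m128 row2 = vreinterpretq_m128_f32(vld1q_f32(m + 8));
++++ __m128 row3 = vreinterpretq_m128_f32(vld1q_f32(m + 12));
++++ _MM_TRANSPOSE4_PS(row0, row1, row2, row3);
++++ vst1q_f32(m + 0, vreinterpretq_f32_m128(row0));
++++ vst1q_f32(m + 4, vreinterpretq_f32_m128(row1));
++++ vst1q_f32(m + 8, vreinterpretq_f32_m128(row2));
++++ vst1q_f32(m + 12, vreinterpretq_f32_m128(row3));
++++}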
++++
++++// According to the documentation, these intrinsics behave the same as the
++++// non-'u' versions, so they are simply aliased here.
++++#define _mm_ucomieq_ss _mm_comieq_ss
++++#define _mm_ucomige_ss _mm_comige_ss
++++#define _mm_ucomigt_ss _mm_comigt_ss
++++#define _mm_ucomile_ss _mm_comile_ss
++++#define _mm_ucomilt_ss _mm_comilt_ss
++++#define _mm_ucomineq_ss _mm_comineq_ss
++++
++++// Return vector of type __m128i with undefined elements.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_undefined_si128
++++FORCE_INLINE __m128i _mm_undefined_si128(void)
++++{
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma GCC diagnostic push
++++#pragma GCC diagnostic ignored "-Wuninitialized"
++++#endif
++++ __m128i a;
++++#if defined(_MSC_VER)
++++ a = _mm_setzero_si128();
++++#endif
++++ return a;
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma GCC diagnostic pop
++++#endif
++++}
++++
++++// Return vector of type __m128 with undefined elements.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_ps
++++FORCE_INLINE __m128 _mm_undefined_ps(void)
++++{
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma GCC diagnostic push
++++#pragma GCC diagnostic ignored "-Wuninitialized"
++++#endif
++++ __m128 a;
++++#if defined(_MSC_VER)
++++ a = _mm_setzero_ps();
++++#endif
++++ return a;
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma GCC diagnostic pop
++++#endif
++++}
++++
++++// Unpack and interleave single-precision (32-bit) floating-point elements from
++++// the high half a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_ps
++++FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++#else
++++ float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
++++ float32x2x2_t result = vzip_f32(a1, b1);
++++ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave single-precision (32-bit) floating-point elements from
++++// the low half of a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_ps
++++FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++#else
++++ float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
++++ float32x2x2_t result = vzip_f32(a1, b1);
++++ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
++++#endif
++++}
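++++
++++// Worked example (illustrative helper): _mm_unpacklo_ps interleaves the low
++++// halves of its operands; _mm_unpackhi_ps does the same with the high halves.
++++FORCE_INLINE __m128 _sse2neon_example_unpacklo(void)
++++{
++++ float va[4] = {0.0f, 1.0f, 2.0f, 3.0f};
++++ float vb[4] = {10.0f, 11.0f, 12.0f, 13.0f};
++++ __m128 a = vreinterpretq_m128_f32(vld1q_f32(va));
++++ __m128 b = vreinterpretq_m128_f32(vld1q_f32(vb));
++++ // lanes: 0, 10, 1, 11 (_mm_unpackhi_ps would give 2, 12, 3, 13)
++++ return _mm_unpacklo_ps(a, b);
++++}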
++++
++++// Compute the bitwise XOR of packed single-precision (32-bit) floating-point
++++// elements in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_ps
++++FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
++++{
++++ return vreinterpretq_m128_s32(
++++ veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
++++}
++++
++++/* SSE2 */
++++
++++// Add packed 16-bit integers in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi16
++++FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Add packed 32-bit integers in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi32
++++FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Add packed 64-bit integers in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi64
++++FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
++++}
++++
++++// Add packed 8-bit integers in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi8
++++FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Add packed double-precision (64-bit) floating-point elements in a and b, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_pd
++++FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2];
++++ c[0] = da[0] + db[0];
++++ c[1] = da[1] + db[1];
++++ return vld1q_f32((float32_t *) c);
++++#endif
++++}
++++
++++// Add the lower double-precision (64-bit) floating-point element in a and b,
++++// store the result in the lower element of dst, and copy the upper element from
++++// a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_sd
++++FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_add_pd(a, b));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2];
++++ c[0] = da[0] + db[0];
++++ c[1] = da[1];
++++ return vld1q_f32((float32_t *) c);
++++#endif
++++}
++++
++++// Add 64-bit integers a and b, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_si64
++++FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_s64(
++++ vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
++++}
++++
++++// Add packed signed 16-bit integers in a and b using saturation, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi16
++++FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Add packed signed 8-bit integers in a and b using saturation, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi8
++++FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Add packed unsigned 16-bit integers in a and b using saturation, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu16
++++FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
++++}
++++
++++// Add packed unsigned 8-bit integers in a and b using saturation, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu8
++++FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
++++}
++++
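++++// Worked example (editorial, not from upstream): with unsigned saturation,
++++// a byte lane holding 250 added to a lane holding 10 clamps to 255 instead
++++// of wrapping to 4; the signed variants clamp to the INT8/INT16 limits in
++++// the same way.
++++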
++++// Compute the bitwise AND of packed double-precision (64-bit) floating-point
++++// elements in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_pd
++++FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b)
++++{
++++ return vreinterpretq_m128d_s64(
++++ vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
++++}
++++
++++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
++++// and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_si128
++++FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Compute the bitwise NOT of packed double-precision (64-bit) floating-point
++++// elements in a and then AND with b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_pd
++++FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b)
++++{
++++ // *NOTE* argument swap
++++ return vreinterpretq_m128d_s64(
++++ vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a)));
++++}
++++
++++// Compute the bitwise NOT of 128 bits (representing integer data) in a and then
++++// AND with b, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_si128
++++FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vbicq_s32(vreinterpretq_s32_m128i(b),
++++ vreinterpretq_s32_m128i(a))); // *NOTE* argument swap
++++}
++++
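++++// Usage sketch (editorial illustration, not part of upstream sse2neon): the
++++// first operand is the one that gets complemented, so the call below
++++// computes (~mask) & x, i.e. it clears exactly the bits of x selected by
++++// mask. The guard macro and helper name are hypothetical.
++++#ifdef SSE2NEON_USAGE_EXAMPLES
++++FORCE_INLINE __m128i sse2neon_example_clear_bits(__m128i x, __m128i mask)
++++{
++++ return _mm_andnot_si128(mask, x); // (~mask) & x
++++}
++++#endif
++++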
++++// Average packed unsigned 16-bit integers in a and b, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu16
++++FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
++++{
++++ return (__m128i) vrhaddq_u16(vreinterpretq_u16_m128i(a),
++++ vreinterpretq_u16_m128i(b));
++++}
++++
++++// Average packed unsigned 8-bit integers in a and b, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu8
++++FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
++++}
++++
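++++// Worked example (editorial, not from upstream): the average rounds up,
++++// i.e. dst = (a + b + 1) >> 1, so averaging 1 and 2 yields 2, not 1; the
++++// NEON "rounding halving add" instructions match this behaviour exactly.
++++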
++++// Shift a left by imm8 bytes while shifting in zeros, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bslli_si128
++++#define _mm_bslli_si128(a, imm) _mm_slli_si128(a, imm)
++++
++++// Shift a right by imm8 bytes while shifting in zeros, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bsrli_si128
++++#define _mm_bsrli_si128(a, imm) _mm_srli_si128(a, imm)
++++
++++// Cast vector of type __m128d to type __m128. This intrinsic is only used for
++++// compilation and does not generate any instructions, thus it has zero latency.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_ps
++++FORCE_INLINE __m128 _mm_castpd_ps(__m128d a)
++++{
++++ return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a));
++++}
++++
++++// Cast vector of type __m128d to type __m128i. This intrinsic is only used for
++++// compilation and does not generate any instructions, thus it has zero latency.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_si128
++++FORCE_INLINE __m128i _mm_castpd_si128(__m128d a)
++++{
++++ return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a));
++++}
++++
++++// Cast vector of type __m128 to type __m128d. This intrinsic is only used for
++++// compilation and does not generate any instructions, thus it has zero latency.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_pd
++++FORCE_INLINE __m128d _mm_castps_pd(__m128 a)
++++{
++++ return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a));
++++}
++++
++++// Cast vector of type __m128 to type __m128i. This intrinsic is only used for
++++// compilation and does not generate any instructions, thus it has zero latency.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_si128
++++FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
++++{
++++ return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
++++}
++++
++++// Cast vector of type __m128i to type __m128d. This intrinsic is only used for
++++// compilation and does not generate any instructions, thus it has zero latency.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_pd
++++FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a));
++++#else
++++ return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a));
++++#endif
++++}
++++
++++// Cast vector of type __m128i to type __m128. This intrinsic is only used for
++++// compilation and does not generate any instructions, thus it has zero latency.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_ps
++++FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
++++{
++++ return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
++++}
++++
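++++// Editorial note (not from upstream): the _mm_cast* helpers above
++++// reinterpret the raw 128 bits without modifying them, unlike the _mm_cvt*
++++// conversions further below. For example, _mm_castps_si128 applied to a
++++// vector of 1.0f yields lanes holding 0x3F800000 (the IEEE-754 bit pattern
++++// of 1.0f), whereas _mm_cvtps_epi32 would yield lanes holding the integer 1.
++++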
++++// Invalidate and flush the cache line that contains p from all levels of the
++++// cache hierarchy.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clflush
++++#if defined(__APPLE__)
++++#include <libkern/OSCacheControl.h>
++++#endif
++++FORCE_INLINE void _mm_clflush(void const *p)
++++{
++++ (void) p;
++++
++++ /* sys_icache_invalidate has been available since macOS 10.5.
++++ * On non-jailbroken iOS devices it compiles but has no effect.
++++ */
++++#if defined(__APPLE__)
++++ sys_icache_invalidate((void *) (uintptr_t) p, SSE2NEON_CACHELINE_SIZE);
++++#elif defined(__GNUC__) || defined(__clang__)
++++ uintptr_t ptr = (uintptr_t) p;
++++ __builtin___clear_cache((char *) ptr,
++++ (char *) ptr + SSE2NEON_CACHELINE_SIZE);
++++#elif (_MSC_VER) && SSE2NEON_INCLUDE_WINDOWS_H
++++ FlushInstructionCache(GetCurrentProcess(), p, SSE2NEON_CACHELINE_SIZE);
++++#endif
++++}
++++
++++// Compare packed 16-bit integers in a and b for equality, and store the results
++++// in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi16
++++FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Compare packed 32-bit integers in a and b for equality, and store the results
++++// in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi32
++++FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u32(
++++ vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Compare packed 8-bit integers in a and b for equality, and store the results
++++// in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi8
++++FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for equality, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_pd
++++FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(
++++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
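++++ // Editorial note (not from upstream): this fallback compares raw bit
++++ // patterns, so unlike an IEEE comparison it reports +0.0 != -0.0 and
++++ // treats a NaN as equal to a bit-identical NaN.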
++++ uint32x4_t cmp =
++++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
++++ uint32x4_t swapped = vrev64q_u32(cmp);
++++ return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for equality, store the result in the lower element of dst, and copy the
++++// upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_sd
++++FORCE_INLINE __m128d _mm_cmpeq_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_cmpeq_pd(a, b));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for greater-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_pd
++++FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(
++++ vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = (*(double *) &a1) >= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for greater-than-or-equal, store the result in the lower element of dst,
++++// and copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_sd
++++FORCE_INLINE __m128d _mm_cmpge_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_cmpge_pd(a, b));
++++#else
++++ // expand "_mm_cmpge_pd()" to reduce unnecessary operations
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = a1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare packed signed 16-bit integers in a and b for greater-than, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi16
++++FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Compare packed signed 32-bit integers in a and b for greater-than, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi32
++++FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u32(
++++ vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Compare packed signed 8-bit integers in a and b for greater-than, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi8
++++FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for greater-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_pd
++++FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(
++++ vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = (*(double *) &a1) > (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for greater-than, store the result in the lower element of dst, and copy
++++// the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_sd
++++FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_cmpgt_pd(a, b));
++++#else
++++ // expand "_mm_cmpge_pd()" to reduce unnecessary operations
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = a1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for less-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_pd
++++FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(
++++ vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = (*(double *) &a1) <= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for less-than-or-equal, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_sd
++++FORCE_INLINE __m128d _mm_cmple_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_cmple_pd(a, b));
++++#else
++++ // expand "_mm_cmpge_pd()" to reduce unnecessary operations
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = a1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare packed signed 16-bit integers in a and b for less-than, and store the
++++// results in dst. Note: This intrinsic emits the pcmpgtw instruction with the
++++// order of the operands switched.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16
++++FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Compare packed signed 32-bit integers in a and b for less-than, and store the
++++// results in dst. Note: This intrinsic emits the pcmpgtd instruction with the
++++// order of the operands switched.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi32
++++FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u32(
++++ vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Compare packed signed 8-bit integers in a and b for less-than, and store the
++++// results in dst. Note: This intrinsic emits the pcmpgtb instruction with the
++++// order of the operands switched.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi8
++++FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for less-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_pd
++++FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(
++++ vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = (*(double *) &a1) < (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for less-than, store the result in the lower element of dst, and copy the
++++// upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_sd
++++FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_cmplt_pd(a, b));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] = a1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for not-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_pd
++++FORCE_INLINE __m128d _mm_cmpneq_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_s32(vmvnq_s32(vreinterpretq_s32_u64(
++++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)))));
++++#else
++++ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
++++ uint32x4_t cmp =
++++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
++++ uint32x4_t swapped = vrev64q_u32(cmp);
++++ return vreinterpretq_m128d_u32(vmvnq_u32(vandq_u32(cmp, swapped)));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for not-equal, store the result in the lower element of dst, and copy the
++++// upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_sd
++++FORCE_INLINE __m128d _mm_cmpneq_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_cmpneq_pd(a, b));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for not-greater-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_pd
++++FORCE_INLINE __m128d _mm_cmpnge_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(veorq_u64(
++++ vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
++++ vdupq_n_u64(UINT64_MAX)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] =
++++ !((*(double *) &a0) >= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] =
++++ !((*(double *) &a1) >= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for not-greater-than-or-equal, store the result in the lower element of
++++// dst, and copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_sd
++++FORCE_INLINE __m128d _mm_cmpnge_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_cmpnge_pd(a, b));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for not-greater-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_pd
++++FORCE_INLINE __m128d _mm_cmpngt_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(veorq_u64(
++++ vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
++++ vdupq_n_u64(UINT64_MAX)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] =
++++ !((*(double *) &a0) > (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] =
++++ !((*(double *) &a1) > (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for not-greater-than, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_sd
++++FORCE_INLINE __m128d _mm_cmpngt_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_cmpngt_pd(a, b));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for not-less-than-or-equal, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_pd
++++FORCE_INLINE __m128d _mm_cmpnle_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(veorq_u64(
++++ vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
++++ vdupq_n_u64(UINT64_MAX)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] =
++++ !((*(double *) &a0) <= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] =
++++ !((*(double *) &a1) <= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for not-less-than-or-equal, store the result in the lower element of dst,
++++// and copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_sd
++++FORCE_INLINE __m128d _mm_cmpnle_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_cmpnle_pd(a, b));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// for not-less-than, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_pd
++++FORCE_INLINE __m128d _mm_cmpnlt_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_u64(veorq_u64(
++++ vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
++++ vdupq_n_u64(UINT64_MAX)));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] =
++++ !((*(double *) &a0) < (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
++++ d[1] =
++++ !((*(double *) &a1) < (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b for not-less-than, store the result in the lower element of dst, and copy
++++// the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_sd
++++FORCE_INLINE __m128d _mm_cmpnlt_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_cmpnlt_pd(a, b));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// to see if neither is NaN, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_pd
++++FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ // Excluding NaNs, any two floating point numbers can be compared.
++++ uint64x2_t not_nan_a =
++++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
++++ uint64x2_t not_nan_b =
++++ vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
++++ return vreinterpretq_m128d_u64(vandq_u64(not_nan_a, not_nan_b));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
++++ (*(double *) &b0) == (*(double *) &b0))
++++ ? ~UINT64_C(0)
++++ : UINT64_C(0);
++++ d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
++++ (*(double *) &b1) == (*(double *) &b1))
++++ ? ~UINT64_C(0)
++++ : UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b to see if neither is NaN, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_sd
++++FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_cmpord_pd(a, b));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t d[2];
++++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
++++ (*(double *) &b0) == (*(double *) &b0))
++++ ? ~UINT64_C(0)
++++ : UINT64_C(0);
++++ d[1] = a1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b
++++// to see if either is NaN, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_pd
++++FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ // NaN is never equal to itself, so a self-comparison detects NaN lanes.
++++ uint64x2_t not_nan_a =
++++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
++++ uint64x2_t not_nan_b =
++++ vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
++++ return vreinterpretq_m128d_s32(
++++ vmvnq_s32(vreinterpretq_s32_u64(vandq_u64(not_nan_a, not_nan_b))));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
++++ (*(double *) &b0) == (*(double *) &b0))
++++ ? UINT64_C(0)
++++ : ~UINT64_C(0);
++++ d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
++++ (*(double *) &b1) == (*(double *) &b1))
++++ ? UINT64_C(0)
++++ : ~UINT64_C(0);
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b to see if either is NaN, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_sd
++++FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_cmpunord_pd(a, b));
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t d[2];
++++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
++++ (*(double *) &b0) == (*(double *) &b0))
++++ ? UINT64_C(0)
++++ : ~UINT64_C(0);
++++ d[1] = a1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
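++++// Usage sketch (editorial illustration, not part of upstream sse2neon):
++++// _mm_cmpord_pd(v, v) yields an all-ones mask in lanes where v is not NaN,
++++// so ANDing it back onto v replaces NaN lanes with +0.0. The guard macro
++++// and helper name are hypothetical.
++++#ifdef SSE2NEON_USAGE_EXAMPLES
++++FORCE_INLINE __m128d sse2neon_example_zero_nan_lanes(__m128d v)
++++{
++++ return _mm_and_pd(v, _mm_cmpord_pd(v, v));
++++}
++++#endif
++++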
++++// Compare the lower double-precision (64-bit) floating-point element in a and b
++++// for greater-than-or-equal, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_sd
++++FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vgetq_lane_u64(vcgeq_f64(a, b), 0) & 0x1;
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++
++++ return (*(double *) &a0 >= *(double *) &b0);
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point element in a and b
++++// for greater-than, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_sd
++++FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vgetq_lane_u64(vcgtq_f64(a, b), 0) & 0x1;
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++
++++ return (*(double *) &a0 > *(double *) &b0);
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point element in a and b
++++// for less-than-or-equal, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_sd
++++FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vgetq_lane_u64(vcleq_f64(a, b), 0) & 0x1;
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++
++++ return (*(double *) &a0 <= *(double *) &b0);
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point element in a and b
++++// for less-than, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_sd
++++FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vgetq_lane_u64(vcltq_f64(a, b), 0) & 0x1;
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++
++++ return (*(double *) &a0 < *(double *) &b0);
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point element in a and b
++++// for equality, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_sd
++++FORCE_INLINE int _mm_comieq_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vgetq_lane_u64(vceqq_f64(a, b), 0) & 0x1;
++++#else
++++ uint32x4_t a_not_nan =
++++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(a));
++++ uint32x4_t b_not_nan =
++++ vceqq_u32(vreinterpretq_u32_m128d(b), vreinterpretq_u32_m128d(b));
++++ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
++++ uint32x4_t a_eq_b =
++++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
++++ uint64x2_t and_results = vandq_u64(vreinterpretq_u64_u32(a_and_b_not_nan),
++++ vreinterpretq_u64_u32(a_eq_b));
++++ return vgetq_lane_u64(and_results, 0) & 0x1;
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point element in a and b
++++// for not-equal, and return the boolean result (0 or 1).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_sd
++++FORCE_INLINE int _mm_comineq_sd(__m128d a, __m128d b)
++++{
++++ return !_mm_comieq_sd(a, b);
++++}
++++
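++++// Editorial note (not from upstream): unlike the _mm_cmp*_sd family above,
++++// which produces an all-ones/all-zeros lane mask, the _mm_comi*_sd helpers
++++// return a plain 0/1 int and can drive ordinary control flow, e.g.
++++//   if (_mm_comilt_sd(a, b)) { /* taken only when a[0] < b[0] */ }
++++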
++++// Convert packed signed 32-bit integers in a to packed double-precision
++++// (64-bit) floating-point elements, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_pd
++++FORCE_INLINE __m128d _mm_cvtepi32_pd(__m128i a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vcvtq_f64_s64(vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a)))));
++++#else
++++ double a0 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
++++ double a1 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 1);
++++ return _mm_set_pd(a1, a0);
++++#endif
++++}
++++
++++// Convert packed signed 32-bit integers in a to packed single-precision
++++// (32-bit) floating-point elements, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_ps
++++FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
++++{
++++ return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
++++}
++++
++++// Convert packed double-precision (64-bit) floating-point elements in a to
++++// packed 32-bit integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epi32
++++FORCE_INLINE __m128i _mm_cvtpd_epi32(__m128d a)
++++{
++++// vrnd32xq_f64 not supported on clang
++++#if defined(__ARM_FEATURE_FRINT) && !defined(__clang__)
++++ float64x2_t rounded = vrnd32xq_f64(vreinterpretq_f64_m128d(a));
++++ int64x2_t integers = vcvtq_s64_f64(rounded);
++++ return vreinterpretq_m128i_s32(
++++ vcombine_s32(vmovn_s64(integers), vdup_n_s32(0)));
++++#else
++++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
++++ double d0 = ((double *) &rnd)[0];
++++ double d1 = ((double *) &rnd)[1];
++++ return _mm_set_epi32(0, 0, (int32_t) d1, (int32_t) d0);
++++#endif
++++}
++++
++++// Convert packed double-precision (64-bit) floating-point elements in a to
++++// packed 32-bit integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_pi32
++++FORCE_INLINE __m64 _mm_cvtpd_pi32(__m128d a)
++++{
++++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
++++ double d0 = ((double *) &rnd)[0];
++++ double d1 = ((double *) &rnd)[1];
++++ int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) d0, (int32_t) d1};
++++ return vreinterpret_m64_s32(vld1_s32(data));
++++}
++++
++++// Convert packed double-precision (64-bit) floating-point elements in a to
++++// packed single-precision (32-bit) floating-point elements, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_ps
++++FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a));
++++ return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0)));
++++#else
++++ float a0 = (float) ((double *) &a)[0];
++++ float a1 = (float) ((double *) &a)[1];
++++ return _mm_set_ps(0, 0, a1, a0);
++++#endif
++++}
++++
++++// Convert packed signed 32-bit integers in a to packed double-precision
++++// (64-bit) floating-point elements, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_pd
++++FORCE_INLINE __m128d _mm_cvtpi32_pd(__m64 a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vcvtq_f64_s64(vmovl_s32(vreinterpret_s32_m64(a))));
++++#else
++++ double a0 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 0);
++++ double a1 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 1);
++++ return _mm_set_pd(a1, a0);
++++#endif
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 32-bit integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epi32
++++// *NOTE* The default SSE rounding mode is 'round to nearest even', which
++++// ARMv7-A has no instruction for; ARMv8-A supports it directly.
++++FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
++++{
++++#if defined(__ARM_FEATURE_FRINT)
++++ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vrnd32xq_f32(a)));
++++#elif (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ switch (_MM_GET_ROUNDING_MODE()) {
++++ case _MM_ROUND_NEAREST:
++++ return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a));
++++ case _MM_ROUND_DOWN:
++++ return vreinterpretq_m128i_s32(vcvtmq_s32_f32(a));
++++ case _MM_ROUND_UP:
++++ return vreinterpretq_m128i_s32(vcvtpq_s32_f32(a));
++++ default: // _MM_ROUND_TOWARD_ZERO
++++ return vreinterpretq_m128i_s32(vcvtq_s32_f32(a));
++++ }
++++#else
++++ float *f = (float *) &a;
++++ switch (_MM_GET_ROUNDING_MODE()) {
++++ case _MM_ROUND_NEAREST: {
++++ uint32x4_t signmask = vdupq_n_u32(0x80000000);
++++ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
++++ vdupq_n_f32(0.5f)); /* +/- 0.5 */
++++ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
++++ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
++++ int32x4_t r_trunc = vcvtq_s32_f32(
++++ vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
++++ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
++++ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
++++ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
++++ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
++++ float32x4_t delta = vsubq_f32(
++++ vreinterpretq_f32_m128(a),
++++ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
++++ uint32x4_t is_delta_half =
++++ vceqq_f32(delta, half); /* delta == +/- 0.5 */
++++ return vreinterpretq_m128i_s32(
++++ vbslq_s32(is_delta_half, r_even, r_normal));
++++ }
++++ case _MM_ROUND_DOWN:
++++ return _mm_set_epi32(floorf(f[3]), floorf(f[2]), floorf(f[1]),
++++ floorf(f[0]));
++++ case _MM_ROUND_UP:
++++ return _mm_set_epi32(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]),
++++ ceilf(f[0]));
++++ default: // _MM_ROUND_TOWARD_ZERO
++++ return _mm_set_epi32((int32_t) f[3], (int32_t) f[2], (int32_t) f[1],
++++ (int32_t) f[0]);
++++ }
++++#endif
++++}
++++
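++++// Worked example (editorial, not from upstream): under the default
++++// round-to-nearest-even mode, _mm_cvtps_epi32 maps the elements
++++// {0.5, 1.5, 2.5, 3.5} to {0, 2, 2, 4}: ties go to the even integer, which
++++// is exactly what the ARMv7-A emulation above reproduces.
++++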
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed double-precision (64-bit) floating-point elements, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pd
++++FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a))));
++++#else
++++ double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
++++ double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
++++ return _mm_set_pd(a1, a0);
++++#endif
++++}
++++
++++// Copy the lower double-precision (64-bit) floating-point element of a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_f64
++++FORCE_INLINE double _mm_cvtsd_f64(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0);
++++#else
++++ return ((double *) &a)[0];
++++#endif
++++}
++++
++++// Convert the lower double-precision (64-bit) floating-point element in a to a
++++// 32-bit integer, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si32
++++FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return (int32_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
++++#else
++++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
++++ double ret = ((double *) &rnd)[0];
++++ return (int32_t) ret;
++++#endif
++++}
++++
++++// Convert the lower double-precision (64-bit) floating-point element in a to a
++++// 64-bit integer, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64
++++FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return (int64_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
++++#else
++++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
++++ double ret = ((double *) &rnd)[0];
++++ return (int64_t) ret;
++++#endif
++++}
++++
++++// Convert the lower double-precision (64-bit) floating-point element in a to a
++++// 64-bit integer, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64x
++++#define _mm_cvtsd_si64x _mm_cvtsd_si64
++++
++++// Convert the lower double-precision (64-bit) floating-point element in b to a
++++// single-precision (32-bit) floating-point element, store the result in the
++++// lower element of dst, and copy the upper 3 packed elements from a to the
++++// upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_ss
++++FORCE_INLINE __m128 _mm_cvtsd_ss(__m128 a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(vsetq_lane_f32(
++++ vget_lane_f32(vcvt_f32_f64(vreinterpretq_f64_m128d(b)), 0),
++++ vreinterpretq_f32_m128(a), 0));
++++#else
++++ return vreinterpretq_m128_f32(vsetq_lane_f32((float) ((double *) &b)[0],
++++ vreinterpretq_f32_m128(a), 0));
++++#endif
++++}
++++
++++// Copy the lower 32-bit integer in a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si32
++++FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
++++{
++++ return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
++++}
++++
++++// Copy the lower 64-bit integer in a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64
++++FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
++++{
++++ return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
++++}
++++
++++// Copy the lower 64-bit integer in a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64x
++++#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a)
++++
++++// Convert the signed 32-bit integer b to a double-precision (64-bit)
++++// floating-point element, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_sd
++++FORCE_INLINE __m128d _mm_cvtsi32_sd(__m128d a, int32_t b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
++++#else
++++ double bf = (double) b;
++++ return vreinterpretq_m128d_s64(
++++ vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
++++#endif
++++}
++++
++++// Copy 32-bit integer a to the lower elements of dst, and zero the upper
++++// elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_si128
++++FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
++++{
++++ return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
++++}
++++
++++// Convert the signed 64-bit integer b to a double-precision (64-bit)
++++// floating-point element, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_sd
++++FORCE_INLINE __m128d _mm_cvtsi64_sd(__m128d a, int64_t b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
++++#else
++++ double bf = (double) b;
++++ return vreinterpretq_m128d_s64(
++++ vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
++++#endif
++++}
++++
++++// Copy 64-bit integer a to the lower element of dst, and zero the upper
++++// element.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_si128
++++FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
++++{
++++ return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
++++}
++++
++++// Copy 64-bit integer a to the lower element of dst, and zero the upper
++++// element.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_si128
++++#define _mm_cvtsi64x_si128(a) _mm_cvtsi64_si128(a)
++++
++++// Convert the signed 64-bit integer b to a double-precision (64-bit)
++++// floating-point element, store the result in the lower element of dst, and
++++// copy the upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_sd
++++#define _mm_cvtsi64x_sd(a, b) _mm_cvtsi64_sd(a, b)
++++
++++// Convert the lower single-precision (32-bit) floating-point element in b to a
++++// double-precision (64-bit) floating-point element, store the result in the
++++// lower element of dst, and copy the upper element from a to the upper element
++++// of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_sd
++++FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b)
++++{
++++ double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0));
++++#else
++++ return vreinterpretq_m128d_s64(
++++ vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0));
++++#endif
++++}
++++
++++// Convert packed double-precision (64-bit) floating-point elements in a to
++++// packed 32-bit integers with truncation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epi32
++++FORCE_INLINE __m128i _mm_cvttpd_epi32(__m128d a)
++++{
++++ double a0 = ((double *) &a)[0];
++++ double a1 = ((double *) &a)[1];
++++ return _mm_set_epi32(0, 0, (int32_t) a1, (int32_t) a0);
++++}
++++
++++// Convert packed double-precision (64-bit) floating-point elements in a to
++++// packed 32-bit integers with truncation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_pi32
++++FORCE_INLINE __m64 _mm_cvttpd_pi32(__m128d a)
++++{
++++ double a0 = ((double *) &a)[0];
++++ double a1 = ((double *) &a)[1];
++++ int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) a0, (int32_t) a1};
++++ return vreinterpret_m64_s32(vld1_s32(data));
++++}
++++
++++// Convert packed single-precision (32-bit) floating-point elements in a to
++++// packed 32-bit integers with truncation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_epi32
++++FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
++++{
++++ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
++++}
++++
++++// Convert the lower double-precision (64-bit) floating-point element in a to a
++++// 32-bit integer with truncation, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si32
++++FORCE_INLINE int32_t _mm_cvttsd_si32(__m128d a)
++++{
++++ double ret = *((double *) &a);
++++ return (int32_t) ret;
++++}
++++
++++// Convert the lower double-precision (64-bit) floating-point element in a to a
++++// 64-bit integer with truncation, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64
++++FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0);
++++#else
++++ double ret = *((double *) &a);
++++ return (int64_t) ret;
++++#endif
++++}
++++
++++// Convert the lower double-precision (64-bit) floating-point element in a to a
++++// 64-bit integer with truncation, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64x
++++#define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a)
++++
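++++// Usage sketch (editorial illustration, not part of upstream sse2neon):
++++// _mm_cvtsd_si32 honours the current rounding mode while _mm_cvttsd_si32
++++// always truncates toward zero; for x = -1.5 the former gives -2 under
++++// round-to-nearest-even and the latter gives -1. The guard macro and
++++// helper name are hypothetical.
++++#ifdef SSE2NEON_USAGE_EXAMPLES
++++FORCE_INLINE int sse2neon_example_round_minus_truncate(double x)
++++{
++++ __m128d v = _mm_set_pd(0.0, x); // x goes into the lower lane
++++ return _mm_cvtsd_si32(v) - _mm_cvttsd_si32(v);
++++}
++++#endif
++++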
++++// Divide packed double-precision (64-bit) floating-point elements in a by
++++// packed elements in b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_pd
++++FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2];
++++ c[0] = da[0] / db[0];
++++ c[1] = da[1] / db[1];
++++ return vld1q_f32((float32_t *) c);
++++#endif
++++}
++++
++++// Divide the lower double-precision (64-bit) floating-point element in a by the
++++// lower double-precision (64-bit) floating-point element in b, store the result
++++// in the lower element of dst, and copy the upper element from a to the upper
++++// element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_sd
++++FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ float64x2_t tmp =
++++ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b));
++++ return vreinterpretq_m128d_f64(
++++ vsetq_lane_f64(vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1), tmp, 1));
++++#else
++++ return _mm_move_sd(a, _mm_div_pd(a, b));
++++#endif
++++}
++++
++++// Extract a 16-bit integer from a, selected with imm8, and store the result in
++++// the lower element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi16
++++// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
++++#define _mm_extract_epi16(a, imm) \
++++ vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))
++++
++++// Copy a to dst, and insert the 16-bit integer i into dst at the location
++++// specified by imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi16
++++// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
++++// __constrange(0,8) int imm)
++++#define _mm_insert_epi16(a, b, imm) \
++++ vreinterpretq_m128i_s16( \
++++ vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm)))
++++
++++// Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point
++++// elements) from memory into dst. mem_addr must be aligned on a 16-byte
++++// boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd
++++FORCE_INLINE __m128d _mm_load_pd(const double *p)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vld1q_f64(p));
++++#else
++++ const float *fp = (const float *) p;
++++ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]};
++++ return vreinterpretq_m128d_f32(vld1q_f32(data));
++++#endif
++++}
++++
++++// Load a double-precision (64-bit) floating-point element from memory into both
++++// elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd1
++++#define _mm_load_pd1 _mm_load1_pd
++++
++++// Load a double-precision (64-bit) floating-point element from memory into the
++++// lower of dst, and zero the upper element. mem_addr does not need to be
++++// aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_sd
++++FORCE_INLINE __m128d _mm_load_sd(const double *p)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0));
++++#else
++++ const float *fp = (const float *) p;
++++ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
++++ return vreinterpretq_m128d_f32(vld1q_f32(data));
++++#endif
++++}
++++
++++// Load 128-bits of integer data from memory into dst. mem_addr must be aligned
++++// on a 16-byte boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_si128
++++FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
++++{
++++ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
++++}
++++
++++// Load a double-precision (64-bit) floating-point element from memory into both
++++// elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_pd
++++FORCE_INLINE __m128d _mm_load1_pd(const double *p)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vld1q_dup_f64(p));
++++#else
++++ return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p));
++++#endif
++++}
++++
++++// Load a double-precision (64-bit) floating-point element from memory into the
++++// upper element of dst, and copy the lower element from a to dst. mem_addr does
++++// not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pd
++++FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p)));
++++#else
++++ return vreinterpretq_m128d_f32(vcombine_f32(
++++ vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p)));
++++#endif
++++}
++++
++++// Load 64-bit integer from memory into the first element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_epi64
++++FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
++++{
++++ /* Load the lower 64 bits of the value pointed to by p into the
++++ * lower 64 bits of the result, zeroing the upper 64 bits of the result.
++++ */
++++ return vreinterpretq_m128i_s32(
++++ vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0)));
++++}
++++
++++// Load a double-precision (64-bit) floating-point element from memory into the
++++// lower element of dst, and copy the upper element from a to dst. mem_addr does
++++// not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pd
++++FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a))));
++++#else
++++ return vreinterpretq_m128d_f32(
++++ vcombine_f32(vld1_f32((const float *) p),
++++ vget_high_f32(vreinterpretq_f32_m128d(a))));
++++#endif
++++}
++++
++++// Load 2 double-precision (64-bit) floating-point elements from memory into dst
++++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
++++// general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_pd
++++FORCE_INLINE __m128d _mm_loadr_pd(const double *p)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ float64x2_t v = vld1q_f64(p);
++++ return vreinterpretq_m128d_f64(vextq_f64(v, v, 1));
++++#else
++++ int64x2_t v = vld1q_s64((const int64_t *) p);
++++ return vreinterpretq_m128d_s64(vextq_s64(v, v, 1));
++++#endif
++++}
++++
++++// Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point
++++// elements) from memory into dst. mem_addr does not need to be aligned on any
++++// particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_pd
++++FORCE_INLINE __m128d _mm_loadu_pd(const double *p)
++++{
++++ return _mm_load_pd(p);
++++}
++++
++++// Load 128-bits of integer data from memory into dst. mem_addr does not need to
++++// be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si128
++++FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
++++{
++++ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
++++}
++++
++++// Load unaligned 32-bit integer from memory into the first element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si32
++++FORCE_INLINE __m128i _mm_loadu_si32(const void *p)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0));
++++}
++++
++++// Multiply packed signed 16-bit integers in a and b, producing intermediate
++++// signed 32-bit integers. Horizontally add adjacent pairs of intermediate
++++// 32-bit integers, and pack the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_madd_epi16
++++FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
++++{
++++ int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
++++ vget_low_s16(vreinterpretq_s16_m128i(b)));
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int32x4_t high =
++++ vmull_high_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b));
++++
++++ return vreinterpretq_m128i_s32(vpaddq_s32(low, high));
++++#else
++++ int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
++++ vget_high_s16(vreinterpretq_s16_m128i(b)));
++++
++++ int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
++++ int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
++++
++++ return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
++++#endif
++++}
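++++
++++// Worked example (illustrative): with a = {1, 2, 3, 4, 5, 6, 7, 8} and
++++// b = {1, 1, 1, 1, 2, 2, 2, 2} (lane order as written with _mm_setr_epi16),
++++// each 32-bit result lane is the sum of two adjacent products:
++++//   dst = {1*1 + 2*1, 3*1 + 4*1, 5*2 + 6*2, 7*2 + 8*2} = {3, 7, 22, 30}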
++++
++++// Conditionally store 8-bit integer elements from a into memory using mask
++++// (elements are not stored when the highest bit is not set in the corresponding
++++// element) and a non-temporal memory hint. mem_addr does not need to be aligned
++++// on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128
++++FORCE_INLINE void _mm_maskmoveu_si128(__m128i a, __m128i mask, char *mem_addr)
++++{
++++ int8x16_t shr_mask = vshrq_n_s8(vreinterpretq_s8_m128i(mask), 7);
++++ __m128 b = _mm_load_ps((const float *) mem_addr);
++++ int8x16_t masked =
++++ vbslq_s8(vreinterpretq_u8_s8(shr_mask), vreinterpretq_s8_m128i(a),
++++ vreinterpretq_s8_m128(b));
++++ vst1q_s8((int8_t *) mem_addr, masked);
++++}
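++++
++++// Usage sketch (illustrative): only bytes whose mask byte has its most
++++// significant bit set are written; the remaining bytes of mem_addr keep their
++++// previous values.
++++//   char buf[16] = {0};
++++//   _mm_maskmoveu_si128(_mm_set1_epi8(0x55), _mm_set1_epi8((char) 0x80), buf);
++++//   // every byte of buf is now 0x55
++++//   _mm_maskmoveu_si128(_mm_set1_epi8(0x11), _mm_setzero_si128(), buf);
++++//   // mask is all zero, so buf still holds 0x55 in every byte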
++++
++++// Compare packed signed 16-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi16
++++FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu8
++++FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b,
++++// and store packed maximum values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pd
++++FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++#if SSE2NEON_PRECISE_MINMAX
++++ float64x2_t _a = vreinterpretq_f64_m128d(a);
++++ float64x2_t _b = vreinterpretq_f64_m128d(b);
++++ return vreinterpretq_m128d_f64(vbslq_f64(vcgtq_f64(_a, _b), _a, _b));
++++#else
++++ return vreinterpretq_m128d_f64(
++++ vmaxq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#endif
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) > (*(double *) &b0) ? a0 : b0;
++++ d[1] = (*(double *) &a1) > (*(double *) &b1) ? a1 : b1;
++++
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b, store the maximum value in the lower element of dst, and copy the upper
++++// element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_sd
++++FORCE_INLINE __m128d _mm_max_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_max_pd(a, b));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2] = {da[0] > db[0] ? da[0] : db[0], da[1]};
++++ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
++++#endif
++++}
++++
++++// Compare packed signed 16-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi16
++++FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu8
++++FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
++++}
++++
++++// Compare packed double-precision (64-bit) floating-point elements in a and b,
++++// and store packed minimum values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pd
++++FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++#if SSE2NEON_PRECISE_MINMAX
++++ float64x2_t _a = vreinterpretq_f64_m128d(a);
++++ float64x2_t _b = vreinterpretq_f64_m128d(b);
++++ return vreinterpretq_m128d_f64(vbslq_f64(vcltq_f64(_a, _b), _a, _b));
++++#else
++++ return vreinterpretq_m128d_f64(
++++ vminq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#endif
++++#else
++++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
++++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
++++ uint64_t d[2];
++++ d[0] = (*(double *) &a0) < (*(double *) &b0) ? a0 : b0;
++++ d[1] = (*(double *) &a1) < (*(double *) &b1) ? a1 : b1;
++++ return vreinterpretq_m128d_u64(vld1q_u64(d));
++++#endif
++++}
++++
++++// Compare the lower double-precision (64-bit) floating-point elements in a and
++++// b, store the minimum value in the lower element of dst, and copy the upper
++++// element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_sd
++++FORCE_INLINE __m128d _mm_min_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_min_pd(a, b));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2] = {da[0] < db[0] ? da[0] : db[0], da[1]};
++++ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
++++#endif
++++}
++++
++++// Copy the lower 64-bit integer in a to the lower element of dst, and zero the
++++// upper element.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_epi64
++++FORCE_INLINE __m128i _mm_move_epi64(__m128i a)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1));
++++}
++++
++++// Move the lower double-precision (64-bit) floating-point element from b to the
++++// lower element of dst, and copy the upper element from a to the upper element
++++// of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_sd
++++FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b)
++++{
++++ return vreinterpretq_m128d_f32(
++++ vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)),
++++ vget_high_f32(vreinterpretq_f32_m128d(a))));
++++}
++++
++++// Create mask from the most significant bit of each 8-bit element in a, and
++++// store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_epi8
++++FORCE_INLINE int _mm_movemask_epi8(__m128i a)
++++{
++++ // Use increasingly wide shifts+adds to collect the sign bits
++++ // together.
++++ // Since the widening shifts would be rather confusing to follow in little
++++ // endian, everything will be illustrated in big endian order instead. This
++++ // has a different result - the bits would actually be reversed on a big
++++ // endian machine.
++++
++++ // Starting input (only half the elements are shown):
++++ // 89 ff 1d c0 00 10 99 33
++++ uint8x16_t input = vreinterpretq_u8_m128i(a);
++++
++++ // Shift out everything but the sign bits with an unsigned shift right.
++++ //
++++ // Bytes of the vector:
++++ // 89 ff 1d c0 00 10 99 33
++++ // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
++++ // | | | | | | | |
++++ // 01 01 00 01 00 00 01 00
++++ //
++++ // Bits of first important lane(s):
++++ // 10001001 (89)
++++ // \______
++++ // |
++++ // 00000001 (01)
++++ uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
++++
++++ // Merge the even lanes together with a 16-bit unsigned shift right + add.
++++ // 'xx' represents garbage data which will be ignored in the final result.
++++ // In the important bytes, the add functions like a binary OR.
++++ //
++++ // 01 01 00 01 00 00 01 00
++++ // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
++++ // \| \| \| \|
++++ // xx 03 xx 01 xx 00 xx 02
++++ //
++++ // 00000001 00000001 (01 01)
++++ // \_______ |
++++ // \|
++++ // xxxxxxxx xxxxxx11 (xx 03)
++++ uint32x4_t paired16 =
++++ vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
++++
++++ // Repeat with a wider 32-bit shift + add.
++++ // xx 03 xx 01 xx 00 xx 02
++++ // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >> 14))
++++ // \| \|
++++ // xx xx xx 0d xx xx xx 02
++++ //
++++ // 00000011 00000001 (03 01)
++++ // \\_____ ||
++++ // '----.\||
++++ // xxxxxxxx xxxx1101 (xx 0d)
++++ uint64x2_t paired32 =
++++ vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
++++
++++ // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
++++ // lanes.
++++ // xx xx xx 0d xx xx xx 02
++++ // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >> 28))
++++ // \|
++++ // xx xx xx xx xx xx xx d2
++++ //
++++ // 00001101 00000010 (0d 02)
++++ // \ \___ | |
++++ // '---. \| |
++++ // xxxxxxxx 11010010 (xx d2)
++++ uint8x16_t paired64 =
++++ vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
++++
++++ // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
++++ // xx xx xx xx xx xx xx d2
++++ // || return paired64[0]
++++ // d2
++++ // Note: Little endian would return the correct value 4b (01001011) instead.
++++ return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
++++}
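++++
++++// Usage sketch (illustrative): bit i of the result is the sign bit of byte i,
++++// so the mask ranges over 0x0000..0xFFFF.
++++//   _mm_movemask_epi8(_mm_setzero_si128())        == 0x0000
++++//   _mm_movemask_epi8(_mm_set1_epi8((char) 0x80)) == 0xFFFF
++++//   _mm_movemask_epi8(_mm_setr_epi8(-1, 0, 0, 0, 0, 0, 0, 0,
++++//                                   0, 0, 0, 0, 0, 0, 0, 0)) == 0x0001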
++++
++++// Set each bit of mask dst based on the most significant bit of the
++++// corresponding packed double-precision (64-bit) floating-point element in a.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pd
++++FORCE_INLINE int _mm_movemask_pd(__m128d a)
++++{
++++ uint64x2_t input = vreinterpretq_u64_m128d(a);
++++ uint64x2_t high_bits = vshrq_n_u64(input, 63);
++++ return (int) (vgetq_lane_u64(high_bits, 0) |
++++ (vgetq_lane_u64(high_bits, 1) << 1));
++++}
++++
++++// Copy the lower 64-bit integer in a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi64_pi64
++++FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a)
++++{
++++ return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a)));
++++}
++++
++++// Copy the 64-bit integer a to the lower element of dst, and zero the upper
++++// element.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movpi64_epi64
++++FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0)));
++++}
++++
++++// Multiply the low unsigned 32-bit integers from each packed 64-bit element in
++++// a and b, and store the unsigned 64-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epu32
++++FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
++++{
++++ // vmull_u32 upcasts instead of masking, so we downcast.
++++ uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
++++ uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
++++ return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
++++}
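++++
++++// Worked example (illustrative): only 32-bit elements 0 and 2 take part in the
++++// multiplication; elements 1 and 3 are ignored.
++++//   __m128i a = _mm_setr_epi32(3, 999, 5, 999);
++++//   __m128i b = _mm_setr_epi32(7, 123, 11, 456);
++++//   _mm_mul_epu32(a, b) -> 64-bit lanes {3 * 7, 5 * 11} = {21, 55}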
++++
++++// Multiply packed double-precision (64-bit) floating-point elements in a and b,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_pd
++++FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2];
++++ c[0] = da[0] * db[0];
++++ c[1] = da[1] * db[1];
++++ return vld1q_f32((float32_t *) c);
++++#endif
++++}
++++
++++// Multiply the lower double-precision (64-bit) floating-point element in a and
++++// b, store the result in the lower element of dst, and copy the upper element
++++// from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_sd
++++FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_mul_pd(a, b));
++++}
++++
++++// Multiply the low unsigned 32-bit integers from a and b, and store the
++++// unsigned 64-bit result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_su32
++++FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_u64(vget_low_u64(
++++ vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b))));
++++}
++++
++++// Multiply the packed signed 16-bit integers in a and b, producing intermediate
++++// 32-bit integers, and store the high 16 bits of the intermediate integers in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epi16
++++FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
++++{
++++ /* FIXME: issue with large values because of result saturation */
++++ // int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
++++ // vreinterpretq_s16_m128i(b)); /* =2*a*b */ return
++++ // vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
++++ int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
++++ int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
++++ int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
++++ int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
++++ int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
++++ int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
++++ uint16x8x2_t r =
++++ vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
++++ return vreinterpretq_m128i_u16(r.val[1]);
++++}
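++++
++++// Worked example (illustrative): each result lane holds the upper half of the
++++// full 32-bit product. With a = b = _mm_set1_epi16(0x4000):
++++//   0x4000 * 0x4000 = 0x10000000, so every lane of dst is 0x1000.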
++++
++++// Multiply the packed unsigned 16-bit integers in a and b, producing
++++// intermediate 32-bit integers, and store the high 16 bits of the intermediate
++++// integers in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epu16
++++FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
++++{
++++ uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a));
++++ uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b));
++++ uint32x4_t ab3210 = vmull_u16(a3210, b3210);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint32x4_t ab7654 =
++++ vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
++++ uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
++++ vreinterpretq_u16_u32(ab7654));
++++ return vreinterpretq_m128i_u16(r);
++++#else
++++ uint16x4_t a7654 = vget_high_u16(vreinterpretq_u16_m128i(a));
++++ uint16x4_t b7654 = vget_high_u16(vreinterpretq_u16_m128i(b));
++++ uint32x4_t ab7654 = vmull_u16(a7654, b7654);
++++ uint16x8x2_t r =
++++ vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
++++ return vreinterpretq_m128i_u16(r.val[1]);
++++#endif
++++}
++++
++++// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit
++++// integers, and store the low 16 bits of the intermediate integers in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi16
++++FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Compute the bitwise OR of packed double-precision (64-bit) floating-point
++++// elements in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_or_pd
++++FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b)
++++{
++++ return vreinterpretq_m128d_s64(
++++ vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
++++}
++++
++++// Compute the bitwise OR of 128 bits (representing integer data) in a and b,
++++// and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_si128
++++FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Convert packed signed 16-bit integers from a and b to packed 8-bit integers
++++// using signed saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi16
++++FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
++++ vqmovn_s16(vreinterpretq_s16_m128i(b))));
++++}
++++
++++// Convert packed signed 32-bit integers from a and b to packed 16-bit integers
++++// using signed saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi32
++++FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
++++ vqmovn_s32(vreinterpretq_s32_m128i(b))));
++++}
++++
++++// Convert packed signed 16-bit integers from a and b to packed 8-bit integers
++++// using unsigned saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi16
++++FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
++++ vqmovun_s16(vreinterpretq_s16_m128i(b))));
++++}
++++
++++// Pause the processor. This is typically used in spin-wait loops and, depending
++++// on the x86 processor, typical values are in the 40-100 cycle range. The
++++// 'yield' instruction isn't a good fit because it's effectively a nop on most
++++// Arm cores. Experience with several databases has shown an 'isb' is a
++++// reasonable approximation.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_pause
++++FORCE_INLINE void _mm_pause(void)
++++{
++++#if defined(_MSC_VER)
++++ __isb(_ARM64_BARRIER_SY);
++++#else
++++ __asm__ __volatile__("isb\n");
++++#endif
++++}
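++++
++++// Usage sketch (illustrative; spin_on_flag is a hypothetical helper, not part
++++// of this header): back off between polls of a shared flag in a spin-wait.
++++//   static inline void spin_on_flag(volatile int *flag)
++++//   {
++++//       while (!*flag)
++++//           _mm_pause();
++++//   }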
++++
++++// Compute the absolute differences of packed unsigned 8-bit integers in a and
++++// b, then horizontally sum each consecutive 8 differences to produce two
++++// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
++++// 16 bits of 64-bit elements in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_epu8
++++FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
++++{
++++ uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b));
++++ return vreinterpretq_m128i_u64(vpaddlq_u32(vpaddlq_u16(t)));
++++}
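++++
++++// Worked example (illustrative): with a = _mm_set1_epi8(3) and
++++// b = _mm_set1_epi8(1), every absolute difference is 2, and each group of 8
++++// bytes sums to 16, so both 64-bit elements of dst hold 16.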
++++
++++// Set packed 16-bit integers in dst with the supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi16
++++FORCE_INLINE __m128i _mm_set_epi16(short i7,
++++ short i6,
++++ short i5,
++++ short i4,
++++ short i3,
++++ short i2,
++++ short i1,
++++ short i0)
++++{
++++ int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
++++ return vreinterpretq_m128i_s16(vld1q_s16(data));
++++}
++++
++++// Set packed 32-bit integers in dst with the supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi32
++++FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
++++{
++++ int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
++++ return vreinterpretq_m128i_s32(vld1q_s32(data));
++++}
++++
++++// Set packed 64-bit integers in dst with the supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64
++++FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2)
++++{
++++ return _mm_set_epi64x(vget_lane_s64(i1, 0), vget_lane_s64(i2, 0));
++++}
++++
++++// Set packed 64-bit integers in dst with the supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64x
++++FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vcombine_s64(vcreate_s64(i2), vcreate_s64(i1)));
++++}
++++
++++// Set packed 8-bit integers in dst with the supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi8
++++FORCE_INLINE __m128i _mm_set_epi8(signed char b15,
++++ signed char b14,
++++ signed char b13,
++++ signed char b12,
++++ signed char b11,
++++ signed char b10,
++++ signed char b9,
++++ signed char b8,
++++ signed char b7,
++++ signed char b6,
++++ signed char b5,
++++ signed char b4,
++++ signed char b3,
++++ signed char b2,
++++ signed char b1,
++++ signed char b0)
++++{
++++ int8_t ALIGN_STRUCT(16)
++++ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
++++ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
++++ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
++++ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
++++ return (__m128i) vld1q_s8(data);
++++}
++++
++++// Set packed double-precision (64-bit) floating-point elements in dst with the
++++// supplied values.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd
++++FORCE_INLINE __m128d _mm_set_pd(double e1, double e0)
++++{
++++ double ALIGN_STRUCT(16) data[2] = {e0, e1};
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data));
++++#else
++++ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data));
++++#endif
++++}
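++++
++++// Usage sketch (illustrative): note the argument order; e0 lands in the lower
++++// element of dst.
++++//   double buf[2];
++++//   _mm_storeu_pd(buf, _mm_set_pd(2.0, 1.0));
++++//   // buf[0] == 1.0, buf[1] == 2.0 (use _mm_setr_pd for memory order)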
++++
++++// Broadcast double-precision (64-bit) floating-point value a to all elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd1
++++#define _mm_set_pd1 _mm_set1_pd
++++
++++// Copy double-precision (64-bit) floating-point element a to the lower element
++++// of dst, and zero the upper element.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_sd
++++FORCE_INLINE __m128d _mm_set_sd(double a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vsetq_lane_f64(a, vdupq_n_f64(0), 0));
++++#else
++++ return _mm_set_pd(0, a);
++++#endif
++++}
++++
++++// Broadcast 16-bit integer a to all elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi16
++++FORCE_INLINE __m128i _mm_set1_epi16(short w)
++++{
++++ return vreinterpretq_m128i_s16(vdupq_n_s16(w));
++++}
++++
++++// Broadcast 32-bit integer a to all elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi32
++++FORCE_INLINE __m128i _mm_set1_epi32(int _i)
++++{
++++ return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
++++}
++++
++++// Broadcast 64-bit integer a to all elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64
++++FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i)
++++{
++++ return vreinterpretq_m128i_s64(vdupq_lane_s64(_i, 0));
++++}
++++
++++// Broadcast 64-bit integer a to all elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64x
++++FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
++++{
++++ return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
++++}
++++
++++// Broadcast 8-bit integer a to all elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi8
++++FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
++++{
++++ return vreinterpretq_m128i_s8(vdupq_n_s8(w));
++++}
++++
++++// Broadcast double-precision (64-bit) floating-point value a to all elements of
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_pd
++++FORCE_INLINE __m128d _mm_set1_pd(double d)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vdupq_n_f64(d));
++++#else
++++ return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d));
++++#endif
++++}
++++
++++// Set packed 16-bit integers in dst with the supplied values in reverse order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi16
++++FORCE_INLINE __m128i _mm_setr_epi16(short w0,
++++ short w1,
++++ short w2,
++++ short w3,
++++ short w4,
++++ short w5,
++++ short w6,
++++ short w7)
++++{
++++ int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
++++ return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data));
++++}
++++
++++// Set packed 32-bit integers in dst with the supplied values in reverse order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi32
++++FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
++++{
++++ int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
++++ return vreinterpretq_m128i_s32(vld1q_s32(data));
++++}
++++
++++// Set packed 64-bit integers in dst with the supplied values in reverse order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi64
++++FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0)
++++{
++++ return vreinterpretq_m128i_s64(vcombine_s64(e1, e0));
++++}
++++
++++// Set packed 8-bit integers in dst with the supplied values in reverse order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi8
++++FORCE_INLINE __m128i _mm_setr_epi8(signed char b0,
++++ signed char b1,
++++ signed char b2,
++++ signed char b3,
++++ signed char b4,
++++ signed char b5,
++++ signed char b6,
++++ signed char b7,
++++ signed char b8,
++++ signed char b9,
++++ signed char b10,
++++ signed char b11,
++++ signed char b12,
++++ signed char b13,
++++ signed char b14,
++++ signed char b15)
++++{
++++ int8_t ALIGN_STRUCT(16)
++++ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
++++ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
++++ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
++++ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
++++ return (__m128i) vld1q_s8(data);
++++}
++++
++++// Set packed double-precision (64-bit) floating-point elements in dst with the
++++// supplied values in reverse order.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_pd
++++FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0)
++++{
++++ return _mm_set_pd(e0, e1);
++++}
++++
++++// Return vector of type __m128d with all elements set to zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_pd
++++FORCE_INLINE __m128d _mm_setzero_pd(void)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vdupq_n_f64(0));
++++#else
++++ return vreinterpretq_m128d_f32(vdupq_n_f32(0));
++++#endif
++++}
++++
++++// Return vector of type __m128i with all elements set to zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_si128
++++FORCE_INLINE __m128i _mm_setzero_si128(void)
++++{
++++ return vreinterpretq_m128i_s32(vdupq_n_s32(0));
++++}
++++
++++// Shuffle 32-bit integers in a using the control in imm8, and store the results
++++// in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi32
++++// FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
++++// __constrange(0,255) int imm)
++++#if defined(_sse2neon_shuffle)
++++#define _mm_shuffle_epi32(a, imm) \
++++ __extension__({ \
++++ int32x4_t _input = vreinterpretq_s32_m128i(a); \
++++ int32x4_t _shuf = \
++++ vshuffleq_s32(_input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
++++ ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
++++ vreinterpretq_m128i_s32(_shuf); \
++++ })
++++#else // generic
++++#define _mm_shuffle_epi32(a, imm) \
++++ _sse2neon_define1( \
++++ __m128i, a, __m128i ret; switch (imm) { \
++++ case _MM_SHUFFLE(1, 0, 3, 2): \
++++ ret = _mm_shuffle_epi_1032(_a); \
++++ break; \
++++ case _MM_SHUFFLE(2, 3, 0, 1): \
++++ ret = _mm_shuffle_epi_2301(_a); \
++++ break; \
++++ case _MM_SHUFFLE(0, 3, 2, 1): \
++++ ret = _mm_shuffle_epi_0321(_a); \
++++ break; \
++++ case _MM_SHUFFLE(2, 1, 0, 3): \
++++ ret = _mm_shuffle_epi_2103(_a); \
++++ break; \
++++ case _MM_SHUFFLE(1, 0, 1, 0): \
++++ ret = _mm_shuffle_epi_1010(_a); \
++++ break; \
++++ case _MM_SHUFFLE(1, 0, 0, 1): \
++++ ret = _mm_shuffle_epi_1001(_a); \
++++ break; \
++++ case _MM_SHUFFLE(0, 1, 0, 1): \
++++ ret = _mm_shuffle_epi_0101(_a); \
++++ break; \
++++ case _MM_SHUFFLE(2, 2, 1, 1): \
++++ ret = _mm_shuffle_epi_2211(_a); \
++++ break; \
++++ case _MM_SHUFFLE(0, 1, 2, 2): \
++++ ret = _mm_shuffle_epi_0122(_a); \
++++ break; \
++++ case _MM_SHUFFLE(3, 3, 3, 2): \
++++ ret = _mm_shuffle_epi_3332(_a); \
++++ break; \
++++ case _MM_SHUFFLE(0, 0, 0, 0): \
++++ ret = _mm_shuffle_epi32_splat(_a, 0); \
++++ break; \
++++ case _MM_SHUFFLE(1, 1, 1, 1): \
++++ ret = _mm_shuffle_epi32_splat(_a, 1); \
++++ break; \
++++ case _MM_SHUFFLE(2, 2, 2, 2): \
++++ ret = _mm_shuffle_epi32_splat(_a, 2); \
++++ break; \
++++ case _MM_SHUFFLE(3, 3, 3, 3): \
++++ ret = _mm_shuffle_epi32_splat(_a, 3); \
++++ break; \
++++ default: \
++++ ret = _mm_shuffle_epi32_default(_a, (imm)); \
++++ break; \
++++ } _sse2neon_return(ret);)
++++#endif
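++++
++++// Usage sketch (illustrative): imm selects, for each destination lane, which
++++// source lane to copy (the lowest two bits pick dst lane 0, and so on).
++++//   __m128i v = _mm_setr_epi32(10, 11, 12, 13);
++++//   _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 2, 1, 0)); // identity:  {10, 11, 12, 13}
++++//   _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3)); // reversed:  {13, 12, 11, 10}
++++//   _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); // broadcast: {10, 10, 10, 10}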
++++
++++// Shuffle double-precision (64-bit) floating-point elements using the control
++++// in imm8, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pd
++++#ifdef _sse2neon_shuffle
++++#define _mm_shuffle_pd(a, b, imm8) \
++++ vreinterpretq_m128d_s64( \
++++ vshuffleq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), \
++++ imm8 & 0x1, ((imm8 & 0x2) >> 1) + 2))
++++#else
++++#define _mm_shuffle_pd(a, b, imm8) \
++++ _mm_castsi128_pd(_mm_set_epi64x( \
++++ vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \
++++ vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1)))
++++#endif
++++
++++// Shuffle 16-bit integers in the high 64 bits of a using the control in imm8.
++++// Store the results in the high 64 bits of dst, with the low 64 bits being
++++// copied from a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16
++++// FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
++++// __constrange(0,255) int imm)
++++#if defined(_sse2neon_shuffle)
++++#define _mm_shufflehi_epi16(a, imm) \
++++ __extension__({ \
++++ int16x8_t _input = vreinterpretq_s16_m128i(a); \
++++ int16x8_t _shuf = \
++++ vshuffleq_s16(_input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
++++ (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
++++ (((imm) >> 6) & 0x3) + 4); \
++++ vreinterpretq_m128i_s16(_shuf); \
++++ })
++++#else // generic
++++#define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
++++#endif
++++
++++// Shuffle 16-bit integers in the low 64 bits of a using the control in imm8.
++++// Store the results in the low 64 bits of dst, with the high 64 bits being
++++// copied from a to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16
++++// FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
++++// __constrange(0,255) int imm)
++++#if defined(_sse2neon_shuffle)
++++#define _mm_shufflelo_epi16(a, imm) \
++++ __extension__({ \
++++ int16x8_t _input = vreinterpretq_s16_m128i(a); \
++++ int16x8_t _shuf = vshuffleq_s16( \
++++ _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
++++ (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \
++++ vreinterpretq_m128i_s16(_shuf); \
++++ })
++++#else // generic
++++#define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
++++#endif
++++
++++// Shift packed 16-bit integers in a left by count while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi16
++++FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
++++{
++++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
++++ if (_sse2neon_unlikely(c & ~15))
++++ return _mm_setzero_si128();
++++
++++ int16x8_t vc = vdupq_n_s16((int16_t) c);
++++ return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc));
++++}
++++
++++// Shift packed 32-bit integers in a left by count while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi32
++++FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
++++{
++++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
++++ if (_sse2neon_unlikely(c & ~31))
++++ return _mm_setzero_si128();
++++
++++ int32x4_t vc = vdupq_n_s32((int32_t) c);
++++ return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc));
++++}
++++
++++// Shift packed 64-bit integers in a left by count while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi64
++++FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
++++{
++++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
++++ if (_sse2neon_unlikely(c & ~63))
++++ return _mm_setzero_si128();
++++
++++ int64x2_t vc = vdupq_n_s64((int64_t) c);
++++ return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc));
++++}
++++
++++// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi16
++++FORCE_INLINE __m128i _mm_slli_epi16(__m128i a, int imm)
++++{
++++ if (_sse2neon_unlikely(imm & ~15))
++++ return _mm_setzero_si128();
++++ return vreinterpretq_m128i_s16(
++++ vshlq_s16(vreinterpretq_s16_m128i(a), vdupq_n_s16(imm)));
++++}
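++++
++++// Note (illustrative): a shift count outside 0..15 does not wrap around; it
++++// simply zeroes the result, e.g.
++++//   _mm_slli_epi16(_mm_set1_epi16(1), 16) == _mm_setzero_si128()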
++++
++++// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi32
++++FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm)
++++{
++++ if (_sse2neon_unlikely(imm & ~31))
++++ return _mm_setzero_si128();
++++ return vreinterpretq_m128i_s32(
++++ vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm)));
++++}
++++
++++// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi64
++++FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm)
++++{
++++ if (_sse2neon_unlikely(imm & ~63))
++++ return _mm_setzero_si128();
++++ return vreinterpretq_m128i_s64(
++++ vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm)));
++++}
++++
++++// Shift a left by imm8 bytes while shifting in zeros, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_si128
++++#define _mm_slli_si128(a, imm) \
++++ _sse2neon_define1( \
++++ __m128i, a, int8x16_t ret; \
++++ if (_sse2neon_unlikely(imm == 0)) ret = vreinterpretq_s8_m128i(_a); \
++++ else if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \
++++ else ret = vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(_a), \
++++ ((imm <= 0 || imm > 15) ? 0 : (16 - imm))); \
++++ _sse2neon_return(vreinterpretq_m128i_s8(ret));)
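++++
++++// Usage sketch (illustrative): the shift distance is in whole bytes, with the
++++// vector treated as a single 128-bit value (byte 0 is least significant).
++++//   _mm_slli_si128(_mm_setr_epi32(1, 2, 3, 4), 4) == _mm_setr_epi32(0, 1, 2, 3)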
++++
++++// Compute the square root of packed double-precision (64-bit) floating-point
++++// elements in a, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_pd
++++FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vsqrtq_f64(vreinterpretq_f64_m128d(a)));
++++#else
++++ double a0 = sqrt(((double *) &a)[0]);
++++ double a1 = sqrt(((double *) &a)[1]);
++++ return _mm_set_pd(a1, a0);
++++#endif
++++}
++++
++++// Compute the square root of the lower double-precision (64-bit) floating-point
++++// element in b, store the result in the lower element of dst, and copy the
++++// upper element from a to the upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_sd
++++FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return _mm_move_sd(a, _mm_sqrt_pd(b));
++++#else
++++ return _mm_set_pd(((double *) &a)[1], sqrt(((double *) &b)[0]));
++++#endif
++++}
++++
++++// Shift packed 16-bit integers in a right by count while shifting in sign bits,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi16
++++FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
++++{
++++ int64_t c = vgetq_lane_s64(count, 0);
++++ if (_sse2neon_unlikely(c & ~15))
++++ return _mm_cmplt_epi16(a, _mm_setzero_si128());
++++ return vreinterpretq_m128i_s16(
++++ vshlq_s16((int16x8_t) a, vdupq_n_s16((int) -c)));
++++}
++++
++++// Shift packed 32-bit integers in a right by count while shifting in sign bits,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi32
++++FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
++++{
++++ int64_t c = vgetq_lane_s64(count, 0);
++++ if (_sse2neon_unlikely(c & ~31))
++++ return _mm_cmplt_epi32(a, _mm_setzero_si128());
++++ return vreinterpretq_m128i_s32(
++++ vshlq_s32((int32x4_t) a, vdupq_n_s32((int) -c)));
++++}
++++
++++// Shift packed 16-bit integers in a right by imm8 while shifting in sign
++++// bits, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi16
++++FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm)
++++{
++++ const int count = (imm & ~15) ? 15 : imm;
++++ return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count));
++++}
++++
++++// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi32
++++// FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
++++#define _mm_srai_epi32(a, imm) \
++++ _sse2neon_define0( \
++++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) == 0)) { \
++++ ret = _a; \
++++ } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \
++++ ret = vreinterpretq_m128i_s32( \
++++ vshlq_s32(vreinterpretq_s32_m128i(_a), vdupq_n_s32(-(imm)))); \
++++ } else { \
++++ ret = vreinterpretq_m128i_s32( \
++++ vshrq_n_s32(vreinterpretq_s32_m128i(_a), 31)); \
++++ } _sse2neon_return(ret);)
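++++
++++// Worked example (illustrative): the arithmetic shift replicates the sign bit,
++++// in contrast to the logical _mm_srli_epi32 below.
++++//   _mm_srai_epi32(_mm_set1_epi32(-4), 1) == _mm_set1_epi32(-2)
++++//   _mm_srli_epi32(_mm_set1_epi32(-4), 1) == _mm_set1_epi32(0x7FFFFFFE)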
++++
++++// Shift packed 16-bit integers in a right by count while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi16
++++FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
++++{
++++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
++++ if (_sse2neon_unlikely(c & ~15))
++++ return _mm_setzero_si128();
++++
++++ int16x8_t vc = vdupq_n_s16(-(int16_t) c);
++++ return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc));
++++}
++++
++++// Shift packed 32-bit integers in a right by count while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi32
++++FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
++++{
++++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
++++ if (_sse2neon_unlikely(c & ~31))
++++ return _mm_setzero_si128();
++++
++++ int32x4_t vc = vdupq_n_s32(-(int32_t) c);
++++ return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc));
++++}
++++
++++// Shift packed 64-bit integers in a right by count while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi64
++++FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
++++{
++++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
++++ if (_sse2neon_unlikely(c & ~63))
++++ return _mm_setzero_si128();
++++
++++ int64x2_t vc = vdupq_n_s64(-(int64_t) c);
++++ return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc));
++++}
++++
++++// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi16
++++#define _mm_srli_epi16(a, imm) \
++++ _sse2neon_define0( \
++++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~15)) { \
++++ ret = _mm_setzero_si128(); \
++++ } else { \
++++ ret = vreinterpretq_m128i_u16( \
++++ vshlq_u16(vreinterpretq_u16_m128i(_a), vdupq_n_s16(-(imm)))); \
++++ } _sse2neon_return(ret);)
++++
++++// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi32
++++// FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
++++#define _mm_srli_epi32(a, imm) \
++++ _sse2neon_define0( \
++++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~31)) { \
++++ ret = _mm_setzero_si128(); \
++++ } else { \
++++ ret = vreinterpretq_m128i_u32( \
++++ vshlq_u32(vreinterpretq_u32_m128i(_a), vdupq_n_s32(-(imm)))); \
++++ } _sse2neon_return(ret);)
++++
++++// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi64
++++#define _mm_srli_epi64(a, imm) \
++++ _sse2neon_define0( \
++++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~63)) { \
++++ ret = _mm_setzero_si128(); \
++++ } else { \
++++ ret = vreinterpretq_m128i_u64( \
++++ vshlq_u64(vreinterpretq_u64_m128i(_a), vdupq_n_s64(-(imm)))); \
++++ } _sse2neon_return(ret);)
++++
++++// Shift a right by imm8 bytes while shifting in zeros, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_si128
++++#define _mm_srli_si128(a, imm) \
++++ _sse2neon_define1( \
++++ __m128i, a, int8x16_t ret; \
++++ if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \
++++ else ret = vextq_s8(vreinterpretq_s8_m128i(_a), vdupq_n_s8(0), \
++++ (imm > 15 ? 0 : imm)); \
++++ _sse2neon_return(vreinterpretq_m128i_s8(ret));)
++++
++++// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
++++// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
++++// or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd
++++FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a));
++++#else
++++ vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a));
++++#endif
++++}
++++
++++// Store the lower double-precision (64-bit) floating-point element from a into
++++// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
++++// boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd1
++++FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a));
++++ vst1q_f64((float64_t *) mem_addr,
++++ vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low)));
++++#else
++++ float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a));
++++ vst1q_f32((float32_t *) mem_addr,
++++ vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low)));
++++#endif
++++}
++++
++++// Store the lower double-precision (64-bit) floating-point element from a into
++++// memory. mem_addr does not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_store_sd
++++FORCE_INLINE void _mm_store_sd(double *mem_addr, __m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
++++#else
++++ vst1_u64((uint64_t *) mem_addr, vget_low_u64(vreinterpretq_u64_m128d(a)));
++++#endif
++++}
++++
++++// Store 128-bits of integer data from a into memory. mem_addr must be aligned
++++// on a 16-byte boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_si128
++++FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
++++{
++++ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
++++}
++++
++++// Store the lower double-precision (64-bit) floating-point element from a into
++++// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
++++// boundary or a general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#expand=9,526,5601&text=_mm_store1_pd
++++#define _mm_store1_pd _mm_store_pd1
++++
++++// Store the upper double-precision (64-bit) floating-point element from a into
++++// memory.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pd
++++FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a)));
++++#else
++++ vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a)));
++++#endif
++++}
++++
++++// Store 64-bit integer from the first element of a into memory.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_epi64
++++FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
++++{
++++ vst1_u64((uint64_t *) a, vget_low_u64(vreinterpretq_u64_m128i(b)));
++++}
++++
++++// Store the lower double-precision (64-bit) floating-point element from a into
++++// memory.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pd
++++FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
++++#else
++++ vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a)));
++++#endif
++++}
++++
++++// Store 2 double-precision (64-bit) floating-point elements from a into memory
++++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
++++// general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_pd
++++FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a)
++++{
++++ float32x4_t f = vreinterpretq_f32_m128d(a);
++++ _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2)));
++++}
++++
++++// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
++++// elements) from a into memory. mem_addr does not need to be aligned on any
++++// particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_pd
++++FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a)
++++{
++++ _mm_store_pd(mem_addr, a);
++++}
++++
++++// Store 128-bits of integer data from a into memory. mem_addr does not need to
++++// be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si128
++++FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
++++{
++++ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
++++}
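++++
++++// Note: NEON vst1q stores have no 16-byte alignment requirement, so the
++++// unaligned store variants above can safely reuse the aligned code paths.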
++++
++++// Store 32-bit integer from the first element of a into memory. mem_addr does
++++// not need to be aligned on any particular boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si32
++++FORCE_INLINE void _mm_storeu_si32(void *p, __m128i a)
++++{
++++ vst1q_lane_s32((int32_t *) p, vreinterpretq_s32_m128i(a), 0);
++++}
++++
++++// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
++++// elements) from a into memory using a non-temporal memory hint. mem_addr must
++++// be aligned on a 16-byte boundary or a general-protection exception may be
++++// generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pd
++++FORCE_INLINE void _mm_stream_pd(double *p, __m128d a)
++++{
++++#if __has_builtin(__builtin_nontemporal_store)
++++ __builtin_nontemporal_store(a, (__m128d *) p);
++++#elif defined(__aarch64__) || defined(_M_ARM64)
++++ vst1q_f64(p, vreinterpretq_f64_m128d(a));
++++#else
++++ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128d(a));
++++#endif
++++}
++++
++++// Store 128-bits of integer data from a into memory using a non-temporal memory
++++// hint. mem_addr must be aligned on a 16-byte boundary or a general-protection
++++// exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si128
++++FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
++++{
++++#if __has_builtin(__builtin_nontemporal_store)
++++ __builtin_nontemporal_store(a, p);
++++#else
++++ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a));
++++#endif
++++}
++++
++++// Store 32-bit integer a into memory using a non-temporal hint to minimize
++++// cache pollution. If the cache line containing address mem_addr is already in
++++// the cache, the cache will be updated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si32
++++FORCE_INLINE void _mm_stream_si32(int *p, int a)
++++{
++++ vst1q_lane_s32((int32_t *) p, vdupq_n_s32(a), 0);
++++}
++++
++++// Store 64-bit integer a into memory using a non-temporal hint to minimize
++++// cache pollution. If the cache line containing address mem_addr is already in
++++// the cache, the cache will be updated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si64
++++FORCE_INLINE void _mm_stream_si64(__int64 *p, __int64 a)
++++{
++++ vst1_s64((int64_t *) p, vdup_n_s64((int64_t) a));
++++}
++++
++++// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi16
++++FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi32
++++FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi64
++++FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
++++}
++++
++++// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi8
++++FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Subtract packed double-precision (64-bit) floating-point elements in b from
++++// packed double-precision (64-bit) floating-point elements in a, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_pd
++++FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[2];
++++ c[0] = da[0] - db[0];
++++ c[1] = da[1] - db[1];
++++ return vld1q_f32((float32_t *) c);
++++#endif
++++}
++++
++++// Subtract the lower double-precision (64-bit) floating-point element in b from
++++// the lower double-precision (64-bit) floating-point element in a, store the
++++// result in the lower element of dst, and copy the upper element from a to the
++++// upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_sd
++++FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_sub_pd(a, b));
++++}
++++
++++// Subtract 64-bit integer b from 64-bit integer a, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_si64
++++FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_s64(
++++ vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
++++}
++++
++++// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a
++++// using saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi16
++++FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s16(
++++ vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++}
++++
++++// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a
++++// using saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi8
++++FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit
++++// integers in a using saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu16
++++FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
++++}
++++
++++// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit
++++// integers in a using saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu8
++++FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(
++++ vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
++++}
++++
++++#define _mm_ucomieq_sd _mm_comieq_sd
++++#define _mm_ucomige_sd _mm_comige_sd
++++#define _mm_ucomigt_sd _mm_comigt_sd
++++#define _mm_ucomile_sd _mm_comile_sd
++++#define _mm_ucomilt_sd _mm_comilt_sd
++++#define _mm_ucomineq_sd _mm_comineq_sd
++++
++++// Return vector of type __m128d with undefined elements.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_pd
++++FORCE_INLINE __m128d _mm_undefined_pd(void)
++++{
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma GCC diagnostic push
++++#pragma GCC diagnostic ignored "-Wuninitialized"
++++#endif
++++ __m128d a;
++++#if defined(_MSC_VER)
++++ a = _mm_setzero_pd();
++++#endif
++++ return a;
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma GCC diagnostic pop
++++#endif
++++}
++++
++++// Unpack and interleave 16-bit integers from the high half of a and b, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi16
++++FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s16(
++++ vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++#else
++++ int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
++++ int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
++++ int16x4x2_t result = vzip_s16(a1, b1);
++++ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave 32-bit integers from the high half of a and b, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi32
++++FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s32(
++++ vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++#else
++++ int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
++++ int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
++++ int32x2x2_t result = vzip_s32(a1, b1);
++++ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave 64-bit integers from the high half of a and b, and
++++// store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi64
++++FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s64(
++++ vzip2q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
++++#else
++++ int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
++++ int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
++++ return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
++++#endif
++++}
++++
++++// Unpack and interleave 8-bit integers from the high half of a and b, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi8
++++FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s8(
++++ vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++#else
++++ int8x8_t a1 =
++++ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
++++ int8x8_t b1 =
++++ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
++++ int8x8x2_t result = vzip_s8(a1, b1);
++++ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave double-precision (64-bit) floating-point elements from
++++// the high half of a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_pd
++++FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ return vreinterpretq_m128d_s64(
++++ vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)),
++++ vget_high_s64(vreinterpretq_s64_m128d(b))));
++++#endif
++++}
++++
++++// Unpack and interleave 16-bit integers from the low half of a and b, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi16
++++FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s16(
++++ vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
++++#else
++++ int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
++++ int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
++++ int16x4x2_t result = vzip_s16(a1, b1);
++++ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave 32-bit integers from the low half of a and b, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi32
++++FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s32(
++++ vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++#else
++++ int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
++++ int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
++++ int32x2x2_t result = vzip_s32(a1, b1);
++++ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave 64-bit integers from the low half of a and b, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi64
++++FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s64(
++++ vzip1q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
++++#else
++++ int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
++++ int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
++++ return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
++++#endif
++++}
++++
++++// Unpack and interleave 8-bit integers from the low half of a and b, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi8
++++FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s8(
++++ vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++#else
++++ int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
++++ int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
++++ int8x8x2_t result = vzip_s8(a1, b1);
++++ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
++++#endif
++++}
++++
++++// Unpack and interleave double-precision (64-bit) floating-point elements from
++++// the low half of a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_pd
++++FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ return vreinterpretq_m128d_s64(
++++ vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)),
++++ vget_low_s64(vreinterpretq_s64_m128d(b))));
++++#endif
++++}
++++
++++// Compute the bitwise XOR of packed double-precision (64-bit) floating-point
++++// elements in a and b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_pd
++++FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b)
++++{
++++ return vreinterpretq_m128d_s64(
++++ veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
++++}
++++
++++// Compute the bitwise XOR of 128 bits (representing integer data) in a and b,
++++// and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_si128
++++FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++/* SSE3 */
++++
++++// Alternatively add and subtract packed double-precision (64-bit)
++++// floating-point elements in a to/from packed elements in b, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_addsub_pd
++++FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b)
++++{
++++ _sse2neon_const __m128d mask = _mm_set_pd(1.0, -1.0);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vfmaq_f64(vreinterpretq_f64_m128d(a),
++++ vreinterpretq_f64_m128d(b),
++++ vreinterpretq_f64_m128d(mask)));
++++#else
++++ return _mm_add_pd(_mm_mul_pd(b, mask), a);
++++#endif
++++}
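++++
++++// Illustration (hypothetical lane values, lane 0 listed first):
++++//   a = { 1.0, 2.0 }, b = { 10.0, 20.0 }
++++//   _mm_addsub_pd(a, b) = { 1.0 - 10.0, 2.0 + 20.0 } = { -9.0, 22.0 }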
++++
++++// Alternatively add and subtract packed single-precision (32-bit)
++++// floating-point elements in a to/from packed elements in b, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=addsub_ps
++++FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b)
++++{
++++ _sse2neon_const __m128 mask = _mm_setr_ps(-1.0f, 1.0f, -1.0f, 1.0f);
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_FMA) /* VFPv4+ */
++++ return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(a),
++++ vreinterpretq_f32_m128(mask),
++++ vreinterpretq_f32_m128(b)));
++++#else
++++ return _mm_add_ps(_mm_mul_ps(b, mask), a);
++++#endif
++++}
++++
++++// Horizontally add adjacent pairs of double-precision (64-bit) floating-point
++++// elements in a and b, and pack the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pd
++++FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
++++#else
++++ double *da = (double *) &a;
++++ double *db = (double *) &b;
++++ double c[] = {da[0] + da[1], db[0] + db[1]};
++++ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
++++#endif
++++}
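++++
++++// Illustration (hypothetical lane values): with a = { 1.0, 2.0 } and
++++// b = { 3.0, 4.0 }, _mm_hadd_pd(a, b) = { 1.0 + 2.0, 3.0 + 4.0 } = { 3.0, 7.0 }.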
++++
++++// Horizontally add adjacent pairs of single-precision (32-bit) floating-point
++++// elements in a and b, and pack the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_ps
++++FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
++++#else
++++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
++++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
++++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
++++ return vreinterpretq_m128_f32(
++++ vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of double-precision (64-bit)
++++// floating-point elements in a and b, and pack the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pd
++++FORCE_INLINE __m128d _mm_hsub_pd(__m128d _a, __m128d _b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ float64x2_t a = vreinterpretq_f64_m128d(_a);
++++ float64x2_t b = vreinterpretq_f64_m128d(_b);
++++ return vreinterpretq_m128d_f64(
++++ vsubq_f64(vuzp1q_f64(a, b), vuzp2q_f64(a, b)));
++++#else
++++ double *da = (double *) &_a;
++++ double *db = (double *) &_b;
++++ double c[] = {da[0] - da[1], db[0] - db[1]};
++++ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of single-precision (32-bit)
++++// floating-point elements in a and b, and pack the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_ps
++++FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
++++{
++++ float32x4_t a = vreinterpretq_f32_m128(_a);
++++ float32x4_t b = vreinterpretq_f32_m128(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vsubq_f32(vuzp1q_f32(a, b), vuzp2q_f32(a, b)));
++++#else
++++ float32x4x2_t c = vuzpq_f32(a, b);
++++ return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Load 128-bits of integer data from unaligned memory into dst. This intrinsic
++++// may perform better than _mm_loadu_si128 when the data crosses a cache line
++++// boundary.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lddqu_si128
++++#define _mm_lddqu_si128 _mm_loadu_si128
++++
++++// Load a double-precision (64-bit) floating-point element from memory into both
++++// elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loaddup_pd
++++#define _mm_loaddup_pd _mm_load1_pd
++++
++++// Duplicate the low double-precision (64-bit) floating-point element from a,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movedup_pd
++++FORCE_INLINE __m128d _mm_movedup_pd(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(
++++ vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0));
++++#else
++++ return vreinterpretq_m128d_u64(
++++ vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0)));
++++#endif
++++}
++++
++++// Duplicate odd-indexed single-precision (32-bit) floating-point elements
++++// from a, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehdup_ps
++++FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vtrn2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
++++#elif defined(_sse2neon_shuffle)
++++ return vreinterpretq_m128_f32(vshuffleq_s32(
++++ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3));
++++#else
++++ float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
++++ float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3);
++++ float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3};
++++ return vreinterpretq_m128_f32(vld1q_f32(data));
++++#endif
++++}
++++
++++// Duplicate even-indexed single-precision (32-bit) floating-point elements
++++// from a, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_moveldup_ps
++++FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128_f32(
++++ vtrn1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
++++#elif defined(_sse2neon_shuffle)
++++ return vreinterpretq_m128_f32(vshuffleq_s32(
++++ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2));
++++#else
++++ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
++++ float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2);
++++ float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2};
++++ return vreinterpretq_m128_f32(vld1q_f32(data));
++++#endif
++++}
++++
++++/* SSSE3 */
++++
++++// Compute the absolute value of packed signed 16-bit integers in a, and store
++++// the unsigned results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi16
++++FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
++++{
++++ return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
++++}
++++
++++// Compute the absolute value of packed signed 32-bit integers in a, and store
++++// the unsigned results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi32
++++FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
++++{
++++ return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
++++}
++++
++++// Compute the absolute value of packed signed 8-bit integers in a, and store
++++// the unsigned results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi8
++++FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
++++{
++++ return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
++++}
++++
++++// Compute the absolute value of packed signed 16-bit integers in a, and store
++++// the unsigned results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi16
++++FORCE_INLINE __m64 _mm_abs_pi16(__m64 a)
++++{
++++ return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a)));
++++}
++++
++++// Compute the absolute value of packed signed 32-bit integers in a, and store
++++// the unsigned results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi32
++++FORCE_INLINE __m64 _mm_abs_pi32(__m64 a)
++++{
++++ return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a)));
++++}
++++
++++// Compute the absolute value of packed signed 8-bit integers in a, and store
++++// the unsigned results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi8
++++FORCE_INLINE __m64 _mm_abs_pi8(__m64 a)
++++{
++++ return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a)));
++++}
++++
++++// Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift
++++// the result right by imm8 bytes, and store the low 16 bytes in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi8
++++#if defined(__GNUC__) && !defined(__clang__)
++++#define _mm_alignr_epi8(a, b, imm) \
++++ __extension__({ \
++++ uint8x16_t _a = vreinterpretq_u8_m128i(a); \
++++ uint8x16_t _b = vreinterpretq_u8_m128i(b); \
++++ __m128i ret; \
++++ if (_sse2neon_unlikely((imm) & ~31)) \
++++ ret = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
++++ else if (imm >= 16) \
++++ ret = _mm_srli_si128(a, imm >= 16 ? imm - 16 : 0); \
++++ else \
++++ ret = \
++++ vreinterpretq_m128i_u8(vextq_u8(_b, _a, imm < 16 ? imm : 0)); \
++++ ret; \
++++ })
++++
++++#else
++++#define _mm_alignr_epi8(a, b, imm) \
++++ _sse2neon_define2( \
++++ __m128i, a, b, uint8x16_t __a = vreinterpretq_u8_m128i(_a); \
++++ uint8x16_t __b = vreinterpretq_u8_m128i(_b); __m128i ret; \
++++ if (_sse2neon_unlikely((imm) & ~31)) ret = \
++++ vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
++++ else if (imm >= 16) ret = \
++++ _mm_srli_si128(_a, imm >= 16 ? imm - 16 : 0); \
++++ else ret = \
++++ vreinterpretq_m128i_u8(vextq_u8(__b, __a, imm < 16 ? imm : 0)); \
++++ _sse2neon_return(ret);)
++++
++++#endif
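++++
++++// Illustration (hypothetical byte values): _mm_alignr_epi8(a, b, 4) yields
++++// bytes b[4..15] in dst[0..11] followed by a[0..3] in dst[12..15], i.e. the
++++// 32-byte concatenation a:b shifted right by 4 bytes, keeping the low 16.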
++++
++++// Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift
++++// the result right by imm8 bytes, and store the low 8 bytes in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_pi8
++++#define _mm_alignr_pi8(a, b, imm) \
++++ _sse2neon_define2( \
++++ __m64, a, b, __m64 ret; if (_sse2neon_unlikely((imm) >= 16)) { \
++++ ret = vreinterpret_m64_s8(vdup_n_s8(0)); \
++++ } else { \
++++ uint8x8_t tmp_low; \
++++ uint8x8_t tmp_high; \
++++ if ((imm) >= 8) { \
++++ const int idx = (imm) - 8; \
++++ tmp_low = vreinterpret_u8_m64(_a); \
++++ tmp_high = vdup_n_u8(0); \
++++ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
++++ } else { \
++++ const int idx = (imm); \
++++ tmp_low = vreinterpret_u8_m64(_b); \
++++ tmp_high = vreinterpret_u8_m64(_a); \
++++ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
++++ } \
++++ } _sse2neon_return(ret);)
++++
++++// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
++++// signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi16
++++FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
++++{
++++ int16x8_t a = vreinterpretq_s16_m128i(_a);
++++ int16x8_t b = vreinterpretq_s16_m128i(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
++++#else
++++ return vreinterpretq_m128i_s16(
++++ vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
++++ vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
++++#endif
++++}
++++
++++// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
++++// signed 32-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi32
++++FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
++++{
++++ int32x4_t a = vreinterpretq_s32_m128i(_a);
++++ int32x4_t b = vreinterpretq_s32_m128i(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s32(vpaddq_s32(a, b));
++++#else
++++ return vreinterpretq_m128i_s32(
++++ vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
++++ vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
++++#endif
++++}
++++
++++// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
++++// signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi16
++++FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_s16(
++++ vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
++++}
++++
++++// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
++++// signed 32-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi32
++++FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b)
++++{
++++ return vreinterpret_m64_s32(
++++ vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)));
++++}
++++
++++// Horizontally add adjacent pairs of signed 16-bit integers in a and b using
++++// saturation, and pack the signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_epi16
++++FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int16x8_t a = vreinterpretq_s16_m128i(_a);
++++ int16x8_t b = vreinterpretq_s16_m128i(_b);
++++ return vreinterpretq_m128i_s16(
++++ vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
++++#else
++++ int32x4_t a = vreinterpretq_s32_m128i(_a);
++++ int32x4_t b = vreinterpretq_s32_m128i(_b);
++++ // Deinterleave the even- and odd-indexed 16-bit lanes using vmovn/vshrn:
++++ // ab0246 = [a0|a2|a4|a6|b0|b2|b4|b6]
++++ // ab1357 = [a1|a3|a5|a7|b1|b3|b5|b7]
++++ int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
++++ int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
++++ // Saturated add
++++ return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
++++#endif
++++}
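++++
++++// Illustration (hypothetical lane values): adjacent pairs are added with
++++// signed saturation, so two neighbouring lanes of 32767 produce 32767
++++// (0x7FFF) rather than wrapping around.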
++++
++++// Horizontally add adjacent pairs of signed 16-bit integers in a and b using
++++// saturation, and pack the signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_pi16
++++FORCE_INLINE __m64 _mm_hadds_pi16(__m64 _a, __m64 _b)
++++{
++++ int16x4_t a = vreinterpret_s16_m64(_a);
++++ int16x4_t b = vreinterpret_s16_m64(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpret_m64_s16(vqadd_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
++++#else
++++ int16x4x2_t res = vuzp_s16(a, b);
++++ return vreinterpret_m64_s16(vqadd_s16(res.val[0], res.val[1]));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
++++// the signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi16
++++FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
++++{
++++ int16x8_t a = vreinterpretq_s16_m128i(_a);
++++ int16x8_t b = vreinterpretq_s16_m128i(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s16(
++++ vsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
++++#else
++++ int16x8x2_t c = vuzpq_s16(a, b);
++++ return vreinterpretq_m128i_s16(vsubq_s16(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
++++// the signed 32-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi32
++++FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
++++{
++++ int32x4_t a = vreinterpretq_s32_m128i(_a);
++++ int32x4_t b = vreinterpretq_s32_m128i(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s32(
++++ vsubq_s32(vuzp1q_s32(a, b), vuzp2q_s32(a, b)));
++++#else
++++ int32x4x2_t c = vuzpq_s32(a, b);
++++ return vreinterpretq_m128i_s32(vsubq_s32(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
++++// the signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pi16
++++FORCE_INLINE __m64 _mm_hsub_pi16(__m64 _a, __m64 _b)
++++{
++++ int16x4_t a = vreinterpret_s16_m64(_a);
++++ int16x4_t b = vreinterpret_s16_m64(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpret_m64_s16(vsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
++++#else
++++ int16x4x2_t c = vuzp_s16(a, b);
++++ return vreinterpret_m64_s16(vsub_s16(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
++++// the signed 32-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_hsub_pi32
++++FORCE_INLINE __m64 _mm_hsub_pi32(__m64 _a, __m64 _b)
++++{
++++ int32x2_t a = vreinterpret_s32_m64(_a);
++++ int32x2_t b = vreinterpret_s32_m64(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpret_m64_s32(vsub_s32(vuzp1_s32(a, b), vuzp2_s32(a, b)));
++++#else
++++ int32x2x2_t c = vuzp_s32(a, b);
++++ return vreinterpret_m64_s32(vsub_s32(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
++++// using saturation, and pack the signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_epi16
++++FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
++++{
++++ int16x8_t a = vreinterpretq_s16_m128i(_a);
++++ int16x8_t b = vreinterpretq_s16_m128i(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s16(
++++ vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
++++#else
++++ int16x8x2_t c = vuzpq_s16(a, b);
++++ return vreinterpretq_m128i_s16(vqsubq_s16(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
++++// using saturation, and pack the signed 16-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_pi16
++++FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b)
++++{
++++ int16x4_t a = vreinterpret_s16_m64(_a);
++++ int16x4_t b = vreinterpret_s16_m64(_b);
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpret_m64_s16(vqsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
++++#else
++++ int16x4x2_t c = vuzp_s16(a, b);
++++ return vreinterpret_m64_s16(vqsub_s16(c.val[0], c.val[1]));
++++#endif
++++}
++++
++++// Vertically multiply each unsigned 8-bit integer from a with the corresponding
++++// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
++++// Horizontally add adjacent pairs of intermediate signed 16-bit integers,
++++// and pack the saturated results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16
++++FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint8x16_t a = vreinterpretq_u8_m128i(_a);
++++ int8x16_t b = vreinterpretq_s8_m128i(_b);
++++ int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))),
++++ vmovl_s8(vget_low_s8(b)));
++++ int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))),
++++ vmovl_s8(vget_high_s8(b)));
++++ return vreinterpretq_m128i_s16(
++++ vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)));
++++#else
++++ // This would be much simpler if x86 would choose to zero extend OR sign
++++ // extend, not both. This could probably be optimized better.
++++ uint16x8_t a = vreinterpretq_u16_m128i(_a);
++++ int16x8_t b = vreinterpretq_s16_m128i(_b);
++++
++++ // Zero extend a
++++ int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
++++ int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
++++
++++ // Sign extend by shifting left then shifting right.
++++ int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
++++ int16x8_t b_odd = vshrq_n_s16(b, 8);
++++
++++ // multiply
++++ int16x8_t prod1 = vmulq_s16(a_even, b_even);
++++ int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
++++
++++ // saturated add
++++ return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
++++#endif
++++}
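++++
++++// Illustration (hypothetical lane values): for byte pair a = { 255, 1 }
++++// (unsigned) and b = { -1, 2 } (signed), the intermediates are
++++// 255 * -1 = -255 and 1 * 2 = 2, so the packed 16-bit result lane is
++++// saturate(-255 + 2) = -253.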
++++
++++// Vertically multiply each unsigned 8-bit integer from a with the corresponding
++++// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
++++// Horizontally add adjacent pairs of intermediate signed 16-bit integers, and
++++// pack the saturated results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_pi16
++++FORCE_INLINE __m64 _mm_maddubs_pi16(__m64 _a, __m64 _b)
++++{
++++ uint16x4_t a = vreinterpret_u16_m64(_a);
++++ int16x4_t b = vreinterpret_s16_m64(_b);
++++
++++ // Zero extend a
++++ int16x4_t a_odd = vreinterpret_s16_u16(vshr_n_u16(a, 8));
++++ int16x4_t a_even = vreinterpret_s16_u16(vand_u16(a, vdup_n_u16(0xff)));
++++
++++ // Sign extend by shifting left then shifting right.
++++ int16x4_t b_even = vshr_n_s16(vshl_n_s16(b, 8), 8);
++++ int16x4_t b_odd = vshr_n_s16(b, 8);
++++
++++ // multiply
++++ int16x4_t prod1 = vmul_s16(a_even, b_even);
++++ int16x4_t prod2 = vmul_s16(a_odd, b_odd);
++++
++++ // saturated add
++++ return vreinterpret_m64_s16(vqadd_s16(prod1, prod2));
++++}
++++
++++// Multiply packed signed 16-bit integers in a and b, producing intermediate
++++// signed 32-bit integers. Shift right by 15 bits while rounding up, and store
++++// the packed 16-bit integers in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_epi16
++++FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
++++{
++++ // vqrdmulhq_s16 would be a single instruction, but it saturates the
++++ // 0x8000 * 0x8000 case and therefore does not match _mm_mulhrs_epi16:
++++ // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
++++
++++ // Multiply
++++ int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
++++ vget_low_s16(vreinterpretq_s16_m128i(b)));
++++ int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
++++ vget_high_s16(vreinterpretq_s16_m128i(b)));
++++
++++ // Rounding narrowing shift right
++++ // narrow = (int16_t)((mul + 16384) >> 15);
++++ int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
++++ int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
++++
++++ // Join together
++++ return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
++++}
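++++
++++// Illustration (hypothetical lane values): for a = b = 16384 (0.5 in Q15),
++++// the product is 2^28; (2^28 + 16384) >> 15 = 8192, i.e. 0.25 in Q15, which
++++// matches the Intel definition (((a * b) >> 14) + 1) >> 1.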
++++
++++// Multiply packed signed 16-bit integers in a and b, producing intermediate
++++// signed 32-bit integers. Truncate each intermediate integer to the 18 most
++++// significant bits, round by adding 1, and store bits [16:1] to dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_pi16
++++FORCE_INLINE __m64 _mm_mulhrs_pi16(__m64 a, __m64 b)
++++{
++++ int32x4_t mul_extend =
++++ vmull_s16((vreinterpret_s16_m64(a)), (vreinterpret_s16_m64(b)));
++++
++++ // Rounding narrowing shift right
++++ return vreinterpret_m64_s16(vrshrn_n_s32(mul_extend, 15));
++++}
++++
++++// Shuffle packed 8-bit integers in a according to shuffle control mask in the
++++// corresponding 8-bit element of b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi8
++++FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
++++{
++++ int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
++++ uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
++++ uint8x16_t idx_masked =
++++ vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
++++#elif defined(__GNUC__)
++++ int8x16_t ret;
++++ // %e and %f represent the even and odd D registers
++++ // respectively.
++++ __asm__ __volatile__(
++++ "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
++++ "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
++++ : [ret] "=&w"(ret)
++++ : [tbl] "w"(tbl), [idx] "w"(idx_masked));
++++ return vreinterpretq_m128i_s8(ret);
++++#else
++++ // Generic fallback: split the 128-bit table into two 64-bit halves for vtbl2
++++ int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
++++ return vreinterpretq_m128i_s8(
++++ vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
++++ vtbl2_s8(a_split, vget_high_u8(idx_masked))));
++++#endif
++++}
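++++
++++// Illustration (hypothetical byte values): control bytes select by their low
++++// 4 bits, and a set bit 7 zeroes the lane, e.g. b = { 0x0F, 0x80, 0x00, ... }
++++// produces dst = { a[15], 0, a[0], ... }.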
++++
++++// Shuffle packed 8-bit integers in a according to shuffle control mask in the
++++// corresponding 8-bit element of b, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi8
++++FORCE_INLINE __m64 _mm_shuffle_pi8(__m64 a, __m64 b)
++++{
++++ const int8x8_t controlMask =
++++ vand_s8(vreinterpret_s8_m64(b), vdup_n_s8((int8_t) (0x1 << 7 | 0x07)));
++++ int8x8_t res = vtbl1_s8(vreinterpret_s8_m64(a), controlMask);
++++ return vreinterpret_m64_s8(res);
++++}
++++
++++// Negate packed 16-bit integers in a when the corresponding signed
++++// 16-bit integer in b is negative, and store the results in dst.
++++// Elements in dst are zeroed out when the corresponding element
++++// in b is zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi16
++++FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
++++{
++++ int16x8_t a = vreinterpretq_s16_m128i(_a);
++++ int16x8_t b = vreinterpretq_s16_m128i(_b);
++++
++++ // signed shift right: faster than vclt
++++ // (b < 0) ? 0xFFFF : 0
++++ uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
++++ // (b == 0) ? 0xFFFF : 0
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b));
++++#else
++++ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0)));
++++#endif
++++
++++ // bitwise select either a or the negation of a (vnegq_s16(a)) based on
++++ // ltMask
++++ int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a);
++++ // res = masked & (~zeroMask)
++++ int16x8_t res = vbicq_s16(masked, zeroMask);
++++ return vreinterpretq_m128i_s16(res);
++++}
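++++
++++// Illustration (hypothetical lane values): with a = { 5, 5, 5, ... } and
++++// b = { -7, 0, 3, ... }, _mm_sign_epi16(a, b) = { -5, 0, 5, ... }.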
++++
++++// Negate packed 32-bit integers in a when the corresponding signed
++++// 32-bit integer in b is negative, and store the results in dst.
++++// Elements in dst are zeroed out when the corresponding element
++++// in b is zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi32
++++FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
++++{
++++ int32x4_t a = vreinterpretq_s32_m128i(_a);
++++ int32x4_t b = vreinterpretq_s32_m128i(_b);
++++
++++ // signed shift right: faster than vclt
++++ // (b < 0) ? 0xFFFFFFFF : 0
++++ uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
++++
++++ // (b == 0) ? 0xFFFFFFFF : 0
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b));
++++#else
++++ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0)));
++++#endif
++++
++++ // bitwise select either a or the negation of a (vnegq_s32(a)) based on
++++ // ltMask
++++ int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a);
++++ // res = masked & (~zeroMask)
++++ int32x4_t res = vbicq_s32(masked, zeroMask);
++++ return vreinterpretq_m128i_s32(res);
++++}
++++
++++// Negate packed 8-bit integers in a when the corresponding signed
++++// 8-bit integer in b is negative, and store the results in dst.
++++// Elements in dst are zeroed out when the corresponding element
++++// in b is zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi8
++++FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
++++{
++++ int8x16_t a = vreinterpretq_s8_m128i(_a);
++++ int8x16_t b = vreinterpretq_s8_m128i(_b);
++++
++++ // signed shift right: faster than vclt
++++ // (b < 0) ? 0xFF : 0
++++ uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
++++
++++ // (b == 0) ? 0xFF : 0
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b));
++++#else
++++ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0)));
++++#endif
++++
++++ // bitwise select either a or the negation of a (vnegq_s8(a)) based on
++++ // ltMask
++++ int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a);
++++ // res = masked & (~zeroMask)
++++ int8x16_t res = vbicq_s8(masked, zeroMask);
++++
++++ return vreinterpretq_m128i_s8(res);
++++}
++++
++++// Negate packed 16-bit integers in a when the corresponding signed 16-bit
++++// integer in b is negative, and store the results in dst. Elements in dst are
++++// zeroed out when the corresponding element in b is zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi16
++++FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b)
++++{
++++ int16x4_t a = vreinterpret_s16_m64(_a);
++++ int16x4_t b = vreinterpret_s16_m64(_b);
++++
++++ // signed shift right: faster than vclt
++++ // (b < 0) ? 0xFFFF : 0
++++ uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15));
++++
++++ // (b == 0) ? 0xFFFF : 0
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));
++++#else
++++ int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));
++++#endif
++++
++++ // bitwise select either a or the negation of a (vneg_s16(a)) based on
++++ // ltMask
++++ int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a);
++++ // res = masked & (~zeroMask)
++++ int16x4_t res = vbic_s16(masked, zeroMask);
++++
++++ return vreinterpret_m64_s16(res);
++++}
++++
++++// Negate packed 32-bit integers in a when the corresponding signed 32-bit
++++// integer in b is negative, and store the results in dst. Elements in dst are
++++// zeroed out when the corresponding element in b is zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi32
++++FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b)
++++{
++++ int32x2_t a = vreinterpret_s32_m64(_a);
++++ int32x2_t b = vreinterpret_s32_m64(_b);
++++
++++ // signed shift right: faster than vclt
++++ // (b < 0) ? 0xFFFFFFFF : 0
++++ uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31));
++++
++++ // (b == 0) ? 0xFFFFFFFF : 0
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b));
++++#else
++++ int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0)));
++++#endif
++++
++++ // bitwise select either a or the negation of a (vneg_s32(a)) based on
++++ // ltMask
++++ int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a);
++++ // res = masked & (~zeroMask)
++++ int32x2_t res = vbic_s32(masked, zeroMask);
++++
++++ return vreinterpret_m64_s32(res);
++++}
++++
++++// Negate packed 8-bit integers in a when the corresponding signed 8-bit integer
++++// in b is negative, and store the results in dst. Elements in dst are zeroed out
++++// when the corresponding element in b is zero.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi8
++++FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b)
++++{
++++ int8x8_t a = vreinterpret_s8_m64(_a);
++++ int8x8_t b = vreinterpret_s8_m64(_b);
++++
++++ // signed shift right: faster than vclt
++++ // (b < 0) ? 0xFF : 0
++++ uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7));
++++
++++ // (b == 0) ? 0xFF : 0
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b));
++++#else
++++ int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0)));
++++#endif
++++
++++ // bitwise select either a or the negation of a (vneg_s8(a)) based on
++++ // ltMask
++++ int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a);
++++ // res = masked & (~zeroMask)
++++ int8x8_t res = vbic_s8(masked, zeroMask);
++++
++++ return vreinterpret_m64_s8(res);
++++}
++++
++++/* SSE4.1 */
++++
++++// Blend packed 16-bit integers from a and b using control mask imm8, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi16
++++// FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
++++// __constrange(0,255) int imm)
++++#define _mm_blend_epi16(a, b, imm) \
++++ _sse2neon_define2( \
++++ __m128i, a, b, \
++++ const uint16_t _mask[8] = \
++++ _sse2neon_init(((imm) & (1 << 0)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 1)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 2)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 3)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 4)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 5)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 6)) ? (uint16_t) -1 : 0x0, \
++++ ((imm) & (1 << 7)) ? (uint16_t) -1 : 0x0); \
++++ uint16x8_t _mask_vec = vld1q_u16(_mask); \
++++ uint16x8_t __a = vreinterpretq_u16_m128i(_a); \
++++ uint16x8_t __b = vreinterpretq_u16_m128i(_b); _sse2neon_return( \
++++ vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, __b, __a)));)
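++++
++++// Illustration (hypothetical immediate): _mm_blend_epi16(a, b, 0x0F) takes
++++// 16-bit lanes 0-3 from b and lanes 4-7 from a.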
++++
++++// Blend packed double-precision (64-bit) floating-point elements from a and b
++++// using control mask imm8, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_pd
++++#define _mm_blend_pd(a, b, imm) \
++++ _sse2neon_define2( \
++++ __m128d, a, b, \
++++ const uint64_t _mask[2] = \
++++ _sse2neon_init(((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \
++++ ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)); \
++++ uint64x2_t _mask_vec = vld1q_u64(_mask); \
++++ uint64x2_t __a = vreinterpretq_u64_m128d(_a); \
++++ uint64x2_t __b = vreinterpretq_u64_m128d(_b); _sse2neon_return( \
++++ vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, __b, __a)));)
++++
++++// Blend packed single-precision (32-bit) floating-point elements from a and b
++++// using mask, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_ps
++++FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8)
++++{
++++ const uint32_t ALIGN_STRUCT(16)
++++ data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0,
++++ ((imm8) & (1 << 1)) ? UINT32_MAX : 0,
++++ ((imm8) & (1 << 2)) ? UINT32_MAX : 0,
++++ ((imm8) & (1 << 3)) ? UINT32_MAX : 0};
++++ uint32x4_t mask = vld1q_u32(data);
++++ float32x4_t a = vreinterpretq_f32_m128(_a);
++++ float32x4_t b = vreinterpretq_f32_m128(_b);
++++ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
++++}
++++
++++// Blend packed 8-bit integers from a and b using mask, and store the results in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_epi8
++++FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
++++{
++++ // Use a signed shift right to create a mask with the sign bit
++++ uint8x16_t mask =
++++ vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
++++ uint8x16_t a = vreinterpretq_u8_m128i(_a);
++++ uint8x16_t b = vreinterpretq_u8_m128i(_b);
++++ return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
++++}
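++++
++++// Illustration (hypothetical mask bytes): only the most significant bit of
++++// each mask byte matters, so a mask byte of 0x80 selects the byte from b and
++++// 0x7F selects the byte from a, matching the x86 semantics.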
++++
++++// Blend packed double-precision (64-bit) floating-point elements from a and b
++++// using mask, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_pd
++++FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask)
++++{
++++ uint64x2_t mask =
++++ vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63));
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ float64x2_t a = vreinterpretq_f64_m128d(_a);
++++ float64x2_t b = vreinterpretq_f64_m128d(_b);
++++ return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a));
++++#else
++++ uint64x2_t a = vreinterpretq_u64_m128d(_a);
++++ uint64x2_t b = vreinterpretq_u64_m128d(_b);
++++ return vreinterpretq_m128d_u64(vbslq_u64(mask, b, a));
++++#endif
++++}
++++
++++// Blend packed single-precision (32-bit) floating-point elements from a and b
++++// using mask, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_ps
++++FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask)
++++{
++++ // Use a signed shift right to create a mask with the sign bit
++++ uint32x4_t mask =
++++ vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31));
++++ float32x4_t a = vreinterpretq_f32_m128(_a);
++++ float32x4_t b = vreinterpretq_f32_m128(_b);
++++ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
++++}
++++
++++// Round the packed double-precision (64-bit) floating-point elements in a up
++++// to an integer value, and store the results as packed double-precision
++++// floating-point elements in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_pd
++++FORCE_INLINE __m128d _mm_ceil_pd(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vrndpq_f64(vreinterpretq_f64_m128d(a)));
++++#else
++++ double *f = (double *) &a;
++++ return _mm_set_pd(ceil(f[1]), ceil(f[0]));
++++#endif
++++}
++++
++++// Round the packed single-precision (32-bit) floating-point elements in a up to
++++// an integer value, and store the results as packed single-precision
++++// floating-point elements in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ps
++++FORCE_INLINE __m128 _mm_ceil_ps(__m128 a)
++++{
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a)));
++++#else
++++ float *f = (float *) &a;
++++ return _mm_set_ps(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]), ceilf(f[0]));
++++#endif
++++}
++++
++++// Round the lower double-precision (64-bit) floating-point element in b up to
++++// an integer value, store the result as a double-precision floating-point
++++// element in the lower element of dst, and copy the upper element from a to the
++++// upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_sd
++++FORCE_INLINE __m128d _mm_ceil_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_ceil_pd(b));
++++}
++++
++++// Round the lower single-precision (32-bit) floating-point element in b up to
++++// an integer value, store the result as a single-precision floating-point
++++// element in the lower element of dst, and copy the upper 3 packed elements
++++// from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ss
++++FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_ceil_ps(b));
++++}
++++
++++// Compare packed 64-bit integers in a and b for equality, and store the
++++// results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi64
++++FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_u64(
++++ vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b)));
++++#else
++++ // ARMv7 lacks vceqq_u64
++++ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
++++ uint32x4_t cmp =
++++ vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b));
++++ uint32x4_t swapped = vrev64q_u32(cmp);
++++ return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
++++#endif
++++}
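++++
++++/* Note (illustrative, not part of upstream sse2neon): on ARMv7, vceqq_u32
++++ * yields an all-ones/all-zeros result per 32-bit half, and vrev64q_u32 swaps
++++ * the two halves inside each 64-bit lane, so the AND leaves a 64-bit lane
++++ * all-ones only when both of its halves compared equal.
++++ *
++++ *     __m128i x = _mm_set_epi64x(0x7766554433221100LL, 0x0011223344556677LL);
++++ *     __m128i y = _mm_set_epi64x(0x7766554433221100LL, 0x0011223344556678LL);
++++ *     __m128i eq = _mm_cmpeq_epi64(x, y); // lane 1 all ones, lane 0 all zeros
++++ */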
++++
++++// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi32
++++FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
++++}
++++
++++// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi64
++++FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
++++{
++++ int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
++++ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
++++ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
++++ return vreinterpretq_m128i_s64(s64x2);
++++}
++++
++++// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi64
++++FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
++++{
++++ return vreinterpretq_m128i_s64(
++++ vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
++++}
++++
++++// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi16
++++FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
++++{
++++ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
++++ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
++++ return vreinterpretq_m128i_s16(s16x8);
++++}
++++
++++// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store
++++// the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi32
++++FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
++++{
++++ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
++++ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
++++ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
++++ return vreinterpretq_m128i_s32(s32x4);
++++}
++++
++++// Sign extend packed 8-bit integers in the low 8 bytes of a to packed 64-bit
++++// integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi64
++++FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
++++{
++++ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
++++ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
++++ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
++++ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
++++ return vreinterpretq_m128i_s64(s64x2);
++++}
++++
++++// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi32
++++FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
++++{
++++ return vreinterpretq_m128i_u32(
++++ vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
++++}
++++
++++// Zero extend packed unsigned 16-bit integers in a to packed 64-bit integers,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi64
++++FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
++++{
++++ uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
++++ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
++++ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
++++ return vreinterpretq_m128i_u64(u64x2);
++++}
++++
++++// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu32_epi64
++++FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
++++{
++++ return vreinterpretq_m128i_u64(
++++ vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
++++}
++++
++++// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi16
++++FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
++++{
++++ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx HGFE DCBA */
++++ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0H0G 0F0E 0D0C 0B0A */
++++ return vreinterpretq_m128i_u16(u16x8);
++++}
++++
++++// Zero extend packed unsigned 8-bit integers in a to packed 32-bit integers,
++++// and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi32
++++FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
++++{
++++ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
++++ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
++++ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
++++ return vreinterpretq_m128i_u32(u32x4);
++++}
++++
++++// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed
++++// 64-bit integers, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi64
++++FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
++++{
++++ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
++++ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
++++ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
++++ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
++++ return vreinterpretq_m128i_u64(u64x2);
++++}
++++
++++// Conditionally multiply the packed double-precision (64-bit) floating-point
++++// elements in a and b using the high 4 bits in imm8, sum the two products, and
++++// conditionally store the sum in dst using the low 4 bits of imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_pd
++++FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm)
++++{
++++ // Generate mask value from constant immediate bit value
++++ const int64_t bit0Mask = imm & 0x01 ? UINT64_MAX : 0;
++++ const int64_t bit1Mask = imm & 0x02 ? UINT64_MAX : 0;
++++#if !SSE2NEON_PRECISE_DP
++++ const int64_t bit4Mask = imm & 0x10 ? UINT64_MAX : 0;
++++ const int64_t bit5Mask = imm & 0x20 ? UINT64_MAX : 0;
++++#endif
++++ // Conditional multiplication
++++#if !SSE2NEON_PRECISE_DP
++++ __m128d mul = _mm_mul_pd(a, b);
++++ const __m128d mulMask =
++++ _mm_castsi128_pd(_mm_set_epi64x(bit5Mask, bit4Mask));
++++ __m128d tmp = _mm_and_pd(mul, mulMask);
++++#else
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ double d0 = (imm & 0x10) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0) *
++++ vgetq_lane_f64(vreinterpretq_f64_m128d(b), 0)
++++ : 0;
++++ double d1 = (imm & 0x20) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1) *
++++ vgetq_lane_f64(vreinterpretq_f64_m128d(b), 1)
++++ : 0;
++++#else
++++ double d0 = (imm & 0x10) ? ((double *) &a)[0] * ((double *) &b)[0] : 0;
++++ double d1 = (imm & 0x20) ? ((double *) &a)[1] * ((double *) &b)[1] : 0;
++++#endif
++++ __m128d tmp = _mm_set_pd(d1, d0);
++++#endif
++++ // Sum the products
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ double sum = vpaddd_f64(vreinterpretq_f64_m128d(tmp));
++++#else
++++ double sum = *((double *) &tmp) + *(((double *) &tmp) + 1);
++++#endif
++++ // Conditionally store the sum
++++ const __m128d sumMask =
++++ _mm_castsi128_pd(_mm_set_epi64x(bit1Mask, bit0Mask));
++++ __m128d res = _mm_and_pd(_mm_set_pd1(sum), sumMask);
++++ return res;
++++}
++++
++++// Conditionally multiply the packed single-precision (32-bit) floating-point
++++// elements in a and b using the high 4 bits in imm8, sum the four products,
++++// and conditionally store the sum in dst using the low 4 bits of imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_ps
++++FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm)
++++{
++++ float32x4_t elementwise_prod = _mm_mul_ps(a, b);
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ /* shortcuts */
++++ if (imm == 0xFF) {
++++ return _mm_set1_ps(vaddvq_f32(elementwise_prod));
++++ }
++++
++++ if ((imm & 0x0F) == 0x0F) {
++++ if (!(imm & (1 << 4)))
++++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 0);
++++ if (!(imm & (1 << 5)))
++++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 1);
++++ if (!(imm & (1 << 6)))
++++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 2);
++++ if (!(imm & (1 << 7)))
++++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 3);
++++
++++ return _mm_set1_ps(vaddvq_f32(elementwise_prod));
++++ }
++++#endif
++++
++++ float s = 0.0f;
++++
++++ if (imm & (1 << 4))
++++ s += vgetq_lane_f32(elementwise_prod, 0);
++++ if (imm & (1 << 5))
++++ s += vgetq_lane_f32(elementwise_prod, 1);
++++ if (imm & (1 << 6))
++++ s += vgetq_lane_f32(elementwise_prod, 2);
++++ if (imm & (1 << 7))
++++ s += vgetq_lane_f32(elementwise_prod, 3);
++++
++++ const float32_t res[4] = {
++++ (imm & 0x1) ? s : 0.0f,
++++ (imm & 0x2) ? s : 0.0f,
++++ (imm & 0x4) ? s : 0.0f,
++++ (imm & 0x8) ? s : 0.0f,
++++ };
++++ return vreinterpretq_m128_f32(vld1q_f32(res));
++++}
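++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon): the high four
++++ * bits of imm select which products enter the sum and the low four bits
++++ * select which output lanes receive it; all other lanes are zeroed.
++++ *
++++ *     __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
++++ *     __m128 b = _mm_set_ps(1.0f, 1.0f, 1.0f, 1.0f);
++++ *     __m128 d = _mm_dp_ps(a, b, 0x71); // sum lanes 0..2 = 6.0, stored in lane 0 only
++++ */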
++++
++++// Extract a 32-bit integer from a, selected with imm8, and store the result in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi32
++++// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
++++#define _mm_extract_epi32(a, imm) \
++++ vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))
++++
++++// Extract a 64-bit integer from a, selected with imm8, and store the result in
++++// dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi64
++++// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
++++#define _mm_extract_epi64(a, imm) \
++++ vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))
++++
++++// Extract an 8-bit integer from a, selected with imm8, and store the result
++++// in the lower element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi8
++++// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
++++#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))
++++
++++// Extract a single-precision (32-bit) floating-point element from a, selected
++++// with imm8, and return its bit pattern as a 32-bit integer.
++++// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
++++#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))
++++
++++// Round the packed double-precision (64-bit) floating-point elements in a down
++++// to an integer value, and store the results as packed double-precision
++++// floating-point elements in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_pd
++++FORCE_INLINE __m128d _mm_floor_pd(__m128d a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128d_f64(vrndmq_f64(vreinterpretq_f64_m128d(a)));
++++#else
++++ double *f = (double *) &a;
++++ return _mm_set_pd(floor(f[1]), floor(f[0]));
++++#endif
++++}
++++
++++// Round the packed single-precision (32-bit) floating-point elements in a down
++++// to an integer value, and store the results as packed single-precision
++++// floating-point elements in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ps
++++FORCE_INLINE __m128 _mm_floor_ps(__m128 a)
++++{
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a)));
++++#else
++++ float *f = (float *) &a;
++++ return _mm_set_ps(floorf(f[3]), floorf(f[2]), floorf(f[1]), floorf(f[0]));
++++#endif
++++}
++++
++++// Round the lower double-precision (64-bit) floating-point element in b down to
++++// an integer value, store the result as a double-precision floating-point
++++// element in the lower element of dst, and copy the upper element from a to the
++++// upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_sd
++++FORCE_INLINE __m128d _mm_floor_sd(__m128d a, __m128d b)
++++{
++++ return _mm_move_sd(a, _mm_floor_pd(b));
++++}
++++
++++// Round the lower single-precision (32-bit) floating-point element in b down to
++++// an integer value, store the result as a single-precision floating-point
++++// element in the lower element of dst, and copy the upper 3 packed elements
++++// from a to the upper elements of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ss
++++FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b)
++++{
++++ return _mm_move_ss(a, _mm_floor_ps(b));
++++}
++++
++++// Copy a to dst, and insert the 32-bit integer i into dst at the location
++++// specified by imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi32
++++// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
++++// __constrange(0,4) int imm)
++++#define _mm_insert_epi32(a, b, imm) \
++++ vreinterpretq_m128i_s32( \
++++ vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm)))
++++
++++// Copy a to dst, and insert the 64-bit integer i into dst at the location
++++// specified by imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi64
++++// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
++++// __constrange(0,2) int imm)
++++#define _mm_insert_epi64(a, b, imm) \
++++ vreinterpretq_m128i_s64( \
++++ vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm)))
++++
++++// Copy a to dst, and insert the lower 8-bit integer from i into dst at the
++++// location specified by imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi8
++++// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
++++// __constrange(0,16) int imm)
++++#define _mm_insert_epi8(a, b, imm) \
++++ vreinterpretq_m128i_s8(vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm)))
++++
++++// Copy a to tmp, then insert a single-precision (32-bit) floating-point
++++// element from b into tmp using the control in imm8. Store tmp to dst using
++++// the mask in imm8 (elements are zeroed out when the corresponding bit is set).
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=insert_ps
++++#define _mm_insert_ps(a, b, imm8) \
++++ _sse2neon_define2( \
++++ __m128, a, b, \
++++ float32x4_t tmp1 = \
++++ vsetq_lane_f32(vgetq_lane_f32(_b, (imm8 >> 6) & 0x3), \
++++ vreinterpretq_f32_m128(_a), 0); \
++++ float32x4_t tmp2 = \
++++ vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), \
++++ vreinterpretq_f32_m128(_a), ((imm8 >> 4) & 0x3)); \
++++ const uint32_t data[4] = \
++++ _sse2neon_init(((imm8) & (1 << 0)) ? UINT32_MAX : 0, \
++++ ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \
++++ ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \
++++ ((imm8) & (1 << 3)) ? UINT32_MAX : 0); \
++++ uint32x4_t mask = vld1q_u32(data); \
++++ float32x4_t all_zeros = vdupq_n_f32(0); \
++++ \
++++ _sse2neon_return(vreinterpretq_m128_f32( \
++++ vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))));)
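++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon): in
++++ * _mm_insert_ps, imm8[7:6] selects the source lane of b, imm8[5:4] the
++++ * destination lane in a, and each set bit in imm8[3:0] zeroes that result
++++ * lane.
++++ *
++++ *     __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
++++ *     __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
++++ *     __m128 r = _mm_insert_ps(a, b, 0xD0); // b lane 3 -> lane 1: {1, 8, 3, 4}
++++ */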
++++
++++// Compare packed signed 32-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi32
++++FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Compare packed signed 8-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi8
++++FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Compare packed unsigned 16-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu16
++++FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
++++}
++++
++++// Compare packed unsigned 32-bit integers in a and b, and store packed maximum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu32
++++FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u32(
++++ vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
++++}
++++
++++// Compare packed signed 32-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi32
++++FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Compare packed signed 8-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi8
++++FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s8(
++++ vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
++++}
++++
++++// Compare packed unsigned 16-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu16
++++FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
++++}
++++
++++// Compare packed unsigned 32-bit integers in a and b, and store packed minimum
++++// values in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu32
++++FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u32(
++++ vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
++++}
++++
++++// Horizontally compute the minimum amongst the packed unsigned 16-bit integers
++++// in a, store the minimum and index in dst, and zero the remaining bits in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_minpos_epu16
++++FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
++++{
++++ __m128i dst;
++++ uint16_t min, idx = 0;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ // Find the minimum value
++++ min = vminvq_u16(vreinterpretq_u16_m128i(a));
++++
++++ // Get the index of the minimum value
++++ static const uint16_t idxv[] = {0, 1, 2, 3, 4, 5, 6, 7};
++++ uint16x8_t minv = vdupq_n_u16(min);
++++ uint16x8_t cmeq = vceqq_u16(minv, vreinterpretq_u16_m128i(a));
++++ idx = vminvq_u16(vornq_u16(vld1q_u16(idxv), cmeq));
++++#else
++++ // Find the minimum value
++++ __m64 tmp;
++++ tmp = vreinterpret_m64_u16(
++++ vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
++++ vget_high_u16(vreinterpretq_u16_m128i(a))));
++++ tmp = vreinterpret_m64_u16(
++++ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
++++ tmp = vreinterpret_m64_u16(
++++ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
++++ min = vget_lane_u16(vreinterpret_u16_m64(tmp), 0);
++++ // Get the index of the minimum value
++++ int i;
++++ for (i = 0; i < 8; i++) {
++++ if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
++++ idx = (uint16_t) i;
++++ break;
++++ }
++++ a = _mm_srli_si128(a, 2);
++++ }
++++#endif
++++ // Generate result
++++ dst = _mm_setzero_si128();
++++ dst = vreinterpretq_m128i_u16(
++++ vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
++++ dst = vreinterpretq_m128i_u16(
++++ vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
++++ return dst;
++++}
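++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon): the minimum
++++ * ends up in 16-bit lane 0, its index in lane 1, and the remaining lanes are
++++ * zero.
++++ *
++++ *     __m128i v = _mm_set_epi16(9, 8, 7, 3, 11, 12, 13, 14);
++++ *     __m128i r = _mm_minpos_epu16(v);
++++ *     // _mm_extract_epi16(r, 0) == 3, _mm_extract_epi16(r, 1) == 4
++++ */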
++++
++++// Compute the sum of absolute differences (SADs) of quadruplets of unsigned
++++// 8-bit integers in a compared to those in b, and store the 16-bit results in
++++// dst. Eight SADs are performed using one quadruplet from b and eight
++++// quadruplets from a. One quadruplet is selected from b starting at the
++++// offset specified in imm8. Eight quadruplets are formed from sequential 8-bit
++++// integers selected from a starting at the offset specified in imm8.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mpsadbw_epu8
++++FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm)
++++{
++++ uint8x16_t _a, _b;
++++
++++ switch (imm & 0x4) {
++++ case 0:
++++ // do nothing
++++ _a = vreinterpretq_u8_m128i(a);
++++ break;
++++ case 4:
++++ _a = vreinterpretq_u8_u32(vextq_u32(vreinterpretq_u32_m128i(a),
++++ vreinterpretq_u32_m128i(a), 1));
++++ break;
++++ default:
++++#if defined(__GNUC__) || defined(__clang__)
++++ __builtin_unreachable();
++++#elif defined(_MSC_VER)
++++ __assume(0);
++++#endif
++++ break;
++++ }
++++
++++ switch (imm & 0x3) {
++++ case 0:
++++ _b = vreinterpretq_u8_u32(
++++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 0)));
++++ break;
++++ case 1:
++++ _b = vreinterpretq_u8_u32(
++++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 1)));
++++ break;
++++ case 2:
++++ _b = vreinterpretq_u8_u32(
++++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 2)));
++++ break;
++++ case 3:
++++ _b = vreinterpretq_u8_u32(
++++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 3)));
++++ break;
++++ default:
++++#if defined(__GNUC__) || defined(__clang__)
++++ __builtin_unreachable();
++++#elif defined(_MSC_VER)
++++ __assume(0);
++++#endif
++++ break;
++++ }
++++
++++ int16x8_t c04, c15, c26, c37;
++++ uint8x8_t low_b = vget_low_u8(_b);
++++ c04 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a), low_b));
++++ uint8x16_t _a_1 = vextq_u8(_a, _a, 1);
++++ c15 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_1), low_b));
++++ uint8x16_t _a_2 = vextq_u8(_a, _a, 2);
++++ c26 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_2), low_b));
++++ uint8x16_t _a_3 = vextq_u8(_a, _a, 3);
++++ c37 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_3), low_b));
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ // |0|4|2|6|
++++ c04 = vpaddq_s16(c04, c26);
++++ // |1|5|3|7|
++++ c15 = vpaddq_s16(c15, c37);
++++
++++ int32x4_t trn1_c =
++++ vtrn1q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
++++ int32x4_t trn2_c =
++++ vtrn2q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
++++ return vreinterpretq_m128i_s16(vpaddq_s16(vreinterpretq_s16_s32(trn1_c),
++++ vreinterpretq_s16_s32(trn2_c)));
++++#else
++++ int16x4_t c01, c23, c45, c67;
++++ c01 = vpadd_s16(vget_low_s16(c04), vget_low_s16(c15));
++++ c23 = vpadd_s16(vget_low_s16(c26), vget_low_s16(c37));
++++ c45 = vpadd_s16(vget_high_s16(c04), vget_high_s16(c15));
++++ c67 = vpadd_s16(vget_high_s16(c26), vget_high_s16(c37));
++++
++++ return vreinterpretq_m128i_s16(
++++ vcombine_s16(vpadd_s16(c01, c23), vpadd_s16(c45, c67)));
++++#endif
++++}
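++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon): imm bit 2
++++ * selects the starting byte offset in a (0 or 4) and imm bits 1:0 select the
++++ * 4-byte group in b; each 16-bit result lane is the SAD of one sliding
++++ * 4-byte window of a against that group.
++++ *
++++ *     __m128i a = _mm_setr_epi8(1, 2, 3, 4, 5, 6, 7, 8,
++++ *                               9, 10, 11, 12, 13, 14, 15, 16);
++++ *     __m128i b = _mm_setr_epi8(1, 2, 3, 4, 0, 0, 0, 0,
++++ *                               0, 0, 0, 0, 0, 0, 0, 0);
++++ *     __m128i s = _mm_mpsadbw_epu8(a, b, 0); // lane 0 = |1-1|+|2-2|+|3-3|+|4-4| = 0
++++ */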
++++
++++// Multiply the low signed 32-bit integers from each packed 64-bit element in
++++// a and b, and store the signed 64-bit results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epi32
++++FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
++++{
++++ // vmull_s32 upcasts instead of masking, so we downcast.
++++ int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
++++ int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
++++ return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
++++}
++++
++++// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit
++++// integers, and store the low 32 bits of the intermediate integers in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi32
++++FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_s32(
++++ vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
++++}
++++
++++// Convert packed signed 32-bit integers from a and b to packed 16-bit integers
++++// using unsigned saturation, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi32
++++FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u16(
++++ vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)),
++++ vqmovun_s32(vreinterpretq_s32_m128i(b))));
++++}
++++
++++// Round the packed double-precision (64-bit) floating-point elements in a using
++++// the rounding parameter, and store the results as packed double-precision
++++// floating-point elements in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_pd
++++FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ switch (rounding) {
++++ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
++++ return vreinterpretq_m128d_f64(vrndnq_f64(vreinterpretq_f64_m128d(a)));
++++ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
++++ return _mm_floor_pd(a);
++++ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
++++ return _mm_ceil_pd(a);
++++ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
++++ return vreinterpretq_m128d_f64(vrndq_f64(vreinterpretq_f64_m128d(a)));
++++ default: //_MM_FROUND_CUR_DIRECTION
++++ return vreinterpretq_m128d_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)));
++++ }
++++#else
++++ double *v_double = (double *) &a;
++++
++++ if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
++++ (rounding == _MM_FROUND_CUR_DIRECTION &&
++++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
++++ double res[2], tmp;
++++ for (int i = 0; i < 2; i++) {
++++ tmp = (v_double[i] < 0) ? -v_double[i] : v_double[i];
++++ double roundDown = floor(tmp); // Round down value
++++ double roundUp = ceil(tmp); // Round up value
++++ double diffDown = tmp - roundDown;
++++ double diffUp = roundUp - tmp;
++++ if (diffDown < diffUp) {
++++ /* If it's closer to the round down value, then use it */
++++ res[i] = roundDown;
++++ } else if (diffDown > diffUp) {
++++ /* If it's closer to the round up value, then use it */
++++ res[i] = roundUp;
++++ } else {
++++ /* If it's equidistant between round up and round down value,
++++ * pick the one which is an even number */
++++ double half = roundDown / 2;
++++ if (half != floor(half)) {
++++ /* If the round down value is odd, return the round up value
++++ */
++++ res[i] = roundUp;
++++ } else {
++++ /* If the round up value is odd, return the round down value
++++ */
++++ res[i] = roundDown;
++++ }
++++ }
++++ res[i] = (v_double[i] < 0) ? -res[i] : res[i];
++++ }
++++ return _mm_set_pd(res[1], res[0]);
++++ } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
++++ (rounding == _MM_FROUND_CUR_DIRECTION &&
++++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
++++ return _mm_floor_pd(a);
++++ } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
++++ (rounding == _MM_FROUND_CUR_DIRECTION &&
++++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
++++ return _mm_ceil_pd(a);
++++ }
++++ return _mm_set_pd(v_double[1] > 0 ? floor(v_double[1]) : ceil(v_double[1]),
++++ v_double[0] > 0 ? floor(v_double[0]) : ceil(v_double[0]));
++++#endif
++++}
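++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon): the rounding
++++ * argument combines a direction with _MM_FROUND_NO_EXC, or is
++++ * _MM_FROUND_CUR_DIRECTION to follow the current rounding mode.
++++ *
++++ *     __m128d v = _mm_set_pd(-1.5, 2.5);
++++ *     __m128d n = _mm_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
++++ *     // n lanes 0,1 = {2.0, -2.0} (round half to even)
++++ *     __m128d z = _mm_round_pd(v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
++++ *     // z lanes 0,1 = {2.0, -1.0} (truncate)
++++ */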
++++
++++// Round the packed single-precision (32-bit) floating-point elements in a using
++++// the rounding parameter, and store the results as packed single-precision
++++// floating-point elements in dst.
++++// software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_round_ps
++++FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding)
++++{
++++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
++++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
++++ switch (rounding) {
++++ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
++++ return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a)));
++++ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
++++ return _mm_floor_ps(a);
++++ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
++++ return _mm_ceil_ps(a);
++++ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
++++ return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a)));
++++ default: //_MM_FROUND_CUR_DIRECTION
++++ return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a)));
++++ }
++++#else
++++ float *v_float = (float *) &a;
++++
++++ if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
++++ (rounding == _MM_FROUND_CUR_DIRECTION &&
++++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
++++ uint32x4_t signmask = vdupq_n_u32(0x80000000);
++++ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
++++ vdupq_n_f32(0.5f)); /* +/- 0.5 */
++++ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
++++ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
++++ int32x4_t r_trunc = vcvtq_s32_f32(
++++ vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
++++ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
++++ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
++++ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
++++ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
++++ float32x4_t delta = vsubq_f32(
++++ vreinterpretq_f32_m128(a),
++++ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
++++ uint32x4_t is_delta_half =
++++ vceqq_f32(delta, half); /* delta == +/- 0.5 */
++++ return vreinterpretq_m128_f32(
++++ vcvtq_f32_s32(vbslq_s32(is_delta_half, r_even, r_normal)));
++++ } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
++++ (rounding == _MM_FROUND_CUR_DIRECTION &&
++++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
++++ return _mm_floor_ps(a);
++++ } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
++++ (rounding == _MM_FROUND_CUR_DIRECTION &&
++++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
++++ return _mm_ceil_ps(a);
++++ }
++++ return _mm_set_ps(v_float[3] > 0 ? floorf(v_float[3]) : ceilf(v_float[3]),
++++ v_float[2] > 0 ? floorf(v_float[2]) : ceilf(v_float[2]),
++++ v_float[1] > 0 ? floorf(v_float[1]) : ceilf(v_float[1]),
++++ v_float[0] > 0 ? floorf(v_float[0]) : ceilf(v_float[0]));
++++#endif
++++}
++++
++++// Round the lower double-precision (64-bit) floating-point element in b using
++++// the rounding parameter, store the result as a double-precision floating-point
++++// element in the lower element of dst, and copy the upper element from a to the
++++// upper element of dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_sd
++++FORCE_INLINE __m128d _mm_round_sd(__m128d a, __m128d b, int rounding)
++++{
++++ return _mm_move_sd(a, _mm_round_pd(b, rounding));
++++}
++++
++++// Round the lower single-precision (32-bit) floating-point element in b using
++++// the rounding parameter, store the result as a single-precision floating-point
++++// element in the lower element of dst, and copy the upper 3 packed elements
++++// from a to the upper elements of dst. Rounding is done according to the
++++// rounding[3:0] parameter, which can be one of:
++++//     (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest, suppress exceptions
++++//     (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)     // round down, suppress exceptions
++++//     (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)     // round up, suppress exceptions
++++//     (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)        // truncate, suppress exceptions
++++//     _MM_FROUND_CUR_DIRECTION                        // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ss
++++FORCE_INLINE __m128 _mm_round_ss(__m128 a, __m128 b, int rounding)
++++{
++++ return _mm_move_ss(a, _mm_round_ps(b, rounding));
++++}
++++
++++// Load 128-bits of integer data from memory into dst using a non-temporal
++++// memory hint. mem_addr must be aligned on a 16-byte boundary or a
++++// general-protection exception may be generated.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_load_si128
++++FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p)
++++{
++++#if __has_builtin(__builtin_nontemporal_store)
++++ return __builtin_nontemporal_load(p);
++++#else
++++ return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p));
++++#endif
++++}
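++++
++++/* Note (illustrative, not part of upstream sse2neon): as on x86, the pointer
++++ * must be 16-byte aligned; _mm_malloc/_mm_free from earlier in this header
++++ * provide suitably aligned storage.
++++ *
++++ *     int32_t *buf = (int32_t *) _mm_malloc(4 * sizeof(int32_t), 16);
++++ *     buf[0] = 1; buf[1] = 2; buf[2] = 3; buf[3] = 4;
++++ *     __m128i v = _mm_stream_load_si128((__m128i *) buf);
++++ *     _mm_free(buf);
++++ */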
++++
++++// Compute the bitwise NOT of a and then AND with a 128-bit vector containing
++++// all 1's, and return 1 if the result is zero, otherwise return 0.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_ones
++++FORCE_INLINE int _mm_test_all_ones(__m128i a)
++++{
++++ return (uint64_t) (vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) ==
++++ ~(uint64_t) 0;
++++}
++++
++++// Compute the bitwise AND of 128 bits (representing integer data) in a and
++++// mask, and return 1 if the result is zero, otherwise return 0.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_zeros
++++FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
++++{
++++ int64x2_t a_and_mask =
++++ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask));
++++ return !(vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1));
++++}
++++
++++// Compute the bitwise AND of 128 bits (representing integer data) in a and
++++// mask, and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute
++++// the bitwise NOT of a and then AND with mask, and set CF to 1 if the result is
++++// zero, otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
++++// otherwise return 0.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_test_mix_ones_zero
++++// Note: Argument names may be wrong in the Intel intrinsics guide.
++++FORCE_INLINE int _mm_test_mix_ones_zeros(__m128i a, __m128i mask)
++++{
++++ uint64x2_t v = vreinterpretq_u64_m128i(a);
++++ uint64x2_t m = vreinterpretq_u64_m128i(mask);
++++
++++ // find ones (set-bits) and zeros (clear-bits) under clip mask
++++ uint64x2_t ones = vandq_u64(m, v);
++++ uint64x2_t zeros = vbicq_u64(m, v);
++++
++++ // If both 128-bit variables are populated (non-zero) then return 1.
++++ // For comparison purposes, first compact each var down to 32-bits.
++++ uint32x2_t reduced = vpmax_u32(vqmovn_u64(ones), vqmovn_u64(zeros));
++++
++++ // if folding minimum is non-zero then both vars must be non-zero
++++ return (vget_lane_u32(vpmin_u32(reduced, reduced), 0) != 0);
++++}
++++
++++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
++++// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
++++// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
++++// otherwise set CF to 0. Return the CF value.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_si128
++++FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b)
++++{
++++ int64x2_t s64 =
++++ vbicq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a));
++++ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
++++}
++++
++++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
++++// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
++++// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
++++// otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
++++// otherwise return 0.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_si128
++++#define _mm_testnzc_si128(a, b) _mm_test_mix_ones_zeros(a, b)
++++
++++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
++++// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
++++// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
++++// otherwise set CF to 0. Return the ZF value.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_si128
++++FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b)
++++{
++++ int64x2_t s64 =
++++ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b));
++++ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
++++}
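++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon):
++++ * _mm_testz_si128 returns ZF (1 iff (a & b) == 0), _mm_testc_si128 returns
++++ * CF (1 iff (~a & b) == 0), and _mm_testnzc_si128 returns 1 only when both
++++ * ZF and CF are 0.
++++ *
++++ *     __m128i a = _mm_set_epi32(0, 0, 0, 0x0F);
++++ *     __m128i b = _mm_set_epi32(0, 0, 0, 0xF0);
++++ *     // _mm_testz_si128(a, b) == 1  (no bit set in both)
++++ *     // _mm_testc_si128(a, b) == 0  (b has bits outside a)
++++ */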
++++
++++/* SSE4.2 */
++++
++++static const uint16_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask16b[8] = {
++++ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
++++};
++++static const uint8_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask8b[16] = {
++++ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
++++ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
++++};
++++
++++/* specify the source data format */
++++#define _SIDD_UBYTE_OPS 0x00 /* unsigned 8-bit characters */
++++#define _SIDD_UWORD_OPS 0x01 /* unsigned 16-bit characters */
++++#define _SIDD_SBYTE_OPS 0x02 /* signed 8-bit characters */
++++#define _SIDD_SWORD_OPS 0x03 /* signed 16-bit characters */
++++
++++/* specify the comparison operation */
++++#define _SIDD_CMP_EQUAL_ANY 0x00 /* compare equal any: strchr */
++++#define _SIDD_CMP_RANGES 0x04 /* compare ranges */
++++#define _SIDD_CMP_EQUAL_EACH 0x08 /* compare equal each: strcmp */
++++#define _SIDD_CMP_EQUAL_ORDERED 0x0C /* compare equal ordered */
++++
++++/* specify the polarity */
++++#define _SIDD_POSITIVE_POLARITY 0x00
++++#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
++++#define _SIDD_NEGATIVE_POLARITY 0x10 /* negate results */
++++#define _SIDD_MASKED_NEGATIVE_POLARITY \
++++ 0x30 /* negate results only before end of string */
++++
++++/* specify the output selection in _mm_cmpXstri */
++++#define _SIDD_LEAST_SIGNIFICANT 0x00
++++#define _SIDD_MOST_SIGNIFICANT 0x40
++++
++++/* specify the output selection in _mm_cmpXstrm */
++++#define _SIDD_BIT_MASK 0x00
++++#define _SIDD_UNIT_MASK 0x40
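++++
++++/* Usage sketch (illustrative, not part of upstream sse2neon): a PCMPxSTRx
++++ * control byte is built by OR-ing one option from each group above, e.g. to
++++ * look for any needle byte anywhere in a haystack:
++++ *
++++ *     const int ctrl = _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
++++ *                      _SIDD_POSITIVE_POLARITY | _SIDD_LEAST_SIGNIFICANT;
++++ */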
++++
++++/* Pattern Matching for C macros.
++++ * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
++++ */
++++
++++/* catenate */
++++#define SSE2NEON_PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
++++#define SSE2NEON_CAT(a, b) SSE2NEON_PRIMITIVE_CAT(a, b)
++++
++++#define SSE2NEON_IIF(c) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_IIF_, c)
++++/* run the 2nd parameter */
++++#define SSE2NEON_IIF_0(t, ...) __VA_ARGS__
++++/* run the 1st parameter */
++++#define SSE2NEON_IIF_1(t, ...) t
++++
++++#define SSE2NEON_COMPL(b) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_COMPL_, b)
++++#define SSE2NEON_COMPL_0 1
++++#define SSE2NEON_COMPL_1 0
++++
++++#define SSE2NEON_DEC(x) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_DEC_, x)
++++#define SSE2NEON_DEC_1 0
++++#define SSE2NEON_DEC_2 1
++++#define SSE2NEON_DEC_3 2
++++#define SSE2NEON_DEC_4 3
++++#define SSE2NEON_DEC_5 4
++++#define SSE2NEON_DEC_6 5
++++#define SSE2NEON_DEC_7 6
++++#define SSE2NEON_DEC_8 7
++++#define SSE2NEON_DEC_9 8
++++#define SSE2NEON_DEC_10 9
++++#define SSE2NEON_DEC_11 10
++++#define SSE2NEON_DEC_12 11
++++#define SSE2NEON_DEC_13 12
++++#define SSE2NEON_DEC_14 13
++++#define SSE2NEON_DEC_15 14
++++#define SSE2NEON_DEC_16 15
++++
++++/* detection */
++++#define SSE2NEON_CHECK_N(x, n, ...) n
++++#define SSE2NEON_CHECK(...) SSE2NEON_CHECK_N(__VA_ARGS__, 0, )
++++#define SSE2NEON_PROBE(x) x, 1,
++++
++++#define SSE2NEON_NOT(x) SSE2NEON_CHECK(SSE2NEON_PRIMITIVE_CAT(SSE2NEON_NOT_, x))
++++#define SSE2NEON_NOT_0 SSE2NEON_PROBE(~)
++++
++++#define SSE2NEON_BOOL(x) SSE2NEON_COMPL(SSE2NEON_NOT(x))
++++#define SSE2NEON_IF(c) SSE2NEON_IIF(SSE2NEON_BOOL(c))
++++
++++#define SSE2NEON_EAT(...)
++++#define SSE2NEON_EXPAND(...) __VA_ARGS__
++++#define SSE2NEON_WHEN(c) SSE2NEON_IF(c)(SSE2NEON_EXPAND, SSE2NEON_EAT)
++++
++++/* recursion */
++++/* deferred expression */
++++#define SSE2NEON_EMPTY()
++++#define SSE2NEON_DEFER(id) id SSE2NEON_EMPTY()
++++#define SSE2NEON_OBSTRUCT(...) __VA_ARGS__ SSE2NEON_DEFER(SSE2NEON_EMPTY)()
++++#define SSE2NEON_EXPAND(...) __VA_ARGS__
++++
++++#define SSE2NEON_EVAL(...) \
++++ SSE2NEON_EVAL1(SSE2NEON_EVAL1(SSE2NEON_EVAL1(__VA_ARGS__)))
++++#define SSE2NEON_EVAL1(...) \
++++ SSE2NEON_EVAL2(SSE2NEON_EVAL2(SSE2NEON_EVAL2(__VA_ARGS__)))
++++#define SSE2NEON_EVAL2(...) \
++++ SSE2NEON_EVAL3(SSE2NEON_EVAL3(SSE2NEON_EVAL3(__VA_ARGS__)))
++++#define SSE2NEON_EVAL3(...) __VA_ARGS__
++++
++++#define SSE2NEON_REPEAT(count, macro, ...) \
++++ SSE2NEON_WHEN(count) \
++++ (SSE2NEON_OBSTRUCT(SSE2NEON_REPEAT_INDIRECT)()( \
++++ SSE2NEON_DEC(count), macro, \
++++ __VA_ARGS__) SSE2NEON_OBSTRUCT(macro)(SSE2NEON_DEC(count), \
++++ __VA_ARGS__))
++++#define SSE2NEON_REPEAT_INDIRECT() SSE2NEON_REPEAT
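++++
++++/* Illustration (not part of upstream sse2neon): when wrapped in
++++ * SSE2NEON_EVAL, SSE2NEON_REPEAT(count, macro, args) expands to
++++ * macro(0, args) macro(1, args) ... macro(count - 1, args), e.g. with
++++ * SSE2NEON_FILL_LANE (defined just below):
++++ *
++++ *     SSE2NEON_EVAL(SSE2NEON_REPEAT(3, SSE2NEON_FILL_LANE, u16))
++++ *     // -> vec_b[0] = ...; vec_b[1] = ...; vec_b[2] = ...;
++++ */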
++++
++++#define SSE2NEON_SIZE_OF_byte 8
++++#define SSE2NEON_NUMBER_OF_LANES_byte 16
++++#define SSE2NEON_SIZE_OF_word 16
++++#define SSE2NEON_NUMBER_OF_LANES_word 8
++++
++++#define SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE(i, type) \
++++ mtx[i] = vreinterpretq_m128i_##type(vceqq_##type( \
++++ vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i)), \
++++ vreinterpretq_##type##_m128i(a)));
++++
++++#define SSE2NEON_FILL_LANE(i, type) \
++++ vec_b[i] = \
++++ vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i));
++++
++++#define PCMPSTR_RANGES(a, b, mtx, data_type_prefix, type_prefix, size, \
++++ number_of_lanes, byte_or_word) \
++++ do { \
++++ SSE2NEON_CAT( \
++++ data_type_prefix, \
++++ SSE2NEON_CAT(size, \
++++ SSE2NEON_CAT(x, SSE2NEON_CAT(number_of_lanes, _t)))) \
++++ vec_b[number_of_lanes]; \
++++ __m128i mask = SSE2NEON_IIF(byte_or_word)( \
++++ vreinterpretq_m128i_u16(vdupq_n_u16(0xff)), \
++++ vreinterpretq_m128i_u32(vdupq_n_u32(0xffff))); \
++++ SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, SSE2NEON_FILL_LANE, \
++++ SSE2NEON_CAT(type_prefix, size))) \
++++ for (int i = 0; i < number_of_lanes; i++) { \
++++ mtx[i] = SSE2NEON_CAT(vreinterpretq_m128i_u, \
++++ size)(SSE2NEON_CAT(vbslq_u, size)( \
++++ SSE2NEON_CAT(vreinterpretq_u, \
++++ SSE2NEON_CAT(size, _m128i))(mask), \
++++ SSE2NEON_CAT(vcgeq_, SSE2NEON_CAT(type_prefix, size))( \
++++ vec_b[i], \
++++ SSE2NEON_CAT( \
++++ vreinterpretq_, \
++++ SSE2NEON_CAT(type_prefix, \
++++ SSE2NEON_CAT(size, _m128i(a))))), \
++++ SSE2NEON_CAT(vcleq_, SSE2NEON_CAT(type_prefix, size))( \
++++ vec_b[i], \
++++ SSE2NEON_CAT( \
++++ vreinterpretq_, \
++++ SSE2NEON_CAT(type_prefix, \
++++ SSE2NEON_CAT(size, _m128i(a))))))); \
++++ } \
++++ } while (0)
++++
++++#define PCMPSTR_EQ(a, b, mtx, size, number_of_lanes) \
++++ do { \
++++ SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, \
++++ SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE, \
++++ SSE2NEON_CAT(u, size))) \
++++ } while (0)
++++
++++#define SSE2NEON_CMP_EQUAL_ANY_IMPL(type) \
++++ static int _sse2neon_cmp_##type##_equal_any(__m128i a, int la, __m128i b, \
++++ int lb) \
++++ { \
++++ __m128i mtx[16]; \
++++ PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
++++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
++++ return SSE2NEON_CAT( \
++++ _sse2neon_aggregate_equal_any_, \
++++ SSE2NEON_CAT( \
++++ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
++++ SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
++++ type))))(la, lb, mtx); \
++++ }
++++
++++#define SSE2NEON_CMP_RANGES_IMPL(type, data_type, us, byte_or_word) \
++++ static int _sse2neon_cmp_##us##type##_ranges(__m128i a, int la, __m128i b, \
++++ int lb) \
++++ { \
++++ __m128i mtx[16]; \
++++ PCMPSTR_RANGES( \
++++ a, b, mtx, data_type, us, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
++++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), byte_or_word); \
++++ return SSE2NEON_CAT( \
++++ _sse2neon_aggregate_ranges_, \
++++ SSE2NEON_CAT( \
++++ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
++++ SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
++++ type))))(la, lb, mtx); \
++++ }
++++
++++#define SSE2NEON_CMP_EQUAL_ORDERED_IMPL(type) \
++++ static int _sse2neon_cmp_##type##_equal_ordered(__m128i a, int la, \
++++ __m128i b, int lb) \
++++ { \
++++ __m128i mtx[16]; \
++++ PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
++++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
++++ return SSE2NEON_CAT( \
++++ _sse2neon_aggregate_equal_ordered_, \
++++ SSE2NEON_CAT( \
++++ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
++++ SSE2NEON_CAT(x, \
++++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type))))( \
++++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), la, lb, mtx); \
++++ }
++++
++++static int _sse2neon_aggregate_equal_any_8x16(int la, int lb, __m128i mtx[16])
++++{
++++ int res = 0;
++++ int m = (1 << la) - 1;
++++ uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
++++ uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
++++ uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
++++ uint8x16_t vec = vcombine_u8(t_lo, t_hi);
++++ for (int j = 0; j < lb; j++) {
++++ mtx[j] = vreinterpretq_m128i_u8(
++++ vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
++++ mtx[j] = vreinterpretq_m128i_u8(
++++ vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
++++ int tmp = _sse2neon_vaddvq_u8(vreinterpretq_u8_m128i(mtx[j])) ? 1 : 0;
++++ res |= (tmp << j);
++++ }
++++ return res;
++++}
++++
++++static int _sse2neon_aggregate_equal_any_16x8(int la, int lb, __m128i mtx[16])
++++{
++++ int res = 0;
++++ int m = (1 << la) - 1;
++++ uint16x8_t vec =
++++ vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
++++ for (int j = 0; j < lb; j++) {
++++ mtx[j] = vreinterpretq_m128i_u16(
++++ vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
++++ mtx[j] = vreinterpretq_m128i_u16(
++++ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
++++ int tmp = _sse2neon_vaddvq_u16(vreinterpretq_u16_m128i(mtx[j])) ? 1 : 0;
++++ res |= (tmp << j);
++++ }
++++ return res;
++++}
++++
++++/* clang-format off */
++++#define SSE2NEON_GENERATE_CMP_EQUAL_ANY(prefix) \
++++ prefix##IMPL(byte) \
++++ prefix##IMPL(word)
++++/* clang-format on */
++++
++++SSE2NEON_GENERATE_CMP_EQUAL_ANY(SSE2NEON_CMP_EQUAL_ANY_)
++++
++++static int _sse2neon_aggregate_ranges_16x8(int la, int lb, __m128i mtx[16])
++++{
++++ int res = 0;
++++ int m = (1 << la) - 1;
++++ uint16x8_t vec =
++++ vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
++++ for (int j = 0; j < lb; j++) {
++++ mtx[j] = vreinterpretq_m128i_u16(
++++ vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
++++ mtx[j] = vreinterpretq_m128i_u16(
++++ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
++++ __m128i tmp = vreinterpretq_m128i_u32(
++++ vshrq_n_u32(vreinterpretq_u32_m128i(mtx[j]), 16));
++++ uint32x4_t vec_res = vandq_u32(vreinterpretq_u32_m128i(mtx[j]),
++++ vreinterpretq_u32_m128i(tmp));
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ int t = vaddvq_u32(vec_res) ? 1 : 0;
++++#else
++++ uint64x2_t sumh = vpaddlq_u32(vec_res);
++++ int t = vgetq_lane_u64(sumh, 0) + vgetq_lane_u64(sumh, 1);
++++#endif
++++ res |= (t << j);
++++ }
++++ return res;
++++}
++++
++++static int _sse2neon_aggregate_ranges_8x16(int la, int lb, __m128i mtx[16])
++++{
++++ int res = 0;
++++ int m = (1 << la) - 1;
++++ uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
++++ uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
++++ uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
++++ uint8x16_t vec = vcombine_u8(t_lo, t_hi);
++++ for (int j = 0; j < lb; j++) {
++++ mtx[j] = vreinterpretq_m128i_u8(
++++ vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
++++ mtx[j] = vreinterpretq_m128i_u8(
++++ vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
++++ __m128i tmp = vreinterpretq_m128i_u16(
++++ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 8));
++++ uint16x8_t vec_res = vandq_u16(vreinterpretq_u16_m128i(mtx[j]),
++++ vreinterpretq_u16_m128i(tmp));
++++ int t = _sse2neon_vaddvq_u16(vec_res) ? 1 : 0;
++++ res |= (t << j);
++++ }
++++ return res;
++++}
++++
++++#define SSE2NEON_CMP_RANGES_IS_BYTE 1
++++#define SSE2NEON_CMP_RANGES_IS_WORD 0
++++
++++/* clang-format off */
++++#define SSE2NEON_GENERATE_CMP_RANGES(prefix) \
++++ prefix##IMPL(byte, uint, u, prefix##IS_BYTE) \
++++ prefix##IMPL(byte, int, s, prefix##IS_BYTE) \
++++ prefix##IMPL(word, uint, u, prefix##IS_WORD) \
++++ prefix##IMPL(word, int, s, prefix##IS_WORD)
++++/* clang-format on */
++++
++++SSE2NEON_GENERATE_CMP_RANGES(SSE2NEON_CMP_RANGES_)
++++
++++#undef SSE2NEON_CMP_RANGES_IS_BYTE
++++#undef SSE2NEON_CMP_RANGES_IS_WORD
++++
++++static int _sse2neon_cmp_byte_equal_each(__m128i a, int la, __m128i b, int lb)
++++{
++++ uint8x16_t mtx =
++++ vceqq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b));
++++ int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
++++ int m1 = 0x10000 - (1 << la);
++++ int tb = 0x10000 - (1 << lb);
++++ uint8x8_t vec_mask, vec0_lo, vec0_hi, vec1_lo, vec1_hi;
++++ uint8x8_t tmp_lo, tmp_hi, res_lo, res_hi;
++++ vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
++++ vec0_lo = vtst_u8(vdup_n_u8(m0), vec_mask);
++++ vec0_hi = vtst_u8(vdup_n_u8(m0 >> 8), vec_mask);
++++ vec1_lo = vtst_u8(vdup_n_u8(m1), vec_mask);
++++ vec1_hi = vtst_u8(vdup_n_u8(m1 >> 8), vec_mask);
++++ tmp_lo = vtst_u8(vdup_n_u8(tb), vec_mask);
++++ tmp_hi = vtst_u8(vdup_n_u8(tb >> 8), vec_mask);
++++
++++ res_lo = vbsl_u8(vec0_lo, vdup_n_u8(0), vget_low_u8(mtx));
++++ res_hi = vbsl_u8(vec0_hi, vdup_n_u8(0), vget_high_u8(mtx));
++++ res_lo = vbsl_u8(vec1_lo, tmp_lo, res_lo);
++++ res_hi = vbsl_u8(vec1_hi, tmp_hi, res_hi);
++++ res_lo = vand_u8(res_lo, vec_mask);
++++ res_hi = vand_u8(res_hi, vec_mask);
++++
++++ int res = _sse2neon_vaddv_u8(res_lo) + (_sse2neon_vaddv_u8(res_hi) << 8);
++++ return res;
++++}
++++
++++static int _sse2neon_cmp_word_equal_each(__m128i a, int la, __m128i b, int lb)
++++{
++++ uint16x8_t mtx =
++++ vceqq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
++++ int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
++++ int m1 = 0x100 - (1 << la);
++++ int tb = 0x100 - (1 << lb);
++++ uint16x8_t vec_mask = vld1q_u16(_sse2neon_cmpestr_mask16b);
++++ uint16x8_t vec0 = vtstq_u16(vdupq_n_u16(m0), vec_mask);
++++ uint16x8_t vec1 = vtstq_u16(vdupq_n_u16(m1), vec_mask);
++++ uint16x8_t tmp = vtstq_u16(vdupq_n_u16(tb), vec_mask);
++++ mtx = vbslq_u16(vec0, vdupq_n_u16(0), mtx);
++++ mtx = vbslq_u16(vec1, tmp, mtx);
++++ mtx = vandq_u16(mtx, vec_mask);
++++ return _sse2neon_vaddvq_u16(mtx);
++++}
++++
++++#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE 1
++++#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD 0
++++
++++#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IMPL(size, number_of_lanes, data_type) \
++++ static int _sse2neon_aggregate_equal_ordered_##size##x##number_of_lanes( \
++++ int bound, int la, int lb, __m128i mtx[16]) \
++++ { \
++++ int res = 0; \
++++ int m1 = SSE2NEON_IIF(data_type)(0x10000, 0x100) - (1 << la); \
++++ uint##size##x8_t vec_mask = SSE2NEON_IIF(data_type)( \
++++ vld1_u##size(_sse2neon_cmpestr_mask##size##b), \
++++ vld1q_u##size(_sse2neon_cmpestr_mask##size##b)); \
++++ uint##size##x##number_of_lanes##_t vec1 = SSE2NEON_IIF(data_type)( \
++++ vcombine_u##size(vtst_u##size(vdup_n_u##size(m1), vec_mask), \
++++ vtst_u##size(vdup_n_u##size(m1 >> 8), vec_mask)), \
++++ vtstq_u##size(vdupq_n_u##size(m1), vec_mask)); \
++++ uint##size##x##number_of_lanes##_t vec_minusone = vdupq_n_u##size(-1); \
++++ uint##size##x##number_of_lanes##_t vec_zero = vdupq_n_u##size(0); \
++++ for (int j = 0; j < lb; j++) { \
++++ mtx[j] = vreinterpretq_m128i_u##size(vbslq_u##size( \
++++ vec1, vec_minusone, vreinterpretq_u##size##_m128i(mtx[j]))); \
++++ } \
++++ for (int j = lb; j < bound; j++) { \
++++ mtx[j] = vreinterpretq_m128i_u##size( \
++++ vbslq_u##size(vec1, vec_minusone, vec_zero)); \
++++ } \
++++ unsigned SSE2NEON_IIF(data_type)(char, short) *ptr = \
++++ (unsigned SSE2NEON_IIF(data_type)(char, short) *) mtx; \
++++ for (int i = 0; i < bound; i++) { \
++++ int val = 1; \
++++ for (int j = 0, k = i; j < bound - i && k < bound; j++, k++) \
++++ val &= ptr[k * bound + j]; \
++++ res += val << i; \
++++ } \
++++ return res; \
++++ }
++++
++++/* clang-format off */
++++#define SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(prefix) \
++++ prefix##IMPL(8, 16, prefix##IS_UBYTE) \
++++ prefix##IMPL(16, 8, prefix##IS_UWORD)
++++/* clang-format on */
++++
++++SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(SSE2NEON_AGGREGATE_EQUAL_ORDER_)
++++
++++#undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE
++++#undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD
++++
++++/* clang-format off */
++++#define SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(prefix) \
++++ prefix##IMPL(byte) \
++++ prefix##IMPL(word)
++++/* clang-format on */
++++
++++SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(SSE2NEON_CMP_EQUAL_ORDERED_)
++++
++++#define SSE2NEON_CMPESTR_LIST \
++++ _(CMP_UBYTE_EQUAL_ANY, cmp_byte_equal_any) \
++++ _(CMP_UWORD_EQUAL_ANY, cmp_word_equal_any) \
++++ _(CMP_SBYTE_EQUAL_ANY, cmp_byte_equal_any) \
++++ _(CMP_SWORD_EQUAL_ANY, cmp_word_equal_any) \
++++ _(CMP_UBYTE_RANGES, cmp_ubyte_ranges) \
++++ _(CMP_UWORD_RANGES, cmp_uword_ranges) \
++++ _(CMP_SBYTE_RANGES, cmp_sbyte_ranges) \
++++ _(CMP_SWORD_RANGES, cmp_sword_ranges) \
++++ _(CMP_UBYTE_EQUAL_EACH, cmp_byte_equal_each) \
++++ _(CMP_UWORD_EQUAL_EACH, cmp_word_equal_each) \
++++ _(CMP_SBYTE_EQUAL_EACH, cmp_byte_equal_each) \
++++ _(CMP_SWORD_EQUAL_EACH, cmp_word_equal_each) \
++++ _(CMP_UBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
++++ _(CMP_UWORD_EQUAL_ORDERED, cmp_word_equal_ordered) \
++++ _(CMP_SBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
++++ _(CMP_SWORD_EQUAL_ORDERED, cmp_word_equal_ordered)
++++
++++enum {
++++#define _(name, func_suffix) name,
++++ SSE2NEON_CMPESTR_LIST
++++#undef _
++++};
++++typedef int (*cmpestr_func_t)(__m128i a, int la, __m128i b, int lb);
++++static cmpestr_func_t _sse2neon_cmpfunc_table[] = {
++++#define _(name, func_suffix) _sse2neon_##func_suffix,
++++ SSE2NEON_CMPESTR_LIST
++++#undef _
++++};
++++
++++FORCE_INLINE int _sse2neon_sido_negative(int res, int lb, int imm8, int bound)
++++{
++++ switch (imm8 & 0x30) {
++++ case _SIDD_NEGATIVE_POLARITY:
++++ res ^= 0xffffffff;
++++ break;
++++ case _SIDD_MASKED_NEGATIVE_POLARITY:
++++ res ^= (1 << lb) - 1;
++++ break;
++++ default:
++++ break;
++++ }
++++
++++ return res & ((bound == 8) ? 0xFF : 0xFFFF);
++++}
++++
++++FORCE_INLINE int _sse2neon_clz(unsigned int x)
++++{
++++#ifdef _MSC_VER
++++ unsigned long cnt = 0;
++++ if (_BitScanReverse(&cnt, x))
++++ return 31 - cnt;
++++ return 32;
++++#else
++++ return x != 0 ? __builtin_clz(x) : 32;
++++#endif
++++}
++++
++++FORCE_INLINE int _sse2neon_ctz(unsigned int x)
++++{
++++#ifdef _MSC_VER
++++ unsigned long cnt = 0;
++++ if (_BitScanForward(&cnt, x))
++++ return cnt;
++++ return 32;
++++#else
++++ return x != 0 ? __builtin_ctz(x) : 32;
++++#endif
++++}
++++
++++FORCE_INLINE int _sse2neon_ctzll(unsigned long long x)
++++{
++++#ifdef _MSC_VER
++++ unsigned long cnt;
++++#if defined(SSE2NEON_HAS_BITSCAN64)
++++ if (_BitScanForward64(&cnt, x))
++++ return (int) (cnt);
++++#else
++++ if (_BitScanForward(&cnt, (unsigned long) (x)))
++++ return (int) cnt;
++++ if (_BitScanForward(&cnt, (unsigned long) (x >> 32)))
++++ return (int) (cnt + 32);
++++#endif /* SSE2NEON_HAS_BITSCAN64 */
++++ return 64;
++++#else /* assume GNU compatible compilers */
++++ return x != 0 ? __builtin_ctzll(x) : 64;
++++#endif
++++}
++++
++++#define SSE2NEON_MIN(x, y) (x) < (y) ? (x) : (y)
++++
++++#define SSE2NEON_CMPSTR_SET_UPPER(var, imm) \
++++ const int var = (imm & 0x01) ? 8 : 16
++++
++++#define SSE2NEON_CMPESTRX_LEN_PAIR(a, b, la, lb) \
++++ int tmp1 = la ^ (la >> 31); \
++++ la = tmp1 - (la >> 31); \
++++ int tmp2 = lb ^ (lb >> 31); \
++++ lb = tmp2 - (lb >> 31); \
++++ la = SSE2NEON_MIN(la, bound); \
++++ lb = SSE2NEON_MIN(lb, bound)
++++
++++// Compare all pairs of characters in strings a and b,
++++// then aggregate the result.
++++// Since the only difference between PCMPESTR* and PCMPISTR* is how the string
++++// lengths are determined, SSE2NEON_CMP{I,E}STRX_LEN_PAIR is used to obtain the
++++// lengths of a and b.
++++#define SSE2NEON_COMP_AGG(a, b, la, lb, imm8, IE) \
++++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8); \
++++ SSE2NEON_##IE##_LEN_PAIR(a, b, la, lb); \
++++ int r2 = (_sse2neon_cmpfunc_table[imm8 & 0x0f])(a, la, b, lb); \
++++ r2 = _sse2neon_sido_negative(r2, lb, imm8, bound)
++++
++++#define SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8) \
++++ return (r2 == 0) ? bound \
++++ : ((imm8 & 0x40) ? (31 - _sse2neon_clz(r2)) \
++++ : _sse2neon_ctz(r2))
++++
++++#define SSE2NEON_CMPSTR_GENERATE_MASK(dst) \
++++ __m128i dst = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
++++ if (imm8 & 0x40) { \
++++ if (bound == 8) { \
++++ uint16x8_t tmp = vtstq_u16(vdupq_n_u16(r2), \
++++ vld1q_u16(_sse2neon_cmpestr_mask16b)); \
++++ dst = vreinterpretq_m128i_u16(vbslq_u16( \
++++ tmp, vdupq_n_u16(-1), vreinterpretq_u16_m128i(dst))); \
++++ } else { \
++++ uint8x16_t vec_r2 = \
++++ vcombine_u8(vdup_n_u8(r2), vdup_n_u8(r2 >> 8)); \
++++ uint8x16_t tmp = \
++++ vtstq_u8(vec_r2, vld1q_u8(_sse2neon_cmpestr_mask8b)); \
++++ dst = vreinterpretq_m128i_u8( \
++++ vbslq_u8(tmp, vdupq_n_u8(-1), vreinterpretq_u8_m128i(dst))); \
++++ } \
++++ } else { \
++++ if (bound == 16) { \
++++ dst = vreinterpretq_m128i_u16( \
++++ vsetq_lane_u16(r2 & 0xffff, vreinterpretq_u16_m128i(dst), 0)); \
++++ } else { \
++++ dst = vreinterpretq_m128i_u8( \
++++ vsetq_lane_u8(r2 & 0xff, vreinterpretq_u8_m128i(dst), 0)); \
++++ } \
++++ } \
++++ return dst
++++
++++// Compare packed strings in a and b with lengths la and lb using the control
++++// in imm8, and returns 1 if b did not contain a null character and the
++++// resulting mask was zero, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestra
++++FORCE_INLINE int _mm_cmpestra(__m128i a,
++++ int la,
++++ __m128i b,
++++ int lb,
++++ const int imm8)
++++{
++++ int lb_cpy = lb;
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
++++ return !r2 & (lb_cpy > bound);
++++}
++++
++++// Compare packed strings in a and b with lengths la and lb using the control in
++++// imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrc
++++FORCE_INLINE int _mm_cmpestrc(__m128i a,
++++ int la,
++++ __m128i b,
++++ int lb,
++++ const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
++++ return r2 != 0;
++++}
++++
++++// Compare packed strings in a and b with lengths la and lb using the control
++++// in imm8, and store the generated index in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestri
++++FORCE_INLINE int _mm_cmpestri(__m128i a,
++++ int la,
++++ __m128i b,
++++ int lb,
++++ const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
++++ SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
++++}
++++
++++// Compare packed strings in a and b with lengths la and lb using the control
++++// in imm8, and store the generated mask in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrm
++++FORCE_INLINE __m128i
++++_mm_cmpestrm(__m128i a, int la, __m128i b, int lb, const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
++++ SSE2NEON_CMPSTR_GENERATE_MASK(dst);
++++}
++++
++++// Compare packed strings in a and b with lengths la and lb using the control in
++++// imm8, and returns bit 0 of the resulting bit mask.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestro
++++FORCE_INLINE int _mm_cmpestro(__m128i a,
++++ int la,
++++ __m128i b,
++++ int lb,
++++ const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
++++ return r2 & 1;
++++}
++++
++++// Compare packed strings in a and b with lengths la and lb using the control in
++++// imm8, and returns 1 if any character in a was null, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrs
++++FORCE_INLINE int _mm_cmpestrs(__m128i a,
++++ int la,
++++ __m128i b,
++++ int lb,
++++ const int imm8)
++++{
++++ (void) a;
++++ (void) b;
++++ (void) lb;
++++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
++++ return la <= (bound - 1);
++++}
++++
++++// Compare packed strings in a and b with lengths la and lb using the control in
++++// imm8, and returns 1 if any character in b was null, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrz
++++FORCE_INLINE int _mm_cmpestrz(__m128i a,
++++ int la,
++++ __m128i b,
++++ int lb,
++++ const int imm8)
++++{
++++ (void) a;
++++ (void) b;
++++ (void) la;
++++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
++++ return lb <= (bound - 1);
++++}
++++
++++#define SSE2NEON_CMPISTRX_LENGTH(str, len, imm8) \
++++ do { \
++++ if (imm8 & 0x01) { \
++++ uint16x8_t equal_mask_##str = \
++++ vceqq_u16(vreinterpretq_u16_m128i(str), vdupq_n_u16(0)); \
++++ uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
++++ uint64_t matches_##str = \
++++ vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
++++ len = _sse2neon_ctzll(matches_##str) >> 3; \
++++ } else { \
++++ uint16x8_t equal_mask_##str = vreinterpretq_u16_u8( \
++++ vceqq_u8(vreinterpretq_u8_m128i(str), vdupq_n_u8(0))); \
++++ uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
++++ uint64_t matches_##str = \
++++ vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
++++ len = _sse2neon_ctzll(matches_##str) >> 2; \
++++ } \
++++ } while (0)
++++
++++#define SSE2NEON_CMPISTRX_LEN_PAIR(a, b, la, lb) \
++++ int la, lb; \
++++ do { \
++++ SSE2NEON_CMPISTRX_LENGTH(a, la, imm8); \
++++ SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8); \
++++ } while (0)
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and returns 1 if b did not contain a null character and the resulting
++++// mask was zero, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistra
++++FORCE_INLINE int _mm_cmpistra(__m128i a, __m128i b, const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
++++ return !r2 & (lb >= bound);
++++}
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrc
++++FORCE_INLINE int _mm_cmpistrc(__m128i a, __m128i b, const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
++++ return r2 != 0;
++++}
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and store the generated index in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistri
++++FORCE_INLINE int _mm_cmpistri(__m128i a, __m128i b, const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
++++ SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
++++}
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and store the generated mask in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrm
++++FORCE_INLINE __m128i _mm_cmpistrm(__m128i a, __m128i b, const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
++++ SSE2NEON_CMPSTR_GENERATE_MASK(dst);
++++}
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and returns bit 0 of the resulting bit mask.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistro
++++FORCE_INLINE int _mm_cmpistro(__m128i a, __m128i b, const int imm8)
++++{
++++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
++++ return r2 & 1;
++++}
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and returns 1 if any character in a was null, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrs
++++FORCE_INLINE int _mm_cmpistrs(__m128i a, __m128i b, const int imm8)
++++{
++++ (void) b;
++++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
++++ int la;
++++ SSE2NEON_CMPISTRX_LENGTH(a, la, imm8);
++++ return la <= (bound - 1);
++++}
++++
++++// Compare packed strings with implicit lengths in a and b using the control in
++++// imm8, and returns 1 if any character in b was null, and 0 otherwise.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrz
++++FORCE_INLINE int _mm_cmpistrz(__m128i a, __m128i b, const int imm8)
++++{
++++ (void) a;
++++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
++++ int lb;
++++ SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8);
++++ return lb <= (bound - 1);
++++}
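++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): a typical
++++// use of _mm_cmpistri, locating the first decimal digit in a 16-byte chunk
++++// of text. The control value 0x00 is _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY
++++// ("does any needle byte match this haystack byte?"); a return value of 16
++++// means no digit was found in the chunk. The helper name is an assumption
++++// made up for this example.
++++FORCE_INLINE int _sse2neon_example_find_digit(__m128i chunk)
++++{
++++    const __m128i digits =
++++        _mm_loadu_si128((const __m128i *) "0123456789\0\0\0\0\0");
++++    return _mm_cmpistri(digits, chunk, 0x00);
++++}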
++++
++++// Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers
++++// in b for greater than.
++++FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ return vreinterpretq_m128i_u64(
++++ vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
++++#else
++++ return vreinterpretq_m128i_s64(vshrq_n_s64(
++++ vqsubq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a)),
++++ 63));
++++#endif
++++}
++++
++++// Starting with the initial value in crc, accumulates a CRC32 value for
++++// unsigned 16-bit integer v, and stores the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u16
++++FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
++++{
++++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
++++ __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
++++ : [c] "+r"(crc)
++++ : [v] "r"(v));
++++#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
++++ (defined(_M_ARM64) && !defined(__clang__))
++++ crc = __crc32ch(crc, v);
++++#else
++++ crc = _mm_crc32_u8(crc, v & 0xff);
++++ crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
++++#endif
++++ return crc;
++++}
++++
++++// Starting with the initial value in crc, accumulates a CRC32 value for
++++// unsigned 32-bit integer v, and stores the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u32
++++FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
++++{
++++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
++++ __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
++++ : [c] "+r"(crc)
++++ : [v] "r"(v));
++++#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
++++ (defined(_M_ARM64) && !defined(__clang__))
++++ crc = __crc32cw(crc, v);
++++#else
++++ crc = _mm_crc32_u16(crc, v & 0xffff);
++++ crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
++++#endif
++++ return crc;
++++}
++++
++++// Starting with the initial value in crc, accumulates a CRC32 value for
++++// unsigned 64-bit integer v, and stores the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u64
++++FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
++++{
++++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
++++ __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
++++ : [c] "+r"(crc)
++++ : [v] "r"(v));
++++#elif (defined(_M_ARM64) && !defined(__clang__))
++++ crc = __crc32cd((uint32_t) crc, v);
++++#else
++++ crc = _mm_crc32_u32((uint32_t) (crc), v & 0xffffffff);
++++ crc = _mm_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff);
++++#endif
++++ return crc;
++++}
++++
++++// Starting with the initial value in crc, accumulates a CRC32 value for
++++// unsigned 8-bit integer v, and stores the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u8
++++FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
++++{
++++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
++++ __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
++++ : [c] "+r"(crc)
++++ : [v] "r"(v));
++++#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
++++ (defined(_M_ARM64) && !defined(__clang__))
++++ crc = __crc32cb(crc, v);
++++#else
++++ crc ^= v;
++++ for (int bit = 0; bit < 8; bit++) {
++++ if (crc & 1)
++++ crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
++++ else
++++ crc = (crc >> 1);
++++ }
++++#endif
++++ return crc;
++++}
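++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): computing a
++++// CRC-32C (Castagnoli) checksum over a byte buffer with the intrinsic above.
++++// The helper name and the 0xFFFFFFFF pre-/post-conditioning are conventional
++++// CRC-32C choices, not something this header mandates.
++++FORCE_INLINE uint32_t _sse2neon_example_crc32c(const uint8_t *buf, int len)
++++{
++++    uint32_t crc = 0xFFFFFFFF; /* conventional initial value */
++++    for (int i = 0; i < len; i++)
++++        crc = _mm_crc32_u8(crc, buf[i]);
++++    return ~crc; /* conventional final inversion */
++++}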
++++
++++/* AES */
++++
++++#if !defined(__ARM_FEATURE_CRYPTO) && (!defined(_M_ARM64) || defined(__clang__))
++++/* clang-format off */
++++#define SSE2NEON_AES_SBOX(w) \
++++ { \
++++ w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), \
++++ w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), \
++++ w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), \
++++ w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), \
++++ w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), \
++++ w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), \
++++ w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), \
++++ w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), \
++++ w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), \
++++ w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), \
++++ w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), \
++++ w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), \
++++ w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), \
++++ w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), \
++++ w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), \
++++ w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), \
++++ w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), \
++++ w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), \
++++ w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), \
++++ w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), \
++++ w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), \
++++ w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), \
++++ w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), \
++++ w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), \
++++ w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), \
++++ w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), \
++++ w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), \
++++ w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), \
++++ w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), \
++++ w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), \
++++ w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), \
++++ w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e), \
++++ w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), \
++++ w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), \
++++ w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), \
++++ w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), \
++++ w(0xb0), w(0x54), w(0xbb), w(0x16) \
++++ }
++++#define SSE2NEON_AES_RSBOX(w) \
++++ { \
++++ w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), \
++++ w(0x38), w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), \
++++ w(0xd7), w(0xfb), w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), \
++++ w(0x2f), w(0xff), w(0x87), w(0x34), w(0x8e), w(0x43), w(0x44), \
++++ w(0xc4), w(0xde), w(0xe9), w(0xcb), w(0x54), w(0x7b), w(0x94), \
++++ w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d), w(0xee), w(0x4c), \
++++ w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e), w(0x08), \
++++ w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2), \
++++ w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), \
++++ w(0x25), w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), \
++++ w(0x98), w(0x16), w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), \
++++ w(0x65), w(0xb6), w(0x92), w(0x6c), w(0x70), w(0x48), w(0x50), \
++++ w(0xfd), w(0xed), w(0xb9), w(0xda), w(0x5e), w(0x15), w(0x46), \
++++ w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84), w(0x90), w(0xd8), \
++++ w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a), w(0xf7), \
++++ w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06), \
++++ w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), \
++++ w(0x02), w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), \
++++ w(0x8a), w(0x6b), w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), \
++++ w(0x67), w(0xdc), w(0xea), w(0x97), w(0xf2), w(0xcf), w(0xce), \
++++ w(0xf0), w(0xb4), w(0xe6), w(0x73), w(0x96), w(0xac), w(0x74), \
++++ w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85), w(0xe2), w(0xf9), \
++++ w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e), w(0x47), \
++++ w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89), \
++++ w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), \
++++ w(0x1b), w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), \
++++ w(0x79), w(0x20), w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), \
++++ w(0xcd), w(0x5a), w(0xf4), w(0x1f), w(0xdd), w(0xa8), w(0x33), \
++++ w(0x88), w(0x07), w(0xc7), w(0x31), w(0xb1), w(0x12), w(0x10), \
++++ w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f), w(0x60), w(0x51), \
++++ w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d), w(0x2d), \
++++ w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef), \
++++ w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), \
++++ w(0xb0), w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), \
++++ w(0x99), w(0x61), w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), \
++++ w(0x77), w(0xd6), w(0x26), w(0xe1), w(0x69), w(0x14), w(0x63), \
++++ w(0x55), w(0x21), w(0x0c), w(0x7d) \
++++ }
++++/* clang-format on */
++++
++++/* X Macro trick. See https://en.wikipedia.org/wiki/X_Macro */
++++#define SSE2NEON_AES_H0(x) (x)
++++static const uint8_t _sse2neon_sbox[256] = SSE2NEON_AES_SBOX(SSE2NEON_AES_H0);
++++static const uint8_t _sse2neon_rsbox[256] = SSE2NEON_AES_RSBOX(SSE2NEON_AES_H0);
++++#undef SSE2NEON_AES_H0
++++
++++/* x_time function and matrix multiply function */
++++#if !defined(__aarch64__) && !defined(_M_ARM64)
++++#define SSE2NEON_XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b))
++++#define SSE2NEON_MULTIPLY(x, y) \
++++ (((y & 1) * x) ^ ((y >> 1 & 1) * SSE2NEON_XT(x)) ^ \
++++ ((y >> 2 & 1) * SSE2NEON_XT(SSE2NEON_XT(x))) ^ \
++++ ((y >> 3 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x)))) ^ \
++++ ((y >> 4 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x))))))
++++#endif
++++
++++// In the absence of crypto extensions, implement aesenc using regular NEON
++++// intrinsics instead. See:
++++// https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
++++// and https://www.workofard.com/2017/07/ghash-for-low-end-cores/
++++// for more information.
++++FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i RoundKey)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ static const uint8_t shift_rows[] = {
++++ 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
++++ 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
++++ };
++++ static const uint8_t ror32by8[] = {
++++ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
++++ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
++++ };
++++
++++ uint8x16_t v;
++++ uint8x16_t w = vreinterpretq_u8_m128i(a);
++++
++++ /* shift rows */
++++ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
++++
++++ /* sub bytes */
++++ // Here, we split the whole 256-byte S-box into four 64-byte tables and
++++ // look them up one after another. Each lookup uses the next 64-byte chunk
++++ // of the table, so the lookup indices passed to `vqtbx4q_u8()` must be
++++ // reduced by the same offset as the chunk that was loaded (hence the
++++ // `w - 0x40`, `w - 0x80` and `w - 0xc0` arguments below).
++++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
++++ // 'w - 0x40' is equivalent to 'vsubq_u8(w, vdupq_n_u8(0x40))'
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
++++
++++ /* mix columns */
++++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
++++ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
++++ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
++++
++++ /* add round key */
++++ return vreinterpretq_m128i_u8(w) ^ RoundKey;
++++
++++#else /* ARMv7-A implementation for a table-based AES */
++++#define SSE2NEON_AES_B2W(b0, b1, b2, b3) \
++++ (((uint32_t) (b3) << 24) | ((uint32_t) (b2) << 16) | \
++++ ((uint32_t) (b1) << 8) | (uint32_t) (b0))
++++// multiplying 'x' by 2 in GF(2^8)
++++#define SSE2NEON_AES_F2(x) ((x << 1) ^ (((x >> 7) & 1) * 0x011b /* WPOLY */))
++++// multiplying 'x' by 3 in GF(2^8)
++++#define SSE2NEON_AES_F3(x) (SSE2NEON_AES_F2(x) ^ x)
++++#define SSE2NEON_AES_U0(p) \
++++ SSE2NEON_AES_B2W(SSE2NEON_AES_F2(p), p, p, SSE2NEON_AES_F3(p))
++++#define SSE2NEON_AES_U1(p) \
++++ SSE2NEON_AES_B2W(SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p, p)
++++#define SSE2NEON_AES_U2(p) \
++++ SSE2NEON_AES_B2W(p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p)
++++#define SSE2NEON_AES_U3(p) \
++++ SSE2NEON_AES_B2W(p, p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p))
++++
++++ // this generates a table containing every possible permutation of
++++ // shift_rows() and sub_bytes() with mix_columns().
++++ static const uint32_t ALIGN_STRUCT(16) aes_table[4][256] = {
++++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U0),
++++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U1),
++++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U2),
++++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U3),
++++ };
++++#undef SSE2NEON_AES_B2W
++++#undef SSE2NEON_AES_F2
++++#undef SSE2NEON_AES_F3
++++#undef SSE2NEON_AES_U0
++++#undef SSE2NEON_AES_U1
++++#undef SSE2NEON_AES_U2
++++#undef SSE2NEON_AES_U3
++++
++++ uint32_t x0 = _mm_cvtsi128_si32(a); // get a[31:0]
++++ uint32_t x1 =
++++ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55)); // get a[63:32]
++++ uint32_t x2 =
++++ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xAA)); // get a[95:64]
++++ uint32_t x3 =
++++ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF)); // get a[127:96]
++++
++++ // finish the modulo addition step in mix_columns()
++++ __m128i out = _mm_set_epi32(
++++ (aes_table[0][x3 & 0xff] ^ aes_table[1][(x0 >> 8) & 0xff] ^
++++ aes_table[2][(x1 >> 16) & 0xff] ^ aes_table[3][x2 >> 24]),
++++ (aes_table[0][x2 & 0xff] ^ aes_table[1][(x3 >> 8) & 0xff] ^
++++ aes_table[2][(x0 >> 16) & 0xff] ^ aes_table[3][x1 >> 24]),
++++ (aes_table[0][x1 & 0xff] ^ aes_table[1][(x2 >> 8) & 0xff] ^
++++ aes_table[2][(x3 >> 16) & 0xff] ^ aes_table[3][x0 >> 24]),
++++ (aes_table[0][x0 & 0xff] ^ aes_table[1][(x1 >> 8) & 0xff] ^
++++ aes_table[2][(x2 >> 16) & 0xff] ^ aes_table[3][x3 >> 24]));
++++
++++ return _mm_xor_si128(out, RoundKey);
++++#endif
++++}
++++
++++// Perform one round of an AES decryption flow on data (state) in a using the
++++// round key in RoundKey, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
++++FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
++++{
++++#if defined(__aarch64__)
++++ static const uint8_t inv_shift_rows[] = {
++++ 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
++++ 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
++++ };
++++ static const uint8_t ror32by8[] = {
++++ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
++++ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
++++ };
++++
++++ uint8x16_t v;
++++ uint8x16_t w = vreinterpretq_u8_m128i(a);
++++
++++ // inverse shift rows
++++ w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
++++
++++ // inverse sub bytes
++++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
++++
++++ // inverse mix columns
++++ // multiplying 'v' by 4 in GF(2^8)
++++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
++++ w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
++++ v ^= w;
++++ v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
++++
++++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) &
++++ 0x1b); // multiplying 'v' by 2 in GF(2^8)
++++ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
++++ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
++++
++++ // add round key
++++ return vreinterpretq_m128i_u8(w) ^ RoundKey;
++++
++++#else /* ARMv7-A NEON implementation */
++++ /* FIXME: optimized for NEON */
++++ uint8_t i, e, f, g, h, v[4][4];
++++ uint8_t *_a = (uint8_t *) &a;
++++ for (i = 0; i < 16; ++i) {
++++ v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
++++ }
++++
++++ // inverse mix columns
++++ for (i = 0; i < 4; ++i) {
++++ e = v[i][0];
++++ f = v[i][1];
++++ g = v[i][2];
++++ h = v[i][3];
++++
++++ v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
++++ SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
++++ v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
++++ SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
++++ v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
++++ SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
++++ v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
++++ SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
++++ }
++++
++++ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
++++#endif
++++}
++++
++++// Perform the last round of an AES encryption flow on data (state) in a using
++++// the round key in RoundKey, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
++++FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
++++{
++++#if defined(__aarch64__)
++++ static const uint8_t shift_rows[] = {
++++ 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
++++ 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
++++ };
++++
++++ uint8x16_t v;
++++ uint8x16_t w = vreinterpretq_u8_m128i(a);
++++
++++ // shift rows
++++ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
++++
++++ // sub bytes
++++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
++++
++++ // add round key
++++ return vreinterpretq_m128i_u8(v) ^ RoundKey;
++++
++++#else /* ARMv7-A implementation */
++++ uint8_t v[16] = {
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 0)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 5)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 10)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 15)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 4)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 9)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 14)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 3)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 8)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 13)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 2)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 7)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 12)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 1)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 6)],
++++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 11)],
++++ };
++++
++++ return vreinterpretq_m128i_u8(vld1q_u8(v)) ^ RoundKey;
++++#endif
++++}
++++
++++// Perform the last round of an AES decryption flow on data (state) in a using
++++// the round key in RoundKey, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
++++FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
++++{
++++#if defined(__aarch64__)
++++ static const uint8_t inv_shift_rows[] = {
++++ 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
++++ 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
++++ };
++++
++++ uint8x16_t v;
++++ uint8x16_t w = vreinterpretq_u8_m128i(a);
++++
++++ // inverse shift rows
++++ w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
++++
++++ // inverse sub bytes
++++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
++++
++++ // add round key
++++ return vreinterpretq_m128i_u8(v) ^ RoundKey;
++++
++++#else /* ARMv7-A NEON implementation */
++++ /* FIXME: optimized for NEON */
++++ uint8_t v[4][4];
++++ uint8_t *_a = (uint8_t *) &a;
++++ for (int i = 0; i < 16; ++i) {
++++ v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
++++ }
++++
++++ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
++++#endif
++++}
++++
++++// Perform the InvMixColumns transformation on a and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
++++FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
++++{
++++#if defined(__aarch64__)
++++ static const uint8_t ror32by8[] = {
++++ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
++++ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
++++ };
++++ uint8x16_t v = vreinterpretq_u8_m128i(a);
++++ uint8x16_t w;
++++
++++ // multiplying 'v' by 4 in GF(2^8)
++++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
++++ w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
++++ v ^= w;
++++ v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
++++
++++ // multiplying 'v' by 2 in GF(2^8)
++++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
++++ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
++++ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
++++ return vreinterpretq_m128i_u8(w);
++++
++++#else /* ARMv7-A NEON implementation */
++++ uint8_t i, e, f, g, h, v[4][4];
++++ vst1q_u8((uint8_t *) v, vreinterpretq_u8_m128i(a));
++++ for (i = 0; i < 4; ++i) {
++++ e = v[i][0];
++++ f = v[i][1];
++++ g = v[i][2];
++++ h = v[i][3];
++++
++++ v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
++++ SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
++++ v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
++++ SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
++++ v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
++++ SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
++++ v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
++++ SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
++++ }
++++
++++ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v));
++++#endif
++++}
++++
++++// Assist in expanding the AES cipher key by computing steps towards generating
++++// a round key for encryption cipher using data from a and an 8-bit round
++++// constant specified in imm8, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
++++//
++++// Emits the Advanced Encryption Standard (AES) instruction aeskeygenassist.
++++// This instruction generates a round key for AES encryption. See
++++// https://kazakov.life/2017/11/01/cryptocurrency-mining-on-ios-devices/
++++// for details.
++++FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
++++{
++++#if defined(__aarch64__)
++++ uint8x16_t _a = vreinterpretq_u8_m128i(a);
++++ uint8x16_t v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), _a);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), _a - 0x40);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), _a - 0x80);
++++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), _a - 0xc0);
++++
++++ uint32x4_t v_u32 = vreinterpretq_u32_u8(v);
++++ uint32x4_t ror_v = vorrq_u32(vshrq_n_u32(v_u32, 8), vshlq_n_u32(v_u32, 24));
++++ uint32x4_t ror_xor_v = veorq_u32(ror_v, vdupq_n_u32(rcon));
++++
++++ return vreinterpretq_m128i_u32(vtrn2q_u32(v_u32, ror_xor_v));
++++
++++#else /* ARMv7-A NEON implementation */
++++ uint32_t X1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55));
++++ uint32_t X3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF));
++++ for (int i = 0; i < 4; ++i) {
++++ ((uint8_t *) &X1)[i] = _sse2neon_sbox[((uint8_t *) &X1)[i]];
++++ ((uint8_t *) &X3)[i] = _sse2neon_sbox[((uint8_t *) &X3)[i]];
++++ }
++++ return _mm_set_epi32(((X3 >> 8) | (X3 << 24)) ^ rcon, X3,
++++ ((X1 >> 8) | (X1 << 24)) ^ rcon, X1);
++++#endif
++++}
++++#undef SSE2NEON_AES_SBOX
++++#undef SSE2NEON_AES_RSBOX
++++
++++#if defined(__aarch64__)
++++#undef SSE2NEON_XT
++++#undef SSE2NEON_MULTIPLY
++++#endif
++++
++++#else /* __ARM_FEATURE_CRYPTO */
++++// Implements equivalent of 'aesenc' by combining AESE (with an empty key) and
++++// AESMC and then manually applying the real key as an xor operation. This
++++// unfortunately means an additional xor op; the compiler should be able to
++++// optimize this away for repeated calls however. See
++++// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
++++// for more details.
++++FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
++++{
++++ return vreinterpretq_m128i_u8(veorq_u8(
++++ vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
++++ vreinterpretq_u8_m128i(b)));
++++}
++++
++++// Perform one round of an AES decryption flow on data (state) in a using the
++++// round key in RoundKey, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
++++FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
++++{
++++ return vreinterpretq_m128i_u8(veorq_u8(
++++ vaesimcq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
++++ vreinterpretq_u8_m128i(RoundKey)));
++++}
++++
++++// Perform the last round of an AES encryption flow on data (state) in a using
++++// the round key in RoundKey, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
++++FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
++++{
++++ return _mm_xor_si128(vreinterpretq_m128i_u8(vaeseq_u8(
++++ vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
++++ RoundKey);
++++}
++++
++++// Perform the last round of an AES decryption flow on data (state) in a using
++++// the round key in RoundKey, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
++++FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
++++{
++++ return vreinterpretq_m128i_u8(
++++ veorq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)),
++++ vreinterpretq_u8_m128i(RoundKey)));
++++}
++++
++++// Perform the InvMixColumns transformation on a and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
++++FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
++++{
++++ return vreinterpretq_m128i_u8(vaesimcq_u8(vreinterpretq_u8_m128i(a)));
++++}
++++
++++// Assist in expanding the AES cipher key by computing steps towards generating
++++// a round key for encryption cipher using data from a and an 8-bit round
++++// constant specified in imm8, and store the result in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
++++FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
++++{
++++ // AESE does ShiftRows and SubBytes on A
++++ uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0));
++++
++++#ifndef _MSC_VER
++++ uint8x16_t dest = {
++++ // Undo ShiftRows step from AESE and extract X1 and X3
++++ u8[0x4], u8[0x1], u8[0xE], u8[0xB], // SubBytes(X1)
++++ u8[0x1], u8[0xE], u8[0xB], u8[0x4], // ROT(SubBytes(X1))
++++ u8[0xC], u8[0x9], u8[0x6], u8[0x3], // SubBytes(X3)
++++ u8[0x9], u8[0x6], u8[0x3], u8[0xC], // ROT(SubBytes(X3))
++++ };
++++ uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon};
++++ return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r);
++++#else
++++ // We have to do this hack because MSVC strictly adheres to the C++
++++ // standard, in particular C++03 8.5.1 sub-section 15, which states that
++++ // unions must be initialized by their first member type.
++++
++++ // As per the Windows ARM64 ABI, it is always little endian, so this works
++++ __n128 dest{
++++ ((uint64_t) u8.n128_u8[0x4] << 0) | ((uint64_t) u8.n128_u8[0x1] << 8) |
++++ ((uint64_t) u8.n128_u8[0xE] << 16) |
++++ ((uint64_t) u8.n128_u8[0xB] << 24) |
++++ ((uint64_t) u8.n128_u8[0x1] << 32) |
++++ ((uint64_t) u8.n128_u8[0xE] << 40) |
++++ ((uint64_t) u8.n128_u8[0xB] << 48) |
++++ ((uint64_t) u8.n128_u8[0x4] << 56),
++++ ((uint64_t) u8.n128_u8[0xC] << 0) | ((uint64_t) u8.n128_u8[0x9] << 8) |
++++ ((uint64_t) u8.n128_u8[0x6] << 16) |
++++ ((uint64_t) u8.n128_u8[0x3] << 24) |
++++ ((uint64_t) u8.n128_u8[0x9] << 32) |
++++ ((uint64_t) u8.n128_u8[0x6] << 40) |
++++ ((uint64_t) u8.n128_u8[0x3] << 48) |
++++ ((uint64_t) u8.n128_u8[0xC] << 56)};
++++
++++ dest.n128_u32[1] = dest.n128_u32[1] ^ rcon;
++++ dest.n128_u32[3] = dest.n128_u32[3] ^ rcon;
++++
++++ return dest;
++++#endif
++++}
++++#endif
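++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): encrypting a
++++// single 16-byte block with AES-128 using the intrinsics above, given an
++++// already expanded key schedule rk[0..10] (11 round keys). Key expansion is
++++// out of scope here; _mm_aeskeygenassist_si128 is only one building block of
++++// it. The helper name is an assumption introduced for this example.
++++FORCE_INLINE __m128i _sse2neon_example_aes128_encrypt_block(__m128i block,
++++                                                            const __m128i rk[11])
++++{
++++    block = _mm_xor_si128(block, rk[0]); /* initial AddRoundKey */
++++    for (int i = 1; i < 10; i++)
++++        block = _mm_aesenc_si128(block, rk[i]); /* rounds 1-9 */
++++    return _mm_aesenclast_si128(block, rk[10]); /* final round, no MixColumns */
++++}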
++++
++++/* Others */
++++
++++// Perform a carry-less multiplication of two 64-bit integers, selected from a
++++// and b according to imm8, and store the results in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clmulepi64_si128
++++FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
++++{
++++ uint64x2_t a = vreinterpretq_u64_m128i(_a);
++++ uint64x2_t b = vreinterpretq_u64_m128i(_b);
++++ switch (imm & 0x11) {
++++ case 0x00:
++++ return vreinterpretq_m128i_u64(
++++ _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
++++ case 0x01:
++++ return vreinterpretq_m128i_u64(
++++ _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
++++ case 0x10:
++++ return vreinterpretq_m128i_u64(
++++ _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
++++ case 0x11:
++++ return vreinterpretq_m128i_u64(
++++ _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b)));
++++ default:
++++ abort();
++++ }
++++}
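++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): a carry-less
++++// (polynomial) multiplication of two plain 64-bit integers. imm8 = 0x00
++++// selects the low 64-bit halves of both operands; the full 128-bit product
++++// is returned. The helper name is an assumption made for this example.
++++FORCE_INLINE __m128i _sse2neon_example_clmul_u64(uint64_t x, uint64_t y)
++++{
++++    __m128i a = _mm_set_epi64x(0, (int64_t) x);
++++    __m128i b = _mm_set_epi64x(0, (int64_t) y);
++++    return _mm_clmulepi64_si128(a, b, 0x00);
++++}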
++++
++++FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode(void)
++++{
++++ union {
++++ fpcr_bitfield field;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t value;
++++#else
++++ uint32_t value;
++++#endif
++++ } r;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ r.value = _sse2neon_get_fpcr();
++++#else
++++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
++++#endif
++++
++++ return r.field.bit24 ? _MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF;
++++}
++++
++++// Count the number of bits set to 1 in unsigned 32-bit integer a, and
++++// return that count in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u32
++++FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++#if __has_builtin(__builtin_popcount)
++++ return __builtin_popcount(a);
++++#elif defined(_MSC_VER)
++++ return _CountOneBits(a);
++++#else
++++ return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a)));
++++#endif
++++#else
++++ uint32_t count = 0;
++++ uint8x8_t input_val, count8x8_val;
++++ uint16x4_t count16x4_val;
++++ uint32x2_t count32x2_val;
++++
++++ input_val = vld1_u8((uint8_t *) &a);
++++ count8x8_val = vcnt_u8(input_val);
++++ count16x4_val = vpaddl_u8(count8x8_val);
++++ count32x2_val = vpaddl_u16(count16x4_val);
++++
++++ vst1_u32(&count, count32x2_val);
++++ return count;
++++#endif
++++}
++++
++++// Count the number of bits set to 1 in unsigned 64-bit integer a, and
++++// return that count in dst.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u64
++++FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++#if __has_builtin(__builtin_popcountll)
++++ return __builtin_popcountll(a);
++++#elif defined(_MSC_VER)
++++ return _CountOneBits64(a);
++++#else
++++ return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a)));
++++#endif
++++#else
++++ uint64_t count = 0;
++++ uint8x8_t input_val, count8x8_val;
++++ uint16x4_t count16x4_val;
++++ uint32x2_t count32x2_val;
++++ uint64x1_t count64x1_val;
++++
++++ input_val = vld1_u8((uint8_t *) &a);
++++ count8x8_val = vcnt_u8(input_val);
++++ count16x4_val = vpaddl_u8(count8x8_val);
++++ count32x2_val = vpaddl_u16(count16x4_val);
++++ count64x1_val = vpaddl_u32(count32x2_val);
++++ vst1_u64(&count, count64x1_val);
++++ return count;
++++#endif
++++}
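++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): summing the
++++// set bits of an array of 64-bit words with the population-count intrinsic
++++// above, e.g. to count the members of a bitset. The helper name is an
++++// assumption made for this example.
++++FORCE_INLINE int64_t _sse2neon_example_bitset_count(const uint64_t *words,
++++                                                    int n)
++++{
++++    int64_t total = 0;
++++    for (int i = 0; i < n; i++)
++++        total += _mm_popcnt_u64(words[i]);
++++    return total;
++++}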
++++
++++FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag)
++++{
++++ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
++++ // regardless of the value of the FZ bit.
++++ union {
++++ fpcr_bitfield field;
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t value;
++++#else
++++ uint32_t value;
++++#endif
++++ } r;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ r.value = _sse2neon_get_fpcr();
++++#else
++++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
++++#endif
++++
++++ r.field.bit24 = (flag & _MM_DENORMALS_ZERO_MASK) == _MM_DENORMALS_ZERO_ON;
++++
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ _sse2neon_set_fpcr(r.value);
++++#else
++++ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
++++#endif
++++}
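++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): enabling
++++// denormals-are-zero treatment and reading the setting back. On x86 this is
++++// normally done through the _MM_SET_DENORMALS_ZERO_MODE /
++++// _MM_GET_DENORMALS_ZERO_MODE macros; here the underlying helpers defined
++++// above are called directly.
++++FORCE_INLINE void _sse2neon_example_enable_daz(void)
++++{
++++    _sse2neon_mm_set_denormals_zero_mode(_MM_DENORMALS_ZERO_ON);
++++    /* should now report _MM_DENORMALS_ZERO_ON */
++++    (void) _sse2neon_mm_get_denormals_zero_mode();
++++}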
++++
++++// Return the current 64-bit value of the processor's time-stamp counter.
++++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=rdtsc
++++FORCE_INLINE uint64_t _rdtsc(void)
++++{
++++#if defined(__aarch64__) || defined(_M_ARM64)
++++ uint64_t val;
++++
++++ /* According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the
++++ * system counter is at least 56 bits wide; from Armv8.6, the counter
++++ * must be 64 bits wide. So the system counter could be narrower than 64
++++ * bits, in which case it is exposed with the 'cap_user_time_short'
++++ * flag set.
++++ */
++++#if defined(_MSC_VER)
++++ val = _ReadStatusReg(ARM64_SYSREG(3, 3, 14, 0, 2));
++++#else
++++ __asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(val));
++++#endif
++++
++++ return val;
++++#else
++++ uint32_t pmccntr, pmuseren, pmcntenset;
++++ // Read the user mode Performance Monitoring Unit (PMU)
++++ // User Enable Register (PMUSERENR) access permissions.
++++ __asm__ __volatile__("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
++++ if (pmuseren & 1) { // Allows reading PMUSERENR for user mode code.
++++ __asm__ __volatile__("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
++++ if (pmcntenset & 0x80000000UL) { // Is it counting?
++++ __asm__ __volatile__("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
++++ // The counter is set up to count every 64th cycle
++++ return (uint64_t) (pmccntr) << 6;
++++ }
++++ }
++++
++++ // Fall back to gettimeofday() as we can't enable PMUSERENR in user mode.
++++ struct timeval tv;
++++ gettimeofday(&tv, NULL);
++++ return (uint64_t) (tv.tv_sec) * 1000000 + tv.tv_usec;
++++#endif
++++}
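++++
++++// Editor's illustrative sketch (not part of upstream sse2neon): using
++++// _rdtsc() to time a region of code. Note that on AArch64 this reads the
++++// generic timer (CNTVCT_EL0), so the unit is timer ticks rather than CPU
++++// cycles, and on ARMv7 it may fall back to microseconds; treat the value as
++++// a monotonic tick count, not a cycle count. The helper name and callback
++++// signature are assumptions made for this example.
++++FORCE_INLINE uint64_t _sse2neon_example_elapsed_ticks(void (*fn)(void))
++++{
++++    uint64_t start = _rdtsc();
++++    fn();
++++    return _rdtsc() - start;
++++}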
++++
++++#if defined(__GNUC__) || defined(__clang__)
++++#pragma pop_macro("ALIGN_STRUCT")
++++#pragma pop_macro("FORCE_INLINE")
++++#endif
++++
++++#if defined(__GNUC__) && !defined(__clang__)
++++#pragma GCC pop_options
++++#endif
++++
++++#endif
--- /dev/null
--- /dev/null
--- /dev/null
+++optional-static-apps
+++omit-doxygen-build-paths
+++update-doxygen
+++doxygen-pdf
+++doxygen-without-sse2neon
+++patch-sse2neon-here
--- /dev/null
--- /dev/null
--- /dev/null
+++Author: A. Maitland Bottoms <bottoms@debian.org>
+++Forwarded: not-needed
+++Description: update doxygen
+++ For Debian version of Doxygen.
+++
+++--- a/docs/Doxyfile.in
++++++ b/docs/Doxyfile.in
+++@@ -1,4 +1,4 @@
+++-# Doxyfile 1.8.6
++++# Doxyfile 1.9.4
+++
+++ # This file describes the settings to be used by the documentation system
+++ # doxygen (www.doxygen.org) for a project.
+++@@ -12,16 +12,25 @@
+++ # For lists, items can also be appended using:
+++ # TAG += value [value, ...]
+++ # Values that contain spaces should be placed between quotes (\" \").
++++#
++++# Note:
++++#
++++# Use doxygen to compare the used configuration file with the template
++++# configuration file:
++++# doxygen -x [configFile]
++++# Use doxygen to compare the used configuration file with the template
++++# configuration file without replacing the environment variables:
++++# doxygen -x_noenv [configFile]
+++
+++ #---------------------------------------------------------------------------
+++ # Project related configuration options
+++ #---------------------------------------------------------------------------
+++
+++-# This tag specifies the encoding used for all characters in the config file
+++-# that follow. The default is UTF-8 which is also the encoding used for all text
+++-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+++-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+++-# for the list of possible encodings.
++++# This tag specifies the encoding used for all characters in the configuration
++++# file that follow. The default is UTF-8 which is also the encoding used for all
++++# text before the first occurrence of this tag. Doxygen uses libiconv (or the
++++# iconv built into libc) for the transcoding. See
++++# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
+++ # The default value is: UTF-8.
+++
+++ DOXYFILE_ENCODING = UTF-8
+++@@ -46,10 +55,10 @@
+++
+++ PROJECT_BRIEF = "Architecture-tuned implementations of math kernels"
+++
+++-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
+++-# the documentation. The maximum height of the logo should not exceed 55 pixels
+++-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+++-# to the output directory.
++++# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
++++# in the documentation. The maximum height of the logo should not exceed 55
++++# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
++++# the logo to the output directory.
+++
+++ PROJECT_LOGO = @CMAKE_SOURCE_DIR@/docs/volk_logo_small.png
+++
+++@@ -58,44 +67,61 @@
+++ # entered, it will be relative to the location where doxygen was started. If
+++ # left blank the current directory will be used.
+++
+++-# TODO: configure this to be a special docs directory. nw tried, but running
+++-# make doc won' create the directory, but with doxygen it will. why?
+++-
+++ OUTPUT_DIRECTORY =
+++
+++-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+++-# directories (in 2 levels) under the output directory of each output format and
+++-# will distribute the generated files over these directories. Enabling this
++++# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096
++++# sub-directories (in 2 levels) under the output directory of each output format
++++# and will distribute the generated files over these directories. Enabling this
+++ # option can be useful when feeding doxygen a huge amount of source files, where
+++ # putting all generated files in the same directory would otherwise causes
+++-# performance problems for the file system.
++++# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to
++++# control the number of sub-directories.
+++ # The default value is: NO.
+++
+++ CREATE_SUBDIRS = NO
+++
++++# Controls the number of sub-directories that will be created when
++++# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every
++++# level increment doubles the number of directories, resulting in 4096
++++# directories at level 8 which is the default and also the maximum value. The
++++# sub-directories are organized in 2 levels, the first level always has a fixed
++++# numer of 16 directories.
++++# Minimum value: 0, maximum value: 8, default value: 8.
++++# This tag requires that the tag CREATE_SUBDIRS is set to YES.
++++
++++CREATE_SUBDIRS_LEVEL = 8
++++
++++# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
++++# characters to appear in the names of generated files. If set to NO, non-ASCII
++++# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
++++# U+3044.
++++# The default value is: NO.
++++
++++ALLOW_UNICODE_NAMES = NO
++++
+++ # The OUTPUT_LANGUAGE tag is used to specify the language in which all
+++ # documentation generated by doxygen is written. Doxygen will use this
+++ # information to generate all constant output in the proper language.
+++-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+++-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+++-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+++-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+++-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+++-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+++-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+++-# Ukrainian and Vietnamese.
++++# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian,
++++# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English
++++# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek,
++++# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with
++++# English messages), Korean, Korean-en (Korean with English messages), Latvian,
++++# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese,
++++# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish,
++++# Swedish, Turkish, Ukrainian and Vietnamese.
+++ # The default value is: English.
+++
+++ OUTPUT_LANGUAGE = English
+++
+++-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
++++# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+++ # descriptions after the members that are listed in the file and class
+++ # documentation (similar to Javadoc). Set to NO to disable this.
+++ # The default value is: YES.
+++
+++ BRIEF_MEMBER_DESC = YES
+++
+++-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
++++# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+++ # description of a member or function before the detailed description
+++ #
+++ # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+++@@ -140,7 +166,7 @@
+++
+++ INLINE_INHERITED_MEMB = NO
+++
+++-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
++++# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+++ # before files name in the file list and in the header files. If set to NO the
+++ # shortest path that makes the file name unique will be used
+++ # The default value is: YES.
+++@@ -157,7 +183,8 @@
+++ # will be relative from the directory where doxygen is started.
+++ # This tag requires that the tag FULL_PATH_NAMES is set to YES.
+++
+++-STRIP_FROM_PATH = @CMAKE_BINARY_DIR@ @CMAKE_SOURCE_DIR@
++++STRIP_FROM_PATH = @CMAKE_BINARY_DIR@ \
++++ @CMAKE_SOURCE_DIR@
+++
+++ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+++ # path mentioned in the documentation of a class, which tells the reader which
+++@@ -166,7 +193,8 @@
+++ # specify the list of include paths that are normally passed to the compiler
+++ # using the -I flag.
+++
+++-STRIP_FROM_INC_PATH = @CMAKE_SOURCE_DIR@ @CMAKE_BINARY_DIR@
++++STRIP_FROM_INC_PATH = @CMAKE_SOURCE_DIR@ \
++++ @CMAKE_BINARY_DIR@
+++
+++ # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+++ # less readable) file names. This can be useful is your file systems doesn't
+++@@ -184,6 +212,16 @@
+++
+++ JAVADOC_AUTOBRIEF = NO
+++
++++# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
++++# such as
++++# /***************
++++# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
++++# Javadoc-style will behave just like regular comments and it will not be
++++# interpreted by doxygen.
++++# The default value is: NO.
++++
++++JAVADOC_BANNER = NO
++++
+++ # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+++ # line (until the first dot) of a Qt-style comment as the brief description. If
+++ # set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+++@@ -204,15 +242,23 @@
+++
+++ MULTILINE_CPP_IS_BRIEF = NO
+++
++++# By default Python docstrings are displayed as preformatted text and doxygen's
++++# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
++++# doxygen's special commands can be used and the contents of the docstring
++++# documentation blocks is shown as doxygen documentation.
++++# The default value is: YES.
++++
++++PYTHON_DOCSTRING = YES
++++
+++ # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+++ # documentation from any documented member that it re-implements.
+++ # The default value is: YES.
+++
+++ INHERIT_DOCS = YES
+++
+++-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+++-# new page for each member. If set to NO, the documentation of a member will be
+++-# part of the file/class/namespace that contains it.
++++# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
++++# page for each member. If set to NO, the documentation of a member will be part
++++# of the file/class/namespace that contains it.
+++ # The default value is: NO.
+++
+++ SEPARATE_MEMBER_PAGES = NO
+++@@ -227,20 +273,19 @@
+++ # the documentation. An alias has the form:
+++ # name=value
+++ # For example adding
+++-# "sideeffect=@par Side Effects:\n"
++++# "sideeffect=@par Side Effects:^^"
+++ # will allow you to put the command \sideeffect (or @sideeffect) in the
+++ # documentation, which will result in a user-defined paragraph with heading
+++-# "Side Effects:". You can put \n's in the value part of an alias to insert
+++-# newlines.
++++# "Side Effects:". Note that you cannot put \n's in the value part of an alias
++++# to insert newlines (in the resulting output). You can put ^^ in the value part
++++# of an alias to insert a newline as if a physical newline was in the original
++++# file. When you need a literal { or } or , in the value part of an alias you
++++# have to escape them by means of a backslash (\), this can lead to conflicts
++++# with the commands \{ and \} for these it is advised to use the version @{ and
++++# @} or use a double escape (\\{ and \\})
+++
+++ ALIASES =
+++
+++-# This tag can be used to specify a number of word-keyword mappings (TCL only).
+++-# A mapping has the form "name=value". For example adding "class=itcl::class"
+++-# will allow you to use the command class in the itcl::class meaning.
+++-
+++-TCL_SUBST =
+++-
+++ # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+++ # only. Doxygen will then generate output that is more tailored for C. For
+++ # instance, some of the names that are used will be different. The list of all
+++@@ -269,25 +314,40 @@
+++
+++ OPTIMIZE_OUTPUT_VHDL = NO
+++
++++# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
++++# sources only. Doxygen will then generate output that is more tailored for that
++++# language. For instance, namespaces will be presented as modules, types will be
++++# separated into more groups, etc.
++++# The default value is: NO.
++++
++++OPTIMIZE_OUTPUT_SLICE = NO
++++
+++ # Doxygen selects the parser to use depending on the extension of the files it
+++ # parses. With this tag you can assign which parser to use for a given
+++ # extension. Doxygen has a built-in mapping, but you can override or extend it
+++ # using this tag. The format is ext=language, where ext is a file extension, and
+++-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+++-# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+++-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+++-# (default is Fortran), use: inc=Fortran f=C.
++++# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
++++# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
++++# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
++++# FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser
++++# tries to guess whether the code is fixed or free formatted code, this is the
++++# default for Fortran type files). For instance to make doxygen treat .inc files
++++# as Fortran files (default is PHP), and .f files as C (default is Fortran),
++++# use: inc=Fortran f=C.
+++ #
+++-# Note For files without extension you can use no_extension as a placeholder.
++++# Note: For files without extension you can use no_extension as a placeholder.
+++ #
+++ # Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+++-# the files are not read by doxygen.
++++# the files are not read by doxygen. When specifying no_extension you should add
++++# * to the FILE_PATTERNS.
++++#
++++# Note see also the list of default file extension mappings.
+++
+++ EXTENSION_MAPPING =
+++
+++ # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+++ # according to the Markdown format, which allows for more readable
+++-# documentation. See http://daringfireball.net/projects/markdown/ for details.
++++# documentation. See https://daringfireball.net/projects/markdown/ for details.
+++ # The output of markdown processing is further processed by doxygen, so you can
+++ # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+++ # case of backward compatibilities issues.
+++@@ -295,10 +355,19 @@
+++
+++ MARKDOWN_SUPPORT = YES
+++
++++# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
++++# to that level are automatically included in the table of contents, even if
++++# they do not have an id attribute.
++++# Note: This feature currently applies only to Markdown headings.
++++# Minimum value: 0, maximum value: 99, default value: 5.
++++# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
++++
++++TOC_INCLUDE_HEADINGS = 5
++++
+++ # When enabled doxygen tries to link words that correspond to documented
+++ # classes, or namespaces to their corresponding documentation. Such a link can
+++-# be prevented in individual cases by by putting a % sign in front of the word
+++-# or globally by setting AUTOLINK_SUPPORT to NO.
++++# be prevented in individual cases by putting a % sign in front of the word or
++++# globally by setting AUTOLINK_SUPPORT to NO.
+++ # The default value is: YES.
+++
+++ AUTOLINK_SUPPORT = YES
+++@@ -320,7 +389,7 @@
+++ CPP_CLI_SUPPORT = NO
+++
+++ # Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+++-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
++++# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
+++ # will parse them like normal C++ but will assume all classes use public instead
+++ # of private inheritance when no explicit protection keyword is present.
+++ # The default value is: NO.
+++@@ -338,13 +407,20 @@
+++ IDL_PROPERTY_SUPPORT = YES
+++
+++ # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+++-# tag is set to YES, then doxygen will reuse the documentation of the first
++++# tag is set to YES then doxygen will reuse the documentation of the first
+++ # member in the group (if any) for the other members of the group. By default
+++ # all members of a group must be documented explicitly.
+++ # The default value is: NO.
+++
+++ DISTRIBUTE_GROUP_DOC = NO
+++
++++# If one adds a struct or class to a group and this option is enabled, then also
++++# any nested class or struct is added to the same group. By default this option
++++# is disabled and one has to add nested compounds explicitly via \ingroup.
++++# The default value is: NO.
++++
++++GROUP_NESTED_COMPOUNDS = NO
++++
+++ # Set the SUBGROUPING tag to YES to allow class member groups of the same type
+++ # (for instance a group of public functions) to be put as a subgroup of that
+++ # type (e.g. under the Public Functions section). Set it to NO to prevent
+++@@ -399,11 +475,24 @@
+++
+++ LOOKUP_CACHE_SIZE = 0
+++
++++# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
++++# during processing. When set to 0 doxygen will base this on the number of
++++# cores available in the system. You can set it explicitly to a value larger
++++# than 0 to get more control over the balance between CPU load and processing
++++# speed. At this moment only the input processing can be done using multiple
++++# threads. Since this is still an experimental feature the default is set to 1,
++++# which effectively disables parallel processing. Please report any issues you
++++# encounter. Generating dot graphs in parallel is controlled by the
++++# DOT_NUM_THREADS setting.
++++# Minimum value: 0, maximum value: 32, default value: 1.
++++
++++NUM_PROC_THREADS = 1
++++
+++ #---------------------------------------------------------------------------
+++ # Build related configuration options
+++ #---------------------------------------------------------------------------
+++
+++-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
++++# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+++ # documentation are documented, even if no documentation was available. Private
+++ # class members and static file members will be hidden unless the
+++ # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+++@@ -413,35 +502,41 @@
+++
+++ EXTRACT_ALL = YES
+++
+++-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
++++# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+++ # be included in the documentation.
+++ # The default value is: NO.
+++
+++ EXTRACT_PRIVATE = NO
+++
+++-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
++++# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
++++# methods of a class will be included in the documentation.
++++# The default value is: NO.
++++
++++EXTRACT_PRIV_VIRTUAL = NO
++++
++++# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+++ # scope will be included in the documentation.
+++ # The default value is: NO.
+++
+++ EXTRACT_PACKAGE = NO
+++
+++-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
++++# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+++ # included in the documentation.
+++ # The default value is: NO.
+++
+++ EXTRACT_STATIC = YES
+++
+++-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+++-# locally in source files will be included in the documentation. If set to NO
++++# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
++++# locally in source files will be included in the documentation. If set to NO,
+++ # only classes defined in header files are included. Does not have any effect
+++ # for Java sources.
+++ # The default value is: YES.
+++
+++ EXTRACT_LOCAL_CLASSES = YES
+++
+++-# This flag is only useful for Objective-C code. When set to YES local methods,
++++# This flag is only useful for Objective-C code. If set to YES, local methods,
+++ # which are defined in the implementation section but not in the interface are
+++-# included in the documentation. If set to NO only methods in the interface are
++++# included in the documentation. If set to NO, only methods in the interface are
+++ # included.
+++ # The default value is: NO.
+++
+++@@ -456,6 +551,13 @@
+++
+++ EXTRACT_ANON_NSPACES = NO
+++
++++# If this flag is set to YES, the name of an unnamed parameter in a declaration
++++# will be determined by the corresponding definition. By default unnamed
++++# parameters remain unnamed in the output.
++++# The default value is: YES.
++++
++++RESOLVE_UNNAMED_PARAMS = YES
++++
+++ # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+++ # undocumented members inside documented classes or files. If set to NO these
+++ # members will be included in the various overviews, but no documentation
+++@@ -466,21 +568,21 @@
+++
+++ # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+++ # undocumented classes that are normally visible in the class hierarchy. If set
+++-# to NO these classes will be included in the various overviews. This option has
+++-# no effect if EXTRACT_ALL is enabled.
++++# to NO, these classes will be included in the various overviews. This option
++++# has no effect if EXTRACT_ALL is enabled.
+++ # The default value is: NO.
+++
+++ HIDE_UNDOC_CLASSES = YES
+++
+++ # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+++-# (class|struct|union) declarations. If set to NO these declarations will be
+++-# included in the documentation.
++++# declarations. If set to NO, these declarations will be included in the
++++# documentation.
+++ # The default value is: NO.
+++
+++ HIDE_FRIEND_COMPOUNDS = NO
+++
+++ # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+++-# documentation blocks found inside the body of a function. If set to NO these
++++# documentation blocks found inside the body of a function. If set to NO, these
+++ # blocks will be appended to the function's detailed documentation block.
+++ # The default value is: NO.
+++
+++@@ -493,22 +595,42 @@
+++
+++ INTERNAL_DOCS = NO
+++
+++-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+++-# names in lower-case letters. If set to YES upper-case letters are also
+++-# allowed. This is useful if you have classes or files whose names only differ
+++-# in case and if your file system supports case sensitive file names. Windows
+++-# and Mac users are advised to set this option to NO.
++++# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
++++# able to match the capabilities of the underlying filesystem. In case the
++++# filesystem is case sensitive (i.e. it supports files in the same directory
++++# whose names only differ in casing), the option must be set to YES to properly
++++# deal with such files in case they appear in the input. For filesystems that
++++# are not case sensitive the option should be set to NO to properly deal with
++++# output files written for symbols that only differ in casing, such as for two
++++# classes, one named CLASS and the other named Class, and to also support
++++# references to files without having to specify the exact matching casing. On
++++# Windows (including Cygwin) and MacOS, users should typically set this option
++++# to NO, whereas on Linux or other Unix flavors it should typically be set to
++++# YES.
+++ # The default value is: system dependent.
+++
+++ CASE_SENSE_NAMES = NO
+++
+++ # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+++-# their full class and namespace scopes in the documentation. If set to YES the
++++# their full class and namespace scopes in the documentation. If set to YES, the
+++ # scope will be hidden.
+++ # The default value is: NO.
+++
+++ HIDE_SCOPE_NAMES = NO
+++
++++# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
++++# append additional text to a page's title, such as Class Reference. If set to
++++# YES the compound reference will be hidden.
++++# The default value is: NO.
++++
++++HIDE_COMPOUND_REFERENCE= NO
++++
++++# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
++++# will show which file needs to be included to use the class.
++++# The default value is: YES.
++++
++++SHOW_HEADERFILE = YES
++++
+++ # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+++ # the files that are included by a file in the documentation of that file.
+++ # The default value is: YES.
+++@@ -536,14 +658,14 @@
+++
+++ # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+++ # (detailed) documentation of file and class members alphabetically by member
+++-# name. If set to NO the members will appear in declaration order.
++++# name. If set to NO, the members will appear in declaration order.
+++ # The default value is: YES.
+++
+++ SORT_MEMBER_DOCS = YES
+++
+++ # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+++ # descriptions of file, namespace and class members alphabetically by member
+++-# name. If set to NO the members will appear in declaration order. Note that
++++# name. If set to NO, the members will appear in declaration order. Note that
+++ # this will also influence the order of the classes in the class list.
+++ # The default value is: NO.
+++
+++@@ -588,27 +710,25 @@
+++
+++ STRICT_PROTO_MATCHING = NO
+++
+++-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+++-# todo list. This list is created by putting \todo commands in the
+++-# documentation.
++++# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
++++# list. This list is created by putting \todo commands in the documentation.
+++ # The default value is: YES.
+++
+++ GENERATE_TODOLIST = YES
+++
+++-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+++-# test list. This list is created by putting \test commands in the
+++-# documentation.
++++# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
++++# list. This list is created by putting \test commands in the documentation.
+++ # The default value is: YES.
+++
+++ GENERATE_TESTLIST = YES
+++
+++-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
++++# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+++ # list. This list is created by putting \bug commands in the documentation.
+++ # The default value is: YES.
+++
+++ GENERATE_BUGLIST = YES
+++
+++-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
++++# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+++ # the deprecated list. This list is created by putting \deprecated commands in
+++ # the documentation.
+++ # The default value is: YES.
+++@@ -633,8 +753,8 @@
+++ MAX_INITIALIZER_LINES = 30
+++
+++ # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+++-# the bottom of the documentation of classes and structs. If set to YES the list
+++-# will mention the files that were used to generate the documentation.
++++# the bottom of the documentation of classes and structs. If set to YES, the
++++# list will mention the files that were used to generate the documentation.
+++ # The default value is: YES.
+++
+++ SHOW_USED_FILES = NO
+++@@ -668,7 +788,8 @@
+++ # output files in an output format independent way. To create the layout file
+++ # that represents doxygen's defaults, run doxygen with the -l option. You can
+++ # optionally specify a file name after the option, if omitted DoxygenLayout.xml
+++-# will be used as the name of the layout file.
++++# will be used as the name of the layout file. See also section "Changing the
++++# layout of pages" for information.
+++ #
+++ # Note that if you run doxygen from a directory containing a file called
+++ # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+++@@ -679,11 +800,10 @@
+++ # The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+++ # the reference definitions. This must be a list of .bib files. The .bib
+++ # extension is automatically appended if omitted. This requires the bibtex tool
+++-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
++++# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+++ # For LaTeX the style of the bibliography can be controlled using
+++ # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+++-# search path. Do not use file names with spaces, bibtex cannot handle them. See
+++-# also \cite for info how to create references.
++++# search path. See also \cite for info how to create references.
+++
+++ CITE_BIB_FILES =
+++
+++@@ -699,7 +819,7 @@
+++ QUIET = NO
+++
+++ # The WARNINGS tag can be used to turn on/off the warning messages that are
+++-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
++++# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+++ # this implies that the warnings are on.
+++ #
+++ # Tip: Turn warnings on while writing the documentation.
+++@@ -707,7 +827,7 @@
+++
+++ WARNINGS = YES
+++
+++-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
++++# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+++ # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+++ # will automatically be disabled.
+++ # The default value is: YES.
+++@@ -715,34 +835,66 @@
+++ WARN_IF_UNDOCUMENTED = YES
+++
+++ # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+++-# potential errors in the documentation, such as not documenting some parameters
+++-# in a documented function, or documenting parameters that don't exist or using
+++-# markup commands wrongly.
++++# potential errors in the documentation, such as documenting some parameters in
++++# a documented function twice, or documenting parameters that don't exist or
++++# using markup commands wrongly.
+++ # The default value is: YES.
+++
+++ WARN_IF_DOC_ERROR = YES
+++
++++# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
++++# function parameter documentation. If set to NO, doxygen will accept that some
++++# parameters have no documentation without warning.
++++# The default value is: YES.
++++
++++WARN_IF_INCOMPLETE_DOC = YES
++++
+++ # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+++ # are documented, but have no documentation for their parameters or return
+++-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+++-# documentation, but not about the absence of documentation.
++++# value. If set to NO, doxygen will only warn about wrong parameter
++++# documentation, but not about the absence of documentation. If EXTRACT_ALL is
++++# set to YES then this flag will automatically be disabled. See also
++++# WARN_IF_INCOMPLETE_DOC
+++ # The default value is: NO.
+++
+++ WARN_NO_PARAMDOC = NO
+++
++++# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
++++# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
++++# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
++++# at the end of the doxygen process doxygen will return with a non-zero status.
++++# Possible values are: NO, YES and FAIL_ON_WARNINGS.
++++# The default value is: NO.
++++
++++WARN_AS_ERROR = NO
++++
+++ # The WARN_FORMAT tag determines the format of the warning messages that doxygen
+++ # can produce. The string should contain the $file, $line, and $text tags, which
+++ # will be replaced by the file and line number from which the warning originated
+++ # and the warning text. Optionally the format may contain $version, which will
+++ # be replaced by the version of the file (if it could be obtained via
+++ # FILE_VERSION_FILTER)
++++# See also: WARN_LINE_FORMAT
+++ # The default value is: $file:$line: $text.
+++
+++ WARN_FORMAT = "$file:$line: $text"
+++
++++# In the $text part of the WARN_FORMAT command it is possible that a reference
++++# to a more specific place is given. To make it easier to jump to this place
++++# (outside of doxygen) the user can define a custom "cut" / "paste" string.
++++# Example:
++++# WARN_LINE_FORMAT = "'vi $file +$line'"
++++# See also: WARN_FORMAT
++++# The default value is: at line $line of file $file.
++++
++++WARN_LINE_FORMAT = "at line $line of file $file"
++++
+++ # The WARN_LOGFILE tag can be used to specify a file to which warning and error
+++ # messages should be written. If left blank the output is written to standard
+++-# error (stderr).
++++# error (stderr). In case the file specified cannot be opened for writing the
++++# warning and error messages are written to standard error. When the file - is
++++# specified the warning and error messages are written to standard output
++++# (stdout).
+++
+++ WARN_LOGFILE =
+++
+++@@ -753,7 +905,7 @@
+++ # The INPUT tag is used to specify the files and/or directories that contain
+++ # documented source files. You may enter file names like myfile.cpp or
+++ # directories like /usr/src/myproject. Separate the files or directories with
+++-# spaces.
++++# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+++ # Note: If this tag is empty the current directory is searched.
+++
+++ INPUT = @CMAKE_SOURCE_DIR@
+++@@ -761,20 +913,29 @@
+++ # This tag can be used to specify the character encoding of the source files
+++ # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+++ # libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+++-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+++-# possible encodings.
++++# documentation (see:
++++# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
+++ # The default value is: UTF-8.
+++
+++ INPUT_ENCODING = UTF-8
+++
+++ # If the value of the INPUT tag contains directories, you can use the
+++ # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+++-# *.h) to filter out the source-files in the directories. If left blank the
+++-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+++-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+++-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+++-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+++-# *.qsf, *.as and *.js.
++++# *.h) to filter out the source-files in the directories.
++++#
++++# Note that for custom extensions or not directly supported extensions you also
++++# need to set EXTENSION_MAPPING for the extension otherwise the files are not
++++# read by doxygen.
++++#
++++# Note the list of default checked file patterns might differ from the list of
++++# default file extension mappings.
++++#
++++# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
++++# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
++++# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml,
++++# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C
++++# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
++++# *.vhdl, *.ucf, *.qsf and *.ice.
+++
+++ FILE_PATTERNS = *.c \
+++ *.cc \
+++@@ -832,7 +993,14 @@
+++ # Note that relative paths are relative to the directory from which doxygen is
+++ # run.
+++
+++-EXCLUDE = @CMAKE_BINARY_DIR@ @CMAKE_SOURCE_DIR@/cpu_features @CMAKE_SOURCE_DIR@/README.md @CMAKE_SOURCE_DIR@/cmake @CMAKE_SOURCE_DIR@/docs/AUTHORS_RESUBMITTING_UNDER_LGPL_LICENSE.md @CMAKE_SOURCE_DIR@/apps @CMAKE_SOURCE_DIR@/lib/*qa* @CMAKE_SOURCE_DIR@/tmpl
++++EXCLUDE = @CMAKE_BINARY_DIR@ \
++++ @CMAKE_SOURCE_DIR@/cpu_features \
++++ @CMAKE_SOURCE_DIR@/README.md \
++++ @CMAKE_SOURCE_DIR@/cmake \
++++ @CMAKE_SOURCE_DIR@/docs/AUTHORS_RESUBMITTING_UNDER_LGPL_LICENSE.md \
++++ @CMAKE_SOURCE_DIR@/apps \
++++ @CMAKE_SOURCE_DIR@/lib/*qa* \
++++ @CMAKE_SOURCE_DIR@/tmpl
+++
+++ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+++ # directories that are symbolic links (a Unix file system feature) are excluded
+++@@ -854,7 +1022,7 @@
+++ # (namespaces, classes, functions, etc.) that should be excluded from the
+++ # output. The symbol name can be a fully qualified name, a word, or if the
+++ # wildcard * is used, a substring. Examples: ANamespace, AClass,
+++-# AClass::ANamespace, ANamespace::*Test
++++# ANamespace::AClass, ANamespace::*Test
+++ #
+++ # Note that the wildcards are matched against the file with absolute path, so to
+++ # exclude all test directories use the pattern */test/*
+++@@ -901,6 +1069,10 @@
+++ # Note that the filter must not add or remove lines; it is applied before the
+++ # code is scanned, but not when the output code is generated. If lines are added
+++ # or removed, the anchors will not be placed correctly.
++++#
++++# Note that for custom extensions or not directly supported extensions you also
++++# need to set EXTENSION_MAPPING for the extension otherwise the files are not
++++# properly processed by doxygen.
+++
+++ INPUT_FILTER =
+++
+++@@ -910,11 +1082,15 @@
+++ # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+++ # filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+++ # patterns match the file name, INPUT_FILTER is applied.
++++#
++++# Note that for custom extensions or not directly supported extensions you also
++++# need to set EXTENSION_MAPPING for the extension otherwise the files are not
++++# properly processed by doxygen.
+++
+++ FILTER_PATTERNS =
+++
+++ # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+++-# INPUT_FILTER ) will also be used to filter the input files that are used for
++++# INPUT_FILTER) will also be used to filter the input files that are used for
+++ # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+++ # The default value is: NO.
+++
+++@@ -962,7 +1138,7 @@
+++ STRIP_CODE_COMMENTS = YES
+++
+++ # If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+++-# function all documented functions referencing it will be listed.
++++# entity all documented functions referencing it will be listed.
+++ # The default value is: NO.
+++
+++ REFERENCED_BY_RELATION = NO
+++@@ -974,7 +1150,7 @@
+++ REFERENCES_RELATION = NO
+++
+++ # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+++-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
++++# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+++ # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+++ # link to the documentation.
+++ # The default value is: YES.
+++@@ -994,12 +1170,12 @@
+++ # If the USE_HTAGS tag is set to YES then the references to source code will
+++ # point to the HTML generated by the htags(1) tool instead of doxygen built-in
+++ # source browser. The htags tool is part of GNU's global source tagging system
+++-# (see http://www.gnu.org/software/global/global.html). You will need version
++++# (see https://www.gnu.org/software/global/global.html). You will need version
+++ # 4.8.6 or higher.
+++ #
+++ # To use it do the following:
+++ # - Install the latest version of global
+++-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
++++# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
+++ # - Make sure the INPUT points to the root of the source tree
+++ # - Run doxygen as normal
+++ #
+++@@ -1021,6 +1197,46 @@
+++
+++ VERBATIM_HEADERS = YES
+++
++++# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
++++# clang parser (see:
++++# http://clang.llvm.org/) for more accurate parsing at the cost of reduced
++++# performance. This can be particularly helpful with template rich C++ code for
++++# which doxygen's built-in parser lacks the necessary type information.
++++# Note: The availability of this option depends on whether or not doxygen was
++++# generated with the -Duse_libclang=ON option for CMake.
++++# The default value is: NO.
++++
++++CLANG_ASSISTED_PARSING = NO
++++
++++# If the CLANG_ASSISTED_PARSING tag is set to YES and the CLANG_ADD_INC_PATHS
++++# tag is set to YES then doxygen will add the directory of each input to the
++++# include path.
++++# The default value is: YES.
++++# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
++++
++++CLANG_ADD_INC_PATHS = YES
++++
++++# If clang assisted parsing is enabled you can provide the compiler with command
++++# line options that you would normally use when invoking the compiler. Note that
++++# the include paths will already be set by doxygen for the files and directories
++++# specified with INPUT and INCLUDE_PATH.
++++# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
++++
++++CLANG_OPTIONS =
++++
++++# If clang assisted parsing is enabled you can provide the clang parser with the
++++# path to the directory containing a file called compile_commands.json. This
++++# file is the compilation database (see:
++++# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the
++++# options used when the source files were built. This is equivalent to
++++# specifying the -p option to a clang tool, such as clang-check. These options
++++# will then be passed to the parser. Any options specified with CLANG_OPTIONS
++++# will be added as well.
++++# Note: The availability of this option depends on whether or not doxygen was
++++# generated with the -Duse_libclang=ON option for CMake.
++++
++++CLANG_DATABASE_PATH =
++++
+++ #---------------------------------------------------------------------------
+++ # Configuration options related to the alphabetical class index
+++ #---------------------------------------------------------------------------
+++@@ -1032,13 +1248,6 @@
+++
+++ ALPHABETICAL_INDEX = YES
+++
+++-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+++-# which the alphabetical index list will be split.
+++-# Minimum value: 1, maximum value: 20, default value: 5.
+++-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+++-
+++-COLS_IN_ALPHA_INDEX = 5
+++-
+++ # In case all classes in a project start with a common prefix, all classes will
+++ # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+++ # can be used to specify a prefix (or a list of prefixes) that should be ignored
+++@@ -1051,7 +1260,7 @@
+++ # Configuration options related to the HTML output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
++++# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+++ # The default value is: YES.
+++
+++ GENERATE_HTML = YES
+++@@ -1099,7 +1308,7 @@
+++ # that doxygen normally uses.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++-HTML_FOOTER = ""
++++HTML_FOOTER =
+++
+++ # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+++ # sheet that is used by each HTML page. It can be used to fine-tune the look of
+++@@ -1113,13 +1322,15 @@
+++
+++ HTML_STYLESHEET =
+++
+++-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+++-# defined cascading style sheet that is included after the standard style sheets
++++# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
++++# cascading style sheets that are included after the standard style sheets
+++ # created by doxygen. Using this option one can overrule certain style aspects.
+++ # This is preferred over using HTML_STYLESHEET since it does not replace the
+++ # standard style sheet and is therefore more robust against future updates.
+++-# Doxygen will copy the style sheet file to the output directory. For an example
+++-# see the documentation.
++++# Doxygen will copy the style sheet files to the output directory.
++++# Note: The order of the extra style sheet files is of importance (e.g. the last
++++# style sheet in the list overrules the setting of the previous ones in the
++++# list). For an example see the documentation.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++ HTML_EXTRA_STYLESHEET =
+++@@ -1135,9 +1346,9 @@
+++ HTML_EXTRA_FILES =
+++
+++ # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+++-# will adjust the colors in the stylesheet and background images according to
+++-# this color. Hue is specified as an angle on a colorwheel, see
+++-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
++++# will adjust the colors in the style sheet and background images according to
++++# this color. Hue is specified as an angle on a color-wheel, see
++++# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
+++ # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+++ # purple, and 360 is red again.
+++ # Minimum value: 0, maximum value: 359, default value: 220.
+++@@ -1146,7 +1357,7 @@
+++ HTML_COLORSTYLE_HUE = 220
+++
+++ # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+++-# in the HTML output. For a value of 0 the output will use grayscales only. A
++++# in the HTML output. For a value of 0 the output will use gray-scales only. A
+++ # value of 255 will produce the most vivid colors.
+++ # Minimum value: 0, maximum value: 255, default value: 100.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++@@ -1166,12 +1377,24 @@
+++
+++ # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+++ # page will contain the date and time when the page was generated. Setting this
+++-# to NO can help when comparing the output of multiple runs.
+++-# The default value is: YES.
++++# to YES can help to show when doxygen was last run and thus if the
++++# documentation is up to date.
++++# The default value is: NO.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++ HTML_TIMESTAMP = NO
+++
++++# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
++++# documentation will contain a main index with vertical navigation menus that
++++# are dynamically created via JavaScript. If disabled, the navigation index will
++++# consist of multiple levels of tabs that are statically embedded in every HTML
++++# page. Disable this option to support browsers that do not have JavaScript,
++++# like the Qt help browser.
++++# The default value is: YES.
++++# This tag requires that the tag GENERATE_HTML is set to YES.
++++
++++HTML_DYNAMIC_MENUS = YES
++++
+++ # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+++ # documentation will contain sections that can be hidden and shown after the
+++ # page has loaded.
+++@@ -1195,13 +1418,14 @@
+++
+++ # If the GENERATE_DOCSET tag is set to YES, additional index files will be
+++ # generated that can be used as input for Apple's Xcode 3 integrated development
+++-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+++-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+++-# Makefile in the HTML output directory. Running make will produce the docset in
+++-# that directory and running make install will install the docset in
++++# environment (see:
++++# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
++++# create a documentation set, doxygen will generate a Makefile in the HTML
++++# output directory. Running make will produce the docset in that directory and
++++# running make install will install the docset in
+++ # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+++-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+++-# for more information.
++++# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
++++# genXcode/_index.html for more information.
+++ # The default value is: NO.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++@@ -1215,6 +1439,13 @@
+++
+++ DOCSET_FEEDNAME = "Doxygen generated docs"
+++
++++# This tag determines the URL of the docset feed. A documentation feed provides
++++# an umbrella under which multiple documentation sets from a single provider
++++# (such as a company or product suite) can be grouped.
++++# This tag requires that the tag GENERATE_DOCSET is set to YES.
++++
++++DOCSET_FEEDURL =
++++
+++ # This tag specifies a string that should uniquely identify the documentation
+++ # set bundle. This should be a reverse domain-name style string, e.g.
+++ # com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+++@@ -1240,8 +1471,12 @@
+++ # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+++ # additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+++ # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+++-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+++-# Windows.
++++# on Windows. In the beginning of 2021 Microsoft took the original page, with
++++# a.o. the download links, offline (the HTML help workshop was already many years
++++# in maintenance mode). You can download the HTML help workshop from the web
++++# archives at Installation executable (see:
++++# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
++++# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
+++ #
+++ # The HTML Help Workshop contains a compiler that can convert all HTML output
+++ # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+++@@ -1263,28 +1498,29 @@
+++ CHM_FILE =
+++
+++ # The HHC_LOCATION tag can be used to specify the location (absolute path
+++-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
++++# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+++ # doxygen will try to run the HTML help compiler on the generated index.hhp.
+++ # The file has to be specified with full path.
+++ # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+++
+++ HHC_LOCATION =
+++
+++-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+++-# YES) or that it should be included in the master .chm file ( NO).
++++# The GENERATE_CHI flag controls if a separate .chi index file is generated
++++# (YES) or that it should be included in the main .chm file (NO).
+++ # The default value is: NO.
+++ # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+++
+++ GENERATE_CHI = NO
+++
+++-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
++++# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+++ # and project file content.
+++ # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+++
+++ CHM_INDEX_ENCODING =
+++
+++-# The BINARY_TOC flag controls whether a binary table of contents is generated (
+++-# YES) or a normal table of contents ( NO) in the .chm file.
++++# The BINARY_TOC flag controls whether a binary table of contents is generated
++++# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
++++# enables the Previous and Next buttons.
+++ # The default value is: NO.
+++ # This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+++
+++@@ -1315,7 +1551,8 @@
+++
+++ # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+++ # Project output. For more information please see Qt Help Project / Namespace
+++-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
++++# (see:
++++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+++ # The default value is: org.doxygen.Project.
+++ # This tag requires that the tag GENERATE_QHP is set to YES.
+++
+++@@ -1323,8 +1560,8 @@
+++
+++ # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+++ # Help Project output. For more information please see Qt Help Project / Virtual
+++-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+++-# folders).
++++# Folders (see:
++++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
+++ # The default value is: doc.
+++ # This tag requires that the tag GENERATE_QHP is set to YES.
+++
+++@@ -1332,30 +1569,30 @@
+++
+++ # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+++ # filter to add. For more information please see Qt Help Project / Custom
+++-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+++-# filters).
++++# Filters (see:
++++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+++ # This tag requires that the tag GENERATE_QHP is set to YES.
+++
+++ QHP_CUST_FILTER_NAME =
+++
+++ # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+++ # custom filter to add. For more information please see Qt Help Project / Custom
+++-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+++-# filters).
++++# Filters (see:
++++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+++ # This tag requires that the tag GENERATE_QHP is set to YES.
+++
+++ QHP_CUST_FILTER_ATTRS =
+++
+++ # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+++ # project's filter section matches. Qt Help Project / Filter Attributes (see:
+++-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
++++# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+++ # This tag requires that the tag GENERATE_QHP is set to YES.
+++
+++ QHP_SECT_FILTER_ATTRS =
+++
+++-# The QHG_LOCATION tag can be used to specify the location of Qt's
+++-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+++-# generated .qhp file.
++++# The QHG_LOCATION tag can be used to specify the location (absolute path
++++# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
++++# run qhelpgenerator on the generated .qhp file.
+++ # This tag requires that the tag GENERATE_QHP is set to YES.
+++
+++ QHG_LOCATION =
+++@@ -1397,17 +1634,29 @@
+++ # index structure (just like the one that is generated for HTML Help). For this
+++ # to work a browser that supports JavaScript, DHTML, CSS and frames is required
+++ # (i.e. any modern browser). Windows users are probably better off using the
+++-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+++-# further fine-tune the look of the index. As an example, the default style
+++-# sheet generated by doxygen has an example that shows how to put an image at
+++-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+++-# the same information as the tab index, you could consider setting
+++-# DISABLE_INDEX to YES when enabling this option.
++++# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
++++# further fine tune the look of the index (see "Fine-tuning the output"). As an
++++# example, the default style sheet generated by doxygen has an example that
++++# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
++++# Since the tree basically has the same information as the tab index, you could
++++# consider setting DISABLE_INDEX to YES when enabling this option.
+++ # The default value is: NO.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++ GENERATE_TREEVIEW = YES
+++
++++# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
++++# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
++++# area (value NO) or if it should extend to the full height of the window (value
++++# YES). Setting this to YES gives a layout similar to
++++# https://docs.readthedocs.io with more room for contents, but less room for the
++++# project logo, title, and description. If either GENERATE_TREEVIEW or
++++# DISABLE_INDEX is set to NO, this option has no effect.
++++# The default value is: NO.
++++# This tag requires that the tag GENERATE_HTML is set to YES.
++++
++++FULL_SIDEBAR = NO
++++
+++ # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+++ # doxygen will group on one line in the generated HTML documentation.
+++ #
+++@@ -1425,13 +1674,31 @@
+++
+++ TREEVIEW_WIDTH = 250
+++
+++-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
++++# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+++ # external symbols imported via tag files in a separate window.
+++ # The default value is: NO.
+++ # This tag requires that the tag GENERATE_HTML is set to YES.
+++
+++ EXT_LINKS_IN_WINDOW = NO
+++
++++# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
++++# addresses.
++++# The default value is: YES.
++++# This tag requires that the tag GENERATE_HTML is set to YES.
++++
++++OBFUSCATE_EMAILS = YES
++++
++++# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
++++# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
++++# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
++++# the HTML output. These images will generally look nicer at scaled resolutions.
++++# Possible values are: png (the default) and svg (looks nicer but requires the
++++# pdf2svg or inkscape tool).
++++# The default value is: png.
++++# This tag requires that the tag GENERATE_HTML is set to YES.
++++
++++HTML_FORMULA_FORMAT = png
++++
+++ # Use this tag to change the font size of LaTeX formulas included as images in
+++ # the HTML documentation. When you change the font size after a successful
+++ # doxygen run you need to manually remove any form_*.png images from the HTML
+++@@ -1441,7 +1708,7 @@
+++
+++ FORMULA_FONTSIZE = 10
+++
+++-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
++++# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+++ # generated for formulas are transparent PNGs. Transparent PNGs are not
+++ # supported properly for IE 6.0, but are supported on all modern browsers.
+++ #
+++@@ -1452,9 +1719,15 @@
+++
+++ FORMULA_TRANSPARENT = YES
+++
++++# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
++++# to create new LaTeX commands to be used in formulas as building blocks. See
++++# the section "Including formulas" for details.
++++
++++FORMULA_MACROFILE =
++++
+++ # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+++-# http://www.mathjax.org) which uses client side Javascript for the rendering
+++-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
++++# https://www.mathjax.org) which uses client side JavaScript for the rendering
++++# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+++ # installed or if you want formulas to look prettier in the HTML output. When
+++ # enabled you may also need to install MathJax separately and configure the path
+++ # to it using the MATHJAX_RELPATH option.
+++@@ -1463,11 +1736,29 @@
+++
+++ USE_MATHJAX = NO
+++
++++# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
++++# Note that the different versions of MathJax have different requirements with
++++# regards to the different settings, so it is possible that also other MathJax
++++# settings have to be changed when switching between the different MathJax
++++# versions.
++++# Possible values are: MathJax_2 and MathJax_3.
++++# The default value is: MathJax_2.
++++# This tag requires that the tag USE_MATHJAX is set to YES.
++++
++++MATHJAX_VERSION = MathJax_2
++++
+++ # When MathJax is enabled you can set the default output format to be used for
+++-# the MathJax output. See the MathJax site (see:
+++-# http://docs.mathjax.org/en/latest/output.html) for more details.
++++# the MathJax output. For more details about the output format see MathJax
++++# version 2 (see:
++++# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
++++# (see:
++++# http://docs.mathjax.org/en/latest/web/components/output.html).
+++ # Possible values are: HTML-CSS (which is slower, but has the best
+++-# compatibility), NativeMML (i.e. MathML) and SVG.
++++# compatibility. This is the name for Mathjax version 2, for MathJax version 3
++++# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
++++# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
++++# is the name for Mathjax version 3, for MathJax version 2 this will be
++++# translated into HTML-CSS) and SVG.
+++ # The default value is: HTML-CSS.
+++ # This tag requires that the tag USE_MATHJAX is set to YES.
+++
+++@@ -1480,22 +1771,29 @@
+++ # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+++ # Content Delivery Network so you can quickly see the result without installing
+++ # MathJax. However, it is strongly recommended to install a local copy of
+++-# MathJax from http://www.mathjax.org before deployment.
+++-# The default value is: http://cdn.mathjax.org/mathjax/latest.
++++# MathJax from https://www.mathjax.org before deployment. The default value is:
++++# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
++++# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
+++ # This tag requires that the tag USE_MATHJAX is set to YES.
+++
+++ MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+++
+++ # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+++ # extension names that should be enabled during MathJax rendering. For example
++++# for MathJax version 2 (see
++++# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
+++ # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
++++# For example for MathJax version 3 (see
++++# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
++++# MATHJAX_EXTENSIONS = ams
+++ # This tag requires that the tag USE_MATHJAX is set to YES.
+++
+++ MATHJAX_EXTENSIONS =
+++
+++ # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+++ # of code that will be used on startup of the MathJax code. See the MathJax site
+++-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
++++# (see:
++++# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+++ # example see the documentation.
+++ # This tag requires that the tag USE_MATHJAX is set to YES.
+++
+++@@ -1523,12 +1821,12 @@
+++ SEARCHENGINE = YES
+++
+++ # When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+++-# implemented using a web server instead of a web client using Javascript. There
+++-# are two flavours of web server based searching depending on the
+++-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+++-# searching and an index file used by the script. When EXTERNAL_SEARCH is
+++-# enabled the indexing and searching needs to be provided by external tools. See
+++-# the section "External Indexing and Searching" for details.
++++# implemented using a web server instead of a web client using JavaScript. There
++++# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
++++# setting. When disabled, doxygen will generate a PHP script for searching and
++++# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
++++# and searching needs to be provided by external tools. See the section
++++# "External Indexing and Searching" for details.
+++ # The default value is: NO.
+++ # This tag requires that the tag SEARCHENGINE is set to YES.
+++
+++@@ -1540,9 +1838,10 @@
+++ # external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+++ # search results.
+++ #
+++-# Doxygen ships with an example indexer ( doxyindexer) and search engine
++++# Doxygen ships with an example indexer (doxyindexer) and search engine
+++ # (doxysearch.cgi) which are based on the open source search engine library
+++-# Xapian (see: http://xapian.org/).
++++# Xapian (see:
++++# https://xapian.org/).
+++ #
+++ # See the section "External Indexing and Searching" for details.
+++ # The default value is: NO.
+++@@ -1553,10 +1852,11 @@
+++ # The SEARCHENGINE_URL should point to a search engine hosted by a web server
+++ # which will return the search results when EXTERNAL_SEARCH is enabled.
+++ #
+++-# Doxygen ships with an example indexer ( doxyindexer) and search engine
++++# Doxygen ships with an example indexer (doxyindexer) and search engine
+++ # (doxysearch.cgi) which are based on the open source search engine library
+++-# Xapian (see: http://xapian.org/). See the section "External Indexing and
+++-# Searching" for details.
++++# Xapian (see:
++++# https://xapian.org/). See the section "External Indexing and Searching" for
++++# details.
+++ # This tag requires that the tag SEARCHENGINE is set to YES.
+++
+++ SEARCHENGINE_URL =
+++@@ -1591,7 +1891,7 @@
+++ # Configuration options related to the LaTeX output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
++++# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+++ # The default value is: YES.
+++
+++ GENERATE_LATEX = NO
+++@@ -1607,22 +1907,36 @@
+++ # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+++ # invoked.
+++ #
+++-# Note that when enabling USE_PDFLATEX this option is only used for generating
+++-# bitmaps for formulas in the HTML output, but not in the Makefile that is
+++-# written to the output directory.
+++-# The default file is: latex.
++++# Note that when not enabling USE_PDFLATEX the default is latex; when enabling
++++# USE_PDFLATEX the default is pdflatex, and when in the latter case latex is
++++# chosen, this is overwritten by pdflatex. For specific output languages the
++++# default can have been set differently, this depends on the implementation of
++++# the output language.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++ LATEX_CMD_NAME = latex
+++
+++ # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+++ # index for LaTeX.
++++# Note: This tag is used in the Makefile / make.bat.
++++# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
++++# (.tex).
+++ # The default file is: makeindex.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++ MAKEINDEX_CMD_NAME = makeindex
+++
+++-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
++++# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
++++# generate index for LaTeX. In case there is no backslash (\) as first character
++++# it will be automatically added in the LaTeX code.
++++# Note: This tag is used in the generated output file (.tex).
++++# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
++++# The default value is: makeindex.
++++# This tag requires that the tag GENERATE_LATEX is set to YES.
++++
++++LATEX_MAKEINDEX_CMD = makeindex
++++
++++# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+++ # documents. This may be useful for small projects and may help to save some
+++ # trees in general.
+++ # The default value is: NO.
+++@@ -1640,39 +1954,57 @@
+++ PAPER_TYPE = a4
+++
+++ # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+++-# that should be included in the LaTeX output. To get the times font for
+++-# instance you can specify
+++-# EXTRA_PACKAGES=times
++++# that should be included in the LaTeX output. The package can be specified just
++++# by its name or with the correct syntax as to be used with the LaTeX
++++# \usepackage command. To get the times font for instance you can specify :
++++# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
++++# To use the option intlimits with the amsmath package you can specify:
++++# EXTRA_PACKAGES=[intlimits]{amsmath}
+++ # If left blank no extra packages will be included.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++ EXTRA_PACKAGES =
+++
+++-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+++-# generated LaTeX document. The header should contain everything until the first
+++-# chapter. If it is left blank doxygen will generate a standard header. See
+++-# section "Doxygen usage" for information on how to let doxygen write the
+++-# default header to a separate file.
+++-#
+++-# Note: Only use a user-defined header if you know what you are doing! The
+++-# following commands have a special meaning inside the header: $title,
+++-# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+++-# replace them by respectively the title of the page, the current date and time,
+++-# only the current date, the version number of doxygen, the project name (see
+++-# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
++++# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for
++++# the generated LaTeX document. The header should contain everything until the
++++# first chapter. If it is left blank doxygen will generate a standard header. It
++++# is highly recommended to start with a default header using
++++# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty
++++# and then modify the file new_header.tex. See also section "Doxygen usage" for
++++# information on how to generate the default header that doxygen normally uses.
++++#
++++# Note: Only use a user-defined header if you know what you are doing!
++++# Note: The header is subject to change so you typically have to regenerate the
++++# default header when upgrading to a newer version of doxygen. The following
++++# commands have a special meaning inside the header (and footer): For a
++++# description of the possible markers and block names see the documentation.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++ LATEX_HEADER =
+++
+++-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+++-# generated LaTeX document. The footer should contain everything after the last
+++-# chapter. If it is left blank doxygen will generate a standard footer.
+++-#
+++-# Note: Only use a user-defined footer if you know what you are doing!
++++# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for
++++# the generated LaTeX document. The footer should contain everything after the
++++# last chapter. If it is left blank doxygen will generate a standard footer. See
++++# LATEX_HEADER for more information on how to generate a default footer and what
++++# special commands can be used inside the footer. See also section "Doxygen
++++# usage" for information on how to generate the default footer that doxygen
++++# normally uses. Note: Only use a user-defined footer if you know what you are
++++# doing!
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++ LATEX_FOOTER =
+++
++++# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
++++# LaTeX style sheets that are included after the standard style sheets created
++++# by doxygen. Using this option one can overrule certain style aspects. Doxygen
++++# will copy the style sheet files to the output directory.
++++# Note: The order of the extra style sheet files is of importance (e.g. the last
++++# style sheet in the list overrules the setting of the previous ones in the
++++# list).
++++# This tag requires that the tag GENERATE_LATEX is set to YES.
++++
++++LATEX_EXTRA_STYLESHEET =
++++
+++ # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+++ # other source files which should be copied to the LATEX_OUTPUT output
+++ # directory. Note that the files will be copied as-is; there are no commands or
+++@@ -1690,9 +2022,11 @@
+++
+++ PDF_HYPERLINKS = YES
+++
+++-# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+++-# the PDF file directly from the LaTeX files. Set this option to YES to get a
+++-# higher quality PDF documentation.
++++# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as
++++# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX
++++# files. Set this option to YES, to get a higher quality PDF documentation.
++++#
++++# See also section LATEX_CMD_NAME for selecting the engine.
+++ # The default value is: YES.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++@@ -1700,8 +2034,7 @@
+++
+++ # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+++ # command to the generated LaTeX files. This will instruct LaTeX to keep running
+++-# if errors occur, instead of asking the user for help. This option is also used
+++-# when generating formulas in HTML.
++++# if errors occur, instead of asking the user for help.
+++ # The default value is: NO.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++@@ -1714,29 +2047,35 @@
+++
+++ LATEX_HIDE_INDICES = NO
+++
+++-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+++-# code with syntax highlighting in the LaTeX output.
+++-#
+++-# Note that which sources are shown also depends on other settings such as
+++-# SOURCE_BROWSER.
+++-# The default value is: NO.
+++-# This tag requires that the tag GENERATE_LATEX is set to YES.
+++-
+++-LATEX_SOURCE_CODE = NO
+++-
+++ # The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+++ # bibliography, e.g. plainnat, or ieeetr. See
+++-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
++++# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+++ # The default value is: plain.
+++ # This tag requires that the tag GENERATE_LATEX is set to YES.
+++
+++ LATEX_BIB_STYLE = plain
+++
++++# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
++++# page will contain the date and time when the page was generated. Setting this
++++# to NO can help when comparing the output of multiple runs.
++++# The default value is: NO.
++++# This tag requires that the tag GENERATE_LATEX is set to YES.
++++
++++LATEX_TIMESTAMP = NO
++++
++++# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
++++# path from which the emoji images will be read. If a relative path is entered,
++++# it will be relative to the LATEX_OUTPUT directory. If left blank the
++++# LATEX_OUTPUT directory will be used.
++++# This tag requires that the tag GENERATE_LATEX is set to YES.
++++
++++LATEX_EMOJI_DIRECTORY =
++++
+++ #---------------------------------------------------------------------------
+++ # Configuration options related to the RTF output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
++++# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+++ # RTF output is optimized for Word 97 and may not look too pretty with other RTF
+++ # readers/editors.
+++ # The default value is: NO.
+++@@ -1751,7 +2090,7 @@
+++
+++ RTF_OUTPUT = rtf
+++
+++-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
++++# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+++ # documents. This may be useful for small projects and may help to save some
+++ # trees in general.
+++ # The default value is: NO.
+++@@ -1771,9 +2110,9 @@
+++
+++ RTF_HYPERLINKS = NO
+++
+++-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+++-# file, i.e. a series of assignments. You only have to provide replacements,
+++-# missing definitions are set to their default value.
++++# Load stylesheet definitions from file. Syntax is similar to doxygen's
++++# configuration file, i.e. a series of assignments. You only have to provide
++++# replacements, missing definitions are set to their default value.
+++ #
+++ # See also section "Doxygen usage" for information on how to generate the
+++ # default style sheet that doxygen normally uses.
+++@@ -1782,8 +2121,8 @@
+++ RTF_STYLESHEET_FILE =
+++
+++ # Set optional variables used in the generation of an RTF document. Syntax is
+++-# similar to doxygen's config file. A template extensions file can be generated
+++-# using doxygen -e rtf extensionFile.
++++# similar to doxygen's configuration file. A template extensions file can be
++++# generated using doxygen -e rtf extensionFile.
+++ # This tag requires that the tag GENERATE_RTF is set to YES.
+++
+++ RTF_EXTENSIONS_FILE =
+++@@ -1792,7 +2131,7 @@
+++ # Configuration options related to the man page output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
++++# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+++ # classes and files.
+++ # The default value is: NO.
+++
+++@@ -1816,6 +2155,13 @@
+++
+++ MAN_EXTENSION = .3
+++
++++# The MAN_SUBDIR tag determines the name of the directory created within
++++# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
++++# MAN_EXTENSION with the initial . removed.
++++# This tag requires that the tag GENERATE_MAN is set to YES.
++++
++++MAN_SUBDIR =
++++
+++ # If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+++ # will generate one additional man file for each entity documented in the real
+++ # man page(s). These additional files only source the real man page, but without
+++@@ -1829,7 +2175,7 @@
+++ # Configuration options related to the XML output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
++++# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+++ # captures the structure of the code including all documentation.
+++ # The default value is: NO.
+++
+++@@ -1843,19 +2189,7 @@
+++
+++ XML_OUTPUT = xml
+++
+++-# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
+++-# validating XML parser to check the syntax of the XML files.
+++-# This tag requires that the tag GENERATE_XML is set to YES.
+++-
+++-XML_SCHEMA =
+++-
+++-# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
+++-# validating XML parser to check the syntax of the XML files.
+++-# This tag requires that the tag GENERATE_XML is set to YES.
+++-
+++-XML_DTD =
+++-
+++-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
++++# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+++ # listings (including syntax highlighting and cross-referencing information) to
+++ # the XML output. Note that enabling this will significantly increase the size
+++ # of the XML output.
+++@@ -1864,11 +2198,18 @@
+++
+++ XML_PROGRAMLISTING = YES
+++
++++# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
++++# namespace members in file scope as well, matching the HTML output.
++++# The default value is: NO.
++++# This tag requires that the tag GENERATE_XML is set to YES.
++++
++++XML_NS_MEMB_FILE_SCOPE = NO
++++
+++ #---------------------------------------------------------------------------
+++ # Configuration options related to the DOCBOOK output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
++++# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+++ # that can be used to generate PDF.
+++ # The default value is: NO.
+++
+++@@ -1886,10 +2227,10 @@
+++ # Configuration options for the AutoGen Definitions output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+++-# Definitions (see http://autogen.sf.net) file that captures the structure of
+++-# the code including all documentation. Note that this feature is still
+++-# experimental and incomplete at the moment.
++++# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
++++# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
++++# the structure of the code including all documentation. Note that this feature
++++# is still experimental and incomplete at the moment.
+++ # The default value is: NO.
+++
+++ GENERATE_AUTOGEN_DEF = NO
+++@@ -1898,7 +2239,7 @@
+++ # Configuration options related to the Perl module output
+++ #---------------------------------------------------------------------------
+++
+++-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
++++# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+++ # file that captures the structure of the code including all documentation.
+++ #
+++ # Note that this feature is still experimental and incomplete at the moment.
+++@@ -1906,7 +2247,7 @@
+++
+++ GENERATE_PERLMOD = NO
+++
+++-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
++++# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+++ # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+++ # output from the Perl module output.
+++ # The default value is: NO.
+++@@ -1914,9 +2255,9 @@
+++
+++ PERLMOD_LATEX = NO
+++
+++-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
++++# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+++ # formatted so it can be parsed by a human reader. This is useful if you want to
+++-# understand what is going on. On the other hand, if this tag is set to NO the
++++# understand what is going on. On the other hand, if this tag is set to NO, the
+++ # size of the Perl module output will be much smaller and Perl will parse it
+++ # just the same.
+++ # The default value is: YES.
+++@@ -1936,14 +2277,14 @@
+++ # Configuration options related to the preprocessor
+++ #---------------------------------------------------------------------------
+++
+++-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
++++# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+++ # C-preprocessor directives found in the sources and include files.
+++ # The default value is: YES.
+++
+++ ENABLE_PREPROCESSING = YES
+++
+++-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+++-# in the source code. If set to NO only conditional compilation will be
++++# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
++++# in the source code. If set to NO, only conditional compilation will be
+++ # performed. Macro expansion can be done in a controlled way by setting
+++ # EXPAND_ONLY_PREDEF to YES.
+++ # The default value is: NO.
+++@@ -1959,7 +2300,7 @@
+++
+++ EXPAND_ONLY_PREDEF = NO
+++
+++-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
++++# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+++ # INCLUDE_PATH will be searched if a #include is found.
+++ # The default value is: YES.
+++ # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+++@@ -1968,7 +2309,8 @@
+++
+++ # The INCLUDE_PATH tag can be used to specify one or more directories that
+++ # contain include files that are not input files but should be processed by the
+++-# preprocessor.
++++# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of
++++# RECURSIVE has no effect here.
+++ # This tag requires that the tag SEARCH_INCLUDES is set to YES.
+++
+++ INCLUDE_PATH =
+++@@ -2008,9 +2350,9 @@
+++ EXPAND_AS_DEFINED =
+++
+++ # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+++-# remove all references to function-like macros that are alone on a line, have an
+++-# all uppercase name, and do not end with a semicolon. Such function macros are
+++-# typically used for boiler-plate code, and will confuse the parser if not
++++# remove all references to function-like macros that are alone on a line, have
++++# an all uppercase name, and do not end with a semicolon. Such function macros
++++# are typically used for boiler-plate code, and will confuse the parser if not
+++ # removed.
+++ # The default value is: YES.
+++ # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+++@@ -2030,7 +2372,7 @@
+++ # where loc1 and loc2 can be relative or absolute paths or URLs. See the
+++ # section "Linking to external documentation" for more information about the use
+++ # of tag files.
+++-# Note: Each tag file must have an unique name (where the name does NOT include
++++# Note: Each tag file must have a unique name (where the name does NOT include
+++ # the path). If a tag file is not located in the directory in which doxygen is
+++ # run, you must also specify the path to the tagfile here.
+++
+++@@ -2042,54 +2384,31 @@
+++
+++ GENERATE_TAGFILE =
+++
+++-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+++-# class index. If set to NO only the inherited external classes will be listed.
++++# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
++++# the class index. If set to NO, only the inherited external classes will be
++++# listed.
+++ # The default value is: NO.
+++
+++ ALLEXTERNALS = NO
+++
+++-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+++-# the modules index. If set to NO, only the current project's groups will be
++++# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
++++# in the modules index. If set to NO, only the current project's groups will be
+++ # listed.
+++ # The default value is: YES.
+++
+++ EXTERNAL_GROUPS = YES
+++
+++-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
++++# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+++ # the related pages index. If set to NO, only the current project's pages will
+++ # be listed.
+++ # The default value is: YES.
+++
+++ EXTERNAL_PAGES = YES
+++
+++-# The PERL_PATH should be the absolute path and name of the perl script
+++-# interpreter (i.e. the result of 'which perl').
+++-# The default file (with absolute path) is: /usr/bin/perl.
+++-
+++-PERL_PATH = /usr/bin/perl
+++-
+++ #---------------------------------------------------------------------------
+++ # Configuration options related to the dot tool
+++ #---------------------------------------------------------------------------
+++
+++-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+++-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+++-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+++-# disabled, but it is recommended to install and use dot, since it yields more
+++-# powerful graphs.
+++-# The default value is: YES.
+++-
+++-CLASS_DIAGRAMS = NO
+++-
+++-# You can define message sequence charts within doxygen comments using the \msc
+++-# command. Doxygen will then run the mscgen tool (see:
+++-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
+++-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+++-# the mscgen tool resides. If left empty the tool is assumed to be found in the
+++-# default search path.
+++-
+++-MSCGEN_PATH =
+++-
+++ # You can include diagrams made with dia in doxygen documentation. Doxygen will
+++ # then run dia to produce the diagram and insert it in the documentation. The
+++ # DIA_PATH tag allows you to specify the directory where the dia binary resides.
+++@@ -2097,7 +2416,7 @@
+++
+++ DIA_PATH =
+++
+++-# If set to YES, the inheritance and collaboration graphs will hide inheritance
++++# If set to YES the inheritance and collaboration graphs will hide inheritance
+++ # and usage relations if the target is undocumented or is not a class.
+++ # The default value is: YES.
+++
+++@@ -2108,7 +2427,7 @@
+++ # http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+++ # Bell Labs. The other options in this section have no effect if this option is
+++ # set to NO
+++-# The default value is: NO.
++++# The default value is: YES.
+++
+++ HAVE_DOT = NO
+++
+++@@ -2122,7 +2441,7 @@
+++
+++ DOT_NUM_THREADS = 0
+++
+++-# When you want a differently looking font n the dot files that doxygen
++++# When you want a differently looking font in the dot files that doxygen
+++ # generates you can specify the font name using DOT_FONTNAME. You need to make
+++ # sure dot is able to find the font, which can be done by putting it in a
+++ # standard location or by setting the DOTFONTPATH environment variable or by
+++@@ -2146,13 +2465,16 @@
+++
+++ DOT_FONTPATH =
+++
+++-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+++-# each documented class showing the direct and indirect inheritance relations.
+++-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
++++# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a
++++# graph for each documented class showing the direct and indirect inheritance
++++# relations. In case HAVE_DOT is set as well dot will be used to draw the graph,
++++# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set
++++# to TEXT the direct and indirect inheritance relations will be shown as texts /
++++# links.
++++# Possible values are: NO, YES, TEXT and GRAPH.
+++ # The default value is: YES.
+++-# This tag requires that the tag HAVE_DOT is set to YES.
+++
+++-CLASS_GRAPH = YES
++++CLASS_GRAPH = TEXT
+++
+++ # If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+++ # graph for each documented class showing the direct and indirect implementation
+++@@ -2164,13 +2486,14 @@
+++ COLLABORATION_GRAPH = YES
+++
+++ # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+++-# groups, showing the direct groups dependencies.
++++# groups, showing the direct groups dependencies. See also the chapter Grouping
++++# in the manual.
+++ # The default value is: YES.
+++ # This tag requires that the tag HAVE_DOT is set to YES.
+++
+++ GROUP_GRAPHS = YES
+++
+++-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
++++# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+++ # collaboration diagrams in a style similar to the OMG's Unified Modeling
+++ # Language.
+++ # The default value is: NO.
+++@@ -2187,10 +2510,32 @@
+++ # but if the number exceeds 15, the total amount of fields shown is limited to
+++ # 10.
+++ # Minimum value: 0, maximum value: 100, default value: 10.
+++-# This tag requires that the tag HAVE_DOT is set to YES.
++++# This tag requires that the tag UML_LOOK is set to YES.
+++
+++ UML_LIMIT_NUM_FIELDS = 10
+++
++++# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and
++++# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS
++++# tag is set to YES, doxygen will add type and arguments for attributes and
++++# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen
++++# will not generate fields with class member information in the UML graphs. The
++++# class diagrams will look similar to the default class diagrams but using UML
++++# notation for the relationships.
++++# Possible values are: NO, YES and NONE.
++++# The default value is: NO.
++++# This tag requires that the tag UML_LOOK is set to YES.
++++
++++DOT_UML_DETAILS = NO
++++
++++# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
++++# to display on a single line. If the actual line length exceeds this threshold
++++# significantly it will wrapped across multiple lines. Some heuristics are apply
++++# to avoid ugly line breaks.
++++# Minimum value: 0, maximum value: 1000, default value: 17.
++++# This tag requires that the tag HAVE_DOT is set to YES.
++++
++++DOT_WRAP_THRESHOLD = 17
++++
+++ # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+++ # collaboration graphs will show the relations between templates and their
+++ # instances.
+++@@ -2222,7 +2567,8 @@
+++ #
+++ # Note that enabling this option will significantly increase the time of a run.
+++ # So in most cases it will be better to enable call graphs for selected
+++-# functions only using the \callgraph command.
++++# functions only using the \callgraph command. Disabling a call graph can be
++++# accomplished by means of the command \hidecallgraph.
+++ # The default value is: NO.
+++ # This tag requires that the tag HAVE_DOT is set to YES.
+++
+++@@ -2233,7 +2579,8 @@
+++ #
+++ # Note that enabling this option will significantly increase the time of a run.
+++ # So in most cases it will be better to enable caller graphs for selected
+++-# functions only using the \callergraph command.
++++# functions only using the \callergraph command. Disabling a caller graph can be
++++# accomplished by means of the command \hidecallergraph.
+++ # The default value is: NO.
+++ # This tag requires that the tag HAVE_DOT is set to YES.
+++
+++@@ -2255,12 +2602,24 @@
+++
+++ DIRECTORY_GRAPH = YES
+++
++++# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels
++++# of child directories generated in directory dependency graphs by dot.
++++# Minimum value: 1, maximum value: 25, default value: 1.
++++# This tag requires that the tag DIRECTORY_GRAPH is set to YES.
++++
++++DIR_GRAPH_MAX_DEPTH = 1
++++
+++ # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+++-# generated by dot.
++++# generated by dot. For an explanation of the image formats see the section
++++# output formats in the documentation of the dot tool (Graphviz (see:
++++# http://www.graphviz.org/)).
+++ # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+++ # to make the SVG files visible in IE 9+ (other browsers do not have this
+++ # requirement).
+++-# Possible values are: png, jpg, gif and svg.
++++# Possible values are: png, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd,
++++# gif, gif:cairo, gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd,
++++# png:cairo, png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
++++# png:gdiplus:gdiplus.
+++ # The default value is: png.
+++ # This tag requires that the tag HAVE_DOT is set to YES.
+++
+++@@ -2303,6 +2662,24 @@
+++
+++ DIAFILE_DIRS =
+++
++++# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
++++# path where java can find the plantuml.jar file or to the filename of jar file
++++# to be used. If left blank, it is assumed PlantUML is not used or called during
++++# a preprocessing step. Doxygen will generate a warning when it encounters a
++++# \startuml command in this case and will not generate output for the diagram.
++++
++++PLANTUML_JAR_PATH =
++++
++++# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
++++# configuration file for plantuml.
++++
++++PLANTUML_CFG_FILE =
++++
++++# When using plantuml, the specified paths are searched for files specified by
++++# the !include statement in a plantuml block.
++++
++++PLANTUML_INCLUDE_PATH =
++++
+++ # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+++ # that will be shown in the graph. If the number of nodes in a graph becomes
+++ # larger than this value, doxygen will truncate the graph, which is visualized
+++@@ -2339,7 +2716,7 @@
+++
+++ DOT_TRANSPARENT = NO
+++
+++-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
++++# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+++ # files in one run (i.e. multiple -o and -T options on the command line). This
+++ # makes dot run faster, but since only newer versions of dot (>1.8.10) support
+++ # this, this feature is disabled by default.
+++@@ -2351,14 +2728,18 @@
+++ # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+++ # explaining the meaning of the various boxes and arrows in the dot generated
+++ # graphs.
++++# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal
++++# graphical representation for inheritance and collaboration diagrams is used.
+++ # The default value is: YES.
+++ # This tag requires that the tag HAVE_DOT is set to YES.
+++
+++ GENERATE_LEGEND = YES
+++
+++-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
++++# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
+++ # files that are used to generate the various graphs.
++++#
++++# Note: This setting is not only used for dot files but also for msc temporary
++++# files.
+++ # The default value is: YES.
+++-# This tag requires that the tag HAVE_DOT is set to YES.
+++
+++ DOT_CLEANUP = YES
+++--- a/docs/DoxygenLayout.xml
++++++ b/docs/DoxygenLayout.xml
+++@@ -1,22 +1,36 @@
+++ <doxygenlayout version="1.0">
+++- <!-- Generated by doxygen 1.8.6 -->
++++ <!-- Generated by doxygen 1.9.4 -->
+++ <!-- Navigation index tabs for HTML output -->
+++ <navindex>
+++ <tab type="mainpage" visible="yes" title=""/>
+++ <tab type="pages" visible="yes" title="" intro=""/>
+++ <tab type="modules" visible="yes" title="" intro=""/>
+++- <!--
+++ <tab type="namespaces" visible="yes" title="">
+++ <tab type="namespacelist" visible="yes" title="" intro=""/>
+++ <tab type="namespacemembers" visible="yes" title="" intro=""/>
+++ </tab>
++++ <tab type="concepts" visible="yes" title="">
++++ </tab>
++++ <tab type="interfaces" visible="yes" title="">
++++ <tab type="interfacelist" visible="yes" title="" intro=""/>
++++ <tab type="interfaceindex" visible="$ALPHABETICAL_INDEX" title=""/>
++++ <tab type="interfacehierarchy" visible="yes" title="" intro=""/>
++++ </tab>
+++ <tab type="classes" visible="yes" title="">
+++ <tab type="classlist" visible="yes" title="" intro=""/>
+++ <tab type="classindex" visible="$ALPHABETICAL_INDEX" title=""/>
+++ <tab type="hierarchy" visible="yes" title="" intro=""/>
+++ <tab type="classmembers" visible="yes" title="" intro=""/>
+++ </tab>
+++- -->
++++ <tab type="structs" visible="yes" title="">
++++ <tab type="structlist" visible="yes" title="" intro=""/>
++++ <tab type="structindex" visible="$ALPHABETICAL_INDEX" title=""/>
++++ </tab>
++++ <tab type="exceptions" visible="yes" title="">
++++ <tab type="exceptionlist" visible="yes" title="" intro=""/>
++++ <tab type="exceptionindex" visible="$ALPHABETICAL_INDEX" title=""/>
++++ <tab type="exceptionhierarchy" visible="yes" title="" intro=""/>
++++ </tab>
+++ <tab type="files" visible="yes" title="">
+++ <tab type="filelist" visible="yes" title="" intro=""/>
+++ <tab type="globals" visible="yes" title="" intro=""/>
+++@@ -27,7 +41,7 @@
+++ <!-- Layout definition for a class page -->
+++ <class>
+++ <briefdescription visible="yes"/>
+++- <includes visible="$SHOW_INCLUDE_FILES"/>
++++ <includes visible="$SHOW_HEADERFILE"/>
+++ <inheritancegraph visible="$CLASS_GRAPH"/>
+++ <collaborationgraph visible="$COLLABORATION_GRAPH"/>
+++ <memberdecl>
+++@@ -89,8 +103,14 @@
+++ <memberdecl>
+++ <nestednamespaces visible="yes" title=""/>
+++ <constantgroups visible="yes" title=""/>
++++ <interfaces visible="yes" title=""/>
+++ <classes visible="yes" title=""/>
++++ <concepts visible="yes" title=""/>
++++ <structs visible="yes" title=""/>
++++ <exceptions visible="yes" title=""/>
+++ <typedefs title=""/>
++++ <sequences title=""/>
++++ <dictionaries title=""/>
+++ <enums title=""/>
+++ <functions title=""/>
+++ <variables title=""/>
+++@@ -100,6 +120,8 @@
+++ <memberdef>
+++ <inlineclasses title=""/>
+++ <typedefs title=""/>
++++ <sequences title=""/>
++++ <dictionaries title=""/>
+++ <enums title=""/>
+++ <functions title=""/>
+++ <variables title=""/>
+++@@ -107,6 +129,15 @@
+++ <authorsection visible="yes"/>
+++ </namespace>
+++
++++ <!-- Layout definition for a concept page -->
++++ <concept>
++++ <briefdescription visible="yes"/>
++++ <includes visible="$SHOW_HEADERFILE"/>
++++ <definition visible="yes" title=""/>
++++ <detaileddescription title=""/>
++++ <authorsection visible="yes"/>
++++ </concept>
++++
+++ <!-- Layout definition for a file page -->
+++ <file>
+++ <briefdescription visible="yes"/>
+++@@ -115,11 +146,17 @@
+++ <includedbygraph visible="$INCLUDED_BY_GRAPH"/>
+++ <sourcelink visible="yes"/>
+++ <memberdecl>
++++ <interfaces visible="yes" title=""/>
+++ <classes visible="yes" title=""/>
++++ <structs visible="yes" title=""/>
++++ <exceptions visible="yes" title=""/>
+++ <namespaces visible="yes" title=""/>
++++ <concepts visible="yes" title=""/>
+++ <constantgroups visible="yes" title=""/>
+++ <defines title=""/>
+++ <typedefs title=""/>
++++ <sequences title=""/>
++++ <dictionaries title=""/>
+++ <enums title=""/>
+++ <functions title=""/>
+++ <variables title=""/>
+++@@ -130,6 +167,8 @@
+++ <inlineclasses title=""/>
+++ <defines title=""/>
+++ <typedefs title=""/>
++++ <sequences title=""/>
++++ <dictionaries title=""/>
+++ <enums title=""/>
+++ <functions title=""/>
+++ <variables title=""/>
+++@@ -146,9 +185,12 @@
+++ <dirs visible="yes" title=""/>
+++ <files visible="yes" title=""/>
+++ <namespaces visible="yes" title=""/>
++++ <concepts visible="yes" title=""/>
+++ <classes visible="yes" title=""/>
+++ <defines title=""/>
+++ <typedefs title=""/>
++++ <sequences title=""/>
++++ <dictionaries title=""/>
+++ <enums title=""/>
+++ <enumvalues title=""/>
+++ <functions title=""/>
+++@@ -168,6 +210,8 @@
+++ <inlineclasses title=""/>
+++ <defines title=""/>
+++ <typedefs title=""/>
++++ <sequences title=""/>
++++ <dictionaries title=""/>
+++ <enums title=""/>
+++ <enumvalues title=""/>
+++ <functions title=""/>
--- /dev/null
--- /dev/null
--- /dev/null
+++#!/usr/bin/make -f
+++DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
+++export DEB_HOST_MULTIARCH
+++#DEB_BUILD_MAINT_OPTIONS += hardening=+all
+++#export DEB_BUILD_MAINT_OPTIONS
+++#export DH_VERBOSE=1
+++
+++%:
+++ dh $@ --with python3
+++
+++override_dh_auto_configure:
+++# dh_auto_configure -- -DENABLE_DOXYGEN_PDF=ON
+++# ln -s sse2neon/sse2neon.h include/volk/sse2neon/sse2neon.h
+++ dh_auto_configure
+++
+++override_dh_auto_build-indep:
+++ cmake --build obj-* --target all
+++ cmake --build obj-* --target volk_doc
+++# cmake --build obj-* --target volk_pdf_doc
+++
+++override_dh_auto_install:
+++ dh_auto_install
+++ find debian -type d -empty -delete
+++
+++override_dh_auto_clean:
+++ dh_auto_clean
+++ rm -rf gen/__pycache__
+++# rm -f include/volk/sse2neon/sse2neon.h
--- /dev/null
--- /dev/null
--- /dev/null
+++3.0 (quilt)
--- /dev/null
--- /dev/null
--- /dev/null
+++---
+++Bug-Database: https://github.com/gnuradio/volk/issues
+++Bug-Submit: https://github.com/gnuradio/volk/issues/new
+++Repository-Browse: https://github.com/gnuradio/volk
--- /dev/null
--- /dev/null
--- /dev/null
+++.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.10.
+++.TH VOLK-CONFIG-INFO "1" "July 2014" "volk-config-info 0.1" "User Commands"
+++.SH NAME
+++volk-config-info \- pkgconfig-like tool for Vector Optimized Library of Kernels 0.1
+++.SH DESCRIPTION
+++.SS "Program options: volk-config-info [options]:"
+++.TP
+++\fB\-h\fR [ \fB\-\-help\fR ]
+++print help message
+++.TP
+++\fB\-\-prefix\fR
+++print VOLK installation prefix
+++.TP
+++\fB\-\-builddate\fR
+++print VOLK build date (RFC2822 format)
+++.TP
+++\fB\-\-cc\fR
+++print VOLK C compiler version
+++.TP
+++\fB\-\-cflags\fR
+++print VOLK CFLAGS
+++.TP
+++\fB\-\-all\-machines\fR
+++print VOLK machines built into library
+++.TP
+++\fB\-\-avail\-machines\fR
+++print VOLK machines the current platform can use
+++.TP
+++\fB\-\-machine\fR
+++print the VOLK machine that will be used
+++.TP
+++\fB\-v\fR [ \fB\-\-version\fR ]
+++print VOLK version
+++.SH "SEE ALSO"
+++The full documentation for
+++.B volk-config-info
+++is maintained as a Texinfo manual. If the
+++.B info
+++and
+++.B volk-config-info
+++programs are properly installed at your site, the command
+++.IP
+++.B info volk-config-info
+++.PP
+++should give you access to the complete manual.
--- /dev/null
--- /dev/null
--- /dev/null
+++.TH GNURADIO "1" "August 2013" "volk_modtool 3.7" "User Commands"
+++.SH NAME
+++volk_modtool \- tailor VOLK modules
+++.SH DESCRIPTION
+++The volk_modtool tool is installed along with VOLK as a way of helping
+++to construct, add to, and interrogate the VOLK library or companion
+++libraries.
+++.P
+++volk_modtool is installed into $prefix/bin.
+++.P
+++VOLK modtool enables creating standalone (out-of-tree) VOLK modules
+++and provides a few tools for sharing VOLK kernels between VOLK
+++modules. If you need to design or work with VOLK kernels away from
+++the canonical VOLK library, this is the tool. If you need to tailor
+++your own VOLK library for whatever reason, this is the tool.
+++.P
+++The canonical VOLK library installs a volk.h and a libvolk.so. Your
+++own library will install volk_$name.h and libvolk_$name.so. Ya Gronk?
+++Good.
+++.P
+++There isn't a substantial difference between the canonical VOLK
+++module and any other VOLK module. They're all peers. Any module
+++created via VOLK modtool will come complete with a default
+++volk_modtool.cfg file associating the module with the base from which
+++it came, its distinctive $name and its destination (or path). These
+++values (created from user input if VOLK modtool runs without a
+++user-supplied config file or a default config file) serve as default
+++values for some VOLK modtool actions. It's more or less intended for
+++the user to change directories to the top level of a created VOLK
+++module and then run volk_modtool to take advantage of the values
+++stored in the default volk_modtool.cfg file.
+++.P
+++Apart from creating new VOLK modules, VOLK modtool allows you to list
+++the names of kernels in other modules, list the names of kernels in
+++the current module, add kernels from another module into the current
+++module, and remove kernels from the current module. When moving
+++kernels between modules, VOLK modtool does its best to keep the qa
+++and profiling code for those kernels intact. If the base has a test
+++or a profiling call for some kernel, those calls will follow the
+++kernel when VOLK modtool adds that kernel. If QA or profiling
+++requires a puppet kernel, the puppet kernel will follow the original
+++kernel when VOLK modtool adds that original kernel. VOLK modtool
+++respects puppets.
+++.P
+++======================================================================
+++.P
+++.SH Installing a new VOLK Library:
+++.P
+++Run the command "volk_modtool -i". This will ask you three questions:
+++.P
+++ name: // the name to give your VOLK library: volk_<name>
+++ destination: // directory new source tree is built under -- must exist.
+++ // It will create <directory>/volk_<name>
+++ base: // the directory containing the original VOLK source code
+++.P
+++This will build a new skeleton directory in the destination provided
+++with the name volk_<name>. It will contain the necessary structure to
+++build:
+++.P
+++ mkdir build
+++ cd build
+++ cmake -DCMAKE_INSTALL_PREFIX=/opt/volk ../
+++ make
+++ sudo make install
+++.P
+++Right now, the library is empty and contains no kernels. Kernels can
+++be added from another VOLK library using the '-a' option. If not
+++specified, the kernel will be extracted from the base VOLK
+++directory. Using the '-b' option allows you to specify another VOLK
+++library to use for this purpose.
+++.P
+++ volk_modtool -a -n 32fc_x2_conjugate_dot_prod_32fc
+++.P
+++This will put the code for the new kernel into
+++<destination>/volk_<name>/kernels/volk_<name>/
+++.P
+++Other kernels must be added by hand. See the following webpages for
+++more information about creating VOLK kernels:
+++ http://gnuradio.org/doc/doxygen/volk_guide.html
+++ http://gnuradio.org/redmine/projects/gnuradio/wiki/Volk
+++.P
+++======================================================================
+++.P
+++.SH OPTIONS
+++.P
+++Options for Adding and Removing Kernels:
+++ -a, --add_kernel
+++ Add kernel from existing VOLK module. Uses the base VOLK module
+++ unless -b is used. Use -n to specify the kernel name.
+++ Requires: -n.
+++ Optional: -b
+++.P
+++ -A, --add_all_kernels
+++ Add all kernels from existing VOLK module. Uses the base VOLK
+++ module unless -b is used.
+++ Optional: -b
+++.P
+++ -x, --remove_kernel
+++ Remove kernel from module.
+++ Requires: -n.
+++ Optional: -b
+++.P
+++Options for Listing Kernels:
+++ -l, --list
+++ Lists all kernels available in the base VOLK module.
+++.P
+++ -k, --kernels
+++ Lists all kernels in this VOLK module.
+++.P
+++ -r, --remote-list
+++ Lists all kernels in another VOLK module that is specified
+++ using the -b option.
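+++.P
+++.SH EXAMPLE
+++An illustrative session only; the kernel name is a placeholder and may not
+++exist in your base module. Run from the top level of a module created with
+++"volk_modtool -i" so the default volk_modtool.cfg supplies the base and
+++path values:
+++.P
+++ volk_modtool -l # list kernels available in the base module
+++ volk_modtool -a -n 32f_x2_add_32f # add one kernel (QA and puppets follow)
+++ volk_modtool -k # list kernels now in this module
+++ volk_modtool -x -n 32f_x2_add_32f # remove it again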
--- /dev/null
--- /dev/null
--- /dev/null
+++.TH VOLK_PROFILE "1" "March 2012" "volk_profile 3.5" "User Commands"
+++.SH NAME
+++volk_profile \- Quality Assurance application for libvolk functions
+++.SH DESCRIPTION
+++Runs performance tests on the available implementations of each VOLK
+++kernel and writes the profile results to a file, which libvolk reads at
+++runtime to select the fastest implementation on the current machine.
--- /dev/null
--- /dev/null
--- /dev/null
+++version=4
+++opts="filenamemangle=s%(?:.*?)?v?(\d[\d.]*@ARCHIVE_EXT@)%@PACKAGE@-$1%,uversionmangle=s/-rc/~rc/" \
+++ https://github.com/gnuradio/volk/tags \
+++ (?:.*?/)?v?@ANY_VERSION@@ARCHIVE_EXT@ debian
+++
+++opts="component=sse2neon" \
+++ https://github.com/DLTcollab/sse2neon/tags \
+++ (?:.*?/)?v?@ANY_VERSION@@ARCHIVE_EXT@ 1.7.0 uupdate
--- /dev/null
--- /dev/null
--- /dev/null
+++#!/usr/bin/env bash
+++
+++. .ci/common.sh
+++
+++set -x
+++
+++for file in ${SOURCES};
+++do
+++ clang-format-12 ${file} > expected-format
+++ diff -u -p --label="${file}" --label="expected coding style" ${file} expected-format
+++done
+++exit $(clang-format-12 --output-replacements-xml ${SOURCES} | egrep -c "</replacement>")
--- /dev/null
--- /dev/null
--- /dev/null
+++GCC_REL=11.2-2022.02
+++ARM_MIRROR=https://github.com/DLTcollab/toolchain-arm/raw/main
+++
+++SOURCES=$(find $(git rev-parse --show-toplevel) | egrep "\.(cpp|h)\$" | egrep -v "gcc-arm-${GCC_REL}-x86_64-aarch64-none-linux-gnu|gcc-arm-${GCC_REL}-x86_64-arm-none-linux-gnueabihf")
+++
+++# Expect host is Linux/x86_64
+++check_platform()
+++{
+++ MACHINE_TYPE=`uname -m`
+++ if [ ${MACHINE_TYPE} != 'x86_64' ]; then
+++ exit
+++ fi
+++
+++ OS_TYPE=`uname -s`
+++ if [ ${OS_TYPE} != 'Linux' ]; then
+++ exit
+++ fi
+++}
--- /dev/null
--- /dev/null
--- /dev/null
+++#!/usr/bin/env bash
+++
+++. .ci/common.sh
+++
+++check_platform
+++
+++# Clang/LLVM is natively a cross-compiler.
+++# TODO: Do cross-compilation using Clang
+++# https://clang.llvm.org/docs/CrossCompilation.html
+++if [ $(printenv CXX | grep clang) ]; then
+++ exit
+++fi
+++
+++set -x
+++
+++make clean
+++export PATH=gcc-arm-${GCC_REL}-x86_64-aarch64-none-linux-gnu/bin:$PATH
+++make CROSS_COMPILE=aarch64-none-linux-gnu- check || exit 1 # ARMv8-A
+++
+++make clean
+++export PATH=gcc-arm-${GCC_REL}-x86_64-arm-none-linux-gnueabihf/bin:$PATH
+++make CROSS_COMPILE=arm-none-linux-gnueabihf- check || exit 1 # ARMv7-A
--- /dev/null
--- /dev/null
--- /dev/null
+++#!/usr/bin/env bash
+++
+++. .ci/common.sh
+++
+++check_platform
+++
+++sudo apt-get update -q -y
+++sudo apt-get install -q -y qemu-user
+++
+++# Clang/LLVM is natively a cross-compiler, meaning that one set of programs
+++# can compile to all targets by setting the -target option.
+++if [ $(printenv CXX | grep clang) ]; then
+++ exit
+++fi
+++
+++set -x
+++
+++sudo apt-get install -y curl xz-utils
+++
+++curl -L \
+++ ${ARM_MIRROR}/gcc-arm-${GCC_REL}-x86_64-arm-none-linux-gnueabihf.tar.xz \
+++ | tar -Jx || exit 1
+++
+++curl -L \
+++ ${ARM_MIRROR}/gcc-arm-${GCC_REL}-x86_64-aarch64-none-linux-gnu.tar.xz \
+++ | tar -Jx || exit 1
--- /dev/null
--- /dev/null
--- /dev/null
+++BasedOnStyle: Chromium
+++Language: Cpp
+++MaxEmptyLinesToKeep: 3
+++IndentCaseLabels: false
+++AllowShortIfStatementsOnASingleLine: false
+++AllowShortCaseLabelsOnASingleLine: false
+++AllowShortLoopsOnASingleLine: false
+++DerivePointerAlignment: false
+++PointerAlignment: Right
+++SpaceAfterCStyleCast: true
+++TabWidth: 4
+++UseTab: Never
+++IndentWidth: 4
+++BreakBeforeBraces: Linux
+++AccessModifierOffset: -4
+++ForEachMacros:
+++ - SET_FOREACH
+++ - RB_FOREACH
+++AlignEscapedNewlines: Left
--- /dev/null
--- /dev/null
--- /dev/null
+++*.md text=auto
+++LICENSE text=auto
+++
+++sse2neon.h -text linguist-language=c
--- /dev/null
--- /dev/null
--- /dev/null
+++# Lines starting with '#' are comments.
+++# More details are here: https://help.github.com/articles/about-codeowners/
+++
+++# Global codeowners:
+++* @jserv @marktwtn
--- /dev/null
--- /dev/null
--- /dev/null
+++name: GitHub Actions
+++
+++on: [push, pull_request]
+++
+++jobs:
+++ host-x86:
+++ runs-on: ubuntu-20.04
+++ strategy:
+++ matrix:
+++ arch: [x86_64]
+++ cxx_compiler: [g++-10, clang++-11]
+++ steps:
+++ - name: checkout code
+++ uses: actions/checkout@v4
+++ - name: build artifact
+++ env:
+++ CXX: ${{ matrix.cxx_compiler }}
+++ run: |
+++ sh .ci/cross-tool.sh
+++ make check
+++ sh .ci/cross-check.sh
+++
+++ host-win:
+++ runs-on: windows-2022
+++ strategy:
+++ matrix:
+++ arch:
+++ - x86_64
+++ - armv7
+++ - aarch64
+++ env:
+++ LLVM_MINGW_URL: https://github.com/mstorsjo/llvm-mingw/releases/download/20220906/llvm-mingw-20220906-ucrt-x86_64.zip
+++ defaults:
+++ run:
+++ shell: bash
+++ steps:
+++ - name: unpack llvm-mingw
+++ run: |
+++ curl -L -O $LLVM_MINGW_URL
+++ unzip -q llvm-mingw-*.zip
+++ rm llvm-mingw-*.zip
+++ mv llvm-mingw-* "$HOME/llvm-mingw"
+++ echo "$HOME/llvm-mingw/bin" >> $GITHUB_PATH
+++ - name: checkout code
+++ uses: actions/checkout@v4
+++ - name: build artifact
+++ env:
+++ CXX: ${{ matrix.arch }}-w64-mingw32-clang++
+++ run: mingw32-make processor=${{ matrix.arch }}
+++ - name: run tests
+++ if: matrix.arch == 'x86_64'
+++ run: mingw32-make check
+++
+++ host-arm:
+++ runs-on: ubuntu-20.04
+++ strategy:
+++ matrix:
+++ arch_with_features: [
+++ {arch: armv7, feature: none, arch_cflags: none},
+++ {arch: aarch64, feature: none, arch_cflags: none},
+++ {arch: aarch64, feature: crypto+crc, arch_cflags: none},
+++ {arch: armv7, feature: none, arch_cflags: '-mcpu=cortex-a32 -mfpu=neon-fp-armv8'}
+++ ]
+++ cxx_compiler: [g++-10, clang++-11]
+++ steps:
+++ - name: checkout code
+++ uses: actions/checkout@v4
+++ - name: build artifact
+++ # The Github Action for non-x86 CPU
+++ # https://github.com/uraimo/run-on-arch-action
+++ uses: uraimo/run-on-arch-action@v2.6.0
+++ with:
+++ arch: ${{ matrix.arch_with_features.arch }}
+++ distro: ubuntu20.04
+++ env: |
+++ CXX: ${{ matrix.cxx_compiler }}
+++ ARCH_CFLAGS: ${{ matrix.arch_with_features.arch_cflags }}
+++ install: |
+++ apt-get update -q -y
+++ apt-get install -q -y "${{ matrix.cxx_compiler }}" make
+++ apt-get install -q -y gcc
+++ run: |
+++ make FEATURE=${{ matrix.arch_with_features.feature }} check
+++
+++ host-win-msvc:
+++ runs-on: windows-2022
+++ steps:
+++ - name: checkout code
+++ uses: actions/checkout@v4
+++
+++ - name: add msbuild to PATH
+++ uses: microsoft/setup-msbuild@v1.1
+++
+++ - name: build artifact
+++ run: msbuild sse2neon.vcxproj -t:rebuild -property:Configuration=Release -property:Platform=ARM64
+++
+++ - name: upload artifact
+++ uses: actions/upload-artifact@master
+++ with:
+++ name: msvc-arm64-artifact
+++ path: ARM64
+++
+++ test-win-msvc:
+++ runs-on: ubuntu-latest
+++ container: linaro/wine-arm64
+++ needs: host-win-msvc
+++ steps:
+++ - name: download artifact
+++ uses: actions/download-artifact@master
+++ with:
+++ name: msvc-arm64-artifact
+++
+++ - name: Run tests
+++ run: wine-arm64 cmd.exe /c 'Release\sse2neon.exe'
+++
+++
+++ coding-style:
+++ runs-on: ubuntu-20.04
+++ steps:
+++ - name: checkout code
+++ uses: actions/checkout@v4
+++ - name: style check
+++ run: |
+++ sudo apt-get install -q -y clang-format-12
+++ sh .ci/check-format.sh
+++ shell: bash
--- /dev/null
--- /dev/null
--- /dev/null
+++*.exe
+++*.o
+++*.gch
+++tests/*.d
+++tests/main
+++gcc-arm-*
+++.vs/
+++Debug/
+++Release/
+++*.vcxproj.user
--- /dev/null
--- /dev/null
--- /dev/null
+++# Contributing to SSE2NEON
+++
+++:+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
+++
+++The following is a set of guidelines for contributing to [SSE2NEON](https://github.com/DLTcollab/sse2neon),
+++hosted on GitHub. These are mostly guidelines, not rules. Use your best
+++judgment, and feel free to propose changes to this document in a pull request.
+++
+++## Issues
+++
+++This project uses GitHub Issues to track ongoing development, discuss project plans, and keep track of bugs. Be sure to search for existing issues before you create another one.
+++
+++Visit our [Issues page on GitHub](https://github.com/DLTcollab/sse2neon/issues) to search and submit.
+++
+++## Add New Intrinsic
+++
+++The new intrinsic conversion should be added in the `sse2neon.h` file,
+++and it should be placed in the correct classification, in alphabetical order.
+++The classification can be referenced from [Intel Intrinsics Guide](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html).
+++
+++Classification: `SSE`, `SSE2`, `SSE3`, `SSSE3`, `SSE4.1`, `SSE4.2`
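+++
+++As a minimal sketch of the expected shape (illustrative only, not copied
+++from `sse2neon.h`; the `FORCE_INLINE` macro and the `vreinterpretq_*`
+++helper names are assumed to match the ones already used throughout the
+++header, and `_mm_example_add_epi32` is a made-up name):
+++
+++```c
+++/*
+++ * Hypothetical SSE2-style intrinsic implemented with its NEON counterpart.
+++ * A real entry goes into the matching classification section of
+++ * sse2neon.h, in alphabetical order.
+++ */
+++FORCE_INLINE __m128i _mm_example_add_epi32(__m128i a, __m128i b)
+++{
+++    return vreinterpretq_m128i_s32(
+++        vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++```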
+++
+++## Coding Convention
+++
+++We welcome all contributions from corporate, academic and individual developers. However, there are a number of fundamental ground rules that you must adhere to in order to participate. These rules are outlined as follows:
+++* All code must adhere to the existing C coding style (see below). While we are somewhat flexible in basic style, you will adhere to what is currently in place. Uncommented, complicated algorithmic constructs will be rejected.
+++* All external pull requests must contain sufficient documentation in the pull request comments in order to be accepted.
+++
+++Software requirement: [clang-format](https://clang.llvm.org/docs/ClangFormat.html) version 12 or later.
+++
+++Use the command `$ clang-format -i *.[ch]` to enforce a consistent coding style.
+++
+++## Naming Conventions
+++
+++There are some general rules.
+++* Names with leading and trailing underscores are reserved for system purposes, and most systems use them for names that the user should not have to know.
+++* Function, typedef, and variable names, as well as struct, union, and enum tag names should be in lower case.
+++* Many function-like macros are in all CAPS.
+++* Avoid names that differ only in case, like `foo` and `Foo`. Similarly, avoid `foobar` and `foo_bar`. The potential for confusion is considerable.
+++* Similarly, avoid names that look like each other. On many terminals and printers, `l`, `1` and `I` look quite similar. A variable named `l` is particularly bad because it looks so much like the constant `1`.
+++
+++In general, global names (including enums) should have a common prefix (`SSE2NEON_` for macros and enum constants; `_sse2neon_` for functions) identifying the module that they belong with. Globals may alternatively be grouped in a global structure. Typedeffed names often have `_t` appended to their name.
+++
+++Avoid using names that might conflict with other names used in standard libraries. There may be more library code included in some systems than you need. Your program could also be extended in the future.
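+++
+++A short sketch of the prefixing described above (the identifiers are made
+++up for illustration and are not existing sse2neon symbols):
+++
+++```c
+++#include <stdint.h>
+++
+++/* Module-wide macros and enum constants carry the SSE2NEON_ prefix. */
+++#define SSE2NEON_EXAMPLE_ALIGNMENT 16
+++
+++/* Internal helper functions carry the _sse2neon_ prefix, in lower case. */
+++static inline int _sse2neon_example_is_aligned(const void *p)
+++{
+++    return ((uintptr_t) p % SSE2NEON_EXAMPLE_ALIGNMENT) == 0;
+++}
+++```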
+++
+++## Coding Style for Modern C
+++
+++This coding style is a variation of the K&R style. Some general principles: honor tradition, but accept progress; be consistent;
+++embrace the latest C standards; embrace modern compilers, their static analysis
+++capabilities and sanitizers.
+++
+++### Indentation
+++
+++Use 4 spaces rather than tabs.
+++
+++### Line length
+++
+++All lines should generally be within 80 characters. Wrap long lines.
+++There are some good reasons behind this:
+++* It forces the developer to write more succinct code;
+++* Humans are better at processing information in smaller quantity portions;
+++* It helps users of vi/vim (and potentially other editors) who use vertical splits.
+++
+++### Comments
+++
+++Multi-line comments shall have the opening and closing characters
+++on separate lines, with the lines containing the content prefixed by a space
+++and the `*` characters for alignment, e.g.,
+++```c
+++/*
+++ * This is a multi-line comment.
+++ */
+++
+++/* One line comment. */
+++```
+++
+++Use multi-line comments for more elaborate descriptions or before a more
+++significant logical block of code.
+++
+++Single-line comments shall be written in C89 style:
+++```c
+++ return (uintptr_t) val; /* return a bitfield */
+++```
+++
+++Leave two spaces between the statement and the inline comment.
+++
+++### Spacing and brackets
+++
+++Use one space after the conditional or loop keyword, no spaces around
+++their brackets, and one space before the opening curly bracket.
+++
+++Functions (their declarations or calls), `sizeof` operator or similar
+++macros shall not have a space after their name/keyword or around the
+++brackets, e.g.,
+++```c
+++unsigned total_len = offsetof(obj_t, items[n]);
+++unsigned obj_len = sizeof(obj_t);
+++```
+++
+++Use brackets to avoid ambiguity and with operators such as `sizeof`,
+++but otherwise avoid redundant or excessive brackets.
+++
+++### Variable names and declarations
+++
+++- Use descriptive names for global variables and short names for locals.
+++Find the right balance between descriptive and succinct.
+++
+++- Use [snakecase](https://en.wikipedia.org/wiki/Snake_case).
+++Do not use "camelcase".
+++
+++- Do not use Hungarian notation or other unnecessary prefixing or suffixing.
+++
+++- Use the following spacing for pointers:
+++```c
+++const char *name; /* const pointer; '*' with the name and space before it */
+++conf_t * const cfg; /* pointer to a const data; spaces around 'const' */
+++const uint8_t * const charmap; /* const pointer and const data */
+++const void * restrict key; /* const pointer which does not alias */
+++```
+++
+++### Type definitions
+++
+++Declarations shall be on the same line, e.g.,
+++```c
+++typedef void (*dir_iter_t)(void *, const char *, struct dirent *);
+++```
+++
+++_Typedef_ structures rather than pointers. Note that structures can be kept
+++opaque if they are not dereferenced outside the translation unit where they
+++are defined. Pointers can be _typedefed_ only if there is a very compelling
+++reason.
+++
+++New types may be suffixed with `_t`. Structure name, when used within the
+++translation unit, may be omitted, e.g.:
+++
+++```c
+++typedef struct {
+++ unsigned if_index;
+++ unsigned addr_len;
+++ addr_t next_hop;
+++} route_info_t;
+++```
+++
+++### Initialization
+++
+++Embrace C99 structure initialization where reasonable, e.g.,
+++```c
+++static const crypto_ops_t openssl_ops = {
+++ .create = openssl_crypto_create,
+++ .destroy = openssl_crypto_destroy,
+++ .encrypt = openssl_crypto_encrypt,
+++ .decrypt = openssl_crypto_decrypt,
+++ .hmac = openssl_crypto_hmac,
+++};
+++```
+++
+++Embrace C99 array initialization, especially for the state machines, e.g.,
+++```c
+++static const uint8_t tcp_fsm[TCP_NSTATES][2][TCPFC_COUNT] = {
+++ [TCPS_CLOSED] = {
+++ [FLOW_FORW] = {
+++ /* Handshake (1): initial SYN. */
+++ [TCPFC_SYN] = TCPS_SYN_SENT,
+++ },
+++ },
+++ ...
+++};
+++```
+++
+++### Control structures
+++
+++Try to make the control flow easy to follow. Avoid long convoluted logic
+++expressions; try to split them where possible (into inline functions,
+++separate if-statements, etc).
+++
+++The control structure keyword and the expression in the brackets should be
+++separated by a single space. The opening curly bracket shall be in the
+++same line, also separated by a single space. Example:
+++
+++```c
+++ for (;;) {
+++ obj = get_first();
+++ while ((obj = get_next(obj))) {
+++ ...
+++ }
+++ if (done)
+++ break;
+++ }
+++```
+++
+++Do not add inner spaces around the brackets. There should be one space after
+++the semicolon when `for` has expressions:
+++```c
+++ for (unsigned i = 0; i < __arraycount(items); i++) {
+++ ...
+++ }
+++```
+++
+++#### Avoid unnecessary nesting levels
+++
+++Avoid:
+++```c
+++int inspect(obj_t *obj)
+++{
+++ if (cond) {
+++ ...
+++ /* long code block */
+++ ...
+++ return 0;
+++ }
+++ return -1;
+++}
+++```
+++
+++Consider:
+++```c
+++int inspect(obj_t *obj)
+++{
+++ if (!cond)
+++ return -1;
+++
+++ ...
+++ return 0;
+++}
+++```
+++
+++However, do not make logic more convoluted.
+++
+++### `if` statements
+++
+++Curly brackets and spacing follow the K&R style:
+++```c
+++ if (a == b) {
+++ ..
+++ } else if (a < b) {
+++ ...
+++ } else {
+++ ...
+++ }
+++```
+++
+++Simple and succinct one-line if-statements may omit curly brackets:
+++```c
+++ if (!valid)
+++ return -1;
+++```
+++
+++However, do prefer curly brackets with multi-line or more complex statements.
+++If one branch uses curly brackets, then all other branches shall use the
+++curly brackets too.
+++
+++Wrap long conditions to the if-statement indentation, adding an extra 4 spaces:
+++```c
+++ if (some_long_expression &&
+++ another_expression) {
+++ ...
+++ }
+++```
+++
+++#### Avoid redundant `else`
+++
+++Avoid:
+++```c
+++ if (flag & F_FEATURE_X) {
+++ ...
+++ return 0;
+++ } else {
+++ return -1;
+++ }
+++```
+++
+++Consider:
+++```c
+++ if (flag & F_FEATURE_X) {
+++ ...
+++ return 0;
+++ }
+++ return -1;
+++```
+++
+++### `switch` statements
+++
+++Switch statements should have the `case` blocks at the same indentation
+++level, e.g.:
+++```c
+++ switch (expr) {
+++ case A:
+++ ...
+++ break;
+++ case B:
+++ /* fallthrough */
+++ case C:
+++ ...
+++ break;
+++ }
+++```
+++
+++If the case block does not break, then it is strongly recommended to add a
+++comment containing "fallthrough" to indicate it. Modern compilers can also
+++be configured to require such a comment (see gcc `-Wimplicit-fallthrough`).
+++
+++### Function definitions
+++
+++The opening and closing curly brackets of a function body shall each be on a separate line (K&R style).
+++
+++```c
+++ssize_t hex_write(FILE *stream, const void *buf, size_t len)
+++{
+++ ...
+++}
+++```
+++
+++Do not use old-style K&R C function definitions.
+++
+++### Object abstraction
+++
+++Objects are often "simulated" by C programmers with a `struct` and
+++its "public API". To enforce the information hiding principle, it is a
+++good idea to define the structure in the source file (translation unit)
+++and provide only the _declaration_ in the header. For example, `obj.c`:
+++
+++```c
+++#include "obj.h"
+++
+++struct obj {
+++ int value;
+++};
+++
+++obj_t *obj_create(void)
+++{
+++ return calloc(1, sizeof(obj_t));
+++}
+++
+++void obj_destroy(obj_t *obj)
+++{
+++ free(obj);
+++}
+++```
+++
+++With an example `obj.h`:
+++```c
+++#ifndef _OBJ_H_
+++#define _OBJ_H_
+++
+++typedef struct obj obj_t;
+++
+++obj_t *obj_create(void);
+++void obj_destroy(obj_t *);
+++
+++#endif
+++```
+++
+++Such structuring will prevent direct access to the `obj_t` members outside
+++the `obj.c` source file. The implementation (of such a "class" or "module")
+++may be large and abstracted within separate source files. In such a case,
+++consider separating structures and "methods" into separate headers (think of
+++different visibility), for example `obj_impl.h` (private) and `obj.h` (public).
+++
+++Consider `crypto_impl.h`:
+++```c
+++#ifndef _CRYPTO_IMPL_H_
+++#define _CRYPTO_IMPL_H_
+++
+++#if !defined(__CRYPTO_PRIVATE)
+++#error "only to be used by the crypto modules"
+++#endif
+++
+++#include "crypto.h"
+++
+++struct crypto {
+++ crypto_cipher_t cipher;
+++ void *key;
+++ size_t key_len;
+++ ...
+++};
+++...
+++
+++#endif
+++```
+++
+++And `crypto.h` (public API):
+++
+++```c
+++#ifndef _CRYPTO_H_
+++#define _CRYPTO_H_
+++
+++typedef struct crypto crypto_t;
+++
+++crypto_t *crypto_create(crypto_cipher_t);
+++void crypto_destroy(crypto_t *);
+++...
+++
+++#endif
+++```
+++
+++### Use reasonable types
+++
+++Use `unsigned` for general iterators; use `size_t` for general sizes; use
+++`ssize_t` to return a size that may also signal an error. Of course, consider
+++possible overflows.
+++
+++Avoid using `uint8_t` or `uint16_t` or other sub-word types for general
+++iterators and similar cases, unless programming for micro-controllers or
+++other constrained environments.
+++
+++C has rather peculiar _type promotion rules_, and unnecessary use of sub-word
+++types can occasionally introduce subtle bugs.
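+++
+++A minimal sketch (hypothetical helper) applying these rules:
+++```c
+++#include <limits.h>     /* SSIZE_MAX (POSIX) */
+++#include <stddef.h>     /* size_t */
+++#include <sys/types.h>  /* ssize_t */
+++
+++/* size_t for sizes, unsigned for the iterator, ssize_t for a
+++ * "size or -1 on error" return value. */
+++static ssize_t sum_sizes(const size_t *sizes, unsigned count)
+++{
+++    size_t total = 0;
+++
+++    for (unsigned i = 0; i < count; i++) {
+++        if (sizes[i] > (size_t) SSIZE_MAX - total)
+++            return -1; /* would overflow the return type */
+++        total += sizes[i];
+++    }
+++    return (ssize_t) total;
+++}
+++```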
+++
+++### Embrace portability
+++
+++#### Byte-order
+++
+++Do not assume an x86 or little-endian architecture. Use endian conversion
+++functions when operating on on-disk and on-the-wire structures, or in other
+++cases where it is appropriate.
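+++
+++A minimal sketch (hypothetical wire format) using the standard conversion
+++functions:
+++```c
+++#include <arpa/inet.h>  /* htonl(), ntohl() */
+++#include <stdint.h>
+++#include <string.h>
+++
+++/* Store a 32-bit length in network byte order (big-endian),
+++ * independently of the host CPU's endianness. */
+++static void put_len(uint8_t *wire, uint32_t len)
+++{
+++    const uint32_t be = htonl(len);
+++    memcpy(wire, &be, sizeof(be));
+++}
+++
+++static uint32_t get_len(const uint8_t *wire)
+++{
+++    uint32_t be;
+++    memcpy(&be, wire, sizeof(be));
+++    return ntohl(be);
+++}
+++```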
+++
+++#### Types
+++
+++- Do not assume a particular 32-bit vs 64-bit architecture, e.g., do not
+++assume the size of `long` or `unsigned long`. Use `int64_t` or `uint64_t`
+++for the 8-byte integers.
+++
+++- Do not assume `char` is signed; for example, on Arm it is unsigned.
+++
+++- Use C99 macros for constant prefixes or formatting of the fixed-width
+++types.
+++
+++Use:
+++```c
+++#define SOME_CONSTANT (UINT64_C(1) << 48)
+++printf("val %" PRIu64 "\n", SOME_CONSTANT);
+++```
+++
+++Do not use:
+++```c
+++#define SOME_CONSTANT (1ULL << 48)
+++printf("val %lld\n", SOME_CONSTANT);
+++```
+++
+++#### Avoid unaligned access
+++
+++Do not assume unaligned access is safe. It is not safe on Arm, POWER,
+++and various other architectures. Moreover, even on x86 unaligned access
+++is slower.
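+++
+++A minimal sketch: read a 64-bit value from an arbitrary buffer offset with
+++`memcpy()` instead of casting the pointer, letting the compiler emit loads
+++that are safe for the target:
+++```c
+++#include <stdint.h>
+++#include <string.h>
+++
+++static uint64_t load_u64(const void *p)
+++{
+++    uint64_t v;
+++    memcpy(&v, p, sizeof(v));
+++    return v;
+++}
+++```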
+++
+++#### Avoid extreme portability
+++
+++Unless programming for micro-controllers or exotic CPU architectures,
+++focus on the common denominator of modern CPU architectures and avoid
+++maximum portability where it would make the code unnecessarily cumbersome.
+++
+++Some examples:
+++- It is fair to assume `sizeof(int) == 4` since it is the case on all modern
+++mainstream architectures. PDP-11 era is long gone.
+++- Using `1U` instead of `UINT32_C(1)` or `(uint32_t) 1` is also fine.
+++- It is fair to assume that `NULL` matches `(uintptr_t) 0` and it is fair
+++to `memset()` structures with zero. Non-zero `NULL` is for retro computing.
+++
+++## References
+++- [Linux kernel coding style](https://www.kernel.org/doc/html/latest/process/coding-style.html)
+++- 1999, Brian W. Kernighan and Rob Pike, The Practice of Programming, Addison–Wesley.
+++- 1993, Bill Shannon, [C Style and Coding Standards for SunOS](https://devnull-cz.github.io/unix-linux-prog-in-c/cstyle.ms.pdf)
--- /dev/null
--- /dev/null
--- /dev/null
+++MIT License
+++
+++Copyright (c) 2015-2023 SSE2NEON Contributors
+++
+++Permission is hereby granted, free of charge, to any person obtaining a copy
+++of this software and associated documentation files (the "Software"), to deal
+++in the Software without restriction, including without limitation the rights
+++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+++copies of the Software, and to permit persons to whom the Software is
+++furnished to do so, subject to the following conditions:
+++
+++The above copyright notice and this permission notice shall be included in all
+++copies or substantial portions of the Software.
+++
+++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+++SOFTWARE.
--- /dev/null
--- /dev/null
--- /dev/null
+++ifndef CC
+++override CC = gcc
+++endif
+++
+++ifndef CXX
+++override CXX = g++
+++endif
+++
+++ifndef CROSS_COMPILE
+++ processor := $(shell uname -m)
+++else # CROSS_COMPILE was set
+++ CC = $(CROSS_COMPILE)gcc
+++ CXX = $(CROSS_COMPILE)g++
+++ CXXFLAGS += -static
+++ LDFLAGS += -static
+++ check_arm := $(shell echo | $(CROSS_COMPILE)cpp -dM - | grep " __ARM_ARCH " | cut -c20-)
+++ ifeq ($(check_arm),8)
+++ processor = aarch64
+++ else ifeq ($(check_arm),7) # detect ARMv7-A only
+++ processor = arm
+++ else
+++ $(error Unsupported cross-compiler)
+++ endif
+++endif
+++
+++EXEC_WRAPPER =
+++ifdef CROSS_COMPILE
+++EXEC_WRAPPER = qemu-$(processor)
+++endif
+++
+++# Follow platform-specific configurations
+++ARCH_CFLAGS ?=
+++ARCH_CFLAGS_IS_SET =
+++ifeq ($(ARCH_CFLAGS),)
+++ ARCH_CFLAGS_IS_SET = true
+++endif
+++ifeq ($(ARCH_CFLAGS),none)
+++ ARCH_CFLAGS_IS_SET = true
+++endif
+++ifdef ARCH_CFLAGS_IS_SET
+++ ifeq ($(processor),$(filter $(processor),aarch64 arm64))
+++ override ARCH_CFLAGS := -march=armv8-a+fp+simd
+++ else ifeq ($(processor),$(filter $(processor),i386 x86_64))
+++ override ARCH_CFLAGS := -maes -mpclmul -mssse3 -msse4.2
+++ else ifeq ($(processor),$(filter $(processor),arm armv7 armv7l))
+++ override ARCH_CFLAGS := -mfpu=neon
+++ else
+++ $(error Unsupported architecture)
+++ endif
+++endif
+++
+++FEATURE ?=
+++ifneq ($(FEATURE),)
+++ifneq ($(FEATURE),none)
+++COMMA:= ,
+++ARCH_CFLAGS := $(ARCH_CFLAGS)+$(subst $(COMMA),+,$(FEATURE))
+++endif
+++endif
+++
+++CXXFLAGS += -Wall -Wcast-qual -I. $(ARCH_CFLAGS) -std=gnu++14
+++LDFLAGS += -lm
+++OBJS = \
+++ tests/binding.o \
+++ tests/common.o \
+++ tests/impl.o \
+++ tests/main.o
+++deps := $(OBJS:%.o=%.o.d)
+++
+++.SUFFIXES: .o .cpp
+++.cpp.o:
+++ $(CXX) -o $@ $(CXXFLAGS) -c -MMD -MF $@.d $<
+++
+++EXEC = tests/main
+++
+++$(EXEC): $(OBJS)
+++ $(CXX) $(LDFLAGS) -o $@ $^
+++
+++check: tests/main
+++ifeq ($(processor),$(filter $(processor),aarch64 arm64 arm armv7l))
+++ $(CC) $(ARCH_CFLAGS) -c sse2neon.h
+++endif
+++ $(EXEC_WRAPPER) $^
+++
+++indent:
+++ @echo "Formatting files with clang-format.."
+++ @if ! hash clang-format-12; then echo "clang-format-12 is required to indent"; fi
+++ clang-format-12 -i sse2neon.h tests/*.cpp tests/*.h
+++
+++.PHONY: clean check indent
+++clean:
+++ $(RM) $(OBJS) $(EXEC) $(deps) sse2neon.h.gch
+++
+++-include $(deps)
--- /dev/null
--- /dev/null
--- /dev/null
+++# sse2neon
+++
+++
+++A C/C++ header file that converts Intel SSE intrinsics to Arm/Aarch64 NEON intrinsics.
+++
+++## Introduction
+++
+++`sse2neon` is a translator of Intel SSE (Streaming SIMD Extensions) intrinsics
+++to [Arm NEON](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon),
+++shortening the time needed to get an Arm working program that then can be used to
+++extract profiles and to identify hot paths in the code.
+++The header file `sse2neon.h` contains several of the functions provided by Intel
+++intrinsic headers such as `<xmmintrin.h>`, only implemented with NEON-based counterparts
+++to produce the exact semantics of the intrinsics.
+++
+++## Mapping and Coverage
+++
+++Header file | Extension |
+++---|---|
+++`<mmintrin.h>` | MMX |
+++`<xmmintrin.h>` | SSE |
+++`<emmintrin.h>` | SSE2 |
+++`<pmmintrin.h>` | SSE3 |
+++`<tmmintrin.h>` | SSSE3 |
+++`<smmintrin.h>` | SSE4.1 |
+++`<nmmintrin.h>` | SSE4.2 |
+++`<wmmintrin.h>` | AES |
+++
+++`sse2neon` aims to support the SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2 and AES extensions.
+++
+++While `sse2neon` aims to deliver NEON equivalents for all widely used SSE intrinsics,
+++please be aware that only some SSE intrinsics have a direct mapping to a concrete
+++NEON intrinsic. Others, unfortunately, lack a 1:1 mapping, meaning that
+++their equivalents are built from a number of NEON intrinsics.
+++
+++For example, SSE intrinsic `_mm_loadu_si128` has a direct NEON mapping (`vld1q_s32`),
+++but SSE intrinsic `_mm_maddubs_epi16` has to be implemented with 13+ NEON instructions.
+++
+++### Floating-point compatibility
+++
+++Some conversions require several NEON intrinsics, which may produce inconsistent results
+++compared to their SSE counterparts due to differences in the arithmetic rules of IEEE-754.
+++
+++Taking a possible conversion of `_mm_rsqrt_ps` as example:
+++
+++```c
+++__m128 _mm_rsqrt_ps(__m128 in)
+++{
+++ float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+++
+++ out = vmulq_f32(
+++ out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+++
+++ return vreinterpretq_m128_f32(out);
+++}
+++```
+++
+++This `_mm_rsqrt_ps` conversion will produce NaN if a source value is `0.0` (the
+++reciprocal square-root estimate of `0.0` is first INF, and the refinement step then
+++computes INF * `0.0` using `vmulq_f32`). In contrast,
+++the SSE counterpart produces INF if a source value is `0.0`.
+++As a result, additional treatment should be applied to ensure consistency between the conversion and its SSE counterpart.
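+++
+++As an illustration only (this is not the code shipped in `sse2neon.h`), one
+++possible treatment is to detect zero lanes up front and force the SSE result
+++(INF) for them after the refinement step:
+++```c
+++#include <math.h>       /* INFINITY */
+++#include "sse2neon.h"   /* __m128 and the vreinterpretq_* helpers */
+++
+++static __m128 rsqrt_with_zero_fixup(__m128 in)
+++{
+++    float32x4_t v = vreinterpretq_f32_m128(in);
+++    uint32x4_t zero_mask = vceqq_f32(v, vdupq_n_f32(0.0f));
+++    float32x4_t est = vrsqrteq_f32(v);
+++
+++    /* One Newton-Raphson refinement, as in the conversion above. */
+++    est = vmulq_f32(est, vrsqrtsq_f32(vmulq_f32(v, est), est));
+++    /* Replace lanes whose input was 0.0 with INF, matching SSE. */
+++    est = vbslq_f32(zero_mask, vdupq_n_f32(INFINITY), est);
+++    return vreinterpretq_m128_f32(est);
+++}
+++```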
+++
+++## Usage
+++
+++- Put the file `sse2neon.h` into your source code directory.
+++
+++- Locate the following SSE header files included in the code:
+++```C
+++#include <xmmintrin.h>
+++#include <emmintrin.h>
+++```
+++ `{p,t,s,n,w}mmintrin.h` can be replaced as well.
+++
+++- Replace them with:
+++```C
+++#include "sse2neon.h"
+++```
+++
+++- Explicitly specify platform-specific options to gcc/clang compilers (a complete minimal sketch follows this list).
+++ * On ARMv8-A 64-bit targets, you should specify the following compiler option: (Remove `crypto` and/or `crc` if your architecture does not support cryptographic and/or CRC32 extensions)
+++ ```shell
+++ -march=armv8-a+fp+simd+crypto+crc
+++ ```
+++ * On ARMv8-A 32-bit targets, you should specify the following compiler option:
+++ ```shell
+++ -mfpu=neon-fp-armv8
+++ ```
+++ * On ARMv7-A targets, you need to append the following compiler option:
+++ ```shell
+++ -mfpu=neon
+++ ```
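+++
+++As a complete minimal sketch (hypothetical file `demo.c`, built on an AArch64
+++target with something like `gcc -O2 -march=armv8-a+fp+simd demo.c -o demo`):
+++```c
+++#include <stdio.h>
+++#include "sse2neon.h" /* instead of <xmmintrin.h> / <emmintrin.h> */
+++
+++int main(void)
+++{
+++    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
+++    __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);
+++    __m128 sum = _mm_add_ps(a, b); /* backed by NEON on Arm */
+++
+++    float out[4];
+++    _mm_storeu_ps(out, sum);
+++    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
+++    return 0;
+++}
+++```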
+++
+++## Compile-time Configurations
+++
+++Although floating-point operations in NEON use the IEEE single-precision format, NEON does not fully comply with the IEEE standard when inputs or results are denormal or NaN values, in order to minimize power consumption and maximize performance.
+++Considering the balance between correctness and performance, `sse2neon` recognizes the following compile-time configurations:
+++* `SSE2NEON_PRECISE_MINMAX`: Enable the precise implementation of `_mm_min_{ps,pd}` and `_mm_max_{ps,pd}`. Enable it if you need results consistent with SSE, e.g. for NaN handling.
+++* `SSE2NEON_PRECISE_DIV` (deprecated): Enable the precise implementation of `_mm_rcp_ps` and `_mm_div_ps` with an additional Newton-Raphson iteration for accuracy.
+++* `SSE2NEON_PRECISE_SQRT` (deprecated): Enable the precise implementation of `_mm_sqrt_ps` and `_mm_rsqrt_ps` with an additional Newton-Raphson iteration for accuracy.
+++* `SSE2NEON_PRECISE_DP`: Enable the precise implementation of `_mm_dp_pd`. When the conditional bit is not set, the corresponding multiplication will not be executed.
+++
+++The above are turned off by default, and you should define the corresponding macro(s) as `1` before including `sse2neon.h` if you need the precise implementations.
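+++
+++For example, to request the precise min/max behaviour (configuration names as
+++listed above):
+++```c
+++/* must come before the include */
+++#define SSE2NEON_PRECISE_MINMAX 1
+++#include "sse2neon.h"
+++```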
+++
+++## Run Built-in Test Suite
+++
+++`sse2neon` provides a unified interface for developing test cases. These test
+++cases are located in the `tests` directory, and the input data is specified at
+++runtime. Use the following command to run the test cases:
+++```shell
+++$ make check
+++```
+++
+++To run the tests with additional features enabled, assign them via the `FEATURE` variable.
+++If `none` is assigned, the command behaves the same as plain `make check`.
+++The following command enables the `crypto` and `crc` features in the tests.
+++```
+++$ make FEATURE=crypto+crc check
+++```
+++
+++To run the tests for a certain CPU, to set the FPU mode, etc.,
+++you can also assign the desired options via the `ARCH_CFLAGS` variable.
+++If `none` is assigned, the command behaves the same as plain `make check`.
+++For instance, to run the tests for a Cortex-A53 with the ARM VFPv4 extension and NEON enabled:
+++```
+++$ make ARCH_CFLAGS="-mcpu=cortex-a53 -mfpu=neon-vfpv4" check
+++```
+++
+++### Running tests on hosts other than an Arm platform
+++
+++To run the tests on a host other than an Arm platform,
+++you can specify a GNU toolchain for cross-compilation via the `CROSS_COMPILE` variable.
+++[QEMU](https://www.qemu.org/) should be installed in advance.
+++
+++For ARMv8-A running in 64-bit mode, type:
+++```shell
+++$ make CROSS_COMPILE=aarch64-linux-gnu- check # ARMv8-A
+++```
+++
+++For ARMv7-A, type:
+++```shell
+++$ make CROSS_COMPILE=arm-linux-gnueabihf- check # ARMv7-A
+++```
+++
+++For ARMv8-A running in 32-bit mode (A32 instruction set), type:
+++```shell
+++$ make \
+++ CROSS_COMPILE=arm-linux-gnueabihf- \
+++ ARCH_CFLAGS="-mcpu=cortex-a32 -mfpu=neon-fp-armv8" \
+++ check
+++```
+++
+++Check the details via [Test Suite for SSE2NEON](tests/README.md).
+++
+++## Adoptions
+++Here is a partial list of open source projects that have adopted `sse2neon` for Arm/Aarch64 support.
+++* [Aaru Data Preservation Suite](https://www.aaru.app/) is a fully-featured software package to preserve all storage media from the very old to the cutting edge, as well as to give detailed information about any supported image file (whether from Aaru or not) and to extract the files from those images.
+++* [aether-game-utils](https://github.com/johnhues/aether-game-utils) is a collection of cross platform utilities for quickly creating small game prototypes in C++.
+++* [ALE](https://github.com/sc932/ALE), aka Assembly Likelihood Evaluation, is a tool for evaluating accuracy of assemblies without the need of a reference genome.
+++* [AnchorWave](https://github.com/baoxingsong/AnchorWave), Anchored Wavefront Alignment, identifies collinear regions via conserved anchors (full-length CDS and full-length exon have been implemented currently) and breaks collinear regions into shorter fragments, i.e., anchor and inter-anchor intervals.
+++* [ATAK-CIV](https://github.com/deptofdefense/AndroidTacticalAssaultKit-CIV), Android Tactical Assault Kit for Civilian Use, is the official geospatial-temporal and situational awareness tool used by the US Government.
+++* [Apache Doris](https://doris.apache.org/) is a Massively Parallel Processing (MPP) based interactive SQL data warehousing for reporting and analysis.
+++* [Apache Impala](https://impala.apache.org/) is a lightning-fast, distributed SQL queries for petabytes of data stored in Apache Hadoop clusters.
+++* [Apache Kudu](https://kudu.apache.org/) completes Hadoop's storage layer to enable fast analytics on fast data.
+++* [ares](https://github.com/ares-emulator/ares) is a cross-platform, open source, multi-system emulator, focusing on accuracy and preservation.
+++* [ART](https://github.com/dinosaure/art) is an implementation in OCaml of [Adaptive Radix Tree](https://db.in.tum.de/~leis/papers/ART.pdf) (ART).
+++* [Async](https://github.com/romange/async) is a set of c++ primitives that allows efficient and rapid development in C++17 on GNU/Linux systems.
+++* [avec](https://github.com/unevens/avec) is a little library for using SIMD instructions on both x86 and Arm.
+++* [BEAGLE](https://github.com/beagle-dev/beagle-lib) is a high-performance library that can perform the core calculations at the heart of most Bayesian and Maximum Likelihood phylogenetics packages.
+++* [BitMagic](https://github.com/tlk00/BitMagic) implements compressed bit-vectors and containers (vectors) based on ideas of bit-slicing transform and Rank-Select compression, offering sets of methods to architect your applications to use HPC techniques to save memory (thus be able to fit more data in one compute unit) and improve storage and traffic patterns when storing data vectors and models in files or object stores.
+++* [bipartite\_motif\_finder](https://github.com/soedinglab/bipartite_motif_finder), also known as BMF (Bipartite Motif Finder), is an open source tool for finding co-occurrences of sequence motifs in genomic sequences.
+++* [Blender](https://www.blender.org/) is the free and open source 3D creation suite, supporting the entirety of the 3D pipeline.
+++* [Boo](https://github.com/AxioDL/boo) is a cross-platform windowing and event manager similar to SDL or SFML, with additional 3D rendering functionality.
+++* [Brickworks](https://github.com/sdangelo/brickworks) is a music DSP toolkit that supplies with the fundamental building blocks for creating and enhancing audio engines on any platform.
+++* [CARTA](https://github.com/CARTAvis/carta-backend) is a new visualization tool designed for viewing radio astronomy images in CASA, FITS, MIRIAD, and HDF5 formats (using the IDIA custom schema for HDF5).
+++* [Catcoon](https://github.com/i-evi/catcoon) is a [feedforward neural network](https://en.wikipedia.org/wiki/Feedforward_neural_network) implementation in C.
+++* [compute-runtime](https://github.com/intel/compute-runtime), the Intel Graphics Compute Runtime for oneAPI Level Zero and OpenCL Driver, provides compute API support (Level Zero, OpenCL) for Intel graphics hardware architectures (HD Graphics, Xe).
+++* [contour](https://github.com/contour-terminal/contour) is a modern and actually fast virtual terminal emulator.
+++* [Cog](https://github.com/losnoco/Cog) is a free and open source audio player for macOS.
+++* [dab-cmdline](https://github.com/JvanKatwijk/dab-cmdline) provides entries for the functionality to handle Digital audio broadcasting (DAB)/DAB+ through some simple calls.
+++* [DISTRHO](https://distrho.sourceforge.io/) is an open-source project for Cross-Platform Audio Plugins.
+++* [Dragonfly](https://github.com/dragonflydb/dragonfly) is a modern in-memory datastore, fully compatible with Redis and Memcached APIs.
+++* [EDGE](https://github.com/3dfxdev/EDGE) is an advanced OpenGL source port spawned from the DOOM engine, with focus on easy development and expansion for modders and end-users.
+++* [Embree](https://github.com/embree/embree) is a collection of high-performance ray tracing kernels. Its target users are graphics application engineers who want to improve the performance of their photo-realistic rendering application by leveraging Embree's performance-optimized ray tracing kernels.
+++* [emp-tool](https://github.com/emp-toolkit/emp-tool) aims to provide a benchmark for secure computation and allowing other researchers to experiment and extend.
+++* [Exudyn](https://github.com/jgerstmayr/EXUDYN) is a C++ based Python library for efficient simulation of flexible multibody dynamics systems.
+++* [FoundationDB](https://www.foundationdb.org) is a distributed database designed to handle large volumes of structured data across clusters of commodity servers.
+++* [fsrc](https://github.com/elsamuko/fsrc) is capable of searching large codebases for text snippets.
+++* [gmmlib](https://github.com/intel/gmmlib) is the Intel Graphics Memory Management Library that provides device specific and buffer management for the Intel Graphics Compute Runtime for OpenCL and the Intel Media Driver for VAAPI.
+++* [HISE](https://github.com/christophhart/HISE) is a cross-platform open source audio application for building virtual instruments, emphasizing on sampling, but includes some basic synthesis features for making hybrid instruments as well as audio effects.
+++* [iqtree2](https://github.com/iqtree/iqtree2) is an efficient and versatile stochastic implementation to infer phylogenetic trees by maximum likelihood.
+++* [indelPost](https://github.com/stjude/indelPost) is a Python library for indel processing via realignment and read-based phasing to resolve alignment ambiguities.
+++* [IResearch](https://github.com/iresearch-toolkit/iresearch) is a cross-platform, high-performance document oriented search engine library written entirely in C++ with the focus on a pluggability of different ranking/similarity models.
+++* [Kraken](https://github.com/Wabi-Studios/Kraken) is a 3D animation platform redefining animation composition, collaborative workflows, simulation engines, skeletal rigging systems, and look development from storyboard to final render.
+++* [kram](https://github.com/alecazam/kram) is a wrapper to several popular encoders to and from PNG/[KTX](https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/) files with [LDR/HDR and BC/ASTC/ETC2](https://developer.arm.com/solutions/graphics-and-gaming/developer-guides/learn-the-basics/adaptive-scalable-texture-compression/single-page).
+++* [Krita](https://invent.kde.org/graphics/krita) is a cross-platform application that offers an end-to-end solution for creating digital art files from scratch built on the KDE and Qt frameworks.
+++* [libCML](https://github.com/belosthomas/libCML) is a SLAM library and scientific tool, which include a novel fast thread-safe graph map implementation.
+++* [libhdfs3](https://github.com/ClickHouse/libhdfs3) is implemented based on the native Hadoop RPC protocol and the HDFS (Hadoop Distributed File System, a highly fault-tolerant distributed filesystem) data transfer protocol.
+++* [libpostal](https://github.com/openvenues/libpostal) is a C library for parsing/normalizing street addresses around the world using statistical NLP and open data.
+++* [libscapi](https://github.com/cryptobiu/libscapi) stands for the "Secure Computation API", providing reliable, efficient, and highly flexible cryptographic infrastructure.
+++* [libstreamvbyte](https://github.com/wst24365888/libstreamvbyte) is a C++ implementation of [StreamVByte](https://arxiv.org/abs/1709.08990).
+++* [libmatoya](https://github.com/matoya/libmatoya) is a cross-platform application development library, providing various features such as common cryptography tasks.
+++* [Loosejaw](https://github.com/TheHolyDiver/Loosejaw) provides deep hybrid CPU/GPU digital signal processing.
+++* [Madronalib](https://github.com/madronalabs/madronalib) enables efficient audio DSP on SIMD processors with readable and brief C++ code.
+++* [minimap2](https://github.com/lh3/minimap2) is a versatile sequence alignment program that aligns DNA or mRNA sequences against a large reference database.
+++* [mixed-fem](https://github.com/tytrusty/mixed-fem) is an open source reference implementation of Mixed Variational Finite Elements for Implicit Simulation of Deformables.
+++* [MMseqs2](https://github.com/soedinglab/MMseqs2) (Many-against-Many sequence searching) is a software suite to search and cluster huge protein and nucleotide sequence sets.
+++* [MRIcroGL](https://github.com/rordenlab/MRIcroGL) is a cross-platform tool for viewing NIfTI, DICOM, MGH, MHD, NRRD, AFNI format medical images.
+++* [N2](https://github.com/oddconcepts/n2o) is an approximate nearest neighbor algorithm library written in C++, providing a much faster search speed than other implementations when modeling large datasets.
+++* [nanors](https://github.com/sleepybishop/nanors) is a tiny, performant implementation of [Reed-Solomon codes](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction), capable of reaching multi-gigabit speeds on a single core.
+++* [niimath](https://github.com/rordenlab/niimath) is a general image calculator with superior performance.
+++* [NVIDIA GameWorks](https://developer.nvidia.com/gameworks-source-github) has already been used in a lot of games. These repositories are public on GitHub.
+++* [Nx Meta Platform Open Source Components](https://github.com/networkoptix/nx_open) are used to build all Powered-by-Nx products including Nx Witness Video Management System (VMS).
+++* [ofxNDI](https://github.com/leadedge/ofxNDI) is an [openFrameworks](https://openframeworks.cc/) addon to allow sending and receiving images over a network using the [NewTek](https://en.wikipedia.org/wiki/NewTek) Network Device Protocol.
+++* [OGRE](https://github.com/OGRECave/ogre) is a scene-oriented, flexible 3D engine written in C++ designed to make it easier and more intuitive for developers to produce games and demos utilising 3D hardware.
+++* [Olive](https://github.com/olive-editor/olive) is a free non-linear video editor for Windows, macOS, and Linux.
+++* [OpenColorIO](https://github.com/AcademySoftwareFoundation/OpenColorIO) is a complete color management solution geared towards motion picture production with an emphasis on visual effects and computer animation.
+++* [OpenXRay](https://github.com/OpenXRay/xray-16) is an improved version of the X-Ray engine, used in the world-famous S.T.A.L.K.E.R. game series by GSC Game World.
+++* [parallel-n64](https://github.com/libretro/parallel-n64) is an optimized/rewritten Nintendo 64 emulator made specifically for [Libretro](https://www.libretro.com/).
+++* [Pathfinder C++](https://github.com/floppyhammer/pathfinder-cpp) is a fast, practical, GPU-based rasterizer for fonts and vector graphics using Vulkan and C++.
+++* [PFFFT](https://github.com/marton78/pffft) does 1D Fast Fourier Transforms, of single precision real and complex vectors.
+++* [pixaccess](https://github.com/oliverue/pixaccess) provides the abstractions for integer and float bitmaps, pixels, and aliased (nearest neighbor) and anti-aliased (bi-linearly interpolated) pixel access.
+++* [PlutoSDR Firmware](https://github.com/seanstone/plutosdr-fw) is the customized firmware for the [PlutoSDR](https://wiki.analog.com/university/tools/pluto) that can be used to introduce fundamentals of Software Defined Radio (SDR), Radio Frequency (RF), or Communications as advanced topics in electrical engineering in a self-guided or instructor-led setting.
+++* [PowerToys](https://github.com/microsoft/PowerToys) is a set of utilities for power users to tune and streamline their Windows experience for greater productivity.
+++* [Pygame](https://www.pygame.org) is cross-platform and designed to make it easy to write multimedia software, such as games, in Python.
+++* [R:RandomFieldsUtils](https://cran.r-project.org/web/packages/RandomFieldsUtils) provides various utilities that might be used in spatial statistics and elsewhere (CRAN).
+++* [ReHLDS](https://github.com/gennadykataev/rehlds) is fully compatible with the latest Half-Life Dedicated Server (HLDS), with a lot of defects and (potential) bugs fixed.
+++* [rkcommon](https://github.com/ospray/rkcommon) represents a common set of C++ infrastructure and CMake utilities used by various components of [Intel oneAPI Rendering Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/rendering-toolkit.html).
+++* [RPCS3](https://github.com/RPCS3/rpcs3) is the world's first free and open-source PlayStation 3 emulator/debugger, written in C++.
+++* [simd\_utils](https://github.com/JishinMaster/simd_utils) is a header-only library implementing common mathematical functions using SIMD intrinsics.
+++* [Sire](https://github.com/OpenBioSim/sire) is a molecular modelling framework that provides extensive functionality to manipulate representations of biomolecular systems.
+++* [SMhasher](https://github.com/rurban/smhasher) provides comprehensive Hash function quality and speed tests.
+++* [SNN++](https://github.com/ianmkim/snnpp) implements a single-layer non-linear Spiking Neural Network for image classification and generation.
+++* [Spack](https://github.com/spack/spack) is a multi-platform package manager that builds and installs multiple versions and configurations of software.
+++* [SRA](https://github.com/ncbi/sra-tools) is a collection of tools and libraries for using data in the [INSDC Sequence Read Archives](https://www.ncbi.nlm.nih.gov/sra/docs/).
+++* [srsLTE](https://github.com/srsLTE/srsLTE) is an open source SDR LTE software suite.
+++* [SSW](https://github.com/mengyao/Complete-Striped-Smith-Waterman-Library) is a fast implementation of the [Smith-Waterman algorithm](https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm), which uses the SIMD instructions to parallelize the algorithm at the instruction level.
+++* [Surge](https://github.com/surge-synthesizer/surge) is an open source digital synthesizer.
+++* [The Forge](https://github.com/ConfettiFX/The-Forge) is a cross-platform rendering framework, providing building blocks to write your own game engine.
+++* [Typesense](https://github.com/typesense/typesense) is a fast, typo-tolerant search engine for building delightful search experiences.
+++* [Vcpkg](https://github.com/microsoft/vcpkg) is a C++ Library Manager for Windows, Linux, and macOS.
+++* [VelocyPack](https://github.com/arangodb/velocypack) is a fast and compact format for serialization and storage.
+++* [VOLK](https://github.com/gnuradio/volk), Vector-Optimized Library of Kernels, is a sub-project of [GNU Radio](https://www.gnuradio.org/).
+++* [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) is a machine learning system which pushes the frontier of machine learning with techniques such as online, hashing, allreduce, reductions, learning2search, active, and interactive learning.
+++* [Winter](https://github.com/rosenthj/Winter) is the top rated chess engine from Switzerland and has competed at top invite only computer chess events.
+++* [XEVE](https://github.com/mpeg5/xeve) (eXtra-fast Essential Video Encoder) is an open sourced and fast MPEG-5 EVC encoder.
+++* [XMRig](https://github.com/xmrig/xmrig) is an open source CPU miner for [Monero](https://web.getmonero.org/) cryptocurrency.
+++* [xsimd](https://github.com/xtensor-stack/xsimd) provides a unified means for using SIMD intrinsics and parallelized, optimized mathematical functions.
+++* [YACL](https://github.com/secretflow/yasl) is a C++ library contains modules and utilities which [SecretFlow](https://github.com/secretflow) code depends on.
+++
+++## Related Projects
+++* [SIMDe](https://github.com/simd-everywhere/simde): fast and portable implementations of SIMD
+++ intrinsics on hardware which doesn't natively support them, such as calling SSE functions on ARM.
+++* [CatBoost's sse2neon](https://github.com/catboost/catboost/blob/master/library/cpp/sse/sse2neon.h)
+++* [ARM\_NEON\_2\_x86\_SSE](https://github.com/intel/ARM_NEON_2_x86_SSE)
+++* [AvxToNeon](https://github.com/kunpengcompute/AvxToNeon)
+++* [sse2rvv](https://github.com/FeddrickAquino/sse2rvv): C header file that converts Intel SSE intrinsics to RISC-V Vector intrinsics.
+++* [sse2msa](https://github.com/i-evi/sse2msa): A C/C++ header file that converts Intel SSE intrinsics to MIPS/MIPS64 MSA intrinsics.
+++* [sse2zig](https://github.com/aqrit/sse2zig): Intel SSE intrinsics mapped to [Zig](https://ziglang.org/) vector extensions.
+++* [POWER/PowerPC support for GCC](https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000) contains a series of headers simplifying porting x86\_64 code that makes explicit use of Intel intrinsics to powerpc64le (pure little-endian mode that has been introduced with the [POWER8](https://en.wikipedia.org/wiki/POWER8)).
+++ - implementation: [xmmintrin.h](https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/xmmintrin.h), [emmintrin.h](https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/emmintrin.h), [pmmintrin.h](https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/pmmintrin.h), [tmmintrin.h](https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/tmmintrin.h), [smmintrin.h](https://github.com/gcc-mirror/gcc/blob/master/gcc/config/rs6000/smmintrin.h)
+++
+++## Reference
+++* [Intel Intrinsics Guide](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html)
+++* [Microsoft: x86 intrinsics list](https://learn.microsoft.com/en-us/cpp/intrinsics/x86-intrinsics-list)
+++* [Arm Neon Intrinsics Reference](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/intrinsics)
+++* [Neon Programmer's Guide for Armv8-A](https://developer.arm.com/architectures/instruction-sets/simd-isas/neon/neon-programmers-guide-for-armv8-a)
+++* [NEON Programmer's Guide](https://static.docs.arm.com/den0018/a/DEN0018A_neon_programmers_guide_en.pdf)
+++* [qemu/target/i386/ops\_sse.h](https://github.com/qemu/qemu/blob/master/target/i386/ops_sse.h): Comprehensive SSE instruction emulation in C. Ideal for semantic checks.
+++* [Porting Takua Renderer to 64-bit ARM- Part 1](https://blog.yiningkarlli.com/2021/05/porting-takua-to-arm-pt1.html)
+++* [Porting Takua Renderer to 64-bit ARM- Part 2](https://blog.yiningkarlli.com/2021/07/porting-takua-to-arm-pt2.html)
+++* [Comparing SIMD on x86-64 and arm64](https://blog.yiningkarlli.com/2021/09/neon-vs-sse.html)
+++* [Port with SSE2Neon and SIMDe](https://developer.arm.com/documentation/102581/0200/Port-with-SSE2Neon-and-SIMDe)
+++* [Genomics: Optimizing the BWA aligner for Arm Servers](https://community.arm.com/arm-community-blogs/b/high-performance-computing-blog/posts/optimizing-genomics-and-the-bwa-aligner-for-arm-servers)
+++* [Bit twiddling with Arm Neon: beating SSE movemasks, counting bits and more](https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon)
+++* [C/C++ on Graviton](https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md)
+++* [Tune graphics-intensive games for Apple silicon](https://developer.apple.com/games/planning/)
+++* [Benchmarking and Testing of Qualcomm Snapdragon System-on-Chip for JPL Space Applications and Missions](https://ieeexplore.ieee.org/abstract/document/9843518)
+++
+++## Licensing
+++
+++`sse2neon` is freely redistributable under the MIT License.
--- /dev/null
--- /dev/null
--- /dev/null
+++#ifndef SSE2NEON_H
+++#define SSE2NEON_H
+++
+++// This header file provides a simple API translation layer
+++// between SSE intrinsics and their corresponding Arm/Aarch64 NEON versions
+++//
+++// Contributors to this work are:
+++// John W. Ratcliff <jratcliffscarab@gmail.com>
+++// Brandon Rowlett <browlett@nvidia.com>
+++// Ken Fast <kfast@gdeb.com>
+++// Eric van Beurden <evanbeurden@nvidia.com>
+++// Alexander Potylitsin <apotylitsin@nvidia.com>
+++// Hasindu Gamaarachchi <hasindu2008@gmail.com>
+++// Jim Huang <jserv@ccns.ncku.edu.tw>
+++// Mark Cheng <marktwtn@gmail.com>
+++// Malcolm James MacLeod <malcolm@gulden.com>
+++// Devin Hussey (easyaspi314) <husseydevin@gmail.com>
+++// Sebastian Pop <spop@amazon.com>
+++// Developer Ecosystem Engineering <DeveloperEcosystemEngineering@apple.com>
+++// Danila Kutenin <danilak@google.com>
+++// François Turban (JishinMaster) <francois.turban@gmail.com>
+++// Pei-Hsuan Hung <afcidk@gmail.com>
+++// Yang-Hao Yuan <yuanyanghau@gmail.com>
+++// Syoyo Fujita <syoyo@lighttransport.com>
+++// Brecht Van Lommel <brecht@blender.org>
+++// Jonathan Hue <jhue@adobe.com>
+++// Cuda Chen <clh960524@gmail.com>
+++// Aymen Qader <aymen.qader@arm.com>
+++// Anthony Roberts <anthony.roberts@linaro.org>
+++
+++/*
+++ * sse2neon is freely redistributable under the MIT License.
+++ *
+++ * Permission is hereby granted, free of charge, to any person obtaining a copy
+++ * of this software and associated documentation files (the "Software"), to deal
+++ * in the Software without restriction, including without limitation the rights
+++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+++ * copies of the Software, and to permit persons to whom the Software is
+++ * furnished to do so, subject to the following conditions:
+++ *
+++ * The above copyright notice and this permission notice shall be included in
+++ * all copies or substantial portions of the Software.
+++ *
+++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+++ * SOFTWARE.
+++ */
+++
+++/* Tunable configurations */
+++
+++/* Enable precise implementation of math operations
+++ * This would slow down the computation a bit, but gives results consistent with
+++ * x86 SSE (e.g. it would solve a hole or NaN pixel in the rendering result).
+++ */
+++/* _mm_min|max_ps|ss|pd|sd */
+++#ifndef SSE2NEON_PRECISE_MINMAX
+++#define SSE2NEON_PRECISE_MINMAX (0)
+++#endif
+++/* _mm_rcp_ps and _mm_div_ps */
+++#ifndef SSE2NEON_PRECISE_DIV
+++#define SSE2NEON_PRECISE_DIV (0)
+++#endif
+++/* _mm_sqrt_ps and _mm_rsqrt_ps */
+++#ifndef SSE2NEON_PRECISE_SQRT
+++#define SSE2NEON_PRECISE_SQRT (0)
+++#endif
+++/* _mm_dp_pd */
+++#ifndef SSE2NEON_PRECISE_DP
+++#define SSE2NEON_PRECISE_DP (0)
+++#endif
+++
+++/* Enable inclusion of windows.h on MSVC platforms
+++ * This makes _mm_clflush functional on windows, as there is no builtin.
+++ */
+++#ifndef SSE2NEON_INCLUDE_WINDOWS_H
+++#define SSE2NEON_INCLUDE_WINDOWS_H (0)
+++#endif
+++
+++/* compiler specific definitions */
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma push_macro("FORCE_INLINE")
+++#pragma push_macro("ALIGN_STRUCT")
+++#define FORCE_INLINE static inline __attribute__((always_inline))
+++#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
+++#define _sse2neon_likely(x) __builtin_expect(!!(x), 1)
+++#define _sse2neon_unlikely(x) __builtin_expect(!!(x), 0)
+++#elif defined(_MSC_VER)
+++#if _MSVC_TRADITIONAL
+++#error Using the traditional MSVC preprocessor is not supported! Use /Zc:preprocessor instead.
+++#endif
+++#ifndef FORCE_INLINE
+++#define FORCE_INLINE static inline
+++#endif
+++#ifndef ALIGN_STRUCT
+++#define ALIGN_STRUCT(x) __declspec(align(x))
+++#endif
+++#define _sse2neon_likely(x) (x)
+++#define _sse2neon_unlikely(x) (x)
+++#else
+++#pragma message("Macro name collisions may happen with unsupported compilers.")
+++#endif
+++
+++#if defined(__GNUC__) && __GNUC__ < 10
+++#warning "GCC versions earlier than 10 are not supported."
+++#endif
+++
+++/* C language does not allow initializing a variable with a function call. */
+++#ifdef __cplusplus
+++#define _sse2neon_const static const
+++#else
+++#define _sse2neon_const const
+++#endif
+++
+++#include <stdint.h>
+++#include <stdlib.h>
+++
+++#if defined(_WIN32)
+++/* Definitions for _mm_{malloc,free} are provided by <malloc.h>
+++ * from both MinGW-w64 and MSVC.
+++ */
+++#define SSE2NEON_ALLOC_DEFINED
+++#endif
+++
+++/* If using MSVC */
+++#ifdef _MSC_VER
+++#include <intrin.h>
+++#if SSE2NEON_INCLUDE_WINDOWS_H
+++#include <processthreadsapi.h>
+++#include <windows.h>
+++#endif
+++
+++#if !defined(__cplusplus)
+++#error SSE2NEON only supports C++ compilation with this compiler
+++#endif
+++
+++#ifdef SSE2NEON_ALLOC_DEFINED
+++#include <malloc.h>
+++#endif
+++
+++#if (defined(_M_AMD64) || defined(__x86_64__)) || \
+++ (defined(_M_ARM64) || defined(__arm64__))
+++#define SSE2NEON_HAS_BITSCAN64
+++#endif
+++#endif
+++
+++#if defined(__GNUC__) || defined(__clang__)
+++#define _sse2neon_define0(type, s, body) \
+++ __extension__({ \
+++ type _a = (s); \
+++ body \
+++ })
+++#define _sse2neon_define1(type, s, body) \
+++ __extension__({ \
+++ type _a = (s); \
+++ body \
+++ })
+++#define _sse2neon_define2(type, a, b, body) \
+++ __extension__({ \
+++ type _a = (a), _b = (b); \
+++ body \
+++ })
+++#define _sse2neon_return(ret) (ret)
+++#else
+++#define _sse2neon_define0(type, a, body) [=](type _a) { body }(a)
+++#define _sse2neon_define1(type, a, body) [](type _a) { body }(a)
+++#define _sse2neon_define2(type, a, b, body) \
+++ [](type _a, type _b) { body }((a), (b))
+++#define _sse2neon_return(ret) return ret
+++#endif
+++
+++#define _sse2neon_init(...) \
+++ { \
+++ __VA_ARGS__ \
+++ }
+++
+++/* Compiler barrier */
+++#if defined(_MSC_VER)
+++#define SSE2NEON_BARRIER() _ReadWriteBarrier()
+++#else
+++#define SSE2NEON_BARRIER() \
+++ do { \
+++ __asm__ __volatile__("" ::: "memory"); \
+++ (void) 0; \
+++ } while (0)
+++#endif
+++
+++/* Memory barriers
+++ * __atomic_thread_fence does not include a compiler barrier; instead,
+++ * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
+++ * semantics.
+++ */
+++#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+++#include <stdatomic.h>
+++#endif
+++
+++FORCE_INLINE void _sse2neon_smp_mb(void)
+++{
+++ SSE2NEON_BARRIER();
+++#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
+++ !defined(__STDC_NO_ATOMICS__)
+++ atomic_thread_fence(memory_order_seq_cst);
+++#elif defined(__GNUC__) || defined(__clang__)
+++ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+++#else /* MSVC */
+++ __dmb(_ARM64_BARRIER_ISH);
+++#endif
+++}
+++
+++/* Architecture-specific build options */
+++/* FIXME: #pragma GCC push_options is only available on GCC */
+++#if defined(__GNUC__)
+++#if defined(__arm__) && __ARM_ARCH == 7
+++/* According to ARM C Language Extensions Architecture specification,
+++ * __ARM_NEON is defined to a value indicating the Advanced SIMD (NEON)
+++ * architecture supported.
+++ */
+++#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
+++#error "You must enable NEON instructions (e.g. -mfpu=neon) to use SSE2NEON."
+++#endif
+++#if !defined(__clang__)
+++#pragma GCC push_options
+++#pragma GCC target("fpu=neon")
+++#endif
+++#elif defined(__aarch64__) || defined(_M_ARM64)
+++#if !defined(__clang__) && !defined(_MSC_VER)
+++#pragma GCC push_options
+++#pragma GCC target("+simd")
+++#endif
+++#elif __ARM_ARCH == 8
+++#if !defined(__ARM_NEON) || !defined(__ARM_NEON__)
+++#error \
+++ "You must enable NEON instructions (e.g. -mfpu=neon-fp-armv8) to use SSE2NEON."
+++#endif
+++#if !defined(__clang__) && !defined(_MSC_VER)
+++#pragma GCC push_options
+++#endif
+++#else
+++#error "Unsupported target. Must be either ARMv7-A+NEON or ARMv8-A."
+++#endif
+++#endif
+++
+++#include <arm_neon.h>
+++#if (!defined(__aarch64__) && !defined(_M_ARM64)) && (__ARM_ARCH == 8)
+++#if defined __has_include && __has_include(<arm_acle.h>)
+++#include <arm_acle.h>
+++#endif
+++#endif
+++
+++/* Apple Silicon cache lines are double the size of what is commonly used by
+++ * Intel, AMD and other Arm microarchitectures.
+++ * From sysctl -a on Apple M1:
+++ * hw.cachelinesize: 128
+++ */
+++#if defined(__APPLE__) && (defined(__aarch64__) || defined(__arm64__))
+++#define SSE2NEON_CACHELINE_SIZE 128
+++#else
+++#define SSE2NEON_CACHELINE_SIZE 64
+++#endif
+++
+++/* Rounding functions require either Aarch64 instructions or libm fallback */
+++#if !defined(__aarch64__) && !defined(_M_ARM64)
+++#include <math.h>
+++#endif
+++
+++/* On ARMv7, some registers, such as PMUSERENR and PMCCNTR, are read-only
+++ * or even not accessible in user mode.
+++ * To write to or access these registers in user mode,
+++ * we have to perform a syscall instead.
+++ */
+++#if (!defined(__aarch64__) && !defined(_M_ARM64))
+++#include <sys/time.h>
+++#endif
+++
+++/* "__has_builtin" can be used to query support for built-in functions
+++ * provided by gcc/clang and other compilers that support it.
+++ */
+++#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
+++/* Compatibility with gcc <= 9 */
+++#if defined(__GNUC__) && (__GNUC__ <= 9)
+++#define __has_builtin(x) HAS##x
+++#define HAS__builtin_popcount 1
+++#define HAS__builtin_popcountll 1
+++
+++// __builtin_shuffle introduced in GCC 4.7.0
+++#if (__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7))
+++#define HAS__builtin_shuffle 1
+++#else
+++#define HAS__builtin_shuffle 0
+++#endif
+++
+++#define HAS__builtin_shufflevector 0
+++#define HAS__builtin_nontemporal_store 0
+++#else
+++#define __has_builtin(x) 0
+++#endif
+++#endif
+++
+++/**
+++ * MACRO for shuffle parameter for _mm_shuffle_ps().
+++ * Argument fp3 is a digit[0123] that represents the fp from argument "b"
+++ * of mm_shuffle_ps that will be placed in fp3 of result. fp2 is the same
+++ * for fp2 in result. fp1 is a digit[0123] that represents the fp from
+++ * argument "a" of mm_shuffle_ps that will be places in fp1 of result.
+++ * fp0 is the same for fp0 of result.
+++ */
+++#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
+++ (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
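+++/* Illustrative note (not part of the upstream header): for instance,
+++ * _MM_SHUFFLE(3, 2, 1, 0) evaluates to 0xE4; used as
+++ * _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0)), the result takes lanes 0-1
+++ * from 'a' and lanes 2-3 from 'b', each kept at its original index.
+++ */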
+++
+++#if __has_builtin(__builtin_shufflevector)
+++#define _sse2neon_shuffle(type, a, b, ...) \
+++ __builtin_shufflevector(a, b, __VA_ARGS__)
+++#elif __has_builtin(__builtin_shuffle)
+++#define _sse2neon_shuffle(type, a, b, ...) \
+++ __extension__({ \
+++ type tmp = {__VA_ARGS__}; \
+++ __builtin_shuffle(a, b, tmp); \
+++ })
+++#endif
+++
+++#ifdef _sse2neon_shuffle
+++#define vshuffle_s16(a, b, ...) _sse2neon_shuffle(int16x4_t, a, b, __VA_ARGS__)
+++#define vshuffleq_s16(a, b, ...) _sse2neon_shuffle(int16x8_t, a, b, __VA_ARGS__)
+++#define vshuffle_s32(a, b, ...) _sse2neon_shuffle(int32x2_t, a, b, __VA_ARGS__)
+++#define vshuffleq_s32(a, b, ...) _sse2neon_shuffle(int32x4_t, a, b, __VA_ARGS__)
+++#define vshuffle_s64(a, b, ...) _sse2neon_shuffle(int64x1_t, a, b, __VA_ARGS__)
+++#define vshuffleq_s64(a, b, ...) _sse2neon_shuffle(int64x2_t, a, b, __VA_ARGS__)
+++#endif
+++
+++/* Rounding mode macros. */
+++#define _MM_FROUND_TO_NEAREST_INT 0x00
+++#define _MM_FROUND_TO_NEG_INF 0x01
+++#define _MM_FROUND_TO_POS_INF 0x02
+++#define _MM_FROUND_TO_ZERO 0x03
+++#define _MM_FROUND_CUR_DIRECTION 0x04
+++#define _MM_FROUND_NO_EXC 0x08
+++#define _MM_FROUND_RAISE_EXC 0x00
+++#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+++#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+++#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+++#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+++#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+++#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
+++#define _MM_ROUND_NEAREST 0x0000
+++#define _MM_ROUND_DOWN 0x2000
+++#define _MM_ROUND_UP 0x4000
+++#define _MM_ROUND_TOWARD_ZERO 0x6000
+++/* Flush zero mode macros. */
+++#define _MM_FLUSH_ZERO_MASK 0x8000
+++#define _MM_FLUSH_ZERO_ON 0x8000
+++#define _MM_FLUSH_ZERO_OFF 0x0000
+++/* Denormals are zeros mode macros. */
+++#define _MM_DENORMALS_ZERO_MASK 0x0040
+++#define _MM_DENORMALS_ZERO_ON 0x0040
+++#define _MM_DENORMALS_ZERO_OFF 0x0000
+++
+++/* indicate immediate constant argument in a given range */
+++#define __constrange(a, b) const
+++
+++/* A few intrinsics accept traditional data types like ints or floats, but
+++ * most operate on data types that are specific to SSE.
+++ * If a vector type ends in d, it contains doubles, and if it does not have
+++ * a suffix, it contains floats. An integer vector type can contain any type
+++ * of integer, from chars to shorts to unsigned long longs.
+++ */
+++typedef int64x1_t __m64;
+++typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
+++// On the 32-bit Arm architecture, float64x2_t is not supported.
+++// The data type __m128d therefore has to be represented in a different way for
+++// the related intrinsic conversions.
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
+++#else
+++typedef float32x4_t __m128d;
+++#endif
+++typedef int64x2_t __m128i; /* 128-bit vector containing integers */
+++
+++// __int64 is defined in the Intrinsics Guide, and it maps to a different data
+++// type in different data models.
+++#if !(defined(_WIN32) || defined(_WIN64) || defined(__int64))
+++#if (defined(__x86_64__) || defined(__i386__))
+++#define __int64 long long
+++#else
+++#define __int64 int64_t
+++#endif
+++#endif
+++
+++/* type-safe casting between types */
+++
+++#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
+++#define vreinterpretq_m128_f32(x) (x)
+++#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)
+++
+++#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
+++#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
+++#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
+++#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)
+++
+++#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
+++#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
+++#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
+++#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)
+++
+++#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
+++#define vreinterpretq_f32_m128(x) (x)
+++#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)
+++
+++#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
+++#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
+++#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
+++#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)
+++
+++#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
+++#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
+++#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
+++#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)
+++
+++#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
+++#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
+++#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
+++#define vreinterpretq_m128i_s64(x) (x)
+++
+++#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
+++#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
+++#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
+++#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)
+++
+++#define vreinterpretq_f32_m128i(x) vreinterpretq_f32_s64(x)
+++#define vreinterpretq_f64_m128i(x) vreinterpretq_f64_s64(x)
+++
+++#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
+++#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
+++#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
+++#define vreinterpretq_s64_m128i(x) (x)
+++
+++#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
+++#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
+++#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
+++#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)
+++
+++#define vreinterpret_m64_s8(x) vreinterpret_s64_s8(x)
+++#define vreinterpret_m64_s16(x) vreinterpret_s64_s16(x)
+++#define vreinterpret_m64_s32(x) vreinterpret_s64_s32(x)
+++#define vreinterpret_m64_s64(x) (x)
+++
+++#define vreinterpret_m64_u8(x) vreinterpret_s64_u8(x)
+++#define vreinterpret_m64_u16(x) vreinterpret_s64_u16(x)
+++#define vreinterpret_m64_u32(x) vreinterpret_s64_u32(x)
+++#define vreinterpret_m64_u64(x) vreinterpret_s64_u64(x)
+++
+++#define vreinterpret_m64_f16(x) vreinterpret_s64_f16(x)
+++#define vreinterpret_m64_f32(x) vreinterpret_s64_f32(x)
+++#define vreinterpret_m64_f64(x) vreinterpret_s64_f64(x)
+++
+++#define vreinterpret_u8_m64(x) vreinterpret_u8_s64(x)
+++#define vreinterpret_u16_m64(x) vreinterpret_u16_s64(x)
+++#define vreinterpret_u32_m64(x) vreinterpret_u32_s64(x)
+++#define vreinterpret_u64_m64(x) vreinterpret_u64_s64(x)
+++
+++#define vreinterpret_s8_m64(x) vreinterpret_s8_s64(x)
+++#define vreinterpret_s16_m64(x) vreinterpret_s16_s64(x)
+++#define vreinterpret_s32_m64(x) vreinterpret_s32_s64(x)
+++#define vreinterpret_s64_m64(x) (x)
+++
+++#define vreinterpret_f32_m64(x) vreinterpret_f32_s64(x)
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++#define vreinterpretq_m128d_s32(x) vreinterpretq_f64_s32(x)
+++#define vreinterpretq_m128d_s64(x) vreinterpretq_f64_s64(x)
+++
+++#define vreinterpretq_m128d_u64(x) vreinterpretq_f64_u64(x)
+++
+++#define vreinterpretq_m128d_f32(x) vreinterpretq_f64_f32(x)
+++#define vreinterpretq_m128d_f64(x) (x)
+++
+++#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f64(x)
+++
+++#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f64(x)
+++#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f64(x)
+++
+++#define vreinterpretq_f64_m128d(x) (x)
+++#define vreinterpretq_f32_m128d(x) vreinterpretq_f32_f64(x)
+++#else
+++#define vreinterpretq_m128d_s32(x) vreinterpretq_f32_s32(x)
+++#define vreinterpretq_m128d_s64(x) vreinterpretq_f32_s64(x)
+++
+++#define vreinterpretq_m128d_u32(x) vreinterpretq_f32_u32(x)
+++#define vreinterpretq_m128d_u64(x) vreinterpretq_f32_u64(x)
+++
+++#define vreinterpretq_m128d_f32(x) (x)
+++
+++#define vreinterpretq_s64_m128d(x) vreinterpretq_s64_f32(x)
+++
+++#define vreinterpretq_u32_m128d(x) vreinterpretq_u32_f32(x)
+++#define vreinterpretq_u64_m128d(x) vreinterpretq_u64_f32(x)
+++
+++#define vreinterpretq_f32_m128d(x) (x)
+++#endif
+++
+++// A struct is defined in this header file called 'SIMDVec' which can be used
+++// by applications which attempt to access the contents of an __m128 struct
+++// directly. It is important to note that accessing the __m128 struct directly
+++// is bad coding practice by Microsoft: @see:
+++// https://learn.microsoft.com/en-us/cpp/cpp/m128
+++//
+++// However, some legacy source code may try to access the contents of an __m128
+++// struct directly so the developer can use the SIMDVec as an alias for it. Any
+++// casting must be done manually by the developer, as you cannot cast or
+++// otherwise alias the base NEON data type for intrinsic operations.
+++//
+++// This union is intended to allow direct access to an __m128 variable using the
+++// names that the MSVC compiler provides. It should really only be used when
+++// trying to access the members of the vector as integer values. GCC/clang
+++// allow native access to the float members through a simple array access
+++// operator (in C since 4.6, in C++ since 4.8).
+++//
+++// Ideally direct accesses to SIMD vectors should not be used since it can cause
+++// a performance hit. If it really is needed however, the original __m128
+++// variable can be aliased with a pointer to this union and used to access
+++// individual components. The use of this union should be hidden behind a macro
+++// that is used throughout the codebase to access the members instead of always
+++// declaring this type of variable.
+++typedef union ALIGN_STRUCT(16) SIMDVec {
+++ float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
+++ int8_t m128_i8[16]; // as signed 8-bit integers.
+++ int16_t m128_i16[8]; // as signed 16-bit integers.
+++ int32_t m128_i32[4]; // as signed 32-bit integers.
+++ int64_t m128_i64[2]; // as signed 64-bit integers.
+++ uint8_t m128_u8[16]; // as unsigned 8-bit integers.
+++ uint16_t m128_u16[8]; // as unsigned 16-bit integers.
+++ uint32_t m128_u32[4]; // as unsigned 32-bit integers.
+++ uint64_t m128_u64[2]; // as unsigned 64-bit integers.
+++} SIMDVec;
+++
+++// casting using SIMDVec
+++#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *) &x)->m128_u64[n])
+++#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *) &x)->m128_u32[n])
+++#define vreinterpretq_nth_u8_m128i(x, n) (((SIMDVec *) &x)->m128_u8[n])
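+++// Illustrative sketch (editorial addition, not upstream sse2neon code): given
+++// an __m128i value v, the macros above let a caller read a lane through the
+++// union, e.g.
+++//     __m128i v = _mm_set_epi32(4, 3, 2, 1);
+++//     uint32_t lane2 = vreinterpretq_nth_u32_m128i(v, 2); // == 3
+++// which expands to ((SIMDVec *) &v)->m128_u32[2]. New code should prefer the
+++// proper extraction intrinsics instead of this union-based aliasing.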
+++
+++/* SSE macros */
+++#define _MM_GET_FLUSH_ZERO_MODE _sse2neon_mm_get_flush_zero_mode
+++#define _MM_SET_FLUSH_ZERO_MODE _sse2neon_mm_set_flush_zero_mode
+++#define _MM_GET_DENORMALS_ZERO_MODE _sse2neon_mm_get_denormals_zero_mode
+++#define _MM_SET_DENORMALS_ZERO_MODE _sse2neon_mm_set_denormals_zero_mode
+++
+++// Function declarations
+++// SSE
+++FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void);
+++FORCE_INLINE __m128 _mm_move_ss(__m128, __m128);
+++FORCE_INLINE __m128 _mm_or_ps(__m128, __m128);
+++FORCE_INLINE __m128 _mm_set_ps1(float);
+++FORCE_INLINE __m128 _mm_setzero_ps(void);
+++// SSE2
+++FORCE_INLINE __m128i _mm_and_si128(__m128i, __m128i);
+++FORCE_INLINE __m128i _mm_castps_si128(__m128);
+++FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i, __m128i);
+++FORCE_INLINE __m128i _mm_cvtps_epi32(__m128);
+++FORCE_INLINE __m128d _mm_move_sd(__m128d, __m128d);
+++FORCE_INLINE __m128i _mm_or_si128(__m128i, __m128i);
+++FORCE_INLINE __m128i _mm_set_epi32(int, int, int, int);
+++FORCE_INLINE __m128i _mm_set_epi64x(int64_t, int64_t);
+++FORCE_INLINE __m128d _mm_set_pd(double, double);
+++FORCE_INLINE __m128i _mm_set1_epi32(int);
+++FORCE_INLINE __m128i _mm_setzero_si128(void);
+++// SSE4.1
+++FORCE_INLINE __m128d _mm_ceil_pd(__m128d);
+++FORCE_INLINE __m128 _mm_ceil_ps(__m128);
+++FORCE_INLINE __m128d _mm_floor_pd(__m128d);
+++FORCE_INLINE __m128 _mm_floor_ps(__m128);
+++FORCE_INLINE __m128d _mm_round_pd(__m128d, int);
+++FORCE_INLINE __m128 _mm_round_ps(__m128, int);
+++// SSE4.2
+++FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t, uint8_t);
+++
+++/* Backwards compatibility for compilers with lack of specific type support */
+++
+++// Older gcc does not provide the vld1q_u8_x4 intrinsic
+++#if defined(__GNUC__) && !defined(__clang__) && \
+++ ((__GNUC__ <= 13 && defined(__arm__)) || \
+++ (__GNUC__ == 10 && __GNUC_MINOR__ < 3 && defined(__aarch64__)) || \
+++ (__GNUC__ <= 9 && defined(__aarch64__)))
+++FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
+++{
+++ uint8x16x4_t ret;
+++ ret.val[0] = vld1q_u8(p + 0);
+++ ret.val[1] = vld1q_u8(p + 16);
+++ ret.val[2] = vld1q_u8(p + 32);
+++ ret.val[3] = vld1q_u8(p + 48);
+++ return ret;
+++}
+++#else
+++// Wraps vld1q_u8_x4
+++FORCE_INLINE uint8x16x4_t _sse2neon_vld1q_u8_x4(const uint8_t *p)
+++{
+++ return vld1q_u8_x4(p);
+++}
+++#endif
+++
+++#if !defined(__aarch64__) && !defined(_M_ARM64)
+++/* emulate vaddv u8 variant */
+++FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
+++{
+++ const uint64x1_t v1 = vpaddl_u32(vpaddl_u16(vpaddl_u8(v8)));
+++ return vget_lane_u8(vreinterpret_u8_u64(v1), 0);
+++}
+++#else
+++// Wraps vaddv_u8
+++FORCE_INLINE uint8_t _sse2neon_vaddv_u8(uint8x8_t v8)
+++{
+++ return vaddv_u8(v8);
+++}
+++#endif
+++
+++#if !defined(__aarch64__) && !defined(_M_ARM64)
+++/* emulate vaddvq u8 variant */
+++FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
+++{
+++ uint8x8_t tmp = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
+++ uint8_t res = 0;
+++ for (int i = 0; i < 8; ++i)
+++ res += tmp[i];
+++ return res;
+++}
+++#else
+++// Wraps vaddvq_u8
+++FORCE_INLINE uint8_t _sse2neon_vaddvq_u8(uint8x16_t a)
+++{
+++ return vaddvq_u8(a);
+++}
+++#endif
+++
+++#if !defined(__aarch64__) && !defined(_M_ARM64)
+++/* emulate vaddvq u16 variant */
+++FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
+++{
+++ uint32x4_t m = vpaddlq_u16(a);
+++ uint64x2_t n = vpaddlq_u32(m);
+++ uint64x1_t o = vget_low_u64(n) + vget_high_u64(n);
+++
+++ return vget_lane_u32((uint32x2_t) o, 0);
+++}
+++#else
+++// Wraps vaddvq_u16
+++FORCE_INLINE uint16_t _sse2neon_vaddvq_u16(uint16x8_t a)
+++{
+++ return vaddvq_u16(a);
+++}
+++#endif
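+++// Illustrative sketch (editorial addition, not upstream sse2neon code): both
+++// branches of each polyfill above compute the same full horizontal sum, e.g.
+++//     uint16x8_t v = vdupq_n_u16(3);        // eight lanes of 3
+++//     uint16_t s = _sse2neon_vaddvq_u16(v); // s == 24 on ARMv7 and AArch64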
+++
+++/* Function Naming Conventions
+++ * The naming convention of SSE intrinsics is straightforward. A generic SSE
+++ * intrinsic function is given as follows:
+++ * _mm_<name>_<data_type>
+++ *
+++ * The parts of this format are given as follows:
+++ * 1. <name> describes the operation performed by the intrinsic
+++ * 2. <data_type> identifies the data type of the function's primary arguments
+++ *
+++ * This last part, <data_type>, is a little complicated. It identifies the
+++ * content of the input values, and can be set to any of the following values:
+++ * + ps - vectors contain floats (ps stands for packed single-precision)
+++ * + pd - vectors contain doubles (pd stands for packed double-precision)
+++ * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
+++ * signed integers
+++ * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
+++ * unsigned integers
+++ * + si128 - unspecified 128-bit vector
+++ * + m128/m128i/m128d - identifies input vector types when they are different
+++ * than the type of the returned vector
+++ *
+++ * For example, _mm_setzero_ps. The _mm implies that the function returns
+++ * a 128-bit vector. The _ps at the end implies that the argument vectors
+++ * contain floats.
+++ *
+++ * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
+++ * // Set packed 16-bit integers: 128 bits, 8 shorts, 16 bits each
+++ * __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
+++ * // Set packed 8-bit integers: 128 bits, 16 chars, 8 bits each
+++ * __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
+++ * 4, 5, 12, 13, 6, 7, 14, 15);
+++ * // Shuffle packed 8-bit integers
+++ * __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
+++ */
+++
+++/* Constants for use with _mm_prefetch. */
+++enum _mm_hint {
+++ _MM_HINT_NTA = 0, /* load data to L1 and L2 cache, mark it as NTA */
+++ _MM_HINT_T0 = 1, /* load data to L1 and L2 cache */
+++ _MM_HINT_T1 = 2, /* load data to L2 cache only */
+++ _MM_HINT_T2 = 3, /* load data to L2 cache only, mark it as NTA */
+++};
+++
+++// The bit field mapping to the FPCR (floating-point control register)
+++typedef struct {
+++ uint16_t res0;
+++ uint8_t res1 : 6;
+++ uint8_t bit22 : 1;
+++ uint8_t bit23 : 1;
+++ uint8_t bit24 : 1;
+++ uint8_t res2 : 7;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint32_t res3;
+++#endif
+++} fpcr_bitfield;
+++
+++// Takes the upper 64 bits of a and places them in the low end of the result;
+++// takes the lower 64 bits of b and places them in the high end of the result.
+++FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
+++{
+++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
+++}
+++
+++// Takes the lower two 32-bit values from a, swaps them, and places them in the
+++// low end of the result; takes the upper two 32-bit values from b, swaps them,
+++// and places them in the high end of the result.
+++FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
+++{
+++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+++ float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
+++ return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
+++{
+++ float32x2_t a21 = vget_high_f32(
+++ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
+++ float32x2_t b03 = vget_low_f32(
+++ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
+++ return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
+++{
+++ float32x2_t a03 = vget_low_f32(
+++ vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
+++ float32x2_t b21 = vget_high_f32(
+++ vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
+++ return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
+++{
+++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
+++{
+++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
+++{
+++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+++ float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
+++ return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
+++}
+++
+++// Keeps the low 64 bits of a in the low end of the result and puts the high 64
+++// bits of b in the high end of the result.
+++FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
+++{
+++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
+++{
+++ float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
+++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+++ return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
+++{
+++ float32x2_t a22 =
+++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
+++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+++ return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
+++{
+++ float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
+++ float32x2_t b22 =
+++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
+++ return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
+++{
+++ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+++ float32x2_t a22 =
+++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
+++ float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip ?*/
+++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
+++{
+++ float32x2_t a33 =
+++ vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
+++ float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
+++ return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
+++{
+++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+++ float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+++ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+++ return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
+++{
+++ float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
+++ float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+++ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+++ return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
+++}
+++
+++FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
+++{
+++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+++ float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
+++ float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
+++ float32x2_t b20 = vset_lane_f32(b2, b00, 1);
+++ return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
+++}
+++
+++// For MSVC, we check only if it is ARM64, as every single ARM64 processor
+++// supported by WoA has crypto extensions. If this changes in the future,
+++// this can be verified via the runtime-only method of:
+++// IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)
+++#if (defined(_M_ARM64) && !defined(__clang__)) || \
+++ (defined(__ARM_FEATURE_CRYPTO) && \
+++ (defined(__aarch64__) || __has_builtin(__builtin_arm_crypto_vmullp64)))
+++// Wraps vmull_p64
+++FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
+++{
+++ poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
+++ poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
+++#if defined(_MSC_VER)
+++ __n64 a1 = {a}, b1 = {b};
+++ return vreinterpretq_u64_p128(vmull_p64(a1, b1));
+++#else
+++ return vreinterpretq_u64_p128(vmull_p64(a, b));
+++#endif
+++}
+++#else // ARMv7 polyfill
+++// ARMv7, and AArch64 targets without the Crypto extension, lack vmull_p64 but
+++// have vmull_p8.
+++//
+++// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
+++// 64-bit->128-bit polynomial multiply.
+++//
+++// It needs some work and is somewhat slow, but it is still faster than all
+++// known scalar methods.
+++//
+++// Algorithm adapted to C from
+++// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
+++// from "Fast Software Polynomial Multiplication on ARM Processors Using the
+++// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
+++// (https://hal.inria.fr/hal-01506572)
+++static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
+++{
+++ poly8x8_t a = vreinterpret_p8_u64(_a);
+++ poly8x8_t b = vreinterpret_p8_u64(_b);
+++
+++ // Masks
+++ uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
+++ vcreate_u8(0x00000000ffffffff));
+++ uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
+++ vcreate_u8(0x0000000000000000));
+++
+++ // Do the multiplies, rotating with vext to get all combinations
+++ uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b)); // D = A0 * B0
+++ uint8x16_t e =
+++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 1))); // E = A0 * B1
+++ uint8x16_t f =
+++ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 1), b)); // F = A1 * B0
+++ uint8x16_t g =
+++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 2))); // G = A0 * B2
+++ uint8x16_t h =
+++ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 2), b)); // H = A2 * B0
+++ uint8x16_t i =
+++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 3))); // I = A0 * B3
+++ uint8x16_t j =
+++ vreinterpretq_u8_p16(vmull_p8(vext_p8(a, a, 3), b)); // J = A3 * B0
+++ uint8x16_t k =
+++ vreinterpretq_u8_p16(vmull_p8(a, vext_p8(b, b, 4))); // K = A0 * B4
+++
+++ // Add cross products
+++ uint8x16_t l = veorq_u8(e, f); // L = E + F
+++ uint8x16_t m = veorq_u8(g, h); // M = G + H
+++ uint8x16_t n = veorq_u8(i, j); // N = I + J
+++
+++ // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
+++ // instructions.
+++#if defined(__aarch64__)
+++ uint8x16_t lm_p0 = vreinterpretq_u8_u64(
+++ vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
+++ uint8x16_t lm_p1 = vreinterpretq_u8_u64(
+++ vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
+++ uint8x16_t nk_p0 = vreinterpretq_u8_u64(
+++ vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
+++ uint8x16_t nk_p1 = vreinterpretq_u8_u64(
+++ vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
+++#else
+++ uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
+++ uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
+++ uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
+++ uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
+++#endif
+++ // t0 = (L) (P0 + P1) << 8
+++ // t1 = (M) (P2 + P3) << 16
+++ uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
+++ uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
+++ uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);
+++
+++ // t2 = (N) (P4 + P5) << 24
+++ // t3 = (K) (P6 + P7) << 32
+++ uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
+++ uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
+++ uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);
+++
+++ // De-interleave
+++#if defined(__aarch64__)
+++ uint8x16_t t0 = vreinterpretq_u8_u64(
+++ vuzp1q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
+++ uint8x16_t t1 = vreinterpretq_u8_u64(
+++ vuzp2q_u64(vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
+++ uint8x16_t t2 = vreinterpretq_u8_u64(
+++ vuzp1q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
+++ uint8x16_t t3 = vreinterpretq_u8_u64(
+++ vuzp2q_u64(vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
+++#else
+++ uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
+++ uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
+++ uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
+++ uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
+++#endif
+++ // Shift the cross products
+++ uint8x16_t t0_shift = vextq_u8(t0, t0, 15); // t0 << 8
+++ uint8x16_t t1_shift = vextq_u8(t1, t1, 14); // t1 << 16
+++ uint8x16_t t2_shift = vextq_u8(t2, t2, 13); // t2 << 24
+++ uint8x16_t t3_shift = vextq_u8(t3, t3, 12); // t3 << 32
+++
+++ // Accumulate the products
+++ uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
+++ uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
+++ uint8x16_t mix = veorq_u8(d, cross1);
+++ uint8x16_t r = veorq_u8(mix, cross2);
+++ return vreinterpretq_u64_u8(r);
+++}
+++#endif // ARMv7 polyfill
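+++// Editorial note (not upstream sse2neon text): on either path above,
+++// _sse2neon_vmull_p64 performs a 64x64 -> 128-bit carry-less (polynomial)
+++// multiplication, the same primitive the x86 PCLMULQDQ-based carry-less
+++// multiply intrinsics are built on.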
+++
+++// C equivalent:
+++// __m128i _mm_shuffle_epi32_default(__m128i a,
+++// __constrange(0, 255) int imm) {
+++// __m128i ret;
+++// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
+++// ret[2] = a[(imm >> 4) & 0x03]; ret[3] = a[(imm >> 6) & 0x03];
+++// return ret;
+++// }
+++#define _mm_shuffle_epi32_default(a, imm) \
+++ vreinterpretq_m128i_s32(vsetq_lane_s32( \
+++ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 6) & 0x3), \
+++ vsetq_lane_s32( \
+++ vgetq_lane_s32(vreinterpretq_s32_m128i(a), ((imm) >> 4) & 0x3), \
+++ vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
+++ ((imm) >> 2) & 0x3), \
+++ vmovq_n_s32(vgetq_lane_s32( \
+++ vreinterpretq_s32_m128i(a), (imm) & (0x3))), \
+++ 1), \
+++ 2), \
+++ 3))
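+++// Illustrative sketch (editorial addition, not upstream sse2neon code): with
+++// imm = 0x1B (binary 00 01 10 11) the macro selects lanes 3, 2, 1, 0 of a,
+++// i.e. it reverses the four 32-bit elements, matching pshufd xmm, xmm, 0x1B.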
+++
+++// Takes the upper 64 bits of a and places them in the low end of the result;
+++// takes the lower 64 bits of a and places them in the high end of the result.
+++FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
+++{
+++ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
+++ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+++ return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
+++}
+++
+++// Takes the lower two 32-bit values from a, swaps them, and places them in the
+++// low end of the result; takes the upper two 32-bit values from a, swaps them,
+++// and places them in the high end of the result.
+++FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
+++{
+++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+++ int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
+++ return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
+++}
+++
+++// rotates the least significant 32 bits into the most significant 32 bits, and
+++// shifts the rest down
+++FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
+++}
+++
+++// rotates the most significant 32 bits into the least significant 32 bits, and
+++// shifts the rest up
+++FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vextq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
+++}
+++
+++// Duplicates the lower 64 bits of a into both the lower and upper 64 bits of
+++// the result.
+++FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
+++{
+++ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+++ return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
+++}
+++
+++// Gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
+++// the low end of the result; places the unswapped lower 64 bits of a in the
+++// high end of the result.
+++FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
+++{
+++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+++ int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
+++ return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
+++}
+++
+++// Gets the lower 64 bits of a, swaps the 0 and 1 elements, and places the
+++// swapped pair in both the low and high ends of the result.
+++FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
+++{
+++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+++ return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
+++}
+++
+++FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
+++{
+++ int32x2_t a11 = vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
+++ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
+++ return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
+++}
+++
+++FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
+++{
+++ int32x2_t a22 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
+++ int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
+++ return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
+++}
+++
+++FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
+++{
+++ int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
+++ int32x2_t a33 = vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
+++ return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
+++}
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++#define _mm_shuffle_epi32_splat(a, imm) \
+++ vreinterpretq_m128i_s32(vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm)))
+++#else
+++#define _mm_shuffle_epi32_splat(a, imm) \
+++ vreinterpretq_m128i_s32( \
+++ vdupq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))))
+++#endif
+++
+++// NEON does not support a general purpose permute intrinsic.
+++// Shuffle single-precision (32-bit) floating-point elements in a using the
+++// control in imm8, and store the results in dst.
+++//
+++// C equivalent:
+++// __m128 _mm_shuffle_ps_default(__m128 a, __m128 b,
+++// __constrange(0, 255) int imm) {
+++// __m128 ret;
+++// ret[0] = a[imm & 0x3]; ret[1] = a[(imm >> 2) & 0x3];
+++// ret[2] = b[(imm >> 4) & 0x03]; ret[3] = b[(imm >> 6) & 0x03];
+++// return ret;
+++// }
+++//
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_ps
+++#define _mm_shuffle_ps_default(a, b, imm) \
+++ vreinterpretq_m128_f32(vsetq_lane_f32( \
+++ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
+++ vsetq_lane_f32( \
+++ vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
+++ vsetq_lane_f32( \
+++ vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
+++ vmovq_n_f32( \
+++ vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3))), \
+++ 1), \
+++ 2), \
+++ 3))
+++
+++// Shuffle 16-bit integers in the low 64 bits of a using the control in imm8.
+++// Store the results in the low 64 bits of dst, with the high 64 bits being
+++// copied from a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16
+++#define _mm_shufflelo_epi16_function(a, imm) \
+++ _sse2neon_define1( \
+++ __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \
+++ int16x4_t lowBits = vget_low_s16(ret); \
+++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), ret, 0); \
+++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, \
+++ 1); \
+++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, \
+++ 2); \
+++ ret = vsetq_lane_s16(vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, \
+++ 3); \
+++ _sse2neon_return(vreinterpretq_m128i_s16(ret));)
+++
+++// Shuffle 16-bit integers in the high 64 bits of a using the control in imm8.
+++// Store the results in the high 64 bits of dst, with the low 64 bits being
+++// copied from a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16
+++#define _mm_shufflehi_epi16_function(a, imm) \
+++ _sse2neon_define1( \
+++ __m128i, a, int16x8_t ret = vreinterpretq_s16_m128i(_a); \
+++ int16x4_t highBits = vget_high_s16(ret); \
+++ ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), ret, 4); \
+++ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, \
+++ 5); \
+++ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, \
+++ 6); \
+++ ret = vsetq_lane_s16(vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, \
+++ 7); \
+++ _sse2neon_return(vreinterpretq_m128i_s16(ret));)
+++
+++/* MMX */
+++
+++// _mm_empty is a no-op on ARM
+++FORCE_INLINE void _mm_empty(void) {}
+++
+++/* SSE */
+++
+++// Add packed single-precision (32-bit) floating-point elements in a and b, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ps
+++FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Add the lower single-precision (32-bit) floating-point element in a and b,
+++// store the result in the lower element of dst, and copy the upper 3 packed
+++// elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_ss
+++FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
+++{
+++ float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
+++ float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
+++ // the upper values in the result must be the remnants of <a>.
+++ return vreinterpretq_m128_f32(vaddq_f32(a, value));
+++}
+++
+++// Compute the bitwise AND of packed single-precision (32-bit) floating-point
+++// elements in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_ps
+++FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_s32(
+++ vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+++}
+++
+++// Compute the bitwise NOT of packed single-precision (32-bit) floating-point
+++// elements in a and then AND with b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_ps
+++FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_s32(
+++ vbicq_s32(vreinterpretq_s32_m128(b),
+++ vreinterpretq_s32_m128(a))); // *NOTE* argument swap
+++}
+++
+++// Average packed unsigned 16-bit integers in a and b, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu16
+++FORCE_INLINE __m64 _mm_avg_pu16(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_u16(
+++ vrhadd_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)));
+++}
+++
+++// Average packed unsigned 8-bit integers in a and b, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_pu8
+++FORCE_INLINE __m64 _mm_avg_pu8(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_u8(
+++ vrhadd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for equality, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ps
+++FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(
+++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for equality, store the result in the lower element of dst, and copy the
+++// upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_ss
+++FORCE_INLINE __m128 _mm_cmpeq_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpeq_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for greater-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ps
+++FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(
+++ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for greater-than-or-equal, store the result in the lower element of dst,
+++// and copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_ss
+++FORCE_INLINE __m128 _mm_cmpge_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpge_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for greater-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ps
+++FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(
+++ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for greater-than, store the result in the lower element of dst, and copy
+++// the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_ss
+++FORCE_INLINE __m128 _mm_cmpgt_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpgt_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for less-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ps
+++FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(
+++ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for less-than-or-equal, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_ss
+++FORCE_INLINE __m128 _mm_cmple_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmple_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for less-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ps
+++FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(
+++ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for less-than, store the result in the lower element of dst, and copy the
+++// upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_ss
+++FORCE_INLINE __m128 _mm_cmplt_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmplt_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for not-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ps
+++FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(vmvnq_u32(
+++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for not-equal, store the result in the lower element of dst, and copy the
+++// upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_ss
+++FORCE_INLINE __m128 _mm_cmpneq_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpneq_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for not-greater-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ps
+++FORCE_INLINE __m128 _mm_cmpnge_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(vmvnq_u32(
+++ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for not-greater-than-or-equal, store the result in the lower element of
+++// dst, and copy the upper 3 packed elements from a to the upper elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_ss
+++FORCE_INLINE __m128 _mm_cmpnge_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpnge_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for not-greater-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ps
+++FORCE_INLINE __m128 _mm_cmpngt_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(vmvnq_u32(
+++ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for not-greater-than, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_ss
+++FORCE_INLINE __m128 _mm_cmpngt_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpngt_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for not-less-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ps
+++FORCE_INLINE __m128 _mm_cmpnle_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(vmvnq_u32(
+++ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for not-less-than-or-equal, store the result in the lower element of dst,
+++// and copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_ss
+++FORCE_INLINE __m128 _mm_cmpnle_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpnle_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// for not-less-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ps
+++FORCE_INLINE __m128 _mm_cmpnlt_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_u32(vmvnq_u32(
+++ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b for not-less-than, store the result in the lower element of dst, and copy
+++// the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_ss
+++FORCE_INLINE __m128 _mm_cmpnlt_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpnlt_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// to see if neither is NaN, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ps
+++//
+++// See also:
+++// http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
+++// http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
+++FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
+++{
+++ // Note: NEON does not have an ordered-compare builtin.
+++ // Compare a == a and b == b to detect a NaN in either operand,
+++ // then AND the results to get the final mask.
+++ uint32x4_t ceqaa =
+++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+++ uint32x4_t ceqbb =
+++ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
+++}
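+++// Illustrative sketch (editorial addition, not upstream sse2neon code),
+++// assuming NAN from <math.h>: a result lane is all-ones exactly when neither
+++// corresponding input lane is NaN, e.g.
+++//     __m128 x = _mm_set_ps(NAN, 3.0f, 2.0f, 1.0f);
+++//     __m128 r = _mm_cmpord_ps(x, x); // lanes 0..2 are all-ones, lane 3 is 0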
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b to see if neither is NaN, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_ss
+++FORCE_INLINE __m128 _mm_cmpord_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpord_ps(a, b));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b
+++// to see if either is NaN, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ps
+++FORCE_INLINE __m128 _mm_cmpunord_ps(__m128 a, __m128 b)
+++{
+++ uint32x4_t f32a =
+++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
+++ uint32x4_t f32b =
+++ vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_u32(vmvnq_u32(vandq_u32(f32a, f32b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b to see if either is NaN, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_ss
+++FORCE_INLINE __m128 _mm_cmpunord_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_cmpunord_ps(a, b));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point element in a and b
+++// for equality, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_ss
+++FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
+++{
+++ uint32x4_t a_eq_b =
+++ vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+++ return vgetq_lane_u32(a_eq_b, 0) & 0x1;
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point element in a and b
+++// for greater-than-or-equal, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_ss
+++FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
+++{
+++ uint32x4_t a_ge_b =
+++ vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+++ return vgetq_lane_u32(a_ge_b, 0) & 0x1;
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point element in a and b
+++// for greater-than, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_ss
+++FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
+++{
+++ uint32x4_t a_gt_b =
+++ vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+++ return vgetq_lane_u32(a_gt_b, 0) & 0x1;
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point element in a and b
+++// for less-than-or-equal, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_ss
+++FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
+++{
+++ uint32x4_t a_le_b =
+++ vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+++ return vgetq_lane_u32(a_le_b, 0) & 0x1;
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point element in a and b
+++// for less-than, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_ss
+++FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
+++{
+++ uint32x4_t a_lt_b =
+++ vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
+++ return vgetq_lane_u32(a_lt_b, 0) & 0x1;
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point element in a and b
+++// for not-equal, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_ss
+++FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
+++{
+++ return !_mm_comieq_ss(a, b);
+++}
+++
+++// Convert packed signed 32-bit integers in b to packed single-precision
+++// (32-bit) floating-point elements, store the results in the lower 2 elements
+++// of dst, and copy the upper 2 packed elements from a to the upper elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_pi2ps
+++FORCE_INLINE __m128 _mm_cvt_pi2ps(__m128 a, __m64 b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
+++ vget_high_f32(vreinterpretq_f32_m128(a))));
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 32-bit integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ps2pi
+++FORCE_INLINE __m64 _mm_cvt_ps2pi(__m128 a)
+++{
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ return vreinterpret_m64_s32(
+++ vget_low_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a)))));
+++#else
+++ return vreinterpret_m64_s32(vcvt_s32_f32(vget_low_f32(
+++ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)))));
+++#endif
+++}
+++
+++// Convert the signed 32-bit integer b to a single-precision (32-bit)
+++// floating-point element, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_si2ss
+++FORCE_INLINE __m128 _mm_cvt_si2ss(__m128 a, int b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
+++}
+++
+++// Convert the lower single-precision (32-bit) floating-point element in a to a
+++// 32-bit integer, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvt_ss2si
+++FORCE_INLINE int _mm_cvt_ss2si(__m128 a)
+++{
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ return vgetq_lane_s32(vcvtnq_s32_f32(vrndiq_f32(vreinterpretq_f32_m128(a))),
+++ 0);
+++#else
+++ float32_t data = vgetq_lane_f32(
+++ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
+++ return (int32_t) data;
+++#endif
+++}
+++
+++// Convert packed 16-bit integers in a to packed single-precision (32-bit)
+++// floating-point elements, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi16_ps
+++FORCE_INLINE __m128 _mm_cvtpi16_ps(__m64 a)
+++{
+++ return vreinterpretq_m128_f32(
+++ vcvtq_f32_s32(vmovl_s16(vreinterpret_s16_m64(a))));
+++}
+++
+++// Convert packed 32-bit integers in b to packed single-precision (32-bit)
+++// floating-point elements, store the results in the lower 2 elements of dst,
+++// and copy the upper 2 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_ps
+++FORCE_INLINE __m128 _mm_cvtpi32_ps(__m128 a, __m64 b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vcombine_f32(vcvt_f32_s32(vreinterpret_s32_m64(b)),
+++ vget_high_f32(vreinterpretq_f32_m128(a))));
+++}
+++
+++// Convert packed signed 32-bit integers in a to packed single-precision
+++// (32-bit) floating-point elements, store the results in the lower 2 elements
+++// of dst, then convert the packed signed 32-bit integers in b to
+++// single-precision (32-bit) floating-point element, and store the results in
+++// the upper 2 elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32x2_ps
+++FORCE_INLINE __m128 _mm_cvtpi32x2_ps(__m64 a, __m64 b)
+++{
+++ return vreinterpretq_m128_f32(vcvtq_f32_s32(
+++ vcombine_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b))));
+++}
+++
+++// Convert the lower packed 8-bit integers in a to packed single-precision
+++// (32-bit) floating-point elements, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi8_ps
+++FORCE_INLINE __m128 _mm_cvtpi8_ps(__m64 a)
+++{
+++ return vreinterpretq_m128_f32(vcvtq_f32_s32(
+++ vmovl_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_m64(a))))));
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 16-bit integers, and store the results in dst. Note: this intrinsic
+++// will generate 0x7FFF, rather than 0x8000, for input values between 0x7FFF and
+++// 0x7FFFFFFF.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi16
+++FORCE_INLINE __m64 _mm_cvtps_pi16(__m128 a)
+++{
+++ return vreinterpret_m64_s16(
+++ vqmovn_s32(vreinterpretq_s32_m128i(_mm_cvtps_epi32(a))));
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 32-bit integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi32
+++#define _mm_cvtps_pi32(a) _mm_cvt_ps2pi(a)
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 8-bit integers, and store the results in the lower 4 elements of dst.
+++// Note: this intrinsic will generate 0x7F, rather than 0x80, for input values
+++// between 0x7F and 0x7FFFFFFF.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pi8
+++FORCE_INLINE __m64 _mm_cvtps_pi8(__m128 a)
+++{
+++ return vreinterpret_m64_s8(vqmovn_s16(
+++ vcombine_s16(vreinterpret_s16_m64(_mm_cvtps_pi16(a)), vdup_n_s16(0))));
+++}
+++
+++// Convert packed unsigned 16-bit integers in a to packed single-precision
+++// (32-bit) floating-point elements, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu16_ps
+++FORCE_INLINE __m128 _mm_cvtpu16_ps(__m64 a)
+++{
+++ return vreinterpretq_m128_f32(
+++ vcvtq_f32_u32(vmovl_u16(vreinterpret_u16_m64(a))));
+++}
+++
+++// Convert the lower packed unsigned 8-bit integers in a to packed
+++// single-precision (32-bit) floating-point elements, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpu8_ps
+++FORCE_INLINE __m128 _mm_cvtpu8_ps(__m64 a)
+++{
+++ return vreinterpretq_m128_f32(vcvtq_f32_u32(
+++ vmovl_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_m64(a))))));
+++}
+++
+++// Convert the signed 32-bit integer b to a single-precision (32-bit)
+++// floating-point element, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_ss
+++#define _mm_cvtsi32_ss(a, b) _mm_cvt_si2ss(a, b)
+++
+++// Convert the signed 64-bit integer b to a single-precision (32-bit)
+++// floating-point element, store the result in the lower element of dst, and
+++// copy the upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_ss
+++FORCE_INLINE __m128 _mm_cvtsi64_ss(__m128 a, int64_t b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32((float) b, vreinterpretq_f32_m128(a), 0));
+++}
+++
+++// Copy the lower single-precision (32-bit) floating-point element of a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_f32
+++FORCE_INLINE float _mm_cvtss_f32(__m128 a)
+++{
+++ return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+++}
+++
+++// Convert the lower single-precision (32-bit) floating-point element in a to a
+++// 32-bit integer, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si32
+++#define _mm_cvtss_si32(a) _mm_cvt_ss2si(a)
+++
+++// Convert the lower single-precision (32-bit) floating-point element in a to a
+++// 64-bit integer, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_si64
+++FORCE_INLINE int64_t _mm_cvtss_si64(__m128 a)
+++{
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ return (int64_t) vgetq_lane_f32(vrndiq_f32(vreinterpretq_f32_m128(a)), 0);
+++#else
+++ float32_t data = vgetq_lane_f32(
+++ vreinterpretq_f32_m128(_mm_round_ps(a, _MM_FROUND_CUR_DIRECTION)), 0);
+++ return (int64_t) data;
+++#endif
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 32-bit integers with truncation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ps2pi
+++FORCE_INLINE __m64 _mm_cvtt_ps2pi(__m128 a)
+++{
+++ return vreinterpret_m64_s32(
+++ vget_low_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a))));
+++}
+++
+++// Convert the lower single-precision (32-bit) floating-point element in a to a
+++// 32-bit integer with truncation, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtt_ss2si
+++FORCE_INLINE int _mm_cvtt_ss2si(__m128 a)
+++{
+++ return vgetq_lane_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)), 0);
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 32-bit integers with truncation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_pi32
+++#define _mm_cvttps_pi32(a) _mm_cvtt_ps2pi(a)
+++
+++// Convert the lower single-precision (32-bit) floating-point element in a to a
+++// 32-bit integer with truncation, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si32
+++#define _mm_cvttss_si32(a) _mm_cvtt_ss2si(a)
+++
+++// Convert the lower single-precision (32-bit) floating-point element in a to a
+++// 64-bit integer with truncation, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttss_si64
+++FORCE_INLINE int64_t _mm_cvttss_si64(__m128 a)
+++{
+++ return (int64_t) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+++}
+++
+++// Divide packed single-precision (32-bit) floating-point elements in a by
+++// packed elements in b, and store the results in dst.
+++// Because ARMv7-A NEON lacks a precise division intrinsic, we implement
+++// division there by computing an estimate of b's reciprocal, refining it with
+++// the Newton-Raphson method, and multiplying a by the result.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ps
+++FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vdivq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++#else
+++ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
+++ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
+++ // Additional Newton-Raphson iteration for accuracy
+++ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
+++ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a), recip));
+++#endif
+++}
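+++// Editorial note (not upstream sse2neon text): on the ARMv7-A path above,
+++// vrecpsq_f32(x, b) returns (2 - x*b) per lane, so each
+++//     recip = vmulq_f32(recip, vrecpsq_f32(recip, b));
+++// step is one Newton-Raphson iteration x(n+1) = x(n) * (2 - x(n)*b), roughly
+++// doubling the number of correct bits of 1/b before the final multiply by a.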
+++
+++// Divide the lower single-precision (32-bit) floating-point element in a by the
+++// lower single-precision (32-bit) floating-point element in b, store the result
+++// in the lower element of dst, and copy the upper 3 packed elements from a to
+++// the upper elements of dst.
+++// Warning: ARMv7-A does not produce the same result as Intel hardware and is
+++// not IEEE-compliant.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_ss
+++FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
+++{
+++ float32_t value =
+++ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+++}
+++
+++// Extract a 16-bit integer from a, selected with imm8, and store the result in
+++// the lower element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_pi16
+++#define _mm_extract_pi16(a, imm) \
+++ (int32_t) vget_lane_u16(vreinterpret_u16_m64(a), (imm))
+++
+++// Free aligned memory that was allocated with _mm_malloc.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_free
+++#if !defined(SSE2NEON_ALLOC_DEFINED)
+++FORCE_INLINE void _mm_free(void *addr)
+++{
+++ free(addr);
+++}
+++#endif
+++
+++FORCE_INLINE uint64_t _sse2neon_get_fpcr(void)
+++{
+++ uint64_t value;
+++#if defined(_MSC_VER)
+++ value = _ReadStatusReg(ARM64_FPCR);
+++#else
+++ __asm__ __volatile__("mrs %0, FPCR" : "=r"(value)); /* read */
+++#endif
+++ return value;
+++}
+++
+++FORCE_INLINE void _sse2neon_set_fpcr(uint64_t value)
+++{
+++#if defined(_MSC_VER)
+++ _WriteStatusReg(ARM64_FPCR, value);
+++#else
+++ __asm__ __volatile__("msr FPCR, %0" ::"r"(value)); /* write */
+++#endif
+++}
+++
+++// Macro: Get the flush zero bits from the MXCSR control and status register.
+++// The flush zero may contain any of the following flags: _MM_FLUSH_ZERO_ON or
+++// _MM_FLUSH_ZERO_OFF
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_FLUSH_ZERO_MODE
+++FORCE_INLINE unsigned int _sse2neon_mm_get_flush_zero_mode(void)
+++{
+++ union {
+++ fpcr_bitfield field;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t value;
+++#else
+++ uint32_t value;
+++#endif
+++ } r;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ r.value = _sse2neon_get_fpcr();
+++#else
+++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+++#endif
+++
+++ return r.field.bit24 ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF;
+++}
+++
+++// Macro: Get the rounding mode bits from the MXCSR control and status register.
+++// The rounding mode may contain any of the following flags: _MM_ROUND_NEAREST,
+++// _MM_ROUND_DOWN, _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_GET_ROUNDING_MODE
+++FORCE_INLINE unsigned int _MM_GET_ROUNDING_MODE(void)
+++{
+++ union {
+++ fpcr_bitfield field;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t value;
+++#else
+++ uint32_t value;
+++#endif
+++ } r;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ r.value = _sse2neon_get_fpcr();
+++#else
+++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+++#endif
+++
+++ if (r.field.bit22) {
+++ return r.field.bit23 ? _MM_ROUND_TOWARD_ZERO : _MM_ROUND_UP;
+++ } else {
+++ return r.field.bit23 ? _MM_ROUND_DOWN : _MM_ROUND_NEAREST;
+++ }
+++}
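+++// Illustrative sketch (editorial addition, not upstream sse2neon code):
+++// typical save/restore usage mirrors x86, using the matching
+++// _MM_SET_ROUNDING_MODE provided by the full header, e.g.
+++//     unsigned int saved = _MM_GET_ROUNDING_MODE();
+++//     _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++//     /* ... code that relies on truncating conversions ... */
+++//     _MM_SET_ROUNDING_MODE(saved);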
+++
+++// Copy a to dst, and insert the 16-bit integer i into dst at the location
+++// specified by imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_pi16
+++#define _mm_insert_pi16(a, b, imm) \
+++ vreinterpret_m64_s16(vset_lane_s16((b), vreinterpret_s16_m64(a), (imm)))
+++
+++// Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+++// elements) from memory into dst. mem_addr must be aligned on a 16-byte
+++// boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps
+++FORCE_INLINE __m128 _mm_load_ps(const float *p)
+++{
+++ return vreinterpretq_m128_f32(vld1q_f32(p));
+++}
+++
+++// Load a single-precision (32-bit) floating-point element from memory into all
+++// elements of dst.
+++//
+++// dst[31:0] := MEM[mem_addr+31:mem_addr]
+++// dst[63:32] := MEM[mem_addr+31:mem_addr]
+++// dst[95:64] := MEM[mem_addr+31:mem_addr]
+++// dst[127:96] := MEM[mem_addr+31:mem_addr]
+++//
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ps1
+++#define _mm_load_ps1 _mm_load1_ps
+++
+++// Load a single-precision (32-bit) floating-point element from memory into the
+++// lower of dst, and zero the upper 3 elements. mem_addr does not need to be
+++// aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_ss
+++FORCE_INLINE __m128 _mm_load_ss(const float *p)
+++{
+++ return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
+++}
+++
+++// Load a single-precision (32-bit) floating-point element from memory into all
+++// elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_ps
+++FORCE_INLINE __m128 _mm_load1_ps(const float *p)
+++{
+++ return vreinterpretq_m128_f32(vld1q_dup_f32(p));
+++}
+++
+++// Load 2 single-precision (32-bit) floating-point elements from memory into the
+++// upper 2 elements of dst, and copy the lower 2 elements from a to dst.
+++// mem_addr does not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pi
+++FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
+++{
+++ return vreinterpretq_m128_f32(
+++ vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *) p)));
+++}
+++
+++// Load 2 single-precision (32-bit) floating-point elements from memory into the
+++// lower 2 elements of dst, and copy the upper 2 elements from a to dst.
+++// mem_addr does not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pi
+++FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
+++{
+++ return vreinterpretq_m128_f32(
+++ vcombine_f32(vld1_f32((const float32_t *) p), vget_high_f32(a)));
+++}
+++
+++// Load 4 single-precision (32-bit) floating-point elements from memory into dst
+++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+++// general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_ps
+++FORCE_INLINE __m128 _mm_loadr_ps(const float *p)
+++{
+++ float32x4_t v = vrev64q_f32(vld1q_f32(p));
+++ return vreinterpretq_m128_f32(vextq_f32(v, v, 2));
+++}
+++
+++// Load 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+++// elements) from memory into dst. mem_addr does not need to be aligned on any
+++// particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_ps
+++FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
+++{
+++ // For NEON, alignment does not matter, so _mm_load_ps and _mm_loadu_ps
+++ // are equivalent.
+++ return vreinterpretq_m128_f32(vld1q_f32(p));
+++}
+++
+++// Load unaligned 16-bit integer from memory into the first element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si16
+++FORCE_INLINE __m128i _mm_loadu_si16(const void *p)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vsetq_lane_s16(*(const int16_t *) p, vdupq_n_s16(0), 0));
+++}
+++
+++// Load unaligned 64-bit integer from memory into the first element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si64
+++FORCE_INLINE __m128i _mm_loadu_si64(const void *p)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vcombine_s64(vld1_s64((const int64_t *) p), vdup_n_s64(0)));
+++}
+++
+++// Allocate size bytes of memory, aligned to the alignment specified in align,
+++// and return a pointer to the allocated memory. _mm_free should be used to free
+++// memory that is allocated with _mm_malloc.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_malloc
+++#if !defined(SSE2NEON_ALLOC_DEFINED)
+++FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
+++{
+++ void *ptr;
+++ if (align == 1)
+++ return malloc(size);
+++ if (align == 2 || (sizeof(void *) == 8 && align == 4))
+++ align = sizeof(void *);
+++ if (!posix_memalign(&ptr, align, size))
+++ return ptr;
+++ return NULL;
+++}
+++#endif
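+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): pair _mm_malloc with _mm_free to obtain a
+++// 16-byte aligned buffer that is safe to pass to the aligned load defined
+++// above.
+++FORCE_INLINE float _sse2neon_example_aligned_sum(void)
+++{
+++ float *buf = (float *) _mm_malloc(4 * sizeof(float), 16);
+++ int i;
+++ float sum;
+++ __m128 v;
+++ float32x4_t f;
+++ if (!buf)
+++ return 0.0f;
+++ for (i = 0; i < 4; i++)
+++ buf[i] = (float) i; /* buf = {0, 1, 2, 3} */
+++ v = _mm_load_ps(buf); /* the aligned load is valid on this pointer */
+++ f = vreinterpretq_f32_m128(v);
+++ sum = vgetq_lane_f32(f, 0) + vgetq_lane_f32(f, 1) + vgetq_lane_f32(f, 2) +
+++ vgetq_lane_f32(f, 3);
+++ _mm_free(buf);
+++ return sum; /* 6.0f */
+++}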
+++
+++// Conditionally store 8-bit integer elements from a into memory using mask
+++// (elements are not stored when the highest bit is not set in the corresponding
+++// element) and a non-temporal memory hint.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmove_si64
+++FORCE_INLINE void _mm_maskmove_si64(__m64 a, __m64 mask, char *mem_addr)
+++{
+++ int8x8_t shr_mask = vshr_n_s8(vreinterpret_s8_m64(mask), 7);
+++ __m128 b = _mm_load_ps((const float *) mem_addr);
+++ int8x8_t masked =
+++ vbsl_s8(vreinterpret_u8_s8(shr_mask), vreinterpret_s8_m64(a),
+++ vreinterpret_s8_u64(vget_low_u64(vreinterpretq_u64_m128(b))));
+++ vst1_s8((int8_t *) mem_addr, masked);
+++}
+++
+++// Conditionally store 8-bit integer elements from a into memory using mask
+++// (elements are not stored when the highest bit is not set in the corresponding
+++// element) and a non-temporal memory hint.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_maskmovq
+++#define _m_maskmovq(a, mask, mem_addr) _mm_maskmove_si64(a, mask, mem_addr)
+++
+++// Compare packed signed 16-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pi16
+++FORCE_INLINE __m64 _mm_max_pi16(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_s16(
+++ vmax_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b,
+++// and store packed maximum values in dst. dst does not follow the IEEE Standard
+++// for Floating-Point Arithmetic (IEEE 754) maximum value when inputs are NaN or
+++// signed-zero values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ps
+++FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
+++{
+++#if SSE2NEON_PRECISE_MINMAX
+++ float32x4_t _a = vreinterpretq_f32_m128(a);
+++ float32x4_t _b = vreinterpretq_f32_m128(b);
+++ return vreinterpretq_m128_f32(vbslq_f32(vcgtq_f32(_a, _b), _a, _b));
+++#else
+++ return vreinterpretq_m128_f32(
+++ vmaxq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++#endif
+++}
+++
+++// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pu8
+++FORCE_INLINE __m64 _mm_max_pu8(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_u8(
+++ vmax_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b, store the maximum value in the lower element of dst, and copy the upper 3
+++// packed elements from a to the upper element of dst. dst does not follow the
+++// IEEE Standard for Floating-Point Arithmetic (IEEE 754) maximum value when
+++// inputs are NaN or signed-zero values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_ss
+++FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
+++{
+++ float32_t value = vgetq_lane_f32(_mm_max_ps(a, b), 0);
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+++}
+++
+++// Compare packed signed 16-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pi16
+++FORCE_INLINE __m64 _mm_min_pi16(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_s16(
+++ vmin_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+++}
+++
+++// Compare packed single-precision (32-bit) floating-point elements in a and b,
+++// and store packed minimum values in dst. dst does not follow the IEEE Standard
+++// for Floating-Point Arithmetic (IEEE 754) minimum value when inputs are NaN or
+++// signed-zero values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ps
+++FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
+++{
+++#if SSE2NEON_PRECISE_MINMAX
+++ float32x4_t _a = vreinterpretq_f32_m128(a);
+++ float32x4_t _b = vreinterpretq_f32_m128(b);
+++ return vreinterpretq_m128_f32(vbslq_f32(vcltq_f32(_a, _b), _a, _b));
+++#else
+++ return vreinterpretq_m128_f32(
+++ vminq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++#endif
+++}
+++
+++// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pu8
+++FORCE_INLINE __m64 _mm_min_pu8(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_u8(
+++ vmin_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)));
+++}
+++
+++// Compare the lower single-precision (32-bit) floating-point elements in a and
+++// b, store the minimum value in the lower element of dst, and copy the upper 3
+++// packed elements from a to the upper element of dst. dst does not follow the
+++// IEEE Standard for Floating-Point Arithmetic (IEEE 754) minimum value when
+++// inputs are NaN or signed-zero values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_ss
+++FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
+++{
+++ float32_t value = vgetq_lane_f32(_mm_min_ps(a, b), 0);
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
+++}
+++
+++// Move the lower single-precision (32-bit) floating-point element from b to the
+++// lower element of dst, and copy the upper 3 packed elements from a to the
+++// upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_ss
+++FORCE_INLINE __m128 _mm_move_ss(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32(vgetq_lane_f32(vreinterpretq_f32_m128(b), 0),
+++ vreinterpretq_f32_m128(a), 0));
+++}
+++
+++// Move the upper 2 single-precision (32-bit) floating-point elements from b to
+++// the lower 2 elements of dst, and copy the upper 2 elements from a to the
+++// upper 2 elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehl_ps
+++FORCE_INLINE __m128 _mm_movehl_ps(__m128 a, __m128 b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_u64(
+++ vzip2q_u64(vreinterpretq_u64_m128(b), vreinterpretq_u64_m128(a)));
+++#else
+++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
+++#endif
+++}
+++
+++// Move the lower 2 single-precision (32-bit) floating-point elements from b to
+++// the upper 2 elements of dst, and copy the lower 2 elements from a to the
+++// lower 2 elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movelh_ps
+++FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
+++{
+++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
+++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
+++ return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
+++}
+++
+++// Create mask from the most significant bit of each 8-bit element in a, and
+++// store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pi8
+++FORCE_INLINE int _mm_movemask_pi8(__m64 a)
+++{
+++ uint8x8_t input = vreinterpret_u8_m64(a);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const int8_t shift[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+++ uint8x8_t tmp = vshr_n_u8(input, 7);
+++ return vaddv_u8(vshl_u8(tmp, vld1_s8(shift)));
+++#else
+++ // Refer to the implementation of `_mm_movemask_epi8`
+++ uint16x4_t high_bits = vreinterpret_u16_u8(vshr_n_u8(input, 7));
+++ uint32x2_t paired16 =
+++ vreinterpret_u32_u16(vsra_n_u16(high_bits, high_bits, 7));
+++ uint8x8_t paired32 =
+++ vreinterpret_u8_u32(vsra_n_u32(paired16, paired16, 14));
+++ return vget_lane_u8(paired32, 0) | ((int) vget_lane_u8(paired32, 4) << 4);
+++#endif
+++}
+++
+++// Set each bit of mask dst based on the most significant bit of the
+++// corresponding packed single-precision (32-bit) floating-point element in a.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_ps
+++FORCE_INLINE int _mm_movemask_ps(__m128 a)
+++{
+++ uint32x4_t input = vreinterpretq_u32_m128(a);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const int32_t shift[4] = {0, 1, 2, 3};
+++ uint32x4_t tmp = vshrq_n_u32(input, 31);
+++ return vaddvq_u32(vshlq_u32(tmp, vld1q_s32(shift)));
+++#else
+++ // Uses the exact same method as _mm_movemask_epi8; see that for details.
+++ // Shift out everything but the sign bits with a 32-bit unsigned shift
+++ // right.
+++ uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
+++ // Merge the two pairs together with a 64-bit unsigned shift right + add.
+++ uint8x16_t paired =
+++ vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
+++ // Extract the result.
+++ return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
+++#endif
+++}
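+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): the sign-bit mask makes "is any lane
+++// negative?" a single branch-free test.
+++FORCE_INLINE int _sse2neon_example_any_negative(__m128 v)
+++{
+++ /* Bit k of the mask is the sign bit of lane k, so a non-zero mask means
+++ * at least one lane has its sign bit set. */
+++ return _mm_movemask_ps(v) != 0;
+++}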
+++
+++// Multiply packed single-precision (32-bit) floating-point elements in a and b,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ps
+++FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vmulq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Multiply the lower single-precision (32-bit) floating-point element in a and
+++// b, store the result in the lower element of dst, and copy the upper 3 packed
+++// elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_ss
+++FORCE_INLINE __m128 _mm_mul_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_mul_ps(a, b));
+++}
+++
+++// Multiply the packed unsigned 16-bit integers in a and b, producing
+++// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+++// integers in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_pu16
+++FORCE_INLINE __m64 _mm_mulhi_pu16(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_u16(vshrn_n_u32(
+++ vmull_u16(vreinterpret_u16_m64(a), vreinterpret_u16_m64(b)), 16));
+++}
+++
+++// Compute the bitwise OR of packed single-precision (32-bit) floating-point
+++// elements in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_ps
+++FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_s32(
+++ vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+++}
+++
+++// Average packed unsigned 8-bit integers in a and b, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgb
+++#define _m_pavgb(a, b) _mm_avg_pu8(a, b)
+++
+++// Average packed unsigned 16-bit integers in a and b, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pavgw
+++#define _m_pavgw(a, b) _mm_avg_pu16(a, b)
+++
+++// Extract a 16-bit integer from a, selected with imm8, and store the result in
+++// the lower element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pextrw
+++#define _m_pextrw(a, imm) _mm_extract_pi16(a, imm)
+++
+++// Copy a to dst, and insert the 16-bit integer i into dst at the location
+++// specified by imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_pinsrw
+++#define _m_pinsrw(a, i, imm) _mm_insert_pi16(a, i, imm)
+++
+++// Compare packed signed 16-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxsw
+++#define _m_pmaxsw(a, b) _mm_max_pi16(a, b)
+++
+++// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmaxub
+++#define _m_pmaxub(a, b) _mm_max_pu8(a, b)
+++
+++// Compare packed signed 16-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminsw
+++#define _m_pminsw(a, b) _mm_min_pi16(a, b)
+++
+++// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pminub
+++#define _m_pminub(a, b) _mm_min_pu8(a, b)
+++
+++// Create mask from the most significant bit of each 8-bit element in a, and
+++// store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmovmskb
+++#define _m_pmovmskb(a) _mm_movemask_pi8(a)
+++
+++// Multiply the packed unsigned 16-bit integers in a and b, producing
+++// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+++// integers in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pmulhuw
+++#define _m_pmulhuw(a, b) _mm_mulhi_pu16(a, b)
+++
+++// Fetch the line of data from memory that contains address p to a location in
+++// the cache hierarchy specified by the locality hint i.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_prefetch
+++FORCE_INLINE void _mm_prefetch(char const *p, int i)
+++{
+++ (void) i;
+++#if defined(_MSC_VER)
+++ switch (i) {
+++ case _MM_HINT_NTA:
+++ __prefetch2(p, 1);
+++ break;
+++ case _MM_HINT_T0:
+++ __prefetch2(p, 0);
+++ break;
+++ case _MM_HINT_T1:
+++ __prefetch2(p, 2);
+++ break;
+++ case _MM_HINT_T2:
+++ __prefetch2(p, 4);
+++ break;
+++ }
+++#else
+++ switch (i) {
+++ case _MM_HINT_NTA:
+++ __builtin_prefetch(p, 0, 0);
+++ break;
+++ case _MM_HINT_T0:
+++ __builtin_prefetch(p, 0, 3);
+++ break;
+++ case _MM_HINT_T1:
+++ __builtin_prefetch(p, 0, 2);
+++ break;
+++ case _MM_HINT_T2:
+++ __builtin_prefetch(p, 0, 1);
+++ break;
+++ }
+++#endif
+++}
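+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name and the prefetch distance of 16 floats are arbitrary):
+++// prefetching ahead of a streaming loop.
+++FORCE_INLINE float _sse2neon_example_prefetch_sum(const float *data, int n)
+++{
+++ float acc = 0.0f;
+++ int i;
+++ for (i = 0; i < n; i++) {
+++ if (i + 16 < n)
+++ _mm_prefetch((const char *) (data + i + 16), _MM_HINT_T0);
+++ acc += data[i];
+++ }
+++ return acc;
+++}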
+++
+++// Compute the absolute differences of packed unsigned 8-bit integers in a and
+++// b, then horizontally sum each consecutive 8 differences to produce four
+++// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+++// 16 bits of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=m_psadbw
+++#define _m_psadbw(a, b) _mm_sad_pu8(a, b)
+++
+++// Shuffle 16-bit integers in a using the control in imm8, and store the results
+++// in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_m_pshufw
+++#define _m_pshufw(a, imm) _mm_shuffle_pi16(a, imm)
+++
+++// Compute the approximate reciprocal of packed single-precision (32-bit)
+++// floating-point elements in a, and store the results in dst. The maximum
+++// relative error for this approximation is less than 1.5*2^-12.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ps
+++FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
+++{
+++ float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
+++ recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
+++ return vreinterpretq_m128_f32(recip);
+++}
+++
+++// Compute the approximate reciprocal of the lower single-precision (32-bit)
+++// floating-point element in a, store the result in the lower element of dst,
+++// and copy the upper 3 packed elements from a to the upper elements of dst. The
+++// maximum relative error for this approximation is less than 1.5*2^-12.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rcp_ss
+++FORCE_INLINE __m128 _mm_rcp_ss(__m128 a)
+++{
+++ return _mm_move_ss(a, _mm_rcp_ps(a));
+++}
+++
+++// Compute the approximate reciprocal square root of packed single-precision
+++// (32-bit) floating-point elements in a, and store the results in dst. The
+++// maximum relative error for this approximation is less than 1.5*2^-12.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ps
+++FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
+++{
+++ float32x4_t out = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+++
+++ // Generate masks that flag lanes where the estimate is +/-infinity, which
+++ // happens exactly when the corresponding input lane is 0.0f/-0.0f
+++ // (1/sqrt(+/-0) is +/-infinity under IEEE-754 arithmetic rules).
+++ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
+++ const uint32x4_t neg_inf = vdupq_n_u32(0xFF800000);
+++ const uint32x4_t has_pos_zero =
+++ vceqq_u32(pos_inf, vreinterpretq_u32_f32(out));
+++ const uint32x4_t has_neg_zero =
+++ vceqq_u32(neg_inf, vreinterpretq_u32_f32(out));
+++
+++ out = vmulq_f32(
+++ out, vrsqrtsq_f32(vmulq_f32(vreinterpretq_f32_m128(in), out), out));
+++
+++ // Set output vector element to infinity/negative-infinity if
+++ // the corresponding input vector element is 0.0f/-0.0f.
+++ out = vbslq_f32(has_pos_zero, (float32x4_t) pos_inf, out);
+++ out = vbslq_f32(has_neg_zero, (float32x4_t) neg_inf, out);
+++
+++ return vreinterpretq_m128_f32(out);
+++}
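+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): when the ~1.5*2^-12 relative error of
+++// _mm_rsqrt_ps is not enough, one extra Newton-Raphson step,
+++// y' = y * (1.5 - 0.5 * x * y * y), roughly doubles the number of accurate
+++// bits.
+++FORCE_INLINE __m128 _sse2neon_example_rsqrt_refined(__m128 x)
+++{
+++ float32x4_t xf = vreinterpretq_f32_m128(x);
+++ float32x4_t y = vreinterpretq_f32_m128(_mm_rsqrt_ps(x));
+++ float32x4_t half_xyy =
+++ vmulq_f32(vmulq_f32(vdupq_n_f32(0.5f), xf), vmulq_f32(y, y));
+++ y = vmulq_f32(y, vsubq_f32(vdupq_n_f32(1.5f), half_xyy));
+++ return vreinterpretq_m128_f32(y);
+++}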
+++
+++// Compute the approximate reciprocal square root of the lower single-precision
+++// (32-bit) floating-point element in a, store the result in the lower element
+++// of dst, and copy the upper 3 packed elements from a to the upper elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_rsqrt_ss
+++FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
+++{
+++ return vsetq_lane_f32(vgetq_lane_f32(_mm_rsqrt_ps(in), 0), in, 0);
+++}
+++
+++// Compute the absolute differences of packed unsigned 8-bit integers in a and
+++// b, then horizontally sum each consecutive 8 differences to produce four
+++// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+++// 16 bits of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_pu8
+++FORCE_INLINE __m64 _mm_sad_pu8(__m64 a, __m64 b)
+++{
+++ uint64x1_t t = vpaddl_u32(vpaddl_u16(
+++ vpaddl_u8(vabd_u8(vreinterpret_u8_m64(a), vreinterpret_u8_m64(b)))));
+++ return vreinterpret_m64_u16(
+++ vset_lane_u16((int) vget_lane_u64(t, 0), vdup_n_u16(0), 0));
+++}
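+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): computing the sum of absolute differences
+++// of two 8-byte blocks, e.g. for block-matching metrics.
+++FORCE_INLINE int _sse2neon_example_sad8(const uint8_t *x, const uint8_t *y)
+++{
+++ __m64 a = vreinterpret_m64_u8(vld1_u8(x));
+++ __m64 b = vreinterpret_m64_u8(vld1_u8(y));
+++ /* The SAD result is packed in the low 16 bits of the __m64. */
+++ return _mm_extract_pi16(_mm_sad_pu8(a, b), 0);
+++}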
+++
+++// Macro: Set the flush zero bits of the MXCSR control and status register to
+++// the value in unsigned 32-bit integer a. The flush zero may contain any of the
+++// following flags: _MM_FLUSH_ZERO_ON or _MM_FLUSH_ZERO_OFF
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_FLUSH_ZERO_MODE
+++FORCE_INLINE void _sse2neon_mm_set_flush_zero_mode(unsigned int flag)
+++{
+++ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
+++ // regardless of the value of the FZ bit.
+++ union {
+++ fpcr_bitfield field;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t value;
+++#else
+++ uint32_t value;
+++#endif
+++ } r;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ r.value = _sse2neon_get_fpcr();
+++#else
+++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+++#endif
+++
+++ r.field.bit24 = (flag & _MM_FLUSH_ZERO_MASK) == _MM_FLUSH_ZERO_ON;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ _sse2neon_set_fpcr(r.value);
+++#else
+++ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
+++#endif
+++}
+++
+++// Set packed single-precision (32-bit) floating-point elements in dst with the
+++// supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps
+++FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
+++{
+++ float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
+++ return vreinterpretq_m128_f32(vld1q_f32(data));
+++}
+++
+++// Broadcast single-precision (32-bit) floating-point value a to all elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ps1
+++FORCE_INLINE __m128 _mm_set_ps1(float _w)
+++{
+++ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
+++}
+++
+++// Macro: Set the rounding mode bits of the MXCSR control and status register to
+++// the value in unsigned 32-bit integer a. The rounding mode may contain any of
+++// the following flags: _MM_ROUND_NEAREST, _MM_ROUND_DOWN, _MM_ROUND_UP,
+++// _MM_ROUND_TOWARD_ZERO
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_MM_SET_ROUNDING_MODE
+++FORCE_INLINE void _MM_SET_ROUNDING_MODE(int rounding)
+++{
+++ union {
+++ fpcr_bitfield field;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t value;
+++#else
+++ uint32_t value;
+++#endif
+++ } r;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ r.value = _sse2neon_get_fpcr();
+++#else
+++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+++#endif
+++
+++ switch (rounding) {
+++ case _MM_ROUND_TOWARD_ZERO:
+++ r.field.bit22 = 1;
+++ r.field.bit23 = 1;
+++ break;
+++ case _MM_ROUND_DOWN:
+++ r.field.bit22 = 0;
+++ r.field.bit23 = 1;
+++ break;
+++ case _MM_ROUND_UP:
+++ r.field.bit22 = 1;
+++ r.field.bit23 = 0;
+++ break;
+++ default: //_MM_ROUND_NEAREST
+++ r.field.bit22 = 0;
+++ r.field.bit23 = 0;
+++ }
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ _sse2neon_set_fpcr(r.value);
+++#else
+++ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
+++#endif
+++}
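+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): save the current rounding mode, switch it
+++// temporarily, and restore it afterwards.
+++FORCE_INLINE unsigned int _sse2neon_example_with_round_down(void)
+++{
+++ unsigned int saved, active;
+++ saved = _MM_GET_ROUNDING_MODE();
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ /* Conversions that honour the FPCR/FPSCR rounding mode now round down. */
+++ active = _MM_GET_ROUNDING_MODE(); /* == _MM_ROUND_DOWN */
+++ _MM_SET_ROUNDING_MODE((int) saved);
+++ return active;
+++}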
+++
+++// Copy single-precision (32-bit) floating-point element a to the lower element
+++// of dst, and zero the upper 3 elements.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_ss
+++FORCE_INLINE __m128 _mm_set_ss(float a)
+++{
+++ return vreinterpretq_m128_f32(vsetq_lane_f32(a, vdupq_n_f32(0), 0));
+++}
+++
+++// Broadcast single-precision (32-bit) floating-point value a to all elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_ps
+++FORCE_INLINE __m128 _mm_set1_ps(float _w)
+++{
+++ return vreinterpretq_m128_f32(vdupq_n_f32(_w));
+++}
+++
+++// Set the MXCSR control and status register with the value in unsigned 32-bit
+++// integer a.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setcsr
+++// FIXME: _mm_setcsr() implementation supports changing the rounding mode only.
+++FORCE_INLINE void _mm_setcsr(unsigned int a)
+++{
+++ _MM_SET_ROUNDING_MODE(a);
+++}
+++
+++// Get the unsigned 32-bit value of the MXCSR control and status register.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_getcsr
+++// FIXME: _mm_getcsr() implementation supports reading the rounding mode only.
+++FORCE_INLINE unsigned int _mm_getcsr(void)
+++{
+++ return _MM_GET_ROUNDING_MODE();
+++}
+++
+++// Set packed single-precision (32-bit) floating-point elements in dst with the
+++// supplied values in reverse order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_ps
+++FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
+++{
+++ float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
+++ return vreinterpretq_m128_f32(vld1q_f32(data));
+++}
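+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): _mm_set_ps lists elements from the highest
+++// lane down to lane 0, while _mm_setr_ps lists them in memory order, so the
+++// two vectors below are identical.
+++FORCE_INLINE int _sse2neon_example_lane_order(void)
+++{
+++ __m128 hi_to_lo = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f);
+++ __m128 lo_to_hi = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
+++ /* Lane 3 of both vectors is 3.0f. */
+++ return vgetq_lane_f32(vreinterpretq_f32_m128(hi_to_lo), 3) ==
+++ vgetq_lane_f32(vreinterpretq_f32_m128(lo_to_hi), 3);
+++}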
+++
+++// Return vector of type __m128 with all elements set to zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_ps
+++FORCE_INLINE __m128 _mm_setzero_ps(void)
+++{
+++ return vreinterpretq_m128_f32(vdupq_n_f32(0));
+++}
+++
+++// Shuffle 16-bit integers in a using the control in imm8, and store the results
+++// in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi16
+++#ifdef _sse2neon_shuffle
+++#define _mm_shuffle_pi16(a, imm) \
+++ vreinterpret_m64_s16(vshuffle_s16( \
+++ vreinterpret_s16_m64(a), vreinterpret_s16_m64(a), (imm & 0x3), \
+++ ((imm >> 2) & 0x3), ((imm >> 4) & 0x3), ((imm >> 6) & 0x3)))
+++#else
+++#define _mm_shuffle_pi16(a, imm) \
+++ _sse2neon_define1( \
+++ __m64, a, int16x4_t ret; \
+++ ret = vmov_n_s16( \
+++ vget_lane_s16(vreinterpret_s16_m64(_a), (imm) & (0x3))); \
+++ ret = vset_lane_s16( \
+++ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 2) & 0x3), ret, \
+++ 1); \
+++ ret = vset_lane_s16( \
+++ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 4) & 0x3), ret, \
+++ 2); \
+++ ret = vset_lane_s16( \
+++ vget_lane_s16(vreinterpret_s16_m64(_a), ((imm) >> 6) & 0x3), ret, \
+++ 3); \
+++ _sse2neon_return(vreinterpret_m64_s16(ret));)
+++#endif
+++
+++// Perform a serializing operation on all store-to-memory instructions that were
+++// issued prior to this instruction. Guarantees that every store instruction
+++// that precedes, in program order, is globally visible before any store
+++// instruction which follows the fence in program order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sfence
+++FORCE_INLINE void _mm_sfence(void)
+++{
+++ _sse2neon_smp_mb();
+++}
+++
+++// Perform a serializing operation on all load-from-memory and store-to-memory
+++// instructions that were issued prior to this instruction. Guarantees that
+++// every memory access that precedes, in program order, the memory fence
+++// instruction is globally visible before any memory instruction which follows
+++// the fence in program order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mfence
+++FORCE_INLINE void _mm_mfence(void)
+++{
+++ _sse2neon_smp_mb();
+++}
+++
+++// Perform a serializing operation on all load-from-memory instructions that
+++// were issued prior to this instruction. Guarantees that every load instruction
+++// that precedes, in program order, is globally visible before any load
+++// instruction which follows the fence in program order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lfence
+++FORCE_INLINE void _mm_lfence(void)
+++{
+++ _sse2neon_smp_mb();
+++}
+++
+++// FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
+++// int imm)
+++#ifdef _sse2neon_shuffle
+++#define _mm_shuffle_ps(a, b, imm) \
+++ __extension__({ \
+++ float32x4_t _input1 = vreinterpretq_f32_m128(a); \
+++ float32x4_t _input2 = vreinterpretq_f32_m128(b); \
+++ float32x4_t _shuf = \
+++ vshuffleq_s32(_input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
+++ (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
+++ vreinterpretq_m128_f32(_shuf); \
+++ })
+++#else // generic
+++#define _mm_shuffle_ps(a, b, imm) \
+++ _sse2neon_define2( \
+++ __m128, a, b, __m128 ret; switch (imm) { \
+++ case _MM_SHUFFLE(1, 0, 3, 2): \
+++ ret = _mm_shuffle_ps_1032(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(2, 3, 0, 1): \
+++ ret = _mm_shuffle_ps_2301(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(0, 3, 2, 1): \
+++ ret = _mm_shuffle_ps_0321(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(2, 1, 0, 3): \
+++ ret = _mm_shuffle_ps_2103(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(1, 0, 1, 0): \
+++ ret = _mm_movelh_ps(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(1, 0, 0, 1): \
+++ ret = _mm_shuffle_ps_1001(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(0, 1, 0, 1): \
+++ ret = _mm_shuffle_ps_0101(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(3, 2, 1, 0): \
+++ ret = _mm_shuffle_ps_3210(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(0, 0, 1, 1): \
+++ ret = _mm_shuffle_ps_0011(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(0, 0, 2, 2): \
+++ ret = _mm_shuffle_ps_0022(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(2, 2, 0, 0): \
+++ ret = _mm_shuffle_ps_2200(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(3, 2, 0, 2): \
+++ ret = _mm_shuffle_ps_3202(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(3, 2, 3, 2): \
+++ ret = _mm_movehl_ps(_b, _a); \
+++ break; \
+++ case _MM_SHUFFLE(1, 1, 3, 3): \
+++ ret = _mm_shuffle_ps_1133(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(2, 0, 1, 0): \
+++ ret = _mm_shuffle_ps_2010(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(2, 0, 0, 1): \
+++ ret = _mm_shuffle_ps_2001(_a, _b); \
+++ break; \
+++ case _MM_SHUFFLE(2, 0, 3, 2): \
+++ ret = _mm_shuffle_ps_2032(_a, _b); \
+++ break; \
+++ default: \
+++ ret = _mm_shuffle_ps_default(_a, _b, (imm)); \
+++ break; \
+++ } _sse2neon_return(ret);)
+++#endif
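+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): _MM_SHUFFLE(0, 1, 2, 3) selects source
+++// lanes 3, 2, 1, 0 for destination lanes 0..3, so shuffling a vector with
+++// itself reverses its four 32-bit lanes.
+++FORCE_INLINE __m128 _sse2neon_example_reverse_lanes(__m128 v)
+++{
+++ return _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 1, 2, 3));
+++}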
+++
+++// Compute the square root of packed single-precision (32-bit) floating-point
+++// elements in a, and store the results in dst.
+++// Because ARMv7-A NEON lacks a precise square-root intrinsic, we approximate
+++// the result by refining the reciprocal square-root estimate with
+++// Newton-Raphson iterations and then multiplying the input by it.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ps
+++FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
+++#else
+++ float32x4_t recip = vrsqrteq_f32(vreinterpretq_f32_m128(in));
+++
+++ // Test for vrsqrteq_f32(0) -> positive infinity case.
+++ // Change to zero, so that s * 1/sqrt(s) result is zero too.
+++ const uint32x4_t pos_inf = vdupq_n_u32(0x7F800000);
+++ const uint32x4_t div_by_zero =
+++ vceqq_u32(pos_inf, vreinterpretq_u32_f32(recip));
+++ recip = vreinterpretq_f32_u32(
+++ vandq_u32(vmvnq_u32(div_by_zero), vreinterpretq_u32_f32(recip)));
+++
+++ recip = vmulq_f32(
+++ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+++ recip);
+++ // Additional Newton-Raphson iteration for accuracy
+++ recip = vmulq_f32(
+++ vrsqrtsq_f32(vmulq_f32(recip, recip), vreinterpretq_f32_m128(in)),
+++ recip);
+++
+++ // sqrt(s) = s * 1/sqrt(s)
+++ return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(in), recip));
+++#endif
+++}
+++
+++// Compute the square root of the lower single-precision (32-bit) floating-point
+++// element in a, store the result in the lower element of dst, and copy the
+++// upper 3 packed elements from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_ss
+++FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
+++{
+++ float32_t value =
+++ vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
+++ return vreinterpretq_m128_f32(
+++ vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
+++}
+++
+++// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+++// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
+++// or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps
+++FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
+++{
+++ vst1q_f32(p, vreinterpretq_f32_m128(a));
+++}
+++
+++// Store the lower single-precision (32-bit) floating-point element from a into
+++// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+++// boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ps1
+++FORCE_INLINE void _mm_store_ps1(float *p, __m128 a)
+++{
+++ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+++ vst1q_f32(p, vdupq_n_f32(a0));
+++}
+++
+++// Store the lower single-precision (32-bit) floating-point element from a into
+++// memory. mem_addr does not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_ss
+++FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
+++{
+++ vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
+++}
+++
+++// Store the lower single-precision (32-bit) floating-point element from a into
+++// 4 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+++// boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store1_ps
+++#define _mm_store1_ps _mm_store_ps1
+++
+++// Store the upper 2 single-precision (32-bit) floating-point elements from a
+++// into memory.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pi
+++FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
+++{
+++ *p = vreinterpret_m64_f32(vget_high_f32(a));
+++}
+++
+++// Store the lower 2 single-precision (32-bit) floating-point elements from a
+++// into memory.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pi
+++FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
+++{
+++ *p = vreinterpret_m64_f32(vget_low_f32(a));
+++}
+++
+++// Store 4 single-precision (32-bit) floating-point elements from a into memory
+++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+++// general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_ps
+++FORCE_INLINE void _mm_storer_ps(float *p, __m128 a)
+++{
+++ float32x4_t tmp = vrev64q_f32(vreinterpretq_f32_m128(a));
+++ float32x4_t rev = vextq_f32(tmp, tmp, 2);
+++ vst1q_f32(p, rev);
+++}
+++
+++// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-point
+++// elements) from a into memory. mem_addr does not need to be aligned on any
+++// particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_ps
+++FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
+++{
+++ vst1q_f32(p, vreinterpretq_f32_m128(a));
+++}
+++
+++// Store 16 bits of integer data from a into memory at address p.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si16
+++FORCE_INLINE void _mm_storeu_si16(void *p, __m128i a)
+++{
+++ vst1q_lane_s16((int16_t *) p, vreinterpretq_s16_m128i(a), 0);
+++}
+++
+++// Store 64 bits of integer data from a into memory at address p.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si64
+++FORCE_INLINE void _mm_storeu_si64(void *p, __m128i a)
+++{
+++ vst1q_lane_s64((int64_t *) p, vreinterpretq_s64_m128i(a), 0);
+++}
+++
+++// Store 64-bits of integer data from a into memory using a non-temporal memory
+++// hint.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pi
+++FORCE_INLINE void _mm_stream_pi(__m64 *p, __m64 a)
+++{
+++ vst1_s64((int64_t *) p, vreinterpret_s64_m64(a));
+++}
+++
+++// Store 128-bits (composed of 4 packed single-precision (32-bit) floating-
+++// point elements) from a into memory using a non-temporal memory hint.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_ps
+++FORCE_INLINE void _mm_stream_ps(float *p, __m128 a)
+++{
+++#if __has_builtin(__builtin_nontemporal_store)
+++ __builtin_nontemporal_store(a, (float32x4_t *) p);
+++#else
+++ vst1q_f32(p, vreinterpretq_f32_m128(a));
+++#endif
+++}
+++
+++// Subtract packed single-precision (32-bit) floating-point elements in b from
+++// packed single-precision (32-bit) floating-point elements in a, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ps
+++FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_f32(
+++ vsubq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++}
+++
+++// Subtract the lower single-precision (32-bit) floating-point element in b from
+++// the lower single-precision (32-bit) floating-point element in a, store the
+++// result in the lower element of dst, and copy the upper 3 packed elements from
+++// a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_ss
+++FORCE_INLINE __m128 _mm_sub_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_sub_ps(a, b));
+++}
+++
+++// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
+++// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
+++// transposed matrix in these vectors (row0 now contains column 0, etc.).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=MM_TRANSPOSE4_PS
+++#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+++ do { \
+++ float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
+++ float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
+++ row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
+++ vget_low_f32(ROW23.val[0])); \
+++ row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
+++ vget_low_f32(ROW23.val[1])); \
+++ row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
+++ vget_high_f32(ROW23.val[0])); \
+++ row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
+++ vget_high_f32(ROW23.val[1])); \
+++ } while (0)
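+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): the macro transposes in place, so copy the
+++// rows into local variables, transpose, and write them back.
+++FORCE_INLINE void _sse2neon_example_transpose(__m128 *r0,
+++ __m128 *r1,
+++ __m128 *r2,
+++ __m128 *r3)
+++{
+++ __m128 row0 = *r0, row1 = *r1, row2 = *r2, row3 = *r3;
+++ _MM_TRANSPOSE4_PS(row0, row1, row2, row3);
+++ *r0 = row0;
+++ *r1 = row1;
+++ *r2 = row2;
+++ *r3 = row3;
+++}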
+++
+++// According to the documentation, these intrinsics behave the same as the
+++// non-'u' versions, so they are simply aliased here.
+++#define _mm_ucomieq_ss _mm_comieq_ss
+++#define _mm_ucomige_ss _mm_comige_ss
+++#define _mm_ucomigt_ss _mm_comigt_ss
+++#define _mm_ucomile_ss _mm_comile_ss
+++#define _mm_ucomilt_ss _mm_comilt_ss
+++#define _mm_ucomineq_ss _mm_comineq_ss
+++
+++// Return vector of type __m128i with undefined elements.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_undefined_si128
+++FORCE_INLINE __m128i _mm_undefined_si128(void)
+++{
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma GCC diagnostic push
+++#pragma GCC diagnostic ignored "-Wuninitialized"
+++#endif
+++ __m128i a;
+++#if defined(_MSC_VER)
+++ a = _mm_setzero_si128();
+++#endif
+++ return a;
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma GCC diagnostic pop
+++#endif
+++}
+++
+++// Return vector of type __m128 with undefined elements.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_ps
+++FORCE_INLINE __m128 _mm_undefined_ps(void)
+++{
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma GCC diagnostic push
+++#pragma GCC diagnostic ignored "-Wuninitialized"
+++#endif
+++ __m128 a;
+++#if defined(_MSC_VER)
+++ a = _mm_setzero_ps();
+++#endif
+++ return a;
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma GCC diagnostic pop
+++#endif
+++}
+++
+++// Unpack and interleave single-precision (32-bit) floating-point elements from
+++// the high half a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_ps
+++FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vzip2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++#else
+++ float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
+++ float32x2x2_t result = vzip_f32(a1, b1);
+++ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave single-precision (32-bit) floating-point elements from
+++// the low half of a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_ps
+++FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vzip1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++#else
+++ float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
+++ float32x2x2_t result = vzip_f32(a1, b1);
+++ return vreinterpretq_m128_f32(vcombine_f32(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Compute the bitwise XOR of packed single-precision (32-bit) floating-point
+++// elements in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_ps
+++FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
+++{
+++ return vreinterpretq_m128_s32(
+++ veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
+++}
+++
+++/* SSE2 */
+++
+++// Add packed 16-bit integers in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi16
+++FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Add packed 32-bit integers in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi32
+++FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vaddq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Add packed 64-bit integers in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi64
+++FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vaddq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+++}
+++
+++// Add packed 8-bit integers in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_epi8
+++FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Add packed double-precision (64-bit) floating-point elements in a and b, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_pd
+++FORCE_INLINE __m128d _mm_add_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2];
+++ c[0] = da[0] + db[0];
+++ c[1] = da[1] + db[1];
+++ return vld1q_f32((float32_t *) c);
+++#endif
+++}
+++
+++// Add the lower double-precision (64-bit) floating-point element in a and b,
+++// store the result in the lower element of dst, and copy the upper element from
+++// a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_sd
+++FORCE_INLINE __m128d _mm_add_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_add_pd(a, b));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2];
+++ c[0] = da[0] + db[0];
+++ c[1] = da[1];
+++ return vld1q_f32((float32_t *) c);
+++#endif
+++}
+++
+++// Add 64-bit integers a and b, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_add_si64
+++FORCE_INLINE __m64 _mm_add_si64(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_s64(
+++ vadd_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
+++}
+++
+++// Add packed signed 16-bit integers in a and b using saturation, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi16
+++FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vqaddq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Add packed signed 8-bit integers in a and b using saturation, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epi8
+++FORCE_INLINE __m128i _mm_adds_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vqaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Add packed unsigned 16-bit integers in a and b using saturation, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu16
+++FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vqaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+++}
+++
+++// Add packed unsigned 8-bit integers in a and b using saturation, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_adds_epu8
+++FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vqaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+++}
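+++
+++// Usage sketch (illustrative only, not part of the upstream sse2neon API; the
+++// function name is hypothetical): contrasting wrapping and saturating byte
+++// addition. Per 8-bit lane, 200 + 100 wraps to 44 with _mm_add_epi8 but
+++// clamps to 255 with _mm_adds_epu8.
+++FORCE_INLINE int _sse2neon_example_saturating_add(void)
+++{
+++ __m128i a = vreinterpretq_m128i_u8(vdupq_n_u8(200));
+++ __m128i b = vreinterpretq_m128i_u8(vdupq_n_u8(100));
+++ uint8_t wrapped =
+++ vgetq_lane_u8(vreinterpretq_u8_m128i(_mm_add_epi8(a, b)), 0);
+++ uint8_t clamped =
+++ vgetq_lane_u8(vreinterpretq_u8_m128i(_mm_adds_epu8(a, b)), 0);
+++ return ((int) clamped << 8) | (int) wrapped; /* 0xFF2C */
+++}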
+++
+++// Compute the bitwise AND of packed double-precision (64-bit) floating-point
+++// elements in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_pd
+++FORCE_INLINE __m128d _mm_and_pd(__m128d a, __m128d b)
+++{
+++ return vreinterpretq_m128d_s64(
+++ vandq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+++}
+++
+++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+++// and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_and_si128
+++FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Compute the bitwise NOT of packed double-precision (64-bit) floating-point
+++// elements in a and then AND with b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_pd
+++FORCE_INLINE __m128d _mm_andnot_pd(__m128d a, __m128d b)
+++{
+++ // *NOTE* argument swap
+++ return vreinterpretq_m128d_s64(
+++ vbicq_s64(vreinterpretq_s64_m128d(b), vreinterpretq_s64_m128d(a)));
+++}
+++
+++// Compute the bitwise NOT of 128 bits (representing integer data) in a and then
+++// AND with b, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_andnot_si128
+++FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vbicq_s32(vreinterpretq_s32_m128i(b),
+++ vreinterpretq_s32_m128i(a))); // *NOTE* argument swap
+++}
+++
+++// Average packed unsigned 16-bit integers in a and b, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu16
+++FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vrhaddq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+++}
+++
+++// Average packed unsigned 8-bit integers in a and b, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_avg_epu8
+++FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vrhaddq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+++}
+++
+++// Shift a left by imm8 bytes while shifting in zeros, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bslli_si128
+++#define _mm_bslli_si128(a, imm) _mm_slli_si128(a, imm)
+++
+++// Shift a right by imm8 bytes while shifting in zeros, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_bsrli_si128
+++#define _mm_bsrli_si128(a, imm) _mm_srli_si128(a, imm)
+++
+++// Cast vector of type __m128d to type __m128. This intrinsic is only used for
+++// compilation and does not generate any instructions, thus it has zero latency.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_ps
+++FORCE_INLINE __m128 _mm_castpd_ps(__m128d a)
+++{
+++ return vreinterpretq_m128_s64(vreinterpretq_s64_m128d(a));
+++}
+++
+++// Cast vector of type __m128d to type __m128i. This intrinsic is only used for
+++// compilation and does not generate any instructions, thus it has zero latency.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castpd_si128
+++FORCE_INLINE __m128i _mm_castpd_si128(__m128d a)
+++{
+++ return vreinterpretq_m128i_s64(vreinterpretq_s64_m128d(a));
+++}
+++
+++// Cast vector of type __m128 to type __m128d. This intrinsic is only used for
+++// compilation and does not generate any instructions, thus it has zero latency.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_pd
+++FORCE_INLINE __m128d _mm_castps_pd(__m128 a)
+++{
+++ return vreinterpretq_m128d_s32(vreinterpretq_s32_m128(a));
+++}
+++
+++// Cast vector of type __m128 to type __m128i. This intrinsic is only used for
+++// compilation and does not generate any instructions, thus it has zero latency.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castps_si128
+++FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
+++{
+++ return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
+++}
+++
+++// Cast vector of type __m128i to type __m128d. This intrinsic is only used for
+++// compilation and does not generate any instructions, thus it has zero latency.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_pd
+++FORCE_INLINE __m128d _mm_castsi128_pd(__m128i a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vreinterpretq_f64_m128i(a));
+++#else
+++ return vreinterpretq_m128d_f32(vreinterpretq_f32_m128i(a));
+++#endif
+++}
+++
+++// Cast vector of type __m128i to type __m128. This intrinsic is only used for
+++// compilation and does not generate any instructions, thus it has zero latency.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_castsi128_ps
+++FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
+++{
+++ return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
+++}
+++
+++// Invalidate and flush the cache line that contains p from all levels of the
+++// cache hierarchy.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clflush
+++#if defined(__APPLE__)
+++#include <libkern/OSCacheControl.h>
+++#endif
+++FORCE_INLINE void _mm_clflush(void const *p)
+++{
+++ (void) p;
+++
+++ /* sys_icache_invalidate is supported since macOS 10.5.
+++ * However, it does not work on non-jailbroken iOS devices, although the
+++ * compilation is successful.
+++ */
+++#if defined(__APPLE__)
+++ sys_icache_invalidate((void *) (uintptr_t) p, SSE2NEON_CACHELINE_SIZE);
+++#elif defined(__GNUC__) || defined(__clang__)
+++ uintptr_t ptr = (uintptr_t) p;
+++ __builtin___clear_cache((char *) ptr,
+++ (char *) ptr + SSE2NEON_CACHELINE_SIZE);
+++#elif defined(_MSC_VER) && SSE2NEON_INCLUDE_WINDOWS_H
+++ FlushInstructionCache(GetCurrentProcess(), p, SSE2NEON_CACHELINE_SIZE);
+++#endif
+++}
+++
+++// Compare packed 16-bit integers in a and b for equality, and store the results
+++// in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi16
+++FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vceqq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Compare packed 32-bit integers in a and b for equality, and store the results
+++// in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi32
+++FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u32(
+++ vceqq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Compare packed 8-bit integers in a and b for equality, and store the results
+++// in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_epi8
+++FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for equality, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_pd
+++FORCE_INLINE __m128d _mm_cmpeq_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(
+++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+++ uint32x4_t cmp =
+++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+++ uint32x4_t swapped = vrev64q_u32(cmp);
+++ return vreinterpretq_m128d_u32(vandq_u32(cmp, swapped));
+++#endif
+++}
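+++
+++// Editorial example (not part of upstream sse2neon): each result lane is
+++// all-ones when the corresponding doubles compare equal and all-zeros
+++// otherwise, so the mask can feed a select/blend. With hypothetical inputs:
+++//
+++//     __m128d x = _mm_set_pd(1.0, 2.0);  // lane 0 = 2.0, lane 1 = 1.0
+++//     __m128d y = _mm_set_pd(1.0, 3.0);  // lane 0 = 3.0, lane 1 = 1.0
+++//     __m128d m = _mm_cmpeq_pd(x, y);    // lane 0 = all-zeros, lane 1 = all-ones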
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for equality, store the result in the lower element of dst, and copy the
+++// upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpeq_sd
+++FORCE_INLINE __m128d _mm_cmpeq_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_cmpeq_pd(a, b));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for greater-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_pd
+++FORCE_INLINE __m128d _mm_cmpge_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(
+++ vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = (*(double *) &a1) >= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for greater-than-or-equal, store the result in the lower element of dst,
+++// and copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpge_sd
+++FORCE_INLINE __m128d _mm_cmpge_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_cmpge_pd(a, b));
+++#else
+++ // expand "_mm_cmpge_pd()" to reduce unnecessary operations
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) >= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = a1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare packed signed 16-bit integers in a and b for greater-than, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi16
+++FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vcgtq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Compare packed signed 32-bit integers in a and b for greater-than, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi32
+++FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u32(
+++ vcgtq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Compare packed signed 8-bit integers in a and b for greater-than, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_epi8
+++FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for greater-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_pd
+++FORCE_INLINE __m128d _mm_cmpgt_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(
+++ vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = (*(double *) &a1) > (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for greater-than, store the result in the lower element of dst, and copy
+++// the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpgt_sd
+++FORCE_INLINE __m128d _mm_cmpgt_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_cmpgt_pd(a, b));
+++#else
+++ // expand "_mm_cmpgt_pd()" to reduce unnecessary operations
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) > (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = a1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for less-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_pd
+++FORCE_INLINE __m128d _mm_cmple_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(
+++ vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = (*(double *) &a1) <= (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for less-than-or-equal, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmple_sd
+++FORCE_INLINE __m128d _mm_cmple_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_cmple_pd(a, b));
+++#else
+++ // expand "_mm_cmple_pd()" to reduce unnecessary operations
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) <= (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = a1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare packed signed 16-bit integers in a and b for less-than, and store the
+++// results in dst. Note: This intrinsic emits the pcmpgtw instruction with the
+++// order of the operands switched.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi16
+++FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vcltq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Compare packed signed 32-bit integers in a and b for less-than, and store the
+++// results in dst. Note: This intrinsic emits the pcmpgtd instruction with the
+++// order of the operands switched.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi32
+++FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u32(
+++ vcltq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Compare packed signed 8-bit integers in a and b for less-than, and store the
+++// results in dst. Note: This intrinsic emits the pcmpgtb instruction with the
+++// order of the operands switched.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_epi8
+++FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for less-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_pd
+++FORCE_INLINE __m128d _mm_cmplt_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(
+++ vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = (*(double *) &a1) < (*(double *) &b1) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for less-than, store the result in the lower element of dst, and copy the
+++// upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmplt_sd
+++FORCE_INLINE __m128d _mm_cmplt_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_cmplt_pd(a, b));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) < (*(double *) &b0) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] = a1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for not-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_pd
+++FORCE_INLINE __m128d _mm_cmpneq_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_s32(vmvnq_s32(vreinterpretq_s32_u64(
+++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)))));
+++#else
+++ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+++ uint32x4_t cmp =
+++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+++ uint32x4_t swapped = vrev64q_u32(cmp);
+++ return vreinterpretq_m128d_u32(vmvnq_u32(vandq_u32(cmp, swapped)));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for not-equal, store the result in the lower element of dst, and copy the
+++// upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpneq_sd
+++FORCE_INLINE __m128d _mm_cmpneq_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_cmpneq_pd(a, b));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for not-greater-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_pd
+++FORCE_INLINE __m128d _mm_cmpnge_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(veorq_u64(
+++ vcgeq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+++ vdupq_n_u64(UINT64_MAX)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] =
+++ !((*(double *) &a0) >= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] =
+++ !((*(double *) &a1) >= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for not-greater-than-or-equal, store the result in the lower element of
+++// dst, and copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnge_sd
+++FORCE_INLINE __m128d _mm_cmpnge_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_cmpnge_pd(a, b));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for not-greater-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_pd
+++FORCE_INLINE __m128d _mm_cmpngt_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(veorq_u64(
+++ vcgtq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+++ vdupq_n_u64(UINT64_MAX)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] =
+++ !((*(double *) &a0) > (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] =
+++ !((*(double *) &a1) > (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for not-greater-than, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpngt_sd
+++FORCE_INLINE __m128d _mm_cmpngt_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_cmpngt_pd(a, b));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for not-less-than-or-equal, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_pd
+++FORCE_INLINE __m128d _mm_cmpnle_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(veorq_u64(
+++ vcleq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+++ vdupq_n_u64(UINT64_MAX)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] =
+++ !((*(double *) &a0) <= (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] =
+++ !((*(double *) &a1) <= (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for not-less-than-or-equal, store the result in the lower element of dst,
+++// and copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnle_sd
+++FORCE_INLINE __m128d _mm_cmpnle_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_cmpnle_pd(a, b));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// for not-less-than, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_pd
+++FORCE_INLINE __m128d _mm_cmpnlt_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_u64(veorq_u64(
+++ vcltq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)),
+++ vdupq_n_u64(UINT64_MAX)));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] =
+++ !((*(double *) &a0) < (*(double *) &b0)) ? ~UINT64_C(0) : UINT64_C(0);
+++ d[1] =
+++ !((*(double *) &a1) < (*(double *) &b1)) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b for not-less-than, store the result in the lower element of dst, and copy
+++// the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpnlt_sd
+++FORCE_INLINE __m128d _mm_cmpnlt_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_cmpnlt_pd(a, b));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// to see if neither is NaN, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_pd
+++FORCE_INLINE __m128d _mm_cmpord_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ // A value compares equal to itself unless it is NaN, so ANDing the two
+++ // self-comparisons flags the lanes where both a and b are ordered.
+++ uint64x2_t not_nan_a =
+++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
+++ uint64x2_t not_nan_b =
+++ vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
+++ return vreinterpretq_m128d_u64(vandq_u64(not_nan_a, not_nan_b));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+++ (*(double *) &b0) == (*(double *) &b0))
+++ ? ~UINT64_C(0)
+++ : UINT64_C(0);
+++ d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
+++ (*(double *) &b1) == (*(double *) &b1))
+++ ? ~UINT64_C(0)
+++ : UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
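+++
+++// Editorial example (not part of upstream sse2neon): a lane is "ordered"
+++// exactly when neither input is NaN, which is what the self-comparisons above
+++// compute. With hypothetical inputs:
+++//
+++//     __m128d x = _mm_set_pd(NAN, 1.0);  // lane 0 = 1.0, lane 1 = NaN
+++//     __m128d y = _mm_set_pd(2.0, 3.0);  // lane 0 = 3.0, lane 1 = 2.0
+++//     __m128d m = _mm_cmpord_pd(x, y);   // lane 0 = all-ones, lane 1 = all-zeros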
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b to see if neither is NaN, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpord_sd
+++FORCE_INLINE __m128d _mm_cmpord_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_cmpord_pd(a, b));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t d[2];
+++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+++ (*(double *) &b0) == (*(double *) &b0))
+++ ? ~UINT64_C(0)
+++ : UINT64_C(0);
+++ d[1] = a1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b
+++// to see if either is NaN, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_pd
+++FORCE_INLINE __m128d _mm_cmpunord_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ // NaN never compares equal to itself, so inverting the "both ordered" mask
+++ // flags the lanes where either input is NaN.
+++ uint64x2_t not_nan_a =
+++ vceqq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(a));
+++ uint64x2_t not_nan_b =
+++ vceqq_f64(vreinterpretq_f64_m128d(b), vreinterpretq_f64_m128d(b));
+++ return vreinterpretq_m128d_s32(
+++ vmvnq_s32(vreinterpretq_s32_u64(vandq_u64(not_nan_a, not_nan_b))));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+++ (*(double *) &b0) == (*(double *) &b0))
+++ ? UINT64_C(0)
+++ : ~UINT64_C(0);
+++ d[1] = ((*(double *) &a1) == (*(double *) &a1) &&
+++ (*(double *) &b1) == (*(double *) &b1))
+++ ? UINT64_C(0)
+++ : ~UINT64_C(0);
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b to see if either is NaN, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpunord_sd
+++FORCE_INLINE __m128d _mm_cmpunord_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_cmpunord_pd(a, b));
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t d[2];
+++ d[0] = ((*(double *) &a0) == (*(double *) &a0) &&
+++ (*(double *) &b0) == (*(double *) &b0))
+++ ? UINT64_C(0)
+++ : ~UINT64_C(0);
+++ d[1] = a1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point element in a and b
+++// for greater-than-or-equal, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comige_sd
+++FORCE_INLINE int _mm_comige_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vgetq_lane_u64(vcgeq_f64(a, b), 0) & 0x1;
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++
+++ return (*(double *) &a0 >= *(double *) &b0);
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point element in a and b
+++// for greater-than, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comigt_sd
+++FORCE_INLINE int _mm_comigt_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vgetq_lane_u64(vcgtq_f64(a, b), 0) & 0x1;
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++
+++ return (*(double *) &a0 > *(double *) &b0);
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point element in a and b
+++// for less-than-or-equal, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comile_sd
+++FORCE_INLINE int _mm_comile_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vgetq_lane_u64(vcleq_f64(a, b), 0) & 0x1;
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++
+++ return (*(double *) &a0 <= *(double *) &b0);
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point element in a and b
+++// for less-than, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comilt_sd
+++FORCE_INLINE int _mm_comilt_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vgetq_lane_u64(vcltq_f64(a, b), 0) & 0x1;
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++
+++ return (*(double *) &a0 < *(double *) &b0);
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point element in a and b
+++// for equality, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comieq_sd
+++FORCE_INLINE int _mm_comieq_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vgetq_lane_u64(vceqq_f64(a, b), 0) & 0x1;
+++#else
+++ uint32x4_t a_not_nan =
+++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(a));
+++ uint32x4_t b_not_nan =
+++ vceqq_u32(vreinterpretq_u32_m128d(b), vreinterpretq_u32_m128d(b));
+++ uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
+++ uint32x4_t a_eq_b =
+++ vceqq_u32(vreinterpretq_u32_m128d(a), vreinterpretq_u32_m128d(b));
+++ uint64x2_t and_results = vandq_u64(vreinterpretq_u64_u32(a_and_b_not_nan),
+++ vreinterpretq_u64_u32(a_eq_b));
+++ return vgetq_lane_u64(and_results, 0) & 0x1;
+++#endif
+++}
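+++
+++// Editorial example (not part of upstream sse2neon): unlike the cmp* family,
+++// the comi* helpers return a plain 0/1 int computed from the lower lanes:
+++//
+++//     int eq = _mm_comieq_sd(_mm_set_sd(1.5), _mm_set_sd(1.5));  // eq == 1
+++//     int gt = _mm_comigt_sd(_mm_set_sd(1.0), _mm_set_sd(2.0));  // gt == 0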
+++
+++// Compare the lower double-precision (64-bit) floating-point element in a and b
+++// for not-equal, and return the boolean result (0 or 1).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_comineq_sd
+++FORCE_INLINE int _mm_comineq_sd(__m128d a, __m128d b)
+++{
+++ return !_mm_comieq_sd(a, b);
+++}
+++
+++// Convert packed signed 32-bit integers in a to packed double-precision
+++// (64-bit) floating-point elements, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_pd
+++FORCE_INLINE __m128d _mm_cvtepi32_pd(__m128i a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vcvtq_f64_s64(vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a)))));
+++#else
+++ double a0 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
+++ double a1 = (double) vgetq_lane_s32(vreinterpretq_s32_m128i(a), 1);
+++ return _mm_set_pd(a1, a0);
+++#endif
+++}
+++
+++// Convert packed signed 32-bit integers in a to packed single-precision
+++// (32-bit) floating-point elements, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_ps
+++FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
+++{
+++ return vreinterpretq_m128_f32(vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
+++}
+++
+++// Convert packed double-precision (64-bit) floating-point elements in a to
+++// packed 32-bit integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_epi32
+++FORCE_INLINE __m128i _mm_cvtpd_epi32(__m128d a)
+++{
+++// vrnd32xq_f64 not supported on clang
+++#if defined(__ARM_FEATURE_FRINT) && !defined(__clang__)
+++ float64x2_t rounded = vrnd32xq_f64(vreinterpretq_f64_m128d(a));
+++ int64x2_t integers = vcvtq_s64_f64(rounded);
+++ return vreinterpretq_m128i_s32(
+++ vcombine_s32(vmovn_s64(integers), vdup_n_s32(0)));
+++#else
+++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ double d0 = ((double *) &rnd)[0];
+++ double d1 = ((double *) &rnd)[1];
+++ return _mm_set_epi32(0, 0, (int32_t) d1, (int32_t) d0);
+++#endif
+++}
+++
+++// Convert packed double-precision (64-bit) floating-point elements in a to
+++// packed 32-bit integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_pi32
+++FORCE_INLINE __m64 _mm_cvtpd_pi32(__m128d a)
+++{
+++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ double d0 = ((double *) &rnd)[0];
+++ double d1 = ((double *) &rnd)[1];
+++ int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) d0, (int32_t) d1};
+++ return vreinterpret_m64_s32(vld1_s32(data));
+++}
+++
+++// Convert packed double-precision (64-bit) floating-point elements in a to
+++// packed single-precision (32-bit) floating-point elements, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpd_ps
+++FORCE_INLINE __m128 _mm_cvtpd_ps(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ float32x2_t tmp = vcvt_f32_f64(vreinterpretq_f64_m128d(a));
+++ return vreinterpretq_m128_f32(vcombine_f32(tmp, vdup_n_f32(0)));
+++#else
+++ float a0 = (float) ((double *) &a)[0];
+++ float a1 = (float) ((double *) &a)[1];
+++ return _mm_set_ps(0, 0, a1, a0);
+++#endif
+++}
+++
+++// Convert packed signed 32-bit integers in a to packed double-precision
+++// (64-bit) floating-point elements, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtpi32_pd
+++FORCE_INLINE __m128d _mm_cvtpi32_pd(__m64 a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vcvtq_f64_s64(vmovl_s32(vreinterpret_s32_m64(a))));
+++#else
+++ double a0 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 0);
+++ double a1 = (double) vget_lane_s32(vreinterpret_s32_m64(a), 1);
+++ return _mm_set_pd(a1, a0);
+++#endif
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 32-bit integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_epi32
+++// *NOTE*: The default SSE rounding mode is 'round to nearest even'. ARMv7-A
+++// cannot do this natively, so it is emulated below; ARMv8-A supports it
+++// directly.
+++FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
+++{
+++#if defined(__ARM_FEATURE_FRINT)
+++ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vrnd32xq_f32(a)));
+++#elif (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ switch (_MM_GET_ROUNDING_MODE()) {
+++ case _MM_ROUND_NEAREST:
+++ return vreinterpretq_m128i_s32(vcvtnq_s32_f32(a));
+++ case _MM_ROUND_DOWN:
+++ return vreinterpretq_m128i_s32(vcvtmq_s32_f32(a));
+++ case _MM_ROUND_UP:
+++ return vreinterpretq_m128i_s32(vcvtpq_s32_f32(a));
+++ default: // _MM_ROUND_TOWARD_ZERO
+++ return vreinterpretq_m128i_s32(vcvtq_s32_f32(a));
+++ }
+++#else
+++ float *f = (float *) &a;
+++ switch (_MM_GET_ROUNDING_MODE()) {
+++ case _MM_ROUND_NEAREST: {
+++ uint32x4_t signmask = vdupq_n_u32(0x80000000);
+++ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
+++ vdupq_n_f32(0.5f)); /* +/- 0.5 */
+++ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
+++ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
+++ int32x4_t r_trunc = vcvtq_s32_f32(
+++ vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
+++ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
+++ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
+++ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
+++ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
+++ float32x4_t delta = vsubq_f32(
+++ vreinterpretq_f32_m128(a),
+++ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
+++ uint32x4_t is_delta_half =
+++ vceqq_f32(delta, half); /* delta == +/- 0.5 */
+++ return vreinterpretq_m128i_s32(
+++ vbslq_s32(is_delta_half, r_even, r_normal));
+++ }
+++ case _MM_ROUND_DOWN:
+++ return _mm_set_epi32(floorf(f[3]), floorf(f[2]), floorf(f[1]),
+++ floorf(f[0]));
+++ case _MM_ROUND_UP:
+++ return _mm_set_epi32(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]),
+++ ceilf(f[0]));
+++ default: // _MM_ROUND_TOWARD_ZERO
+++ return _mm_set_epi32((int32_t) f[3], (int32_t) f[2], (int32_t) f[1],
+++ (int32_t) f[0]);
+++ }
+++#endif
+++}
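+++
+++// Editorial example (not part of upstream sse2neon): under the default
+++// _MM_ROUND_NEAREST mode, ties round to the nearest even integer, which is
+++// what the r_even/is_delta_half fixup above emulates on ARMv7:
+++//
+++//     __m128  v = _mm_set_ps(2.5f, 1.5f, 0.5f, -0.5f);
+++//     __m128i r = _mm_cvtps_epi32(v);  // lanes (low to high): 0, 0, 2, 2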
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed double-precision (64-bit) floating-point elements, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtps_pd
+++FORCE_INLINE __m128d _mm_cvtps_pd(__m128 a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vcvt_f64_f32(vget_low_f32(vreinterpretq_f32_m128(a))));
+++#else
+++ double a0 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+++ double a1 = (double) vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
+++ return _mm_set_pd(a1, a0);
+++#endif
+++}
+++
+++// Copy the lower double-precision (64-bit) floating-point element of a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_f64
+++FORCE_INLINE double _mm_cvtsd_f64(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return (double) vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0);
+++#else
+++ return ((double *) &a)[0];
+++#endif
+++}
+++
+++// Convert the lower double-precision (64-bit) floating-point element in a to a
+++// 32-bit integer, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si32
+++FORCE_INLINE int32_t _mm_cvtsd_si32(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return (int32_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
+++#else
+++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ double ret = ((double *) &rnd)[0];
+++ return (int32_t) ret;
+++#endif
+++}
+++
+++// Convert the lower double-precision (64-bit) floating-point element in a to a
+++// 64-bit integer, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64
+++FORCE_INLINE int64_t _mm_cvtsd_si64(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return (int64_t) vgetq_lane_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)), 0);
+++#else
+++ __m128d rnd = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ double ret = ((double *) &rnd)[0];
+++ return (int64_t) ret;
+++#endif
+++}
+++
+++// Convert the lower double-precision (64-bit) floating-point element in a to a
+++// 64-bit integer, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_si64x
+++#define _mm_cvtsd_si64x _mm_cvtsd_si64
+++
+++// Convert the lower double-precision (64-bit) floating-point element in b to a
+++// single-precision (32-bit) floating-point element, store the result in the
+++// lower element of dst, and copy the upper 3 packed elements from a to the
+++// upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsd_ss
+++FORCE_INLINE __m128 _mm_cvtsd_ss(__m128 a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(vsetq_lane_f32(
+++ vget_lane_f32(vcvt_f32_f64(vreinterpretq_f64_m128d(b)), 0),
+++ vreinterpretq_f32_m128(a), 0));
+++#else
+++ return vreinterpretq_m128_f32(vsetq_lane_f32((float) ((double *) &b)[0],
+++ vreinterpretq_f32_m128(a), 0));
+++#endif
+++}
+++
+++// Copy the lower 32-bit integer in a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si32
+++FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
+++{
+++ return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
+++}
+++
+++// Copy the lower 64-bit integer in a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64
+++FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
+++{
+++ return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
+++}
+++
+++// Copy the lower 64-bit integer in a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi128_si64x
+++#define _mm_cvtsi128_si64x(a) _mm_cvtsi128_si64(a)
+++
+++// Convert the signed 32-bit integer b to a double-precision (64-bit)
+++// floating-point element, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_sd
+++FORCE_INLINE __m128d _mm_cvtsi32_sd(__m128d a, int32_t b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
+++#else
+++ double bf = (double) b;
+++ return vreinterpretq_m128d_s64(
+++ vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
+++#endif
+++}
+++
+++// Copy 32-bit integer a to the lower elements of dst, and zero the upper
+++// elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi32_si128
+++FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
+++{
+++ return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
+++}
+++
+++// Convert the signed 64-bit integer b to a double-precision (64-bit)
+++// floating-point element, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_sd
+++FORCE_INLINE __m128d _mm_cvtsi64_sd(__m128d a, int64_t b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vsetq_lane_f64((double) b, vreinterpretq_f64_m128d(a), 0));
+++#else
+++ double bf = (double) b;
+++ return vreinterpretq_m128d_s64(
+++ vsetq_lane_s64(*(int64_t *) &bf, vreinterpretq_s64_m128d(a), 0));
+++#endif
+++}
+++
+++// Copy 64-bit integer a to the lower element of dst, and zero the upper
+++// element.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64_si128
+++FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
+++{
+++ return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
+++}
+++
+++// Copy 64-bit integer a to the lower element of dst, and zero the upper
+++// element.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_si128
+++#define _mm_cvtsi64x_si128(a) _mm_cvtsi64_si128(a)
+++
+++// Convert the signed 64-bit integer b to a double-precision (64-bit)
+++// floating-point element, store the result in the lower element of dst, and
+++// copy the upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtsi64x_sd
+++#define _mm_cvtsi64x_sd(a, b) _mm_cvtsi64_sd(a, b)
+++
+++// Convert the lower single-precision (32-bit) floating-point element in b to a
+++// double-precision (64-bit) floating-point element, store the result in the
+++// lower element of dst, and copy the upper element from a to the upper element
+++// of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtss_sd
+++FORCE_INLINE __m128d _mm_cvtss_sd(__m128d a, __m128 b)
+++{
+++ double d = (double) vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vsetq_lane_f64(d, vreinterpretq_f64_m128d(a), 0));
+++#else
+++ return vreinterpretq_m128d_s64(
+++ vsetq_lane_s64(*(int64_t *) &d, vreinterpretq_s64_m128d(a), 0));
+++#endif
+++}
+++
+++// Convert packed double-precision (64-bit) floating-point elements in a to
+++// packed 32-bit integers with truncation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_epi32
+++FORCE_INLINE __m128i _mm_cvttpd_epi32(__m128d a)
+++{
+++ double a0 = ((double *) &a)[0];
+++ double a1 = ((double *) &a)[1];
+++ return _mm_set_epi32(0, 0, (int32_t) a1, (int32_t) a0);
+++}
+++
+++// Convert packed double-precision (64-bit) floating-point elements in a to
+++// packed 32-bit integers with truncation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttpd_pi32
+++FORCE_INLINE __m64 _mm_cvttpd_pi32(__m128d a)
+++{
+++ double a0 = ((double *) &a)[0];
+++ double a1 = ((double *) &a)[1];
+++ int32_t ALIGN_STRUCT(16) data[2] = {(int32_t) a0, (int32_t) a1};
+++ return vreinterpret_m64_s32(vld1_s32(data));
+++}
+++
+++// Convert packed single-precision (32-bit) floating-point elements in a to
+++// packed 32-bit integers with truncation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttps_epi32
+++FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
+++{
+++ return vreinterpretq_m128i_s32(vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
+++}
+++
+++// Convert the lower double-precision (64-bit) floating-point element in a to a
+++// 32-bit integer with truncation, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si32
+++FORCE_INLINE int32_t _mm_cvttsd_si32(__m128d a)
+++{
+++ double ret = *((double *) &a);
+++ return (int32_t) ret;
+++}
+++
+++// Convert the lower double-precision (64-bit) floating-point element in a to a
+++// 64-bit integer with truncation, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64
+++FORCE_INLINE int64_t _mm_cvttsd_si64(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vgetq_lane_s64(vcvtq_s64_f64(vreinterpretq_f64_m128d(a)), 0);
+++#else
+++ double ret = *((double *) &a);
+++ return (int64_t) ret;
+++#endif
+++}
+++
+++// Convert the lower double-precision (64-bit) floating-point element in a to a
+++// 64-bit integer with truncation, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvttsd_si64x
+++#define _mm_cvttsd_si64x(a) _mm_cvttsd_si64(a)
+++
+++// Divide packed double-precision (64-bit) floating-point elements in a by
+++// packed elements in b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_pd
+++FORCE_INLINE __m128d _mm_div_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2];
+++ c[0] = da[0] / db[0];
+++ c[1] = da[1] / db[1];
+++ return vld1q_f32((float32_t *) c);
+++#endif
+++}
+++
+++// Divide the lower double-precision (64-bit) floating-point element in a by the
+++// lower double-precision (64-bit) floating-point element in b, store the result
+++// in the lower element of dst, and copy the upper element from a to the upper
+++// element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_div_sd
+++FORCE_INLINE __m128d _mm_div_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ float64x2_t tmp =
+++ vdivq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b));
+++ return vreinterpretq_m128d_f64(
+++ vsetq_lane_f64(vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1), tmp, 1));
+++#else
+++ return _mm_move_sd(a, _mm_div_pd(a, b));
+++#endif
+++}
+++
+++// Extract a 16-bit integer from a, selected with imm8, and store the result in
+++// the lower element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi16
+++// FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
+++#define _mm_extract_epi16(a, imm) \
+++ vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))
+++
+++// Copy a to dst, and insert the 16-bit integer i into dst at the location
+++// specified by imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi16
+++// FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
+++// __constrange(0,8) int imm)
+++#define _mm_insert_epi16(a, b, imm) \
+++ vreinterpretq_m128i_s16( \
+++ vsetq_lane_s16((b), vreinterpretq_s16_m128i(a), (imm)))
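+++
+++// Editorial note (not part of upstream sse2neon): both macros above forward
+++// imm to vgetq_lane/vsetq_lane, so imm must be a compile-time constant in
+++// [0, 7]. A hypothetical usage sketch:
+++//
+++//     __m128i v = _mm_set1_epi16(7);
+++//     int     x = _mm_extract_epi16(v, 3);     // x == 7
+++//     __m128i w = _mm_insert_epi16(v, 42, 0);  // lane 0 becomes 42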
+++
+++// Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+++// elements) from memory into dst. mem_addr must be aligned on a 16-byte
+++// boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd
+++FORCE_INLINE __m128d _mm_load_pd(const double *p)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vld1q_f64(p));
+++#else
+++ const float *fp = (const float *) p;
+++ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], fp[2], fp[3]};
+++ return vreinterpretq_m128d_f32(vld1q_f32(data));
+++#endif
+++}
+++
+++// Load a double-precision (64-bit) floating-point element from memory into both
+++// elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_pd1
+++#define _mm_load_pd1 _mm_load1_pd
+++
+++// Load a double-precision (64-bit) floating-point element from memory into the
+++// lower of dst, and zero the upper element. mem_addr does not need to be
+++// aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_sd
+++FORCE_INLINE __m128d _mm_load_sd(const double *p)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vsetq_lane_f64(*p, vdupq_n_f64(0), 0));
+++#else
+++ const float *fp = (const float *) p;
+++ float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
+++ return vreinterpretq_m128d_f32(vld1q_f32(data));
+++#endif
+++}
+++
+++// Load 128-bits of integer data from memory into dst. mem_addr must be aligned
+++// on a 16-byte boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load_si128
+++FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
+++{
+++ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
+++}
+++
+++// Load a double-precision (64-bit) floating-point element from memory into both
+++// elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_load1_pd
+++FORCE_INLINE __m128d _mm_load1_pd(const double *p)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vld1q_dup_f64(p));
+++#else
+++ return vreinterpretq_m128d_s64(vdupq_n_s64(*(const int64_t *) p));
+++#endif
+++}
+++
+++// Load a double-precision (64-bit) floating-point element from memory into the
+++// upper element of dst, and copy the lower element from a to dst. mem_addr does
+++// not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadh_pd
+++FORCE_INLINE __m128d _mm_loadh_pd(__m128d a, const double *p)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vcombine_f64(vget_low_f64(vreinterpretq_f64_m128d(a)), vld1_f64(p)));
+++#else
+++ return vreinterpretq_m128d_f32(vcombine_f32(
+++ vget_low_f32(vreinterpretq_f32_m128d(a)), vld1_f32((const float *) p)));
+++#endif
+++}
+++
+++// Load 64-bit integer from memory into the first element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_epi64
+++FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
+++{
+++ /* Load the lower 64 bits of the value pointed to by p into the
+++ * lower 64 bits of the result, zeroing the upper 64 bits of the result.
+++ */
+++ return vreinterpretq_m128i_s32(
+++ vcombine_s32(vld1_s32((int32_t const *) p), vcreate_s32(0)));
+++}
+++
+++// Load a double-precision (64-bit) floating-point element from memory into the
+++// lower element of dst, and copy the upper element from a to dst. mem_addr does
+++// not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadl_pd
+++FORCE_INLINE __m128d _mm_loadl_pd(__m128d a, const double *p)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vcombine_f64(vld1_f64(p), vget_high_f64(vreinterpretq_f64_m128d(a))));
+++#else
+++ return vreinterpretq_m128d_f32(
+++ vcombine_f32(vld1_f32((const float *) p),
+++ vget_high_f32(vreinterpretq_f32_m128d(a))));
+++#endif
+++}
+++
+++// Load 2 double-precision (64-bit) floating-point elements from memory into dst
+++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+++// general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadr_pd
+++FORCE_INLINE __m128d _mm_loadr_pd(const double *p)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ float64x2_t v = vld1q_f64(p);
+++ return vreinterpretq_m128d_f64(vextq_f64(v, v, 1));
+++#else
+++ int64x2_t v = vld1q_s64((const int64_t *) p);
+++ return vreinterpretq_m128d_s64(vextq_s64(v, v, 1));
+++#endif
+++}
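+++
+++// Editorial example (not part of upstream sse2neon): rotating the vector by
+++// one 64-bit lane with vextq swaps the two halves, which yields the reversed
+++// load:
+++//
+++//     double p[2] = {1.0, 2.0};     // must be 16-byte aligned in real use
+++//     __m128d r = _mm_loadr_pd(p);  // lane 0 = 2.0, lane 1 = 1.0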
+++
+++// Load 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+++// elements) from memory into dst. mem_addr does not need to be aligned on any
+++// particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_pd
+++FORCE_INLINE __m128d _mm_loadu_pd(const double *p)
+++{
+++ return _mm_load_pd(p);
+++}
+++
+++// Load 128-bits of integer data from memory into dst. mem_addr does not need to
+++// be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si128
+++FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
+++{
+++ return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *) p));
+++}
+++
+++// Load unaligned 32-bit integer from memory into the first element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loadu_si32
+++FORCE_INLINE __m128i _mm_loadu_si32(const void *p)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vsetq_lane_s32(*(const int32_t *) p, vdupq_n_s32(0), 0));
+++}
+++
+++// Multiply packed signed 16-bit integers in a and b, producing intermediate
+++// signed 32-bit integers. Horizontally add adjacent pairs of intermediate
+++// 32-bit integers, and pack the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_madd_epi16
+++FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
+++{
+++ int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
+++ vget_low_s16(vreinterpretq_s16_m128i(b)));
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int32x4_t high =
+++ vmull_high_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b));
+++
+++ return vreinterpretq_m128i_s32(vpaddq_s32(low, high));
+++#else
+++ int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
+++ vget_high_s16(vreinterpretq_s16_m128i(b)));
+++
+++ int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
+++ int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
+++
+++ return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
+++#endif
+++}
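+++
+++// Editorial example (not part of upstream sse2neon): each 32-bit output lane
+++// is the sum of two adjacent 16-bit products,
+++// dst[i] = a[2*i] * b[2*i] + a[2*i+1] * b[2*i+1]. For instance:
+++//
+++//     __m128i a = _mm_set1_epi16(2);
+++//     __m128i b = _mm_set1_epi16(3);
+++//     __m128i r = _mm_madd_epi16(a, b);  // every 32-bit lane == 12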
+++
+++// Conditionally store 8-bit integer elements from a into memory using mask
+++// (elements are not stored when the highest bit is not set in the corresponding
+++// element) and a non-temporal memory hint. mem_addr does not need to be aligned
+++// on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maskmoveu_si128
+++FORCE_INLINE void _mm_maskmoveu_si128(__m128i a, __m128i mask, char *mem_addr)
+++{
+++ int8x16_t shr_mask = vshrq_n_s8(vreinterpretq_s8_m128i(mask), 7);
+++ __m128 b = _mm_load_ps((const float *) mem_addr);
+++ int8x16_t masked =
+++ vbslq_s8(vreinterpretq_u8_s8(shr_mask), vreinterpretq_s8_m128i(a),
+++ vreinterpretq_s8_m128(b));
+++ vst1q_s8((int8_t *) mem_addr, masked);
+++}
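+++
+++// Editorial note (not part of upstream sse2neon): only bytes whose mask byte
+++// has its most significant bit set are changed in memory. Unlike the x86
+++// instruction, this emulation does a 16-byte read-modify-write of mem_addr,
+++// so all 16 bytes must be readable and writable. Hypothetical sketch:
+++//
+++//     char dst[16] = {0};
+++//     __m128i data = _mm_set1_epi8(0x7F);
+++//     __m128i mask = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+++//                                 0, 0, 0, 0, 0, 0, 0, (char) 0x80);
+++//     _mm_maskmoveu_si128(data, mask, dst);  // only dst[0] becomes 0x7F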
+++
+++// Compare packed signed 16-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi16
+++FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vmaxq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Compare packed unsigned 8-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu8
+++FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b,
+++// and store packed maximum values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_pd
+++FORCE_INLINE __m128d _mm_max_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++#if SSE2NEON_PRECISE_MINMAX
+++ float64x2_t _a = vreinterpretq_f64_m128d(a);
+++ float64x2_t _b = vreinterpretq_f64_m128d(b);
+++ return vreinterpretq_m128d_f64(vbslq_f64(vcgtq_f64(_a, _b), _a, _b));
+++#else
+++ return vreinterpretq_m128d_f64(
+++ vmaxq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#endif
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) > (*(double *) &b0) ? a0 : b0;
+++ d[1] = (*(double *) &a1) > (*(double *) &b1) ? a1 : b1;
+++
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
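+++
+++// Editorial note (not part of upstream sse2neon): x86 maxpd returns the second
+++// operand whenever the comparison is false, including when either input is
+++// NaN, whereas vmaxq_f64 propagates NaN. Building with SSE2NEON_PRECISE_MINMAX
+++// selects the bit-select path above, which matches the x86 behaviour:
+++//
+++//     __m128d a = _mm_set_pd(NAN, 1.0);  // lane 0 = 1.0, lane 1 = NaN
+++//     __m128d b = _mm_set_pd(2.0, 3.0);  // lane 0 = 3.0, lane 1 = 2.0
+++//     __m128d r = _mm_max_pd(a, b);      // x86 semantics: lanes 3.0, 2.0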
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b, store the maximum value in the lower element of dst, and copy the upper
+++// element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_sd
+++FORCE_INLINE __m128d _mm_max_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_max_pd(a, b));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2] = {da[0] > db[0] ? da[0] : db[0], da[1]};
+++ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
+++#endif
+++}
+++
+++// Compare packed signed 16-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi16
+++FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vminq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Compare packed unsigned 8-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu8
+++FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+++}
+++
+++// Compare packed double-precision (64-bit) floating-point elements in a and b,
+++// and store packed minimum values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_pd
+++FORCE_INLINE __m128d _mm_min_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++#if SSE2NEON_PRECISE_MINMAX
+++ float64x2_t _a = vreinterpretq_f64_m128d(a);
+++ float64x2_t _b = vreinterpretq_f64_m128d(b);
+++ return vreinterpretq_m128d_f64(vbslq_f64(vcltq_f64(_a, _b), _a, _b));
+++#else
+++ return vreinterpretq_m128d_f64(
+++ vminq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#endif
+++#else
+++ uint64_t a0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t a1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(a));
+++ uint64_t b0 = (uint64_t) vget_low_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t b1 = (uint64_t) vget_high_u64(vreinterpretq_u64_m128d(b));
+++ uint64_t d[2];
+++ d[0] = (*(double *) &a0) < (*(double *) &b0) ? a0 : b0;
+++ d[1] = (*(double *) &a1) < (*(double *) &b1) ? a1 : b1;
+++ return vreinterpretq_m128d_u64(vld1q_u64(d));
+++#endif
+++}
+++
+++// Compare the lower double-precision (64-bit) floating-point elements in a and
+++// b, store the minimum value in the lower element of dst, and copy the upper
+++// element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_sd
+++FORCE_INLINE __m128d _mm_min_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_min_pd(a, b));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2] = {da[0] < db[0] ? da[0] : db[0], da[1]};
+++ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) c));
+++#endif
+++}
+++
+++// Copy the lower 64-bit integer in a to the lower element of dst, and zero the
+++// upper element.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_epi64
+++FORCE_INLINE __m128i _mm_move_epi64(__m128i a)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vsetq_lane_s64(0, vreinterpretq_s64_m128i(a), 1));
+++}
+++
+++// Move the lower double-precision (64-bit) floating-point element from b to the
+++// lower element of dst, and copy the upper element from a to the upper element
+++// of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_move_sd
+++FORCE_INLINE __m128d _mm_move_sd(__m128d a, __m128d b)
+++{
+++ return vreinterpretq_m128d_f32(
+++ vcombine_f32(vget_low_f32(vreinterpretq_f32_m128d(b)),
+++ vget_high_f32(vreinterpretq_f32_m128d(a))));
+++}
+++
+++// Create mask from the most significant bit of each 8-bit element in a, and
+++// store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_epi8
+++FORCE_INLINE int _mm_movemask_epi8(__m128i a)
+++{
+++ // Use increasingly wide shifts+adds to collect the sign bits
+++ // together.
+++ // Since the widening shifts would be rather confusing to follow in little
+++ // endian, everything will be illustrated in big endian order instead. This
+++ // has a different result - the bits would actually be reversed on a big
+++ // endian machine.
+++
+++ // Starting input (only half the elements are shown):
+++ // 89 ff 1d c0 00 10 99 33
+++ uint8x16_t input = vreinterpretq_u8_m128i(a);
+++
+++ // Shift out everything but the sign bits with an unsigned shift right.
+++ //
+++ // Bytes of the vector:
+++ // 89 ff 1d c0 00 10 99 33
+++ // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
+++ // | | | | | | | |
+++ // 01 01 00 01 00 00 01 00
+++ //
+++ // Bits of first important lane(s):
+++ // 10001001 (89)
+++ // \______
+++ // |
+++ // 00000001 (01)
+++ uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
+++
+++ // Merge the even lanes together with a 16-bit unsigned shift right + add.
+++ // 'xx' represents garbage data which will be ignored in the final result.
+++ // In the important bytes, the add functions like a binary OR.
+++ //
+++ // 01 01 00 01 00 00 01 00
+++ // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
+++ // \| \| \| \|
+++ // xx 03 xx 01 xx 00 xx 02
+++ //
+++ // 00000001 00000001 (01 01)
+++ // \_______ |
+++ // \|
+++ // xxxxxxxx xxxxxx11 (xx 03)
+++ uint32x4_t paired16 =
+++ vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
+++
+++ // Repeat with a wider 32-bit shift + add.
+++ // xx 03 xx 01 xx 00 xx 02
+++ // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >>
+++ // 14))
+++ // \| \|
+++ // xx xx xx 0d xx xx xx 02
+++ //
+++ // 00000011 00000001 (03 01)
+++ // \\_____ ||
+++ // '----.\||
+++ // xxxxxxxx xxxx1101 (xx 0d)
+++ uint64x2_t paired32 =
+++ vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
+++
+++ // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
+++ // lanes. xx xx xx 0d xx xx xx 02
+++ // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >>
+++ // 28))
+++ // \|
+++ // xx xx xx xx xx xx xx d2
+++ //
+++ // 00001101 00000010 (0d 02)
+++ // \ \___ | |
+++ // '---. \| |
+++ // xxxxxxxx 11010010 (xx d2)
+++ uint8x16_t paired64 =
+++ vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
+++
+++ // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
+++ // xx xx xx xx xx xx xx d2
+++ // || return paired64[0]
+++ // d2
+++ // Note: Little endian would return the correct value 4b (01001011) instead.
+++ return vgetq_lane_u8(paired64, 0) | ((int) vgetq_lane_u8(paired64, 8) << 8);
+++}
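+++
+++// Illustrative usage (editor's sketch, not part of the upstream header):
+++// locating the first zero byte in a 16-byte chunk; `p` is a hypothetical
+++// pointer and __builtin_ctz is the GCC/Clang count-trailing-zeros builtin.
+++//   __m128i chunk = _mm_loadu_si128((const __m128i *) p);
+++//   int mask = _mm_movemask_epi8(_mm_cmpeq_epi8(chunk, _mm_setzero_si128()));
+++//   int idx = mask ? __builtin_ctz(mask) : -1; // index of first zero byte, or -1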
+++
+++// Set each bit of mask dst based on the most significant bit of the
+++// corresponding packed double-precision (64-bit) floating-point element in a.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movemask_pd
+++FORCE_INLINE int _mm_movemask_pd(__m128d a)
+++{
+++ uint64x2_t input = vreinterpretq_u64_m128d(a);
+++ uint64x2_t high_bits = vshrq_n_u64(input, 63);
+++ return (int) (vgetq_lane_u64(high_bits, 0) |
+++ (vgetq_lane_u64(high_bits, 1) << 1));
+++}
+++
+++// Copy the lower 64-bit integer in a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movepi64_pi64
+++FORCE_INLINE __m64 _mm_movepi64_pi64(__m128i a)
+++{
+++ return vreinterpret_m64_s64(vget_low_s64(vreinterpretq_s64_m128i(a)));
+++}
+++
+++// Copy the 64-bit integer a to the lower element of dst, and zero the upper
+++// element.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movpi64_epi64
+++FORCE_INLINE __m128i _mm_movpi64_epi64(__m64 a)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vcombine_s64(vreinterpret_s64_m64(a), vdup_n_s64(0)));
+++}
+++
+++// Multiply the low unsigned 32-bit integers from each packed 64-bit element in
+++// a and b, and store the unsigned 64-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epu32
+++FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
+++{
+++ // vmull_u32 upcasts instead of masking, so we downcast.
+++ uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
+++ uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
+++ return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
+++}
+++
+++// Multiply packed double-precision (64-bit) floating-point elements in a and b,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_pd
+++FORCE_INLINE __m128d _mm_mul_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vmulq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2];
+++ c[0] = da[0] * db[0];
+++ c[1] = da[1] * db[1];
+++ return vld1q_f32((float32_t *) c);
+++#endif
+++}
+++
+++// Multiply the lower double-precision (64-bit) floating-point element in a and
+++// b, store the result in the lower element of dst, and copy the upper element
+++// from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mul_sd
+++FORCE_INLINE __m128d _mm_mul_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_mul_pd(a, b));
+++}
+++
+++// Multiply the low unsigned 32-bit integers from a and b, and store the
+++// unsigned 64-bit result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_su32
+++FORCE_INLINE __m64 _mm_mul_su32(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_u64(vget_low_u64(
+++ vmull_u32(vreinterpret_u32_m64(a), vreinterpret_u32_m64(b))));
+++}
+++
+++// Multiply the packed signed 16-bit integers in a and b, producing intermediate
+++// 32-bit integers, and store the high 16 bits of the intermediate integers in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epi16
+++FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
+++{
+++ /* FIXME: issue with large values because of result saturation */
+++ // int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
+++ // vreinterpretq_s16_m128i(b)); /* =2*a*b */ return
+++ // vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
+++ int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
+++ int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
+++ int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
+++ int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
+++ int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
+++ int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
+++ uint16x8x2_t r =
+++ vuzpq_u16(vreinterpretq_u16_s32(ab3210), vreinterpretq_u16_s32(ab7654));
+++ return vreinterpretq_m128i_u16(r.val[1]);
+++}
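+++
+++// Illustrative usage (editor's sketch, not part of the upstream header): the
+++// full 32-bit signed products can be rebuilt by interleaving the low and high
+++// halves of the multiplication.
+++//   __m128i lo = _mm_mullo_epi16(a, b);
+++//   __m128i hi = _mm_mulhi_epi16(a, b);
+++//   __m128i prod_0123 = _mm_unpacklo_epi16(lo, hi); // products of elements 0-3
+++//   __m128i prod_4567 = _mm_unpackhi_epi16(lo, hi); // products of elements 4-7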
+++
+++// Multiply the packed unsigned 16-bit integers in a and b, producing
+++// intermediate 32-bit integers, and store the high 16 bits of the intermediate
+++// integers in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhi_epu16
+++FORCE_INLINE __m128i _mm_mulhi_epu16(__m128i a, __m128i b)
+++{
+++ uint16x4_t a3210 = vget_low_u16(vreinterpretq_u16_m128i(a));
+++ uint16x4_t b3210 = vget_low_u16(vreinterpretq_u16_m128i(b));
+++ uint32x4_t ab3210 = vmull_u16(a3210, b3210);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint32x4_t ab7654 =
+++ vmull_high_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
+++ uint16x8_t r = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
+++ vreinterpretq_u16_u32(ab7654));
+++ return vreinterpretq_m128i_u16(r);
+++#else
+++ uint16x4_t a7654 = vget_high_u16(vreinterpretq_u16_m128i(a));
+++ uint16x4_t b7654 = vget_high_u16(vreinterpretq_u16_m128i(b));
+++ uint32x4_t ab7654 = vmull_u16(a7654, b7654);
+++ uint16x8x2_t r =
+++ vuzpq_u16(vreinterpretq_u16_u32(ab3210), vreinterpretq_u16_u32(ab7654));
+++ return vreinterpretq_m128i_u16(r.val[1]);
+++#endif
+++}
+++
+++// Multiply the packed 16-bit integers in a and b, producing intermediate 32-bit
+++// integers, and store the low 16 bits of the intermediate integers in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi16
+++FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vmulq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Compute the bitwise OR of packed double-precision (64-bit) floating-point
+++// elements in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_or_pd
+++FORCE_INLINE __m128d _mm_or_pd(__m128d a, __m128d b)
+++{
+++ return vreinterpretq_m128d_s64(
+++ vorrq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+++}
+++
+++// Compute the bitwise OR of 128 bits (representing integer data) in a and b,
+++// and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_or_si128
+++FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Convert packed signed 16-bit integers from a and b to packed 8-bit integers
+++// using signed saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi16
+++FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
+++ vqmovn_s16(vreinterpretq_s16_m128i(b))));
+++}
+++
+++// Convert packed signed 32-bit integers from a and b to packed 16-bit integers
+++// using signed saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packs_epi32
+++FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
+++ vqmovn_s32(vreinterpretq_s32_m128i(b))));
+++}
+++
+++// Convert packed signed 16-bit integers from a and b to packed 8-bit integers
+++// using unsigned saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi16
+++FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
+++ vqmovun_s16(vreinterpretq_s16_m128i(b))));
+++}
+++
+++// Pause the processor. This is typically used in spin-wait loops; depending
+++// on the x86 processor, typical pause durations are in the 40-100 cycle range.
+++// The 'yield' instruction isn't a good fit because it's effectively a nop on
+++// most Arm cores. Experience with several databases has shown that an 'isb'
+++// is a reasonable approximation.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_pause
+++FORCE_INLINE void _mm_pause(void)
+++{
+++#if defined(_MSC_VER)
+++ __isb(_ARM64_BARRIER_SY);
+++#else
+++ __asm__ __volatile__("isb\n");
+++#endif
+++}
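+++
+++// Illustrative usage (editor's sketch, not part of the upstream header): a
+++// spin-wait loop that backs off between polls; `flag` is a hypothetical C11
+++// atomic_bool.
+++//   while (!atomic_load_explicit(&flag, memory_order_acquire))
+++//       _mm_pause();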
+++
+++// Compute the absolute differences of packed unsigned 8-bit integers in a and
+++// b, then horizontally sum each consecutive 8 differences to produce two
+++// unsigned 16-bit integers, and pack these unsigned 16-bit integers in the low
+++// 16 bits of 64-bit elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sad_epu8
+++FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
+++{
+++ uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t) a, (uint8x16_t) b));
+++ return vreinterpretq_m128i_u64(vpaddlq_u32(vpaddlq_u16(t)));
+++}
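+++
+++// Illustrative usage (editor's sketch, not part of the upstream header):
+++// reducing the two 16-bit partial sums to a single scalar SAD over a 16-byte
+++// block; `blk_a` and `blk_b` are hypothetical pointers.
+++//   __m128i sad = _mm_sad_epu8(_mm_loadu_si128((const __m128i *) blk_a),
+++//                              _mm_loadu_si128((const __m128i *) blk_b));
+++//   int total = _mm_cvtsi128_si32(sad) +
+++//               _mm_cvtsi128_si32(_mm_srli_si128(sad, 8));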
+++
+++// Set packed 16-bit integers in dst with the supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi16
+++FORCE_INLINE __m128i _mm_set_epi16(short i7,
+++ short i6,
+++ short i5,
+++ short i4,
+++ short i3,
+++ short i2,
+++ short i1,
+++ short i0)
+++{
+++ int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
+++ return vreinterpretq_m128i_s16(vld1q_s16(data));
+++}
+++
+++// Set packed 32-bit integers in dst with the supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi32
+++FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
+++{
+++ int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
+++ return vreinterpretq_m128i_s32(vld1q_s32(data));
+++}
+++
+++// Set packed 64-bit integers in dst with the supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64
+++FORCE_INLINE __m128i _mm_set_epi64(__m64 i1, __m64 i2)
+++{
+++ return _mm_set_epi64x(vget_lane_s64(i1, 0), vget_lane_s64(i2, 0));
+++}
+++
+++// Set packed 64-bit integers in dst with the supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi64x
+++FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vcombine_s64(vcreate_s64(i2), vcreate_s64(i1)));
+++}
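+++
+++// Illustrative note (editor's addition, not from the upstream header): as with
+++// the other _mm_set_* intrinsics, the arguments are given from the highest
+++// lane down, so the first argument lands in the upper 64 bits.
+++//   __m128i v = _mm_set_epi64x(0x1111111111111111LL, 0x2222222222222222LL);
+++//   // low 64-bit lane  == 0x2222222222222222
+++//   // high 64-bit lane == 0x1111111111111111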
+++
+++// Set packed 8-bit integers in dst with the supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_epi8
+++FORCE_INLINE __m128i _mm_set_epi8(signed char b15,
+++ signed char b14,
+++ signed char b13,
+++ signed char b12,
+++ signed char b11,
+++ signed char b10,
+++ signed char b9,
+++ signed char b8,
+++ signed char b7,
+++ signed char b6,
+++ signed char b5,
+++ signed char b4,
+++ signed char b3,
+++ signed char b2,
+++ signed char b1,
+++ signed char b0)
+++{
+++ int8_t ALIGN_STRUCT(16)
+++ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
+++ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
+++ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
+++ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
+++ return (__m128i) vld1q_s8(data);
+++}
+++
+++// Set packed double-precision (64-bit) floating-point elements in dst with the
+++// supplied values.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd
+++FORCE_INLINE __m128d _mm_set_pd(double e1, double e0)
+++{
+++ double ALIGN_STRUCT(16) data[2] = {e0, e1};
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vld1q_f64((float64_t *) data));
+++#else
+++ return vreinterpretq_m128d_f32(vld1q_f32((float32_t *) data));
+++#endif
+++}
+++
+++// Broadcast double-precision (64-bit) floating-point value a to all elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_pd1
+++#define _mm_set_pd1 _mm_set1_pd
+++
+++// Copy double-precision (64-bit) floating-point element a to the lower element
+++// of dst, and zero the upper element.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set_sd
+++FORCE_INLINE __m128d _mm_set_sd(double a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vsetq_lane_f64(a, vdupq_n_f64(0), 0));
+++#else
+++ return _mm_set_pd(0, a);
+++#endif
+++}
+++
+++// Broadcast 16-bit integer a to all elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi16
+++FORCE_INLINE __m128i _mm_set1_epi16(short w)
+++{
+++ return vreinterpretq_m128i_s16(vdupq_n_s16(w));
+++}
+++
+++// Broadcast 32-bit integer a to all elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi32
+++FORCE_INLINE __m128i _mm_set1_epi32(int _i)
+++{
+++ return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
+++}
+++
+++// Broadcast 64-bit integer a to all elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64
+++FORCE_INLINE __m128i _mm_set1_epi64(__m64 _i)
+++{
+++ return vreinterpretq_m128i_s64(vdupq_lane_s64(_i, 0));
+++}
+++
+++// Broadcast 64-bit integer a to all elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi64x
+++FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
+++{
+++ return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
+++}
+++
+++// Broadcast 8-bit integer a to all elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_epi8
+++FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
+++{
+++ return vreinterpretq_m128i_s8(vdupq_n_s8(w));
+++}
+++
+++// Broadcast double-precision (64-bit) floating-point value a to all elements of
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_set1_pd
+++FORCE_INLINE __m128d _mm_set1_pd(double d)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vdupq_n_f64(d));
+++#else
+++ return vreinterpretq_m128d_s64(vdupq_n_s64(*(int64_t *) &d));
+++#endif
+++}
+++
+++// Set packed 16-bit integers in dst with the supplied values in reverse order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi16
+++FORCE_INLINE __m128i _mm_setr_epi16(short w0,
+++ short w1,
+++ short w2,
+++ short w3,
+++ short w4,
+++ short w5,
+++ short w6,
+++ short w7)
+++{
+++ int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
+++ return vreinterpretq_m128i_s16(vld1q_s16((int16_t *) data));
+++}
+++
+++// Set packed 32-bit integers in dst with the supplied values in reverse order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi32
+++FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
+++{
+++ int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
+++ return vreinterpretq_m128i_s32(vld1q_s32(data));
+++}
+++
+++// Set packed 64-bit integers in dst with the supplied values in reverse order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi64
+++FORCE_INLINE __m128i _mm_setr_epi64(__m64 e1, __m64 e0)
+++{
+++ return vreinterpretq_m128i_s64(vcombine_s64(e1, e0));
+++}
+++
+++// Set packed 8-bit integers in dst with the supplied values in reverse order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_epi8
+++FORCE_INLINE __m128i _mm_setr_epi8(signed char b0,
+++ signed char b1,
+++ signed char b2,
+++ signed char b3,
+++ signed char b4,
+++ signed char b5,
+++ signed char b6,
+++ signed char b7,
+++ signed char b8,
+++ signed char b9,
+++ signed char b10,
+++ signed char b11,
+++ signed char b12,
+++ signed char b13,
+++ signed char b14,
+++ signed char b15)
+++{
+++ int8_t ALIGN_STRUCT(16)
+++ data[16] = {(int8_t) b0, (int8_t) b1, (int8_t) b2, (int8_t) b3,
+++ (int8_t) b4, (int8_t) b5, (int8_t) b6, (int8_t) b7,
+++ (int8_t) b8, (int8_t) b9, (int8_t) b10, (int8_t) b11,
+++ (int8_t) b12, (int8_t) b13, (int8_t) b14, (int8_t) b15};
+++ return (__m128i) vld1q_s8(data);
+++}
+++
+++// Set packed double-precision (64-bit) floating-point elements in dst with the
+++// supplied values in reverse order.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setr_pd
+++FORCE_INLINE __m128d _mm_setr_pd(double e1, double e0)
+++{
+++ return _mm_set_pd(e0, e1);
+++}
+++
+++// Return vector of type __m128d with all elements set to zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_pd
+++FORCE_INLINE __m128d _mm_setzero_pd(void)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vdupq_n_f64(0));
+++#else
+++ return vreinterpretq_m128d_f32(vdupq_n_f32(0));
+++#endif
+++}
+++
+++// Return vector of type __m128i with all elements set to zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_setzero_si128
+++FORCE_INLINE __m128i _mm_setzero_si128(void)
+++{
+++ return vreinterpretq_m128i_s32(vdupq_n_s32(0));
+++}
+++
+++// Shuffle 32-bit integers in a using the control in imm8, and store the results
+++// in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi32
+++// FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
+++// __constrange(0,255) int imm)
+++#if defined(_sse2neon_shuffle)
+++#define _mm_shuffle_epi32(a, imm) \
+++ __extension__({ \
+++ int32x4_t _input = vreinterpretq_s32_m128i(a); \
+++ int32x4_t _shuf = \
+++ vshuffleq_s32(_input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
+++ ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
+++ vreinterpretq_m128i_s32(_shuf); \
+++ })
+++#else // generic
+++#define _mm_shuffle_epi32(a, imm) \
+++ _sse2neon_define1( \
+++ __m128i, a, __m128i ret; switch (imm) { \
+++ case _MM_SHUFFLE(1, 0, 3, 2): \
+++ ret = _mm_shuffle_epi_1032(_a); \
+++ break; \
+++ case _MM_SHUFFLE(2, 3, 0, 1): \
+++ ret = _mm_shuffle_epi_2301(_a); \
+++ break; \
+++ case _MM_SHUFFLE(0, 3, 2, 1): \
+++ ret = _mm_shuffle_epi_0321(_a); \
+++ break; \
+++ case _MM_SHUFFLE(2, 1, 0, 3): \
+++ ret = _mm_shuffle_epi_2103(_a); \
+++ break; \
+++ case _MM_SHUFFLE(1, 0, 1, 0): \
+++ ret = _mm_shuffle_epi_1010(_a); \
+++ break; \
+++ case _MM_SHUFFLE(1, 0, 0, 1): \
+++ ret = _mm_shuffle_epi_1001(_a); \
+++ break; \
+++ case _MM_SHUFFLE(0, 1, 0, 1): \
+++ ret = _mm_shuffle_epi_0101(_a); \
+++ break; \
+++ case _MM_SHUFFLE(2, 2, 1, 1): \
+++ ret = _mm_shuffle_epi_2211(_a); \
+++ break; \
+++ case _MM_SHUFFLE(0, 1, 2, 2): \
+++ ret = _mm_shuffle_epi_0122(_a); \
+++ break; \
+++ case _MM_SHUFFLE(3, 3, 3, 2): \
+++ ret = _mm_shuffle_epi_3332(_a); \
+++ break; \
+++ case _MM_SHUFFLE(0, 0, 0, 0): \
+++ ret = _mm_shuffle_epi32_splat(_a, 0); \
+++ break; \
+++ case _MM_SHUFFLE(1, 1, 1, 1): \
+++ ret = _mm_shuffle_epi32_splat(_a, 1); \
+++ break; \
+++ case _MM_SHUFFLE(2, 2, 2, 2): \
+++ ret = _mm_shuffle_epi32_splat(_a, 2); \
+++ break; \
+++ case _MM_SHUFFLE(3, 3, 3, 3): \
+++ ret = _mm_shuffle_epi32_splat(_a, 3); \
+++ break; \
+++ default: \
+++ ret = _mm_shuffle_epi32_default(_a, (imm)); \
+++ break; \
+++ } _sse2neon_return(ret);)
+++#endif
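+++
+++// Illustrative usage (editor's sketch, not part of the upstream header):
+++// _MM_SHUFFLE(z, y, x, w) packs four 2-bit source-lane selectors, listed from
+++// the highest destination lane down; `v` is a hypothetical vector.
+++//   __m128i splat0 = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0)); // broadcast lane 0
+++//   __m128i rev    = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3)); // reverse the lanes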
+++
+++// Shuffle double-precision (64-bit) floating-point elements using the control
+++// in imm8, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pd
+++#ifdef _sse2neon_shuffle
+++#define _mm_shuffle_pd(a, b, imm8) \
+++ vreinterpretq_m128d_s64( \
+++ vshuffleq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b), \
+++ imm8 & 0x1, ((imm8 & 0x2) >> 1) + 2))
+++#else
+++#define _mm_shuffle_pd(a, b, imm8) \
+++ _mm_castsi128_pd(_mm_set_epi64x( \
+++ vgetq_lane_s64(vreinterpretq_s64_m128d(b), (imm8 & 0x2) >> 1), \
+++ vgetq_lane_s64(vreinterpretq_s64_m128d(a), imm8 & 0x1)))
+++#endif
+++
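+++// Shuffle 16-bit integers in the high 64 bits of a using the control in imm8.
+++// Store the results in the high 64 bits of dst, with the low 64 bits being
+++// copied from a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflehi_epi16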
+++// FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
+++// __constrange(0,255) int imm)
+++#if defined(_sse2neon_shuffle)
+++#define _mm_shufflehi_epi16(a, imm) \
+++ __extension__({ \
+++ int16x8_t _input = vreinterpretq_s16_m128i(a); \
+++ int16x8_t _shuf = \
+++ vshuffleq_s16(_input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
+++ (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
+++ (((imm) >> 6) & 0x3) + 4); \
+++ vreinterpretq_m128i_s16(_shuf); \
+++ })
+++#else // generic
+++#define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
+++#endif
+++
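+++// Shuffle 16-bit integers in the low 64 bits of a using the control in imm8.
+++// Store the results in the low 64 bits of dst, with the high 64 bits being
+++// copied from a to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shufflelo_epi16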
+++// FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
+++// __constrange(0,255) int imm)
+++#if defined(_sse2neon_shuffle)
+++#define _mm_shufflelo_epi16(a, imm) \
+++ __extension__({ \
+++ int16x8_t _input = vreinterpretq_s16_m128i(a); \
+++ int16x8_t _shuf = vshuffleq_s16( \
+++ _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
+++ (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, 7); \
+++ vreinterpretq_m128i_s16(_shuf); \
+++ })
+++#else // generic
+++#define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
+++#endif
+++
+++// Shift packed 16-bit integers in a left by count while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi16
+++FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
+++{
+++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+++ if (_sse2neon_unlikely(c & ~15))
+++ return _mm_setzero_si128();
+++
+++ int16x8_t vc = vdupq_n_s16((int16_t) c);
+++ return vreinterpretq_m128i_s16(vshlq_s16(vreinterpretq_s16_m128i(a), vc));
+++}
+++
+++// Shift packed 32-bit integers in a left by count while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi32
+++FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
+++{
+++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+++ if (_sse2neon_unlikely(c & ~31))
+++ return _mm_setzero_si128();
+++
+++ int32x4_t vc = vdupq_n_s32((int32_t) c);
+++ return vreinterpretq_m128i_s32(vshlq_s32(vreinterpretq_s32_m128i(a), vc));
+++}
+++
+++// Shift packed 64-bit integers in a left by count while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sll_epi64
+++FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
+++{
+++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+++ if (_sse2neon_unlikely(c & ~63))
+++ return _mm_setzero_si128();
+++
+++ int64x2_t vc = vdupq_n_s64((int64_t) c);
+++ return vreinterpretq_m128i_s64(vshlq_s64(vreinterpretq_s64_m128i(a), vc));
+++}
+++
+++// Shift packed 16-bit integers in a left by imm8 while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi16
+++FORCE_INLINE __m128i _mm_slli_epi16(__m128i a, int imm)
+++{
+++ if (_sse2neon_unlikely(imm & ~15))
+++ return _mm_setzero_si128();
+++ return vreinterpretq_m128i_s16(
+++ vshlq_s16(vreinterpretq_s16_m128i(a), vdupq_n_s16(imm)));
+++}
+++
+++// Shift packed 32-bit integers in a left by imm8 while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi32
+++FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, int imm)
+++{
+++ if (_sse2neon_unlikely(imm & ~31))
+++ return _mm_setzero_si128();
+++ return vreinterpretq_m128i_s32(
+++ vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32(imm)));
+++}
+++
+++// Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_epi64
+++FORCE_INLINE __m128i _mm_slli_epi64(__m128i a, int imm)
+++{
+++ if (_sse2neon_unlikely(imm & ~63))
+++ return _mm_setzero_si128();
+++ return vreinterpretq_m128i_s64(
+++ vshlq_s64(vreinterpretq_s64_m128i(a), vdupq_n_s64(imm)));
+++}
+++
+++// Shift a left by imm8 bytes while shifting in zeros, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_slli_si128
+++#define _mm_slli_si128(a, imm) \
+++ _sse2neon_define1( \
+++ __m128i, a, int8x16_t ret; \
+++ if (_sse2neon_unlikely(imm == 0)) ret = vreinterpretq_s8_m128i(_a); \
+++ else if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \
+++ else ret = vextq_s8(vdupq_n_s8(0), vreinterpretq_s8_m128i(_a), \
+++ ((imm <= 0 || imm > 15) ? 0 : (16 - imm))); \
+++ _sse2neon_return(vreinterpretq_m128i_s8(ret));)
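+++
+++// Illustrative note (editor's addition, not from the upstream header): the
+++// shift amount is in bytes, not bits. For example, shifting left by 4 bytes
+++// moves every 32-bit lane up by one position and zeroes the lowest lane:
+++//   [d3, d2, d1, d0] becomes [d2, d1, d0, 0]   (d0 is the lowest lane)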
+++
+++// Compute the square root of packed double-precision (64-bit) floating-point
+++// elements in a, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_pd
+++FORCE_INLINE __m128d _mm_sqrt_pd(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vsqrtq_f64(vreinterpretq_f64_m128d(a)));
+++#else
+++ double a0 = sqrt(((double *) &a)[0]);
+++ double a1 = sqrt(((double *) &a)[1]);
+++ return _mm_set_pd(a1, a0);
+++#endif
+++}
+++
+++// Compute the square root of the lower double-precision (64-bit) floating-point
+++// element in b, store the result in the lower element of dst, and copy the
+++// upper element from a to the upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sqrt_sd
+++FORCE_INLINE __m128d _mm_sqrt_sd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return _mm_move_sd(a, _mm_sqrt_pd(b));
+++#else
+++ return _mm_set_pd(((double *) &a)[1], sqrt(((double *) &b)[0]));
+++#endif
+++}
+++
+++// Shift packed 16-bit integers in a right by count while shifting in sign bits,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi16
+++FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
+++{
+++ int64_t c = vgetq_lane_s64(count, 0);
+++ if (_sse2neon_unlikely(c & ~15))
+++ return _mm_cmplt_epi16(a, _mm_setzero_si128());
+++ return vreinterpretq_m128i_s16(
+++ vshlq_s16((int16x8_t) a, vdupq_n_s16((int) -c)));
+++}
+++
+++// Shift packed 32-bit integers in a right by count while shifting in sign bits,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sra_epi32
+++FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
+++{
+++ int64_t c = vgetq_lane_s64(count, 0);
+++ if (_sse2neon_unlikely(c & ~31))
+++ return _mm_cmplt_epi32(a, _mm_setzero_si128());
+++ return vreinterpretq_m128i_s32(
+++ vshlq_s32((int32x4_t) a, vdupq_n_s32((int) -c)));
+++}
+++
+++// Shift packed 16-bit integers in a right by imm8 while shifting in sign
+++// bits, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi16
+++FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int imm)
+++{
+++ const int count = (imm & ~15) ? 15 : imm;
+++ return (__m128i) vshlq_s16((int16x8_t) a, vdupq_n_s16(-count));
+++}
+++
+++// Shift packed 32-bit integers in a right by imm8 while shifting in sign bits,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srai_epi32
+++// FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
+++#define _mm_srai_epi32(a, imm) \
+++ _sse2neon_define0( \
+++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) == 0)) { \
+++ ret = _a; \
+++ } else if (_sse2neon_likely(0 < (imm) && (imm) < 32)) { \
+++ ret = vreinterpretq_m128i_s32( \
+++ vshlq_s32(vreinterpretq_s32_m128i(_a), vdupq_n_s32(-(imm)))); \
+++ } else { \
+++ ret = vreinterpretq_m128i_s32( \
+++ vshrq_n_s32(vreinterpretq_s32_m128i(_a), 31)); \
+++ } _sse2neon_return(ret);)
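+++
+++// Illustrative note (editor's addition, not from the upstream header):
+++// arithmetic right shifts replicate the sign bit, logical right shifts insert
+++// zeros. With every 32-bit lane of `v` holding -8 (0xFFFFFFF8):
+++//   _mm_srai_epi32(v, 1) yields -4          (0xFFFFFFFC) in every lane,
+++//   _mm_srli_epi32(v, 1) yields 2147483644  (0x7FFFFFFC) in every lane.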
+++
+++// Shift packed 16-bit integers in a right by count while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi16
+++FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
+++{
+++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+++ if (_sse2neon_unlikely(c & ~15))
+++ return _mm_setzero_si128();
+++
+++ int16x8_t vc = vdupq_n_s16(-(int16_t) c);
+++ return vreinterpretq_m128i_u16(vshlq_u16(vreinterpretq_u16_m128i(a), vc));
+++}
+++
+++// Shift packed 32-bit integers in a right by count while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi32
+++FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
+++{
+++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+++ if (_sse2neon_unlikely(c & ~31))
+++ return _mm_setzero_si128();
+++
+++ int32x4_t vc = vdupq_n_s32(-(int32_t) c);
+++ return vreinterpretq_m128i_u32(vshlq_u32(vreinterpretq_u32_m128i(a), vc));
+++}
+++
+++// Shift packed 64-bit integers in a right by count while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srl_epi64
+++FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
+++{
+++ uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
+++ if (_sse2neon_unlikely(c & ~63))
+++ return _mm_setzero_si128();
+++
+++ int64x2_t vc = vdupq_n_s64(-(int64_t) c);
+++ return vreinterpretq_m128i_u64(vshlq_u64(vreinterpretq_u64_m128i(a), vc));
+++}
+++
+++// Shift packed 16-bit integers in a right by imm8 while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi16
+++#define _mm_srli_epi16(a, imm) \
+++ _sse2neon_define0( \
+++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~15)) { \
+++ ret = _mm_setzero_si128(); \
+++ } else { \
+++ ret = vreinterpretq_m128i_u16( \
+++ vshlq_u16(vreinterpretq_u16_m128i(_a), vdupq_n_s16(-(imm)))); \
+++ } _sse2neon_return(ret);)
+++
+++// Shift packed 32-bit integers in a right by imm8 while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi32
+++// FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
+++#define _mm_srli_epi32(a, imm) \
+++ _sse2neon_define0( \
+++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~31)) { \
+++ ret = _mm_setzero_si128(); \
+++ } else { \
+++ ret = vreinterpretq_m128i_u32( \
+++ vshlq_u32(vreinterpretq_u32_m128i(_a), vdupq_n_s32(-(imm)))); \
+++ } _sse2neon_return(ret);)
+++
+++// Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_epi64
+++#define _mm_srli_epi64(a, imm) \
+++ _sse2neon_define0( \
+++ __m128i, a, __m128i ret; if (_sse2neon_unlikely((imm) & ~63)) { \
+++ ret = _mm_setzero_si128(); \
+++ } else { \
+++ ret = vreinterpretq_m128i_u64( \
+++ vshlq_u64(vreinterpretq_u64_m128i(_a), vdupq_n_s64(-(imm)))); \
+++ } _sse2neon_return(ret);)
+++
+++// Shift a right by imm8 bytes while shifting in zeros, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_srli_si128
+++#define _mm_srli_si128(a, imm) \
+++ _sse2neon_define1( \
+++ __m128i, a, int8x16_t ret; \
+++ if (_sse2neon_unlikely((imm) & ~15)) ret = vdupq_n_s8(0); \
+++ else ret = vextq_s8(vreinterpretq_s8_m128i(_a), vdupq_n_s8(0), \
+++ (imm > 15 ? 0 : imm)); \
+++ _sse2neon_return(vreinterpretq_m128i_s8(ret));)
+++
+++// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+++// elements) from a into memory. mem_addr must be aligned on a 16-byte boundary
+++// or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd
+++FORCE_INLINE void _mm_store_pd(double *mem_addr, __m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ vst1q_f64((float64_t *) mem_addr, vreinterpretq_f64_m128d(a));
+++#else
+++ vst1q_f32((float32_t *) mem_addr, vreinterpretq_f32_m128d(a));
+++#endif
+++}
+++
+++// Store the lower double-precision (64-bit) floating-point element from a into
+++// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+++// boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_pd1
+++FORCE_INLINE void _mm_store_pd1(double *mem_addr, __m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ float64x1_t a_low = vget_low_f64(vreinterpretq_f64_m128d(a));
+++ vst1q_f64((float64_t *) mem_addr,
+++ vreinterpretq_f64_m128d(vcombine_f64(a_low, a_low)));
+++#else
+++ float32x2_t a_low = vget_low_f32(vreinterpretq_f32_m128d(a));
+++ vst1q_f32((float32_t *) mem_addr,
+++ vreinterpretq_f32_m128d(vcombine_f32(a_low, a_low)));
+++#endif
+++}
+++
+++// Store the lower double-precision (64-bit) floating-point element from a into
+++// memory. mem_addr does not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_store_sd
+++FORCE_INLINE void _mm_store_sd(double *mem_addr, __m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
+++#else
+++ vst1_u64((uint64_t *) mem_addr, vget_low_u64(vreinterpretq_u64_m128d(a)));
+++#endif
+++}
+++
+++// Store 128-bits of integer data from a into memory. mem_addr must be aligned
+++// on a 16-byte boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_store_si128
+++FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
+++{
+++ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
+++}
+++
+++// Store the lower double-precision (64-bit) floating-point element from a into
+++// 2 contiguous elements in memory. mem_addr must be aligned on a 16-byte
+++// boundary or a general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#expand=9,526,5601&text=_mm_store1_pd
+++#define _mm_store1_pd _mm_store_pd1
+++
+++// Store the upper double-precision (64-bit) floating-point element from a into
+++// memory.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeh_pd
+++FORCE_INLINE void _mm_storeh_pd(double *mem_addr, __m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ vst1_f64((float64_t *) mem_addr, vget_high_f64(vreinterpretq_f64_m128d(a)));
+++#else
+++ vst1_f32((float32_t *) mem_addr, vget_high_f32(vreinterpretq_f32_m128d(a)));
+++#endif
+++}
+++
+++// Store 64-bit integer from the first element of a into memory.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_epi64
+++FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
+++{
+++ vst1_u64((uint64_t *) a, vget_low_u64(vreinterpretq_u64_m128i(b)));
+++}
+++
+++// Store the lower double-precision (64-bit) floating-point element from a into
+++// memory.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storel_pd
+++FORCE_INLINE void _mm_storel_pd(double *mem_addr, __m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ vst1_f64((float64_t *) mem_addr, vget_low_f64(vreinterpretq_f64_m128d(a)));
+++#else
+++ vst1_f32((float32_t *) mem_addr, vget_low_f32(vreinterpretq_f32_m128d(a)));
+++#endif
+++}
+++
+++// Store 2 double-precision (64-bit) floating-point elements from a into memory
+++// in reverse order. mem_addr must be aligned on a 16-byte boundary or a
+++// general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storer_pd
+++FORCE_INLINE void _mm_storer_pd(double *mem_addr, __m128d a)
+++{
+++ float32x4_t f = vreinterpretq_f32_m128d(a);
+++ _mm_store_pd(mem_addr, vreinterpretq_m128d_f32(vextq_f32(f, f, 2)));
+++}
+++
+++// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+++// elements) from a into memory. mem_addr does not need to be aligned on any
+++// particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_pd
+++FORCE_INLINE void _mm_storeu_pd(double *mem_addr, __m128d a)
+++{
+++ _mm_store_pd(mem_addr, a);
+++}
+++
+++// Store 128-bits of integer data from a into memory. mem_addr does not need to
+++// be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si128
+++FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
+++{
+++ vst1q_s32((int32_t *) p, vreinterpretq_s32_m128i(a));
+++}
+++
+++// Store 32-bit integer from the first element of a into memory. mem_addr does
+++// not need to be aligned on any particular boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_storeu_si32
+++FORCE_INLINE void _mm_storeu_si32(void *p, __m128i a)
+++{
+++ vst1q_lane_s32((int32_t *) p, vreinterpretq_s32_m128i(a), 0);
+++}
+++
+++// Store 128-bits (composed of 2 packed double-precision (64-bit) floating-point
+++// elements) from a into memory using a non-temporal memory hint. mem_addr must
+++// be aligned on a 16-byte boundary or a general-protection exception may be
+++// generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_pd
+++FORCE_INLINE void _mm_stream_pd(double *p, __m128d a)
+++{
+++#if __has_builtin(__builtin_nontemporal_store)
+++ __builtin_nontemporal_store(a, (__m128d *) p);
+++#elif defined(__aarch64__) || defined(_M_ARM64)
+++ vst1q_f64(p, vreinterpretq_f64_m128d(a));
+++#else
+++ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128d(a));
+++#endif
+++}
+++
+++// Store 128-bits of integer data from a into memory using a non-temporal memory
+++// hint. mem_addr must be aligned on a 16-byte boundary or a general-protection
+++// exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si128
+++FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
+++{
+++#if __has_builtin(__builtin_nontemporal_store)
+++ __builtin_nontemporal_store(a, p);
+++#else
+++ vst1q_s64((int64_t *) p, vreinterpretq_s64_m128i(a));
+++#endif
+++}
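+++
+++// Illustrative usage (editor's sketch, not part of the upstream header):
+++// streaming stores suit large writes that will not be re-read soon, such as
+++// zero-filling a buffer; `dst` (a 16-byte aligned char pointer) and `n` are
+++// hypothetical.
+++//   for (size_t i = 0; i + 16 <= n; i += 16)
+++//       _mm_stream_si128((__m128i *) (dst + i), _mm_setzero_si128());
+++//   _mm_sfence(); // order the non-temporal stores before later accesses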
+++
+++// Store 32-bit integer a into memory using a non-temporal hint to minimize
+++// cache pollution. If the cache line containing address mem_addr is already in
+++// the cache, the cache will be updated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si32
+++FORCE_INLINE void _mm_stream_si32(int *p, int a)
+++{
+++ vst1q_lane_s32((int32_t *) p, vdupq_n_s32(a), 0);
+++}
+++
+++// Store 64-bit integer a into memory using a non-temporal hint to minimize
+++// cache pollution. If the cache line containing address mem_addr is already in
+++// the cache, the cache will be updated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_si64
+++FORCE_INLINE void _mm_stream_si64(__int64 *p, __int64 a)
+++{
+++ vst1_s64((int64_t *) p, vdup_n_s64((int64_t) a));
+++}
+++
+++// Subtract packed 16-bit integers in b from packed 16-bit integers in a, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi16
+++FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Subtract packed 32-bit integers in b from packed 32-bit integers in a, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi32
+++FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vsubq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Subtract packed 64-bit integers in b from packed 64-bit integers in a, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi64
+++FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vsubq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+++}
+++
+++// Subtract packed 8-bit integers in b from packed 8-bit integers in a, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_epi8
+++FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Subtract packed double-precision (64-bit) floating-point elements in b from
+++// packed double-precision (64-bit) floating-point elements in a, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_sub_pd
+++FORCE_INLINE __m128d _mm_sub_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vsubq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[2];
+++ c[0] = da[0] - db[0];
+++ c[1] = da[1] - db[1];
+++ return vld1q_f32((float32_t *) c);
+++#endif
+++}
+++
+++// Subtract the lower double-precision (64-bit) floating-point element in b from
+++// the lower double-precision (64-bit) floating-point element in a, store the
+++// result in the lower element of dst, and copy the upper element from a to the
+++// upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_sd
+++FORCE_INLINE __m128d _mm_sub_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_sub_pd(a, b));
+++}
+++
+++// Subtract 64-bit integer b from 64-bit integer a, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sub_si64
+++FORCE_INLINE __m64 _mm_sub_si64(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_s64(
+++ vsub_s64(vreinterpret_s64_m64(a), vreinterpret_s64_m64(b)));
+++}
+++
+++// Subtract packed signed 16-bit integers in b from packed 16-bit integers in a
+++// using saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi16
+++FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s16(
+++ vqsubq_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++}
+++
+++// Subtract packed signed 8-bit integers in b from packed 8-bit integers in a
+++// using saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epi8
+++FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vqsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Subtract packed unsigned 16-bit integers in b from packed unsigned 16-bit
+++// integers in a using saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu16
+++FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vqsubq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+++}
+++
+++// Subtract packed unsigned 8-bit integers in b from packed unsigned 8-bit
+++// integers in a using saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_subs_epu8
+++FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(
+++ vqsubq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
+++}
+++
+++#define _mm_ucomieq_sd _mm_comieq_sd
+++#define _mm_ucomige_sd _mm_comige_sd
+++#define _mm_ucomigt_sd _mm_comigt_sd
+++#define _mm_ucomile_sd _mm_comile_sd
+++#define _mm_ucomilt_sd _mm_comilt_sd
+++#define _mm_ucomineq_sd _mm_comineq_sd
+++
+++// Return vector of type __m128d with undefined elements.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_undefined_pd
+++FORCE_INLINE __m128d _mm_undefined_pd(void)
+++{
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma GCC diagnostic push
+++#pragma GCC diagnostic ignored "-Wuninitialized"
+++#endif
+++ __m128d a;
+++#if defined(_MSC_VER)
+++ a = _mm_setzero_pd();
+++#endif
+++ return a;
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma GCC diagnostic pop
+++#endif
+++}
+++
+++// Unpack and interleave 16-bit integers from the high half of a and b, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi16
+++FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s16(
+++ vzip2q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++#else
+++ int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
+++ int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
+++ int16x4x2_t result = vzip_s16(a1, b1);
+++ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave 32-bit integers from the high half of a and b, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi32
+++FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s32(
+++ vzip2q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++#else
+++ int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
+++ int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
+++ int32x2x2_t result = vzip_s32(a1, b1);
+++ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave 64-bit integers from the high half of a and b, and
+++// store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi64
+++FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s64(
+++ vzip2q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+++#else
+++ int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
+++ int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
+++ return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
+++#endif
+++}
+++
+++// Unpack and interleave 8-bit integers from the high half of a and b, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_epi8
+++FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s8(
+++ vzip2q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++#else
+++ int8x8_t a1 =
+++ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
+++ int8x8_t b1 =
+++ vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
+++ int8x8x2_t result = vzip_s8(a1, b1);
+++ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave double-precision (64-bit) floating-point elements from
+++// the high half of a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpackhi_pd
+++FORCE_INLINE __m128d _mm_unpackhi_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vzip2q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ return vreinterpretq_m128d_s64(
+++ vcombine_s64(vget_high_s64(vreinterpretq_s64_m128d(a)),
+++ vget_high_s64(vreinterpretq_s64_m128d(b))));
+++#endif
+++}
+++
+++// Unpack and interleave 16-bit integers from the low half of a and b, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi16
+++FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s16(
+++ vzip1q_s16(vreinterpretq_s16_m128i(a), vreinterpretq_s16_m128i(b)));
+++#else
+++ int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
+++ int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
+++ int16x4x2_t result = vzip_s16(a1, b1);
+++ return vreinterpretq_m128i_s16(vcombine_s16(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave 32-bit integers from the low half of a and b, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi32
+++FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s32(
+++ vzip1q_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++#else
+++ int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
+++ int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
+++ int32x2x2_t result = vzip_s32(a1, b1);
+++ return vreinterpretq_m128i_s32(vcombine_s32(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave 64-bit integers from the low half of a and b, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi64
+++FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s64(
+++ vzip1q_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+++#else
+++ int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
+++ int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
+++ return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
+++#endif
+++}
+++
+++// Unpack and interleave 8-bit integers from the low half of a and b, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_epi8
+++FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s8(
+++ vzip1q_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++#else
+++ int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
+++ int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
+++ int8x8x2_t result = vzip_s8(a1, b1);
+++ return vreinterpretq_m128i_s8(vcombine_s8(result.val[0], result.val[1]));
+++#endif
+++}
+++
+++// Unpack and interleave double-precision (64-bit) floating-point elements from
+++// the low half of a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_unpacklo_pd
+++FORCE_INLINE __m128d _mm_unpacklo_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vzip1q_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ return vreinterpretq_m128d_s64(
+++ vcombine_s64(vget_low_s64(vreinterpretq_s64_m128d(a)),
+++ vget_low_s64(vreinterpretq_s64_m128d(b))));
+++#endif
+++}
+++
+++// Compute the bitwise XOR of packed double-precision (64-bit) floating-point
+++// elements in a and b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_pd
+++FORCE_INLINE __m128d _mm_xor_pd(__m128d a, __m128d b)
+++{
+++ return vreinterpretq_m128d_s64(
+++ veorq_s64(vreinterpretq_s64_m128d(a), vreinterpretq_s64_m128d(b)));
+++}
+++
+++// Compute the bitwise XOR of 128 bits (representing integer data) in a and b,
+++// and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_xor_si128
+++FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++/* SSE3 */
+++
+++// Alternatively add and subtract packed double-precision (64-bit)
+++// floating-point elements in a to/from packed elements in b, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_addsub_pd
+++FORCE_INLINE __m128d _mm_addsub_pd(__m128d a, __m128d b)
+++{
+++ _sse2neon_const __m128d mask = _mm_set_pd(1.0, -1.0);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vfmaq_f64(vreinterpretq_f64_m128d(a),
+++ vreinterpretq_f64_m128d(b),
+++ vreinterpretq_f64_m128d(mask)));
+++#else
+++ return _mm_add_pd(_mm_mul_pd(b, mask), a);
+++#endif
+++}
+++
+++// Alternatively add and subtract packed single-precision (32-bit)
+++// floating-point elements in a to/from packed elements in b, and store the
+++// results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=addsub_ps
+++FORCE_INLINE __m128 _mm_addsub_ps(__m128 a, __m128 b)
+++{
+++ _sse2neon_const __m128 mask = _mm_setr_ps(-1.0f, 1.0f, -1.0f, 1.0f);
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_FMA) /* VFPv4+ */
+++ return vreinterpretq_m128_f32(vfmaq_f32(vreinterpretq_f32_m128(a),
+++ vreinterpretq_f32_m128(mask),
+++ vreinterpretq_f32_m128(b)));
+++#else
+++ return _mm_add_ps(_mm_mul_ps(b, mask), a);
+++#endif
+++}
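+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is kept out of the build by #if 0). It shows the
+++// even-subtract/odd-add pattern that _mm_addsub_ps reproduces on NEON.
+++#if 0
+++static void sse2neon_example_addsub_ps(void)
+++{
+++ float out[4];
+++ __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+++ __m128 b = _mm_setr_ps(10.0f, 20.0f, 30.0f, 40.0f);
+++ // Even lanes subtract, odd lanes add: {1-10, 2+20, 3-30, 4+40}
+++ _mm_storeu_ps(out, _mm_addsub_ps(a, b)); // out = {-9, 22, -27, 44}
+++}
+++#endif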
+++
+++// Horizontally add adjacent pairs of double-precision (64-bit) floating-point
+++// elements in a and b, and pack the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pd
+++FORCE_INLINE __m128d _mm_hadd_pd(__m128d a, __m128d b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vpaddq_f64(vreinterpretq_f64_m128d(a), vreinterpretq_f64_m128d(b)));
+++#else
+++ double *da = (double *) &a;
+++ double *db = (double *) &b;
+++ double c[] = {da[0] + da[1], db[0] + db[1]};
+++ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+++#endif
+++}
+++
+++// Horizontally add adjacent pairs of single-precision (32-bit) floating-point
+++// elements in a and b, and pack the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_ps
+++FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vpaddq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b)));
+++#else
+++ float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
+++ float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
+++ float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
+++ return vreinterpretq_m128_f32(
+++ vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
+++#endif
+++}
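+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). Horizontal add sums
+++// adjacent pairs within each input rather than across the two inputs.
+++#if 0
+++static void sse2neon_example_hadd_ps(void)
+++{
+++ float out[4];
+++ __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+++ __m128 b = _mm_setr_ps(10.0f, 20.0f, 30.0f, 40.0f);
+++ // dst = {a0+a1, a2+a3, b0+b1, b2+b3}
+++ _mm_storeu_ps(out, _mm_hadd_ps(a, b)); // out = {3, 7, 30, 70}
+++}
+++#endif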
+++
+++// Horizontally subtract adjacent pairs of double-precision (64-bit)
+++// floating-point elements in a and b, and pack the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pd
+++FORCE_INLINE __m128d _mm_hsub_pd(__m128d _a, __m128d _b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ float64x2_t a = vreinterpretq_f64_m128d(_a);
+++ float64x2_t b = vreinterpretq_f64_m128d(_b);
+++ return vreinterpretq_m128d_f64(
+++ vsubq_f64(vuzp1q_f64(a, b), vuzp2q_f64(a, b)));
+++#else
+++ double *da = (double *) &_a;
+++ double *db = (double *) &_b;
+++ double c[] = {da[0] - da[1], db[0] - db[1]};
+++ return vreinterpretq_m128d_u64(vld1q_u64((uint64_t *) c));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of single-precision (32-bit)
+++// floating-point elements in a and b, and pack the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_ps
+++FORCE_INLINE __m128 _mm_hsub_ps(__m128 _a, __m128 _b)
+++{
+++ float32x4_t a = vreinterpretq_f32_m128(_a);
+++ float32x4_t b = vreinterpretq_f32_m128(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vsubq_f32(vuzp1q_f32(a, b), vuzp2q_f32(a, b)));
+++#else
+++ float32x4x2_t c = vuzpq_f32(a, b);
+++ return vreinterpretq_m128_f32(vsubq_f32(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Load 128-bits of integer data from unaligned memory into dst. This intrinsic
+++// may perform better than _mm_loadu_si128 when the data crosses a cache line
+++// boundary.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_lddqu_si128
+++#define _mm_lddqu_si128 _mm_loadu_si128
+++
+++// Load a double-precision (64-bit) floating-point element from memory into both
+++// elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_loaddup_pd
+++#define _mm_loaddup_pd _mm_load1_pd
+++
+++// Duplicate the low double-precision (64-bit) floating-point element from a,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movedup_pd
+++FORCE_INLINE __m128d _mm_movedup_pd(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(
+++ vdupq_laneq_f64(vreinterpretq_f64_m128d(a), 0));
+++#else
+++ return vreinterpretq_m128d_u64(
+++ vdupq_n_u64(vgetq_lane_u64(vreinterpretq_u64_m128d(a), 0)));
+++#endif
+++}
+++
+++// Duplicate odd-indexed single-precision (32-bit) floating-point elements
+++// from a, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_movehdup_ps
+++FORCE_INLINE __m128 _mm_movehdup_ps(__m128 a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vtrn2q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
+++#elif defined(_sse2neon_shuffle)
+++ return vreinterpretq_m128_f32(vshuffleq_s32(
+++ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 1, 1, 3, 3));
+++#else
+++ float32_t a1 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 1);
+++ float32_t a3 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 3);
+++ float ALIGN_STRUCT(16) data[4] = {a1, a1, a3, a3};
+++ return vreinterpretq_m128_f32(vld1q_f32(data));
+++#endif
+++}
+++
+++// Duplicate even-indexed single-precision (32-bit) floating-point elements
+++// from a, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_moveldup_ps
+++FORCE_INLINE __m128 _mm_moveldup_ps(__m128 a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128_f32(
+++ vtrn1q_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a)));
+++#elif defined(_sse2neon_shuffle)
+++ return vreinterpretq_m128_f32(vshuffleq_s32(
+++ vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 0, 0, 2, 2));
+++#else
+++ float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
+++ float32_t a2 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 2);
+++ float ALIGN_STRUCT(16) data[4] = {a0, a0, a2, a2};
+++ return vreinterpretq_m128_f32(vld1q_f32(data));
+++#endif
+++}
+++
+++/* SSSE3 */
+++
+++// Compute the absolute value of packed signed 16-bit integers in a, and store
+++// the unsigned results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi16
+++FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
+++{
+++ return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
+++}
+++
+++// Compute the absolute value of packed signed 32-bit integers in a, and store
+++// the unsigned results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi32
+++FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
+++{
+++ return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
+++}
+++
+++// Compute the absolute value of packed signed 8-bit integers in a, and store
+++// the unsigned results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_epi8
+++FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
+++{
+++ return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
+++}
+++
+++// Compute the absolute value of packed signed 16-bit integers in a, and store
+++// the unsigned results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi16
+++FORCE_INLINE __m64 _mm_abs_pi16(__m64 a)
+++{
+++ return vreinterpret_m64_s16(vabs_s16(vreinterpret_s16_m64(a)));
+++}
+++
+++// Compute the absolute value of packed signed 32-bit integers in a, and store
+++// the unsigned results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi32
+++FORCE_INLINE __m64 _mm_abs_pi32(__m64 a)
+++{
+++ return vreinterpret_m64_s32(vabs_s32(vreinterpret_s32_m64(a)));
+++}
+++
+++// Compute the absolute value of packed signed 8-bit integers in a, and store
+++// the unsigned results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_abs_pi8
+++FORCE_INLINE __m64 _mm_abs_pi8(__m64 a)
+++{
+++ return vreinterpret_m64_s8(vabs_s8(vreinterpret_s8_m64(a)));
+++}
+++
+++// Concatenate 16-byte blocks in a and b into a 32-byte temporary result, shift
+++// the result right by imm8 bytes, and store the low 16 bytes in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_epi8
+++#if defined(__GNUC__) && !defined(__clang__)
+++#define _mm_alignr_epi8(a, b, imm) \
+++ __extension__({ \
+++ uint8x16_t _a = vreinterpretq_u8_m128i(a); \
+++ uint8x16_t _b = vreinterpretq_u8_m128i(b); \
+++ __m128i ret; \
+++ if (_sse2neon_unlikely((imm) & ~31)) \
+++ ret = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
+++ else if (imm >= 16) \
+++ ret = _mm_srli_si128(a, imm >= 16 ? imm - 16 : 0); \
+++ else \
+++ ret = \
+++ vreinterpretq_m128i_u8(vextq_u8(_b, _a, imm < 16 ? imm : 0)); \
+++ ret; \
+++ })
+++
+++#else
+++#define _mm_alignr_epi8(a, b, imm) \
+++ _sse2neon_define2( \
+++ __m128i, a, b, uint8x16_t __a = vreinterpretq_u8_m128i(_a); \
+++ uint8x16_t __b = vreinterpretq_u8_m128i(_b); __m128i ret; \
+++ if (_sse2neon_unlikely((imm) & ~31)) ret = \
+++ vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
+++ else if (imm >= 16) ret = \
+++ _mm_srli_si128(_a, imm >= 16 ? imm - 16 : 0); \
+++ else ret = \
+++ vreinterpretq_m128i_u8(vextq_u8(__b, __a, imm < 16 ? imm : 0)); \
+++ _sse2neon_return(ret);)
+++
+++#endif
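+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). _mm_alignr_epi8 views
+++// a:b as one 32-byte value (a in the upper half) and extracts 16 bytes
+++// starting at the byte offset given by the immediate, which must be a
+++// compile-time constant.
+++#if 0
+++static void sse2neon_example_alignr_epi8(void)
+++{
+++ uint8_t out[16];
+++ __m128i lo = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+++ __m128i hi = _mm_setr_epi8(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
+++ // Shift the 32-byte value hi:lo right by 4 bytes and keep the low 16 bytes.
+++ _mm_storeu_si128((__m128i *) out, _mm_alignr_epi8(hi, lo, 4));
+++ // out = {4, 5, ..., 15, 16, 17, 18, 19}
+++}
+++#endif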
+++
+++// Concatenate 8-byte blocks in a and b into a 16-byte temporary result, shift
+++// the result right by imm8 bytes, and store the low 8 bytes in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_alignr_pi8
+++#define _mm_alignr_pi8(a, b, imm) \
+++ _sse2neon_define2( \
+++ __m64, a, b, __m64 ret; if (_sse2neon_unlikely((imm) >= 16)) { \
+++ ret = vreinterpret_m64_s8(vdup_n_s8(0)); \
+++ } else { \
+++ uint8x8_t tmp_low; \
+++ uint8x8_t tmp_high; \
+++ if ((imm) >= 8) { \
+++ const int idx = (imm) - 8; \
+++ tmp_low = vreinterpret_u8_m64(_a); \
+++ tmp_high = vdup_n_u8(0); \
+++ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
+++ } else { \
+++ const int idx = (imm); \
+++ tmp_low = vreinterpret_u8_m64(_b); \
+++ tmp_high = vreinterpret_u8_m64(_a); \
+++ ret = vreinterpret_m64_u8(vext_u8(tmp_low, tmp_high, idx)); \
+++ } \
+++ } _sse2neon_return(ret);)
+++
+++// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
+++// signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi16
+++FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
+++{
+++ int16x8_t a = vreinterpretq_s16_m128i(_a);
+++ int16x8_t b = vreinterpretq_s16_m128i(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
+++#else
+++ return vreinterpretq_m128i_s16(
+++ vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
+++ vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
+++#endif
+++}
+++
+++// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
+++// signed 32-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_epi32
+++FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
+++{
+++ int32x4_t a = vreinterpretq_s32_m128i(_a);
+++ int32x4_t b = vreinterpretq_s32_m128i(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s32(vpaddq_s32(a, b));
+++#else
+++ return vreinterpretq_m128i_s32(
+++ vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
+++ vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
+++#endif
+++}
+++
+++// Horizontally add adjacent pairs of 16-bit integers in a and b, and pack the
+++// signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi16
+++FORCE_INLINE __m64 _mm_hadd_pi16(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_s16(
+++ vpadd_s16(vreinterpret_s16_m64(a), vreinterpret_s16_m64(b)));
+++}
+++
+++// Horizontally add adjacent pairs of 32-bit integers in a and b, and pack the
+++// signed 32-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadd_pi32
+++FORCE_INLINE __m64 _mm_hadd_pi32(__m64 a, __m64 b)
+++{
+++ return vreinterpret_m64_s32(
+++ vpadd_s32(vreinterpret_s32_m64(a), vreinterpret_s32_m64(b)));
+++}
+++
+++// Horizontally add adjacent pairs of signed 16-bit integers in a and b using
+++// saturation, and pack the signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_epi16
+++FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int16x8_t a = vreinterpretq_s16_m128i(_a);
+++ int16x8_t b = vreinterpretq_s16_m128i(_b);
+++ return vreinterpretq_m128i_s16(
+++ vqaddq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+++#else
+++ int32x4_t a = vreinterpretq_s32_m128i(_a);
+++ int32x4_t b = vreinterpretq_s32_m128i(_b);
+++ // Deinterleave the even- and odd-indexed 16-bit elements using vmovn/vshrn
+++ // [a0|a2|a4|a6|b0|b2|b4|b6]
+++ // [a1|a3|a5|a7|b1|b3|b5|b7]
+++ int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
+++ int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
+++ // Saturated add
+++ return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
+++#endif
+++}
+++
+++// Horizontally add adjacent pairs of signed 16-bit integers in a and b using
+++// saturation, and pack the signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hadds_pi16
+++FORCE_INLINE __m64 _mm_hadds_pi16(__m64 _a, __m64 _b)
+++{
+++ int16x4_t a = vreinterpret_s16_m64(_a);
+++ int16x4_t b = vreinterpret_s16_m64(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpret_m64_s16(vqadd_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
+++#else
+++ int16x4x2_t res = vuzp_s16(a, b);
+++ return vreinterpret_m64_s16(vqadd_s16(res.val[0], res.val[1]));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
+++// the signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi16
+++FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
+++{
+++ int16x8_t a = vreinterpretq_s16_m128i(_a);
+++ int16x8_t b = vreinterpretq_s16_m128i(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s16(
+++ vsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+++#else
+++ int16x8x2_t c = vuzpq_s16(a, b);
+++ return vreinterpretq_m128i_s16(vsubq_s16(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
+++// the signed 32-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_epi32
+++FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
+++{
+++ int32x4_t a = vreinterpretq_s32_m128i(_a);
+++ int32x4_t b = vreinterpretq_s32_m128i(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s32(
+++ vsubq_s32(vuzp1q_s32(a, b), vuzp2q_s32(a, b)));
+++#else
+++ int32x4x2_t c = vuzpq_s32(a, b);
+++ return vreinterpretq_m128i_s32(vsubq_s32(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of 16-bit integers in a and b, and pack
+++// the signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsub_pi16
+++FORCE_INLINE __m64 _mm_hsub_pi16(__m64 _a, __m64 _b)
+++{
+++ int16x4_t a = vreinterpret_s16_m64(_a);
+++ int16x4_t b = vreinterpret_s16_m64(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpret_m64_s16(vsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
+++#else
+++ int16x4x2_t c = vuzp_s16(a, b);
+++ return vreinterpret_m64_s16(vsub_s16(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of 32-bit integers in a and b, and pack
+++// the signed 32-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_hsub_pi32
+++FORCE_INLINE __m64 _mm_hsub_pi32(__m64 _a, __m64 _b)
+++{
+++ int32x2_t a = vreinterpret_s32_m64(_a);
+++ int32x2_t b = vreinterpret_s32_m64(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpret_m64_s32(vsub_s32(vuzp1_s32(a, b), vuzp2_s32(a, b)));
+++#else
+++ int32x2x2_t c = vuzp_s32(a, b);
+++ return vreinterpret_m64_s32(vsub_s32(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
+++// using saturation, and pack the signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_epi16
+++FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
+++{
+++ int16x8_t a = vreinterpretq_s16_m128i(_a);
+++ int16x8_t b = vreinterpretq_s16_m128i(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s16(
+++ vqsubq_s16(vuzp1q_s16(a, b), vuzp2q_s16(a, b)));
+++#else
+++ int16x8x2_t c = vuzpq_s16(a, b);
+++ return vreinterpretq_m128i_s16(vqsubq_s16(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Horizontally subtract adjacent pairs of signed 16-bit integers in a and b
+++// using saturation, and pack the signed 16-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_hsubs_pi16
+++FORCE_INLINE __m64 _mm_hsubs_pi16(__m64 _a, __m64 _b)
+++{
+++ int16x4_t a = vreinterpret_s16_m64(_a);
+++ int16x4_t b = vreinterpret_s16_m64(_b);
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpret_m64_s16(vqsub_s16(vuzp1_s16(a, b), vuzp2_s16(a, b)));
+++#else
+++ int16x4x2_t c = vuzp_s16(a, b);
+++ return vreinterpret_m64_s16(vqsub_s16(c.val[0], c.val[1]));
+++#endif
+++}
+++
+++// Vertically multiply each unsigned 8-bit integer from a with the corresponding
+++// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
+++// Horizontally add adjacent pairs of intermediate signed 16-bit integers,
+++// and pack the saturated results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_epi16
+++FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint8x16_t a = vreinterpretq_u8_m128i(_a);
+++ int8x16_t b = vreinterpretq_s8_m128i(_b);
+++ int16x8_t tl = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(a))),
+++ vmovl_s8(vget_low_s8(b)));
+++ int16x8_t th = vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(a))),
+++ vmovl_s8(vget_high_s8(b)));
+++ return vreinterpretq_m128i_s16(
+++ vqaddq_s16(vuzp1q_s16(tl, th), vuzp2q_s16(tl, th)));
+++#else
+++ // This would be much simpler if x86 would choose to zero extend OR sign
+++ // extend, not both. This could probably be optimized better.
+++ uint16x8_t a = vreinterpretq_u16_m128i(_a);
+++ int16x8_t b = vreinterpretq_s16_m128i(_b);
+++
+++ // Zero extend a
+++ int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
+++ int16x8_t a_even = vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
+++
+++ // Sign extend by shifting left then shifting right.
+++ int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
+++ int16x8_t b_odd = vshrq_n_s16(b, 8);
+++
+++ // multiply
+++ int16x8_t prod1 = vmulq_s16(a_even, b_even);
+++ int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
+++
+++ // saturated add
+++ return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
+++#endif
+++}
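+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). Note the asymmetry: a is
+++// treated as unsigned bytes, b as signed bytes, and the pairwise sum
+++// saturates to the int16 range.
+++#if 0
+++static void sse2neon_example_maddubs_epi16(void)
+++{
+++ int16_t out[8];
+++ __m128i a = _mm_set1_epi8((char) 200); // every byte is 0xC8 (unsigned 200)
+++ __m128i b = _mm_set1_epi8(-3); // every byte is signed -3
+++ // Each 16-bit lane: 200 * -3 + 200 * -3 = -1200 (no saturation needed here)
+++ _mm_storeu_si128((__m128i *) out, _mm_maddubs_epi16(a, b));
+++ // out[0] == -1200
+++}
+++#endif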
+++
+++// Vertically multiply each unsigned 8-bit integer from a with the corresponding
+++// signed 8-bit integer from b, producing intermediate signed 16-bit integers.
+++// Horizontally add adjacent pairs of intermediate signed 16-bit integers, and
+++// pack the saturated results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_maddubs_pi16
+++FORCE_INLINE __m64 _mm_maddubs_pi16(__m64 _a, __m64 _b)
+++{
+++ uint16x4_t a = vreinterpret_u16_m64(_a);
+++ int16x4_t b = vreinterpret_s16_m64(_b);
+++
+++ // Zero extend a
+++ int16x4_t a_odd = vreinterpret_s16_u16(vshr_n_u16(a, 8));
+++ int16x4_t a_even = vreinterpret_s16_u16(vand_u16(a, vdup_n_u16(0xff)));
+++
+++ // Sign extend by shifting left then shifting right.
+++ int16x4_t b_even = vshr_n_s16(vshl_n_s16(b, 8), 8);
+++ int16x4_t b_odd = vshr_n_s16(b, 8);
+++
+++ // multiply
+++ int16x4_t prod1 = vmul_s16(a_even, b_even);
+++ int16x4_t prod2 = vmul_s16(a_odd, b_odd);
+++
+++ // saturated add
+++ return vreinterpret_m64_s16(vqadd_s16(prod1, prod2));
+++}
+++
+++// Multiply packed signed 16-bit integers in a and b, producing intermediate
+++// signed 32-bit integers. Shift right by 15 bits while rounding up, and store
+++// the packed 16-bit integers in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_epi16
+++FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
+++{
+++ // vqrdmulhq_s16 cannot be used directly: it saturates 0x8000 * 0x8000 to
+++ // 0x7FFF, whereas _mm_mulhrs_epi16 wraps that case to 0x8000.
+++ // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
+++
+++ // Multiply
+++ int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
+++ vget_low_s16(vreinterpretq_s16_m128i(b)));
+++ int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
+++ vget_high_s16(vreinterpretq_s16_m128i(b)));
+++
+++ // Rounding narrowing shift right
+++ // narrow = (int16_t)((mul + 16384) >> 15);
+++ int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
+++ int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
+++
+++ // Join together
+++ return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
+++}
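+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). _mm_mulhrs_epi16 is the
+++// usual Q15 fixed-point multiply: effectively (a * b + 0x4000) >> 15.
+++#if 0
+++static void sse2neon_example_mulhrs_epi16(void)
+++{
+++ int16_t out[8];
+++ __m128i half = _mm_set1_epi16(16384); // 0.5 in Q15
+++ // 0.5 * 0.5 = 0.25 in Q15: (16384 * 16384 + 0x4000) >> 15 = 8192
+++ _mm_storeu_si128((__m128i *) out, _mm_mulhrs_epi16(half, half));
+++ // out[0] == 8192
+++}
+++#endif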
+++
+++// Multiply packed signed 16-bit integers in a and b, producing intermediate
+++// signed 32-bit integers. Truncate each intermediate integer to the 18 most
+++// significant bits, round by adding 1, and store bits [16:1] to dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mulhrs_pi16
+++FORCE_INLINE __m64 _mm_mulhrs_pi16(__m64 a, __m64 b)
+++{
+++ int32x4_t mul_extend =
+++ vmull_s16((vreinterpret_s16_m64(a)), (vreinterpret_s16_m64(b)));
+++
+++ // Rounding narrowing shift right
+++ return vreinterpret_m64_s16(vrshrn_n_s32(mul_extend, 15));
+++}
+++
+++// Shuffle packed 8-bit integers in a according to shuffle control mask in the
+++// corresponding 8-bit element of b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_epi8
+++FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
+++{
+++ int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
+++ uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
+++ uint8x16_t idx_masked =
+++ vandq_u8(idx, vdupq_n_u8(0x8F)); // keep the zeroing flag (bit 7) and the 4 index bits
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
+++#elif defined(__GNUC__)
+++ int8x16_t ret;
+++ // %e and %f represent the even and odd D registers
+++ // respectively.
+++ __asm__ __volatile__(
+++ "vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
+++ "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
+++ : [ret] "=&w"(ret)
+++ : [tbl] "w"(tbl), [idx] "w"(idx_masked));
+++ return vreinterpretq_m128i_s8(ret);
+++#else
+++ // Generic fallback: emulate the 16-byte table lookup with two vtbl2 lookups
+++ int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
+++ return vreinterpretq_m128i_s8(
+++ vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
+++ vtbl2_s8(a_split, vget_high_u8(idx_masked))));
+++#endif
+++}
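+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). Each mask byte selects a
+++// source byte by its low 4 bits; a mask byte with bit 7 set produces 0.
+++#if 0
+++static void sse2neon_example_shuffle_epi8(void)
+++{
+++ uint8_t out[16];
+++ __m128i a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+++ __m128i rev = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+++ _mm_storeu_si128((__m128i *) out, _mm_shuffle_epi8(a, rev));
+++ // out = {15, 14, ..., 1, 0}; a mask of _mm_set1_epi8((char) 0x80) would
+++ // give all zeros instead.
+++}
+++#endif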
+++
+++// Shuffle packed 8-bit integers in a according to shuffle control mask in the
+++// corresponding 8-bit element of b, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_shuffle_pi8
+++FORCE_INLINE __m64 _mm_shuffle_pi8(__m64 a, __m64 b)
+++{
+++ const int8x8_t controlMask =
+++ vand_s8(vreinterpret_s8_m64(b), vdup_n_s8((int8_t) (0x1 << 7 | 0x07)));
+++ int8x8_t res = vtbl1_s8(vreinterpret_s8_m64(a), controlMask);
+++ return vreinterpret_m64_s8(res);
+++}
+++
+++// Negate packed 16-bit integers in a when the corresponding signed
+++// 16-bit integer in b is negative, and store the results in dst.
+++// Elements in dst are zeroed out when the corresponding element
+++// in b is zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi16
+++FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
+++{
+++ int16x8_t a = vreinterpretq_s16_m128i(_a);
+++ int16x8_t b = vreinterpretq_s16_m128i(_b);
+++
+++ // signed shift right: faster than vclt
+++ // (b < 0) ? 0xFFFF : 0
+++ uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
+++ // (b == 0) ? 0xFFFF : 0
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqzq_s16(b));
+++#else
+++ int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, vdupq_n_s16(0)));
+++#endif
+++
+++ // bitwise select either a or -a (vnegq_s16(a)) based on ltMask
+++ int16x8_t masked = vbslq_s16(ltMask, vnegq_s16(a), a);
+++ // res = masked & (~zeroMask)
+++ int16x8_t res = vbicq_s16(masked, zeroMask);
+++ return vreinterpretq_m128i_s16(res);
+++}
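+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). The sign of each element
+++// of b decides whether the matching element of a is negated, kept, or zeroed.
+++#if 0
+++static void sse2neon_example_sign_epi16(void)
+++{
+++ int16_t out[8];
+++ __m128i a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
+++ __m128i b = _mm_setr_epi16(-1, 0, 5, -7, 1, 0, -2, 3);
+++ _mm_storeu_si128((__m128i *) out, _mm_sign_epi16(a, b));
+++ // out = {-1, 0, 3, -4, 5, 0, -7, 8}
+++}
+++#endif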
+++
+++// Negate packed 32-bit integers in a when the corresponding signed
+++// 32-bit integer in b is negative, and store the results in dst.
+++// Elements in dst are zeroed out when the corresponding element
+++// in b is zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi32
+++FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
+++{
+++ int32x4_t a = vreinterpretq_s32_m128i(_a);
+++ int32x4_t b = vreinterpretq_s32_m128i(_b);
+++
+++ // signed shift right: faster than vclt
+++ // (b < 0) ? 0xFFFFFFFF : 0
+++ uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
+++
+++ // (b == 0) ? 0xFFFFFFFF : 0
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqzq_s32(b));
+++#else
+++ int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, vdupq_n_s32(0)));
+++#endif
+++
+++ // bitwise select either a or -a (vnegq_s32(a)) based on ltMask
+++ int32x4_t masked = vbslq_s32(ltMask, vnegq_s32(a), a);
+++ // res = masked & (~zeroMask)
+++ int32x4_t res = vbicq_s32(masked, zeroMask);
+++ return vreinterpretq_m128i_s32(res);
+++}
+++
+++// Negate packed 8-bit integers in a when the corresponding signed
+++// 8-bit integer in b is negative, and store the results in dst.
+++// Elements in dst are zeroed out when the corresponding element
+++// in b is zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_epi8
+++FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
+++{
+++ int8x16_t a = vreinterpretq_s8_m128i(_a);
+++ int8x16_t b = vreinterpretq_s8_m128i(_b);
+++
+++ // signed shift right: faster than vclt
+++ // (b < 0) ? 0xFF : 0
+++ uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
+++
+++ // (b == 0) ? 0xFF : 0
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqzq_s8(b));
+++#else
+++ int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, vdupq_n_s8(0)));
+++#endif
+++
+++ // bitwise select either a or -a (vnegq_s8(a)) based on ltMask
+++ int8x16_t masked = vbslq_s8(ltMask, vnegq_s8(a), a);
+++ // res = masked & (~zeroMask)
+++ int8x16_t res = vbicq_s8(masked, zeroMask);
+++
+++ return vreinterpretq_m128i_s8(res);
+++}
+++
+++// Negate packed 16-bit integers in a when the corresponding signed 16-bit
+++// integer in b is negative, and store the results in dst. Elements in dst are
+++// zeroed out when the corresponding element in b is zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi16
+++FORCE_INLINE __m64 _mm_sign_pi16(__m64 _a, __m64 _b)
+++{
+++ int16x4_t a = vreinterpret_s16_m64(_a);
+++ int16x4_t b = vreinterpret_s16_m64(_b);
+++
+++ // signed shift right: faster than vclt
+++ // (b < 0) ? 0xFFFF : 0
+++ uint16x4_t ltMask = vreinterpret_u16_s16(vshr_n_s16(b, 15));
+++
+++ // (b == 0) ? 0xFFFF : 0
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int16x4_t zeroMask = vreinterpret_s16_u16(vceqz_s16(b));
+++#else
+++ int16x4_t zeroMask = vreinterpret_s16_u16(vceq_s16(b, vdup_n_s16(0)));
+++#endif
+++
+++ // bitwise select either a or -a (vneg_s16(a)) based on ltMask
+++ int16x4_t masked = vbsl_s16(ltMask, vneg_s16(a), a);
+++ // res = masked & (~zeroMask)
+++ int16x4_t res = vbic_s16(masked, zeroMask);
+++
+++ return vreinterpret_m64_s16(res);
+++}
+++
+++// Negate packed 32-bit integers in a when the corresponding signed 32-bit
+++// integer in b is negative, and store the results in dst. Elements in dst are
+++// zeroed out when the corresponding element in b is zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi32
+++FORCE_INLINE __m64 _mm_sign_pi32(__m64 _a, __m64 _b)
+++{
+++ int32x2_t a = vreinterpret_s32_m64(_a);
+++ int32x2_t b = vreinterpret_s32_m64(_b);
+++
+++ // signed shift right: faster than vclt
+++ // (b < 0) ? 0xFFFFFFFF : 0
+++ uint32x2_t ltMask = vreinterpret_u32_s32(vshr_n_s32(b, 31));
+++
+++ // (b == 0) ? 0xFFFFFFFF : 0
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int32x2_t zeroMask = vreinterpret_s32_u32(vceqz_s32(b));
+++#else
+++ int32x2_t zeroMask = vreinterpret_s32_u32(vceq_s32(b, vdup_n_s32(0)));
+++#endif
+++
+++ // bitwise select either a or -a (vneg_s32(a)) based on ltMask
+++ int32x2_t masked = vbsl_s32(ltMask, vneg_s32(a), a);
+++ // res = masked & (~zeroMask)
+++ int32x2_t res = vbic_s32(masked, zeroMask);
+++
+++ return vreinterpret_m64_s32(res);
+++}
+++
+++// Negate packed 8-bit integers in a when the corresponding signed 8-bit integer
+++// in b is negative, and store the results in dst. Elements in dst are zeroed out
+++// when the corresponding element in b is zero.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_sign_pi8
+++FORCE_INLINE __m64 _mm_sign_pi8(__m64 _a, __m64 _b)
+++{
+++ int8x8_t a = vreinterpret_s8_m64(_a);
+++ int8x8_t b = vreinterpret_s8_m64(_b);
+++
+++ // signed shift right: faster than vclt
+++ // (b < 0) ? 0xFF : 0
+++ uint8x8_t ltMask = vreinterpret_u8_s8(vshr_n_s8(b, 7));
+++
+++ // (b == 0) ? 0xFF : 0
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int8x8_t zeroMask = vreinterpret_s8_u8(vceqz_s8(b));
+++#else
+++ int8x8_t zeroMask = vreinterpret_s8_u8(vceq_s8(b, vdup_n_s8(0)));
+++#endif
+++
+++ // bitwise select either a or -a (vneg_s8(a)) based on ltMask
+++ int8x8_t masked = vbsl_s8(ltMask, vneg_s8(a), a);
+++ // res = masked & (~zeroMask)
+++ int8x8_t res = vbic_s8(masked, zeroMask);
+++
+++ return vreinterpret_m64_s8(res);
+++}
+++
+++/* SSE4.1 */
+++
+++// Blend packed 16-bit integers from a and b using control mask imm8, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_epi16
+++// FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
+++// __constrange(0,255) int imm)
+++#define _mm_blend_epi16(a, b, imm) \
+++ _sse2neon_define2( \
+++ __m128i, a, b, \
+++ const uint16_t _mask[8] = \
+++ _sse2neon_init(((imm) & (1 << 0)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 1)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 2)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 3)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 4)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 5)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 6)) ? (uint16_t) -1 : 0x0, \
+++ ((imm) & (1 << 7)) ? (uint16_t) -1 : 0x0); \
+++ uint16x8_t _mask_vec = vld1q_u16(_mask); \
+++ uint16x8_t __a = vreinterpretq_u16_m128i(_a); \
+++ uint16x8_t __b = vreinterpretq_u16_m128i(_b); _sse2neon_return( \
+++ vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, __b, __a)));)
+++
+++// Blend packed double-precision (64-bit) floating-point elements from a and b
+++// using control mask imm8, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_pd
+++#define _mm_blend_pd(a, b, imm) \
+++ _sse2neon_define2( \
+++ __m128d, a, b, \
+++ const uint64_t _mask[2] = \
+++ _sse2neon_init(((imm) & (1 << 0)) ? ~UINT64_C(0) : UINT64_C(0), \
+++ ((imm) & (1 << 1)) ? ~UINT64_C(0) : UINT64_C(0)); \
+++ uint64x2_t _mask_vec = vld1q_u64(_mask); \
+++ uint64x2_t __a = vreinterpretq_u64_m128d(_a); \
+++ uint64x2_t __b = vreinterpretq_u64_m128d(_b); _sse2neon_return( \
+++ vreinterpretq_m128d_u64(vbslq_u64(_mask_vec, __b, __a)));)
+++
+++// Blend packed single-precision (32-bit) floating-point elements from a and b
+++// using mask, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blend_ps
+++FORCE_INLINE __m128 _mm_blend_ps(__m128 _a, __m128 _b, const char imm8)
+++{
+++ const uint32_t ALIGN_STRUCT(16)
+++ data[4] = {((imm8) & (1 << 0)) ? UINT32_MAX : 0,
+++ ((imm8) & (1 << 1)) ? UINT32_MAX : 0,
+++ ((imm8) & (1 << 2)) ? UINT32_MAX : 0,
+++ ((imm8) & (1 << 3)) ? UINT32_MAX : 0};
+++ uint32x4_t mask = vld1q_u32(data);
+++ float32x4_t a = vreinterpretq_f32_m128(_a);
+++ float32x4_t b = vreinterpretq_f32_m128(_b);
+++ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
+++}
+++
+++// Blend packed 8-bit integers from a and b using mask, and store the results in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_epi8
+++FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
+++{
+++ // Use a signed shift right to create a mask with the sign bit
+++ uint8x16_t mask =
+++ vreinterpretq_u8_s8(vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
+++ uint8x16_t a = vreinterpretq_u8_m128i(_a);
+++ uint8x16_t b = vreinterpretq_u8_m128i(_b);
+++ return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
+++}
+++
+++// Blend packed double-precision (64-bit) floating-point elements from a and b
+++// using mask, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_pd
+++FORCE_INLINE __m128d _mm_blendv_pd(__m128d _a, __m128d _b, __m128d _mask)
+++{
+++ uint64x2_t mask =
+++ vreinterpretq_u64_s64(vshrq_n_s64(vreinterpretq_s64_m128d(_mask), 63));
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ float64x2_t a = vreinterpretq_f64_m128d(_a);
+++ float64x2_t b = vreinterpretq_f64_m128d(_b);
+++ return vreinterpretq_m128d_f64(vbslq_f64(mask, b, a));
+++#else
+++ uint64x2_t a = vreinterpretq_u64_m128d(_a);
+++ uint64x2_t b = vreinterpretq_u64_m128d(_b);
+++ return vreinterpretq_m128d_u64(vbslq_u64(mask, b, a));
+++#endif
+++}
+++
+++// Blend packed single-precision (32-bit) floating-point elements from a and b
+++// using mask, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_blendv_ps
+++FORCE_INLINE __m128 _mm_blendv_ps(__m128 _a, __m128 _b, __m128 _mask)
+++{
+++ // Use a signed shift right to create a mask with the sign bit
+++ uint32x4_t mask =
+++ vreinterpretq_u32_s32(vshrq_n_s32(vreinterpretq_s32_m128(_mask), 31));
+++ float32x4_t a = vreinterpretq_f32_m128(_a);
+++ float32x4_t b = vreinterpretq_f32_m128(_b);
+++ return vreinterpretq_m128_f32(vbslq_f32(mask, b, a));
+++}
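+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). Only the sign bit of each
+++// mask element matters, so a comparison result can drive the blend directly,
+++// e.g. to clamp negative lanes to zero.
+++#if 0
+++static void sse2neon_example_blendv_ps(void)
+++{
+++ float out[4];
+++ __m128 a = _mm_setr_ps(-1.5f, 2.0f, -3.0f, 4.0f);
+++ __m128 zero = _mm_setzero_ps();
+++ __m128 is_negative = _mm_cmplt_ps(a, zero); // all-ones lanes where a < 0
+++ _mm_storeu_ps(out, _mm_blendv_ps(a, zero, is_negative));
+++ // out = {0, 2, 0, 4}
+++}
+++#endif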
+++
+++// Round the packed double-precision (64-bit) floating-point elements in a up
+++// to an integer value, and store the results as packed double-precision
+++// floating-point elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_pd
+++FORCE_INLINE __m128d _mm_ceil_pd(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vrndpq_f64(vreinterpretq_f64_m128d(a)));
+++#else
+++ double *f = (double *) &a;
+++ return _mm_set_pd(ceil(f[1]), ceil(f[0]));
+++#endif
+++}
+++
+++// Round the packed single-precision (32-bit) floating-point elements in a up to
+++// an integer value, and store the results as packed single-precision
+++// floating-point elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ps
+++FORCE_INLINE __m128 _mm_ceil_ps(__m128 a)
+++{
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ return vreinterpretq_m128_f32(vrndpq_f32(vreinterpretq_f32_m128(a)));
+++#else
+++ float *f = (float *) &a;
+++ return _mm_set_ps(ceilf(f[3]), ceilf(f[2]), ceilf(f[1]), ceilf(f[0]));
+++#endif
+++}
+++
+++// Round the lower double-precision (64-bit) floating-point element in b up to
+++// an integer value, store the result as a double-precision floating-point
+++// element in the lower element of dst, and copy the upper element from a to the
+++// upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_sd
+++FORCE_INLINE __m128d _mm_ceil_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_ceil_pd(b));
+++}
+++
+++// Round the lower single-precision (32-bit) floating-point element in b up to
+++// an integer value, store the result as a single-precision floating-point
+++// element in the lower element of dst, and copy the upper 3 packed elements
+++// from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_ceil_ss
+++FORCE_INLINE __m128 _mm_ceil_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_ceil_ps(b));
+++}
+++
+++// Compare packed 64-bit integers in a and b for equality, and store the results
+++// in dst
+++FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_u64(
+++ vceqq_u64(vreinterpretq_u64_m128i(a), vreinterpretq_u64_m128i(b)));
+++#else
+++ // ARMv7 lacks vceqq_u64
+++ // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
+++ uint32x4_t cmp =
+++ vceqq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b));
+++ uint32x4_t swapped = vrev64q_u32(cmp);
+++ return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
+++#endif
+++}
+++
+++// Sign extend packed 16-bit integers in a to packed 32-bit integers, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi32
+++FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
+++}
+++
+++// Sign extend packed 16-bit integers in a to packed 64-bit integers, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi16_epi64
+++FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
+++{
+++ int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
+++ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
+++ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
+++ return vreinterpretq_m128i_s64(s64x2);
+++}
+++
+++// Sign extend packed 32-bit integers in a to packed 64-bit integers, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi32_epi64
+++FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
+++{
+++ return vreinterpretq_m128i_s64(
+++ vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
+++}
+++
+++// Sign extend packed 8-bit integers in a to packed 16-bit integers, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi16
+++FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
+++{
+++ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
+++ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+++ return vreinterpretq_m128i_s16(s16x8);
+++}
+++
+++// Sign extend packed 8-bit integers in a to packed 32-bit integers, and store
+++// the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi32
+++FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
+++{
+++ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
+++ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+++ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
+++ return vreinterpretq_m128i_s32(s32x4);
+++}
+++
+++// Sign extend packed 8-bit integers in the low 8 bytes of a to packed 64-bit
+++// integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepi8_epi64
+++FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
+++{
+++ int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
+++ int16x8_t s16x8 = vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
+++ int32x4_t s32x4 = vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
+++ int64x2_t s64x2 = vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
+++ return vreinterpretq_m128i_s64(s64x2);
+++}
+++
+++// Zero extend packed unsigned 16-bit integers in a to packed 32-bit integers,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi32
+++FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
+++{
+++ return vreinterpretq_m128i_u32(
+++ vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
+++}
+++
+++// Zero extend packed unsigned 16-bit integers in a to packed 64-bit integers,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu16_epi64
+++FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
+++{
+++ uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
+++ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
+++ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
+++ return vreinterpretq_m128i_u64(u64x2);
+++}
+++
+++// Zero extend packed unsigned 32-bit integers in a to packed 64-bit integers,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu32_epi64
+++FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
+++{
+++ return vreinterpretq_m128i_u64(
+++ vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
+++}
+++
+++// Zero extend packed unsigned 8-bit integers in a to packed 16-bit integers,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi16
+++FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
+++{
+++ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx HGFE DCBA */
+++ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0H0G 0F0E 0D0C 0B0A */
+++ return vreinterpretq_m128i_u16(u16x8);
+++}
+++
+++// Zero extend packed unsigned 8-bit integers in a to packed 32-bit integers,
+++// and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi32
+++FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
+++{
+++ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
+++ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
+++ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
+++ return vreinterpretq_m128i_u32(u32x4);
+++}
+++
+++// Zero extend packed unsigned 8-bit integers in the low 8 bytes of a to packed
+++// 64-bit integers, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cvtepu8_epi64
+++FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
+++{
+++ uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
+++ uint16x8_t u16x8 = vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
+++ uint32x4_t u32x4 = vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
+++ uint64x2_t u64x2 = vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
+++ return vreinterpretq_m128i_u64(u64x2);
+++}
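+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). It contrasts the
+++// sign-extending and zero-extending widening conversions on the same bytes.
+++#if 0
+++static void sse2neon_example_cvt_epi8_epi32(void)
+++{
+++ int32_t s[4];
+++ uint32_t u[4];
+++ __m128i bytes = _mm_setr_epi8((char) 250, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+++ _mm_storeu_si128((__m128i *) s, _mm_cvtepi8_epi32(bytes)); // s = {-6, 1, 2, 3}
+++ _mm_storeu_si128((__m128i *) u, _mm_cvtepu8_epi32(bytes)); // u = {250, 1, 2, 3}
+++}
+++#endif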
+++
+++// Conditionally multiply the packed double-precision (64-bit) floating-point
+++// elements in a and b using the high 4 bits in imm8, sum the two products, and
+++// conditionally store the sum in dst using the low 4 bits of imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_pd
+++FORCE_INLINE __m128d _mm_dp_pd(__m128d a, __m128d b, const int imm)
+++{
+++ // Generate mask value from constant immediate bit value
+++ const int64_t bit0Mask = imm & 0x01 ? UINT64_MAX : 0;
+++ const int64_t bit1Mask = imm & 0x02 ? UINT64_MAX : 0;
+++#if !SSE2NEON_PRECISE_DP
+++ const int64_t bit4Mask = imm & 0x10 ? UINT64_MAX : 0;
+++ const int64_t bit5Mask = imm & 0x20 ? UINT64_MAX : 0;
+++#endif
+++ // Conditional multiplication
+++#if !SSE2NEON_PRECISE_DP
+++ __m128d mul = _mm_mul_pd(a, b);
+++ const __m128d mulMask =
+++ _mm_castsi128_pd(_mm_set_epi64x(bit5Mask, bit4Mask));
+++ __m128d tmp = _mm_and_pd(mul, mulMask);
+++#else
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ double d0 = (imm & 0x10) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 0) *
+++ vgetq_lane_f64(vreinterpretq_f64_m128d(b), 0)
+++ : 0;
+++ double d1 = (imm & 0x20) ? vgetq_lane_f64(vreinterpretq_f64_m128d(a), 1) *
+++ vgetq_lane_f64(vreinterpretq_f64_m128d(b), 1)
+++ : 0;
+++#else
+++ double d0 = (imm & 0x10) ? ((double *) &a)[0] * ((double *) &b)[0] : 0;
+++ double d1 = (imm & 0x20) ? ((double *) &a)[1] * ((double *) &b)[1] : 0;
+++#endif
+++ __m128d tmp = _mm_set_pd(d1, d0);
+++#endif
+++ // Sum the products
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ double sum = vpaddd_f64(vreinterpretq_f64_m128d(tmp));
+++#else
+++ double sum = *((double *) &tmp) + *(((double *) &tmp) + 1);
+++#endif
+++ // Conditionally store the sum
+++ const __m128d sumMask =
+++ _mm_castsi128_pd(_mm_set_epi64x(bit1Mask, bit0Mask));
+++ __m128d res = _mm_and_pd(_mm_set_pd1(sum), sumMask);
+++ return res;
+++}
+++
+++// Conditionally multiply the packed single-precision (32-bit) floating-point
+++// elements in a and b using the high 4 bits in imm8, sum the four products,
+++// and conditionally store the sum in dst using the low 4 bits of imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_dp_ps
+++FORCE_INLINE __m128 _mm_dp_ps(__m128 a, __m128 b, const int imm)
+++{
+++ float32x4_t elementwise_prod = _mm_mul_ps(a, b);
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ /* shortcuts */
+++ if (imm == 0xFF) {
+++ return _mm_set1_ps(vaddvq_f32(elementwise_prod));
+++ }
+++
+++ if ((imm & 0x0F) == 0x0F) {
+++ if (!(imm & (1 << 4)))
+++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 0);
+++ if (!(imm & (1 << 5)))
+++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 1);
+++ if (!(imm & (1 << 6)))
+++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 2);
+++ if (!(imm & (1 << 7)))
+++ elementwise_prod = vsetq_lane_f32(0.0f, elementwise_prod, 3);
+++
+++ return _mm_set1_ps(vaddvq_f32(elementwise_prod));
+++ }
+++#endif
+++
+++ float s = 0.0f;
+++
+++ if (imm & (1 << 4))
+++ s += vgetq_lane_f32(elementwise_prod, 0);
+++ if (imm & (1 << 5))
+++ s += vgetq_lane_f32(elementwise_prod, 1);
+++ if (imm & (1 << 6))
+++ s += vgetq_lane_f32(elementwise_prod, 2);
+++ if (imm & (1 << 7))
+++ s += vgetq_lane_f32(elementwise_prod, 3);
+++
+++ const float32_t res[4] = {
+++ (imm & 0x1) ? s : 0.0f,
+++ (imm & 0x2) ? s : 0.0f,
+++ (imm & 0x4) ? s : 0.0f,
+++ (imm & 0x8) ? s : 0.0f,
+++ };
+++ return vreinterpretq_m128_f32(vld1q_f32(res));
+++}
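+++
+++// Illustrative usage sketch (not part of sse2neon; the example function name
+++// is made up and the block is disabled with #if 0). The high nibble of the
+++// immediate selects which products enter the sum, the low nibble selects
+++// which output lanes receive it.
+++#if 0
+++static void sse2neon_example_dp_ps(void)
+++{
+++ __m128 a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
+++ __m128 b = _mm_setr_ps(10.0f, 20.0f, 30.0f, 40.0f);
+++ // Full 4-element dot product, broadcast to every lane:
+++ float dot = _mm_cvtss_f32(_mm_dp_ps(a, b, 0xFF)); // 10 + 40 + 90 + 160 = 300
+++ // 0x71: multiply lanes 0-2 only, write the sum to lane 0, zero the rest.
+++ float partial = _mm_cvtss_f32(_mm_dp_ps(a, b, 0x71)); // 140
+++ (void) dot;
+++ (void) partial;
+++}
+++#endif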
+++
+++// Extract a 32-bit integer from a, selected with imm8, and store the result in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi32
+++// FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
+++#define _mm_extract_epi32(a, imm) \
+++ vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))
+++
+++// Extract a 64-bit integer from a, selected with imm8, and store the result in
+++// dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi64
+++// FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
+++#define _mm_extract_epi64(a, imm) \
+++ vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))
+++
+++// Extract an 8-bit integer from a, selected with imm8, and store the result in
+++// the lower element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_extract_epi8
+++// FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
+++#define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))
+++
+++// Extract a single-precision (32-bit) floating-point element from a, selected
+++// with imm8, and store its bit pattern in dst as an int.
+++// FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
+++#define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))
+++
+++// Round the packed double-precision (64-bit) floating-point elements in a down
+++// to an integer value, and store the results as packed double-precision
+++// floating-point elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_pd
+++FORCE_INLINE __m128d _mm_floor_pd(__m128d a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128d_f64(vrndmq_f64(vreinterpretq_f64_m128d(a)));
+++#else
+++ double *f = (double *) &a;
+++ return _mm_set_pd(floor(f[1]), floor(f[0]));
+++#endif
+++}
+++
+++// Round the packed single-precision (32-bit) floating-point elements in a down
+++// to an integer value, and store the results as packed single-precision
+++// floating-point elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ps
+++FORCE_INLINE __m128 _mm_floor_ps(__m128 a)
+++{
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ return vreinterpretq_m128_f32(vrndmq_f32(vreinterpretq_f32_m128(a)));
+++#else
+++ float *f = (float *) &a;
+++ return _mm_set_ps(floorf(f[3]), floorf(f[2]), floorf(f[1]), floorf(f[0]));
+++#endif
+++}
+++
+++// Round the lower double-precision (64-bit) floating-point element in b down to
+++// an integer value, store the result as a double-precision floating-point
+++// element in the lower element of dst, and copy the upper element from a to the
+++// upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_sd
+++FORCE_INLINE __m128d _mm_floor_sd(__m128d a, __m128d b)
+++{
+++ return _mm_move_sd(a, _mm_floor_pd(b));
+++}
+++
+++// Round the lower single-precision (32-bit) floating-point element in b down to
+++// an integer value, store the result as a single-precision floating-point
+++// element in the lower element of dst, and copy the upper 3 packed elements
+++// from a to the upper elements of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_floor_ss
+++FORCE_INLINE __m128 _mm_floor_ss(__m128 a, __m128 b)
+++{
+++ return _mm_move_ss(a, _mm_floor_ps(b));
+++}
+++
+++// Copy a to dst, and insert the 32-bit integer i into dst at the location
+++// specified by imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi32
+++// FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
+++// __constrange(0,4) int imm)
+++#define _mm_insert_epi32(a, b, imm) \
+++ vreinterpretq_m128i_s32( \
+++ vsetq_lane_s32((b), vreinterpretq_s32_m128i(a), (imm)))
+++
+++// Copy a to dst, and insert the 64-bit integer i into dst at the location
+++// specified by imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi64
+++// FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
+++// __constrange(0,2) int imm)
+++#define _mm_insert_epi64(a, b, imm) \
+++ vreinterpretq_m128i_s64( \
+++ vsetq_lane_s64((b), vreinterpretq_s64_m128i(a), (imm)))
+++
+++// Copy a to dst, and insert the lower 8-bit integer from i into dst at the
+++// location specified by imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_insert_epi8
+++// FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
+++// __constrange(0,16) int imm)
+++#define _mm_insert_epi8(a, b, imm) \
+++ vreinterpretq_m128i_s8(vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm)))
+++
+++// Copy a to tmp, then insert a single-precision (32-bit) floating-point
+++// element from b into tmp using the control in imm8. Store tmp to dst using
+++// the mask in imm8 (elements are zeroed out when the corresponding bit is set).
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=insert_ps
+++#define _mm_insert_ps(a, b, imm8) \
+++ _sse2neon_define2( \
+++ __m128, a, b, \
+++ float32x4_t tmp1 = \
+++ vsetq_lane_f32(vgetq_lane_f32(_b, (imm8 >> 6) & 0x3), \
+++ vreinterpretq_f32_m128(_a), 0); \
+++ float32x4_t tmp2 = \
+++ vsetq_lane_f32(vgetq_lane_f32(tmp1, 0), \
+++ vreinterpretq_f32_m128(_a), ((imm8 >> 4) & 0x3)); \
+++ const uint32_t data[4] = \
+++ _sse2neon_init(((imm8) & (1 << 0)) ? UINT32_MAX : 0, \
+++ ((imm8) & (1 << 1)) ? UINT32_MAX : 0, \
+++ ((imm8) & (1 << 2)) ? UINT32_MAX : 0, \
+++ ((imm8) & (1 << 3)) ? UINT32_MAX : 0); \
+++ uint32x4_t mask = vld1q_u32(data); \
+++ float32x4_t all_zeros = vdupq_n_f32(0); \
+++ \
+++ _sse2neon_return(vreinterpretq_m128_f32( \
+++ vbslq_f32(mask, all_zeros, vreinterpretq_f32_m128(tmp2))));)
+++
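+++// Illustrative reading of the imm8 encoding above (an assumption chosen for
+++// demonstration, not part of the upstream header): with imm8 = 0x90,
+++// bits [7:6] = 2 select element 2 of b, bits [5:4] = 1 place it into
+++// element 1 of a, and the zero mask in bits [3:0] is 0, so no lane is
+++// cleared:
+++//   __m128 r = _mm_insert_ps(a, b, 0x90); // r = {a0, b2, a2, a3}
+++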
+++// Compare packed signed 32-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi32
+++FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Compare packed signed 8-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epi8
+++FORCE_INLINE __m128i _mm_max_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vmaxq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Compare packed unsigned 16-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu16
+++FORCE_INLINE __m128i _mm_max_epu16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vmaxq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+++}
+++
+++// Compare packed unsigned 32-bit integers in a and b, and store packed maximum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_max_epu32
+++FORCE_INLINE __m128i _mm_max_epu32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u32(
+++ vmaxq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
+++}
+++
+++// Compare packed signed 32-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi32
+++FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vminq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Compare packed signed 8-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epi8
+++FORCE_INLINE __m128i _mm_min_epi8(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s8(
+++ vminq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
+++}
+++
+++// Compare packed unsigned 16-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu16
+++FORCE_INLINE __m128i _mm_min_epu16(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vminq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b)));
+++}
+++
+++// Compare packed unsigned 32-bit integers in a and b, and store packed minimum
+++// values in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_min_epu32
+++FORCE_INLINE __m128i _mm_min_epu32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u32(
+++ vminq_u32(vreinterpretq_u32_m128i(a), vreinterpretq_u32_m128i(b)));
+++}
+++
+++// Horizontally compute the minimum amongst the packed unsigned 16-bit integers
+++// in a, store the minimum and index in dst, and zero the remaining bits in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_minpos_epu16
+++FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
+++{
+++ __m128i dst;
+++ uint16_t min, idx = 0;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ // Find the minimum value
+++ min = vminvq_u16(vreinterpretq_u16_m128i(a));
+++
+++ // Get the index of the minimum value
+++ static const uint16_t idxv[] = {0, 1, 2, 3, 4, 5, 6, 7};
+++ uint16x8_t minv = vdupq_n_u16(min);
+++ uint16x8_t cmeq = vceqq_u16(minv, vreinterpretq_u16_m128i(a));
+++ idx = vminvq_u16(vornq_u16(vld1q_u16(idxv), cmeq));
+++#else
+++ // Find the minimum value
+++ __m64 tmp;
+++ tmp = vreinterpret_m64_u16(
+++ vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
+++ vget_high_u16(vreinterpretq_u16_m128i(a))));
+++ tmp = vreinterpret_m64_u16(
+++ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
+++ tmp = vreinterpret_m64_u16(
+++ vpmin_u16(vreinterpret_u16_m64(tmp), vreinterpret_u16_m64(tmp)));
+++ min = vget_lane_u16(vreinterpret_u16_m64(tmp), 0);
+++ // Get the index of the minimum value
+++ int i;
+++ for (i = 0; i < 8; i++) {
+++ if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
+++ idx = (uint16_t) i;
+++ break;
+++ }
+++ a = _mm_srli_si128(a, 2);
+++ }
+++#endif
+++ // Generate result
+++ dst = _mm_setzero_si128();
+++ dst = vreinterpretq_m128i_u16(
+++ vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
+++ dst = vreinterpretq_m128i_u16(
+++ vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
+++ return dst;
+++}
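+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header):
+++//   __m128i v = _mm_setr_epi16(9, 3, 7, 3, 200, 50, 6, 8);
+++//   __m128i r = _mm_minpos_epu16(v);
+++//   // 16-bit lane 0 of r holds the minimum (3), lane 1 holds the index of
+++//   // its first occurrence (1), and the remaining lanes are zero.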
+++
+++// Compute the sum of absolute differences (SADs) of quadruplets of unsigned
+++// 8-bit integers in a compared to those in b, and store the 16-bit results in
+++// dst. Eight SADs are performed using one quadruplet from b and eight
+++// quadruplets from a. One quadruplet is selected from b starting at the
+++// offset specified in imm8. Eight quadruplets are formed from sequential 8-bit
+++// integers selected from a starting at the offset specified in imm8.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mpsadbw_epu8
+++FORCE_INLINE __m128i _mm_mpsadbw_epu8(__m128i a, __m128i b, const int imm)
+++{
+++ uint8x16_t _a, _b;
+++
+++ switch (imm & 0x4) {
+++ case 0:
+++ // do nothing
+++ _a = vreinterpretq_u8_m128i(a);
+++ break;
+++ case 4:
+++ _a = vreinterpretq_u8_u32(vextq_u32(vreinterpretq_u32_m128i(a),
+++ vreinterpretq_u32_m128i(a), 1));
+++ break;
+++ default:
+++#if defined(__GNUC__) || defined(__clang__)
+++ __builtin_unreachable();
+++#elif defined(_MSC_VER)
+++ __assume(0);
+++#endif
+++ break;
+++ }
+++
+++ switch (imm & 0x3) {
+++ case 0:
+++ _b = vreinterpretq_u8_u32(
+++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 0)));
+++ break;
+++ case 1:
+++ _b = vreinterpretq_u8_u32(
+++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 1)));
+++ break;
+++ case 2:
+++ _b = vreinterpretq_u8_u32(
+++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 2)));
+++ break;
+++ case 3:
+++ _b = vreinterpretq_u8_u32(
+++ vdupq_n_u32(vgetq_lane_u32(vreinterpretq_u32_m128i(b), 3)));
+++ break;
+++ default:
+++#if defined(__GNUC__) || defined(__clang__)
+++ __builtin_unreachable();
+++#elif defined(_MSC_VER)
+++ __assume(0);
+++#endif
+++ break;
+++ }
+++
+++ int16x8_t c04, c15, c26, c37;
+++ uint8x8_t low_b = vget_low_u8(_b);
+++ c04 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a), low_b));
+++ uint8x16_t _a_1 = vextq_u8(_a, _a, 1);
+++ c15 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_1), low_b));
+++ uint8x16_t _a_2 = vextq_u8(_a, _a, 2);
+++ c26 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_2), low_b));
+++ uint8x16_t _a_3 = vextq_u8(_a, _a, 3);
+++ c37 = vreinterpretq_s16_u16(vabdl_u8(vget_low_u8(_a_3), low_b));
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ // |0|4|2|6|
+++ c04 = vpaddq_s16(c04, c26);
+++ // |1|5|3|7|
+++ c15 = vpaddq_s16(c15, c37);
+++
+++ int32x4_t trn1_c =
+++ vtrn1q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
+++ int32x4_t trn2_c =
+++ vtrn2q_s32(vreinterpretq_s32_s16(c04), vreinterpretq_s32_s16(c15));
+++ return vreinterpretq_m128i_s16(vpaddq_s16(vreinterpretq_s16_s32(trn1_c),
+++ vreinterpretq_s16_s32(trn2_c)));
+++#else
+++ int16x4_t c01, c23, c45, c67;
+++ c01 = vpadd_s16(vget_low_s16(c04), vget_low_s16(c15));
+++ c23 = vpadd_s16(vget_low_s16(c26), vget_low_s16(c37));
+++ c45 = vpadd_s16(vget_high_s16(c04), vget_high_s16(c15));
+++ c67 = vpadd_s16(vget_high_s16(c26), vget_high_s16(c37));
+++
+++ return vreinterpretq_m128i_s16(
+++ vcombine_s16(vpadd_s16(c01, c23), vpadd_s16(c45, c67)));
+++#endif
+++}
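+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header): with imm8 = 0 the quadruplet is the first four bytes
+++// of b and the eight windows start at byte offsets 0..7 of a, so
+++//   __m128i a = _mm_setzero_si128();
+++//   __m128i b = _mm_setr_epi8(1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+++//   __m128i r = _mm_mpsadbw_epu8(a, b, 0);
+++// gives |0-1| + |0-2| + |0-3| + |0-4| = 10 in every 16-bit lane of r.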
+++
+++// Multiply the low signed 32-bit integers from each packed 64-bit element in
+++// a and b, and store the signed 64-bit results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mul_epi32
+++FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
+++{
+++ // vmull_s32 upcasts instead of masking, so we downcast.
+++ int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
+++ int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
+++ return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
+++}
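+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header): only the even-indexed 32-bit lanes take part.
+++//   __m128i a = _mm_setr_epi32(1, 2, 3, 4);
+++//   __m128i b = _mm_setr_epi32(5, 6, 7, 8);
+++//   __m128i r = _mm_mul_epi32(a, b); // 64-bit lanes: {1*5, 3*7} = {5, 21}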
+++
+++// Multiply the packed 32-bit integers in a and b, producing intermediate 64-bit
+++// integers, and store the low 32 bits of the intermediate integers in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_mullo_epi32
+++FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_s32(
+++ vmulq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
+++}
+++
+++// Convert packed signed 32-bit integers from a and b to packed 16-bit integers
+++// using unsigned saturation, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_packus_epi32
+++FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u16(
+++ vcombine_u16(vqmovun_s32(vreinterpretq_s32_m128i(a)),
+++ vqmovun_s32(vreinterpretq_s32_m128i(b))));
+++}
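+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header): unsigned saturation clamps to the range [0, 65535].
+++//   __m128i a = _mm_setr_epi32(-5, 70000, 0, 65535);
+++//   __m128i r = _mm_packus_epi32(a, a); // 16-bit lanes 0..3: 0, 65535, 0, 65535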
+++
+++// Round the packed double-precision (64-bit) floating-point elements in a using
+++// the rounding parameter, and store the results as packed double-precision
+++// floating-point elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_pd
+++FORCE_INLINE __m128d _mm_round_pd(__m128d a, int rounding)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ switch (rounding) {
+++ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
+++ return vreinterpretq_m128d_f64(vrndnq_f64(vreinterpretq_f64_m128d(a)));
+++ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
+++ return _mm_floor_pd(a);
+++ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
+++ return _mm_ceil_pd(a);
+++ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
+++ return vreinterpretq_m128d_f64(vrndq_f64(vreinterpretq_f64_m128d(a)));
+++ default: //_MM_FROUND_CUR_DIRECTION
+++ return vreinterpretq_m128d_f64(vrndiq_f64(vreinterpretq_f64_m128d(a)));
+++ }
+++#else
+++ double *v_double = (double *) &a;
+++
+++ if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
+++ (rounding == _MM_FROUND_CUR_DIRECTION &&
+++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
+++ double res[2], tmp;
+++ for (int i = 0; i < 2; i++) {
+++ tmp = (v_double[i] < 0) ? -v_double[i] : v_double[i];
+++ double roundDown = floor(tmp); // Round down value
+++ double roundUp = ceil(tmp); // Round up value
+++ double diffDown = tmp - roundDown;
+++ double diffUp = roundUp - tmp;
+++ if (diffDown < diffUp) {
+++ /* If it's closer to the round down value, then use it */
+++ res[i] = roundDown;
+++ } else if (diffDown > diffUp) {
+++ /* If it's closer to the round up value, then use it */
+++ res[i] = roundUp;
+++ } else {
+++ /* If it's equidistant between round up and round down value,
+++ * pick the one which is an even number */
+++ double half = roundDown / 2;
+++ if (half != floor(half)) {
+++ /* If the round down value is odd, return the round up value
+++ */
+++ res[i] = roundUp;
+++ } else {
+++ /* If the round up value is odd, return the round down value
+++ */
+++ res[i] = roundDown;
+++ }
+++ }
+++ res[i] = (v_double[i] < 0) ? -res[i] : res[i];
+++ }
+++ return _mm_set_pd(res[1], res[0]);
+++ } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
+++ (rounding == _MM_FROUND_CUR_DIRECTION &&
+++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
+++ return _mm_floor_pd(a);
+++ } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
+++ (rounding == _MM_FROUND_CUR_DIRECTION &&
+++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
+++ return _mm_ceil_pd(a);
+++ }
+++ return _mm_set_pd(v_double[1] > 0 ? floor(v_double[1]) : ceil(v_double[1]),
+++ v_double[0] > 0 ? floor(v_double[0]) : ceil(v_double[0]));
+++#endif
+++}
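+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header): _MM_FROUND_TO_NEAREST_INT rounds ties to even.
+++//   __m128d v = _mm_set_pd(3.5, 2.5); // lane 1 = 3.5, lane 0 = 2.5
+++//   __m128d r =
+++//       _mm_round_pd(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+++//   // lane 0 (2.5) becomes 2.0 and lane 1 (3.5) becomes 4.0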
+++
+++// Round the packed single-precision (32-bit) floating-point elements in a using
+++// the rounding parameter, and store the results as packed single-precision
+++// floating-point elements in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ps
+++FORCE_INLINE __m128 _mm_round_ps(__m128 a, int rounding)
+++{
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || \
+++ defined(__ARM_FEATURE_DIRECTED_ROUNDING)
+++ switch (rounding) {
+++ case (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC):
+++ return vreinterpretq_m128_f32(vrndnq_f32(vreinterpretq_f32_m128(a)));
+++ case (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC):
+++ return _mm_floor_ps(a);
+++ case (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC):
+++ return _mm_ceil_ps(a);
+++ case (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC):
+++ return vreinterpretq_m128_f32(vrndq_f32(vreinterpretq_f32_m128(a)));
+++ default: //_MM_FROUND_CUR_DIRECTION
+++ return vreinterpretq_m128_f32(vrndiq_f32(vreinterpretq_f32_m128(a)));
+++ }
+++#else
+++ float *v_float = (float *) &a;
+++
+++ if (rounding == (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) ||
+++ (rounding == _MM_FROUND_CUR_DIRECTION &&
+++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST)) {
+++ uint32x4_t signmask = vdupq_n_u32(0x80000000);
+++ float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
+++ vdupq_n_f32(0.5f)); /* +/- 0.5 */
+++ int32x4_t r_normal = vcvtq_s32_f32(vaddq_f32(
+++ vreinterpretq_f32_m128(a), half)); /* round to integer: [a + 0.5]*/
+++ int32x4_t r_trunc = vcvtq_s32_f32(
+++ vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
+++ int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
+++ vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
+++ int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
+++ vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
+++ float32x4_t delta = vsubq_f32(
+++ vreinterpretq_f32_m128(a),
+++ vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
+++ uint32x4_t is_delta_half =
+++ vceqq_f32(delta, half); /* delta == +/- 0.5 */
+++ return vreinterpretq_m128_f32(
+++ vcvtq_f32_s32(vbslq_s32(is_delta_half, r_even, r_normal)));
+++ } else if (rounding == (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC) ||
+++ (rounding == _MM_FROUND_CUR_DIRECTION &&
+++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN)) {
+++ return _mm_floor_ps(a);
+++ } else if (rounding == (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC) ||
+++ (rounding == _MM_FROUND_CUR_DIRECTION &&
+++ _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP)) {
+++ return _mm_ceil_ps(a);
+++ }
+++ return _mm_set_ps(v_float[3] > 0 ? floorf(v_float[3]) : ceilf(v_float[3]),
+++ v_float[2] > 0 ? floorf(v_float[2]) : ceilf(v_float[2]),
+++ v_float[1] > 0 ? floorf(v_float[1]) : ceilf(v_float[1]),
+++ v_float[0] > 0 ? floorf(v_float[0]) : ceilf(v_float[0]));
+++#endif
+++}
+++
+++// Round the lower double-precision (64-bit) floating-point element in b using
+++// the rounding parameter, store the result as a double-precision floating-point
+++// element in the lower element of dst, and copy the upper element from a to the
+++// upper element of dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_sd
+++FORCE_INLINE __m128d _mm_round_sd(__m128d a, __m128d b, int rounding)
+++{
+++ return _mm_move_sd(a, _mm_round_pd(b, rounding));
+++}
+++
+++// Round the lower single-precision (32-bit) floating-point element in b using
+++// the rounding parameter, store the result as a single-precision floating-point
+++// element in the lower element of dst, and copy the upper 3 packed elements
+++// from a to the upper elements of dst. Rounding is done according to the
+++// rounding[3:0] parameter, which can be one of:
+++//   (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC) // round to nearest
+++//   (_MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC)     // round down
+++//   (_MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)     // round up
+++//   (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)        // truncate
+++//   _MM_FROUND_CUR_DIRECTION // use MXCSR.RC; see _MM_SET_ROUNDING_MODE
+++// where _MM_FROUND_NO_EXC suppresses exceptions.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_round_ss
+++FORCE_INLINE __m128 _mm_round_ss(__m128 a, __m128 b, int rounding)
+++{
+++ return _mm_move_ss(a, _mm_round_ps(b, rounding));
+++}
+++
+++// Load 128-bits of integer data from memory into dst using a non-temporal
+++// memory hint. mem_addr must be aligned on a 16-byte boundary or a
+++// general-protection exception may be generated.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_stream_load_si128
+++FORCE_INLINE __m128i _mm_stream_load_si128(__m128i *p)
+++{
+++#if __has_builtin(__builtin_nontemporal_store)
+++ return __builtin_nontemporal_load(p);
+++#else
+++ return vreinterpretq_m128i_s64(vld1q_s64((int64_t *) p));
+++#endif
+++}
+++
+++// Compute the bitwise NOT of a and then AND with a 128-bit vector containing
+++// all 1's, and return 1 if the result is zero, otherwise return 0.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_ones
+++FORCE_INLINE int _mm_test_all_ones(__m128i a)
+++{
+++ return (uint64_t) (vgetq_lane_s64(a, 0) & vgetq_lane_s64(a, 1)) ==
+++ ~(uint64_t) 0;
+++}
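+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header):
+++//   _mm_test_all_ones(_mm_set1_epi32(-1));   // 1: every bit is set
+++//   _mm_test_all_ones(_mm_setzero_si128()); // 0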
+++
+++// Compute the bitwise AND of 128 bits (representing integer data) in a and
+++// mask, and return 1 if the result is zero, otherwise return 0.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_test_all_zeros
+++FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
+++{
+++ int64x2_t a_and_mask =
+++ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(mask));
+++ return !(vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1));
+++}
+++
+++// Compute the bitwise AND of 128 bits (representing integer data) in a and
+++// mask, and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute
+++// the bitwise NOT of a and then AND with mask, and set CF to 1 if the result is
+++// zero, otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
+++// otherwise return 0.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_test_mix_ones_zero
+++// Note: Argument names may be wrong in the Intel intrinsics guide.
+++FORCE_INLINE int _mm_test_mix_ones_zeros(__m128i a, __m128i mask)
+++{
+++ uint64x2_t v = vreinterpretq_u64_m128i(a);
+++ uint64x2_t m = vreinterpretq_u64_m128i(mask);
+++
+++ // find ones (set-bits) and zeros (clear-bits) under clip mask
+++ uint64x2_t ones = vandq_u64(m, v);
+++ uint64x2_t zeros = vbicq_u64(m, v);
+++
+++ // If both 128-bit variables are populated (non-zero) then return 1.
+++ // For comparison purposes, first compact each var down to 32 bits.
+++ uint32x2_t reduced = vpmax_u32(vqmovn_u64(ones), vqmovn_u64(zeros));
+++
+++ // if folding minimum is non-zero then both vars must be non-zero
+++ return (vget_lane_u32(vpmin_u32(reduced, reduced), 0) != 0);
+++}
+++
+++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+++// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+++// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+++// otherwise set CF to 0. Return the CF value.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testc_si128
+++FORCE_INLINE int _mm_testc_si128(__m128i a, __m128i b)
+++{
+++ int64x2_t s64 =
+++ vbicq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a));
+++ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
+++}
+++
+++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+++// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+++// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+++// otherwise set CF to 0. Return 1 if both the ZF and CF values are zero,
+++// otherwise return 0.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testnzc_si128
+++#define _mm_testnzc_si128(a, b) _mm_test_mix_ones_zeros(a, b)
+++
+++// Compute the bitwise AND of 128 bits (representing integer data) in a and b,
+++// and set ZF to 1 if the result is zero, otherwise set ZF to 0. Compute the
+++// bitwise NOT of a and then AND with b, and set CF to 1 if the result is zero,
+++// otherwise set CF to 0. Return the ZF value.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_testz_si128
+++FORCE_INLINE int _mm_testz_si128(__m128i a, __m128i b)
+++{
+++ int64x2_t s64 =
+++ vandq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b));
+++ return !(vgetq_lane_s64(s64, 0) | vgetq_lane_s64(s64, 1));
+++}
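+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header): _mm_testz_si128(a, b) is 1 exactly when a & b has no
+++// bits set.
+++//   _mm_testz_si128(_mm_set1_epi32(0x0F0F0F0F),
+++//                   _mm_set1_epi32(0x70707070)); // 1
+++//   _mm_testz_si128(_mm_set1_epi32(1), _mm_set1_epi32(1)); // 0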
+++
+++/* SSE4.2 */
+++
+++static const uint16_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask16b[8] = {
+++ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+++};
+++static const uint8_t ALIGN_STRUCT(16) _sse2neon_cmpestr_mask8b[16] = {
+++ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+++ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+++};
+++
+++/* specify the source data format */
+++#define _SIDD_UBYTE_OPS 0x00 /* unsigned 8-bit characters */
+++#define _SIDD_UWORD_OPS 0x01 /* unsigned 16-bit characters */
+++#define _SIDD_SBYTE_OPS 0x02 /* signed 8-bit characters */
+++#define _SIDD_SWORD_OPS 0x03 /* signed 16-bit characters */
+++
+++/* specify the comparison operation */
+++#define _SIDD_CMP_EQUAL_ANY 0x00 /* compare equal any: strchr */
+++#define _SIDD_CMP_RANGES 0x04 /* compare ranges */
+++#define _SIDD_CMP_EQUAL_EACH 0x08 /* compare equal each: strcmp */
+++#define _SIDD_CMP_EQUAL_ORDERED 0x0C /* compare equal ordered */
+++
+++/* specify the polarity */
+++#define _SIDD_POSITIVE_POLARITY 0x00
+++#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+++#define _SIDD_NEGATIVE_POLARITY 0x10 /* negate results */
+++#define _SIDD_MASKED_NEGATIVE_POLARITY \
+++ 0x30 /* negate results only before end of string */
+++
+++/* specify the output selection in _mm_cmpXstri */
+++#define _SIDD_LEAST_SIGNIFICANT 0x00
+++#define _SIDD_MOST_SIGNIFICANT 0x40
+++
+++/* specify the output selection in _mm_cmpXstrm */
+++#define _SIDD_BIT_MASK 0x00
+++#define _SIDD_UNIT_MASK 0x40
+++
+++/* Pattern Matching for C macros.
+++ * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
+++ */
+++
+++/* catenate */
+++#define SSE2NEON_PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
+++#define SSE2NEON_CAT(a, b) SSE2NEON_PRIMITIVE_CAT(a, b)
+++
+++#define SSE2NEON_IIF(c) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_IIF_, c)
+++/* run the 2nd parameter */
+++#define SSE2NEON_IIF_0(t, ...) __VA_ARGS__
+++/* run the 1st parameter */
+++#define SSE2NEON_IIF_1(t, ...) t
+++
+++#define SSE2NEON_COMPL(b) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_COMPL_, b)
+++#define SSE2NEON_COMPL_0 1
+++#define SSE2NEON_COMPL_1 0
+++
+++#define SSE2NEON_DEC(x) SSE2NEON_PRIMITIVE_CAT(SSE2NEON_DEC_, x)
+++#define SSE2NEON_DEC_1 0
+++#define SSE2NEON_DEC_2 1
+++#define SSE2NEON_DEC_3 2
+++#define SSE2NEON_DEC_4 3
+++#define SSE2NEON_DEC_5 4
+++#define SSE2NEON_DEC_6 5
+++#define SSE2NEON_DEC_7 6
+++#define SSE2NEON_DEC_8 7
+++#define SSE2NEON_DEC_9 8
+++#define SSE2NEON_DEC_10 9
+++#define SSE2NEON_DEC_11 10
+++#define SSE2NEON_DEC_12 11
+++#define SSE2NEON_DEC_13 12
+++#define SSE2NEON_DEC_14 13
+++#define SSE2NEON_DEC_15 14
+++#define SSE2NEON_DEC_16 15
+++
+++/* detection */
+++#define SSE2NEON_CHECK_N(x, n, ...) n
+++#define SSE2NEON_CHECK(...) SSE2NEON_CHECK_N(__VA_ARGS__, 0, )
+++#define SSE2NEON_PROBE(x) x, 1,
+++
+++#define SSE2NEON_NOT(x) SSE2NEON_CHECK(SSE2NEON_PRIMITIVE_CAT(SSE2NEON_NOT_, x))
+++#define SSE2NEON_NOT_0 SSE2NEON_PROBE(~)
+++
+++#define SSE2NEON_BOOL(x) SSE2NEON_COMPL(SSE2NEON_NOT(x))
+++#define SSE2NEON_IF(c) SSE2NEON_IIF(SSE2NEON_BOOL(c))
+++
+++#define SSE2NEON_EAT(...)
+++#define SSE2NEON_EXPAND(...) __VA_ARGS__
+++#define SSE2NEON_WHEN(c) SSE2NEON_IF(c)(SSE2NEON_EXPAND, SSE2NEON_EAT)
+++
+++/* recursion */
+++/* deferred expression */
+++#define SSE2NEON_EMPTY()
+++#define SSE2NEON_DEFER(id) id SSE2NEON_EMPTY()
+++#define SSE2NEON_OBSTRUCT(...) __VA_ARGS__ SSE2NEON_DEFER(SSE2NEON_EMPTY)()
+++#define SSE2NEON_EXPAND(...) __VA_ARGS__
+++
+++#define SSE2NEON_EVAL(...) \
+++ SSE2NEON_EVAL1(SSE2NEON_EVAL1(SSE2NEON_EVAL1(__VA_ARGS__)))
+++#define SSE2NEON_EVAL1(...) \
+++ SSE2NEON_EVAL2(SSE2NEON_EVAL2(SSE2NEON_EVAL2(__VA_ARGS__)))
+++#define SSE2NEON_EVAL2(...) \
+++ SSE2NEON_EVAL3(SSE2NEON_EVAL3(SSE2NEON_EVAL3(__VA_ARGS__)))
+++#define SSE2NEON_EVAL3(...) __VA_ARGS__
+++
+++#define SSE2NEON_REPEAT(count, macro, ...) \
+++ SSE2NEON_WHEN(count) \
+++ (SSE2NEON_OBSTRUCT(SSE2NEON_REPEAT_INDIRECT)()( \
+++ SSE2NEON_DEC(count), macro, \
+++ __VA_ARGS__) SSE2NEON_OBSTRUCT(macro)(SSE2NEON_DEC(count), \
+++ __VA_ARGS__))
+++#define SSE2NEON_REPEAT_INDIRECT() SSE2NEON_REPEAT
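+++
+++// For illustration (not part of the upstream header):
+++//   SSE2NEON_EVAL(SSE2NEON_REPEAT(3, M, x))
+++// expands to
+++//   M(0, x) M(1, x) M(2, x)
+++// which is how SSE2NEON_FILL_LANE and the other per-lane macros below are
+++// stamped out once per vector lane.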
+++
+++#define SSE2NEON_SIZE_OF_byte 8
+++#define SSE2NEON_NUMBER_OF_LANES_byte 16
+++#define SSE2NEON_SIZE_OF_word 16
+++#define SSE2NEON_NUMBER_OF_LANES_word 8
+++
+++#define SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE(i, type) \
+++ mtx[i] = vreinterpretq_m128i_##type(vceqq_##type( \
+++ vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i)), \
+++ vreinterpretq_##type##_m128i(a)));
+++
+++#define SSE2NEON_FILL_LANE(i, type) \
+++ vec_b[i] = \
+++ vdupq_n_##type(vgetq_lane_##type(vreinterpretq_##type##_m128i(b), i));
+++
+++#define PCMPSTR_RANGES(a, b, mtx, data_type_prefix, type_prefix, size, \
+++ number_of_lanes, byte_or_word) \
+++ do { \
+++ SSE2NEON_CAT( \
+++ data_type_prefix, \
+++ SSE2NEON_CAT(size, \
+++ SSE2NEON_CAT(x, SSE2NEON_CAT(number_of_lanes, _t)))) \
+++ vec_b[number_of_lanes]; \
+++ __m128i mask = SSE2NEON_IIF(byte_or_word)( \
+++ vreinterpretq_m128i_u16(vdupq_n_u16(0xff)), \
+++ vreinterpretq_m128i_u32(vdupq_n_u32(0xffff))); \
+++ SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, SSE2NEON_FILL_LANE, \
+++ SSE2NEON_CAT(type_prefix, size))) \
+++ for (int i = 0; i < number_of_lanes; i++) { \
+++ mtx[i] = SSE2NEON_CAT(vreinterpretq_m128i_u, \
+++ size)(SSE2NEON_CAT(vbslq_u, size)( \
+++ SSE2NEON_CAT(vreinterpretq_u, \
+++ SSE2NEON_CAT(size, _m128i))(mask), \
+++ SSE2NEON_CAT(vcgeq_, SSE2NEON_CAT(type_prefix, size))( \
+++ vec_b[i], \
+++ SSE2NEON_CAT( \
+++ vreinterpretq_, \
+++ SSE2NEON_CAT(type_prefix, \
+++ SSE2NEON_CAT(size, _m128i(a))))), \
+++ SSE2NEON_CAT(vcleq_, SSE2NEON_CAT(type_prefix, size))( \
+++ vec_b[i], \
+++ SSE2NEON_CAT( \
+++ vreinterpretq_, \
+++ SSE2NEON_CAT(type_prefix, \
+++ SSE2NEON_CAT(size, _m128i(a))))))); \
+++ } \
+++ } while (0)
+++
+++#define PCMPSTR_EQ(a, b, mtx, size, number_of_lanes) \
+++ do { \
+++ SSE2NEON_EVAL(SSE2NEON_REPEAT(number_of_lanes, \
+++ SSE2NEON_COMPARE_EQUAL_THEN_FILL_LANE, \
+++ SSE2NEON_CAT(u, size))) \
+++ } while (0)
+++
+++#define SSE2NEON_CMP_EQUAL_ANY_IMPL(type) \
+++ static int _sse2neon_cmp_##type##_equal_any(__m128i a, int la, __m128i b, \
+++ int lb) \
+++ { \
+++ __m128i mtx[16]; \
+++ PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
+++ return SSE2NEON_CAT( \
+++ _sse2neon_aggregate_equal_any_, \
+++ SSE2NEON_CAT( \
+++ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+++ SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
+++ type))))(la, lb, mtx); \
+++ }
+++
+++#define SSE2NEON_CMP_RANGES_IMPL(type, data_type, us, byte_or_word) \
+++ static int _sse2neon_cmp_##us##type##_ranges(__m128i a, int la, __m128i b, \
+++ int lb) \
+++ { \
+++ __m128i mtx[16]; \
+++ PCMPSTR_RANGES( \
+++ a, b, mtx, data_type, us, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), byte_or_word); \
+++ return SSE2NEON_CAT( \
+++ _sse2neon_aggregate_ranges_, \
+++ SSE2NEON_CAT( \
+++ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+++ SSE2NEON_CAT(x, SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, \
+++ type))))(la, lb, mtx); \
+++ }
+++
+++#define SSE2NEON_CMP_EQUAL_ORDERED_IMPL(type) \
+++ static int _sse2neon_cmp_##type##_equal_ordered(__m128i a, int la, \
+++ __m128i b, int lb) \
+++ { \
+++ __m128i mtx[16]; \
+++ PCMPSTR_EQ(a, b, mtx, SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type)); \
+++ return SSE2NEON_CAT( \
+++ _sse2neon_aggregate_equal_ordered_, \
+++ SSE2NEON_CAT( \
+++ SSE2NEON_CAT(SSE2NEON_SIZE_OF_, type), \
+++ SSE2NEON_CAT(x, \
+++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type))))( \
+++ SSE2NEON_CAT(SSE2NEON_NUMBER_OF_LANES_, type), la, lb, mtx); \
+++ }
+++
+++static int _sse2neon_aggregate_equal_any_8x16(int la, int lb, __m128i mtx[16])
+++{
+++ int res = 0;
+++ int m = (1 << la) - 1;
+++ uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
+++ uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
+++ uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
+++ uint8x16_t vec = vcombine_u8(t_lo, t_hi);
+++ for (int j = 0; j < lb; j++) {
+++ mtx[j] = vreinterpretq_m128i_u8(
+++ vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
+++ mtx[j] = vreinterpretq_m128i_u8(
+++ vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
+++ int tmp = _sse2neon_vaddvq_u8(vreinterpretq_u8_m128i(mtx[j])) ? 1 : 0;
+++ res |= (tmp << j);
+++ }
+++ return res;
+++}
+++
+++static int _sse2neon_aggregate_equal_any_16x8(int la, int lb, __m128i mtx[16])
+++{
+++ int res = 0;
+++ int m = (1 << la) - 1;
+++ uint16x8_t vec =
+++ vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
+++ for (int j = 0; j < lb; j++) {
+++ mtx[j] = vreinterpretq_m128i_u16(
+++ vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
+++ mtx[j] = vreinterpretq_m128i_u16(
+++ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
+++ int tmp = _sse2neon_vaddvq_u16(vreinterpretq_u16_m128i(mtx[j])) ? 1 : 0;
+++ res |= (tmp << j);
+++ }
+++ return res;
+++}
+++
+++/* clang-format off */
+++#define SSE2NEON_GENERATE_CMP_EQUAL_ANY(prefix) \
+++ prefix##IMPL(byte) \
+++ prefix##IMPL(word)
+++/* clang-format on */
+++
+++SSE2NEON_GENERATE_CMP_EQUAL_ANY(SSE2NEON_CMP_EQUAL_ANY_)
+++
+++static int _sse2neon_aggregate_ranges_16x8(int la, int lb, __m128i mtx[16])
+++{
+++ int res = 0;
+++ int m = (1 << la) - 1;
+++ uint16x8_t vec =
+++ vtstq_u16(vdupq_n_u16(m), vld1q_u16(_sse2neon_cmpestr_mask16b));
+++ for (int j = 0; j < lb; j++) {
+++ mtx[j] = vreinterpretq_m128i_u16(
+++ vandq_u16(vec, vreinterpretq_u16_m128i(mtx[j])));
+++ mtx[j] = vreinterpretq_m128i_u16(
+++ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 15));
+++ __m128i tmp = vreinterpretq_m128i_u32(
+++ vshrq_n_u32(vreinterpretq_u32_m128i(mtx[j]), 16));
+++ uint32x4_t vec_res = vandq_u32(vreinterpretq_u32_m128i(mtx[j]),
+++ vreinterpretq_u32_m128i(tmp));
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ int t = vaddvq_u32(vec_res) ? 1 : 0;
+++#else
+++ uint64x2_t sumh = vpaddlq_u32(vec_res);
+++ int t = vgetq_lane_u64(sumh, 0) + vgetq_lane_u64(sumh, 1);
+++#endif
+++ res |= (t << j);
+++ }
+++ return res;
+++}
+++
+++static int _sse2neon_aggregate_ranges_8x16(int la, int lb, __m128i mtx[16])
+++{
+++ int res = 0;
+++ int m = (1 << la) - 1;
+++ uint8x8_t vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
+++ uint8x8_t t_lo = vtst_u8(vdup_n_u8(m & 0xff), vec_mask);
+++ uint8x8_t t_hi = vtst_u8(vdup_n_u8(m >> 8), vec_mask);
+++ uint8x16_t vec = vcombine_u8(t_lo, t_hi);
+++ for (int j = 0; j < lb; j++) {
+++ mtx[j] = vreinterpretq_m128i_u8(
+++ vandq_u8(vec, vreinterpretq_u8_m128i(mtx[j])));
+++ mtx[j] = vreinterpretq_m128i_u8(
+++ vshrq_n_u8(vreinterpretq_u8_m128i(mtx[j]), 7));
+++ __m128i tmp = vreinterpretq_m128i_u16(
+++ vshrq_n_u16(vreinterpretq_u16_m128i(mtx[j]), 8));
+++ uint16x8_t vec_res = vandq_u16(vreinterpretq_u16_m128i(mtx[j]),
+++ vreinterpretq_u16_m128i(tmp));
+++ int t = _sse2neon_vaddvq_u16(vec_res) ? 1 : 0;
+++ res |= (t << j);
+++ }
+++ return res;
+++}
+++
+++#define SSE2NEON_CMP_RANGES_IS_BYTE 1
+++#define SSE2NEON_CMP_RANGES_IS_WORD 0
+++
+++/* clang-format off */
+++#define SSE2NEON_GENERATE_CMP_RANGES(prefix) \
+++ prefix##IMPL(byte, uint, u, prefix##IS_BYTE) \
+++ prefix##IMPL(byte, int, s, prefix##IS_BYTE) \
+++ prefix##IMPL(word, uint, u, prefix##IS_WORD) \
+++ prefix##IMPL(word, int, s, prefix##IS_WORD)
+++/* clang-format on */
+++
+++SSE2NEON_GENERATE_CMP_RANGES(SSE2NEON_CMP_RANGES_)
+++
+++#undef SSE2NEON_CMP_RANGES_IS_BYTE
+++#undef SSE2NEON_CMP_RANGES_IS_WORD
+++
+++static int _sse2neon_cmp_byte_equal_each(__m128i a, int la, __m128i b, int lb)
+++{
+++ uint8x16_t mtx =
+++ vceqq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b));
+++ int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
+++ int m1 = 0x10000 - (1 << la);
+++ int tb = 0x10000 - (1 << lb);
+++ uint8x8_t vec_mask, vec0_lo, vec0_hi, vec1_lo, vec1_hi;
+++ uint8x8_t tmp_lo, tmp_hi, res_lo, res_hi;
+++ vec_mask = vld1_u8(_sse2neon_cmpestr_mask8b);
+++ vec0_lo = vtst_u8(vdup_n_u8(m0), vec_mask);
+++ vec0_hi = vtst_u8(vdup_n_u8(m0 >> 8), vec_mask);
+++ vec1_lo = vtst_u8(vdup_n_u8(m1), vec_mask);
+++ vec1_hi = vtst_u8(vdup_n_u8(m1 >> 8), vec_mask);
+++ tmp_lo = vtst_u8(vdup_n_u8(tb), vec_mask);
+++ tmp_hi = vtst_u8(vdup_n_u8(tb >> 8), vec_mask);
+++
+++ res_lo = vbsl_u8(vec0_lo, vdup_n_u8(0), vget_low_u8(mtx));
+++ res_hi = vbsl_u8(vec0_hi, vdup_n_u8(0), vget_high_u8(mtx));
+++ res_lo = vbsl_u8(vec1_lo, tmp_lo, res_lo);
+++ res_hi = vbsl_u8(vec1_hi, tmp_hi, res_hi);
+++ res_lo = vand_u8(res_lo, vec_mask);
+++ res_hi = vand_u8(res_hi, vec_mask);
+++
+++ int res = _sse2neon_vaddv_u8(res_lo) + (_sse2neon_vaddv_u8(res_hi) << 8);
+++ return res;
+++}
+++
+++static int _sse2neon_cmp_word_equal_each(__m128i a, int la, __m128i b, int lb)
+++{
+++ uint16x8_t mtx =
+++ vceqq_u16(vreinterpretq_u16_m128i(a), vreinterpretq_u16_m128i(b));
+++ int m0 = (la < lb) ? 0 : ((1 << la) - (1 << lb));
+++ int m1 = 0x100 - (1 << la);
+++ int tb = 0x100 - (1 << lb);
+++ uint16x8_t vec_mask = vld1q_u16(_sse2neon_cmpestr_mask16b);
+++ uint16x8_t vec0 = vtstq_u16(vdupq_n_u16(m0), vec_mask);
+++ uint16x8_t vec1 = vtstq_u16(vdupq_n_u16(m1), vec_mask);
+++ uint16x8_t tmp = vtstq_u16(vdupq_n_u16(tb), vec_mask);
+++ mtx = vbslq_u16(vec0, vdupq_n_u16(0), mtx);
+++ mtx = vbslq_u16(vec1, tmp, mtx);
+++ mtx = vandq_u16(mtx, vec_mask);
+++ return _sse2neon_vaddvq_u16(mtx);
+++}
+++
+++#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE 1
+++#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD 0
+++
+++#define SSE2NEON_AGGREGATE_EQUAL_ORDER_IMPL(size, number_of_lanes, data_type) \
+++ static int _sse2neon_aggregate_equal_ordered_##size##x##number_of_lanes( \
+++ int bound, int la, int lb, __m128i mtx[16]) \
+++ { \
+++ int res = 0; \
+++ int m1 = SSE2NEON_IIF(data_type)(0x10000, 0x100) - (1 << la); \
+++ uint##size##x8_t vec_mask = SSE2NEON_IIF(data_type)( \
+++ vld1_u##size(_sse2neon_cmpestr_mask##size##b), \
+++ vld1q_u##size(_sse2neon_cmpestr_mask##size##b)); \
+++ uint##size##x##number_of_lanes##_t vec1 = SSE2NEON_IIF(data_type)( \
+++ vcombine_u##size(vtst_u##size(vdup_n_u##size(m1), vec_mask), \
+++ vtst_u##size(vdup_n_u##size(m1 >> 8), vec_mask)), \
+++ vtstq_u##size(vdupq_n_u##size(m1), vec_mask)); \
+++ uint##size##x##number_of_lanes##_t vec_minusone = vdupq_n_u##size(-1); \
+++ uint##size##x##number_of_lanes##_t vec_zero = vdupq_n_u##size(0); \
+++ for (int j = 0; j < lb; j++) { \
+++ mtx[j] = vreinterpretq_m128i_u##size(vbslq_u##size( \
+++ vec1, vec_minusone, vreinterpretq_u##size##_m128i(mtx[j]))); \
+++ } \
+++ for (int j = lb; j < bound; j++) { \
+++ mtx[j] = vreinterpretq_m128i_u##size( \
+++ vbslq_u##size(vec1, vec_minusone, vec_zero)); \
+++ } \
+++ unsigned SSE2NEON_IIF(data_type)(char, short) *ptr = \
+++ (unsigned SSE2NEON_IIF(data_type)(char, short) *) mtx; \
+++ for (int i = 0; i < bound; i++) { \
+++ int val = 1; \
+++ for (int j = 0, k = i; j < bound - i && k < bound; j++, k++) \
+++ val &= ptr[k * bound + j]; \
+++ res += val << i; \
+++ } \
+++ return res; \
+++ }
+++
+++/* clang-format off */
+++#define SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(prefix) \
+++ prefix##IMPL(8, 16, prefix##IS_UBYTE) \
+++ prefix##IMPL(16, 8, prefix##IS_UWORD)
+++/* clang-format on */
+++
+++SSE2NEON_GENERATE_AGGREGATE_EQUAL_ORDER(SSE2NEON_AGGREGATE_EQUAL_ORDER_)
+++
+++#undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UBYTE
+++#undef SSE2NEON_AGGREGATE_EQUAL_ORDER_IS_UWORD
+++
+++/* clang-format off */
+++#define SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(prefix) \
+++ prefix##IMPL(byte) \
+++ prefix##IMPL(word)
+++/* clang-format on */
+++
+++SSE2NEON_GENERATE_CMP_EQUAL_ORDERED(SSE2NEON_CMP_EQUAL_ORDERED_)
+++
+++#define SSE2NEON_CMPESTR_LIST \
+++ _(CMP_UBYTE_EQUAL_ANY, cmp_byte_equal_any) \
+++ _(CMP_UWORD_EQUAL_ANY, cmp_word_equal_any) \
+++ _(CMP_SBYTE_EQUAL_ANY, cmp_byte_equal_any) \
+++ _(CMP_SWORD_EQUAL_ANY, cmp_word_equal_any) \
+++ _(CMP_UBYTE_RANGES, cmp_ubyte_ranges) \
+++ _(CMP_UWORD_RANGES, cmp_uword_ranges) \
+++ _(CMP_SBYTE_RANGES, cmp_sbyte_ranges) \
+++ _(CMP_SWORD_RANGES, cmp_sword_ranges) \
+++ _(CMP_UBYTE_EQUAL_EACH, cmp_byte_equal_each) \
+++ _(CMP_UWORD_EQUAL_EACH, cmp_word_equal_each) \
+++ _(CMP_SBYTE_EQUAL_EACH, cmp_byte_equal_each) \
+++ _(CMP_SWORD_EQUAL_EACH, cmp_word_equal_each) \
+++ _(CMP_UBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
+++ _(CMP_UWORD_EQUAL_ORDERED, cmp_word_equal_ordered) \
+++ _(CMP_SBYTE_EQUAL_ORDERED, cmp_byte_equal_ordered) \
+++ _(CMP_SWORD_EQUAL_ORDERED, cmp_word_equal_ordered)
+++
+++enum {
+++#define _(name, func_suffix) name,
+++ SSE2NEON_CMPESTR_LIST
+++#undef _
+++};
+++typedef int (*cmpestr_func_t)(__m128i a, int la, __m128i b, int lb);
+++static cmpestr_func_t _sse2neon_cmpfunc_table[] = {
+++#define _(name, func_suffix) _sse2neon_##func_suffix,
+++ SSE2NEON_CMPESTR_LIST
+++#undef _
+++};
+++
+++FORCE_INLINE int _sse2neon_sido_negative(int res, int lb, int imm8, int bound)
+++{
+++ switch (imm8 & 0x30) {
+++ case _SIDD_NEGATIVE_POLARITY:
+++ res ^= 0xffffffff;
+++ break;
+++ case _SIDD_MASKED_NEGATIVE_POLARITY:
+++ res ^= (1 << lb) - 1;
+++ break;
+++ default:
+++ break;
+++ }
+++
+++ return res & ((bound == 8) ? 0xFF : 0xFFFF);
+++}
+++
+++FORCE_INLINE int _sse2neon_clz(unsigned int x)
+++{
+++#ifdef _MSC_VER
+++ unsigned long cnt = 0;
+++ if (_BitScanReverse(&cnt, x))
+++ return 31 - cnt;
+++ return 32;
+++#else
+++ return x != 0 ? __builtin_clz(x) : 32;
+++#endif
+++}
+++
+++FORCE_INLINE int _sse2neon_ctz(unsigned int x)
+++{
+++#ifdef _MSC_VER
+++ unsigned long cnt = 0;
+++ if (_BitScanForward(&cnt, x))
+++ return cnt;
+++ return 32;
+++#else
+++ return x != 0 ? __builtin_ctz(x) : 32;
+++#endif
+++}
+++
+++FORCE_INLINE int _sse2neon_ctzll(unsigned long long x)
+++{
+++#ifdef _MSC_VER
+++ unsigned long cnt;
+++#if defined(SSE2NEON_HAS_BITSCAN64)
+++ if (_BitScanForward64(&cnt, x))
+++ return (int) (cnt);
+++#else
+++ if (_BitScanForward(&cnt, (unsigned long) (x)))
+++ return (int) cnt;
+++ if (_BitScanForward(&cnt, (unsigned long) (x >> 32)))
+++ return (int) (cnt + 32);
+++#endif /* SSE2NEON_HAS_BITSCAN64 */
+++ return 64;
+++#else /* assume GNU compatible compilers */
+++ return x != 0 ? __builtin_ctzll(x) : 64;
+++#endif
+++}
+++
+++#define SSE2NEON_MIN(x, y) ((x) < (y) ? (x) : (y))
+++
+++#define SSE2NEON_CMPSTR_SET_UPPER(var, imm) \
+++ const int var = (imm & 0x01) ? 8 : 16
+++
+++#define SSE2NEON_CMPESTRX_LEN_PAIR(a, b, la, lb) \
+++ int tmp1 = la ^ (la >> 31); \
+++ la = tmp1 - (la >> 31); \
+++ int tmp2 = lb ^ (lb >> 31); \
+++ lb = tmp2 - (lb >> 31); \
+++ la = SSE2NEON_MIN(la, bound); \
+++ lb = SSE2NEON_MIN(lb, bound)
+++
+++// Compare all pairs of characters in strings a and b,
+++// then aggregate the result.
+++// As the only difference between PCMPESTR* and PCMPISTR* is the way the
+++// string lengths are obtained, we use SSE2NEON_CMP{E,I}STRX_LEN_PAIR to get
+++// the lengths of strings a and b.
+++#define SSE2NEON_COMP_AGG(a, b, la, lb, imm8, IE) \
+++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8); \
+++ SSE2NEON_##IE##_LEN_PAIR(a, b, la, lb); \
+++ int r2 = (_sse2neon_cmpfunc_table[imm8 & 0x0f])(a, la, b, lb); \
+++ r2 = _sse2neon_sido_negative(r2, lb, imm8, bound)
+++
+++#define SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8) \
+++ return (r2 == 0) ? bound \
+++ : ((imm8 & 0x40) ? (31 - _sse2neon_clz(r2)) \
+++ : _sse2neon_ctz(r2))
+++
+++#define SSE2NEON_CMPSTR_GENERATE_MASK(dst) \
+++ __m128i dst = vreinterpretq_m128i_u8(vdupq_n_u8(0)); \
+++ if (imm8 & 0x40) { \
+++ if (bound == 8) { \
+++ uint16x8_t tmp = vtstq_u16(vdupq_n_u16(r2), \
+++ vld1q_u16(_sse2neon_cmpestr_mask16b)); \
+++ dst = vreinterpretq_m128i_u16(vbslq_u16( \
+++ tmp, vdupq_n_u16(-1), vreinterpretq_u16_m128i(dst))); \
+++ } else { \
+++ uint8x16_t vec_r2 = \
+++ vcombine_u8(vdup_n_u8(r2), vdup_n_u8(r2 >> 8)); \
+++ uint8x16_t tmp = \
+++ vtstq_u8(vec_r2, vld1q_u8(_sse2neon_cmpestr_mask8b)); \
+++ dst = vreinterpretq_m128i_u8( \
+++ vbslq_u8(tmp, vdupq_n_u8(-1), vreinterpretq_u8_m128i(dst))); \
+++ } \
+++ } else { \
+++ if (bound == 16) { \
+++ dst = vreinterpretq_m128i_u16( \
+++ vsetq_lane_u16(r2 & 0xffff, vreinterpretq_u16_m128i(dst), 0)); \
+++ } else { \
+++ dst = vreinterpretq_m128i_u8( \
+++ vsetq_lane_u8(r2 & 0xff, vreinterpretq_u8_m128i(dst), 0)); \
+++ } \
+++ } \
+++ return dst
+++
+++// Compare packed strings in a and b with lengths la and lb using the control
+++// in imm8, and returns 1 if b did not contain a null character and the
+++// resulting mask was zero, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestra
+++FORCE_INLINE int _mm_cmpestra(__m128i a,
+++ int la,
+++ __m128i b,
+++ int lb,
+++ const int imm8)
+++{
+++ int lb_cpy = lb;
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+++ return !r2 & (lb_cpy > bound);
+++}
+++
+++// Compare packed strings in a and b with lengths la and lb using the control in
+++// imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrc
+++FORCE_INLINE int _mm_cmpestrc(__m128i a,
+++ int la,
+++ __m128i b,
+++ int lb,
+++ const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+++ return r2 != 0;
+++}
+++
+++// Compare packed strings in a and b with lengths la and lb using the control
+++// in imm8, and store the generated index in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestri
+++FORCE_INLINE int _mm_cmpestri(__m128i a,
+++ int la,
+++ __m128i b,
+++ int lb,
+++ const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+++ SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
+++}
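+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header): find the first vowel in a 16-byte block.
+++//   __m128i set = _mm_loadu_si128((const __m128i *) "aeiou\0\0\0\0\0\0\0\0\0\0\0");
+++//   __m128i blk = _mm_loadu_si128((const __m128i *) "xyzrstvowel_____");
+++//   int idx = _mm_cmpestri(set, 5, blk, 16,
+++//                          _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
+++//   // idx == 7, the position of the 'o' in blk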
+++
+++// Compare packed strings in a and b with lengths la and lb using the control
+++// in imm8, and store the generated mask in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrm
+++FORCE_INLINE __m128i
+++_mm_cmpestrm(__m128i a, int la, __m128i b, int lb, const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+++ SSE2NEON_CMPSTR_GENERATE_MASK(dst);
+++}
+++
+++// Compare packed strings in a and b with lengths la and lb using the control in
+++// imm8, and returns bit 0 of the resulting bit mask.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestro
+++FORCE_INLINE int _mm_cmpestro(__m128i a,
+++ int la,
+++ __m128i b,
+++ int lb,
+++ const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPESTRX);
+++ return r2 & 1;
+++}
+++
+++// Compare packed strings in a and b with lengths la and lb using the control in
+++// imm8, and returns 1 if any character in a was null, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrs
+++FORCE_INLINE int _mm_cmpestrs(__m128i a,
+++ int la,
+++ __m128i b,
+++ int lb,
+++ const int imm8)
+++{
+++ (void) a;
+++ (void) b;
+++ (void) lb;
+++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+++ return la <= (bound - 1);
+++}
+++
+++// Compare packed strings in a and b with lengths la and lb using the control in
+++// imm8, and returns 1 if any character in b was null, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpestrz
+++FORCE_INLINE int _mm_cmpestrz(__m128i a,
+++ int la,
+++ __m128i b,
+++ int lb,
+++ const int imm8)
+++{
+++ (void) a;
+++ (void) b;
+++ (void) la;
+++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+++ return lb <= (bound - 1);
+++}
+++
+++#define SSE2NEON_CMPISTRX_LENGTH(str, len, imm8) \
+++ do { \
+++ if (imm8 & 0x01) { \
+++ uint16x8_t equal_mask_##str = \
+++ vceqq_u16(vreinterpretq_u16_m128i(str), vdupq_n_u16(0)); \
+++ uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
+++ uint64_t matches_##str = \
+++ vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
+++ len = _sse2neon_ctzll(matches_##str) >> 3; \
+++ } else { \
+++ uint16x8_t equal_mask_##str = vreinterpretq_u16_u8( \
+++ vceqq_u8(vreinterpretq_u8_m128i(str), vdupq_n_u8(0))); \
+++ uint8x8_t res_##str = vshrn_n_u16(equal_mask_##str, 4); \
+++ uint64_t matches_##str = \
+++ vget_lane_u64(vreinterpret_u64_u8(res_##str), 0); \
+++ len = _sse2neon_ctzll(matches_##str) >> 2; \
+++ } \
+++ } while (0)
+++
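+++// The macro above computes the implicit string length with a "shrn by 4"
+++// trick: comparing against zero yields an all-ones lane per NUL element,
+++// vshrn_n_u16(..., 4) compresses that mask to 4 bits per byte (8 bits per
+++// 16-bit word), and the count of trailing zeros of the resulting 64-bit
+++// value, divided by 4 (bytes) or 8 (words), is the index of the first
+++// terminator.
+++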
+++#define SSE2NEON_CMPISTRX_LEN_PAIR(a, b, la, lb) \
+++ int la, lb; \
+++ do { \
+++ SSE2NEON_CMPISTRX_LENGTH(a, la, imm8); \
+++ SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8); \
+++ } while (0)
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and returns 1 if b did not contain a null character and the resulting
+++// mask was zero, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistra
+++FORCE_INLINE int _mm_cmpistra(__m128i a, __m128i b, const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+++ return !r2 & (lb >= bound);
+++}
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and returns 1 if the resulting mask was non-zero, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrc
+++FORCE_INLINE int _mm_cmpistrc(__m128i a, __m128i b, const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+++ return r2 != 0;
+++}
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and store the generated index in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistri
+++FORCE_INLINE int _mm_cmpistri(__m128i a, __m128i b, const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+++ SSE2NEON_CMPSTR_GENERATE_INDEX(r2, bound, imm8);
+++}
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and store the generated mask in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrm
+++FORCE_INLINE __m128i _mm_cmpistrm(__m128i a, __m128i b, const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+++ SSE2NEON_CMPSTR_GENERATE_MASK(dst);
+++}
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and returns bit 0 of the resulting bit mask.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistro
+++FORCE_INLINE int _mm_cmpistro(__m128i a, __m128i b, const int imm8)
+++{
+++ SSE2NEON_COMP_AGG(a, b, la, lb, imm8, CMPISTRX);
+++ return r2 & 1;
+++}
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and returns 1 if any character in a was null, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrs
+++FORCE_INLINE int _mm_cmpistrs(__m128i a, __m128i b, const int imm8)
+++{
+++ (void) b;
+++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+++ int la;
+++ SSE2NEON_CMPISTRX_LENGTH(a, la, imm8);
+++ return la <= (bound - 1);
+++}
+++
+++// Compare packed strings with implicit lengths in a and b using the control in
+++// imm8, and returns 1 if any character in b was null, and 0 otherwise.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_cmpistrz
+++FORCE_INLINE int _mm_cmpistrz(__m128i a, __m128i b, const int imm8)
+++{
+++ (void) a;
+++ SSE2NEON_CMPSTR_SET_UPPER(bound, imm8);
+++ int lb;
+++ SSE2NEON_CMPISTRX_LENGTH(b, lb, imm8);
+++ return lb <= (bound - 1);
+++}
+++
+++// Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers
+++// in b for greater than.
+++FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ return vreinterpretq_m128i_u64(
+++ vcgtq_s64(vreinterpretq_s64_m128i(a), vreinterpretq_s64_m128i(b)));
+++#else
+++ return vreinterpretq_m128i_s64(vshrq_n_s64(
+++ vqsubq_s64(vreinterpretq_s64_m128i(b), vreinterpretq_s64_m128i(a)),
+++ 63));
+++#endif
+++}
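+++
+++// Note on the ARMv7 fallback above: vqsubq_s64(b, a) is negative exactly
+++// when a > b (saturation preserves the sign), so the arithmetic shift by 63
+++// spreads the sign bit into all-ones (true) or all-zeros (false) per lane.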
+++
+++// Starting with the initial value in crc, accumulates a CRC32 value for
+++// unsigned 16-bit integer v, and stores the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u16
+++FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
+++{
+++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+++ __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
+++ : [c] "+r"(crc)
+++ : [v] "r"(v));
+++#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
+++ (defined(_M_ARM64) && !defined(__clang__))
+++ crc = __crc32ch(crc, v);
+++#else
+++ crc = _mm_crc32_u8(crc, v & 0xff);
+++ crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
+++#endif
+++ return crc;
+++}
+++
+++// Starting with the initial value in crc, accumulates a CRC32 value for
+++// unsigned 32-bit integer v, and stores the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u32
+++FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
+++{
+++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+++ __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
+++ : [c] "+r"(crc)
+++ : [v] "r"(v));
+++#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
+++ (defined(_M_ARM64) && !defined(__clang__))
+++ crc = __crc32cw(crc, v);
+++#else
+++ crc = _mm_crc32_u16(crc, v & 0xffff);
+++ crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
+++#endif
+++ return crc;
+++}
+++
+++// Starting with the initial value in crc, accumulates a CRC32 value for
+++// unsigned 64-bit integer v, and stores the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u64
+++FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
+++{
+++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+++ __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
+++ : [c] "+r"(crc)
+++ : [v] "r"(v));
+++#elif (defined(_M_ARM64) && !defined(__clang__))
+++ crc = __crc32cd((uint32_t) crc, v);
+++#else
+++ crc = _mm_crc32_u32((uint32_t) (crc), v & 0xffffffff);
+++ crc = _mm_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff);
+++#endif
+++ return crc;
+++}
+++
+++// Starting with the initial value in crc, accumulates a CRC32 value for
+++// unsigned 8-bit integer v, and stores the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_crc32_u8
+++FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
+++{
+++#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+++ __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
+++ : [c] "+r"(crc)
+++ : [v] "r"(v));
+++#elif ((__ARM_ARCH == 8) && defined(__ARM_FEATURE_CRC32)) || \
+++ (defined(_M_ARM64) && !defined(__clang__))
+++ crc = __crc32cb(crc, v);
+++#else
+++ crc ^= v;
+++ for (int bit = 0; bit < 8; bit++) {
+++ if (crc & 1)
+++ crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
+++ else
+++ crc = (crc >> 1);
+++ }
+++#endif
+++ return crc;
+++}
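+++
+++// Illustrative usage (an assumption chosen for demonstration, not part of
+++// the upstream header; buf and len are placeholder names): the usual CRC-32C
+++// convention starts from ~0 and inverts the final value.
+++//   uint32_t crc = 0xFFFFFFFF;
+++//   for (size_t i = 0; i < len; i++)
+++//       crc = _mm_crc32_u8(crc, buf[i]);
+++//   crc = ~crc; // CRC-32C of buf[0..len-1]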
+++
+++/* AES */
+++
+++#if !defined(__ARM_FEATURE_CRYPTO) && (!defined(_M_ARM64) || defined(__clang__))
+++/* clang-format off */
+++#define SSE2NEON_AES_SBOX(w) \
+++ { \
+++ w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), \
+++ w(0xc5), w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), \
+++ w(0xab), w(0x76), w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), \
+++ w(0x59), w(0x47), w(0xf0), w(0xad), w(0xd4), w(0xa2), w(0xaf), \
+++ w(0x9c), w(0xa4), w(0x72), w(0xc0), w(0xb7), w(0xfd), w(0x93), \
+++ w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc), w(0x34), w(0xa5), \
+++ w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15), w(0x04), \
+++ w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a), \
+++ w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), \
+++ w(0x75), w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), \
+++ w(0x5a), w(0xa0), w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), \
+++ w(0xe3), w(0x2f), w(0x84), w(0x53), w(0xd1), w(0x00), w(0xed), \
+++ w(0x20), w(0xfc), w(0xb1), w(0x5b), w(0x6a), w(0xcb), w(0xbe), \
+++ w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf), w(0xd0), w(0xef), \
+++ w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85), w(0x45), \
+++ w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8), \
+++ w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), \
+++ w(0xf5), w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), \
+++ w(0xf3), w(0xd2), w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), \
+++ w(0x97), w(0x44), w(0x17), w(0xc4), w(0xa7), w(0x7e), w(0x3d), \
+++ w(0x64), w(0x5d), w(0x19), w(0x73), w(0x60), w(0x81), w(0x4f), \
+++ w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88), w(0x46), w(0xee), \
+++ w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb), w(0xe0), \
+++ w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c), \
+++ w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), \
+++ w(0x79), w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), \
+++ w(0x4e), w(0xa9), w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), \
+++ w(0x7a), w(0xae), w(0x08), w(0xba), w(0x78), w(0x25), w(0x2e), \
+++ w(0x1c), w(0xa6), w(0xb4), w(0xc6), w(0xe8), w(0xdd), w(0x74), \
+++ w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a), w(0x70), w(0x3e), \
+++ w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e), w(0x61), \
+++ w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e), \
+++ w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), \
+++ w(0x94), w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), \
+++ w(0x28), w(0xdf), w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), \
+++ w(0xe6), w(0x42), w(0x68), w(0x41), w(0x99), w(0x2d), w(0x0f), \
+++ w(0xb0), w(0x54), w(0xbb), w(0x16) \
+++ }
+++#define SSE2NEON_AES_RSBOX(w) \
+++ { \
+++ w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), \
+++ w(0x38), w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), \
+++ w(0xd7), w(0xfb), w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), \
+++ w(0x2f), w(0xff), w(0x87), w(0x34), w(0x8e), w(0x43), w(0x44), \
+++ w(0xc4), w(0xde), w(0xe9), w(0xcb), w(0x54), w(0x7b), w(0x94), \
+++ w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d), w(0xee), w(0x4c), \
+++ w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e), w(0x08), \
+++ w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2), \
+++ w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), \
+++ w(0x25), w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), \
+++ w(0x98), w(0x16), w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), \
+++ w(0x65), w(0xb6), w(0x92), w(0x6c), w(0x70), w(0x48), w(0x50), \
+++ w(0xfd), w(0xed), w(0xb9), w(0xda), w(0x5e), w(0x15), w(0x46), \
+++ w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84), w(0x90), w(0xd8), \
+++ w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a), w(0xf7), \
+++ w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06), \
+++ w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), \
+++ w(0x02), w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), \
+++ w(0x8a), w(0x6b), w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), \
+++ w(0x67), w(0xdc), w(0xea), w(0x97), w(0xf2), w(0xcf), w(0xce), \
+++ w(0xf0), w(0xb4), w(0xe6), w(0x73), w(0x96), w(0xac), w(0x74), \
+++ w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85), w(0xe2), w(0xf9), \
+++ w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e), w(0x47), \
+++ w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89), \
+++ w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), \
+++ w(0x1b), w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), \
+++ w(0x79), w(0x20), w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), \
+++ w(0xcd), w(0x5a), w(0xf4), w(0x1f), w(0xdd), w(0xa8), w(0x33), \
+++ w(0x88), w(0x07), w(0xc7), w(0x31), w(0xb1), w(0x12), w(0x10), \
+++ w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f), w(0x60), w(0x51), \
+++ w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d), w(0x2d), \
+++ w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef), \
+++ w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), \
+++ w(0xb0), w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), \
+++ w(0x99), w(0x61), w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), \
+++ w(0x77), w(0xd6), w(0x26), w(0xe1), w(0x69), w(0x14), w(0x63), \
+++ w(0x55), w(0x21), w(0x0c), w(0x7d) \
+++ }
+++/* clang-format on */
+++
+++/* X Macro trick. See https://en.wikipedia.org/wiki/X_Macro */
+++#define SSE2NEON_AES_H0(x) (x)
+++static const uint8_t _sse2neon_sbox[256] = SSE2NEON_AES_SBOX(SSE2NEON_AES_H0);
+++static const uint8_t _sse2neon_rsbox[256] = SSE2NEON_AES_RSBOX(SSE2NEON_AES_H0);
+++#undef SSE2NEON_AES_H0
+++
+++/* x_time function and matrix multiply function */
+++#if !defined(__aarch64__) && !defined(_M_ARM64)
+++#define SSE2NEON_XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b))
+++#define SSE2NEON_MULTIPLY(x, y) \
+++ (((y & 1) * x) ^ ((y >> 1 & 1) * SSE2NEON_XT(x)) ^ \
+++ ((y >> 2 & 1) * SSE2NEON_XT(SSE2NEON_XT(x))) ^ \
+++ ((y >> 3 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x)))) ^ \
+++ ((y >> 4 & 1) * SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(SSE2NEON_XT(x))))))
+++#endif
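+++
+++// For illustration, the classic AES example from FIPS-197 section 4.2,
+++// {57} * {13} = {fe}, works out with these macros as follows (keeping only
+++// the low 8 bits of each intermediate value):
+++//   SSE2NEON_XT(0x57) = 0xae                              ({57} * {02})
+++//   SSE2NEON_MULTIPLY(0x57, 0x13) = 0x57 ^ 0xae ^ 0x07 = 0xfe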
+++
+++// In the absence of crypto extensions, implement aesenc using regular NEON
+++// intrinsics instead. See:
+++// https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
+++// https://www.workofard.com/2017/07/ghash-for-low-end-cores/
+++// for more information.
+++FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i RoundKey)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const uint8_t shift_rows[] = {
+++ 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
+++ 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
+++ };
+++ static const uint8_t ror32by8[] = {
+++ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+++ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
+++ };
+++
+++ uint8x16_t v;
+++ uint8x16_t w = vreinterpretq_u8_m128i(a);
+++
+++ /* shift rows */
+++ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
+++
+++ /* sub bytes */
+++ // Here, we split the whole 256-byte table into four 64-byte tables and
+++ // look them up one after another. After each lookup, we load the next
+++ // table, which starts at the next 64-byte offset. Because the table base
+++ // has advanced, the index passed to `vqtbx4q_u8()` has to be reduced by
+++ // the same constant that was added to the table pointer.
+++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
+++ // 'w - 0x40' is equivalent to 'vsubq_u8(w, vdupq_n_u8(0x40))'
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
+++
+++ /* mix columns */
+++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+++ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+++ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+++
+++ /* add round key */
+++ return vreinterpretq_m128i_u8(w) ^ RoundKey;
+++
+++#else /* ARMv7-A implementation for a table-based AES */
+++#define SSE2NEON_AES_B2W(b0, b1, b2, b3) \
+++ (((uint32_t) (b3) << 24) | ((uint32_t) (b2) << 16) | \
+++ ((uint32_t) (b1) << 8) | (uint32_t) (b0))
+++// multiplying 'x' by 2 in GF(2^8)
+++#define SSE2NEON_AES_F2(x) ((x << 1) ^ (((x >> 7) & 1) * 0x011b /* WPOLY */))
+++// multiplying 'x' by 3 in GF(2^8)
+++#define SSE2NEON_AES_F3(x) (SSE2NEON_AES_F2(x) ^ x)
+++#define SSE2NEON_AES_U0(p) \
+++ SSE2NEON_AES_B2W(SSE2NEON_AES_F2(p), p, p, SSE2NEON_AES_F3(p))
+++#define SSE2NEON_AES_U1(p) \
+++ SSE2NEON_AES_B2W(SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p, p)
+++#define SSE2NEON_AES_U2(p) \
+++ SSE2NEON_AES_B2W(p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p), p)
+++#define SSE2NEON_AES_U3(p) \
+++ SSE2NEON_AES_B2W(p, p, SSE2NEON_AES_F3(p), SSE2NEON_AES_F2(p))
+++
+++ // this generates a table containing every possible permutation of
+++ // shift_rows() and sub_bytes() with mix_columns().
+++ static const uint32_t ALIGN_STRUCT(16) aes_table[4][256] = {
+++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U0),
+++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U1),
+++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U2),
+++ SSE2NEON_AES_SBOX(SSE2NEON_AES_U3),
+++ };
+++#undef SSE2NEON_AES_B2W
+++#undef SSE2NEON_AES_F2
+++#undef SSE2NEON_AES_F3
+++#undef SSE2NEON_AES_U0
+++#undef SSE2NEON_AES_U1
+++#undef SSE2NEON_AES_U2
+++#undef SSE2NEON_AES_U3
+++
+++ uint32_t x0 = _mm_cvtsi128_si32(a); // get a[31:0]
+++ uint32_t x1 =
+++ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55)); // get a[63:32]
+++ uint32_t x2 =
+++ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xAA)); // get a[95:64]
+++ uint32_t x3 =
+++ _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF)); // get a[127:96]
+++
+++ // finish the modulo addition step in mix_columns()
+++ __m128i out = _mm_set_epi32(
+++ (aes_table[0][x3 & 0xff] ^ aes_table[1][(x0 >> 8) & 0xff] ^
+++ aes_table[2][(x1 >> 16) & 0xff] ^ aes_table[3][x2 >> 24]),
+++ (aes_table[0][x2 & 0xff] ^ aes_table[1][(x3 >> 8) & 0xff] ^
+++ aes_table[2][(x0 >> 16) & 0xff] ^ aes_table[3][x1 >> 24]),
+++ (aes_table[0][x1 & 0xff] ^ aes_table[1][(x2 >> 8) & 0xff] ^
+++ aes_table[2][(x3 >> 16) & 0xff] ^ aes_table[3][x0 >> 24]),
+++ (aes_table[0][x0 & 0xff] ^ aes_table[1][(x1 >> 8) & 0xff] ^
+++ aes_table[2][(x2 >> 16) & 0xff] ^ aes_table[3][x3 >> 24]));
+++
+++ return _mm_xor_si128(out, RoundKey);
+++#endif
+++}
+++
+++// Perform one round of an AES decryption flow on data (state) in a using the
+++// round key in RoundKey, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
+++FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const uint8_t inv_shift_rows[] = {
+++ 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
+++ 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
+++ };
+++ static const uint8_t ror32by8[] = {
+++ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+++ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
+++ };
+++
+++ uint8x16_t v;
+++ uint8x16_t w = vreinterpretq_u8_m128i(a);
+++
+++ // inverse shift rows
+++ w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
+++
+++ // inverse sub bytes
+++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
+++
+++ // inverse mix columns
+++ // multiplying 'v' by 4 in GF(2^8)
+++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+++ w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
+++ v ^= w;
+++ v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
+++
+++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) &
+++ 0x1b); // multiplying 'v' by 2 in GF(2^8)
+++ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+++ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+++
+++ // add round key
+++ return vreinterpretq_m128i_u8(w) ^ RoundKey;
+++
+++#else /* ARMv7-A NEON implementation */
+++ /* FIXME: optimize for NEON */
+++ uint8_t i, e, f, g, h, v[4][4];
+++ uint8_t *_a = (uint8_t *) &a;
+++ for (i = 0; i < 16; ++i) {
+++ v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
+++ }
+++
+++ // inverse mix columns
+++ for (i = 0; i < 4; ++i) {
+++ e = v[i][0];
+++ f = v[i][1];
+++ g = v[i][2];
+++ h = v[i][3];
+++
+++ v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
+++ SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
+++ v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
+++ SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
+++ v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
+++ SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
+++ v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
+++ SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
+++ }
+++
+++ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
+++#endif
+++}
+++
+++// Perform the last round of an AES encryption flow on data (state) in a using
+++// the round key in RoundKey, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
+++FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const uint8_t shift_rows[] = {
+++ 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
+++ 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
+++ };
+++
+++ uint8x16_t v;
+++ uint8x16_t w = vreinterpretq_u8_m128i(a);
+++
+++ // shift rows
+++ w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
+++
+++ // sub bytes
+++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), w);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), w - 0x40);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), w - 0x80);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), w - 0xc0);
+++
+++ // add round key
+++ return vreinterpretq_m128i_u8(v) ^ RoundKey;
+++
+++#else /* ARMv7-A implementation */
+++ uint8_t v[16] = {
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 0)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 5)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 10)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 15)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 4)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 9)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 14)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 3)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 8)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 13)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 2)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 7)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 12)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 1)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 6)],
+++ _sse2neon_sbox[vgetq_lane_u8(vreinterpretq_u8_m128i(a), 11)],
+++ };
+++
+++ return vreinterpretq_m128i_u8(vld1q_u8(v)) ^ RoundKey;
+++#endif
+++}
+++
+++// Perform the last round of an AES decryption flow on data (state) in a using
+++// the round key in RoundKey, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
+++FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const uint8_t inv_shift_rows[] = {
+++ 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb,
+++ 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3,
+++ };
+++
+++ uint8x16_t v;
+++ uint8x16_t w = vreinterpretq_u8_m128i(a);
+++
+++ // inverse shift rows
+++ w = vqtbl1q_u8(w, vld1q_u8(inv_shift_rows));
+++
+++ // inverse sub bytes
+++ v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_rsbox), w);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x40), w - 0x40);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0x80), w - 0x80);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_rsbox + 0xc0), w - 0xc0);
+++
+++ // add round key
+++ return vreinterpretq_m128i_u8(v) ^ RoundKey;
+++
+++#else /* ARMv7-A NEON implementation */
+++ /* FIXME: optimize for NEON */
+++ uint8_t v[4][4];
+++ uint8_t *_a = (uint8_t *) &a;
+++ for (int i = 0; i < 16; ++i) {
+++ v[((i / 4) + (i % 4)) % 4][i % 4] = _sse2neon_rsbox[_a[i]];
+++ }
+++
+++ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v)) ^ RoundKey;
+++#endif
+++}
+++
+++// Perform the InvMixColumns transformation on a and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
+++FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ static const uint8_t ror32by8[] = {
+++ 0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
+++ 0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
+++ };
+++ uint8x16_t v = vreinterpretq_u8_m128i(a);
+++ uint8x16_t w;
+++
+++ // multiplying 'v' by 4 in GF(2^8)
+++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+++ w = (w << 1) ^ (uint8x16_t) (((int8x16_t) w >> 7) & 0x1b);
+++ v ^= w;
+++ v ^= (uint8x16_t) vrev32q_u16((uint16x8_t) w);
+++
+++ // multiplying 'v' by 2 in GF(2^8)
+++ w = (v << 1) ^ (uint8x16_t) (((int8x16_t) v >> 7) & 0x1b);
+++ w ^= (uint8x16_t) vrev32q_u16((uint16x8_t) v);
+++ w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
+++ return vreinterpretq_m128i_u8(w);
+++
+++#else /* ARMv7-A NEON implementation */
+++ uint8_t i, e, f, g, h, v[4][4];
+++ vst1q_u8((uint8_t *) v, vreinterpretq_u8_m128i(a));
+++ for (i = 0; i < 4; ++i) {
+++ e = v[i][0];
+++ f = v[i][1];
+++ g = v[i][2];
+++ h = v[i][3];
+++
+++ v[i][0] = SSE2NEON_MULTIPLY(e, 0x0e) ^ SSE2NEON_MULTIPLY(f, 0x0b) ^
+++ SSE2NEON_MULTIPLY(g, 0x0d) ^ SSE2NEON_MULTIPLY(h, 0x09);
+++ v[i][1] = SSE2NEON_MULTIPLY(e, 0x09) ^ SSE2NEON_MULTIPLY(f, 0x0e) ^
+++ SSE2NEON_MULTIPLY(g, 0x0b) ^ SSE2NEON_MULTIPLY(h, 0x0d);
+++ v[i][2] = SSE2NEON_MULTIPLY(e, 0x0d) ^ SSE2NEON_MULTIPLY(f, 0x09) ^
+++ SSE2NEON_MULTIPLY(g, 0x0e) ^ SSE2NEON_MULTIPLY(h, 0x0b);
+++ v[i][3] = SSE2NEON_MULTIPLY(e, 0x0b) ^ SSE2NEON_MULTIPLY(f, 0x0d) ^
+++ SSE2NEON_MULTIPLY(g, 0x09) ^ SSE2NEON_MULTIPLY(h, 0x0e);
+++ }
+++
+++ return vreinterpretq_m128i_u8(vld1q_u8((uint8_t *) v));
+++#endif
+++}
+++
+++// Assist in expanding the AES cipher key by computing steps towards generating
+++// a round key for encryption cipher using data from a and an 8-bit round
+++// constant specified in imm8, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
+++//
+++// Emits the Advanced Encryption Standard (AES) instruction aeskeygenassist.
+++// This instruction generates a round key for AES encryption. See
+++// https://kazakov.life/2017/11/01/cryptocurrency-mining-on-ios-devices/
+++// for details.
+++FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint8x16_t _a = vreinterpretq_u8_m128i(a);
+++ uint8x16_t v = vqtbl4q_u8(_sse2neon_vld1q_u8_x4(_sse2neon_sbox), _a);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x40), _a - 0x40);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0x80), _a - 0x80);
+++ v = vqtbx4q_u8(v, _sse2neon_vld1q_u8_x4(_sse2neon_sbox + 0xc0), _a - 0xc0);
+++
+++ uint32x4_t v_u32 = vreinterpretq_u32_u8(v);
+++ uint32x4_t ror_v = vorrq_u32(vshrq_n_u32(v_u32, 8), vshlq_n_u32(v_u32, 24));
+++ uint32x4_t ror_xor_v = veorq_u32(ror_v, vdupq_n_u32(rcon));
+++
+++ return vreinterpretq_m128i_u32(vtrn2q_u32(v_u32, ror_xor_v));
+++
+++#else /* ARMv7-A NEON implementation */
+++ uint32_t X1 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0x55));
+++ uint32_t X3 = _mm_cvtsi128_si32(_mm_shuffle_epi32(a, 0xFF));
+++ for (int i = 0; i < 4; ++i) {
+++ ((uint8_t *) &X1)[i] = _sse2neon_sbox[((uint8_t *) &X1)[i]];
+++ ((uint8_t *) &X3)[i] = _sse2neon_sbox[((uint8_t *) &X3)[i]];
+++ }
+++ return _mm_set_epi32(((X3 >> 8) | (X3 << 24)) ^ rcon, X3,
+++ ((X1 >> 8) | (X1 << 24)) ^ rcon, X1);
+++#endif
+++}
+++#undef SSE2NEON_AES_SBOX
+++#undef SSE2NEON_AES_RSBOX
+++
+++#if !defined(__aarch64__) && !defined(_M_ARM64)
+++#undef SSE2NEON_XT
+++#undef SSE2NEON_MULTIPLY
+++#endif
+++
+++#else /* __ARM_FEATURE_CRYPTO */
+++// Implements equivalent of 'aesenc' by combining AESE (with an empty key) and
+++// AESMC and then manually applying the real key as an xor operation. This
+++// unfortunately means an additional xor op; the compiler should be able to
+++// optimize this away for repeated calls however. See
+++// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
+++// for more details.
+++FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
+++{
+++ return vreinterpretq_m128i_u8(veorq_u8(
+++ vaesmcq_u8(vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+++ vreinterpretq_u8_m128i(b)));
+++}
+++
+++// Perform one round of an AES decryption flow on data (state) in a using the
+++// round key in RoundKey, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdec_si128
+++FORCE_INLINE __m128i _mm_aesdec_si128(__m128i a, __m128i RoundKey)
+++{
+++ return vreinterpretq_m128i_u8(veorq_u8(
+++ vaesimcq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+++ vreinterpretq_u8_m128i(RoundKey)));
+++}
+++
+++// Perform the last round of an AES encryption flow on data (state) in a using
+++// the round key in RoundKey, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesenclast_si128
+++FORCE_INLINE __m128i _mm_aesenclast_si128(__m128i a, __m128i RoundKey)
+++{
+++ return _mm_xor_si128(vreinterpretq_m128i_u8(vaeseq_u8(
+++ vreinterpretq_u8_m128i(a), vdupq_n_u8(0))),
+++ RoundKey);
+++}
+++
+++// Perform the last round of an AES decryption flow on data (state) in a using
+++// the round key in RoundKey, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesdeclast_si128
+++FORCE_INLINE __m128i _mm_aesdeclast_si128(__m128i a, __m128i RoundKey)
+++{
+++ return vreinterpretq_m128i_u8(
+++ veorq_u8(vaesdq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0)),
+++ vreinterpretq_u8_m128i(RoundKey)));
+++}
+++
+++// Perform the InvMixColumns transformation on a and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aesimc_si128
+++FORCE_INLINE __m128i _mm_aesimc_si128(__m128i a)
+++{
+++ return vreinterpretq_m128i_u8(vaesimcq_u8(vreinterpretq_u8_m128i(a)));
+++}
+++
+++// Assist in expanding the AES cipher key by computing steps towards generating
+++// a round key for encryption cipher using data from a and an 8-bit round
+++// constant specified in imm8, and store the result in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_aeskeygenassist_si128
+++FORCE_INLINE __m128i _mm_aeskeygenassist_si128(__m128i a, const int rcon)
+++{
+++ // AESE does ShiftRows and SubBytes on A
+++ uint8x16_t u8 = vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0));
+++
+++#ifndef _MSC_VER
+++ uint8x16_t dest = {
+++ // Undo ShiftRows step from AESE and extract X1 and X3
+++ u8[0x4], u8[0x1], u8[0xE], u8[0xB], // SubBytes(X1)
+++ u8[0x1], u8[0xE], u8[0xB], u8[0x4], // ROT(SubBytes(X1))
+++ u8[0xC], u8[0x9], u8[0x6], u8[0x3], // SubBytes(X3)
+++ u8[0x9], u8[0x6], u8[0x3], u8[0xC], // ROT(SubBytes(X3))
+++ };
+++ uint32x4_t r = {0, (unsigned) rcon, 0, (unsigned) rcon};
+++ return vreinterpretq_m128i_u8(dest) ^ vreinterpretq_m128i_u32(r);
+++#else
+++ // We have to do this hack because MSVC is strictly adhering to the CPP
+++ // standard, in particular C++03 8.5.1 sub-section 15, which states that
+++ // unions must be initialized by their first member type.
+++
+++ // As per the Windows ARM64 ABI, it is always little endian, so this works
+++ __n128 dest{
+++ ((uint64_t) u8.n128_u8[0x4] << 0) | ((uint64_t) u8.n128_u8[0x1] << 8) |
+++ ((uint64_t) u8.n128_u8[0xE] << 16) |
+++ ((uint64_t) u8.n128_u8[0xB] << 24) |
+++ ((uint64_t) u8.n128_u8[0x1] << 32) |
+++ ((uint64_t) u8.n128_u8[0xE] << 40) |
+++ ((uint64_t) u8.n128_u8[0xB] << 48) |
+++ ((uint64_t) u8.n128_u8[0x4] << 56),
+++ ((uint64_t) u8.n128_u8[0xC] << 0) | ((uint64_t) u8.n128_u8[0x9] << 8) |
+++ ((uint64_t) u8.n128_u8[0x6] << 16) |
+++ ((uint64_t) u8.n128_u8[0x3] << 24) |
+++ ((uint64_t) u8.n128_u8[0x9] << 32) |
+++ ((uint64_t) u8.n128_u8[0x6] << 40) |
+++ ((uint64_t) u8.n128_u8[0x3] << 48) |
+++ ((uint64_t) u8.n128_u8[0xC] << 56)};
+++
+++ dest.n128_u32[1] = dest.n128_u32[1] ^ rcon;
+++ dest.n128_u32[3] = dest.n128_u32[3] ^ rcon;
+++
+++ return dest;
+++#endif
+++}
+++#endif
+++
+++/* Others */
+++
+++// Perform a carry-less multiplication of two 64-bit integers, selected from a
+++// and b according to imm8, and store the results in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_clmulepi64_si128
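+++// Bit 0 of imm selects the low (0) or high (1) 64-bit half of a, and bit 4
+++// selects the half of b, as the switch below reflects.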
+++FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
+++{
+++ uint64x2_t a = vreinterpretq_u64_m128i(_a);
+++ uint64x2_t b = vreinterpretq_u64_m128i(_b);
+++ switch (imm & 0x11) {
+++ case 0x00:
+++ return vreinterpretq_m128i_u64(
+++ _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
+++ case 0x01:
+++ return vreinterpretq_m128i_u64(
+++ _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
+++ case 0x10:
+++ return vreinterpretq_m128i_u64(
+++ _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
+++ case 0x11:
+++ return vreinterpretq_m128i_u64(
+++ _sse2neon_vmull_p64(vget_high_u64(a), vget_high_u64(b)));
+++ default:
+++ abort();
+++ }
+++}
+++
+++FORCE_INLINE unsigned int _sse2neon_mm_get_denormals_zero_mode(void)
+++{
+++ union {
+++ fpcr_bitfield field;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t value;
+++#else
+++ uint32_t value;
+++#endif
+++ } r;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ r.value = _sse2neon_get_fpcr();
+++#else
+++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+++#endif
+++
+++ return r.field.bit24 ? _MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF;
+++}
+++
+++// Count the number of bits set to 1 in unsigned 32-bit integer a, and
+++// return that count in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u32
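+++// For example, _mm_popcnt_u32(0xF0F0F0F0) returns 16.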
+++FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++#if __has_builtin(__builtin_popcount)
+++ return __builtin_popcount(a);
+++#elif defined(_MSC_VER)
+++ return _CountOneBits(a);
+++#else
+++ return (int) vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t) a)));
+++#endif
+++#else
+++ uint32_t count = 0;
+++ uint8x8_t input_val, count8x8_val;
+++ uint16x4_t count16x4_val;
+++ uint32x2_t count32x2_val;
+++
+++ input_val = vld1_u8((uint8_t *) &a);
+++ count8x8_val = vcnt_u8(input_val);
+++ count16x4_val = vpaddl_u8(count8x8_val);
+++ count32x2_val = vpaddl_u16(count16x4_val);
+++
+++ vst1_u32(&count, count32x2_val);
+++ return count;
+++#endif
+++}
+++
+++// Count the number of bits set to 1 in unsigned 64-bit integer a, and
+++// return that count in dst.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=_mm_popcnt_u64
+++FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++#if __has_builtin(__builtin_popcountll)
+++ return __builtin_popcountll(a);
+++#elif defined(_MSC_VER)
+++ return _CountOneBits64(a);
+++#else
+++ return (int64_t) vaddlv_u8(vcnt_u8(vcreate_u8(a)));
+++#endif
+++#else
+++ uint64_t count = 0;
+++ uint8x8_t input_val, count8x8_val;
+++ uint16x4_t count16x4_val;
+++ uint32x2_t count32x2_val;
+++ uint64x1_t count64x1_val;
+++
+++ input_val = vld1_u8((uint8_t *) &a);
+++ count8x8_val = vcnt_u8(input_val);
+++ count16x4_val = vpaddl_u8(count8x8_val);
+++ count32x2_val = vpaddl_u16(count16x4_val);
+++ count64x1_val = vpaddl_u32(count32x2_val);
+++ vst1_u64(&count, count64x1_val);
+++ return count;
+++#endif
+++}
+++
+++FORCE_INLINE void _sse2neon_mm_set_denormals_zero_mode(unsigned int flag)
+++{
+++ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
+++ // regardless of the value of the FZ bit.
+++ union {
+++ fpcr_bitfield field;
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t value;
+++#else
+++ uint32_t value;
+++#endif
+++ } r;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ r.value = _sse2neon_get_fpcr();
+++#else
+++ __asm__ __volatile__("vmrs %0, FPSCR" : "=r"(r.value)); /* read */
+++#endif
+++
+++ r.field.bit24 = (flag & _MM_DENORMALS_ZERO_MASK) == _MM_DENORMALS_ZERO_ON;
+++
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ _sse2neon_set_fpcr(r.value);
+++#else
+++ __asm__ __volatile__("vmsr FPSCR, %0" ::"r"(r)); /* write */
+++#endif
+++}
+++
+++// Return the current 64-bit value of the processor's time-stamp counter.
+++// https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=rdtsc
+++FORCE_INLINE uint64_t _rdtsc(void)
+++{
+++#if defined(__aarch64__) || defined(_M_ARM64)
+++ uint64_t val;
+++
+++ /* According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the
+++ * system counter is at least 56 bits wide; from Armv8.6, the counter
+++ * must be 64 bits wide. So the system counter could be less than 64
+++ * bits wide, which is indicated by the 'cap_user_time_short' flag
+++ * being true.
+++ */
+++#if defined(_MSC_VER)
+++ val = _ReadStatusReg(ARM64_SYSREG(3, 3, 14, 0, 2));
+++#else
+++ __asm__ __volatile__("mrs %0, cntvct_el0" : "=r"(val));
+++#endif
+++
+++ return val;
+++#else
+++ uint32_t pmccntr, pmuseren, pmcntenset;
+++ // Read the user mode Performance Monitoring Unit (PMU)
+++ // User Enable Register (PMUSERENR) access permissions.
+++ __asm__ __volatile__("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
+++ if (pmuseren & 1) { // Allows reading PMUSERENR for user mode code.
+++ __asm__ __volatile__("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
+++ if (pmcntenset & 0x80000000UL) { // Is it counting?
+++ __asm__ __volatile__("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
+++ // The counter is set up to count every 64th cycle
+++ return (uint64_t) (pmccntr) << 6;
+++ }
+++ }
+++
+++ // Fall back to a syscall as we can't enable PMUSERENR in user mode.
+++ struct timeval tv;
+++ gettimeofday(&tv, NULL);
+++ return (uint64_t) (tv.tv_sec) * 1000000 + tv.tv_usec;
+++#endif
+++}
+++
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma pop_macro("ALIGN_STRUCT")
+++#pragma pop_macro("FORCE_INLINE")
+++#endif
+++
+++#if defined(__GNUC__) && !defined(__clang__)
+++#pragma GCC pop_options
+++#endif
+++
+++#endif
--- /dev/null
--- /dev/null
--- /dev/null
+++
+++Microsoft Visual Studio Solution File, Format Version 12.00
+++# Visual Studio Version 17
+++VisualStudioVersion = 17.3.32901.215
+++MinimumVisualStudioVersion = 10.0.40219.1
+++Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "sse2neon", "sse2neon.vcxproj", "{341BF194-865B-4DA6-8120-93173498E774}"
+++EndProject
+++Global
+++ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+++ Debug|ARM = Debug|ARM
+++ Debug|ARM64 = Debug|ARM64
+++ Release|ARM = Release|ARM
+++ Release|ARM64 = Release|ARM64
+++ EndGlobalSection
+++ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+++ {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM.ActiveCfg = Debug|ARM
+++ {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM.Build.0 = Debug|ARM
+++ {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM64.ActiveCfg = Debug|ARM64
+++ {341BF194-865B-4DA6-8120-93173498E774}.Debug|ARM64.Build.0 = Debug|ARM64
+++ {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM.ActiveCfg = Release|ARM
+++ {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM.Build.0 = Release|ARM
+++ {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM64.ActiveCfg = Release|ARM64
+++ {341BF194-865B-4DA6-8120-93173498E774}.Release|ARM64.Build.0 = Release|ARM64
+++ EndGlobalSection
+++ GlobalSection(SolutionProperties) = preSolution
+++ HideSolutionNode = FALSE
+++ EndGlobalSection
+++ GlobalSection(ExtensibilityGlobals) = postSolution
+++ SolutionGuid = {D503B299-AA05-4E05-A8D9-37C8D229ACB1}
+++ EndGlobalSection
+++EndGlobal
--- /dev/null
--- /dev/null
--- /dev/null
+++<?xml version="1.0" encoding="utf-8"?>
+++<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+++ <ItemGroup Label="ProjectConfigurations">
+++ <ProjectConfiguration Include="Debug|ARM">
+++ <Configuration>Debug</Configuration>
+++ <Platform>ARM</Platform>
+++ </ProjectConfiguration>
+++ <ProjectConfiguration Include="Debug|ARM64">
+++ <Configuration>Debug</Configuration>
+++ <Platform>ARM64</Platform>
+++ </ProjectConfiguration>
+++ <ProjectConfiguration Include="Release|ARM">
+++ <Configuration>Release</Configuration>
+++ <Platform>ARM</Platform>
+++ </ProjectConfiguration>
+++ <ProjectConfiguration Include="Release|ARM64">
+++ <Configuration>Release</Configuration>
+++ <Platform>ARM64</Platform>
+++ </ProjectConfiguration>
+++ </ItemGroup>
+++ <PropertyGroup Label="Globals">
+++ <VCProjectVersion>16.0</VCProjectVersion>
+++ <Keyword>Win32Proj</Keyword>
+++ <ProjectGuid>{341bf194-865b-4da6-8120-93173498e774}</ProjectGuid>
+++ <RootNamespace>sse2neon</RootNamespace>
+++ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+++ </PropertyGroup>
+++ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM'" Label="Configuration">
+++ <ConfigurationType>Application</ConfigurationType>
+++ <UseDebugLibraries>true</UseDebugLibraries>
+++ <PlatformToolset>v143</PlatformToolset>
+++ <CharacterSet>Unicode</CharacterSet>
+++ </PropertyGroup>
+++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM'" Label="Configuration">
+++ <ConfigurationType>Application</ConfigurationType>
+++ <UseDebugLibraries>false</UseDebugLibraries>
+++ <PlatformToolset>v143</PlatformToolset>
+++ <WholeProgramOptimization>true</WholeProgramOptimization>
+++ <CharacterSet>Unicode</CharacterSet>
+++ </PropertyGroup>
+++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'" Label="Configuration">
+++ <ConfigurationType>Application</ConfigurationType>
+++ <UseDebugLibraries>true</UseDebugLibraries>
+++ <PlatformToolset>v143</PlatformToolset>
+++ <CharacterSet>Unicode</CharacterSet>
+++ </PropertyGroup>
+++ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'" Label="Configuration">
+++ <ConfigurationType>Application</ConfigurationType>
+++ <UseDebugLibraries>false</UseDebugLibraries>
+++ <PlatformToolset>v143</PlatformToolset>
+++ <WholeProgramOptimization>true</WholeProgramOptimization>
+++ <CharacterSet>Unicode</CharacterSet>
+++ </PropertyGroup>
+++ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+++ <ImportGroup Label="ExtensionSettings">
+++ </ImportGroup>
+++ <ImportGroup Label="Shared">
+++ </ImportGroup>
+++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM'" Label="PropertySheets">
+++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+++ </ImportGroup>
+++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM'" Label="PropertySheets">
+++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+++ </ImportGroup>
+++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'" Label="PropertySheets">
+++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+++ </ImportGroup>
+++ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'" Label="PropertySheets">
+++ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+++ </ImportGroup>
+++ <PropertyGroup Label="UserMacros" />
+++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM'">
+++ <ClCompile>
+++ <WarningLevel>Level3</WarningLevel>
+++ <SDLCheck>true</SDLCheck>
+++ <PreprocessorDefinitions>__i386__;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+++ <ConformanceMode>true</ConformanceMode>
+++ <AdditionalIncludeDirectories>.;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+++ </ClCompile>
+++ <Link>
+++ <SubSystem>Console</SubSystem>
+++ <GenerateDebugInformation>true</GenerateDebugInformation>
+++ </Link>
+++ </ItemDefinitionGroup>
+++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM'">
+++ <ClCompile>
+++ <WarningLevel>Level3</WarningLevel>
+++ <FunctionLevelLinking>true</FunctionLevelLinking>
+++ <IntrinsicFunctions>true</IntrinsicFunctions>
+++ <SDLCheck>true</SDLCheck>
+++ <PreprocessorDefinitions>__i386__;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+++ <ConformanceMode>true</ConformanceMode>
+++ <AdditionalIncludeDirectories>.;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+++ </ClCompile>
+++ <Link>
+++ <SubSystem>Console</SubSystem>
+++ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+++ <OptimizeReferences>true</OptimizeReferences>
+++ <GenerateDebugInformation>true</GenerateDebugInformation>
+++ </Link>
+++ </ItemDefinitionGroup>
+++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">
+++ <ClCompile>
+++ <WarningLevel>Level3</WarningLevel>
+++ <SDLCheck>true</SDLCheck>
+++ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+++ <ConformanceMode>true</ConformanceMode>
+++ <AdditionalIncludeDirectories>.;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+++ <AdditionalOptions>/Zc:preprocessor</AdditionalOptions>
+++ <IntrinsicFunctions>true</IntrinsicFunctions>
+++ </ClCompile>
+++ <Link>
+++ <SubSystem>Console</SubSystem>
+++ <GenerateDebugInformation>true</GenerateDebugInformation>
+++ </Link>
+++ </ItemDefinitionGroup>
+++ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'">
+++ <ClCompile>
+++ <WarningLevel>Level3</WarningLevel>
+++ <FunctionLevelLinking>true</FunctionLevelLinking>
+++ <IntrinsicFunctions>true</IntrinsicFunctions>
+++ <SDLCheck>true</SDLCheck>
+++ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+++ <ConformanceMode>true</ConformanceMode>
+++ <AdditionalIncludeDirectories>.;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+++ <AdditionalOptions>/Zc:preprocessor %(AdditionalOptions)</AdditionalOptions>
+++ </ClCompile>
+++ <Link>
+++ <SubSystem>Console</SubSystem>
+++ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+++ <OptimizeReferences>true</OptimizeReferences>
+++ <GenerateDebugInformation>true</GenerateDebugInformation>
+++ </Link>
+++ </ItemDefinitionGroup>
+++ <ItemGroup>
+++ <ClCompile Include="tests\binding.cpp" />
+++ <ClCompile Include="tests\common.cpp" />
+++ <ClCompile Include="tests\impl.cpp" />
+++ <ClCompile Include="tests\main.cpp" />
+++ </ItemGroup>
+++ <ItemGroup>
+++ <ClInclude Include="sse2neon.h" />
+++ <ClInclude Include="tests\binding.h" />
+++ <ClInclude Include="tests\common.h" />
+++ <ClInclude Include="tests\impl.h" />
+++ </ItemGroup>
+++ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+++ <ImportGroup Label="ExtensionTargets">
+++ </ImportGroup>
+++</Project>
--- /dev/null
--- /dev/null
--- /dev/null
+++<?xml version="1.0" encoding="utf-8"?>
+++<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+++ <ItemGroup>
+++ <Filter Include="Source Files">
+++ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+++ <Extensions>cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+++ </Filter>
+++ <Filter Include="Header Files">
+++ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+++ <Extensions>h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd</Extensions>
+++ </Filter>
+++ <Filter Include="Resource Files">
+++ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+++ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+++ </Filter>
+++ </ItemGroup>
+++ <ItemGroup>
+++ <ClCompile Include="tests\binding.cpp">
+++ <Filter>Source Files</Filter>
+++ </ClCompile>
+++ <ClCompile Include="tests\common.cpp">
+++ <Filter>Source Files</Filter>
+++ </ClCompile>
+++ <ClCompile Include="tests\impl.cpp">
+++ <Filter>Source Files</Filter>
+++ </ClCompile>
+++ <ClCompile Include="tests\main.cpp">
+++ <Filter>Source Files</Filter>
+++ </ClCompile>
+++ </ItemGroup>
+++ <ItemGroup>
+++ <ClInclude Include="tests\binding.h">
+++ <Filter>Header Files</Filter>
+++ </ClInclude>
+++ <ClInclude Include="tests\common.h">
+++ <Filter>Header Files</Filter>
+++ </ClInclude>
+++ <ClInclude Include="tests\impl.h">
+++ <Filter>Header Files</Filter>
+++ </ClInclude>
+++ <ClInclude Include="sse2neon.h">
+++ <Filter>Header Files</Filter>
+++ </ClInclude>
+++ </ItemGroup>
+++</Project>
--- /dev/null
--- /dev/null
--- /dev/null
+++# Test Suite for SSE2NEON
+++
+++:warning: **Warning: The test suite assumes a little-endian architecture.**
+++
+++## Add More Test Items
+++Once an intrinsic's conversion is implemented, the corresponding test can be added with the following steps:
+++
+++* File `tests/impl.h`
+++
+++ Add the intrinsic under the `INTRIN_LIST` macro. The naming convention
+++ should be `mm_xxx`.
+++ Place it in the correct category, in alphabetical order.
+++ The categories can be referenced from the [Intel Intrinsics Guide](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html).
+++
+++* File `tests/impl.cpp`
+++ ```c
+++ result_t test_mm_xxx()
+++ {
+++ // The C implementation
+++ ...
+++
+++ // The Neon implementation
+++ ret = _mm_xxx();
+++
+++ // Compare the result of two implementations and return either
+++ // TEST_SUCCESS, TEST_FAIL, or TEST_UNIMPL
+++ ...
+++ }
+++ ```
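+++
+++ For instance, a hypothetical test body for a packed-float addition, written
+++ here purely for illustration (real tests take their inputs from the
+++ framework's test data buffers rather than local arrays), could use the
+++ `validateFloat()` helper from `tests/common.h`:
+++ ```c
+++ result_t test_mm_add_ps()
+++ {
+++     float a[4] = {1.0f, 2.0f, 3.0f, 4.0f};
+++     float b[4] = {5.0f, 6.0f, 7.0f, 8.0f};
+++
+++     // The C implementation
+++     float expected[4];
+++     for (int i = 0; i < 4; i++)
+++         expected[i] = a[i] + b[i];
+++
+++     // The Neon implementation
+++     __m128 ret = _mm_add_ps(_mm_loadu_ps(a), _mm_loadu_ps(b));
+++
+++     // Compare the result of the two implementations
+++     return validateFloat(ret, expected[0], expected[1], expected[2],
+++                          expected[3]);
+++ }
+++ ```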
--- /dev/null
--- /dev/null
--- /dev/null
+++#include "binding.h"
+++
+++#include <stdio.h>
+++#include <stdlib.h>
+++
+++namespace SSE2NEON
+++{
+++void *platformAlignedAlloc(size_t size)
+++{
+++ void *address;
+++#if defined(_WIN32)
+++ address = _aligned_malloc(size, 16);
+++ if (!address) {
+++#else
+++ int ret = posix_memalign(&address, 16, size);
+++ if (ret != 0) {
+++#endif
+++ fprintf(stderr, "Error at File %s line number %d\n", __FILE__,
+++ __LINE__);
+++ exit(EXIT_FAILURE);
+++ }
+++ return address;
+++}
+++
+++void platformAlignedFree(void *ptr)
+++{
+++#if defined(_WIN32)
+++ _aligned_free(ptr);
+++#else
+++ free(ptr);
+++#endif
+++}
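+++
+++// Illustrative usage of the two helpers above: buffers obtained from
+++// platformAlignedAlloc() are 16-byte aligned and must be released with
+++// platformAlignedFree(), e.g.
+++//
+++//     void *buf = SSE2NEON::platformAlignedAlloc(64 * sizeof(float));
+++//     /* ... use buf with 16-byte-aligned loads/stores ... */
+++//     SSE2NEON::platformAlignedFree(buf);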
+++
+++
+++} // namespace SSE2NEON
--- /dev/null
--- /dev/null
--- /dev/null
+++#ifndef SSE2NEONBINDING_H
+++#define SSE2NEONBINDING_H
+++
+++#include <stdlib.h>
+++
+++// The SSE2NEON unit tests run both within our own internal project
+++// and within the open source framework.
+++// This header file abstracts any distinctions between
+++// those two build environments.
+++//
+++// Initially, this covers how 16-byte-aligned memory is allocated.
+++namespace SSE2NEON
+++{
+++void *platformAlignedAlloc(size_t size);
+++void platformAlignedFree(void *ptr);
+++
+++} // namespace SSE2NEON
+++
+++#endif
--- /dev/null
--- /dev/null
--- /dev/null
+++#include "common.h"
+++#include <cmath>
+++#include <cstdint>
+++
+++namespace SSE2NEON
+++{
+++int32_t NaN = ~0;
+++int64_t NaN64 = ~0;
+++
+++result_t validateInt64(__m128i a, int64_t i0, int64_t i1)
+++{
+++ const int64_t *t = (const int64_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ ASSERT_RETURN(t[1] == i1);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt64(__m64 a, int64_t i0)
+++{
+++ const int64_t *t = (const int64_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt64(__m128i a, uint64_t u0, uint64_t u1)
+++{
+++ const uint64_t *t = (const uint64_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt64(__m64 a, uint64_t u0)
+++{
+++ const uint64_t *t = (const uint64_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt32(__m128i a,
+++ int32_t i0,
+++ int32_t i1,
+++ int32_t i2,
+++ int32_t i3)
+++{
+++ const int32_t *t = (const int32_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ ASSERT_RETURN(t[1] == i1);
+++ ASSERT_RETURN(t[2] == i2);
+++ ASSERT_RETURN(t[3] == i3);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt32(__m128i a,
+++ uint32_t u0,
+++ uint32_t u1,
+++ uint32_t u2,
+++ uint32_t u3)
+++{
+++ const uint32_t *t = (const uint32_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ ASSERT_RETURN(t[2] == u2);
+++ ASSERT_RETURN(t[3] == u3);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt32(__m64 a, uint32_t u0, uint32_t u1)
+++{
+++ const uint32_t *t = (const uint32_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt16(__m128i a,
+++ int16_t i0,
+++ int16_t i1,
+++ int16_t i2,
+++ int16_t i3,
+++ int16_t i4,
+++ int16_t i5,
+++ int16_t i6,
+++ int16_t i7)
+++{
+++ const int16_t *t = (const int16_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ ASSERT_RETURN(t[1] == i1);
+++ ASSERT_RETURN(t[2] == i2);
+++ ASSERT_RETURN(t[3] == i3);
+++ ASSERT_RETURN(t[4] == i4);
+++ ASSERT_RETURN(t[5] == i5);
+++ ASSERT_RETURN(t[6] == i6);
+++ ASSERT_RETURN(t[7] == i7);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt16(__m64 a, int16_t i0, int16_t i1, int16_t i2, int16_t i3)
+++{
+++ const int16_t *t = (const int16_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ ASSERT_RETURN(t[1] == i1);
+++ ASSERT_RETURN(t[2] == i2);
+++ ASSERT_RETURN(t[3] == i3);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt16(__m128i a,
+++ uint16_t u0,
+++ uint16_t u1,
+++ uint16_t u2,
+++ uint16_t u3,
+++ uint16_t u4,
+++ uint16_t u5,
+++ uint16_t u6,
+++ uint16_t u7)
+++{
+++ const uint16_t *t = (const uint16_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ ASSERT_RETURN(t[2] == u2);
+++ ASSERT_RETURN(t[3] == u3);
+++ ASSERT_RETURN(t[4] == u4);
+++ ASSERT_RETURN(t[5] == u5);
+++ ASSERT_RETURN(t[6] == u6);
+++ ASSERT_RETURN(t[7] == u7);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt32(__m64 a, int32_t u0, int32_t u1)
+++{
+++ const int32_t *t = (const int32_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt16(__m64 a,
+++ uint16_t u0,
+++ uint16_t u1,
+++ uint16_t u2,
+++ uint16_t u3)
+++{
+++ const uint16_t *t = (const uint16_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ ASSERT_RETURN(t[2] == u2);
+++ ASSERT_RETURN(t[3] == u3);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt8(__m128i a,
+++ int8_t i0,
+++ int8_t i1,
+++ int8_t i2,
+++ int8_t i3,
+++ int8_t i4,
+++ int8_t i5,
+++ int8_t i6,
+++ int8_t i7,
+++ int8_t i8,
+++ int8_t i9,
+++ int8_t i10,
+++ int8_t i11,
+++ int8_t i12,
+++ int8_t i13,
+++ int8_t i14,
+++ int8_t i15)
+++{
+++ const int8_t *t = (const int8_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ ASSERT_RETURN(t[1] == i1);
+++ ASSERT_RETURN(t[2] == i2);
+++ ASSERT_RETURN(t[3] == i3);
+++ ASSERT_RETURN(t[4] == i4);
+++ ASSERT_RETURN(t[5] == i5);
+++ ASSERT_RETURN(t[6] == i6);
+++ ASSERT_RETURN(t[7] == i7);
+++ ASSERT_RETURN(t[8] == i8);
+++ ASSERT_RETURN(t[9] == i9);
+++ ASSERT_RETURN(t[10] == i10);
+++ ASSERT_RETURN(t[11] == i11);
+++ ASSERT_RETURN(t[12] == i12);
+++ ASSERT_RETURN(t[13] == i13);
+++ ASSERT_RETURN(t[14] == i14);
+++ ASSERT_RETURN(t[15] == i15);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateInt8(__m64 a,
+++ int8_t i0,
+++ int8_t i1,
+++ int8_t i2,
+++ int8_t i3,
+++ int8_t i4,
+++ int8_t i5,
+++ int8_t i6,
+++ int8_t i7)
+++{
+++ const int8_t *t = (const int8_t *) &a;
+++ ASSERT_RETURN(t[0] == i0);
+++ ASSERT_RETURN(t[1] == i1);
+++ ASSERT_RETURN(t[2] == i2);
+++ ASSERT_RETURN(t[3] == i3);
+++ ASSERT_RETURN(t[4] == i4);
+++ ASSERT_RETURN(t[5] == i5);
+++ ASSERT_RETURN(t[6] == i6);
+++ ASSERT_RETURN(t[7] == i7);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt8(__m128i a,
+++ uint8_t u0,
+++ uint8_t u1,
+++ uint8_t u2,
+++ uint8_t u3,
+++ uint8_t u4,
+++ uint8_t u5,
+++ uint8_t u6,
+++ uint8_t u7,
+++ uint8_t u8,
+++ uint8_t u9,
+++ uint8_t u10,
+++ uint8_t u11,
+++ uint8_t u12,
+++ uint8_t u13,
+++ uint8_t u14,
+++ uint8_t u15)
+++{
+++ const uint8_t *t = (const uint8_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ ASSERT_RETURN(t[2] == u2);
+++ ASSERT_RETURN(t[3] == u3);
+++ ASSERT_RETURN(t[4] == u4);
+++ ASSERT_RETURN(t[5] == u5);
+++ ASSERT_RETURN(t[6] == u6);
+++ ASSERT_RETURN(t[7] == u7);
+++ ASSERT_RETURN(t[8] == u8);
+++ ASSERT_RETURN(t[9] == u9);
+++ ASSERT_RETURN(t[10] == u10);
+++ ASSERT_RETURN(t[11] == u11);
+++ ASSERT_RETURN(t[12] == u12);
+++ ASSERT_RETURN(t[13] == u13);
+++ ASSERT_RETURN(t[14] == u14);
+++ ASSERT_RETURN(t[15] == u15);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateUInt8(__m64 a,
+++ uint8_t u0,
+++ uint8_t u1,
+++ uint8_t u2,
+++ uint8_t u3,
+++ uint8_t u4,
+++ uint8_t u5,
+++ uint8_t u6,
+++ uint8_t u7)
+++{
+++ const uint8_t *t = (const uint8_t *) &a;
+++ ASSERT_RETURN(t[0] == u0);
+++ ASSERT_RETURN(t[1] == u1);
+++ ASSERT_RETURN(t[2] == u2);
+++ ASSERT_RETURN(t[3] == u3);
+++ ASSERT_RETURN(t[4] == u4);
+++ ASSERT_RETURN(t[5] == u5);
+++ ASSERT_RETURN(t[6] == u6);
+++ ASSERT_RETURN(t[7] == u7);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateSingleFloatPair(float a, float b)
+++{
+++ const uint32_t *ua = (const uint32_t *) &a;
+++ const uint32_t *ub = (const uint32_t *) &b;
+++ // We do an integer (binary) compare rather than a
+++ // floating point compare to take NaNs and infinities
+++ // into account as well.
+++ return (*ua) == (*ub) ? TEST_SUCCESS : TEST_FAIL;
+++}
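+++
+++// For example, if a and b both hold the same NaN value, `a == b` would
+++// evaluate to false, while the bit-pattern comparison above succeeds.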
+++
+++result_t validateSingleDoublePair(double a, double b)
+++{
+++ const uint64_t *ua = (const uint64_t *) &a;
+++ const uint64_t *ub = (const uint64_t *) &b;
+++ // We do an integer (binary) compare rather than a
+++ // floating point compare to take NaNs and infinities
+++ // into account as well.
+++
+++ if (std::isnan(a) && std::isnan(b)) {
+++ return TEST_SUCCESS;
+++ }
+++
+++ return (*ua) == (*ub) ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t validateFloat(__m128 a, float f0, float f1, float f2, float f3)
+++{
+++ const float *t = (const float *) &a;
+++ ASSERT_RETURN(validateSingleFloatPair(t[0], f0));
+++ ASSERT_RETURN(validateSingleFloatPair(t[1], f1));
+++ ASSERT_RETURN(validateSingleFloatPair(t[2], f2));
+++ ASSERT_RETURN(validateSingleFloatPair(t[3], f3));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateFloatEpsilon(__m128 a,
+++ float f0,
+++ float f1,
+++ float f2,
+++ float f3,
+++ float epsilon)
+++{
+++ const float *t = (const float *) &a;
+++ float df0 = fabsf(t[0] - f0);
+++ float df1 = fabsf(t[1] - f1);
+++ float df2 = fabsf(t[2] - f2);
+++ float df3 = fabsf(t[3] - f3);
+++
+++ // Due to floating-point error, subtracting floating-point numbers that
+++ // are NaN or zero usually produces an erroneous result. Therefore, we
+++ // directly define the difference of the two values to be zero when both
+++ // are NaN or both are zero.
+++ if ((std::isnan(t[0]) && std::isnan(f0)) || (t[0] == 0 && f0 == 0)) {
+++ df0 = 0;
+++ }
+++
+++ if ((std::isnan(t[1]) && std::isnan(f1)) || (t[1] == 0 && f1 == 0)) {
+++ df1 = 0;
+++ }
+++
+++ if ((std::isnan(t[2]) && std::isnan(f2)) || (t[2] == 0 && f2 == 0)) {
+++ df2 = 0;
+++ }
+++
+++ if ((std::isnan(t[3]) && std::isnan(f3)) || (t[3] == 0 && f3 == 0)) {
+++ df3 = 0;
+++ }
+++
+++ ASSERT_RETURN(df0 < epsilon);
+++ ASSERT_RETURN(df1 < epsilon);
+++ ASSERT_RETURN(df2 < epsilon);
+++ ASSERT_RETURN(df3 < epsilon);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateFloatError(__m128 a,
+++ float f0,
+++ float f1,
+++ float f2,
+++ float f3,
+++ float err)
+++{
+++ const float *t = (const float *) &a;
+++ float df0 = fabsf((t[0] - f0) / f0);
+++ float df1 = fabsf((t[1] - f1) / f1);
+++ float df2 = fabsf((t[2] - f2) / f2);
+++ float df3 = fabsf((t[3] - f3) / f3);
+++
+++ if ((std::isnan(t[0]) && std::isnan(f0)) || (t[0] == 0 && f0 == 0) ||
+++ (std::isinf(t[0]) && std::isinf(f0))) {
+++ df0 = 0;
+++ }
+++
+++ if ((std::isnan(t[1]) && std::isnan(f1)) || (t[1] == 0 && f1 == 0) ||
+++ (std::isinf(t[1]) && std::isinf(f1))) {
+++ df1 = 0;
+++ }
+++
+++ if ((std::isnan(t[2]) && std::isnan(f2)) || (t[2] == 0 && f2 == 0) ||
+++ (std::isinf(t[2]) && std::isinf(f2))) {
+++ df2 = 0;
+++ }
+++
+++ if ((std::isnan(t[3]) && std::isnan(f3)) || (t[3] == 0 && f3 == 0) ||
+++ (std::isinf(t[3]) && std::isinf(f3))) {
+++ df3 = 0;
+++ }
+++
+++ ASSERT_RETURN(df0 < err);
+++ ASSERT_RETURN(df1 < err);
+++ ASSERT_RETURN(df2 < err);
+++ ASSERT_RETURN(df3 < err);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateDouble(__m128d a, double d0, double d1)
+++{
+++ const double *t = (const double *) &a;
+++ ASSERT_RETURN(validateSingleDoublePair(t[0], d0));
+++ ASSERT_RETURN(validateSingleDoublePair(t[1], d1));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t validateFloatError(__m128d a, double d0, double d1, double err)
+++{
+++ const double *t = (const double *) &a;
+++ double td0 = fabs((t[0] - d0) / d0);
+++ double td1 = fabs((t[1] - d1) / d1);
+++
+++ if (std::isnan(t[0]) && std::isnan(d0)) {
+++ td0 = 0;
+++ }
+++
+++ if (std::isnan(t[1]) && std::isnan(d1)) {
+++ td1 = 0;
+++ }
+++
+++ ASSERT_RETURN(td0 < err);
+++ ASSERT_RETURN(td1 < err);
+++ return TEST_SUCCESS;
+++}
+++
+++} // namespace SSE2NEON
--- /dev/null
--- /dev/null
--- /dev/null
+++#ifndef SSE2NEONCOMMON_H
+++#define SSE2NEONCOMMON_H
+++#include <cstdint>
+++#if (defined(__aarch64__) || defined(_M_ARM64)) || defined(__arm__)
+++#include "sse2neon.h"
+++#elif defined(__x86_64__) || defined(__i386__)
+++#include <emmintrin.h>
+++#include <smmintrin.h>
+++#include <tmmintrin.h>
+++#include <wmmintrin.h>
+++#include <x86intrin.h>
+++#include <xmmintrin.h>
+++
+++// __int64 is defined in the Intrinsics Guide and maps to a different data
+++// type under different data models
+++#if !(defined(_WIN32) || defined(_WIN64) || defined(__int64))
+++#if (defined(__x86_64__) || defined(__i386__))
+++#define __int64 long long
+++#else
+++#define __int64 int64_t
+++#endif
+++#endif
+++
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma push_macro("ALIGN_STRUCT")
+++#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
+++#else
+++#define ALIGN_STRUCT(x) __declspec(align(x))
+++#endif
+++
+++typedef union ALIGN_STRUCT(16) SIMDVec {
+++ float m128_f32[4]; // as floats - DON'T USE. Added for convenience.
+++ int8_t m128_i8[16]; // as signed 8-bit integers.
+++ int16_t m128_i16[8]; // as signed 16-bit integers.
+++ int32_t m128_i32[4]; // as signed 32-bit integers.
+++ int64_t m128_i64[2]; // as signed 64-bit integers.
+++ uint8_t m128_u8[16]; // as unsigned 8-bit integers.
+++ uint16_t m128_u16[8]; // as unsigned 16-bit integers.
+++ uint32_t m128_u32[4]; // as unsigned 32-bit integers.
+++ uint64_t m128_u64[2]; // as unsigned 64-bit integers.
+++} SIMDVec;
+++
+++#if defined(__GNUC__) || defined(__clang__)
+++#pragma pop_macro("ALIGN_STRUCT")
+++#endif
+++
+++/* Tunable testing configuration for precise testing */
+++/* _mm_min|max_ps|ss|pd|sd */
+++#ifndef SSE2NEON_PRECISE_MINMAX
+++#define SSE2NEON_PRECISE_MINMAX (0)
+++#endif
+++#endif
+++
+++#define ASSERT_RETURN(x) \
+++ if (!(x)) \
+++ return TEST_FAIL;
+++
+++namespace SSE2NEON
+++{
+++enum result_t {
+++ TEST_SUCCESS = 1,
+++ TEST_FAIL = 0,
+++ TEST_UNIMPL = -1,
+++};
+++extern int32_t NaN;
+++extern int64_t NaN64;
+++#define ALL_BIT_1_32 (*(float *) &NaN)
+++#define ALL_BIT_1_64 (*(double *) &NaN64)
+++
+++template <typename T>
+++result_t validate128(T a, T b)
+++{
+++ const int32_t *t1 = (const int32_t *) &a;
+++ const int32_t *t2 = (const int32_t *) &b;
+++
+++ ASSERT_RETURN(t1[0] == t2[0]);
+++ ASSERT_RETURN(t1[1] == t2[1]);
+++ ASSERT_RETURN(t1[2] == t2[2]);
+++ ASSERT_RETURN(t1[3] == t2[3]);
+++ return TEST_SUCCESS;
+++}
+++result_t validateInt64(__m128i a, int64_t i0, int64_t i1);
+++result_t validateInt64(__m64 a, int64_t i0);
+++result_t validateUInt64(__m128i a, uint64_t u0, uint64_t u1);
+++result_t validateUInt64(__m64 a, uint64_t u0);
+++result_t validateInt32(__m128i a,
+++ int32_t i0,
+++ int32_t i1,
+++ int32_t i2,
+++ int32_t i3);
+++result_t validateUInt32(__m128i a,
+++ uint32_t u0,
+++ uint32_t u1,
+++ uint32_t u2,
+++ uint32_t u3);
+++result_t validateUInt32(__m64 a, uint32_t u0, uint32_t u1);
+++result_t validateInt32(__m64 a, int32_t u0, int32_t u1);
+++result_t validateInt16(__m128i a,
+++ int16_t i0,
+++ int16_t i1,
+++ int16_t i2,
+++ int16_t i3,
+++ int16_t i4,
+++ int16_t i5,
+++ int16_t i6,
+++ int16_t i7);
+++result_t validateInt16(__m64 a, int16_t i0, int16_t i1, int16_t i2, int16_t i3);
+++result_t validateUInt16(__m128i a,
+++ uint16_t u0,
+++ uint16_t u1,
+++ uint16_t u2,
+++ uint16_t u3,
+++ uint16_t u4,
+++ uint16_t u5,
+++ uint16_t u6,
+++ uint16_t u7);
+++result_t validateUInt16(__m64 a,
+++ uint16_t u0,
+++ uint16_t u1,
+++ uint16_t u2,
+++ uint16_t u3);
+++result_t validateInt8(__m128i a,
+++ int8_t i0,
+++ int8_t i1,
+++ int8_t i2,
+++ int8_t i3,
+++ int8_t i4,
+++ int8_t i5,
+++ int8_t i6,
+++ int8_t i7,
+++ int8_t i8,
+++ int8_t i9,
+++ int8_t i10,
+++ int8_t i11,
+++ int8_t i12,
+++ int8_t i13,
+++ int8_t i14,
+++ int8_t i15);
+++result_t validateInt8(__m64 a,
+++ int8_t i0,
+++ int8_t i1,
+++ int8_t i2,
+++ int8_t i3,
+++ int8_t i4,
+++ int8_t i5,
+++ int8_t i6,
+++ int8_t i7);
+++result_t validateUInt8(__m128i a,
+++ uint8_t u0,
+++ uint8_t u1,
+++ uint8_t u2,
+++ uint8_t u3,
+++ uint8_t u4,
+++ uint8_t u5,
+++ uint8_t u6,
+++ uint8_t u7,
+++ uint8_t u8,
+++ uint8_t u9,
+++ uint8_t u10,
+++ uint8_t u11,
+++ uint8_t u12,
+++ uint8_t u13,
+++ uint8_t u14,
+++ uint8_t u15);
+++result_t validateUInt8(__m64 a,
+++ uint8_t u0,
+++ uint8_t u1,
+++ uint8_t u2,
+++ uint8_t u3,
+++ uint8_t u4,
+++ uint8_t u5,
+++ uint8_t u6,
+++ uint8_t u7);
+++result_t validateSingleFloatPair(float a, float b);
+++result_t validateSingleDoublePair(double a, double b);
+++result_t validateFloat(__m128 a, float f0, float f1, float f2, float f3);
+++result_t validateFloatEpsilon(__m128 a,
+++ float f0,
+++ float f1,
+++ float f2,
+++ float f3,
+++ float epsilon);
+++result_t validateFloatError(__m128 a,
+++ float f0,
+++ float f1,
+++ float f2,
+++ float f3,
+++ float err);
+++result_t validateDouble(__m128d a, double d0, double d1);
+++result_t validateFloatError(__m128d a, double d0, double d1, double err);
+++
+++#define VALIDATE_INT8_M128(A, B) \
+++ validateInt8(A, B[0], B[1], B[2], B[3], B[4], B[5], B[6], B[7], B[8], \
+++ B[9], B[10], B[11], B[12], B[13], B[14], B[15])
+++#define VALIDATE_UINT8_M128(A, B) \
+++ validateUInt8(A, B[0], B[1], B[2], B[3], B[4], B[5], B[6], B[7], B[8], \
+++ B[9], B[10], B[11], B[12], B[13], B[14], B[15])
+++#define VALIDATE_INT16_M128(A, B) \
+++ validateInt16(A, B[0], B[1], B[2], B[3], B[4], B[5], B[6], B[7])
+++#define VALIDATE_UINT16_M128(A, B) \
+++ validateUInt16(A, B[0], B[1], B[2], B[3], B[4], B[5], B[6], B[7])
+++#define VALIDATE_INT32_M128(A, B) validateInt32(A, B[0], B[1], B[2], B[3])
+++#define VALIDATE_UINT32_M128(A, B) validateUInt32(A, B[0], B[1], B[2], B[3])
+++
+++#define VALIDATE_INT8_M64(A, B) \
+++ validateInt8(A, B[0], B[1], B[2], B[3], B[4], B[5], B[6], B[7])
+++#define VALIDATE_UINT8_M64(A, B) \
+++ validateUInt8(A, B[0], B[1], B[2], B[3], B[4], B[5], B[6], B[7])
+++#define VALIDATE_INT16_M64(A, B) validateInt16(A, B[0], B[1], B[2], B[3])
+++#define VALIDATE_UINT16_M64(A, B) validateUInt16(A, B[0], B[1], B[2], B[3])
+++#define VALIDATE_INT32_M64(A, B) validateInt32(A, B[0], B[1])
+++#define VALIDATE_UINT32_M64(A, B) validateUInt32(A, B[0], B[1])
+++#define CHECK_RESULT(EXP) \
+++ if (EXP != TEST_SUCCESS) { \
+++ return TEST_FAIL; \
+++ }
+++#define IMM_2_ITER \
+++ TEST_IMPL(0) \
+++ TEST_IMPL(1)
+++#define IMM_4_ITER \
+++ IMM_2_ITER \
+++ TEST_IMPL(2) \
+++ TEST_IMPL(3)
+++#define IMM_8_ITER \
+++ IMM_4_ITER \
+++ TEST_IMPL(4) \
+++ TEST_IMPL(5) \
+++ TEST_IMPL(6) \
+++ TEST_IMPL(7)
+++#define IMM_16_ITER \
+++ IMM_8_ITER \
+++ TEST_IMPL(8) \
+++ TEST_IMPL(9) \
+++ TEST_IMPL(10) \
+++ TEST_IMPL(11) \
+++ TEST_IMPL(12) \
+++ TEST_IMPL(13) \
+++ TEST_IMPL(14) \
+++ TEST_IMPL(15)
+++#define IMM_32_ITER \
+++ IMM_16_ITER \
+++ TEST_IMPL(16) \
+++ TEST_IMPL(17) \
+++ TEST_IMPL(18) \
+++ TEST_IMPL(19) \
+++ TEST_IMPL(20) \
+++ TEST_IMPL(21) \
+++ TEST_IMPL(22) \
+++ TEST_IMPL(23) \
+++ TEST_IMPL(24) \
+++ TEST_IMPL(25) \
+++ TEST_IMPL(26) \
+++ TEST_IMPL(27) \
+++ TEST_IMPL(28) \
+++ TEST_IMPL(29) \
+++ TEST_IMPL(30) \
+++ TEST_IMPL(31)
+++#define IMM_64_ITER \
+++ IMM_32_ITER \
+++ TEST_IMPL(32) \
+++ TEST_IMPL(33) \
+++ TEST_IMPL(34) \
+++ TEST_IMPL(35) \
+++ TEST_IMPL(36) \
+++ TEST_IMPL(37) \
+++ TEST_IMPL(38) \
+++ TEST_IMPL(39) \
+++ TEST_IMPL(40) \
+++ TEST_IMPL(41) \
+++ TEST_IMPL(42) \
+++ TEST_IMPL(43) \
+++ TEST_IMPL(44) \
+++ TEST_IMPL(45) \
+++ TEST_IMPL(46) \
+++ TEST_IMPL(47) \
+++ TEST_IMPL(48) \
+++ TEST_IMPL(49) \
+++ TEST_IMPL(50) \
+++ TEST_IMPL(51) \
+++ TEST_IMPL(52) \
+++ TEST_IMPL(53) \
+++ TEST_IMPL(54) \
+++ TEST_IMPL(55) \
+++ TEST_IMPL(56) \
+++ TEST_IMPL(57) \
+++ TEST_IMPL(58) \
+++ TEST_IMPL(59) \
+++ TEST_IMPL(60) \
+++ TEST_IMPL(61) \
+++ TEST_IMPL(62) \
+++ TEST_IMPL(63)
+++#define IMM_128_ITER \
+++ IMM_64_ITER \
+++ TEST_IMPL(64) \
+++ TEST_IMPL(65) \
+++ TEST_IMPL(66) \
+++ TEST_IMPL(67) \
+++ TEST_IMPL(68) \
+++ TEST_IMPL(69) \
+++ TEST_IMPL(70) \
+++ TEST_IMPL(71) \
+++ TEST_IMPL(72) \
+++ TEST_IMPL(73) \
+++ TEST_IMPL(74) \
+++ TEST_IMPL(75) \
+++ TEST_IMPL(76) \
+++ TEST_IMPL(77) \
+++ TEST_IMPL(78) \
+++ TEST_IMPL(79) \
+++ TEST_IMPL(80) \
+++ TEST_IMPL(81) \
+++ TEST_IMPL(82) \
+++ TEST_IMPL(83) \
+++ TEST_IMPL(84) \
+++ TEST_IMPL(85) \
+++ TEST_IMPL(86) \
+++ TEST_IMPL(87) \
+++ TEST_IMPL(88) \
+++ TEST_IMPL(89) \
+++ TEST_IMPL(90) \
+++ TEST_IMPL(91) \
+++ TEST_IMPL(92) \
+++ TEST_IMPL(93) \
+++ TEST_IMPL(94) \
+++ TEST_IMPL(95) \
+++ TEST_IMPL(96) \
+++ TEST_IMPL(97) \
+++ TEST_IMPL(98) \
+++ TEST_IMPL(99) \
+++ TEST_IMPL(100) \
+++ TEST_IMPL(101) \
+++ TEST_IMPL(102) \
+++ TEST_IMPL(103) \
+++ TEST_IMPL(104) \
+++ TEST_IMPL(105) \
+++ TEST_IMPL(106) \
+++ TEST_IMPL(107) \
+++ TEST_IMPL(108) \
+++ TEST_IMPL(109) \
+++ TEST_IMPL(110) \
+++ TEST_IMPL(111) \
+++ TEST_IMPL(112) \
+++ TEST_IMPL(113) \
+++ TEST_IMPL(114) \
+++ TEST_IMPL(115) \
+++ TEST_IMPL(116) \
+++ TEST_IMPL(117) \
+++ TEST_IMPL(118) \
+++ TEST_IMPL(119) \
+++ TEST_IMPL(120) \
+++ TEST_IMPL(121) \
+++ TEST_IMPL(122) \
+++ TEST_IMPL(123) \
+++ TEST_IMPL(124) \
+++ TEST_IMPL(125) \
+++ TEST_IMPL(126) \
+++ TEST_IMPL(127)
+++#define IMM_256_ITER \
+++ IMM_128_ITER \
+++ TEST_IMPL(128) \
+++ TEST_IMPL(129) \
+++ TEST_IMPL(130) \
+++ TEST_IMPL(131) \
+++ TEST_IMPL(132) \
+++ TEST_IMPL(133) \
+++ TEST_IMPL(134) \
+++ TEST_IMPL(135) \
+++ TEST_IMPL(136) \
+++ TEST_IMPL(137) \
+++ TEST_IMPL(138) \
+++ TEST_IMPL(139) \
+++ TEST_IMPL(140) \
+++ TEST_IMPL(141) \
+++ TEST_IMPL(142) \
+++ TEST_IMPL(143) \
+++ TEST_IMPL(144) \
+++ TEST_IMPL(145) \
+++ TEST_IMPL(146) \
+++ TEST_IMPL(147) \
+++ TEST_IMPL(148) \
+++ TEST_IMPL(149) \
+++ TEST_IMPL(150) \
+++ TEST_IMPL(151) \
+++ TEST_IMPL(152) \
+++ TEST_IMPL(153) \
+++ TEST_IMPL(154) \
+++ TEST_IMPL(155) \
+++ TEST_IMPL(156) \
+++ TEST_IMPL(157) \
+++ TEST_IMPL(158) \
+++ TEST_IMPL(159) \
+++ TEST_IMPL(160) \
+++ TEST_IMPL(161) \
+++ TEST_IMPL(162) \
+++ TEST_IMPL(163) \
+++ TEST_IMPL(164) \
+++ TEST_IMPL(165) \
+++ TEST_IMPL(166) \
+++ TEST_IMPL(167) \
+++ TEST_IMPL(168) \
+++ TEST_IMPL(169) \
+++ TEST_IMPL(170) \
+++ TEST_IMPL(171) \
+++ TEST_IMPL(172) \
+++ TEST_IMPL(173) \
+++ TEST_IMPL(174) \
+++ TEST_IMPL(175) \
+++ TEST_IMPL(176) \
+++ TEST_IMPL(177) \
+++ TEST_IMPL(178) \
+++ TEST_IMPL(179) \
+++ TEST_IMPL(180) \
+++ TEST_IMPL(181) \
+++ TEST_IMPL(182) \
+++ TEST_IMPL(183) \
+++ TEST_IMPL(184) \
+++ TEST_IMPL(185) \
+++ TEST_IMPL(186) \
+++ TEST_IMPL(187) \
+++ TEST_IMPL(188) \
+++ TEST_IMPL(189) \
+++ TEST_IMPL(190) \
+++ TEST_IMPL(191) \
+++ TEST_IMPL(192) \
+++ TEST_IMPL(193) \
+++ TEST_IMPL(194) \
+++ TEST_IMPL(195) \
+++ TEST_IMPL(196) \
+++ TEST_IMPL(197) \
+++ TEST_IMPL(198) \
+++ TEST_IMPL(199) \
+++ TEST_IMPL(200) \
+++ TEST_IMPL(201) \
+++ TEST_IMPL(202) \
+++ TEST_IMPL(203) \
+++ TEST_IMPL(204) \
+++ TEST_IMPL(205) \
+++ TEST_IMPL(206) \
+++ TEST_IMPL(207) \
+++ TEST_IMPL(208) \
+++ TEST_IMPL(209) \
+++ TEST_IMPL(210) \
+++ TEST_IMPL(211) \
+++ TEST_IMPL(212) \
+++ TEST_IMPL(213) \
+++ TEST_IMPL(214) \
+++ TEST_IMPL(215) \
+++ TEST_IMPL(216) \
+++ TEST_IMPL(217) \
+++ TEST_IMPL(218) \
+++ TEST_IMPL(219) \
+++ TEST_IMPL(220) \
+++ TEST_IMPL(221) \
+++ TEST_IMPL(222) \
+++ TEST_IMPL(223) \
+++ TEST_IMPL(224) \
+++ TEST_IMPL(225) \
+++ TEST_IMPL(226) \
+++ TEST_IMPL(227) \
+++ TEST_IMPL(228) \
+++ TEST_IMPL(229) \
+++ TEST_IMPL(230) \
+++ TEST_IMPL(231) \
+++ TEST_IMPL(232) \
+++ TEST_IMPL(233) \
+++ TEST_IMPL(234) \
+++ TEST_IMPL(235) \
+++ TEST_IMPL(236) \
+++ TEST_IMPL(237) \
+++ TEST_IMPL(238) \
+++ TEST_IMPL(239) \
+++ TEST_IMPL(240) \
+++ TEST_IMPL(241) \
+++ TEST_IMPL(242) \
+++ TEST_IMPL(243) \
+++ TEST_IMPL(244) \
+++ TEST_IMPL(245) \
+++ TEST_IMPL(246) \
+++ TEST_IMPL(247) \
+++ TEST_IMPL(248) \
+++ TEST_IMPL(249) \
+++ TEST_IMPL(250) \
+++ TEST_IMPL(251) \
+++ TEST_IMPL(252) \
+++ TEST_IMPL(253) \
+++ TEST_IMPL(254) \
+++ TEST_IMPL(255)
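+++// Hypothetical usage sketch (the names below are illustrative only): a test
+++// defines TEST_IMPL(n) to emit one case per immediate value, expands the
+++// matching iterator, then undefines it, e.g.
+++//   #define TEST_IMPL(idx) d[idx] = _mm_srli_si128(a, idx);
+++//   IMM_16_ITER
+++//   #undef TEST_IMPL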
+++} // namespace SSE2NEON
+++
+++#endif
--- /dev/null
--- /dev/null
--- /dev/null
+++#include <assert.h>
+++#include <float.h>
+++#include <inttypes.h>
+++#include <math.h>
+++#include <stdalign.h>
+++#include <stdint.h>
+++#include <stdio.h>
+++#include <stdlib.h>
+++#include <string.h>
+++#include <utility>
+++
+++#include "binding.h"
+++#include "impl.h"
+++
+++// Try 10,000 random floating point values for each test we run
+++#define MAX_TEST_VALUE 10000
+++
+++/* Pattern Matching for C macros.
+++ * https://github.com/pfultz2/Cloak/wiki/C-Preprocessor-tricks,-tips,-and-idioms
+++ */
+++
+++/* catenate */
+++#define PRIMITIVE_CAT(a, ...) a##__VA_ARGS__
+++
+++#define IIF(c) PRIMITIVE_CAT(IIF_, c)
+++/* run the 2nd parameter */
+++#define IIF_0(t, ...) __VA_ARGS__
+++/* run the 1st parameter */
+++#define IIF_1(t, ...) t
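+++/* Illustrative expansion (the token names here are made up):
+++ *   IIF(1)(then_tokens, else_tokens) expands to then_tokens
+++ *   IIF(0)(then_tokens, else_tokens) expands to else_tokens
+++ * i.e. a literal 0/1 argument selects one of two token sequences at
+++ * preprocessing time. */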
+++
+++// This program is a set of unit tests ensuring that each SSE call provides
+++// the output we expect. If an assert fires, then something didn't match up.
+++//
+++// Functions with "test_" prefix will be called in runSingleTest.
+++namespace SSE2NEON
+++{
+++// Forward declaration
+++class SSE2NEONTestImpl : public SSE2NEONTest
+++{
+++public:
+++ SSE2NEONTestImpl(void);
+++ result_t loadTestFloatPointers(uint32_t i);
+++ result_t loadTestIntPointers(uint32_t i);
+++ result_t runSingleTest(InstructionTest test, uint32_t i);
+++
+++ float *mTestFloatPointer1;
+++ float *mTestFloatPointer2;
+++ int32_t *mTestIntPointer1;
+++ int32_t *mTestIntPointer2;
+++ float mTestFloats[MAX_TEST_VALUE];
+++ int32_t mTestInts[MAX_TEST_VALUE];
+++
+++ virtual ~SSE2NEONTestImpl(void)
+++ {
+++ platformAlignedFree(mTestFloatPointer1);
+++ platformAlignedFree(mTestFloatPointer2);
+++ platformAlignedFree(mTestIntPointer1);
+++ platformAlignedFree(mTestIntPointer2);
+++ }
+++ virtual void release(void) { delete this; }
+++ virtual result_t runTest(InstructionTest test)
+++ {
+++ result_t ret = TEST_SUCCESS;
+++
+++ // Test a whole bunch of values
+++ for (uint32_t i = 0; i < (MAX_TEST_VALUE - 8); i++) {
+++ ret = loadTestFloatPointers(i); // Load some random float values
+++ if (ret == TEST_FAIL)
+++ break; // loading the test floats failed
+++ ret = loadTestIntPointers(i); // load some random int values
+++ if (ret == TEST_FAIL)
+++ break; // loading the test ints failed
+++ // If we are testing the reciprocal, then invert the input data
+++ // (easier for debugging)
+++ if (test == it_mm_rcp_ps) {
+++ mTestFloatPointer1[0] = 1.0f / mTestFloatPointer1[0];
+++ mTestFloatPointer1[1] = 1.0f / mTestFloatPointer1[1];
+++ mTestFloatPointer1[2] = 1.0f / mTestFloatPointer1[2];
+++ mTestFloatPointer1[3] = 1.0f / mTestFloatPointer1[3];
+++ }
+++ if (test == it_mm_rcp_ps || test == it_mm_rcp_ss ||
+++ test == it_mm_rsqrt_ps || test == it_mm_rsqrt_ss) {
+++ if ((rand() & 3) == 0) {
+++ uint32_t r1 = rand() & 3;
+++ uint32_t r2 = rand() & 3;
+++ uint32_t r3 = rand() & 3;
+++ uint32_t r4 = rand() & 3;
+++ uint32_t r5 = rand() & 3;
+++ uint32_t r6 = rand() & 3;
+++ uint32_t r7 = rand() & 3;
+++ uint32_t r8 = rand() & 3;
+++ mTestFloatPointer1[r1] = 0.0f;
+++ mTestFloatPointer1[r2] = 0.0f;
+++ mTestFloatPointer1[r3] = 0.0f;
+++ mTestFloatPointer1[r4] = 0.0f;
+++ mTestFloatPointer1[r5] = -0.0f;
+++ mTestFloatPointer1[r6] = -0.0f;
+++ mTestFloatPointer1[r7] = -0.0f;
+++ mTestFloatPointer1[r8] = -0.0f;
+++ }
+++ }
+++ if (test == it_mm_cmpge_ps || test == it_mm_cmpge_ss ||
+++ test == it_mm_cmple_ps || test == it_mm_cmple_ss ||
+++ test == it_mm_cmpeq_ps || test == it_mm_cmpeq_ss) {
+++ // Make sure at least one value is the same.
+++ mTestFloatPointer1[3] = mTestFloatPointer2[3];
+++ }
+++
+++ if (test == it_mm_cmpord_ps || test == it_mm_cmpord_ss ||
+++ test == it_mm_cmpunord_ps || test == it_mm_cmpunord_ss ||
+++ test == it_mm_cmpeq_ps || test == it_mm_cmpeq_ss ||
+++ test == it_mm_cmpge_ps || test == it_mm_cmpge_ss ||
+++ test == it_mm_cmpgt_ps || test == it_mm_cmpgt_ss ||
+++ test == it_mm_cmple_ps || test == it_mm_cmple_ss ||
+++ test == it_mm_cmplt_ps || test == it_mm_cmplt_ss ||
+++ test == it_mm_cmpneq_ps || test == it_mm_cmpneq_ss ||
+++ test == it_mm_cmpnge_ps || test == it_mm_cmpnge_ss ||
+++ test == it_mm_cmpngt_ps || test == it_mm_cmpngt_ss ||
+++ test == it_mm_cmpnle_ps || test == it_mm_cmpnle_ss ||
+++ test == it_mm_cmpnlt_ps || test == it_mm_cmpnlt_ss ||
+++ test == it_mm_comieq_ss || test == it_mm_ucomieq_ss ||
+++ test == it_mm_comige_ss || test == it_mm_ucomige_ss ||
+++ test == it_mm_comigt_ss || test == it_mm_ucomigt_ss ||
+++ test == it_mm_comile_ss || test == it_mm_ucomile_ss ||
+++ test == it_mm_comilt_ss || test == it_mm_ucomilt_ss ||
+++ test == it_mm_comineq_ss || test == it_mm_ucomineq_ss) {
+++ // Make sure the NaN values are included in the testing
+++ // one out of four times.
+++ if ((rand() & 3) == 0) {
+++ uint32_t r1 = rand() & 3;
+++ uint32_t r2 = rand() & 3;
+++ mTestFloatPointer1[r1] = nanf("");
+++ mTestFloatPointer2[r2] = nanf("");
+++ }
+++ }
+++
+++ if (test == it_mm_cmpord_pd || test == it_mm_cmpord_sd ||
+++ test == it_mm_cmpunord_pd || test == it_mm_cmpunord_sd ||
+++ test == it_mm_cmpeq_pd || test == it_mm_cmpeq_sd ||
+++ test == it_mm_cmpge_pd || test == it_mm_cmpge_sd ||
+++ test == it_mm_cmpgt_pd || test == it_mm_cmpgt_sd ||
+++ test == it_mm_cmple_pd || test == it_mm_cmple_sd ||
+++ test == it_mm_cmplt_pd || test == it_mm_cmplt_sd ||
+++ test == it_mm_cmpneq_pd || test == it_mm_cmpneq_sd ||
+++ test == it_mm_cmpnge_pd || test == it_mm_cmpnge_sd ||
+++ test == it_mm_cmpngt_pd || test == it_mm_cmpngt_sd ||
+++ test == it_mm_cmpnle_pd || test == it_mm_cmpnle_sd ||
+++ test == it_mm_cmpnlt_pd || test == it_mm_cmpnlt_sd ||
+++ test == it_mm_comieq_sd || test == it_mm_ucomieq_sd ||
+++ test == it_mm_comige_sd || test == it_mm_ucomige_sd ||
+++ test == it_mm_comigt_sd || test == it_mm_ucomigt_sd ||
+++ test == it_mm_comile_sd || test == it_mm_ucomile_sd ||
+++ test == it_mm_comilt_sd || test == it_mm_ucomilt_sd ||
+++ test == it_mm_comineq_sd || test == it_mm_ucomineq_sd) {
+++ // Make sure the NaN values are included in the testing
+++ // one out of four times.
+++ if ((rand() & 3) == 0) {
+++ // FIXME:
+++ // The argument "0xFFFFFFFFFFFF" is a tricky workaround to
+++ // set the NaN value for doubles. The code is not intuitive
+++ // and should be fixed in the future.
+++ uint32_t r1 = ((rand() & 1) << 1) + 1;
+++ uint32_t r2 = ((rand() & 1) << 1) + 1;
+++ mTestFloatPointer1[r1] = nanf("0xFFFFFFFFFFFF");
+++ mTestFloatPointer2[r2] = nanf("0xFFFFFFFFFFFF");
+++ }
+++ }
+++
+++ if (test == it_mm_max_pd || test == it_mm_max_sd ||
+++ test == it_mm_min_pd || test == it_mm_min_sd) {
+++ // Make sure the positive/negative infinity values are included
+++ // in the testing one out of four times.
+++ if ((rand() & 3) == 0) {
+++ uint32_t r1 = ((rand() & 1) << 1) + 1;
+++ uint32_t r2 = ((rand() & 1) << 1) + 1;
+++ uint32_t r3 = ((rand() & 1) << 1) + 1;
+++ uint32_t r4 = ((rand() & 1) << 1) + 1;
+++ mTestFloatPointer1[r1] = INFINITY;
+++ mTestFloatPointer2[r2] = INFINITY;
+++ mTestFloatPointer1[r3] = -INFINITY;
+++ mTestFloatPointer1[r4] = -INFINITY;
+++ }
+++ }
+++
+++#if SSE2NEON_PRECISE_MINMAX
+++ if (test == it_mm_max_ps || test == it_mm_max_ss ||
+++ test == it_mm_min_ps || test == it_mm_min_ss) {
+++ // Make sure the NaN values are included in the testing
+++ // one out of four times.
+++ if ((rand() & 3) == 0) {
+++ uint32_t r1 = rand() & 3;
+++ uint32_t r2 = rand() & 3;
+++ mTestFloatPointer1[r1] = nanf("");
+++ mTestFloatPointer2[r2] = nanf("");
+++ }
+++ }
+++
+++ if (test == it_mm_max_pd || test == it_mm_max_sd ||
+++ test == it_mm_min_pd || test == it_mm_min_sd) {
+++ // Make sure the NaN values are included in the testing
+++ // one out of four times.
+++ if ((rand() & 3) == 0) {
+++ // FIXME:
+++ // The argument "0xFFFFFFFFFFFF" is a tricky workaround to
+++ // set the NaN value for doubles. The code is not intuitive
+++ // and should be fixed in the future.
+++ uint32_t r1 = ((rand() & 1) << 1) + 1;
+++ uint32_t r2 = ((rand() & 1) << 1) + 1;
+++ mTestFloatPointer1[r1] = nanf("0xFFFFFFFFFFFF");
+++ mTestFloatPointer2[r2] = nanf("0xFFFFFFFFFFFF");
+++ }
+++ }
+++#endif
+++
+++ // Roughly one out of every 64 times, mix up the test floats to
+++ // contain some integer values
+++ if ((rand() & 63) == 0) {
+++ uint32_t option = rand() & 3;
+++ switch (option) {
+++ // All integers..
+++ case 0:
+++ mTestFloatPointer1[0] = float(mTestIntPointer1[0]);
+++ mTestFloatPointer1[1] = float(mTestIntPointer1[1]);
+++ mTestFloatPointer1[2] = float(mTestIntPointer1[2]);
+++ mTestFloatPointer1[3] = float(mTestIntPointer1[3]);
+++
+++ mTestFloatPointer2[0] = float(mTestIntPointer2[0]);
+++ mTestFloatPointer2[1] = float(mTestIntPointer2[1]);
+++ mTestFloatPointer2[2] = float(mTestIntPointer2[2]);
+++ mTestFloatPointer2[3] = float(mTestIntPointer2[3]);
+++
+++ break;
+++ case 1: {
+++ uint32_t index = rand() & 3;
+++ mTestFloatPointer1[index] = float(mTestIntPointer1[index]);
+++ index = rand() & 3;
+++ mTestFloatPointer2[index] = float(mTestIntPointer2[index]);
+++ } break;
+++ case 2: {
+++ uint32_t index1 = rand() & 3;
+++ uint32_t index2 = rand() & 3;
+++ mTestFloatPointer1[index1] =
+++ float(mTestIntPointer1[index1]);
+++ mTestFloatPointer1[index2] =
+++ float(mTestIntPointer1[index2]);
+++ index1 = rand() & 3;
+++ index2 = rand() & 3;
+++ mTestFloatPointer1[index1] =
+++ float(mTestIntPointer1[index1]);
+++ mTestFloatPointer1[index2] =
+++ float(mTestIntPointer1[index2]);
+++ } break;
+++ case 3:
+++ mTestFloatPointer1[0] = float(mTestIntPointer1[0]);
+++ mTestFloatPointer1[1] = float(mTestIntPointer1[1]);
+++ mTestFloatPointer1[2] = float(mTestIntPointer1[2]);
+++ mTestFloatPointer1[3] = float(mTestIntPointer1[3]);
+++ break;
+++ }
+++ if ((rand() & 3) == 0) { // one out of 4 times, make halves
+++ for (uint32_t j = 0; j < 4; j++) {
+++ mTestFloatPointer1[j] *= 0.5f;
+++ mTestFloatPointer2[j] *= 0.5f;
+++ }
+++ }
+++ }
+++
+++ ret = runSingleTest(test, i);
+++ if (ret == TEST_FAIL) // the test failed...
+++ {
+++ // Set a breakpoint here if you want to step through the failure
+++ // case in the debugger
+++ ret = runSingleTest(test, i);
+++ break;
+++ }
+++ }
+++ return ret;
+++ }
+++};
+++
+++const char *instructionString[] = {
+++#define _(x) #x,
+++ INTRIN_LIST
+++#undef _
+++};
+++
+++// Produce the same rounding as SSE instructions do in the _MM_ROUND_NEAREST
+++// rounding mode (round half to even).
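+++// For example: bankersRounding(2.5f) == 2.0f, bankersRounding(3.5f) == 4.0f,
+++// and bankersRounding(-2.5f) == -2.0f, while bankersRounding(2.4f) == 2.0f.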
+++static inline float bankersRounding(float val)
+++{
+++ if (val < 0)
+++ return -bankersRounding(-val);
+++
+++ float ret;
+++ float roundDown = floorf(val); // Round down value
+++ float roundUp = ceilf(val); // Round up value
+++ float diffDown = val - roundDown;
+++ float diffUp = roundUp - val;
+++
+++ if (diffDown < diffUp) {
+++ /* If it's closer to the round down value, then use it */
+++ ret = roundDown;
+++ } else if (diffDown > diffUp) {
+++ /* If it's closer to the round up value, then use it */
+++ ret = roundUp;
+++ } else {
+++ /* If it's equidistant between round up and round down value, pick the
+++ * one which is an even number */
+++ float half = roundDown / 2;
+++ if (half != floorf(half)) {
+++ /* If the round down value is odd, return the round up value */
+++ ret = roundUp;
+++ } else {
+++ /* Otherwise the round down value is even, so return it */
+++ ret = roundDown;
+++ }
+++ }
+++ return ret;
+++}
+++
+++static inline double bankersRounding(double val)
+++{
+++ if (val < 0)
+++ return -bankersRounding(-val);
+++
+++ double ret;
+++ double roundDown = floor(val); // Round down value
+++ double roundUp = ceil(val); // Round up value
+++ double diffDown = val - roundDown;
+++ double diffUp = roundUp - val;
+++
+++ if (diffDown < diffUp) {
+++ /* If it's closer to the round down value, then use it */
+++ ret = roundDown;
+++ } else if (diffDown > diffUp) {
+++ /* If it's closer to the round up value, then use it */
+++ ret = roundUp;
+++ } else {
+++ /* If it's equidistant between round up and round down value, pick the
+++ * one which is an even number */
+++ double half = roundDown / 2;
+++ if (half != floor(half)) {
+++ /* If the round down value is odd, return the round up value */
+++ ret = roundUp;
+++ } else {
+++ /* Otherwise the round down value is even, so return it */
+++ ret = roundDown;
+++ }
+++ }
+++ return ret;
+++}
+++
+++// SplitMix64 PRNG by Sebastiano Vigna, see:
+++// <https://xoshiro.di.unimi.it/splitmix64.c>
+++static uint64_t state; // the state of SplitMix64 PRNG
+++const double TWOPOWER64 = pow(2, 64);
+++
+++#define SSE2NEON_INIT_RNG(seed) \
+++ do { \
+++ state = seed; \
+++ } while (0)
+++
+++static double next()
+++{
+++ uint64_t z = (state += 0x9e3779b97f4a7c15);
+++ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+++ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+++ return (double) (z ^ (z >> 31));
+++}
+++
+++static float ranf()
+++{
+++ return (float) (next() / TWOPOWER64);
+++}
+++
+++static float ranf(float low, float high)
+++{
+++ return ranf() * (high - low) + low;
+++}
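+++// Example call site (hypothetical values): seed once, then draw roughly
+++// uniform floats between low and high:
+++//   SSE2NEON_INIT_RNG(123456);
+++//   float x = ranf(-100000.0f, 100000.0f);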
+++
+++// Enable the tests which use the macros of other tests
+++result_t test_mm_slli_si128(const SSE2NEONTestImpl &impl, uint32_t iter);
+++result_t test_mm_srli_si128(const SSE2NEONTestImpl &impl, uint32_t iter);
+++result_t test_mm_shuffle_pi16(const SSE2NEONTestImpl &impl, uint32_t iter);
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to call "_mm_set_epi32".
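+++// (_mm_set_epi32 places its last argument in the lowest lane, so the expected
+++// values are passed to validateInt32 in reverse order.)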
+++__m128i do_mm_set_epi32(int32_t x, int32_t y, int32_t z, int32_t w)
+++{
+++ __m128i a = _mm_set_epi32(x, y, z, w);
+++ validateInt32(a, w, z, y, x);
+++ return a;
+++}
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to load __m64 data.
+++template <class T>
+++__m64 load_m64(const T *p)
+++{
+++ return *((const __m64 *) p);
+++}
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to call "_mm_load_ps".
+++template <class T>
+++__m128 load_m128(const T *p)
+++{
+++ return _mm_loadu_ps((const float *) p);
+++}
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to call "_mm_load_ps".
+++template <class T>
+++__m128i load_m128i(const T *p)
+++{
+++ __m128 a = _mm_loadu_ps((const float *) p);
+++ __m128i ia = *(const __m128i *) &a;
+++ return ia;
+++}
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to call "_mm_load_pd".
+++template <class T>
+++__m128d load_m128d(const T *p)
+++{
+++ return _mm_loadu_pd((const double *) p);
+++}
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to call "_mm_store_ps".
+++result_t do_mm_store_ps(float *p, float x, float y, float z, float w)
+++{
+++ __m128 a = _mm_set_ps(x, y, z, w);
+++ _mm_store_ps(p, a);
+++ ASSERT_RETURN(p[0] == w);
+++ ASSERT_RETURN(p[1] == z);
+++ ASSERT_RETURN(p[2] == y);
+++ ASSERT_RETURN(p[3] == x);
+++ return TEST_SUCCESS;
+++}
+++
+++// This function is not called from "runSingleTest"; it is a helper for other
+++// intrinsic tests that need to call "_mm_store_ps".
+++result_t do_mm_store_ps(int32_t *p, int32_t x, int32_t y, int32_t z, int32_t w)
+++{
+++ __m128i a = _mm_set_epi32(x, y, z, w);
+++ _mm_store_ps((float *) p, *(const __m128 *) &a);
+++ ASSERT_RETURN(p[0] == w);
+++ ASSERT_RETURN(p[1] == z);
+++ ASSERT_RETURN(p[2] == y);
+++ ASSERT_RETURN(p[3] == x);
+++ return TEST_SUCCESS;
+++}
+++
+++float cmp_noNaN(float a, float b)
+++{
+++ return (!isnan(a) && !isnan(b)) ? ALL_BIT_1_32 : 0.0f;
+++}
+++
+++double cmp_noNaN(double a, double b)
+++{
+++ return (!isnan(a) && !isnan(b)) ? ALL_BIT_1_64 : 0.0f;
+++}
+++
+++float cmp_hasNaN(float a, float b)
+++{
+++ return (isnan(a) || isnan(b)) ? ALL_BIT_1_32 : 0.0f;
+++}
+++
+++double cmp_hasNaN(double a, double b)
+++{
+++ return (isnan(a) || isnan(b)) ? ALL_BIT_1_64 : 0.0f;
+++}
+++
+++int32_t comilt_ss(float a, float b)
+++{
+++ if (isnan(a) || isnan(b))
+++ return 0;
+++ return (a < b);
+++}
+++
+++int32_t comigt_ss(float a, float b)
+++{
+++ if (isnan(a) || isnan(b))
+++ return 0;
+++ return (a > b);
+++}
+++
+++int32_t comile_ss(float a, float b)
+++{
+++ if (isnan(a) || isnan(b))
+++ return 0;
+++ return (a <= b);
+++}
+++
+++int32_t comige_ss(float a, float b)
+++{
+++ if (isnan(a) || isnan(b))
+++ return 0;
+++ return (a >= b);
+++}
+++
+++int32_t comieq_ss(float a, float b)
+++{
+++ if (isnan(a) || isnan(b))
+++ return 0;
+++ return (a == b);
+++}
+++
+++int32_t comineq_ss(float a, float b)
+++{
+++ if (isnan(a) || isnan(b))
+++ return 1;
+++ return (a != b);
+++}
+++
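+++// Reference helper: clamps a 32-bit value to the signed 16-bit range
+++// [-32768, 32767].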
+++static inline int16_t saturate_16(int32_t a)
+++{
+++ int32_t max = (1 << 15) - 1;
+++ int32_t min = -(1 << 15);
+++ if (a > max)
+++ return max;
+++ if (a < min)
+++ return min;
+++ return a;
+++}
+++
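+++// The helpers below compute CRC-32C bit by bit; 0x82f63b78 is the reflected
+++// Castagnoli polynomial used by the SSE4.2 crc32 instructions, so they can
+++// serve as a reference for the _mm_crc32_* intrinsics.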
+++uint32_t canonical_crc32_u8(uint32_t crc, uint8_t v)
+++{
+++ crc ^= v;
+++ for (int bit = 0; bit < 8; bit++) {
+++ if (crc & 1)
+++ crc = (crc >> 1) ^ uint32_t(0x82f63b78);
+++ else
+++ crc = (crc >> 1);
+++ }
+++ return crc;
+++}
+++
+++uint32_t canonical_crc32_u16(uint32_t crc, uint16_t v)
+++{
+++ crc = canonical_crc32_u8(crc, v & 0xff);
+++ crc = canonical_crc32_u8(crc, (v >> 8) & 0xff);
+++ return crc;
+++}
+++
+++uint32_t canonical_crc32_u32(uint32_t crc, uint32_t v)
+++{
+++ crc = canonical_crc32_u16(crc, v & 0xffff);
+++ crc = canonical_crc32_u16(crc, (v >> 16) & 0xffff);
+++ return crc;
+++}
+++
+++uint64_t canonical_crc32_u64(uint64_t crc, uint64_t v)
+++{
+++ crc = canonical_crc32_u32((uint32_t) (crc), v & 0xffffffff);
+++ crc = canonical_crc32_u32((uint32_t) (crc), (v >> 32) & 0xffffffff);
+++ return crc;
+++}
+++
+++static const uint8_t crypto_aes_sbox[256] = {
+++ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+++ 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+++ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+++ 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+++ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+++ 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+++ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+++ 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+++ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+++ 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+++ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+++ 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+++ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+++ 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+++ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+++ 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+++ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+++ 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+++ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+++ 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+++ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+++ 0xb0, 0x54, 0xbb, 0x16,
+++};
+++
+++static const uint8_t crypto_aes_rsbox[256] = {
+++ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e,
+++ 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+++ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32,
+++ 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+++ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49,
+++ 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+++ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50,
+++ 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+++ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05,
+++ 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+++ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
+++ 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+++ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,
+++ 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+++ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b,
+++ 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+++ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59,
+++ 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+++ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d,
+++ 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+++ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63,
+++ 0x55, 0x21, 0x0c, 0x7d,
+++};
+++
+++// XT is the xtime function that multiplies 'x' by 2 in GF(2^8)
+++#define XT(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b))
+++
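+++// Bit-exact reference for one AES encryption round (as computed by
+++// _mm_aesenc_si128): the first loop applies ShiftRows and SubBytes, the
+++// second loop MixColumns, and the final xor with 'b' is AddRoundKey.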
+++inline __m128i aesenc_128_reference(__m128i a, __m128i b)
+++{
+++ uint8_t i, t, u, v[4][4];
+++ for (i = 0; i < 16; ++i) {
+++ v[((i / 4) + 4 - (i % 4)) % 4][i % 4] =
+++ crypto_aes_sbox[((SIMDVec *) &a)->m128_u8[i]];
+++ }
+++ for (i = 0; i < 4; ++i) {
+++ t = v[i][0];
+++ u = v[i][0] ^ v[i][1] ^ v[i][2] ^ v[i][3];
+++ v[i][0] ^= u ^ XT(v[i][0] ^ v[i][1]);
+++ v[i][1] ^= u ^ XT(v[i][1] ^ v[i][2]);
+++ v[i][2] ^= u ^ XT(v[i][2] ^ v[i][3]);
+++ v[i][3] ^= u ^ XT(v[i][3] ^ t);
+++ }
+++
+++ for (i = 0; i < 16; ++i) {
+++ ((SIMDVec *) &a)->m128_u8[i] =
+++ v[i / 4][i % 4] ^ ((SIMDVec *) &b)->m128_u8[i];
+++ }
+++
+++ return a;
+++}
+++
+++#define MULTIPLY(x, y) \
+++ (((y & 1) * x) ^ ((y >> 1 & 1) * XT(x)) ^ ((y >> 2 & 1) * XT(XT(x))) ^ \
+++ ((y >> 3 & 1) * XT(XT(XT(x)))) ^ ((y >> 4 & 1) * XT(XT(XT(XT(x))))))
+++
+++inline __m128i aesdec_128_reference(__m128i a, __m128i b)
+++{
+++ uint8_t i, e, f, g, h, v[4][4];
+++ for (i = 0; i < 16; ++i) {
+++ v[((i / 4) + (i % 4)) % 4][i % 4] =
+++ crypto_aes_rsbox[((SIMDVec *) &a)->m128_u8[i]];
+++ }
+++
+++ for (i = 0; i < 4; ++i) {
+++ e = v[i][0];
+++ f = v[i][1];
+++ g = v[i][2];
+++ h = v[i][3];
+++
+++ v[i][0] = MULTIPLY(e, 0x0e) ^ MULTIPLY(f, 0x0b) ^ MULTIPLY(g, 0x0d) ^
+++ MULTIPLY(h, 0x09);
+++ v[i][1] = MULTIPLY(e, 0x09) ^ MULTIPLY(f, 0x0e) ^ MULTIPLY(g, 0x0b) ^
+++ MULTIPLY(h, 0x0d);
+++ v[i][2] = MULTIPLY(e, 0x0d) ^ MULTIPLY(f, 0x09) ^ MULTIPLY(g, 0x0e) ^
+++ MULTIPLY(h, 0x0b);
+++ v[i][3] = MULTIPLY(e, 0x0b) ^ MULTIPLY(f, 0x0d) ^ MULTIPLY(g, 0x09) ^
+++ MULTIPLY(h, 0x0e);
+++ }
+++
+++ for (i = 0; i < 16; ++i) {
+++ ((SIMDVec *) &a)->m128_u8[i] =
+++ v[i / 4][i % 4] ^ ((SIMDVec *) &b)->m128_u8[i];
+++ }
+++ return a;
+++}
+++
+++inline __m128i aesenclast_128_reference(__m128i s, __m128i rk)
+++{
+++ uint8_t i, v[4][4];
+++ for (i = 0; i < 16; ++i)
+++ v[((i / 4) + 4 - (i % 4)) % 4][i % 4] =
+++ crypto_aes_sbox[((SIMDVec *) &s)->m128_u8[i]];
+++ for (i = 0; i < 16; ++i)
+++ ((SIMDVec *) &s)->m128_u8[i] =
+++ v[i / 4][i % 4] ^ ((SIMDVec *) &rk)->m128_u8[i];
+++ return s;
+++}
+++
+++// Rotates "value" right (circular right shift) by "amount" positions
+++static inline uint32_t rotr(uint32_t value, uint32_t amount)
+++{
+++ return (value >> amount) | (value << ((32 - amount) & 31));
+++}
+++
+++static inline uint64_t MUL(uint32_t a, uint32_t b)
+++{
+++ return (uint64_t) a * (uint64_t) b;
+++}
+++
+++// From BearSSL. Performs a 32-bit->64-bit carryless/polynomial
+++// long multiply.
+++//
+++// This implementation was chosen because it is reasonably fast
+++// without a lookup table or branching.
+++//
+++// It works by splitting up the bits so that they cannot carry,
+++// then combining the partial products with xor (a carryless
+++// add).
+++//
+++// https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/hash/ghash_ctmul.c;h=3623202;hb=5f045c7#l164
+++static uint64_t clmul_32(uint32_t x, uint32_t y)
+++{
+++ uint32_t x0, x1, x2, x3;
+++ uint32_t y0, y1, y2, y3;
+++ uint64_t z0, z1, z2, z3;
+++
+++ x0 = x & (uint32_t) 0x11111111;
+++ x1 = x & (uint32_t) 0x22222222;
+++ x2 = x & (uint32_t) 0x44444444;
+++ x3 = x & (uint32_t) 0x88888888;
+++ y0 = y & (uint32_t) 0x11111111;
+++ y1 = y & (uint32_t) 0x22222222;
+++ y2 = y & (uint32_t) 0x44444444;
+++ y3 = y & (uint32_t) 0x88888888;
+++ z0 = MUL(x0, y0) ^ MUL(x1, y3) ^ MUL(x2, y2) ^ MUL(x3, y1);
+++ z1 = MUL(x0, y1) ^ MUL(x1, y0) ^ MUL(x2, y3) ^ MUL(x3, y2);
+++ z2 = MUL(x0, y2) ^ MUL(x1, y1) ^ MUL(x2, y0) ^ MUL(x3, y3);
+++ z3 = MUL(x0, y3) ^ MUL(x1, y2) ^ MUL(x2, y1) ^ MUL(x3, y0);
+++ z0 &= (uint64_t) 0x1111111111111111;
+++ z1 &= (uint64_t) 0x2222222222222222;
+++ z2 &= (uint64_t) 0x4444444444444444;
+++ z3 &= (uint64_t) 0x8888888888888888;
+++ return z0 | z1 | z2 | z3;
+++}
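+++// Worked example: clmul_32(0x3, 0x3) == 0x5, since in GF(2)[x]
+++// (x + 1) * (x + 1) = x^2 + 1 (the cross terms cancel instead of carrying).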
+++
+++// Performs a 64x64->128-bit carryless/polynomial long
+++// multiply, using the above routine to calculate the
+++// subproducts needed for the full-size multiply.
+++//
+++// This uses the Karatsuba algorithm.
+++//
+++// Normally, the Karatsuba algorithm only pays off for very
+++// large numbers, because carries must be tracked and
+++// multiplication is relatively cheap.
+++//
+++// However, we have no carries and multiplication is
+++// definitely not cheap, so the Karatsuba algorithm is
+++// a low cost and easy optimization.
+++//
+++// https://en.m.wikipedia.org/wiki/Karatsuba_algorithm
+++//
+++// Note that addition and subtraction are both
+++// performed with xor, since all operations are
+++// carryless.
+++//
+++// The comments describe the actual mathematical operations
+++// being performed (rather than the bitwise operations), so as
+++// to mirror the linked Wikipedia article.
+++static std::pair<uint64_t, uint64_t> clmul_64(uint64_t x, uint64_t y)
+++{
+++ // B = 2
+++ // m = 32
+++ // x = (x1 * B^m) + x0
+++ uint32_t x0 = x & 0xffffffff;
+++ uint32_t x1 = x >> 32;
+++ // y = (y1 * B^m) + y0
+++ uint32_t y0 = y & 0xffffffff;
+++ uint32_t y1 = y >> 32;
+++
+++ // z0 = x0 * y0
+++ uint64_t z0 = clmul_32(x0, y0);
+++ // z2 = x1 * y1
+++ uint64_t z2 = clmul_32(x1, y1);
+++ // z1 = (x0 + x1) * (y0 + y1) - z0 - z2
+++ uint64_t z1 = clmul_32(x0 ^ x1, y0 ^ y1) ^ z0 ^ z2;
+++
+++ // xy = z0 + (z1 * B^m) + (z2 * B^2m)
+++ // note: z1 is split between the low and high halves
+++ uint64_t xy0 = z0 ^ (z1 << 32);
+++ uint64_t xy1 = z2 ^ (z1 >> 32);
+++
+++ return std::make_pair(xy0, xy1);
+++}
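+++// The returned pair is (low 64 bits, high 64 bits) of the 128-bit carryless
+++// product.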
+++
+++/* MMX */
+++result_t test_mm_empty(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return TEST_SUCCESS;
+++}
+++
+++/* SSE */
+++result_t test_mm_add_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float dx = _a[0] + _b[0];
+++ float dy = _a[1] + _b[1];
+++ float dz = _a[2] + _b[2];
+++ float dw = _a[3] + _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_add_ps(a, b);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_add_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer1;
+++
+++ float f0 = _a[0] + _b[0];
+++ float f1 = _a[1];
+++ float f2 = _a[2];
+++ float f3 = _a[3];
+++
+++ __m128 a = _mm_load_ps(_a);
+++ __m128 b = _mm_load_ps(_b);
+++ __m128 c = _mm_add_ss(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_and_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_and_ps(a, b);
+++ // now for the assertion...
+++ const uint32_t *ia = (const uint32_t *) &a;
+++ const uint32_t *ib = (const uint32_t *) &b;
+++ uint32_t r[4];
+++ r[0] = ia[0] & ib[0];
+++ r[1] = ia[1] & ib[1];
+++ r[2] = ia[2] & ib[2];
+++ r[3] = ia[3] & ib[3];
+++ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+++ result_t res = VALIDATE_INT32_M128(*(const __m128i *) &c, r);
+++ if (res) {
+++ res = VALIDATE_INT32_M128(ret, r);
+++ }
+++ return res;
+++}
+++
+++// r0 := ~a0 & b0
+++// r1 := ~a1 & b1
+++// r2 := ~a2 & b2
+++// r3 := ~a3 & b3
+++result_t test_mm_andnot_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_andnot_ps(a, b);
+++ // now for the assertion...
+++ const uint32_t *ia = (const uint32_t *) &a;
+++ const uint32_t *ib = (const uint32_t *) &b;
+++ uint32_t r[4];
+++ r[0] = ~ia[0] & ib[0];
+++ r[1] = ~ia[1] & ib[1];
+++ r[2] = ~ia[2] & ib[2];
+++ r[3] = ~ia[3] & ib[3];
+++ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+++ result_t res = TEST_FAIL;
+++ res = VALIDATE_INT32_M128(*(const __m128i *) &c, r);
+++ if (res) {
+++ res = VALIDATE_INT32_M128(ret, r);
+++ }
+++ return res;
+++}
+++
+++result_t test_mm_avg_pu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+++ uint16_t d[4];
+++ d[0] = (_a[0] + _b[0] + 1) >> 1;
+++ d[1] = (_a[1] + _b[1] + 1) >> 1;
+++ d[2] = (_a[2] + _b[2] + 1) >> 1;
+++ d[3] = (_a[3] + _b[3] + 1) >> 1;
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_avg_pu16(a, b);
+++
+++ return VALIDATE_UINT16_M64(c, d);
+++}
+++
+++result_t test_mm_avg_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ uint8_t d[8];
+++ d[0] = (_a[0] + _b[0] + 1) >> 1;
+++ d[1] = (_a[1] + _b[1] + 1) >> 1;
+++ d[2] = (_a[2] + _b[2] + 1) >> 1;
+++ d[3] = (_a[3] + _b[3] + 1) >> 1;
+++ d[4] = (_a[4] + _b[4] + 1) >> 1;
+++ d[5] = (_a[5] + _b[5] + 1) >> 1;
+++ d[6] = (_a[6] + _b[6] + 1) >> 1;
+++ d[7] = (_a[7] + _b[7] + 1) >> 1;
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_avg_pu8(a, b);
+++
+++ return VALIDATE_UINT8_M64(c, d);
+++}
+++
+++result_t test_mm_cmpeq_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] == _b[0] ? -1 : 0;
+++ result[1] = _a[1] == _b[1] ? -1 : 0;
+++ result[2] = _a[2] == _b[2] ? -1 : 0;
+++ result[3] = _a[3] == _b[3] ? -1 : 0;
+++
+++ __m128 ret = _mm_cmpeq_ps(a, b);
+++ __m128i iret = *(const __m128i *) &ret;
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmpeq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _a[0] == _b[0] ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpeq_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpge_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] >= _b[0] ? -1 : 0;
+++ result[1] = _a[1] >= _b[1] ? -1 : 0;
+++ result[2] = _a[2] >= _b[2] ? -1 : 0;
+++ result[3] = _a[3] >= _b[3] ? -1 : 0;
+++
+++ __m128 ret = _mm_cmpge_ps(a, b);
+++ __m128i iret = *(const __m128i *) &ret;
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmpge_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _a[0] >= _b[0] ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpge_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpgt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] > _b[0] ? -1 : 0;
+++ result[1] = _a[1] > _b[1] ? -1 : 0;
+++ result[2] = _a[2] > _b[2] ? -1 : 0;
+++ result[3] = _a[3] > _b[3] ? -1 : 0;
+++
+++ __m128 ret = _mm_cmpgt_ps(a, b);
+++ __m128i iret = *(const __m128i *) &ret;
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmpgt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _a[0] > _b[0] ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpgt_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmple_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] <= _b[0] ? -1 : 0;
+++ result[1] = _a[1] <= _b[1] ? -1 : 0;
+++ result[2] = _a[2] <= _b[2] ? -1 : 0;
+++ result[3] = _a[3] <= _b[3] ? -1 : 0;
+++
+++ __m128 ret = _mm_cmple_ps(a, b);
+++ __m128i iret = *(const __m128i *) &ret;
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmple_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _a[0] <= _b[0] ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmple_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmplt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] < _b[0] ? -1 : 0;
+++ result[1] = _a[1] < _b[1] ? -1 : 0;
+++ result[2] = _a[2] < _b[2] ? -1 : 0;
+++ result[3] = _a[3] < _b[3] ? -1 : 0;
+++
+++ __m128 ret = _mm_cmplt_ps(a, b);
+++ __m128i iret = *(const __m128i *) &ret;
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmplt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _a[0] < _b[0] ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmplt_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpneq_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] != _b[0] ? -1 : 0;
+++ result[1] = _a[1] != _b[1] ? -1 : 0;
+++ result[2] = _a[2] != _b[2] ? -1 : 0;
+++ result[3] = _a[3] != _b[3] ? -1 : 0;
+++
+++ __m128 ret = _mm_cmpneq_ps(a, b);
+++ __m128i iret = *(const __m128i *) &ret;
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmpneq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _a[0] != _b[0] ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpneq_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpnge_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] >= _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = !(_a[1] >= _b[1]) ? ALL_BIT_1_32 : 0;
+++ result[2] = !(_a[2] >= _b[2]) ? ALL_BIT_1_32 : 0;
+++ result[3] = !(_a[3] >= _b[3]) ? ALL_BIT_1_32 : 0;
+++
+++ __m128 ret = _mm_cmpnge_ps(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpnge_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] >= _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpnge_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpngt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] > _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = !(_a[1] > _b[1]) ? ALL_BIT_1_32 : 0;
+++ result[2] = !(_a[2] > _b[2]) ? ALL_BIT_1_32 : 0;
+++ result[3] = !(_a[3] > _b[3]) ? ALL_BIT_1_32 : 0;
+++
+++ __m128 ret = _mm_cmpngt_ps(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpngt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] > _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpngt_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpnle_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] <= _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = !(_a[1] <= _b[1]) ? ALL_BIT_1_32 : 0;
+++ result[2] = !(_a[2] <= _b[2]) ? ALL_BIT_1_32 : 0;
+++ result[3] = !(_a[3] <= _b[3]) ? ALL_BIT_1_32 : 0;
+++
+++ __m128 ret = _mm_cmpnle_ps(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpnle_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] <= _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpnle_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpnlt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] < _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = !(_a[1] < _b[1]) ? ALL_BIT_1_32 : 0;
+++ result[2] = !(_a[2] < _b[2]) ? ALL_BIT_1_32 : 0;
+++ result[3] = !(_a[3] < _b[3]) ? ALL_BIT_1_32 : 0;
+++
+++ __m128 ret = _mm_cmpnlt_ps(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpnlt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = !(_a[0] < _b[0]) ? ALL_BIT_1_32 : 0;
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpnlt_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpord_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++
+++ for (uint32_t i = 0; i < 4; i++) {
+++ result[i] = cmp_noNaN(_a[i], _b[i]);
+++ }
+++
+++ __m128 ret = _mm_cmpord_ps(a, b);
+++
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpord_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = cmp_noNaN(_a[0], _b[0]);
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpord_ss(a, b);
+++
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpunord_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++
+++ for (uint32_t i = 0; i < 4; i++) {
+++ result[i] = cmp_hasNaN(_a[i], _b[i]);
+++ }
+++
+++ __m128 ret = _mm_cmpunord_ps(a, b);
+++
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_cmpunord_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = cmp_hasNaN(_a[0], _b[0]);
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_cmpunord_ss(a, b);
+++
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_comieq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comieq_ss correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result = comieq_ss(_a[0], _b[0]);
+++ int32_t ret = _mm_comieq_ss(a, b);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++#endif
+++}
+++
+++result_t test_mm_comige_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result = comige_ss(_a[0], _b[0]);
+++ int32_t ret = _mm_comige_ss(a, b);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_comigt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result = comigt_ss(_a[0], _b[0]);
+++ int32_t ret = _mm_comigt_ss(a, b);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_comile_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comile_ss correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result = comile_ss(_a[0], _b[0]);
+++ int32_t ret = _mm_comile_ss(a, b);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++#endif
+++}
+++
+++result_t test_mm_comilt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comilt_ss correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result = comilt_ss(_a[0], _b[0]);
+++
+++ int32_t ret = _mm_comilt_ss(a, b);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++#endif
+++}
+++
+++result_t test_mm_comineq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comineq_ss correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ int32_t result = comineq_ss(_a[0], _b[0]);
+++ int32_t ret = _mm_comineq_ss(a, b);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++#endif
+++}
+++
+++result_t test_mm_cvt_pi2ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++
+++ float dx = (float) _b[0];
+++ float dy = (float) _b[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m64 b = load_m64(_b);
+++ __m128 c = _mm_cvt_pi2ps(a, b);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvt_ps2pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int32_t d[2];
+++
+++ for (int idx = 0; idx < 2; idx++) {
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d[idx] = (int32_t) (bankersRounding(_a[idx]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d[idx] = (int32_t) (floorf(_a[idx]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d[idx] = (int32_t) (ceilf(_a[idx]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d[idx] = (int32_t) (_a[idx]);
+++ break;
+++ }
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ __m64 ret = _mm_cvt_ps2pi(a);
+++
+++ return VALIDATE_INT32_M64(ret, d);
+++}
+++
+++result_t test_mm_cvt_si2ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const int32_t b = *impl.mTestIntPointer2;
+++
+++ float dx = (float) b;
+++ float dy = _a[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_cvt_si2ss(a, b);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvt_ss2si(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int32_t d0;
+++
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d0 = (int32_t) (bankersRounding(_a[0]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d0 = (int32_t) (floorf(_a[0]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d0 = (int32_t) (ceilf(_a[0]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d0 = (int32_t) (_a[0]);
+++ break;
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ int32_t ret = _mm_cvt_ss2si(a);
+++ return ret == d0 ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtpi16_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++
+++ float dx = (float) _a[0];
+++ float dy = (float) _a[1];
+++ float dz = (float) _a[2];
+++ float dw = (float) _a[3];
+++
+++ __m64 a = load_m64(_a);
+++ __m128 c = _mm_cvtpi16_ps(a);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtpi32_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ float dx = (float) _b[0];
+++ float dy = (float) _b[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m64 b = load_m64(_b);
+++ __m128 c = _mm_cvtpi32_ps(a, b);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtpi32x2_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ float dx = (float) _a[0];
+++ float dy = (float) _a[1];
+++ float dz = (float) _b[0];
+++ float dw = (float) _b[1];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m128 c = _mm_cvtpi32x2_ps(a, b);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtpi8_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++
+++ float dx = (float) _a[0];
+++ float dy = (float) _a[1];
+++ float dz = (float) _a[2];
+++ float dw = (float) _a[3];
+++
+++ __m64 a = load_m64(_a);
+++ __m128 c = _mm_cvtpi8_ps(a);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtps_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int16_t rnd[4];
+++
+++ for (int i = 0; i < 4; i++) {
+++ if ((float) INT16_MAX <= _a[i] && _a[i] <= (float) INT32_MAX) {
+++ rnd[i] = INT16_MAX;
+++ } else if (INT16_MIN < _a[i] && _a[i] < INT16_MAX) {
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ rnd[i] = (int16_t) bankersRounding(_a[i]);
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ rnd[i] = (int16_t) floorf(_a[i]);
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ rnd[i] = (int16_t) ceilf(_a[i]);
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ rnd[i] = (int16_t) _a[i];
+++ break;
+++ }
+++ } else {
+++ rnd[i] = INT16_MIN;
+++ }
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ __m64 ret = _mm_cvtps_pi16(a);
+++ return VALIDATE_INT16_M64(ret, rnd);
+++}
+++
+++result_t test_mm_cvtps_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int32_t d[2];
+++
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d[0] = (int32_t) bankersRounding(_a[0]);
+++ d[1] = (int32_t) bankersRounding(_a[1]);
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d[0] = (int32_t) floorf(_a[0]);
+++ d[1] = (int32_t) floorf(_a[1]);
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d[0] = (int32_t) ceilf(_a[0]);
+++ d[1] = (int32_t) ceilf(_a[1]);
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++ break;
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ __m64 ret = _mm_cvtps_pi32(a);
+++
+++ return VALIDATE_INT32_M64(ret, d);
+++}
+++
+++result_t test_mm_cvtps_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int8_t rnd[8] = {};
+++
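+++ // Only four floats are converted; the upper four bytes of the expected result
+++ // stay zero. In-range inputs follow the selected rounding mode, inputs from
+++ // INT8_MAX up to INT32_MAX saturate to INT8_MAX, everything else to INT8_MIN.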
+++ for (int i = 0; i < 4; i++) {
+++ if ((float) INT8_MAX <= _a[i] && _a[i] <= (float) INT32_MAX) {
+++ rnd[i] = INT8_MAX;
+++ } else if (INT8_MIN < _a[i] && _a[i] < INT8_MAX) {
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ rnd[i] = (int8_t) bankersRounding(_a[i]);
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ rnd[i] = (int8_t) floorf(_a[i]);
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ rnd[i] = (int8_t) ceilf(_a[i]);
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ rnd[i] = (int8_t) _a[i];
+++ break;
+++ }
+++ } else {
+++ rnd[i] = INT8_MIN;
+++ }
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ __m64 ret = _mm_cvtps_pi8(a);
+++ return VALIDATE_INT8_M64(ret, rnd);
+++}
+++
+++result_t test_mm_cvtpu16_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++
+++ float dx = (float) _a[0];
+++ float dy = (float) _a[1];
+++ float dz = (float) _a[2];
+++ float dw = (float) _a[3];
+++
+++ __m64 a = load_m64(_a);
+++ __m128 c = _mm_cvtpu16_ps(a);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtpu8_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++
+++ float dx = (float) _a[0];
+++ float dy = (float) _a[1];
+++ float dz = (float) _a[2];
+++ float dw = (float) _a[3];
+++
+++ __m64 a = load_m64(_a);
+++ __m128 c = _mm_cvtpu8_ps(a);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtsi32_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const int32_t b = *impl.mTestIntPointer2;
+++
+++ float dx = (float) b;
+++ float dy = _a[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_cvtsi32_ss(a, b);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtsi64_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const int64_t b = *(int64_t *) impl.mTestIntPointer2;
+++
+++ float dx = (float) b;
+++ float dy = _a[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_cvtsi64_ss(a, b);
+++
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_cvtss_f32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ float f = _a[0];
+++
+++ __m128 a = load_m128(_a);
+++ float c = _mm_cvtss_f32(a);
+++
+++ return f == c ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtss_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ int32_t d0;
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d0 = (int32_t) (bankersRounding(_a[0]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d0 = (int32_t) (floorf(_a[0]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d0 = (int32_t) (ceilf(_a[0]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d0 = (int32_t) (_a[0]);
+++ break;
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ int32_t ret = _mm_cvtss_si32(a);
+++
+++ return ret == d0 ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtss_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ int64_t d0;
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d0 = (int64_t) (bankersRounding(_a[0]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d0 = (int64_t) (floorf(_a[0]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d0 = (int64_t) (ceilf(_a[0]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d0 = (int64_t) (_a[0]);
+++ break;
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ int64_t ret = _mm_cvtss_si64(a);
+++
+++ return ret == d0 ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtt_ps2pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int32_t d[2];
+++
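+++ // The 'cvtt' (truncating) conversions round toward zero regardless of the
+++ // current rounding mode, which matches a plain C float-to-int cast.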
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++
+++ __m128 a = load_m128(_a);
+++ __m64 ret = _mm_cvtt_ps2pi(a);
+++
+++ return VALIDATE_INT32_M64(ret, d);
+++}
+++
+++result_t test_mm_cvtt_ss2si(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ __m128 a = load_m128(_a);
+++ int ret = _mm_cvtt_ss2si(a);
+++
+++ return ret == (int32_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvttps_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ int32_t d[2];
+++
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++
+++ __m128 a = load_m128(_a);
+++ __m64 ret = _mm_cvttps_pi32(a);
+++
+++ return VALIDATE_INT32_M64(ret, d);
+++}
+++
+++result_t test_mm_cvttss_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ __m128 a = load_m128(_a);
+++ int ret = _mm_cvttss_si32(a);
+++
+++ return ret == (int32_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvttss_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ __m128 a = load_m128(_a);
+++ int64_t ret = _mm_cvttss_si64(a);
+++
+++ return ret == (int64_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_div_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float f0 = _a[0] / _b[0];
+++ float f1 = _a[1] / _b[1];
+++ float f2 = _a[2] / _b[2];
+++ float f3 = _a[3] / _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_div_ps(a, b);
+++
+++#if defined(__arm__) && !defined(__aarch64__) && !defined(_M_ARM64)
+++ // The 32-bit ARM implementation of "_mm_div_ps()" does not use a "DIV"
+++ // instruction directly; instead it approximates the quotient with the
+++ // "FRECPE" reciprocal-estimate instruction, so the result is less precise
+++ // than on other architectures.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.00001f);
+++#else
+++ return validateFloat(c, f0, f1, f2, f3);
+++#endif
+++}
+++
+++result_t test_mm_div_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float d0 = _a[0] / _b[0];
+++ float d1 = _a[1];
+++ float d2 = _a[2];
+++ float d3 = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_div_ss(a, b);
+++
+++#if defined(__arm__) && !defined(__aarch64__) && !defined(_M_ARM64)
+++ // The 32-bit ARM implementation of "_mm_div_ss()" does not use a "DIV"
+++ // instruction directly; instead it approximates the quotient with the
+++ // "FRECPE" reciprocal-estimate instruction, so the result is less precise
+++ // than on other architectures.
+++ return validateFloatError(c, d0, d1, d2, d3, 0.00001f);
+++#else
+++ return validateFloat(c, d0, d1, d2, d3);
+++#endif
+++}
+++
+++result_t test_mm_extract_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME: GCC has a bug in the "_mm_extract_pi16" intrinsic. This test will
+++ // be enabled once GCC fixes it.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98495 for more
+++ // information.
+++#if defined(__clang__) || defined(_MSC_VER)
+++ uint64_t *_a = (uint64_t *) impl.mTestIntPointer1;
+++ const int idx = iter & 0x3;
+++
+++ __m64 a = load_m64(_a);
+++ int c;
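+++ // The lane index of _mm_extract_pi16 must be an immediate, so each of the
+++ // four indices is exercised through its own call.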
+++ switch (idx) {
+++ case 0:
+++ c = _mm_extract_pi16(a, 0);
+++ break;
+++ case 1:
+++ c = _mm_extract_pi16(a, 1);
+++ break;
+++ case 2:
+++ c = _mm_extract_pi16(a, 2);
+++ break;
+++ case 3:
+++ c = _mm_extract_pi16(a, 3);
+++ break;
+++ }
+++
+++ ASSERT_RETURN((uint64_t) c == ((*_a >> (idx * 16)) & 0xFFFF));
+++ ASSERT_RETURN(0 == ((uint64_t) c & 0xFFFF0000));
+++ return TEST_SUCCESS;
+++#else
+++ return TEST_UNIMPL;
+++#endif
+++}
+++
+++result_t test_mm_malloc(const SSE2NEONTestImpl &impl, uint32_t iter);
+++result_t test_mm_free(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ /* test_mm_malloc already exercises _mm_free, so no separate check is needed. */
+++ return test_mm_malloc(impl, iter);
+++}
+++
+++result_t test_mm_get_flush_zero_mode(const SSE2NEONTestImpl &impl,
+++ uint32_t iter)
+++{
+++ int res_flush_zero_on, res_flush_zero_off;
+++ _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
+++ res_flush_zero_on = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_ON;
+++ _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_OFF);
+++ res_flush_zero_off = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_OFF;
+++
+++ return (res_flush_zero_on && res_flush_zero_off) ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_get_rounding_mode(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int res_toward_zero, res_to_neg_inf, res_to_pos_inf, res_nearest;
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ res_toward_zero = _MM_GET_ROUNDING_MODE() == _MM_ROUND_TOWARD_ZERO ? 1 : 0;
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ res_to_neg_inf = _MM_GET_ROUNDING_MODE() == _MM_ROUND_DOWN ? 1 : 0;
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ res_to_pos_inf = _MM_GET_ROUNDING_MODE() == _MM_ROUND_UP ? 1 : 0;
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ res_nearest = _MM_GET_ROUNDING_MODE() == _MM_ROUND_NEAREST ? 1 : 0;
+++
+++ if (res_toward_zero && res_to_neg_inf && res_to_pos_inf && res_nearest) {
+++ return TEST_SUCCESS;
+++ } else {
+++ return TEST_FAIL;
+++ }
+++}
+++
+++result_t test_mm_getcsr(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // store original csr value for post test restoring
+++ unsigned int originalCsr = _mm_getcsr();
+++
+++ unsigned int roundings[] = {_MM_ROUND_TOWARD_ZERO, _MM_ROUND_DOWN,
+++ _MM_ROUND_UP, _MM_ROUND_NEAREST};
+++ for (size_t i = 0; i < sizeof(roundings) / sizeof(roundings[0]); i++) {
+++ _mm_setcsr(_mm_getcsr() | roundings[i]);
+++ if ((_mm_getcsr() & roundings[i]) != roundings[i]) {
+++ return TEST_FAIL;
+++ }
+++ }
+++
+++ // restore original csr value for remaining tests
+++ _mm_setcsr(originalCsr);
+++
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_insert_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t insert = (int16_t) impl.mTestInts[iter];
+++ __m64 a;
+++ __m64 b;
+++
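+++ // The lane index of _mm_insert_pi16 must be an immediate, so the macro below
+++ // is instantiated once per index via IMM_4_ITER.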
+++#define TEST_IMPL(IDX) \
+++ int16_t d##IDX[4]; \
+++ for (int i = 0; i < 4; i++) { \
+++ d##IDX[i] = _a[i]; \
+++ } \
+++ d##IDX[IDX] = insert; \
+++ \
+++ a = load_m64(_a); \
+++ b = _mm_insert_pi16(a, insert, IDX); \
+++ CHECK_RESULT(VALIDATE_INT16_M64(b, d##IDX))
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_load_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *addr = impl.mTestFloatPointer1;
+++
+++ __m128 ret = _mm_load_ps(addr);
+++
+++ return validateFloat(ret, addr[0], addr[1], addr[2], addr[3]);
+++}
+++
+++result_t test_mm_load_ps1(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *addr = impl.mTestFloatPointer1;
+++
+++ __m128 ret = _mm_load_ps1(addr);
+++
+++ return validateFloat(ret, addr[0], addr[0], addr[0], addr[0]);
+++}
+++
+++result_t test_mm_load_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *addr = impl.mTestFloatPointer1;
+++
+++ __m128 ret = _mm_load_ss(addr);
+++
+++ return validateFloat(ret, addr[0], 0, 0, 0);
+++}
+++
+++result_t test_mm_load1_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p = impl.mTestFloatPointer1;
+++ __m128 a = _mm_load1_ps(p);
+++ return validateFloat(a, p[0], p[0], p[0], p[0]);
+++}
+++
+++result_t test_mm_loadh_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p1 = impl.mTestFloatPointer1;
+++ const float *p2 = impl.mTestFloatPointer2;
+++ const __m64 *b = (const __m64 *) p2;
+++ __m128 a = _mm_load_ps(p1);
+++ __m128 c = _mm_loadh_pi(a, b);
+++
+++ return validateFloat(c, p1[0], p1[1], p2[0], p2[1]);
+++}
+++
+++result_t test_mm_loadl_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p1 = impl.mTestFloatPointer1;
+++ const float *p2 = impl.mTestFloatPointer2;
+++ __m128 a = _mm_load_ps(p1);
+++ const __m64 *b = (const __m64 *) p2;
+++ __m128 c = _mm_loadl_pi(a, b);
+++
+++ return validateFloat(c, p2[0], p2[1], p1[2], p1[3]);
+++}
+++
+++result_t test_mm_loadr_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *addr = impl.mTestFloatPointer1;
+++
+++ __m128 ret = _mm_loadr_ps(addr);
+++
+++ return validateFloat(ret, addr[3], addr[2], addr[1], addr[0]);
+++}
+++
+++result_t test_mm_loadu_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *addr = impl.mTestFloatPointer1;
+++
+++ __m128 ret = _mm_loadu_ps(addr);
+++
+++ return validateFloat(ret, addr[0], addr[1], addr[2], addr[3]);
+++}
+++
+++result_t test_mm_loadu_si16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // GCC versions before 11 do not implement the intrinsic function
+++ // _mm_loadu_si16. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483
+++ // for more information.
+++#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+++ return TEST_UNIMPL;
+++#else
+++ const int16_t *addr = (const int16_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_loadu_si16((const void *) addr);
+++
+++ return validateInt16(ret, addr[0], 0, 0, 0, 0, 0, 0, 0);
+++#endif
+++}
+++
+++result_t test_mm_loadu_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // Versions of GCC prior to 9 do not implement the intrinsic function
+++ // _mm_loadu_si64. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78782
+++ // for more information.
+++#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 9)
+++ return TEST_UNIMPL;
+++#else
+++ const int64_t *addr = (const int64_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_loadu_si64((const void *) addr);
+++
+++ return validateInt64(ret, addr[0], 0);
+++#endif
+++}
+++
+++result_t test_mm_malloc(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const size_t *a = (const size_t *) impl.mTestIntPointer1;
+++ const size_t *b = (const size_t *) impl.mTestIntPointer2;
+++ size_t size = *a % (1024 * 16) + 1;
+++ size_t align = 2 << (*b % 5);
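+++ // Derive a pseudo-random size of at most 16 KiB and a power-of-two alignment
+++ // between 2 and 32 from the test inputs.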
+++
+++ void *p = _mm_malloc(size, align);
+++ if (!p)
+++ return TEST_FAIL;
+++ result_t res = (((uintptr_t) p % align) == 0) ? TEST_SUCCESS : TEST_FAIL;
+++ _mm_free(p);
+++ return res;
+++}
+++
+++result_t test_mm_maskmove_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_mask = (const uint8_t *) impl.mTestIntPointer2;
+++ char mem_addr[16];
+++
+++ const __m64 *a = (const __m64 *) _a;
+++ const __m64 *mask = (const __m64 *) _mask;
+++ _mm_maskmove_si64(*a, *mask, (char *) mem_addr);
+++
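+++ // Only bytes whose mask byte has its most significant bit set are stored, so
+++ // only those positions of mem_addr are checked.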
+++ for (int i = 0; i < 8; i++) {
+++ if (_mask[i] >> 7) {
+++ ASSERT_RETURN(_a[i] == (uint8_t) mem_addr[i]);
+++ }
+++ }
+++
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_m_maskmovq(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_maskmove_si64(impl, iter);
+++}
+++
+++result_t test_mm_max_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t c[4];
+++
+++ c[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ c[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ c[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ c[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_max_pi16(a, b);
+++ return VALIDATE_INT16_M64(ret, c);
+++}
+++
+++result_t test_mm_max_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float c[4];
+++
+++ c[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ c[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ c[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ c[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 ret = _mm_max_ps(a, b);
+++ return validateFloat(ret, c[0], c[1], c[2], c[3]);
+++}
+++
+++result_t test_mm_max_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ uint8_t c[8];
+++
+++ c[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ c[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ c[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ c[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++ c[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+++ c[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+++ c[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+++ c[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_max_pu8(a, b);
+++ return VALIDATE_UINT8_M64(ret, c);
+++}
+++
+++result_t test_mm_max_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer1;
+++
+++ float f0 = _a[0] > _b[0] ? _a[0] : _b[0];
+++ float f1 = _a[1];
+++ float f2 = _a[2];
+++ float f3 = _a[3];
+++
+++ __m128 a = _mm_load_ps(_a);
+++ __m128 b = _mm_load_ps(_b);
+++ __m128 c = _mm_max_ss(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_min_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t c[4];
+++
+++ c[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ c[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ c[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ c[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_min_pi16(a, b);
+++ return VALIDATE_INT16_M64(ret, c);
+++}
+++
+++result_t test_mm_min_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float c[4];
+++
+++ c[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ c[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ c[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ c[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 ret = _mm_min_ps(a, b);
+++ return validateFloat(ret, c[0], c[1], c[2], c[3]);
+++}
+++
+++result_t test_mm_min_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ uint8_t c[8];
+++
+++ c[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ c[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ c[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ c[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++ c[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+++ c[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+++ c[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+++ c[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_min_pu8(a, b);
+++ return VALIDATE_UINT8_M64(ret, c);
+++}
+++
+++result_t test_mm_min_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float c;
+++
+++ c = _a[0] < _b[0] ? _a[0] : _b[0];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 ret = _mm_min_ss(a, b);
+++
+++ return validateFloat(ret, c, _a[1], _a[2], _a[3]);
+++}
+++
+++result_t test_mm_move_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++
+++ float result[4];
+++ result[0] = _b[0];
+++ result[1] = _a[1];
+++ result[2] = _a[2];
+++ result[3] = _a[3];
+++
+++ __m128 ret = _mm_move_ss(a, b);
+++ return validateFloat(ret, result[0], result[1], result[2], result[3]);
+++}
+++
+++result_t test_mm_movehl_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float f0 = _b[2];
+++ float f1 = _b[3];
+++ float f2 = _a[2];
+++ float f3 = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 ret = _mm_movehl_ps(a, b);
+++
+++ return validateFloat(ret, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_movelh_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float f0 = _a[0];
+++ float f1 = _a[1];
+++ float f2 = _b[0];
+++ float f3 = _b[1];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 ret = _mm_movelh_ps(a, b);
+++
+++ return validateFloat(ret, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_movemask_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ unsigned int _c = 0;
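+++ // The expected mask collects the most significant bit of each of the eight
+++ // bytes.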
+++ for (int i = 0; i < 8; i++) {
+++ if (_a[i] & 0x80) {
+++ _c |= (1 << i);
+++ }
+++ }
+++
+++ const __m64 *a = (const __m64 *) _a;
+++ int c = _mm_movemask_pi8(*a);
+++
+++ ASSERT_RETURN((unsigned int) c == _c);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_movemask_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p = impl.mTestFloatPointer1;
+++ int ret = 0;
+++
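+++ // Reinterpret the floats as raw 32-bit values and collect the sign bit of
+++ // each lane into the expected mask.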
+++ const uint32_t *ip = (const uint32_t *) p;
+++ if (ip[0] & 0x80000000) {
+++ ret |= 1;
+++ }
+++ if (ip[1] & 0x80000000) {
+++ ret |= 2;
+++ }
+++ if (ip[2] & 0x80000000) {
+++ ret |= 4;
+++ }
+++ if (ip[3] & 0x80000000) {
+++ ret |= 8;
+++ }
+++ __m128 a = load_m128(p);
+++ int val = _mm_movemask_ps(a);
+++ return val == ret ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_mul_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float dx = _a[0] * _b[0];
+++ float dy = _a[1] * _b[1];
+++ float dz = _a[2] * _b[2];
+++ float dw = _a[3] * _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_mul_ps(a, b);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_mul_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float dx = _a[0] * _b[0];
+++ float dy = _a[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_mul_ss(a, b);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_mulhi_pu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+++ uint16_t d[4];
+++ for (uint32_t i = 0; i < 4; i++) {
+++ uint32_t m = (uint32_t) _a[i] * (uint32_t) _b[i];
+++ d[i] = (uint16_t) (m >> 16);
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_mulhi_pu16(a, b);
+++ return VALIDATE_UINT16_M64(c, d);
+++}
+++
+++result_t test_mm_or_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_or_ps(a, b);
+++ // Compute the expected result by OR-ing the raw 32-bit patterns.
+++ const uint32_t *ia = (const uint32_t *) &a;
+++ const uint32_t *ib = (const uint32_t *) &b;
+++ uint32_t r[4];
+++ r[0] = ia[0] | ib[0];
+++ r[1] = ia[1] | ib[1];
+++ r[2] = ia[2] | ib[2];
+++ r[3] = ia[3] | ib[3];
+++ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+++ result_t res = VALIDATE_INT32_M128(*(const __m128i *) &c, r);
+++ if (res) {
+++ res = VALIDATE_INT32_M128(ret, r);
+++ }
+++
+++ return res;
+++}
+++
+++result_t test_m_pavgb(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_avg_pu8(impl, iter);
+++}
+++
+++result_t test_m_pavgw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_avg_pu16(impl, iter);
+++}
+++
+++result_t test_m_pextrw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_extract_pi16(impl, iter);
+++}
+++
+++result_t test_m_pinsrw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_insert_pi16(impl, iter);
+++}
+++
+++result_t test_m_pmaxsw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_max_pi16(impl, iter);
+++}
+++
+++result_t test_m_pmaxub(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_max_pu8(impl, iter);
+++}
+++
+++result_t test_m_pminsw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_min_pi16(impl, iter);
+++}
+++
+++result_t test_m_pminub(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_min_pu8(impl, iter);
+++}
+++
+++result_t test_m_pmovmskb(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_movemask_pi8(impl, iter);
+++}
+++
+++result_t test_m_pmulhuw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_mulhi_pu16(impl, iter);
+++}
+++
+++result_t test_mm_prefetch(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ typedef struct {
+++ __m128 a;
+++ float r[4];
+++ } prefetch_test_t;
+++ prefetch_test_t test_vec[8] = {
+++ {
+++ _mm_set_ps(-0.1f, 0.2f, 0.3f, 0.4f),
+++ {0.4f, 0.3f, 0.2f, -0.1f},
+++ },
+++ {
+++ _mm_set_ps(0.5f, 0.6f, -0.7f, -0.8f),
+++ {-0.8f, -0.7f, 0.6f, 0.5f},
+++ },
+++ {
+++ _mm_set_ps(0.9f, 0.10f, -0.11f, 0.12f),
+++ {0.12f, -0.11f, 0.10f, 0.9f},
+++ },
+++ {
+++ _mm_set_ps(-1.1f, -2.1f, -3.1f, -4.1f),
+++ {-4.1f, -3.1f, -2.1f, -1.1f},
+++ },
+++ {
+++ _mm_set_ps(100.0f, -110.0f, 120.0f, -130.0f),
+++ {-130.0f, 120.0f, -110.0f, 100.0f},
+++ },
+++ {
+++ _mm_set_ps(200.5f, 210.5f, -220.5f, 230.5f),
+++ {995.74f, -93.04f, 144.03f, 902.50f},
+++ },
+++ {
+++ _mm_set_ps(10.11f, -11.12f, -12.13f, 13.14f),
+++ {13.14f, -12.13f, -11.12f, 10.11f},
+++ },
+++ {
+++ _mm_set_ps(10.1f, -20.2f, 30.3f, 40.4f),
+++ {40.4f, 30.3f, -20.2f, 10.1f},
+++ },
+++ };
+++
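+++ // _mm_prefetch has no architecturally visible result, so the test simply
+++ // issues every locality hint for each vector and succeeds if nothing faults.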
+++ for (size_t i = 0; i < (sizeof(test_vec) / (sizeof(test_vec[0]))); i++) {
+++ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_T0);
+++ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_T1);
+++ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_T2);
+++ _mm_prefetch(((const char *) &test_vec[i].a), _MM_HINT_NTA);
+++ }
+++
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_m_psadbw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ uint16_t d = 0;
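+++ // The expected value is the sum of absolute byte differences; the intrinsic
+++ // places it in the lowest 16-bit lane and zeroes the remaining lanes.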
+++ for (int i = 0; i < 8; i++) {
+++ d += abs(_a[i] - _b[i]);
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _m_psadbw(a, b);
+++ return validateUInt16(c, d, 0, 0, 0);
+++}
+++
+++result_t test_m_pshufw(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_shuffle_pi16(impl, iter);
+++}
+++
+++result_t test_mm_rcp_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ float dx = 1.0f / _a[0];
+++ float dy = 1.0f / _a[1];
+++ float dz = 1.0f / _a[2];
+++ float dw = 1.0f / _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_rcp_ps(a);
+++ return validateFloatError(c, dx, dy, dz, dw, 0.001f);
+++}
+++
+++result_t test_mm_rcp_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ float dx = 1.0f / _a[0];
+++ float dy = _a[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_rcp_ss(a);
+++ return validateFloatError(c, dx, dy, dz, dw, 0.001f);
+++}
+++
+++result_t test_mm_rsqrt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = (const float *) impl.mTestFloatPointer1;
+++
+++ float f0 = 1 / sqrtf(_a[0]);
+++ float f1 = 1 / sqrtf(_a[1]);
+++ float f2 = 1 / sqrtf(_a[2]);
+++ float f3 = 1 / sqrtf(_a[3]);
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_rsqrt_ps(a);
+++
+++ // Here, we ensure the error rate of "_mm_rsqrt_ps()" is under 0.1% compared
+++ // to the C implementation.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.001f);
+++}
+++
+++result_t test_mm_rsqrt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = (const float *) impl.mTestFloatPointer1;
+++
+++ float f0 = 1 / sqrtf(_a[0]);
+++ float f1 = _a[1];
+++ float f2 = _a[2];
+++ float f3 = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_rsqrt_ss(a);
+++
+++ // Here, we ensure the error rate of "_mm_rsqrt_ss()" is under 0.1% compared
+++ // to the C implementation.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.001f);
+++}
+++
+++result_t test_mm_sad_pu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ uint16_t d = 0;
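+++ // Same sum-of-absolute-differences reference as test_m_psadbw.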
+++ for (int i = 0; i < 8; i++) {
+++ d += abs(_a[i] - _b[i]);
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_sad_pu8(a, b);
+++ return validateUInt16(c, d, 0, 0, 0);
+++}
+++
+++result_t test_mm_set_flush_zero_mode(const SSE2NEONTestImpl &impl,
+++ uint32_t iter)
+++{
+++ // TODO:
+++ // Add this test once the behavior of denormal numbers under flush-to-zero
+++ // mode has been fully investigated.
+++ return TEST_UNIMPL;
+++}
+++
+++result_t test_mm_set_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float x = impl.mTestFloats[iter];
+++ float y = impl.mTestFloats[iter + 1];
+++ float z = impl.mTestFloats[iter + 2];
+++ float w = impl.mTestFloats[iter + 3];
+++ __m128 a = _mm_set_ps(x, y, z, w);
+++ return validateFloat(a, w, z, y, x);
+++}
+++
+++result_t test_mm_set_ps1(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float a = impl.mTestFloats[iter];
+++
+++ __m128 ret = _mm_set_ps1(a);
+++
+++ return validateFloat(ret, a, a, a, a);
+++}
+++
+++result_t test_mm_set_rounding_mode(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ result_t res_toward_zero, res_to_neg_inf, res_to_pos_inf, res_nearest;
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b, c;
+++
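+++ // For each rounding mode, rounding with _MM_FROUND_CUR_DIRECTION must match
+++ // rounding with the corresponding explicit rounding flag.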
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ c = _mm_round_ps(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+++ res_toward_zero = validate128(c, b);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ c = _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+++ res_to_neg_inf = validate128(c, b);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ c = _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+++ res_to_pos_inf = validate128(c, b);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ b = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ c = _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+++ res_nearest = validate128(c, b);
+++
+++ if (res_toward_zero == TEST_SUCCESS && res_to_neg_inf == TEST_SUCCESS &&
+++ res_to_pos_inf == TEST_SUCCESS && res_nearest == TEST_SUCCESS) {
+++ return TEST_SUCCESS;
+++ } else {
+++ return TEST_FAIL;
+++ }
+++}
+++
+++result_t test_mm_set_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float a = impl.mTestFloats[iter];
+++ __m128 c = _mm_set_ss(a);
+++ return validateFloat(c, a, 0, 0, 0);
+++}
+++
+++result_t test_mm_set1_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float w = impl.mTestFloats[iter];
+++ __m128 a = _mm_set1_ps(w);
+++ return validateFloat(a, w, w, w, w);
+++}
+++
+++result_t test_mm_setcsr(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_set_rounding_mode(impl, iter);
+++}
+++
+++result_t test_mm_setr_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float x = impl.mTestFloats[iter];
+++ float y = impl.mTestFloats[iter + 1];
+++ float z = impl.mTestFloats[iter + 2];
+++ float w = impl.mTestFloats[iter + 3];
+++
+++ __m128 ret = _mm_setr_ps(w, z, y, x);
+++
+++ return validateFloat(ret, w, z, y, x);
+++}
+++
+++result_t test_mm_setzero_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ __m128 a = _mm_setzero_ps();
+++ return validateFloat(a, 0, 0, 0, 0);
+++}
+++
+++result_t test_mm_sfence(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ /* FIXME: Assume that memory barriers always function as intended. */
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_shuffle_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m64 a;
+++ __m64 d;
+++
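+++ // The shuffle control must be an immediate; IMM_256_ITER instantiates the
+++ // check below for all 256 possible control values.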
+++#define TEST_IMPL(IDX) \
+++ a = load_m64(_a); \
+++ d = _mm_shuffle_pi16(a, IDX); \
+++ \
+++ int16_t _d##IDX[4]; \
+++ _d##IDX[0] = _a[IDX & 0x3]; \
+++ _d##IDX[1] = _a[(IDX >> 2) & 0x3]; \
+++ _d##IDX[2] = _a[(IDX >> 4) & 0x3]; \
+++ _d##IDX[3] = _a[(IDX >> 6) & 0x3]; \
+++ if (VALIDATE_INT16_M64(d, _d##IDX) != TEST_SUCCESS) { \
+++ return TEST_FAIL; \
+++ }
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++ // Note: NEON does not have a general-purpose shuffle instruction like SSE.
+++ // The implementation therefore provides specialized code paths for a number
+++ // of the most common shuffle permutations.
+++result_t test_mm_shuffle_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ result_t isValid = TEST_SUCCESS;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ // Test many permutations of the shuffle operation, including all
+++ // permutations which have an optimized/customized implementation
+++ __m128 ret;
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(0, 1, 2, 3));
+++ if (!validateFloat(ret, _a[3], _a[2], _b[1], _b[0])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
+++ if (!validateFloat(ret, _a[0], _a[1], _b[2], _b[3])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(0, 0, 1, 1));
+++ if (!validateFloat(ret, _a[1], _a[1], _b[0], _b[0])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 1, 0, 2));
+++ if (!validateFloat(ret, _a[2], _a[0], _b[1], _b[3])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 3, 2));
+++ if (!validateFloat(ret, _a[2], _a[3], _b[0], _b[1])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 3, 0, 1));
+++ if (!validateFloat(ret, _a[1], _a[0], _b[3], _b[2])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(0, 0, 2, 2));
+++ if (!validateFloat(ret, _a[2], _a[2], _b[0], _b[0])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 2, 0, 0));
+++ if (!validateFloat(ret, _a[0], _a[0], _b[2], _b[2])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 0, 2));
+++ if (!validateFloat(ret, _a[2], _a[0], _b[2], _b[3])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 1, 3, 3));
+++ if (!validateFloat(ret, _a[3], _a[3], _b[1], _b[1])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 1, 0));
+++ if (!validateFloat(ret, _a[0], _a[1], _b[0], _b[2])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 0, 1));
+++ if (!validateFloat(ret, _a[1], _a[0], _b[0], _b[2])) {
+++ isValid = TEST_FAIL;
+++ }
+++ ret = _mm_shuffle_ps(a, b, _MM_SHUFFLE(2, 0, 3, 2));
+++ if (!validateFloat(ret, _a[2], _a[3], _b[0], _b[2])) {
+++ isValid = TEST_FAIL;
+++ }
+++
+++ return isValid;
+++}
+++
+++result_t test_mm_sqrt_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = (const float *) impl.mTestFloatPointer1;
+++
+++ float f0 = sqrtf(_a[0]);
+++ float f1 = sqrtf(_a[1]);
+++ float f2 = sqrtf(_a[2]);
+++ float f3 = sqrtf(_a[3]);
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_sqrt_ps(a);
+++
+++#if defined(__arm__) && !defined(__arm64__) && !defined(_M_ARM64)
+++ // Here, we ensure the error rate of "_mm_sqrt_ps()" ARMv7-A implementation
+++ // is under 10^-4% compared to the C implementation.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.0001f);
+++#else
+++ // Here, we ensure the error rate of "_mm_sqrt_ps()" is under 10^-6%
+++ // compared to the C implementation.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.000001f);
+++#endif
+++}
+++
+++result_t test_mm_sqrt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = (const float *) impl.mTestFloatPointer1;
+++
+++ float f0 = sqrtf(_a[0]);
+++ float f1 = _a[1];
+++ float f2 = _a[2];
+++ float f3 = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_sqrt_ss(a);
+++
+++#if defined(__arm__) && !defined(__arm64__) && !defined(_M_ARM64)
+++ // Here, we ensure the error rate of the "_mm_sqrt_ss()" ARMv7-A
+++ // implementation is under 10^-4% compared to the C implementation.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.0001f);
+++#else
+++ // Here, we ensure the error rate of "_mm_sqrt_ss()" is under 10^-6%
+++ // compared to the C implementation.
+++ return validateFloatError(c, f0, f1, f2, f3, 0.000001f);
+++#endif
+++}
+++
+++result_t test_mm_store_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int32_t *p = impl.mTestIntPointer1;
+++ int32_t x = impl.mTestInts[iter];
+++ int32_t y = impl.mTestInts[iter + 1];
+++ int32_t z = impl.mTestInts[iter + 2];
+++ int32_t w = impl.mTestInts[iter + 3];
+++ __m128i a = _mm_set_epi32(x, y, z, w);
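+++ // Reinterpret the integer vector as __m128 so the stored bit pattern can be
+++ // compared exactly against the original integers.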
+++ _mm_store_ps((float *) p, *(const __m128 *) &a);
+++ ASSERT_RETURN(p[0] == w);
+++ ASSERT_RETURN(p[1] == z);
+++ ASSERT_RETURN(p[2] == y);
+++ ASSERT_RETURN(p[3] == x);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_store_ps1(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float *p = impl.mTestFloatPointer1;
+++ float d[4];
+++
+++ __m128 a = load_m128(p);
+++ _mm_store_ps1(d, a);
+++
+++ ASSERT_RETURN(d[0] == *p);
+++ ASSERT_RETURN(d[1] == *p);
+++ ASSERT_RETURN(d[2] == *p);
+++ ASSERT_RETURN(d[3] == *p);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_store_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float x = impl.mTestFloats[iter];
+++ float p[4];
+++
+++ __m128 a = _mm_set_ss(x);
+++ _mm_store_ss(p, a);
+++ ASSERT_RETURN(p[0] == x);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_store1_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float *p = impl.mTestFloatPointer1;
+++ float d[4];
+++
+++ __m128 a = load_m128(p);
+++ _mm_store1_ps(d, a);
+++
+++ ASSERT_RETURN(d[0] == *p);
+++ ASSERT_RETURN(d[1] == *p);
+++ ASSERT_RETURN(d[2] == *p);
+++ ASSERT_RETURN(d[3] == *p);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storeh_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p = impl.mTestFloatPointer1;
+++ float d[4] = {1.0f, 2.0f, 3.0f, 4.0f};
+++ __m128 a = _mm_load_ps(p);
+++ __m64 *b = (__m64 *) d;
+++
+++ _mm_storeh_pi(b, a);
+++ ASSERT_RETURN(d[0] == p[2]);
+++ ASSERT_RETURN(d[1] == p[3]);
+++ ASSERT_RETURN(d[2] == 3.0f);
+++ ASSERT_RETURN(d[3] == 4.0f);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storel_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p = impl.mTestFloatPointer1;
+++ float d[4] = {1.0f, 2.0f, 3.0f, 4.0f};
+++ __m128 a = _mm_load_ps(p);
+++ __m64 *b = (__m64 *) d;
+++
+++ _mm_storel_pi(b, a);
+++ ASSERT_RETURN(d[0] == p[0]);
+++ ASSERT_RETURN(d[1] == p[1]);
+++ ASSERT_RETURN(d[2] == 3.0f);
+++ ASSERT_RETURN(d[3] == 4.0f);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storer_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float *p = impl.mTestFloatPointer1;
+++ float d[4];
+++
+++ __m128 a = load_m128(p);
+++ _mm_storer_ps(d, a);
+++
+++ ASSERT_RETURN(d[0] == p[3]);
+++ ASSERT_RETURN(d[1] == p[2]);
+++ ASSERT_RETURN(d[2] == p[1]);
+++ ASSERT_RETURN(d[3] == p[0]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storeu_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float *_a = impl.mTestFloatPointer1;
+++ float f[4];
+++ __m128 a = _mm_load_ps(_a);
+++
+++ _mm_storeu_ps(f, a);
+++ return validateFloat(a, f[0], f[1], f[2], f[3]);
+++}
+++
+++result_t test_mm_storeu_si16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // GCC versions before 11 do not implement the intrinsic function
+++ // _mm_storeu_si16. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483
+++ // for more information.
+++#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+++ return TEST_UNIMPL;
+++#else
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i b;
+++ __m128i a = load_m128i(_a);
+++ _mm_storeu_si16(&b, a);
+++ int16_t *_b = (int16_t *) &b;
+++ int16_t *_c = (int16_t *) &a;
+++ return validateInt16(b, _c[0], _b[1], _b[2], _b[3], _b[4], _b[5], _b[6],
+++ _b[7]);
+++#endif
+++}
+++
+++result_t test_mm_storeu_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // Versions of GCC prior to 9 do not implement the intrinsic function
+++ // _mm_storeu_si64. Check https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87558
+++ // for more information.
+++#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ < 9)
+++ return TEST_UNIMPL;
+++#else
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i b;
+++ __m128i a = load_m128i(_a);
+++ _mm_storeu_si64(&b, a);
+++ int64_t *_b = (int64_t *) &b;
+++ int64_t *_c = (int64_t *) &a;
+++ return validateInt64(b, _c[0], _b[1]);
+++#endif
+++}
+++
+++result_t test_mm_stream_pi(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ __m64 a = load_m64(_a);
+++ __m64 p;
+++
+++ _mm_stream_pi(&p, a);
+++ return validateInt64(p, _a[0]);
+++}
+++
+++result_t test_mm_stream_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ __m128 a = load_m128(_a);
+++ alignas(16) float p[4];
+++
+++ _mm_stream_ps(p, a);
+++ ASSERT_RETURN(p[0] == _a[0]);
+++ ASSERT_RETURN(p[1] == _a[1]);
+++ ASSERT_RETURN(p[2] == _a[2]);
+++ ASSERT_RETURN(p[3] == _a[3]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_sub_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float dx = _a[0] - _b[0];
+++ float dy = _a[1] - _b[1];
+++ float dz = _a[2] - _b[2];
+++ float dw = _a[3] - _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_sub_ps(a, b);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_sub_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float dx = _a[0] - _b[0];
+++ float dy = _a[1];
+++ float dz = _a[2];
+++ float dw = _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_sub_ss(a, b);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_ucomieq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // _mm_ucomieq_ss is equal to _mm_comieq_ss
+++ return test_mm_comieq_ss(impl, iter);
+++}
+++
+++result_t test_mm_ucomige_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // _mm_ucomige_ss is equal to _mm_comige_ss
+++ return test_mm_comige_ss(impl, iter);
+++}
+++
+++result_t test_mm_ucomigt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // _mm_ucomigt_ss is equal to _mm_comigt_ss
+++ return test_mm_comigt_ss(impl, iter);
+++}
+++
+++result_t test_mm_ucomile_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // _mm_ucomile_ss is equal to _mm_comile_ss
+++ return test_mm_comile_ss(impl, iter);
+++}
+++
+++result_t test_mm_ucomilt_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // _mm_ucomilt_ss is equal to _mm_comilt_ss
+++ return test_mm_comilt_ss(impl, iter);
+++}
+++
+++result_t test_mm_ucomineq_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // _mm_ucomineq_ss is equal to _mm_comineq_ss
+++ return test_mm_comineq_ss(impl, iter);
+++}
+++
+++result_t test_mm_undefined_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ __m128 a = _mm_undefined_ps();
+++ a = _mm_xor_ps(a, a);
+++ return validateFloat(a, 0, 0, 0, 0);
+++}
+++
+++result_t test_mm_unpackhi_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float *_a = impl.mTestFloatPointer1;
+++ float *_b = impl.mTestFloatPointer1;
+++
+++ float f0 = _a[2];
+++ float f1 = _b[2];
+++ float f2 = _a[3];
+++ float f3 = _b[3];
+++
+++ __m128 a = _mm_load_ps(_a);
+++ __m128 b = _mm_load_ps(_b);
+++ __m128 c = _mm_unpackhi_ps(a, b);
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_unpacklo_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ float *_a = impl.mTestFloatPointer1;
+++ float *_b = impl.mTestFloatPointer1;
+++
+++ float f0 = _a[0];
+++ float f1 = _b[0];
+++ float f2 = _a[1];
+++ float f3 = _b[1];
+++
+++ __m128 a = _mm_load_ps(_a);
+++ __m128 b = _mm_load_ps(_b);
+++ __m128 c = _mm_unpacklo_ps(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_xor_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestFloatPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestFloatPointer2;
+++
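+++ // XOR the raw 32-bit patterns; the result is compared after reinterpreting
+++ // those patterns back as floats.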
+++ int32_t d0 = _a[0] ^ _b[0];
+++ int32_t d1 = _a[1] ^ _b[1];
+++ int32_t d2 = _a[2] ^ _b[2];
+++ int32_t d3 = _a[3] ^ _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_xor_ps(a, b);
+++
+++ return validateFloat(c, *((float *) &d0), *((float *) &d1),
+++ *((float *) &d2), *((float *) &d3));
+++}
+++
+++/* SSE2 */
+++result_t test_mm_add_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[8];
+++ d[0] = _a[0] + _b[0];
+++ d[1] = _a[1] + _b[1];
+++ d[2] = _a[2] + _b[2];
+++ d[3] = _a[3] + _b[3];
+++ d[4] = _a[4] + _b[4];
+++ d[5] = _a[5] + _b[5];
+++ d[6] = _a[6] + _b[6];
+++ d[7] = _a[7] + _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_add_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_add_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ int32_t d[4];
+++ d[0] = _a[0] + _b[0];
+++ d[1] = _a[1] + _b[1];
+++ d[2] = _a[2] + _b[2];
+++ d[3] = _a[3] + _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_add_epi32(a, b);
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_add_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t d0 = _a[0] + _b[0];
+++ int64_t d1 = _a[1] + _b[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_add_epi64(a, b);
+++
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_add_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t d[16];
+++ d[0] = _a[0] + _b[0];
+++ d[1] = _a[1] + _b[1];
+++ d[2] = _a[2] + _b[2];
+++ d[3] = _a[3] + _b[3];
+++ d[4] = _a[4] + _b[4];
+++ d[5] = _a[5] + _b[5];
+++ d[6] = _a[6] + _b[6];
+++ d[7] = _a[7] + _b[7];
+++ d[8] = _a[8] + _b[8];
+++ d[9] = _a[9] + _b[9];
+++ d[10] = _a[10] + _b[10];
+++ d[11] = _a[11] + _b[11];
+++ d[12] = _a[12] + _b[12];
+++ d[13] = _a[13] + _b[13];
+++ d[14] = _a[14] + _b[14];
+++ d[15] = _a[15] + _b[15];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_add_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_add_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] + _b[0];
+++ double d1 = _a[1] + _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_add_pd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_add_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] + _b[0];
+++ double d1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_add_sd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_add_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t d0 = _a[0] + _b[0];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_add_si64(a, b);
+++
+++ return validateInt64(c, d0);
+++}
+++
+++result_t test_mm_adds_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int32_t d[8];
+++ d[0] = (int32_t) _a[0] + (int32_t) _b[0];
+++ if (d[0] > 32767)
+++ d[0] = 32767;
+++ if (d[0] < -32768)
+++ d[0] = -32768;
+++ d[1] = (int32_t) _a[1] + (int32_t) _b[1];
+++ if (d[1] > 32767)
+++ d[1] = 32767;
+++ if (d[1] < -32768)
+++ d[1] = -32768;
+++ d[2] = (int32_t) _a[2] + (int32_t) _b[2];
+++ if (d[2] > 32767)
+++ d[2] = 32767;
+++ if (d[2] < -32768)
+++ d[2] = -32768;
+++ d[3] = (int32_t) _a[3] + (int32_t) _b[3];
+++ if (d[3] > 32767)
+++ d[3] = 32767;
+++ if (d[3] < -32768)
+++ d[3] = -32768;
+++ d[4] = (int32_t) _a[4] + (int32_t) _b[4];
+++ if (d[4] > 32767)
+++ d[4] = 32767;
+++ if (d[4] < -32768)
+++ d[4] = -32768;
+++ d[5] = (int32_t) _a[5] + (int32_t) _b[5];
+++ if (d[5] > 32767)
+++ d[5] = 32767;
+++ if (d[5] < -32768)
+++ d[5] = -32768;
+++ d[6] = (int32_t) _a[6] + (int32_t) _b[6];
+++ if (d[6] > 32767)
+++ d[6] = 32767;
+++ if (d[6] < -32768)
+++ d[6] = -32768;
+++ d[7] = (int32_t) _a[7] + (int32_t) _b[7];
+++ if (d[7] > 32767)
+++ d[7] = 32767;
+++ if (d[7] < -32768)
+++ d[7] = -32768;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ __m128i c = _mm_adds_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_adds_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[16];
+++ for (int i = 0; i < 16; i++) {
+++ d[i] = (int16_t) _a[i] + (int16_t) _b[i];
+++ if (d[i] > 127)
+++ d[i] = 127;
+++ if (d[i] < -128)
+++ d[i] = -128;
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_adds_epi8(a, b);
+++
+++ return VALIDATE_INT8_M128(c, (int8_t) d);
+++}
+++
+++result_t test_mm_adds_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint32_t max = 0xFFFF;
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+++
+++ uint16_t d[8];
+++ d[0] = (uint32_t) _a[0] + (uint32_t) _b[0] > max ? max : _a[0] + _b[0];
+++ d[1] = (uint32_t) _a[1] + (uint32_t) _b[1] > max ? max : _a[1] + _b[1];
+++ d[2] = (uint32_t) _a[2] + (uint32_t) _b[2] > max ? max : _a[2] + _b[2];
+++ d[3] = (uint32_t) _a[3] + (uint32_t) _b[3] > max ? max : _a[3] + _b[3];
+++ d[4] = (uint32_t) _a[4] + (uint32_t) _b[4] > max ? max : _a[4] + _b[4];
+++ d[5] = (uint32_t) _a[5] + (uint32_t) _b[5] > max ? max : _a[5] + _b[5];
+++ d[6] = (uint32_t) _a[6] + (uint32_t) _b[6] > max ? max : _a[6] + _b[6];
+++ d[7] = (uint32_t) _a[7] + (uint32_t) _b[7] > max ? max : _a[7] + _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_adds_epu16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_adds_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ uint8_t d[16];
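+++ // Unsigned saturating add: if the 8-bit sum wraps around (result smaller than
+++ // the first operand), the expected value is clamped to 255.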
+++ d[0] = (uint8_t) _a[0] + (uint8_t) _b[0];
+++ if (d[0] < (uint8_t) _a[0])
+++ d[0] = 255;
+++ d[1] = (uint8_t) _a[1] + (uint8_t) _b[1];
+++ if (d[1] < (uint8_t) _a[1])
+++ d[1] = 255;
+++ d[2] = (uint8_t) _a[2] + (uint8_t) _b[2];
+++ if (d[2] < (uint8_t) _a[2])
+++ d[2] = 255;
+++ d[3] = (uint8_t) _a[3] + (uint8_t) _b[3];
+++ if (d[3] < (uint8_t) _a[3])
+++ d[3] = 255;
+++ d[4] = (uint8_t) _a[4] + (uint8_t) _b[4];
+++ if (d[4] < (uint8_t) _a[4])
+++ d[4] = 255;
+++ d[5] = (uint8_t) _a[5] + (uint8_t) _b[5];
+++ if (d[5] < (uint8_t) _a[5])
+++ d[5] = 255;
+++ d[6] = (uint8_t) _a[6] + (uint8_t) _b[6];
+++ if (d[6] < (uint8_t) _a[6])
+++ d[6] = 255;
+++ d[7] = (uint8_t) _a[7] + (uint8_t) _b[7];
+++ if (d[7] < (uint8_t) _a[7])
+++ d[7] = 255;
+++ d[8] = (uint8_t) _a[8] + (uint8_t) _b[8];
+++ if (d[8] < (uint8_t) _a[8])
+++ d[8] = 255;
+++ d[9] = (uint8_t) _a[9] + (uint8_t) _b[9];
+++ if (d[9] < (uint8_t) _a[9])
+++ d[9] = 255;
+++ d[10] = (uint8_t) _a[10] + (uint8_t) _b[10];
+++ if (d[10] < (uint8_t) _a[10])
+++ d[10] = 255;
+++ d[11] = (uint8_t) _a[11] + (uint8_t) _b[11];
+++ if (d[11] < (uint8_t) _a[11])
+++ d[11] = 255;
+++ d[12] = (uint8_t) _a[12] + (uint8_t) _b[12];
+++ if (d[12] < (uint8_t) _a[12])
+++ d[12] = 255;
+++ d[13] = (uint8_t) _a[13] + (uint8_t) _b[13];
+++ if (d[13] < (uint8_t) _a[13])
+++ d[13] = 255;
+++ d[14] = (uint8_t) _a[14] + (uint8_t) _b[14];
+++ if (d[14] < (uint8_t) _a[14])
+++ d[14] = 255;
+++ d[15] = (uint8_t) _a[15] + (uint8_t) _b[15];
+++ if (d[15] < (uint8_t) _a[15])
+++ d[15] = 255;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_adds_epu8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_and_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestFloatPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestFloatPointer2;
+++
+++ int64_t d0 = _a[0] & _b[0];
+++ int64_t d1 = _a[1] & _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_and_pd(a, b);
+++
+++ return validateDouble(c, *((double *) &d0), *((double *) &d1));
+++}
+++
+++result_t test_mm_and_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128 fc = _mm_and_ps(*(const __m128 *) &a, *(const __m128 *) &b);
+++ __m128i c = *(const __m128i *) &fc;
+++ // Compute the expected result by AND-ing the raw 32-bit patterns.
+++ const uint32_t *ia = (const uint32_t *) &a;
+++ const uint32_t *ib = (const uint32_t *) &b;
+++ uint32_t r[4];
+++ r[0] = ia[0] & ib[0];
+++ r[1] = ia[1] & ib[1];
+++ r[2] = ia[2] & ib[2];
+++ r[3] = ia[3] & ib[3];
+++ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+++ result_t res = VALIDATE_INT32_M128(c, r);
+++ if (res) {
+++ res = VALIDATE_INT32_M128(ret, r);
+++ }
+++ return res;
+++}
+++
+++result_t test_mm_andnot_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_andnot_pd(a, b);
+++
+++ // AND the complement of 'a' with 'b'. Bitwise operations are not defined
+++ // for the float/double types, so the operands are reinterpreted as
+++ // uint64_t.
+++ const uint64_t *ia = (const uint64_t *) &a;
+++ const uint64_t *ib = (const uint64_t *) &b;
+++ uint64_t r0 = ~ia[0] & ib[0];
+++ uint64_t r1 = ~ia[1] & ib[1];
+++ return validateUInt64(*(const __m128i *) &c, r0, r1);
+++}
+++
+++result_t test_mm_andnot_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128 fc = _mm_andnot_ps(*(const __m128 *) &a, *(const __m128 *) &b);
+++ __m128i c = *(const __m128i *) &fc;
+++ // now for the assertion...
+++ const uint32_t *ia = (const uint32_t *) &a;
+++ const uint32_t *ib = (const uint32_t *) &b;
+++ uint32_t r[4];
+++ r[0] = ~ia[0] & ib[0];
+++ r[1] = ~ia[1] & ib[1];
+++ r[2] = ~ia[2] & ib[2];
+++ r[3] = ~ia[3] & ib[3];
+++ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+++ result_t res = VALIDATE_INT32_M128(c, r);
+++ if (res) {
+++ res = VALIDATE_INT32_M128(ret, r);
+++ }
+++ return res;
+++}
+++
+++result_t test_mm_avg_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ uint16_t d[8];
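+++ /* Rounding average: (a + b + 1) >> 1, computed in a wider unsigned type
+++ so the intermediate sum cannot overflow. */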
+++ d[0] = ((uint16_t) _a[0] + (uint16_t) _b[0] + 1) >> 1;
+++ d[1] = ((uint16_t) _a[1] + (uint16_t) _b[1] + 1) >> 1;
+++ d[2] = ((uint16_t) _a[2] + (uint16_t) _b[2] + 1) >> 1;
+++ d[3] = ((uint16_t) _a[3] + (uint16_t) _b[3] + 1) >> 1;
+++ d[4] = ((uint16_t) _a[4] + (uint16_t) _b[4] + 1) >> 1;
+++ d[5] = ((uint16_t) _a[5] + (uint16_t) _b[5] + 1) >> 1;
+++ d[6] = ((uint16_t) _a[6] + (uint16_t) _b[6] + 1) >> 1;
+++ d[7] = ((uint16_t) _a[7] + (uint16_t) _b[7] + 1) >> 1;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_avg_epu16(a, b);
+++ return VALIDATE_UINT16_M128(c, d);
+++}
+++
+++result_t test_mm_avg_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ uint8_t d[16];
+++ d[0] = ((uint8_t) _a[0] + (uint8_t) _b[0] + 1) >> 1;
+++ d[1] = ((uint8_t) _a[1] + (uint8_t) _b[1] + 1) >> 1;
+++ d[2] = ((uint8_t) _a[2] + (uint8_t) _b[2] + 1) >> 1;
+++ d[3] = ((uint8_t) _a[3] + (uint8_t) _b[3] + 1) >> 1;
+++ d[4] = ((uint8_t) _a[4] + (uint8_t) _b[4] + 1) >> 1;
+++ d[5] = ((uint8_t) _a[5] + (uint8_t) _b[5] + 1) >> 1;
+++ d[6] = ((uint8_t) _a[6] + (uint8_t) _b[6] + 1) >> 1;
+++ d[7] = ((uint8_t) _a[7] + (uint8_t) _b[7] + 1) >> 1;
+++ d[8] = ((uint8_t) _a[8] + (uint8_t) _b[8] + 1) >> 1;
+++ d[9] = ((uint8_t) _a[9] + (uint8_t) _b[9] + 1) >> 1;
+++ d[10] = ((uint8_t) _a[10] + (uint8_t) _b[10] + 1) >> 1;
+++ d[11] = ((uint8_t) _a[11] + (uint8_t) _b[11] + 1) >> 1;
+++ d[12] = ((uint8_t) _a[12] + (uint8_t) _b[12] + 1) >> 1;
+++ d[13] = ((uint8_t) _a[13] + (uint8_t) _b[13] + 1) >> 1;
+++ d[14] = ((uint8_t) _a[14] + (uint8_t) _b[14] + 1) >> 1;
+++ d[15] = ((uint8_t) _a[15] + (uint8_t) _b[15] + 1) >> 1;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_avg_epu8(a, b);
+++ return VALIDATE_UINT8_M128(c, d);
+++}
+++
+++result_t test_mm_bslli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_slli_si128(impl, iter);
+++}
+++
+++result_t test_mm_bsrli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_srli_si128(impl, iter);
+++}
+++
+++result_t test_mm_castpd_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const __m128d a = load_m128d(_a);
+++ const __m128 _c = load_m128(_a);
+++
+++ __m128 r = _mm_castpd_ps(a);
+++
+++ return validate128(r, _c);
+++}
+++
+++result_t test_mm_castpd_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const __m128d a = load_m128d(_a);
+++ const __m128i *_c = (const __m128i *) _a;
+++
+++ __m128i r = _mm_castpd_si128(a);
+++
+++ return validate128(r, *_c);
+++}
+++
+++result_t test_mm_castps_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const __m128 a = load_m128(_a);
+++ const __m128d *_c = (const __m128d *) _a;
+++
+++ __m128d r = _mm_castps_pd(a);
+++
+++ return validate128(r, *_c);
+++}
+++
+++result_t test_mm_castps_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++
+++ const __m128i *_c = (const __m128i *) _a;
+++
+++ const __m128 a = load_m128(_a);
+++ __m128i r = _mm_castps_si128(a);
+++
+++ return validate128(r, *_c);
+++}
+++
+++result_t test_mm_castsi128_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++
+++ const __m128d *_c = (const __m128d *) _a;
+++
+++ const __m128i a = load_m128i(_a);
+++ __m128d r = _mm_castsi128_pd(a);
+++
+++ return validate128(r, *_c);
+++}
+++
+++result_t test_mm_castsi128_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++
+++ const __m128 *_c = (const __m128 *) _a;
+++
+++ const __m128i a = load_m128i(_a);
+++ __m128 r = _mm_castsi128_ps(a);
+++
+++ return validate128(r, *_c);
+++}
+++
+++result_t test_mm_clflush(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ /* FIXME: Assume that we have portable mechanisms to flush cache. */
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_cmpeq_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[8];
+++ d[0] = (_a[0] == _b[0]) ? ~UINT16_C(0) : 0x0;
+++ d[1] = (_a[1] == _b[1]) ? ~UINT16_C(0) : 0x0;
+++ d[2] = (_a[2] == _b[2]) ? ~UINT16_C(0) : 0x0;
+++ d[3] = (_a[3] == _b[3]) ? ~UINT16_C(0) : 0x0;
+++ d[4] = (_a[4] == _b[4]) ? ~UINT16_C(0) : 0x0;
+++ d[5] = (_a[5] == _b[5]) ? ~UINT16_C(0) : 0x0;
+++ d[6] = (_a[6] == _b[6]) ? ~UINT16_C(0) : 0x0;
+++ d[7] = (_a[7] == _b[7]) ? ~UINT16_C(0) : 0x0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmpeq_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_cmpeq_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++
+++ int32_t d[4];
+++ d[0] = (_a[0] == _b[0]) ? ~UINT32_C(0) : 0x0;
+++ d[1] = (_a[1] == _b[1]) ? ~UINT32_C(0) : 0x0;
+++ d[2] = (_a[2] == _b[2]) ? ~UINT32_C(0) : 0x0;
+++ d[3] = (_a[3] == _b[3]) ? ~UINT32_C(0) : 0x0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmpeq_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_cmpeq_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t d[16];
+++ d[0] = (_a[0] == _b[0]) ? ~UINT8_C(0) : 0x00;
+++ d[1] = (_a[1] == _b[1]) ? ~UINT8_C(0) : 0x00;
+++ d[2] = (_a[2] == _b[2]) ? ~UINT8_C(0) : 0x00;
+++ d[3] = (_a[3] == _b[3]) ? ~UINT8_C(0) : 0x00;
+++ d[4] = (_a[4] == _b[4]) ? ~UINT8_C(0) : 0x00;
+++ d[5] = (_a[5] == _b[5]) ? ~UINT8_C(0) : 0x00;
+++ d[6] = (_a[6] == _b[6]) ? ~UINT8_C(0) : 0x00;
+++ d[7] = (_a[7] == _b[7]) ? ~UINT8_C(0) : 0x00;
+++ d[8] = (_a[8] == _b[8]) ? ~UINT8_C(0) : 0x00;
+++ d[9] = (_a[9] == _b[9]) ? ~UINT8_C(0) : 0x00;
+++ d[10] = (_a[10] == _b[10]) ? ~UINT8_C(0) : 0x00;
+++ d[11] = (_a[11] == _b[11]) ? ~UINT8_C(0) : 0x00;
+++ d[12] = (_a[12] == _b[12]) ? ~UINT8_C(0) : 0x00;
+++ d[13] = (_a[13] == _b[13]) ? ~UINT8_C(0) : 0x00;
+++ d[14] = (_a[14] == _b[14]) ? ~UINT8_C(0) : 0x00;
+++ d[15] = (_a[15] == _b[15]) ? ~UINT8_C(0) : 0x00;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmpeq_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_cmpeq_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] == _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = (_a[1] == _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpeq_pd(a, b);
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpeq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ const uint64_t d0 = (_a[0] == _b[0]) ? ~UINT64_C(0) : 0;
+++ const uint64_t d1 = ((const uint64_t *) _a)[1];
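+++ /* Scalar (_sd) compares only affect lane 0; lane 1 of the result is the
+++ upper element of 'a' passed through unchanged, hence d1 copies it. */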
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpeq_sd(a, b);
+++
+++ return validateDouble(c, *(const double *) &d0, *(const double *) &d1);
+++}
+++
+++result_t test_mm_cmpge_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = (_a[1] >= _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpge_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpge_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpge_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpgt_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ uint16_t d[8];
+++ d[0] = _a[0] > _b[0] ? ~UINT16_C(0) : 0;
+++ d[1] = _a[1] > _b[1] ? ~UINT16_C(0) : 0;
+++ d[2] = _a[2] > _b[2] ? ~UINT16_C(0) : 0;
+++ d[3] = _a[3] > _b[3] ? ~UINT16_C(0) : 0;
+++ d[4] = _a[4] > _b[4] ? ~UINT16_C(0) : 0;
+++ d[5] = _a[5] > _b[5] ? ~UINT16_C(0) : 0;
+++ d[6] = _a[6] > _b[6] ? ~UINT16_C(0) : 0;
+++ d[7] = _a[7] > _b[7] ? ~UINT16_C(0) : 0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmpgt_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_cmpgt_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ int32_t result[4];
+++
+++ result[0] = _a[0] > _b[0] ? -1 : 0;
+++ result[1] = _a[1] > _b[1] ? -1 : 0;
+++ result[2] = _a[2] > _b[2] ? -1 : 0;
+++ result[3] = _a[3] > _b[3] ? -1 : 0;
+++
+++ __m128i iret = _mm_cmpgt_epi32(a, b);
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmpgt_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t d[16];
+++ d[0] = (_a[0] > _b[0]) ? ~UINT8_C(0) : 0x00;
+++ d[1] = (_a[1] > _b[1]) ? ~UINT8_C(0) : 0x00;
+++ d[2] = (_a[2] > _b[2]) ? ~UINT8_C(0) : 0x00;
+++ d[3] = (_a[3] > _b[3]) ? ~UINT8_C(0) : 0x00;
+++ d[4] = (_a[4] > _b[4]) ? ~UINT8_C(0) : 0x00;
+++ d[5] = (_a[5] > _b[5]) ? ~UINT8_C(0) : 0x00;
+++ d[6] = (_a[6] > _b[6]) ? ~UINT8_C(0) : 0x00;
+++ d[7] = (_a[7] > _b[7]) ? ~UINT8_C(0) : 0x00;
+++ d[8] = (_a[8] > _b[8]) ? ~UINT8_C(0) : 0x00;
+++ d[9] = (_a[9] > _b[9]) ? ~UINT8_C(0) : 0x00;
+++ d[10] = (_a[10] > _b[10]) ? ~UINT8_C(0) : 0x00;
+++ d[11] = (_a[11] > _b[11]) ? ~UINT8_C(0) : 0x00;
+++ d[12] = (_a[12] > _b[12]) ? ~UINT8_C(0) : 0x00;
+++ d[13] = (_a[13] > _b[13]) ? ~UINT8_C(0) : 0x00;
+++ d[14] = (_a[14] > _b[14]) ? ~UINT8_C(0) : 0x00;
+++ d[15] = (_a[15] > _b[15]) ? ~UINT8_C(0) : 0x00;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmpgt_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_cmpgt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = (_a[1] > _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpgt_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpgt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpgt_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmple_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = (_a[1] <= _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmple_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmple_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmple_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmplt_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ uint16_t d[8];
+++ d[0] = _a[0] < _b[0] ? ~UINT16_C(0) : 0;
+++ d[1] = _a[1] < _b[1] ? ~UINT16_C(0) : 0;
+++ d[2] = _a[2] < _b[2] ? ~UINT16_C(0) : 0;
+++ d[3] = _a[3] < _b[3] ? ~UINT16_C(0) : 0;
+++ d[4] = _a[4] < _b[4] ? ~UINT16_C(0) : 0;
+++ d[5] = _a[5] < _b[5] ? ~UINT16_C(0) : 0;
+++ d[6] = _a[6] < _b[6] ? ~UINT16_C(0) : 0;
+++ d[7] = _a[7] < _b[7] ? ~UINT16_C(0) : 0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmplt_epi16(a, b);
+++
+++ return VALIDATE_UINT16_M128(c, d);
+++}
+++
+++result_t test_mm_cmplt_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ int32_t result[4];
+++ result[0] = _a[0] < _b[0] ? -1 : 0;
+++ result[1] = _a[1] < _b[1] ? -1 : 0;
+++ result[2] = _a[2] < _b[2] ? -1 : 0;
+++ result[3] = _a[3] < _b[3] ? -1 : 0;
+++
+++ __m128i iret = _mm_cmplt_epi32(a, b);
+++ return VALIDATE_INT32_M128(iret, result);
+++}
+++
+++result_t test_mm_cmplt_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t d[16];
+++ d[0] = (_a[0] < _b[0]) ? ~UINT8_C(0) : 0x00;
+++ d[1] = (_a[1] < _b[1]) ? ~UINT8_C(0) : 0x00;
+++ d[2] = (_a[2] < _b[2]) ? ~UINT8_C(0) : 0x00;
+++ d[3] = (_a[3] < _b[3]) ? ~UINT8_C(0) : 0x00;
+++ d[4] = (_a[4] < _b[4]) ? ~UINT8_C(0) : 0x00;
+++ d[5] = (_a[5] < _b[5]) ? ~UINT8_C(0) : 0x00;
+++ d[6] = (_a[6] < _b[6]) ? ~UINT8_C(0) : 0x00;
+++ d[7] = (_a[7] < _b[7]) ? ~UINT8_C(0) : 0x00;
+++ d[8] = (_a[8] < _b[8]) ? ~UINT8_C(0) : 0x00;
+++ d[9] = (_a[9] < _b[9]) ? ~UINT8_C(0) : 0x00;
+++ d[10] = (_a[10] < _b[10]) ? ~UINT8_C(0) : 0x00;
+++ d[11] = (_a[11] < _b[11]) ? ~UINT8_C(0) : 0x00;
+++ d[12] = (_a[12] < _b[12]) ? ~UINT8_C(0) : 0x00;
+++ d[13] = (_a[13] < _b[13]) ? ~UINT8_C(0) : 0x00;
+++ d[14] = (_a[14] < _b[14]) ? ~UINT8_C(0) : 0x00;
+++ d[15] = (_a[15] < _b[15]) ? ~UINT8_C(0) : 0x00;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmplt_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_cmplt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ int64_t f0 = (_a[0] < _b[0]) ? ~UINT64_C(0) : UINT64_C(0);
+++ int64_t f1 = (_a[1] < _b[1]) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmplt_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &f0, *(double *) &f1);
+++}
+++
+++result_t test_mm_cmplt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = (_a[0] < _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmplt_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpneq_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ int64_t f0 = (_a[0] != _b[0]) ? ~UINT64_C(0) : UINT64_C(0);
+++ int64_t f1 = (_a[1] != _b[1]) ? ~UINT64_C(0) : UINT64_C(0);
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpneq_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &f0, *(double *) &f1);
+++}
+++
+++result_t test_mm_cmpneq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++
+++ int64_t f0 = (_a[0] != _b[0]) ? ~UINT64_C(0) : UINT64_C(0);
+++ int64_t f1 = ((int64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpneq_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &f0, *(double *) &f1);
+++}
+++
+++result_t test_mm_cmpnge_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = !(_a[1] >= _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpnge_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpnge_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] >= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpnge_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpngt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = !(_a[1] > _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpngt_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpngt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] > _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpngt_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpnle_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = !(_a[1] <= _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpnle_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpnle_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] <= _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpnle_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpnlt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] < _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = !(_a[1] < _b[1]) ? ~UINT64_C(0) : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpnlt_pd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpnlt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ uint64_t d0 = !(_a[0] < _b[0]) ? ~UINT64_C(0) : 0;
+++ uint64_t d1 = ((uint64_t *) _a)[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_cmpnlt_sd(a, b);
+++
+++ return validateDouble(c, *(double *) &d0, *(double *) &d1);
+++}
+++
+++result_t test_mm_cmpord_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ __m128d a = _mm_load_pd(_a);
+++ __m128d b = _mm_load_pd(_b);
+++
+++ double result[2];
+++
+++ for (uint32_t i = 0; i < 2; i++) {
+++ result[i] = cmp_noNaN(_a[i], _b[i]);
+++ }
+++
+++ __m128d ret = _mm_cmpord_pd(a, b);
+++
+++ return validateDouble(ret, result[0], result[1]);
+++}
+++
+++result_t test_mm_cmpord_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ __m128d a = _mm_load_pd(_a);
+++ __m128d b = _mm_load_pd(_b);
+++
+++ double c0 = cmp_noNaN(_a[0], _b[0]);
+++ double c1 = _a[1];
+++
+++ __m128d ret = _mm_cmpord_sd(a, b);
+++ return validateDouble(ret, c0, c1);
+++}
+++
+++result_t test_mm_cmpunord_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ __m128d a = _mm_load_pd(_a);
+++ __m128d b = _mm_load_pd(_b);
+++
+++ double result[2];
+++ result[0] = cmp_hasNaN(_a[0], _b[0]);
+++ result[1] = cmp_hasNaN(_a[1], _b[1]);
+++
+++ __m128d ret = _mm_cmpunord_pd(a, b);
+++ return validateDouble(ret, result[0], result[1]);
+++}
+++
+++result_t test_mm_cmpunord_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *_a = (double *) impl.mTestFloatPointer1;
+++ double *_b = (double *) impl.mTestFloatPointer2;
+++ __m128d a = _mm_load_pd(_a);
+++ __m128d b = _mm_load_pd(_b);
+++
+++ double result[2];
+++ result[0] = cmp_hasNaN(_a[0], _b[0]);
+++ result[1] = _a[1];
+++
+++ __m128d ret = _mm_cmpunord_sd(a, b);
+++ return validateDouble(ret, result[0], result[1]);
+++}
+++
+++result_t test_mm_comieq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comieq_sd correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ int32_t _c = (_a[0] == _b[0]) ? 1 : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ int32_t c = _mm_comieq_sd(a, b);
+++
+++ ASSERT_RETURN(c == _c);
+++ return TEST_SUCCESS;
+++#endif
+++}
+++
+++result_t test_mm_comige_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ int32_t _c = (_a[0] >= _b[0]) ? 1 : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ int32_t c = _mm_comige_sd(a, b);
+++
+++ ASSERT_RETURN(c == _c);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_comigt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ int32_t _c = (_a[0] > _b[0]) ? 1 : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ int32_t c = _mm_comigt_sd(a, b);
+++
+++ ASSERT_RETURN(c == _c);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_comile_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comile_sd correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ int32_t _c = (_a[0] <= _b[0]) ? 1 : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ int32_t c = _mm_comile_sd(a, b);
+++
+++ ASSERT_RETURN(c == _c);
+++ return TEST_SUCCESS;
+++#endif
+++}
+++
+++result_t test_mm_comilt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comilt_sd correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ int32_t _c = (_a[0] < _b[0]) ? 1 : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ int32_t c = _mm_comilt_sd(a, b);
+++
+++ ASSERT_RETURN(c == _c);
+++ return TEST_SUCCESS;
+++#endif
+++}
+++
+++result_t test_mm_comineq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME:
+++ // GCC does not implement _mm_comineq_sd correctly.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98612 for more
+++ // information.
+++#if defined(__GNUC__) && !defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ int32_t _c = (_a[0] != _b[0]) ? 1 : 0;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ int32_t c = _mm_comineq_sd(a, b);
+++
+++ ASSERT_RETURN(c == _c);
+++ return TEST_SUCCESS;
+++#endif
+++}
+++
+++result_t test_mm_cvtepi32_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ double trun[2] = {(double) _a[0], (double) _a[1]};
+++
+++ __m128d ret = _mm_cvtepi32_pd(a);
+++ return validateDouble(ret, trun[0], trun[1]);
+++}
+++
+++result_t test_mm_cvtepi32_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ float trun[4];
+++ for (uint32_t i = 0; i < 4; i++) {
+++ trun[i] = (float) _a[i];
+++ }
+++
+++ __m128 ret = _mm_cvtepi32_ps(a);
+++ return validateFloat(ret, trun[0], trun[1], trun[2], trun[3]);
+++}
+++
+++result_t test_mm_cvtpd_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ int32_t d[2];
+++
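+++ /* The scalar reference mirrors the rounding mode set for the intrinsic:
+++ _MM_ROUND_NEAREST is round-half-to-even, which bankersRounding() models. */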
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d[0] = (int32_t) (bankersRounding(_a[0]));
+++ d[1] = (int32_t) (bankersRounding(_a[1]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d[0] = (int32_t) (floor(_a[0]));
+++ d[1] = (int32_t) (floor(_a[1]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d[0] = (int32_t) (ceil(_a[0]));
+++ d[1] = (int32_t) (ceil(_a[1]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d[0] = (int32_t) (_a[0]);
+++ d[1] = (int32_t) (_a[1]);
+++ break;
+++ }
+++
+++#if defined(__ARM_FEATURE_FRINT) && !defined(__clang__)
+++ /* Values that cannot fit into a 32-bit integer should instead return the
+++ * indefinite integer value (INT32_MIN). This behaviour is currently only
+++ * emulated when the round-to-integral instructions are used. */
+++ for (int i = 0; i < 2; i++) {
+++ if (_a[i] > (float) INT32_MAX || _a[i] < (float) INT32_MIN)
+++ d[i] = INT32_MIN;
+++ }
+++#endif
+++
+++ __m128d a = load_m128d(_a);
+++ __m128i ret = _mm_cvtpd_epi32(a);
+++
+++ return validateInt32(ret, d[0], d[1], 0, 0);
+++}
+++
+++result_t test_mm_cvtpd_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ int32_t d[2];
+++
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d[0] = (int32_t) (bankersRounding(_a[0]));
+++ d[1] = (int32_t) (bankersRounding(_a[1]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d[0] = (int32_t) (floor(_a[0]));
+++ d[1] = (int32_t) (floor(_a[1]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d[0] = (int32_t) (ceil(_a[0]));
+++ d[1] = (int32_t) (ceil(_a[1]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d[0] = (int32_t) (_a[0]);
+++ d[1] = (int32_t) (_a[1]);
+++ break;
+++ }
+++
+++ __m128d a = load_m128d(_a);
+++ __m64 ret = _mm_cvtpd_pi32(a);
+++
+++ return VALIDATE_INT32_M64(ret, d);
+++}
+++
+++result_t test_mm_cvtpd_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ float f0 = (float) _a[0];
+++ float f1 = (float) _a[1];
+++ const __m128d a = load_m128d(_a);
+++
+++ __m128 r = _mm_cvtpd_ps(a);
+++
+++ return validateFloat(r, f0, f1, 0, 0);
+++}
+++
+++result_t test_mm_cvtpi32_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ __m64 a = load_m64(_a);
+++
+++ double trun[2] = {(double) _a[0], (double) _a[1]};
+++
+++ __m128d ret = _mm_cvtpi32_pd(a);
+++
+++ return validateDouble(ret, trun[0], trun[1]);
+++}
+++
+++result_t test_mm_cvtps_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ __m128 a = load_m128(_a);
+++ int32_t d[4];
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ for (uint32_t i = 0; i < 4; i++) {
+++ d[i] = (int32_t) (bankersRounding(_a[i]));
+++ }
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ for (uint32_t i = 0; i < 4; i++) {
+++ d[i] = (int32_t) (floorf(_a[i]));
+++ }
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ for (uint32_t i = 0; i < 4; i++) {
+++ d[i] = (int32_t) (ceilf(_a[i]));
+++ }
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ for (uint32_t i = 0; i < 4; i++) {
+++ d[i] = (int32_t) (_a[i]);
+++ }
+++ break;
+++ }
+++
+++ __m128i ret = _mm_cvtps_epi32(a);
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtps_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ double d0 = (double) _a[0];
+++ double d1 = (double) _a[1];
+++ const __m128 a = load_m128(_a);
+++
+++ __m128d r = _mm_cvtps_pd(a);
+++
+++ return validateDouble(r, d0, d1);
+++}
+++
+++result_t test_mm_cvtsd_f64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ double d = _a[0];
+++
+++ const __m128d *a = (const __m128d *) _a;
+++ double r = _mm_cvtsd_f64(*a);
+++
+++ return r == d ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtsd_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ int32_t d;
+++
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d = (int32_t) (bankersRounding(_a[0]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d = (int32_t) (floor(_a[0]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d = (int32_t) (ceil(_a[0]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d = (int32_t) (_a[0]);
+++ break;
+++ }
+++
+++ __m128d a = load_m128d(_a);
+++ int32_t ret = _mm_cvtsd_si32(a);
+++
+++ return ret == d ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtsd_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ int64_t d;
+++
+++ switch (iter & 0x3) {
+++ case 0:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ d = (int64_t) (bankersRounding(_a[0]));
+++ break;
+++ case 1:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ d = (int64_t) (floor(_a[0]));
+++ break;
+++ case 2:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ d = (int64_t) (ceil(_a[0]));
+++ break;
+++ case 3:
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ d = (int64_t) (_a[0]);
+++ break;
+++ }
+++
+++ __m128d a = load_m128d(_a);
+++ int64_t ret = _mm_cvtsd_si64(a);
+++
+++ return ret == d ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtsd_si64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_cvtsd_si64(impl, iter);
+++}
+++
+++result_t test_mm_cvtsd_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ float f0 = (float) _b[0];
+++ float f1 = (float) _a[1];
+++ float f2 = (float) _a[2];
+++ float f3 = (float) _a[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128 c = _mm_cvtsd_ss(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_cvtsi128_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++
+++ int32_t d = _a[0];
+++
+++ __m128i a = load_m128i(_a);
+++ int c = _mm_cvtsi128_si32(a);
+++
+++ return d == c ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtsi128_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ int64_t d = _a[0];
+++
+++ __m128i a = load_m128i(_a);
+++ int64_t c = _mm_cvtsi128_si64(a);
+++
+++ return d == c ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvtsi128_si64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_cvtsi128_si64(impl, iter);
+++}
+++
+++result_t test_mm_cvtsi32_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const int32_t b = (const int32_t) impl.mTestInts[iter];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d c = _mm_cvtsi32_sd(a, b);
+++
+++ return validateDouble(c, b, _a[1]);
+++}
+++
+++result_t test_mm_cvtsi32_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++
+++ int32_t d = _a[0];
+++
+++ __m128i c = _mm_cvtsi32_si128(*_a);
+++
+++ return validateInt32(c, d, 0, 0, 0);
+++}
+++
+++result_t test_mm_cvtsi64_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const int64_t b = (const int64_t) impl.mTestInts[iter];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d c = _mm_cvtsi64_sd(a, b);
+++
+++ return validateDouble(c, (double) b, _a[1]);
+++}
+++
+++result_t test_mm_cvtsi64_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ int64_t d = _a[0];
+++
+++ __m128i c = _mm_cvtsi64_si128(*_a);
+++
+++ return validateInt64(c, d, 0);
+++}
+++
+++result_t test_mm_cvtsi64x_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_cvtsi64_sd(impl, iter);
+++}
+++
+++result_t test_mm_cvtsi64x_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_cvtsi64_si128(impl, iter);
+++}
+++
+++result_t test_mm_cvtss_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ double d0 = double(_b[0]);
+++ double d1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128 b = load_m128(_b);
+++ __m128d c = _mm_cvtss_sd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_cvttpd_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
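+++ /* The "tt" (truncating) conversions always round toward zero, so a plain
+++ C cast to a signed integer is the correct scalar reference. */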
+++ __m128d a = load_m128d(_a);
+++ int32_t d0 = (int32_t) (_a[0]);
+++ int32_t d1 = (int32_t) (_a[1]);
+++
+++ __m128i ret = _mm_cvttpd_epi32(a);
+++ return validateInt32(ret, d0, d1, 0, 0);
+++}
+++
+++result_t test_mm_cvttpd_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d a = load_m128d(_a);
+++ int32_t d0 = (int32_t) (_a[0]);
+++ int32_t d1 = (int32_t) (_a[1]);
+++
+++ __m64 ret = _mm_cvttpd_pi32(a);
+++ return validateInt32(ret, d0, d1);
+++}
+++
+++result_t test_mm_cvttps_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ __m128 a = load_m128(_a);
+++ int32_t trun[4];
+++ for (uint32_t i = 0; i < 4; i++) {
+++ trun[i] = (int32_t) _a[i];
+++ }
+++
+++ __m128i ret = _mm_cvttps_epi32(a);
+++ return VALIDATE_INT32_M128(ret, trun);
+++}
+++
+++result_t test_mm_cvttsd_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d a = _mm_load_sd(_a);
+++ int32_t ret = _mm_cvttsd_si32(a);
+++
+++ return ret == (int32_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvttsd_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d a = _mm_load_sd(_a);
+++ int64_t ret = _mm_cvttsd_si64(a);
+++
+++ return ret == (int64_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_cvttsd_si64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++#if defined(__clang__)
+++ // The intrinsic _mm_cvttsd_si64x() does not exist in Clang
+++ return TEST_UNIMPL;
+++#else
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d a = _mm_load_sd(_a);
+++ int64_t ret = _mm_cvttsd_si64x(a);
+++
+++ return ret == (int64_t) _a[0] ? TEST_SUCCESS : TEST_FAIL;
+++#endif
+++}
+++
+++result_t test_mm_div_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = 0.0, d1 = 0.0;
+++
+++ if (_b[0] != 0.0)
+++ d0 = _a[0] / _b[0];
+++ if (_b[1] != 0.0)
+++ d1 = _a[1] / _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_div_pd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_div_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double d0 = _a[0] / _b[0];
+++ double d1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++
+++ __m128d c = _mm_div_sd(a, b);
+++
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_extract_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint16_t *_a = (uint16_t *) impl.mTestIntPointer1;
+++ const int idx = iter & 0x7;
+++ __m128i a = load_m128i(_a);
+++ int c;
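+++ /* The lane index of _mm_extract_epi16 must be a compile-time constant,
+++ so the runtime index is dispatched through a switch of literal values. */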
+++ switch (idx) {
+++ case 0:
+++ c = _mm_extract_epi16(a, 0);
+++ break;
+++ case 1:
+++ c = _mm_extract_epi16(a, 1);
+++ break;
+++ case 2:
+++ c = _mm_extract_epi16(a, 2);
+++ break;
+++ case 3:
+++ c = _mm_extract_epi16(a, 3);
+++ break;
+++ case 4:
+++ c = _mm_extract_epi16(a, 4);
+++ break;
+++ case 5:
+++ c = _mm_extract_epi16(a, 5);
+++ break;
+++ case 6:
+++ c = _mm_extract_epi16(a, 6);
+++ break;
+++ case 7:
+++ c = _mm_extract_epi16(a, 7);
+++ break;
+++ }
+++
+++ ASSERT_RETURN(c == *(_a + idx));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_insert_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t insert = (int16_t) *impl.mTestIntPointer2;
+++
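+++ /* As with extract, the insert index must be an immediate; the TEST_IMPL
+++ macro below is expanded for each index value so every call site uses a
+++ literal constant. */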
+++#define TEST_IMPL(IDX) \
+++ int16_t d##IDX[8]; \
+++ for (int i = 0; i < 8; i++) { \
+++ d##IDX[i] = _a[i]; \
+++ } \
+++ d##IDX[IDX] = insert; \
+++ \
+++ __m128i a##IDX = load_m128i(_a); \
+++ __m128i b##IDX = _mm_insert_epi16(a##IDX, insert, IDX); \
+++ CHECK_RESULT(VALIDATE_INT16_M128(b##IDX, d##IDX))
+++
+++ IMM_8_ITER
+++#undef TEST_IMPL
+++
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_lfence(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ /* FIXME: Assume that memory barriers always function as intended. */
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_load_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++ __m128d a = _mm_load_pd(p);
+++ return validateDouble(a, p[0], p[1]);
+++}
+++
+++result_t test_mm_load_pd1(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++ __m128d a = _mm_load_pd1(p);
+++ return validateDouble(a, p[0], p[0]);
+++}
+++
+++result_t test_mm_load_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++ __m128d a = _mm_load_sd(p);
+++ return validateDouble(a, p[0], 0);
+++}
+++
+++result_t test_mm_load_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *addr = impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_load_si128((const __m128i *) addr);
+++
+++ return VALIDATE_INT32_M128(ret, addr);
+++}
+++
+++result_t test_mm_load1_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *addr = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d ret = _mm_load1_pd(addr);
+++
+++ return validateDouble(ret, addr[0], addr[0]);
+++}
+++
+++result_t test_mm_loadh_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *addr = (const double *) impl.mTestFloatPointer2;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d ret = _mm_loadh_pd(a, addr);
+++
+++ return validateDouble(ret, _a[0], addr[0]);
+++}
+++
+++result_t test_mm_loadl_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *addr = (const int64_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_loadl_epi64((const __m128i *) addr);
+++
+++ return validateInt64(ret, addr[0], 0);
+++}
+++
+++result_t test_mm_loadl_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *addr = (const double *) impl.mTestFloatPointer2;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d ret = _mm_loadl_pd(a, addr);
+++
+++ return validateDouble(ret, addr[0], _a[1]);
+++}
+++
+++result_t test_mm_loadr_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *addr = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d ret = _mm_loadr_pd(addr);
+++
+++ return validateDouble(ret, addr[1], addr[0]);
+++}
+++
+++result_t test_mm_loadu_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++ __m128d a = _mm_loadu_pd(p);
+++ return validateDouble(a, p[0], p[1]);
+++}
+++
+++result_t test_mm_loadu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i c = _mm_loadu_si128((const __m128i *) _a);
+++ return VALIDATE_INT32_M128(c, _a);
+++}
+++
+++result_t test_mm_loadu_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // GCC versions before 11 do not implement the intrinsic function
+++ // _mm_loadu_si32. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483
+++ // for more information.
+++#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+++ return TEST_UNIMPL;
+++#else
+++ const int32_t *addr = (const int32_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_loadu_si32((const void *) addr);
+++
+++ return validateInt32(ret, addr[0], 0, 0, 0);
+++#endif
+++}
+++
+++result_t test_mm_madd_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int32_t d0 = (int32_t) _a[0] * _b[0];
+++ int32_t d1 = (int32_t) _a[1] * _b[1];
+++ int32_t d2 = (int32_t) _a[2] * _b[2];
+++ int32_t d3 = (int32_t) _a[3] * _b[3];
+++ int32_t d4 = (int32_t) _a[4] * _b[4];
+++ int32_t d5 = (int32_t) _a[5] * _b[5];
+++ int32_t d6 = (int32_t) _a[6] * _b[6];
+++ int32_t d7 = (int32_t) _a[7] * _b[7];
+++
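+++ /* pmaddwd semantics: adjacent pairs of the widened 16x16 products are
+++ summed into four 32-bit result lanes. */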
+++ int32_t e[4];
+++ e[0] = d0 + d1;
+++ e[1] = d2 + d3;
+++ e[2] = d4 + d5;
+++ e[3] = d6 + d7;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_madd_epi16(a, b);
+++ return VALIDATE_INT32_M128(c, e);
+++}
+++
+++result_t test_mm_maskmoveu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_mask = (const uint8_t *) impl.mTestIntPointer2;
+++ char mem_addr[16];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i mask = load_m128i(_mask);
+++ _mm_maskmoveu_si128(a, mask, mem_addr);
+++
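+++ /* Only bytes whose mask byte has its most significant bit set are stored
+++ by _mm_maskmoveu_si128, so only those positions of mem_addr are defined
+++ and checked here. */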
+++ for (int i = 0; i < 16; i++) {
+++ if (_mask[i] >> 7) {
+++ ASSERT_RETURN(_a[i] == (uint8_t) mem_addr[i]);
+++ }
+++ }
+++
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_max_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[8];
+++ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++ d[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+++ d[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+++ d[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+++ d[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ __m128i c = _mm_max_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_max_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ uint8_t d[16];
+++ for (uint32_t i = 0; i < 16; i++) {
+++ d[i] = ((uint8_t) _a[i] > (uint8_t) _b[i]) ? (uint8_t) _a[i]
+++ : (uint8_t) _b[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_max_epu8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_max_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double f0 = _a[0] > _b[0] ? _a[0] : _b[0];
+++ double f1 = _a[1] > _b[1] ? _a[1] : _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_max_pd(a, b);
+++
+++ return validateDouble(c, f0, f1);
+++}
+++
+++result_t test_mm_max_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] > _b[0] ? _a[0] : _b[0];
+++ double d1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_max_sd(a, b);
+++
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_mfence(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ /* FIXME: Assume that memory barriers always function as intended. */
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_min_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[8];
+++ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++ d[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+++ d[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+++ d[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+++ d[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_min_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_min_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ uint8_t d[16];
+++ for (uint32_t i = 0; i < 16; i++) {
+++ d[i] = ((uint8_t) _a[i] < (uint8_t) _b[i]) ? (uint8_t) _a[i]
+++ : (uint8_t) _b[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_min_epu8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_min_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double f0 = _a[0] < _b[0] ? _a[0] : _b[0];
+++ double f1 = _a[1] < _b[1] ? _a[1] : _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++
+++ __m128d c = _mm_min_pd(a, b);
+++ return validateDouble(c, f0, f1);
+++}
+++
+++result_t test_mm_min_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] < _b[0] ? _a[0] : _b[0];
+++ double d1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_min_sd(a, b);
+++
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_move_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ int64_t d0 = _a[0];
+++ int64_t d1 = 0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_move_epi64(a);
+++
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_move_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++
+++ double result[2];
+++ result[0] = _b[0];
+++ result[1] = _a[1];
+++
+++ __m128d ret = _mm_move_sd(a, b);
+++ return validateDouble(ret, result[0], result[1]);
+++}
+++
+++result_t test_mm_movemask_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++
+++ const uint8_t *ip = (const uint8_t *) _a;
+++ int ret = 0;
+++ uint32_t mask = 1;
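+++ /* Collect the most significant bit of each of the 16 bytes into a 16-bit
+++ scalar mask, matching the pmovmskb semantics. */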
+++ for (uint32_t i = 0; i < 16; i++) {
+++ if (ip[i] & 0x80) {
+++ ret |= mask;
+++ }
+++ mask = mask << 1;
+++ }
+++ int test = _mm_movemask_epi8(a);
+++ ASSERT_RETURN(test == ret);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_movemask_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ unsigned int _c = 0;
+++ _c |= ((*(const uint64_t *) _a) >> 63) & 0x1;
+++ _c |= (((*(const uint64_t *) (_a + 1)) >> 62) & 0x2);
+++
+++ __m128d a = load_m128d(_a);
+++ int c = _mm_movemask_pd(a);
+++
+++ ASSERT_RETURN((unsigned int) c == _c);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_movepi64_pi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ int64_t d0 = _a[0];
+++
+++ __m128i a = load_m128i(_a);
+++ __m64 c = _mm_movepi64_pi64(a);
+++
+++ return validateInt64(c, d0);
+++}
+++
+++result_t test_mm_movpi64_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ int64_t d0 = _a[0];
+++
+++ __m64 a = load_m64(_a);
+++ __m128i c = _mm_movpi64_epi64(a);
+++
+++ return validateInt64(c, d0, 0);
+++}
+++
+++result_t test_mm_mul_epu32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+++ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+++ uint64_t dx = (uint64_t) (_a[0]) * (uint64_t) (_b[0]);
+++ uint64_t dy = (uint64_t) (_a[2]) * (uint64_t) (_b[2]);
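+++ /* _mm_mul_epu32 multiplies only the even-indexed 32-bit lanes (0 and 2),
+++ producing two unsigned 64-bit products. */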
+++
+++ __m128i a = _mm_loadu_si128((const __m128i *) _a);
+++ __m128i b = _mm_loadu_si128((const __m128i *) _b);
+++ __m128i r = _mm_mul_epu32(a, b);
+++ return validateUInt64(r, dx, dy);
+++}
+++
+++result_t test_mm_mul_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] * _b[0];
+++ double d1 = _a[1] * _b[1];
+++
+++ __m128d a = _mm_load_pd(_a);
+++ __m128d b = _mm_load_pd(_b);
+++ __m128d c = _mm_mul_pd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_mul_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double dx = _a[0] * _b[0];
+++ double dy = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_mul_sd(a, b);
+++ return validateDouble(c, dx, dy);
+++}
+++
+++result_t test_mm_mul_su32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+++ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+++
+++ uint64_t u = (uint64_t) (_a[0]) * (uint64_t) (_b[0]);
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 r = _mm_mul_su32(a, b);
+++
+++ return validateUInt64(r, u);
+++}
+++
+++result_t test_mm_mulhi_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
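+++ // Reference: keep the high 16 bits of each signed 32-bit product.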
+++ int16_t d[8];
+++ for (uint32_t i = 0; i < 8; i++) {
+++ int32_t m = (int32_t) _a[i] * (int32_t) _b[i];
+++ d[i] = (int16_t) (m >> 16);
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_mulhi_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_mulhi_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+++ uint16_t d[8];
+++ for (uint32_t i = 0; i < 8; i++) {
+++ uint32_t m = (uint32_t) _a[i] * (uint32_t) _b[i];
+++ d[i] = (uint16_t) (m >> 16);
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_mulhi_epu16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_mullo_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[8];
+++ d[0] = _a[0] * _b[0];
+++ d[1] = _a[1] * _b[1];
+++ d[2] = _a[2] * _b[2];
+++ d[3] = _a[3] * _b[3];
+++ d[4] = _a[4] * _b[4];
+++ d[5] = _a[5] * _b[5];
+++ d[6] = _a[6] * _b[6];
+++ d[7] = _a[7] * _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_mullo_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_or_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestFloatPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestFloatPointer2;
+++
+++ int64_t d0 = _a[0] | _b[0];
+++ int64_t d1 = _a[1] | _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_or_pd(a, b);
+++
+++ return validateDouble(c, *((double *) &d0), *((double *) &d1));
+++}
+++
+++result_t test_mm_or_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
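+++ // The OR is applied through the float-typed view of the integer vectors via _mm_or_ps.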
+++ __m128 fc = _mm_or_ps(*(const __m128 *) &a, *(const __m128 *) &b);
+++ __m128i c = *(const __m128i *) &fc;
+++ // build the expected result lane by lane for the assertion
+++ const uint32_t *ia = (const uint32_t *) &a;
+++ const uint32_t *ib = (const uint32_t *) &b;
+++ uint32_t r[4];
+++ r[0] = ia[0] | ib[0];
+++ r[1] = ia[1] | ib[1];
+++ r[2] = ia[2] | ib[2];
+++ r[3] = ia[3] | ib[3];
+++ __m128i ret = do_mm_set_epi32(r[3], r[2], r[1], r[0]);
+++ result_t res = VALIDATE_INT32_M128(c, r);
+++ if (res) {
+++ res = VALIDATE_INT32_M128(ret, r);
+++ }
+++ return res;
+++}
+++
+++result_t test_mm_packs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int8_t max = INT8_MAX;
+++ int8_t min = INT8_MIN;
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[16];
+++ for (int i = 0; i < 8; i++) {
+++ if (_a[i] > max)
+++ d[i] = max;
+++ else if (_a[i] < min)
+++ d[i] = min;
+++ else
+++ d[i] = (int8_t) _a[i];
+++ }
+++ for (int i = 0; i < 8; i++) {
+++ if (_b[i] > max)
+++ d[i + 8] = max;
+++ else if (_b[i] < min)
+++ d[i + 8] = min;
+++ else
+++ d[i + 8] = (int8_t) _b[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_packs_epi16(a, b);
+++
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_packs_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int16_t max = INT16_MAX;
+++ int16_t min = INT16_MIN;
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[8];
+++ for (int i = 0; i < 4; i++) {
+++ if (_a[i] > max)
+++ d[i] = max;
+++ else if (_a[i] < min)
+++ d[i] = min;
+++ else
+++ d[i] = (int16_t) _a[i];
+++ }
+++ for (int i = 0; i < 4; i++) {
+++ if (_b[i] > max)
+++ d[i + 4] = max;
+++ else if (_b[i] < min)
+++ d[i + 4] = min;
+++ else
+++ d[i + 4] = (int16_t) _b[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_packs_epi32(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_packus_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint8_t max = UINT8_MAX;
+++ uint8_t min = 0;
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ uint8_t d[16];
+++ for (int i = 0; i < 8; i++) {
+++ if (_a[i] > (int16_t) max)
+++ d[i] = max;
+++ else if (_a[i] < (int16_t) min)
+++ d[i] = min;
+++ else
+++ d[i] = (uint8_t) _a[i];
+++ }
+++ for (int i = 0; i < 8; i++) {
+++ if (_b[i] > (int16_t) max)
+++ d[i + 8] = max;
+++ else if (_b[i] < (int16_t) min)
+++ d[i + 8] = min;
+++ else
+++ d[i + 8] = (uint8_t) _b[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_packus_epi16(a, b);
+++
+++ return VALIDATE_UINT8_M128(c, d);
+++}
+++
+++result_t test_mm_pause(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ _mm_pause();
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_sad_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
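+++ // Reference: sum of absolute differences over each 8-byte half, kept in the low 16 bits of each 64-bit lane.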
+++ uint16_t d0 = 0;
+++ uint16_t d1 = 0;
+++ for (int i = 0; i < 8; i++) {
+++ d0 += abs(_a[i] - _b[i]);
+++ }
+++ for (int i = 8; i < 16; i++) {
+++ d1 += abs(_a[i] - _b[i]);
+++ }
+++
+++ const __m128i a = load_m128i(_a);
+++ const __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sad_epu8(a, b);
+++ return validateUInt16(c, d0, 0, 0, 0, d1, 0, 0, 0);
+++}
+++
+++result_t test_mm_set_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ int16_t d[8];
+++ d[0] = _a[0];
+++ d[1] = _a[1];
+++ d[2] = _a[2];
+++ d[3] = _a[3];
+++ d[4] = _a[4];
+++ d[5] = _a[5];
+++ d[6] = _a[6];
+++ d[7] = _a[7];
+++
+++ __m128i c = _mm_set_epi16(d[7], d[6], d[5], d[4], d[3], d[2], d[1], d[0]);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_set_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int32_t d[4];
+++ d[3] = impl.mTestInts[iter];
+++ d[2] = impl.mTestInts[iter + 1];
+++ d[1] = impl.mTestInts[iter + 2];
+++ d[0] = impl.mTestInts[iter + 3];
+++ __m128i a = _mm_set_epi32(d[3], d[2], d[1], d[0]);
+++ return VALIDATE_INT32_M128(a, d);
+++}
+++
+++result_t test_mm_set_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_set_epi64(load_m64(&_a[1]), load_m64(&_a[0]));
+++
+++ return validateInt64(ret, _a[0], _a[1]);
+++}
+++
+++result_t test_mm_set_epi64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_set_epi64x(_a[1], _a[0]);
+++
+++ return validateInt64(ret, _a[0], _a[1]);
+++}
+++
+++result_t test_mm_set_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ int8_t d[16];
+++ d[0] = _a[0];
+++ d[1] = _a[1];
+++ d[2] = _a[2];
+++ d[3] = _a[3];
+++ d[4] = _a[4];
+++ d[5] = _a[5];
+++ d[6] = _a[6];
+++ d[7] = _a[7];
+++ d[8] = _a[8];
+++ d[9] = _a[9];
+++ d[10] = _a[10];
+++ d[11] = _a[11];
+++ d[12] = _a[12];
+++ d[13] = _a[13];
+++ d[14] = _a[14];
+++ d[15] = _a[15];
+++
+++ __m128i c =
+++ _mm_set_epi8(d[15], d[14], d[13], d[12], d[11], d[10], d[9], d[8], d[7],
+++ d[6], d[5], d[4], d[3], d[2], d[1], d[0]);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_set_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++ double x = p[0];
+++ double y = p[1];
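+++ // _mm_set_pd(x, y) places y in the low lane and x in the high lane.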
+++ __m128d a = _mm_set_pd(x, y);
+++ return validateDouble(a, y, x);
+++}
+++
+++result_t test_mm_set_pd1(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double _a = impl.mTestFloats[iter];
+++
+++ __m128d a = _mm_set_pd1(_a);
+++
+++ return validateDouble(a, _a, _a);
+++}
+++
+++result_t test_mm_set_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ double f0 = _a[0];
+++ double f1 = 0.0;
+++
+++ __m128d a = _mm_set_sd(_a[0]);
+++ return validateDouble(a, f0, f1);
+++}
+++
+++result_t test_mm_set1_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ int16_t d0 = _a[0];
+++
+++ __m128i c = _mm_set1_epi16(d0);
+++ return validateInt16(c, d0, d0, d0, d0, d0, d0, d0, d0);
+++}
+++
+++result_t test_mm_set1_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int32_t x = impl.mTestInts[iter];
+++ __m128i a = _mm_set1_epi32(x);
+++ return validateInt32(a, x, x, x, x);
+++}
+++
+++result_t test_mm_set1_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_set1_epi64(load_m64(&_a[0]));
+++
+++ return validateInt64(ret, _a[0], _a[0]);
+++}
+++
+++result_t test_mm_set1_epi64x(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_set1_epi64x(_a[0]);
+++
+++ return validateInt64(ret, _a[0], _a[0]);
+++}
+++
+++result_t test_mm_set1_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ int8_t d0 = _a[0];
+++ __m128i c = _mm_set1_epi8(d0);
+++ return validateInt8(c, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0, d0,
+++ d0, d0, d0);
+++}
+++
+++result_t test_mm_set1_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ double d0 = _a[0];
+++ __m128d c = _mm_set1_pd(d0);
+++ return validateDouble(c, d0, d0);
+++}
+++
+++result_t test_mm_setr_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++
+++ __m128i c =
+++ _mm_setr_epi16(_a[0], _a[1], _a[2], _a[3], _a[4], _a[5], _a[6], _a[7]);
+++
+++ return VALIDATE_INT16_M128(c, _a);
+++}
+++
+++result_t test_mm_setr_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i c = _mm_setr_epi32(_a[0], _a[1], _a[2], _a[3]);
+++ return VALIDATE_INT32_M128(c, _a);
+++}
+++
+++result_t test_mm_setr_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ __m128i c = _mm_setr_epi64(load_m64(&_a[0]), load_m64(&_a[1]));
+++ return validateInt64(c, _a[0], _a[1]);
+++}
+++
+++result_t test_mm_setr_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++
+++ __m128i c = _mm_setr_epi8(_a[0], _a[1], _a[2], _a[3], _a[4], _a[5], _a[6],
+++ _a[7], _a[8], _a[9], _a[10], _a[11], _a[12],
+++ _a[13], _a[14], _a[15]);
+++
+++ return VALIDATE_INT8_M128(c, _a);
+++}
+++
+++result_t test_mm_setr_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++
+++ double x = p[0];
+++ double y = p[1];
+++
+++ __m128d a = _mm_setr_pd(x, y);
+++
+++ return validateDouble(a, x, y);
+++}
+++
+++result_t test_mm_setzero_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ __m128d a = _mm_setzero_pd();
+++ return validateDouble(a, 0, 0);
+++}
+++
+++result_t test_mm_setzero_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ __m128i a = _mm_setzero_si128();
+++ return validateInt32(a, 0, 0, 0, 0);
+++}
+++
+++result_t test_mm_shuffle_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ __m128i a, c;
+++
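+++ // Each 2-bit field of the 8-bit immediate selects one source lane; IMM_256_ITER expands the test for every immediate value.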
+++#define TEST_IMPL(IDX) \
+++ int32_t d##IDX[4]; \
+++ d##IDX[0] = _a[((IDX) & 0x3)]; \
+++ d##IDX[1] = _a[((IDX >> 2) & 0x3)]; \
+++ d##IDX[2] = _a[((IDX >> 4) & 0x3)]; \
+++ d##IDX[3] = _a[((IDX >> 6) & 0x3)]; \
+++ \
+++ a = load_m128i(_a); \
+++ c = _mm_shuffle_epi32(a, IDX); \
+++ CHECK_RESULT(VALIDATE_INT32_M128(c, d##IDX))
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_shuffle_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ __m128d a, b, c;
+++
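+++ // Bit 0 of the immediate selects the lane taken from a, bit 1 the lane taken from b.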
+++#define TEST_IMPL(IDX) \
+++ a = load_m128d(_a); \
+++ b = load_m128d(_b); \
+++ c = _mm_shuffle_pd(a, b, IDX); \
+++ \
+++ double d0##IDX = _a[IDX & 0x1]; \
+++ double d1##IDX = _b[(IDX & 0x2) >> 1]; \
+++ CHECK_RESULT(validateDouble(c, d0##IDX, d1##IDX))
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_shufflehi_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m128i a, c;
+++
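+++ // The low four words pass through; each 2-bit field of the immediate picks one of the high four words, extracted here from the upper 64-bit half.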
+++#define TEST_IMPL(IDX) \
+++ int16_t d##IDX[8]; \
+++ d##IDX[0] = _a[0]; \
+++ d##IDX[1] = _a[1]; \
+++ d##IDX[2] = _a[2]; \
+++ d##IDX[3] = _a[3]; \
+++ d##IDX[4] = (int16_t) (((const int64_t *) _a)[1] >> ((IDX & 0x3) * 16)); \
+++ d##IDX[5] = \
+++ (int16_t) (((const int64_t *) _a)[1] >> (((IDX >> 2) & 0x3) * 16)); \
+++ d##IDX[6] = \
+++ (int16_t) (((const int64_t *) _a)[1] >> (((IDX >> 4) & 0x3) * 16)); \
+++ d##IDX[7] = \
+++ (int16_t) (((const int64_t *) _a)[1] >> (((IDX >> 6) & 0x3) * 16)); \
+++ \
+++ a = load_m128i(_a); \
+++ c = _mm_shufflehi_epi16(a, IDX); \
+++ \
+++ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_shufflelo_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m128i a, c;
+++
+++#define TEST_IMPL(IDX) \
+++ int16_t d##IDX[8]; \
+++ d##IDX[0] = (int16_t) (((const int64_t *) _a)[0] >> ((IDX & 0x3) * 16)); \
+++ d##IDX[1] = \
+++ (int16_t) (((const int64_t *) _a)[0] >> (((IDX >> 2) & 0x3) * 16)); \
+++ d##IDX[2] = \
+++ (int16_t) (((const int64_t *) _a)[0] >> (((IDX >> 4) & 0x3) * 16)); \
+++ d##IDX[3] = \
+++ (int16_t) (((const int64_t *) _a)[0] >> (((IDX >> 6) & 0x3) * 16)); \
+++ d##IDX[4] = _a[4]; \
+++ d##IDX[5] = _a[5]; \
+++ d##IDX[6] = _a[6]; \
+++ d##IDX[7] = _a[7]; \
+++ \
+++ a = load_m128i(_a); \
+++ c = _mm_shufflelo_epi16(a, IDX); \
+++ \
+++ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_sll_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m128i a, b, c;
+++ uint8_t idx;
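+++ // Shift counts of 16 or more clear every element, hence the (idx > 15) guard in the reference.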
+++#define TEST_IMPL(IDX) \
+++ uint16_t d##IDX[8]; \
+++ idx = IDX; \
+++ d##IDX[0] = (idx > 15) ? 0 : _a[0] << idx; \
+++ d##IDX[1] = (idx > 15) ? 0 : _a[1] << idx; \
+++ d##IDX[2] = (idx > 15) ? 0 : _a[2] << idx; \
+++ d##IDX[3] = (idx > 15) ? 0 : _a[3] << idx; \
+++ d##IDX[4] = (idx > 15) ? 0 : _a[4] << idx; \
+++ d##IDX[5] = (idx > 15) ? 0 : _a[5] << idx; \
+++ d##IDX[6] = (idx > 15) ? 0 : _a[6] << idx; \
+++ d##IDX[7] = (idx > 15) ? 0 : _a[7] << idx; \
+++ \
+++ a = load_m128i(_a); \
+++ b = _mm_set1_epi64x(IDX); \
+++ c = _mm_sll_epi16(a, b); \
+++ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+++
+++ IMM_64_ITER
+++#undef TEST_IMPL
+++
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_sll_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i a, b, c;
+++ uint8_t idx;
+++
+++#define TEST_IMPL(IDX) \
+++ uint32_t d##IDX[4]; \
+++ idx = IDX; \
+++ d##IDX[0] = (idx > 31) ? 0 : _a[0] << idx; \
+++ d##IDX[1] = (idx > 31) ? 0 : _a[1] << idx; \
+++ d##IDX[2] = (idx > 31) ? 0 : _a[2] << idx; \
+++ d##IDX[3] = (idx > 31) ? 0 : _a[3] << idx; \
+++ \
+++ a = load_m128i(_a); \
+++ b = _mm_set1_epi64x(IDX); \
+++ c = _mm_sll_epi32(a, b); \
+++ CHECK_RESULT(VALIDATE_INT32_M128(c, d##IDX))
+++
+++ IMM_64_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_sll_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ __m128i a, b, c;
+++
+++#define TEST_IMPL(IDX) \
+++ uint64_t d0##IDX = (IDX & ~63) ? 0 : _a[0] << IDX; \
+++ uint64_t d1##IDX = (IDX & ~63) ? 0 : _a[1] << IDX; \
+++ \
+++ a = load_m128i(_a); \
+++ b = _mm_set1_epi64x(IDX); \
+++ c = _mm_sll_epi64(a, b); \
+++ \
+++ CHECK_RESULT(validateInt64(c, d0##IDX, d1##IDX))
+++
+++ IMM_64_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_slli_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m128i a, c;
+++ uint8_t idx;
+++#define TEST_IMPL(IDX) \
+++ int16_t d##IDX[8]; \
+++ idx = IDX; \
+++ d##IDX[0] = (idx > 15) ? 0 : _a[0] << idx; \
+++ d##IDX[1] = (idx > 15) ? 0 : _a[1] << idx; \
+++ d##IDX[2] = (idx > 15) ? 0 : _a[2] << idx; \
+++ d##IDX[3] = (idx > 15) ? 0 : _a[3] << idx; \
+++ d##IDX[4] = (idx > 15) ? 0 : _a[4] << idx; \
+++ d##IDX[5] = (idx > 15) ? 0 : _a[5] << idx; \
+++ d##IDX[6] = (idx > 15) ? 0 : _a[6] << idx; \
+++ d##IDX[7] = (idx > 15) ? 0 : _a[7] << idx; \
+++ \
+++ a = load_m128i(_a); \
+++ c = _mm_slli_epi16(a, IDX); \
+++ CHECK_RESULT(VALIDATE_INT16_M128(c, d##IDX))
+++
+++ IMM_64_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_slli_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++#if defined(__clang__)
+++ // The Clang compiler rejects a second argument of _mm_slli_epi32()
+++ // greater than 31.
+++ const int count = (int) (iter % 33 - 1); // range: -1 ~ 31
+++#else
+++ const int count = (int) (iter % 34 - 1); // range: -1 ~ 32
+++#endif
+++
+++ int32_t d[4];
+++ d[0] = (count & ~31) ? 0 : _a[0] << count;
+++ d[1] = (count & ~31) ? 0 : _a[1] << count;
+++ d[2] = (count & ~31) ? 0 : _a[2] << count;
+++ d[3] = (count & ~31) ? 0 : _a[3] << count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_slli_epi32(a, count);
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_slli_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++#if defined(__clang__)
+++ // The Clang compiler rejects a second argument of _mm_slli_epi64()
+++ // greater than 63.
+++ const int count = (int) (iter % 65 - 1); // range: -1 ~ 63
+++#else
+++ const int count = (int) (iter % 66 - 1); // range: -1 ~ 64
+++#endif
+++ int64_t d0 = (count & ~63) ? 0 : _a[0] << count;
+++ int64_t d1 = (count & ~63) ? 0 : _a[1] << count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_slli_epi64(a, count);
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_slli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++
+++ int8_t d[16];
+++ int count = (iter % 5) << 2;
+++ for (int i = 0; i < 16; i++) {
+++ if (i < count)
+++ d[i] = 0;
+++ else
+++ d[i] = ((const int8_t *) _a)[i - count];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret;
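+++ // The byte-shift count must be an immediate constant, so each candidate value gets its own call.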
+++ switch (iter % 5) {
+++ case 0:
+++ ret = _mm_slli_si128(a, 0);
+++ break;
+++ case 1:
+++ ret = _mm_slli_si128(a, 4);
+++ break;
+++ case 2:
+++ ret = _mm_slli_si128(a, 8);
+++ break;
+++ case 3:
+++ ret = _mm_slli_si128(a, 12);
+++ break;
+++ case 4:
+++ ret = _mm_slli_si128(a, 16);
+++ break;
+++ }
+++
+++ return VALIDATE_INT8_M128(ret, d);
+++}
+++
+++result_t test_mm_sqrt_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ double f0 = sqrt(_a[0]);
+++ double f1 = sqrt(_a[1]);
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d c = _mm_sqrt_pd(a);
+++
+++ return validateFloatError(c, f0, f1, 1.0e-15);
+++}
+++
+++result_t test_mm_sqrt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double f0 = sqrt(_b[0]);
+++ double f1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_sqrt_sd(a, b);
+++
+++ return validateFloatError(c, f0, f1, 1.0e-15);
+++}
+++
+++result_t test_mm_sra_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int64_t count = (int64_t) (iter % 18 - 1); // range: -1 ~ 16
+++
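+++ // Arithmetic right shift: counts outside 0..15 fill each element with its sign bit.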
+++ int16_t d[8];
+++ d[0] = (count & ~15) ? (_a[0] < 0 ? ~UINT16_C(0) : 0) : (_a[0] >> count);
+++ d[1] = (count & ~15) ? (_a[1] < 0 ? ~UINT16_C(0) : 0) : (_a[1] >> count);
+++ d[2] = (count & ~15) ? (_a[2] < 0 ? ~UINT16_C(0) : 0) : (_a[2] >> count);
+++ d[3] = (count & ~15) ? (_a[3] < 0 ? ~UINT16_C(0) : 0) : (_a[3] >> count);
+++ d[4] = (count & ~15) ? (_a[4] < 0 ? ~UINT16_C(0) : 0) : (_a[4] >> count);
+++ d[5] = (count & ~15) ? (_a[5] < 0 ? ~UINT16_C(0) : 0) : (_a[5] >> count);
+++ d[6] = (count & ~15) ? (_a[6] < 0 ? ~UINT16_C(0) : 0) : (_a[6] >> count);
+++ d[7] = (count & ~15) ? (_a[7] < 0 ? ~UINT16_C(0) : 0) : (_a[7] >> count);
+++
+++ __m128i a = _mm_load_si128((const __m128i *) _a);
+++ __m128i b = _mm_set1_epi64x(count);
+++ __m128i c = _mm_sra_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_sra_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int64_t count = (int64_t) (iter % 34 - 1); // range: -1 ~ 32
+++
+++ int32_t d[4];
+++ d[0] = (count & ~31) ? (_a[0] < 0 ? ~UINT32_C(0) : 0) : _a[0] >> count;
+++ d[1] = (count & ~31) ? (_a[1] < 0 ? ~UINT32_C(0) : 0) : _a[1] >> count;
+++ d[2] = (count & ~31) ? (_a[2] < 0 ? ~UINT32_C(0) : 0) : _a[2] >> count;
+++ d[3] = (count & ~31) ? (_a[3] < 0 ? ~UINT32_C(0) : 0) : _a[3] >> count;
+++
+++ __m128i a = _mm_load_si128((const __m128i *) _a);
+++ __m128i b = _mm_set1_epi64x(count);
+++ __m128i c = _mm_sra_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_srai_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int32_t b = (int32_t) (iter % 18 - 1); // range: -1 ~ 16
+++ int16_t d[8];
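+++ // Counts outside 0..15 behave like a shift by 15, so the reference clamps the count.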
+++ int count = (b & ~15) ? 15 : b;
+++
+++ for (int i = 0; i < 8; i++) {
+++ d[i] = _a[i] >> count;
+++ }
+++
+++ __m128i a = _mm_load_si128((const __m128i *) _a);
+++ __m128i c = _mm_srai_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_srai_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t b = (int32_t) (iter % 34 - 1); // range: -1 ~ 32
+++
+++ int32_t d[4];
+++ int count = (b & ~31) ? 31 : b;
+++ for (int i = 0; i < 4; i++) {
+++ d[i] = _a[i] >> count;
+++ }
+++
+++ __m128i a = _mm_load_si128((const __m128i *) _a);
+++ __m128i c = _mm_srai_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_srl_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int64_t count = (int64_t) (iter % 18 - 1); // range: -1 ~ 16
+++
+++ uint16_t d[8];
+++ d[0] = (count & ~15) ? 0 : (uint16_t) (_a[0]) >> count;
+++ d[1] = (count & ~15) ? 0 : (uint16_t) (_a[1]) >> count;
+++ d[2] = (count & ~15) ? 0 : (uint16_t) (_a[2]) >> count;
+++ d[3] = (count & ~15) ? 0 : (uint16_t) (_a[3]) >> count;
+++ d[4] = (count & ~15) ? 0 : (uint16_t) (_a[4]) >> count;
+++ d[5] = (count & ~15) ? 0 : (uint16_t) (_a[5]) >> count;
+++ d[6] = (count & ~15) ? 0 : (uint16_t) (_a[6]) >> count;
+++ d[7] = (count & ~15) ? 0 : (uint16_t) (_a[7]) >> count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = _mm_set1_epi64x(count);
+++ __m128i c = _mm_srl_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_srl_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int64_t count = (int64_t) (iter % 34 - 1); // range: -1 ~ 32
+++
+++ uint32_t d[4];
+++ d[0] = (count & ~31) ? 0 : (uint32_t) (_a[0]) >> count;
+++ d[1] = (count & ~31) ? 0 : (uint32_t) (_a[1]) >> count;
+++ d[2] = (count & ~31) ? 0 : (uint32_t) (_a[2]) >> count;
+++ d[3] = (count & ~31) ? 0 : (uint32_t) (_a[3]) >> count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = _mm_set1_epi64x(count);
+++ __m128i c = _mm_srl_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_srl_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t count = (int64_t) (iter % 66 - 1); // range: -1 ~ 64
+++
+++ uint64_t d0 = (count & ~63) ? 0 : (uint64_t) (_a[0]) >> count;
+++ uint64_t d1 = (count & ~63) ? 0 : (uint64_t) (_a[1]) >> count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = _mm_set1_epi64x(count);
+++ __m128i c = _mm_srl_epi64(a, b);
+++
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_srli_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int count = (int) (iter % 18 - 1); // range: -1 ~ 16
+++
+++ int16_t d[8];
+++ d[0] = count & (~15) ? 0 : (uint16_t) (_a[0]) >> count;
+++ d[1] = count & (~15) ? 0 : (uint16_t) (_a[1]) >> count;
+++ d[2] = count & (~15) ? 0 : (uint16_t) (_a[2]) >> count;
+++ d[3] = count & (~15) ? 0 : (uint16_t) (_a[3]) >> count;
+++ d[4] = count & (~15) ? 0 : (uint16_t) (_a[4]) >> count;
+++ d[5] = count & (~15) ? 0 : (uint16_t) (_a[5]) >> count;
+++ d[6] = count & (~15) ? 0 : (uint16_t) (_a[6]) >> count;
+++ d[7] = count & (~15) ? 0 : (uint16_t) (_a[7]) >> count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_srli_epi16(a, count);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_srli_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int count = (int) (iter % 34 - 1); // range: -1 ~ 32
+++
+++ int32_t d[4];
+++ d[0] = count & (~31) ? 0 : (uint32_t) (_a[0]) >> count;
+++ d[1] = count & (~31) ? 0 : (uint32_t) (_a[1]) >> count;
+++ d[2] = count & (~31) ? 0 : (uint32_t) (_a[2]) >> count;
+++ d[3] = count & (~31) ? 0 : (uint32_t) (_a[3]) >> count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_srli_epi32(a, count);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_srli_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int count = (int) (iter % 66 - 1); // range: -1 ~ 64
+++
+++ int64_t d0 = count & (~63) ? 0 : (uint64_t) (_a[0]) >> count;
+++ int64_t d1 = count & (~63) ? 0 : (uint64_t) (_a[1]) >> count;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_srli_epi64(a, count);
+++
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_srli_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int count = (iter % 5) << 2;
+++
+++ int8_t d[16];
+++ for (int i = 0; i < 16; i++) {
+++ if (i >= (16 - count))
+++ d[i] = 0;
+++ else
+++ d[i] = _a[i + count];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret;
+++ switch (iter % 5) {
+++ case 0:
+++ ret = _mm_srli_si128(a, 0);
+++ break;
+++ case 1:
+++ ret = _mm_srli_si128(a, 4);
+++ break;
+++ case 2:
+++ ret = _mm_srli_si128(a, 8);
+++ break;
+++ case 3:
+++ ret = _mm_srli_si128(a, 12);
+++ break;
+++ case 4:
+++ ret = _mm_srli_si128(a, 16);
+++ break;
+++ }
+++
+++ return VALIDATE_INT8_M128(ret, d);
+++}
+++
+++result_t test_mm_store_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ double x = impl.mTestFloats[iter + 4];
+++ double y = impl.mTestFloats[iter + 6];
+++
+++ __m128d a = _mm_set_pd(x, y);
+++ _mm_store_pd(p, a);
+++ ASSERT_RETURN(p[0] == y);
+++ ASSERT_RETURN(p[1] == x);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_store_pd1(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ double _a[2] = {(double) impl.mTestFloats[iter],
+++ (double) impl.mTestFloats[iter + 1]};
+++
+++ __m128d a = load_m128d(_a);
+++ _mm_store_pd1(p, a);
+++ ASSERT_RETURN(p[0] == impl.mTestFloats[iter]);
+++ ASSERT_RETURN(p[1] == impl.mTestFloats[iter]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_store_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ double _a[2] = {(double) impl.mTestFloats[iter],
+++ (double) impl.mTestFloats[iter + 1]};
+++
+++ __m128d a = load_m128d(_a);
+++ _mm_store_sd(p, a);
+++ ASSERT_RETURN(p[0] == impl.mTestFloats[iter]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_store_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ alignas(16) int32_t p[4];
+++
+++ __m128i a = load_m128i(_a);
+++ _mm_store_si128((__m128i *) p, a);
+++
+++ return VALIDATE_INT32_M128(a, p);
+++}
+++
+++result_t test_mm_store1_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_store_pd1(impl, iter);
+++}
+++
+++result_t test_mm_storeh_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ double mem;
+++
+++ __m128d a = load_m128d(p);
+++ _mm_storeh_pd(&mem, a);
+++
+++ ASSERT_RETURN(mem == p[1]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storel_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int64_t *p = (int64_t *) impl.mTestIntPointer1;
+++ __m128i mem;
+++
+++ __m128i a = load_m128i(p);
+++ _mm_storel_epi64(&mem, a);
+++
+++ ASSERT_RETURN(((SIMDVec *) &mem)->m128_u64[0] == (uint64_t) p[0]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storel_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ double mem;
+++
+++ __m128d a = load_m128d(p);
+++ _mm_storel_pd(&mem, a);
+++
+++ ASSERT_RETURN(mem == p[0]);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storer_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ alignas(16) double mem[2];
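+++ // _mm_storer_pd writes the lanes in reverse order: mem[0] gets the high lane, mem[1] the low lane.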
+++
+++ __m128d a = load_m128d(p);
+++ _mm_storer_pd(mem, a);
+++
+++ __m128d res = load_m128d(mem);
+++ return validateDouble(res, p[1], p[0]);
+++}
+++
+++result_t test_mm_storeu_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ double *p = (double *) impl.mTestFloatPointer1;
+++ double x = impl.mTestFloats[iter + 4];
+++ double y = impl.mTestFloats[iter + 6];
+++
+++ __m128d a = _mm_set_pd(x, y);
+++ _mm_storeu_pd(p, a);
+++ ASSERT_RETURN(p[0] == y);
+++ ASSERT_RETURN(p[1] == x);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_storeu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i b;
+++ __m128i a = load_m128i(_a);
+++ _mm_storeu_si128(&b, a);
+++ int32_t *_b = (int32_t *) &b;
+++ return VALIDATE_INT32_M128(a, _b);
+++}
+++
+++result_t test_mm_storeu_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // GCC versions before 11 do not implement the intrinsic _mm_storeu_si32.
+++ // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95483 for details.
+++#if (defined(__GNUC__) && !defined(__clang__)) && (__GNUC__ <= 10)
+++ return TEST_UNIMPL;
+++#else
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i b;
+++ __m128i a = load_m128i(_a);
+++ _mm_storeu_si32(&b, a);
+++ int32_t *_b = (int32_t *) &b;
+++ return validateInt32(b, _a[0], _b[1], _b[2], _b[3]);
+++#endif
+++}
+++
+++result_t test_mm_stream_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ alignas(16) double p[2];
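+++ // Non-temporal store; memory must still hold the same two lanes as the source vector.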
+++
+++ __m128d a = load_m128d(_a);
+++ _mm_stream_pd(p, a);
+++
+++ return validateDouble(a, p[0], p[1]);
+++}
+++
+++result_t test_mm_stream_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ alignas(16) int32_t p[4];
+++
+++ __m128i a = load_m128i(_a);
+++ _mm_stream_si128((__m128i *) p, a);
+++
+++ return VALIDATE_INT32_M128(a, p);
+++}
+++
+++result_t test_mm_stream_si32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t a = (const int32_t) impl.mTestInts[iter];
+++ int32_t p;
+++
+++ _mm_stream_si32(&p, a);
+++
+++ ASSERT_RETURN(a == p)
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_stream_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t a = (const int64_t) impl.mTestInts[iter];
+++ __int64 p[1];
+++ _mm_stream_si64(p, a);
+++ ASSERT_RETURN(p[0] == a);
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_sub_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[8];
+++ d[0] = _a[0] - _b[0];
+++ d[1] = _a[1] - _b[1];
+++ d[2] = _a[2] - _b[2];
+++ d[3] = _a[3] - _b[3];
+++ d[4] = _a[4] - _b[4];
+++ d[5] = _a[5] - _b[5];
+++ d[6] = _a[6] - _b[6];
+++ d[7] = _a[7] - _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sub_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_sub_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ int32_t d[4];
+++ d[0] = _a[0] - _b[0];
+++ d[1] = _a[1] - _b[1];
+++ d[2] = _a[2] - _b[2];
+++ d[3] = _a[3] - _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sub_epi32(a, b);
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_sub_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (int64_t *) impl.mTestIntPointer2;
+++ int64_t d0 = _a[0] - _b[0];
+++ int64_t d1 = _a[1] - _b[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sub_epi64(a, b);
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_sub_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t d[16];
+++ d[0] = _a[0] - _b[0];
+++ d[1] = _a[1] - _b[1];
+++ d[2] = _a[2] - _b[2];
+++ d[3] = _a[3] - _b[3];
+++ d[4] = _a[4] - _b[4];
+++ d[5] = _a[5] - _b[5];
+++ d[6] = _a[6] - _b[6];
+++ d[7] = _a[7] - _b[7];
+++ d[8] = _a[8] - _b[8];
+++ d[9] = _a[9] - _b[9];
+++ d[10] = _a[10] - _b[10];
+++ d[11] = _a[11] - _b[11];
+++ d[12] = _a[12] - _b[12];
+++ d[13] = _a[13] - _b[13];
+++ d[14] = _a[14] - _b[14];
+++ d[15] = _a[15] - _b[15];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sub_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_sub_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] - _b[0];
+++ double d1 = _a[1] - _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_sub_pd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_sub_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ double d0 = _a[0] - _b[0];
+++ double d1 = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_sub_sd(a, b);
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_sub_si64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t d = _a[0] - _b[0];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_sub_si64(a, b);
+++
+++ return validateInt64(c, d);
+++}
+++
+++result_t test_mm_subs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int32_t max = 32767;
+++ int32_t min = -32768;
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[8];
+++ for (int i = 0; i < 8; i++) {
+++ int32_t res = (int32_t) _a[i] - (int32_t) _b[i];
+++ if (res > max)
+++ d[i] = max;
+++ else if (res < min)
+++ d[i] = min;
+++ else
+++ d[i] = (int16_t) res;
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_subs_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_subs_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int16_t max = 127;
+++ int16_t min = -128;
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[16];
+++ for (int i = 0; i < 16; i++) {
+++ int16_t res = (int16_t) _a[i] - (int16_t) _b[i];
+++ if (res > max)
+++ d[i] = (int8_t) max;
+++ else if (res < min)
+++ d[i] = (int8_t) min;
+++ else
+++ d[i] = (int8_t) res;
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_subs_epi8(a, b);
+++
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_subs_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
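+++ // Reference: unsigned saturating subtraction clamps to zero on underflow.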
+++ uint16_t d[8];
+++ for (int i = 0; i < 8; i++) {
+++ d[i] = (uint16_t) _a[i] - (uint16_t) _b[i];
+++ if (d[i] > (uint16_t) _a[i])
+++ d[i] = 0;
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ __m128i c = _mm_subs_epu16(a, b);
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_subs_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ uint8_t d[16];
+++ for (int i = 0; i < 16; i++) {
+++ d[i] = (uint8_t) _a[i] - (uint8_t) _b[i];
+++ if (d[i] > (uint8_t) _a[i])
+++ d[i] = 0;
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_subs_epu8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_ucomieq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_comieq_sd(impl, iter);
+++}
+++
+++result_t test_mm_ucomige_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_comige_sd(impl, iter);
+++}
+++
+++result_t test_mm_ucomigt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_comigt_sd(impl, iter);
+++}
+++
+++result_t test_mm_ucomile_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_comile_sd(impl, iter);
+++}
+++
+++result_t test_mm_ucomilt_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_comilt_sd(impl, iter);
+++}
+++
+++result_t test_mm_ucomineq_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_comineq_sd(impl, iter);
+++}
+++
+++result_t test_mm_undefined_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ __m128d a = _mm_undefined_pd();
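+++ // XOR-ing the register with itself yields a well-defined all-zero value to validate.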
+++ a = _mm_xor_pd(a, a);
+++ return validateDouble(a, 0, 0);
+++}
+++
+++result_t test_mm_undefined_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ __m128i a = _mm_undefined_si128();
+++ a = _mm_xor_si128(a, a);
+++ return validateInt64(a, 0, 0);
+++}
+++
+++result_t test_mm_unpackhi_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[8];
+++ d[0] = _a[4];
+++ d[1] = _b[4];
+++ d[2] = _a[5];
+++ d[3] = _b[5];
+++ d[4] = _a[6];
+++ d[5] = _b[6];
+++ d[6] = _a[7];
+++ d[7] = _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpackhi_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(ret, d);
+++}
+++
+++result_t test_mm_unpackhi_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int32_t d[4];
+++ d[0] = _a[2];
+++ d[1] = _b[2];
+++ d[2] = _a[3];
+++ d[3] = _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpackhi_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_unpackhi_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t i0 = _a[1];
+++ int64_t i1 = _b[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpackhi_epi64(a, b);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_unpackhi_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[16];
+++ d[0] = _a[8];
+++ d[1] = _b[8];
+++ d[2] = _a[9];
+++ d[3] = _b[9];
+++ d[4] = _a[10];
+++ d[5] = _b[10];
+++ d[6] = _a[11];
+++ d[7] = _b[11];
+++ d[8] = _a[12];
+++ d[9] = _b[12];
+++ d[10] = _a[13];
+++ d[11] = _b[13];
+++ d[12] = _a[14];
+++ d[13] = _b[14];
+++ d[14] = _a[15];
+++ d[15] = _b[15];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpackhi_epi8(a, b);
+++
+++ return VALIDATE_INT8_M128(ret, d);
+++}
+++
+++result_t test_mm_unpackhi_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d ret = _mm_unpackhi_pd(a, b);
+++
+++ return validateDouble(ret, _a[1], _b[1]);
+++}
+++
+++result_t test_mm_unpacklo_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[8];
+++ d[0] = _a[0];
+++ d[1] = _b[0];
+++ d[2] = _a[1];
+++ d[3] = _b[1];
+++ d[4] = _a[2];
+++ d[5] = _b[2];
+++ d[6] = _a[3];
+++ d[7] = _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpacklo_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(ret, d);
+++}
+++
+++result_t test_mm_unpacklo_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int32_t d[4];
+++ d[0] = _a[0];
+++ d[1] = _b[0];
+++ d[2] = _a[1];
+++ d[3] = _b[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpacklo_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_unpacklo_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t i0 = _a[0];
+++ int64_t i1 = _b[0];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpacklo_epi64(a, b);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_unpacklo_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[16];
+++ d[0] = _a[0];
+++ d[1] = _b[0];
+++ d[2] = _a[1];
+++ d[3] = _b[1];
+++ d[4] = _a[2];
+++ d[5] = _b[2];
+++ d[6] = _a[3];
+++ d[7] = _b[3];
+++ d[8] = _a[4];
+++ d[9] = _b[4];
+++ d[10] = _a[5];
+++ d[11] = _b[5];
+++ d[12] = _a[6];
+++ d[13] = _b[6];
+++ d[14] = _a[7];
+++ d[15] = _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_unpacklo_epi8(a, b);
+++
+++ return VALIDATE_INT8_M128(ret, d);
+++}
+++
+++result_t test_mm_unpacklo_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d ret = _mm_unpacklo_pd(a, b);
+++
+++ return validateDouble(ret, _a[0], _b[0]);
+++}
+++
+++result_t test_mm_xor_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestFloatPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestFloatPointer2;
+++
+++ int64_t d0 = _a[0] ^ _b[0];
+++ int64_t d1 = _a[1] ^ _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_xor_pd(a, b);
+++
+++ return validateDouble(c, *((double *) &d0), *((double *) &d1));
+++}
+++
+++result_t test_mm_xor_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t d0 = _a[0] ^ _b[0];
+++ int64_t d1 = _a[1] ^ _b[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_xor_si128(a, b);
+++
+++ return validateInt64(c, d0, d1);
+++}
+++
+++/* SSE3 */
+++result_t test_mm_addsub_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
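+++ // _mm_addsub_pd subtracts in the low lane and adds in the high lane.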
+++ double d0 = _a[0] - _b[0];
+++ double d1 = _a[1] + _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_addsub_pd(a, b);
+++
+++ return validateDouble(c, d0, d1);
+++}
+++
+++result_t test_mm_addsub_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME: The rounding mode affects the test result on ARM platforms.
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float f0 = _a[0] - _b[0];
+++ float f1 = _a[1] + _b[1];
+++ float f2 = _a[2] - _b[2];
+++ float f3 = _a[3] + _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_addsub_ps(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_hadd_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double f0 = _a[0] + _a[1];
+++ double f1 = _b[0] + _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_hadd_pd(a, b);
+++
+++ return validateDouble(c, f0, f1);
+++}
+++
+++result_t test_mm_hadd_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME: The rounding mode affects the test result on ARM platforms.
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float f0 = _a[0] + _a[1];
+++ float f1 = _a[2] + _a[3];
+++ float f2 = _b[0] + _b[1];
+++ float f3 = _b[2] + _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_hadd_ps(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_hsub_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double f0 = _a[0] - _a[1];
+++ double f1 = _b[0] - _b[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d c = _mm_hsub_pd(a, b);
+++
+++ return validateDouble(c, f0, f1);
+++}
+++
+++result_t test_mm_hsub_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ // FIXME: The rounding mode affects the test result on ARM platforms.
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ float f0 = _a[0] - _a[1];
+++ float f1 = _a[2] - _a[3];
+++ float f2 = _b[0] - _b[1];
+++ float f3 = _b[2] - _b[3];
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_hsub_ps(a, b);
+++
+++ return validateFloat(c, f0, f1, f2, f3);
+++}
+++
+++result_t test_mm_lddqu_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_loadu_si128(impl, iter);
+++}
+++
+++result_t test_mm_loaddup_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *addr = (const double *) impl.mTestFloatPointer1;
+++
+++ __m128d ret = _mm_loaddup_pd(addr);
+++
+++ return validateDouble(ret, addr[0], addr[0]);
+++}
+++
+++result_t test_mm_movedup_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *p = (const double *) impl.mTestFloatPointer1;
+++ __m128d a = load_m128d(p);
+++ __m128d b = _mm_movedup_pd(a);
+++
+++ return validateDouble(b, p[0], p[0]);
+++}
+++
+++result_t test_mm_movehdup_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p = impl.mTestFloatPointer1;
+++ __m128 a = load_m128(p);
+++ return validateFloat(_mm_movehdup_ps(a), p[1], p[1], p[3], p[3]);
+++}
+++
+++result_t test_mm_moveldup_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *p = impl.mTestFloatPointer1;
+++ __m128 a = load_m128(p);
+++ return validateFloat(_mm_moveldup_ps(a), p[0], p[0], p[2], p[2]);
+++}
+++
+++/* SSSE3 */
+++result_t test_mm_abs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_abs_epi16(a);
+++
+++ uint32_t d[8];
+++ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+++ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+++ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+++ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+++ d[4] = (_a[4] < 0) ? -_a[4] : _a[4];
+++ d[5] = (_a[5] < 0) ? -_a[5] : _a[5];
+++ d[6] = (_a[6] < 0) ? -_a[6] : _a[6];
+++ d[7] = (_a[7] < 0) ? -_a[7] : _a[7];
+++
+++ return VALIDATE_UINT16_M128(c, d);
+++}
+++
+++result_t test_mm_abs_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_abs_epi32(a);
+++
+++ uint32_t d[4];
+++ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+++ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+++ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+++ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+++
+++ return VALIDATE_UINT32_M128(c, d);
+++}
+++
+++result_t test_mm_abs_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ __m128i c = _mm_abs_epi8(a);
+++
+++ uint32_t d[16];
+++ for (int i = 0; i < 16; i++) {
+++ d[i] = (_a[i] < 0) ? -_a[i] : _a[i];
+++ }
+++
+++ return VALIDATE_UINT8_M128(c, d);
+++}
+++
+++result_t test_mm_abs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ __m64 a = load_m64(_a);
+++ __m64 c = _mm_abs_pi16(a);
+++
+++ uint32_t d[4];
+++ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+++ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+++ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+++ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+++
+++ return VALIDATE_UINT16_M64(c, d);
+++}
+++
+++result_t test_mm_abs_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m64 a = load_m64(_a);
+++ __m64 c = _mm_abs_pi32(a);
+++
+++ uint32_t d[2];
+++ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+++ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+++
+++ return VALIDATE_UINT32_M64(c, d);
+++}
+++
+++result_t test_mm_abs_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ __m64 a = load_m64(_a);
+++ __m64 c = _mm_abs_pi8(a);
+++
+++ uint32_t d[8];
+++ d[0] = (_a[0] < 0) ? -_a[0] : _a[0];
+++ d[1] = (_a[1] < 0) ? -_a[1] : _a[1];
+++ d[2] = (_a[2] < 0) ? -_a[2] : _a[2];
+++ d[3] = (_a[3] < 0) ? -_a[3] : _a[3];
+++ d[4] = (_a[4] < 0) ? -_a[4] : _a[4];
+++ d[5] = (_a[5] < 0) ? -_a[5] : _a[5];
+++ d[6] = (_a[6] < 0) ? -_a[6] : _a[6];
+++ d[7] = (_a[7] < 0) ? -_a[7] : _a[7];
+++
+++ return VALIDATE_UINT8_M64(c, d);
+++}
+++
+++result_t test_mm_alignr_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++#if defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ unsigned int shift = (iter % 5) << 3;
+++ uint8_t d[32];
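+++ // Reference: concatenate b (low 16 bytes) and a (high 16 bytes), then shift the 32-byte buffer right by shift bytes.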
+++
+++ if (shift >= 32) {
+++ memset((void *) d, 0, sizeof(d));
+++ } else {
+++ memcpy((void *) d, (const void *) _b, 16);
+++ memcpy((void *) (d + 16), (const void *) _a, 16);
+++ // shift the 32-byte concatenation right by 'shift' bytes, zero-filling the tail
+++ for (size_t x = 0; x < sizeof(d); x++) {
+++ if (x + shift >= sizeof(d))
+++ d[x] = 0;
+++ else
+++ d[x] = d[x + shift];
+++ }
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret;
+++ switch (iter % 5) {
+++ case 0:
+++ ret = _mm_alignr_epi8(a, b, 0);
+++ break;
+++ case 1:
+++ ret = _mm_alignr_epi8(a, b, 8);
+++ break;
+++ case 2:
+++ ret = _mm_alignr_epi8(a, b, 16);
+++ break;
+++ case 3:
+++ ret = _mm_alignr_epi8(a, b, 24);
+++ break;
+++ case 4:
+++ ret = _mm_alignr_epi8(a, b, 32);
+++ break;
+++ }
+++
+++ return VALIDATE_UINT8_M128(ret, d);
+++#endif
+++}
+++
+++result_t test_mm_alignr_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++#if defined(__clang__)
+++ return TEST_UNIMPL;
+++#else
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++ unsigned int shift = (iter % 3) << 3;
+++ uint8_t d[16];
+++
+++ if (shift >= 16) {
+++ memset((void *) d, 0, sizeof(d));
+++ } else {
+++ memcpy((void *) d, (const void *) _b, 8);
+++ memcpy((void *) (d + 8), (const void *) _a, 8);
+++ // shift the 16-byte concatenation right by 'shift' bytes, zero-filling the tail
+++ for (size_t x = 0; x < sizeof(d); x++) {
+++ if (x + shift >= sizeof(d))
+++ d[x] = 0;
+++ else
+++ d[x] = d[x + shift];
+++ }
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret;
+++ switch (iter % 3) {
+++ case 0:
+++ ret = _mm_alignr_pi8(a, b, 0);
+++ break;
+++ case 1:
+++ ret = _mm_alignr_pi8(a, b, 8);
+++ break;
+++ case 2:
+++ ret = _mm_alignr_pi8(a, b, 16);
+++ break;
+++ }
+++
+++ return VALIDATE_UINT8_M64(ret, d);
+++#endif
+++}
+++
+++result_t test_mm_hadd_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[8];
+++ d[0] = _a[0] + _a[1];
+++ d[1] = _a[2] + _a[3];
+++ d[2] = _a[4] + _a[5];
+++ d[3] = _a[6] + _a[7];
+++ d[4] = _b[0] + _b[1];
+++ d[5] = _b[2] + _b[3];
+++ d[6] = _b[4] + _b[5];
+++ d[7] = _b[6] + _b[7];
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_hadd_epi16(a, b);
+++ return VALIDATE_INT16_M128(ret, d);
+++}
+++
+++result_t test_mm_hadd_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++ int32_t d[4];
+++ d[0] = _a[0] + _a[1];
+++ d[1] = _a[2] + _a[3];
+++ d[2] = _b[0] + _b[1];
+++ d[3] = _b[2] + _b[3];
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_hadd_epi32(a, b);
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_hadd_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t d[4];
+++ d[0] = _a[0] + _a[1];
+++ d[1] = _a[2] + _a[3];
+++ d[2] = _b[0] + _b[1];
+++ d[3] = _b[2] + _b[3];
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_hadd_pi16(a, b);
+++ return VALIDATE_INT16_M64(ret, d);
+++}
+++
+++result_t test_mm_hadd_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++ int32_t d[2];
+++ d[0] = _a[0] + _a[1];
+++ d[1] = _b[0] + _b[1];
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_hadd_pi32(a, b);
+++ return VALIDATE_INT32_M64(ret, d);
+++}
+++
+++result_t test_mm_hadds_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int16_t d16[8];
+++ int32_t d32[8];
+++ d32[0] = (int32_t) _a[0] + (int32_t) _a[1];
+++ d32[1] = (int32_t) _a[2] + (int32_t) _a[3];
+++ d32[2] = (int32_t) _a[4] + (int32_t) _a[5];
+++ d32[3] = (int32_t) _a[6] + (int32_t) _a[7];
+++ d32[4] = (int32_t) _b[0] + (int32_t) _b[1];
+++ d32[5] = (int32_t) _b[2] + (int32_t) _b[3];
+++ d32[6] = (int32_t) _b[4] + (int32_t) _b[5];
+++ d32[7] = (int32_t) _b[6] + (int32_t) _b[7];
+++ for (int i = 0; i < 8; i++) {
+++ if (d32[i] > (int32_t) INT16_MAX)
+++ d16[i] = INT16_MAX;
+++ else if (d32[i] < (int32_t) INT16_MIN)
+++ d16[i] = INT16_MIN;
+++ else
+++ d16[i] = (int16_t) d32[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_hadds_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d16);
+++}
+++
+++result_t test_mm_hadds_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int16_t d16[8];
+++ int32_t d32[8];
+++ d32[0] = (int32_t) _a[0] + (int32_t) _a[1];
+++ d32[1] = (int32_t) _a[2] + (int32_t) _a[3];
+++ d32[2] = (int32_t) _b[0] + (int32_t) _b[1];
+++ d32[3] = (int32_t) _b[2] + (int32_t) _b[3];
+++ for (int i = 0; i < 8; i++) {
+++ if (d32[i] > (int32_t) INT16_MAX)
+++ d16[i] = INT16_MAX;
+++ else if (d32[i] < (int32_t) INT16_MIN)
+++ d16[i] = INT16_MIN;
+++ else
+++ d16[i] = (int16_t) d32[i];
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_hadds_pi16(a, b);
+++
+++ return VALIDATE_INT16_M64(c, d16);
+++}
+++
+++result_t test_mm_hsub_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int16_t d[8];
+++ d[0] = _a[0] - _a[1];
+++ d[1] = _a[2] - _a[3];
+++ d[2] = _a[4] - _a[5];
+++ d[3] = _a[6] - _a[7];
+++ d[4] = _b[0] - _b[1];
+++ d[5] = _b[2] - _b[3];
+++ d[6] = _b[4] - _b[5];
+++ d[7] = _b[6] - _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_hsub_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_hsub_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer1;
+++
+++ int32_t d[4];
+++ d[0] = _a[0] - _a[1];
+++ d[1] = _a[2] - _a[3];
+++ d[2] = _b[0] - _b[1];
+++ d[3] = _b[2] - _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_hsub_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_hsub_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[4];
+++ d[0] = _a[0] - _a[1];
+++ d[1] = _a[2] - _a[3];
+++ d[2] = _b[0] - _b[1];
+++ d[3] = _b[2] - _b[3];
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_hsub_pi16(a, b);
+++
+++ return VALIDATE_INT16_M64(c, d);
+++}
+++
+++result_t test_mm_hsub_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++
+++ int32_t d[2];
+++ d[0] = _a[0] - _a[1];
+++ d[1] = _b[0] - _b[1];
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_hsub_pi32(a, b);
+++
+++ return VALIDATE_INT32_M64(c, d);
+++}
+++
+++result_t test_mm_hsubs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int16_t d16[8];
+++ int32_t d32[8];
+++ d32[0] = (int32_t) _a[0] - (int32_t) _a[1];
+++ d32[1] = (int32_t) _a[2] - (int32_t) _a[3];
+++ d32[2] = (int32_t) _a[4] - (int32_t) _a[5];
+++ d32[3] = (int32_t) _a[6] - (int32_t) _a[7];
+++ d32[4] = (int32_t) _b[0] - (int32_t) _b[1];
+++ d32[5] = (int32_t) _b[2] - (int32_t) _b[3];
+++ d32[6] = (int32_t) _b[4] - (int32_t) _b[5];
+++ d32[7] = (int32_t) _b[6] - (int32_t) _b[7];
+++ for (int i = 0; i < 8; i++) {
+++ if (d32[i] > (int32_t) INT16_MAX)
+++ d16[i] = INT16_MAX;
+++ else if (d32[i] < (int32_t) INT16_MIN)
+++ d16[i] = INT16_MIN;
+++ else
+++ d16[i] = (int16_t) d32[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_hsubs_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d16);
+++}
+++
+++result_t test_mm_hsubs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int32_t _d[4];
+++ _d[0] = (int32_t) _a[0] - (int32_t) _a[1];
+++ _d[1] = (int32_t) _a[2] - (int32_t) _a[3];
+++ _d[2] = (int32_t) _b[0] - (int32_t) _b[1];
+++ _d[3] = (int32_t) _b[2] - (int32_t) _b[3];
+++
+++ for (int i = 0; i < 4; i++) {
+++ if (_d[i] > (int32_t) INT16_MAX) {
+++ _d[i] = INT16_MAX;
+++ } else if (_d[i] < (int32_t) INT16_MIN) {
+++ _d[i] = INT16_MIN;
+++ }
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_hsubs_pi16(a, b);
+++
+++ return VALIDATE_INT16_M64(c, _d);
+++}
+++
+++result_t test_mm_maddubs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
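+++ // _mm_maddubs_epi16 multiplies each unsigned byte of a by the matching
+++ // signed byte of b, then adds adjacent product pairs with signed 16-bit
+++ // saturation; the scalar reference below mirrors that.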
+++ int32_t d0 = (int32_t) (_a[0] * _b[0]);
+++ int32_t d1 = (int32_t) (_a[1] * _b[1]);
+++ int32_t d2 = (int32_t) (_a[2] * _b[2]);
+++ int32_t d3 = (int32_t) (_a[3] * _b[3]);
+++ int32_t d4 = (int32_t) (_a[4] * _b[4]);
+++ int32_t d5 = (int32_t) (_a[5] * _b[5]);
+++ int32_t d6 = (int32_t) (_a[6] * _b[6]);
+++ int32_t d7 = (int32_t) (_a[7] * _b[7]);
+++ int32_t d8 = (int32_t) (_a[8] * _b[8]);
+++ int32_t d9 = (int32_t) (_a[9] * _b[9]);
+++ int32_t d10 = (int32_t) (_a[10] * _b[10]);
+++ int32_t d11 = (int32_t) (_a[11] * _b[11]);
+++ int32_t d12 = (int32_t) (_a[12] * _b[12]);
+++ int32_t d13 = (int32_t) (_a[13] * _b[13]);
+++ int32_t d14 = (int32_t) (_a[14] * _b[14]);
+++ int32_t d15 = (int32_t) (_a[15] * _b[15]);
+++
+++ int16_t e[8];
+++ e[0] = saturate_16(d0 + d1);
+++ e[1] = saturate_16(d2 + d3);
+++ e[2] = saturate_16(d4 + d5);
+++ e[3] = saturate_16(d6 + d7);
+++ e[4] = saturate_16(d8 + d9);
+++ e[5] = saturate_16(d10 + d11);
+++ e[6] = saturate_16(d12 + d13);
+++ e[7] = saturate_16(d14 + d15);
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_maddubs_epi16(a, b);
+++ return VALIDATE_INT16_M128(c, e);
+++}
+++
+++result_t test_mm_maddubs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int16_t d0 = (int16_t) (_a[0] * _b[0]);
+++ int16_t d1 = (int16_t) (_a[1] * _b[1]);
+++ int16_t d2 = (int16_t) (_a[2] * _b[2]);
+++ int16_t d3 = (int16_t) (_a[3] * _b[3]);
+++ int16_t d4 = (int16_t) (_a[4] * _b[4]);
+++ int16_t d5 = (int16_t) (_a[5] * _b[5]);
+++ int16_t d6 = (int16_t) (_a[6] * _b[6]);
+++ int16_t d7 = (int16_t) (_a[7] * _b[7]);
+++
+++ int16_t e[4];
+++ e[0] = saturate_16(d0 + d1);
+++ e[1] = saturate_16(d2 + d3);
+++ e[2] = saturate_16(d4 + d5);
+++ e[3] = saturate_16(d6 + d7);
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_maddubs_pi16(a, b);
+++
+++ return VALIDATE_INT16_M64(c, e);
+++}
+++
+++result_t test_mm_mulhrs_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ int32_t _c[8];
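+++ // Reference for _mm_mulhrs_epi16: take the 32-bit product, shift right by
+++ // 14, add 1 to round, then drop the low bit, i.e. round((a * b) >> 15).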
+++ for (int i = 0; i < 8; i++) {
+++ _c[i] =
+++ (((((int32_t) _a[i] * (int32_t) _b[i]) >> 14) + 1) & 0x1FFFE) >> 1;
+++ }
+++ __m128i c = _mm_mulhrs_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, _c);
+++}
+++
+++result_t test_mm_mulhrs_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ int32_t _c[4];
+++ for (int i = 0; i < 4; i++) {
+++ _c[i] =
+++ (((((int32_t) _a[i] * (int32_t) _b[i]) >> 14) + 1) & 0x1FFFE) >> 1;
+++ }
+++ __m64 c = _mm_mulhrs_pi16(a, b);
+++
+++ return VALIDATE_INT16_M64(c, _c);
+++}
+++
+++result_t test_mm_shuffle_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t dst[16];
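+++ // Per-byte shuffle: if the selector's high bit is set the lane becomes 0,
+++ // otherwise its low 4 bits pick a byte from a.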
+++
+++ for (int i = 0; i < 16; i++) {
+++ if (_b[i] & 0x80) {
+++ dst[i] = 0;
+++ } else {
+++ dst[i] = _a[_b[i] & 0x0F];
+++ }
+++ }
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i ret = _mm_shuffle_epi8(a, b);
+++
+++ return VALIDATE_INT8_M128(ret, dst);
+++}
+++
+++result_t test_mm_shuffle_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t dst[8];
+++
+++ for (int i = 0; i < 8; i++) {
+++ if (_b[i] & 0x80) {
+++ dst[i] = 0;
+++ } else {
+++ dst[i] = _a[_b[i] & 0x07];
+++ }
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 ret = _mm_shuffle_pi8(a, b);
+++
+++ return VALIDATE_INT8_M64(ret, dst);
+++}
+++
+++result_t test_mm_sign_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[8];
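+++ // _mm_sign_* semantics: negate the lane of a when the matching lane of b
+++ // is negative, zero it when b is zero, and pass it through otherwise.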
+++ for (int i = 0; i < 8; i++) {
+++ if (_b[i] < 0) {
+++ d[i] = -_a[i];
+++ } else if (_b[i] == 0) {
+++ d[i] = 0;
+++ } else {
+++ d[i] = _a[i];
+++ }
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sign_epi16(a, b);
+++
+++ return VALIDATE_INT16_M128(c, d);
+++}
+++
+++result_t test_mm_sign_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int32_t d[4];
+++ for (int i = 0; i < 4; i++) {
+++ if (_b[i] < 0) {
+++ d[i] = -_a[i];
+++ } else if (_b[i] == 0) {
+++ d[i] = 0;
+++ } else {
+++ d[i] = _a[i];
+++ }
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sign_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_sign_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[16];
+++ for (int i = 0; i < 16; i++) {
+++ if (_b[i] < 0) {
+++ d[i] = -_a[i];
+++ } else if (_b[i] == 0) {
+++ d[i] = 0;
+++ } else {
+++ d[i] = _a[i];
+++ }
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_sign_epi8(a, b);
+++
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_sign_pi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++
+++ int16_t d[4];
+++ for (int i = 0; i < 4; i++) {
+++ if (_b[i] < 0) {
+++ d[i] = -_a[i];
+++ } else if (_b[i] == 0) {
+++ d[i] = 0;
+++ } else {
+++ d[i] = _a[i];
+++ }
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_sign_pi16(a, b);
+++
+++ return VALIDATE_INT16_M64(c, d);
+++}
+++
+++result_t test_mm_sign_pi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int32_t d[2];
+++ for (int i = 0; i < 2; i++) {
+++ if (_b[i] < 0) {
+++ d[i] = -_a[i];
+++ } else if (_b[i] == 0) {
+++ d[i] = 0;
+++ } else {
+++ d[i] = _a[i];
+++ }
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_sign_pi32(a, b);
+++
+++ return VALIDATE_INT32_M64(c, d);
+++}
+++
+++result_t test_mm_sign_pi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[8];
+++ for (int i = 0; i < 8; i++) {
+++ if (_b[i] < 0) {
+++ d[i] = -_a[i];
+++ } else if (_b[i] == 0) {
+++ d[i] = 0;
+++ } else {
+++ d[i] = _a[i];
+++ }
+++ }
+++
+++ __m64 a = load_m64(_a);
+++ __m64 b = load_m64(_b);
+++ __m64 c = _mm_sign_pi8(a, b);
+++
+++ return VALIDATE_INT8_M64(c, d);
+++}
+++
+++/* SSE4.1 */
+++result_t test_mm_blend_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++ const int16_t *_b = (const int16_t *) impl.mTestIntPointer2;
+++ int16_t _c[8];
+++ __m128i a, b, c;
+++
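+++ // The blend immediate must be a compile-time constant, so TEST_IMPL is
+++ // stamped out once per mask value via IMM_256_ITER (defined earlier in
+++ // this file), presumably covering all 256 possible 8-bit masks.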
+++#define TEST_IMPL(IDX) \
+++ for (int j = 0; j < 8; j++) { \
+++ if ((IDX >> j) & 0x1) { \
+++ _c[j] = _b[j]; \
+++ } else { \
+++ _c[j] = _a[j]; \
+++ } \
+++ } \
+++ a = load_m128i(_a); \
+++ b = load_m128i(_b); \
+++ c = _mm_blend_epi16(a, b, IDX); \
+++ CHECK_RESULT(VALIDATE_INT16_M128(c, _c));
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_blend_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ __m128d a, b, c;
+++
+++#define TEST_IMPL(IDX) \
+++ double _c##IDX[2]; \
+++ for (int j = 0; j < 2; j++) { \
+++ if ((IDX >> j) & 0x1) { \
+++ _c##IDX[j] = _b[j]; \
+++ } else { \
+++ _c##IDX[j] = _a[j]; \
+++ } \
+++ } \
+++ \
+++ a = load_m128d(_a); \
+++ b = load_m128d(_b); \
+++ c = _mm_blend_pd(a, b, IDX); \
+++ CHECK_RESULT(validateDouble(c, _c##IDX[0], _c##IDX[1]))
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_blend_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c;
+++
+++ // gcc and clang cannot compile a call to _mm_blend_ps whose third argument
+++ // is a runtime integer: the immediate is limited to 4 bits, so each value
+++ // must be expanded as a compile-time constant.
+++#define TEST_IMPL(IDX) \
+++ float _c##IDX[4]; \
+++ for (int i = 0; i < 4; i++) { \
+++ if (IDX & (1 << i)) { \
+++ _c##IDX[i] = _b[i]; \
+++ } else { \
+++ _c##IDX[i] = _a[i]; \
+++ } \
+++ } \
+++ \
+++ c = _mm_blend_ps(a, b, IDX); \
+++ CHECK_RESULT( \
+++ validateFloat(c, _c##IDX[0], _c##IDX[1], _c##IDX[2], _c##IDX[3]))
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_blendv_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ const int8_t _mask[16] = {(const int8_t) impl.mTestInts[iter],
+++ (const int8_t) impl.mTestInts[iter + 1],
+++ (const int8_t) impl.mTestInts[iter + 2],
+++ (const int8_t) impl.mTestInts[iter + 3],
+++ (const int8_t) impl.mTestInts[iter + 4],
+++ (const int8_t) impl.mTestInts[iter + 5],
+++ (const int8_t) impl.mTestInts[iter + 6],
+++ (const int8_t) impl.mTestInts[iter + 7]};
+++
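+++ // Only the first eight mask lanes are driven by test data; the remaining
+++ // lanes are zero-initialized, so bytes 8-15 always come from _a.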
+++ int8_t _c[16];
+++ for (int i = 0; i < 16; i++) {
+++ if (_mask[i] >> 7) {
+++ _c[i] = _b[i];
+++ } else {
+++ _c[i] = _a[i];
+++ }
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i mask = load_m128i(_mask);
+++ __m128i c = _mm_blendv_epi8(a, b, mask);
+++
+++ return VALIDATE_INT8_M128(c, _c);
+++}
+++
+++result_t test_mm_blendv_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++ const double _mask[] = {(double) impl.mTestFloats[iter],
+++ (double) impl.mTestFloats[iter + 1]};
+++
+++ double _c[2];
+++ for (int i = 0; i < 2; i++) {
+++ // an arithmetic right shift yields all 1s for negative values and all 0s
+++ // for non-negative values, so the sign bit of the mask selects the source
+++ if ((*(const int64_t *) (_mask + i)) >> 63) {
+++ _c[i] = _b[i];
+++ } else {
+++ _c[i] = _a[i];
+++ }
+++ }
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d mask = load_m128d(_mask);
+++
+++ __m128d c = _mm_blendv_pd(a, b, mask);
+++
+++ return validateDouble(c, _c[0], _c[1]);
+++}
+++
+++result_t test_mm_blendv_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ const float _mask[] = {impl.mTestFloats[iter], impl.mTestFloats[iter + 1],
+++ impl.mTestFloats[iter + 2],
+++ impl.mTestFloats[iter + 3]};
+++
+++ float _c[4];
+++ for (int i = 0; i < 4; i++) {
+++ // an arithmetic right shift yields all 1s for negative values and all 0s
+++ // for non-negative values, so the sign bit of the mask selects the source
+++ if ((*(const int32_t *) (_mask + i)) >> 31) {
+++ _c[i] = _b[i];
+++ } else {
+++ _c[i] = _a[i];
+++ }
+++ }
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 mask = load_m128(_mask);
+++
+++ __m128 c = _mm_blendv_ps(a, b, mask);
+++
+++ return validateFloat(c, _c[0], _c[1], _c[2], _c[3]);
+++}
+++
+++result_t test_mm_ceil_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ double dx = ceil(_a[0]);
+++ double dy = ceil(_a[1]);
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d ret = _mm_ceil_pd(a);
+++
+++ return validateDouble(ret, dx, dy);
+++}
+++
+++result_t test_mm_ceil_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ float dx = ceilf(_a[0]);
+++ float dy = ceilf(_a[1]);
+++ float dz = ceilf(_a[2]);
+++ float dw = ceilf(_a[3]);
+++
+++ __m128 a = _mm_load_ps(_a);
+++ __m128 c = _mm_ceil_ps(a);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_ceil_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double dx = ceil(_b[0]);
+++ double dy = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d ret = _mm_ceil_sd(a, b);
+++
+++ return validateDouble(ret, dx, dy);
+++}
+++
+++result_t test_mm_ceil_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer1;
+++
+++ float f0 = ceilf(_b[0]);
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_ceil_ss(a, b);
+++
+++ return validateFloat(c, f0, _a[1], _a[2], _a[3]);
+++}
+++
+++result_t test_mm_cmpeq_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++ int64_t d0 = (_a[0] == _b[0]) ? 0xffffffffffffffff : 0x0;
+++ int64_t d1 = (_a[1] == _b[1]) ? 0xffffffffffffffff : 0x0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_cmpeq_epi64(a, b);
+++ return validateInt64(c, d0, d1);
+++}
+++
+++result_t test_mm_cvtepi16_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int32_t d[4];
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++ d[2] = (int32_t) _a[2];
+++ d[3] = (int32_t) _a[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepi16_epi32(a);
+++
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtepi16_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
+++
+++ int64_t i0 = (int64_t) _a[0];
+++ int64_t i1 = (int64_t) _a[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepi16_epi64(a);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_cvtepi32_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++
+++ int64_t i0 = (int64_t) _a[0];
+++ int64_t i1 = (int64_t) _a[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepi32_epi64(a);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_cvtepi8_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++
+++ int16_t d[8];
+++ d[0] = (int16_t) _a[0];
+++ d[1] = (int16_t) _a[1];
+++ d[2] = (int16_t) _a[2];
+++ d[3] = (int16_t) _a[3];
+++ d[4] = (int16_t) _a[4];
+++ d[5] = (int16_t) _a[5];
+++ d[6] = (int16_t) _a[6];
+++ d[7] = (int16_t) _a[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepi8_epi16(a);
+++
+++ return VALIDATE_INT16_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtepi8_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++
+++ int32_t d[4];
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++ d[2] = (int32_t) _a[2];
+++ d[3] = (int32_t) _a[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepi8_epi32(a);
+++
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtepi8_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++
+++ int64_t i0 = (int64_t) _a[0];
+++ int64_t i1 = (int64_t) _a[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepi8_epi64(a);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_cvtepu16_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++
+++ int32_t d[4];
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++ d[2] = (int32_t) _a[2];
+++ d[3] = (int32_t) _a[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepu16_epi32(a);
+++
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtepu16_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++
+++ int64_t i0 = (int64_t) _a[0];
+++ int64_t i1 = (int64_t) _a[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepu16_epi64(a);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_cvtepu32_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+++
+++ int64_t i0 = (int64_t) _a[0];
+++ int64_t i1 = (int64_t) _a[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepu32_epi64(a);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
+++result_t test_mm_cvtepu8_epi16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++
+++ int16_t d[8];
+++ d[0] = (int16_t) _a[0];
+++ d[1] = (int16_t) _a[1];
+++ d[2] = (int16_t) _a[2];
+++ d[3] = (int16_t) _a[3];
+++ d[4] = (int16_t) _a[4];
+++ d[5] = (int16_t) _a[5];
+++ d[6] = (int16_t) _a[6];
+++ d[7] = (int16_t) _a[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepu8_epi16(a);
+++
+++ return VALIDATE_INT16_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtepu8_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++
+++ int32_t d[4];
+++ d[0] = (int32_t) _a[0];
+++ d[1] = (int32_t) _a[1];
+++ d[2] = (int32_t) _a[2];
+++ d[3] = (int32_t) _a[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepu8_epi32(a);
+++
+++ return VALIDATE_INT32_M128(ret, d);
+++}
+++
+++result_t test_mm_cvtepu8_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++
+++ int64_t i0 = (int64_t) _a[0];
+++ int64_t i1 = (int64_t) _a[1];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_cvtepu8_epi64(a);
+++
+++ return validateInt64(ret, i0, i1);
+++}
+++
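+++ // _mm_dp_pd immediate encoding: bits 5:4 select which products enter the
+++ // sum, bits 1:0 select which output lanes receive it; the macro below
+++ // builds the scalar reference the same way.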
+++#define MM_DP_PD_TEST_CASE_WITH(imm8) \
+++ do { \
+++ const double *_a = (const double *) impl.mTestFloatPointer1; \
+++ const double *_b = (const double *) impl.mTestFloatPointer2; \
+++ const int imm = imm8; \
+++ double d[2]; \
+++ double sum = 0; \
+++ for (size_t i = 0; i < 2; i++) \
+++ sum += ((imm) & (1 << (i + 4))) ? _a[i] * _b[i] : 0; \
+++ for (size_t i = 0; i < 2; i++) \
+++ d[i] = (imm & (1 << i)) ? sum : 0; \
+++ __m128d a = load_m128d(_a); \
+++ __m128d b = load_m128d(_b); \
+++ __m128d ret = _mm_dp_pd(a, b, imm); \
+++ if (validateDouble(ret, d[0], d[1]) != TEST_SUCCESS) \
+++ return TEST_FAIL; \
+++ } while (0)
+++
+++#define GENERATE_MM_DP_PD_TEST_CASES \
+++ MM_DP_PD_TEST_CASE_WITH(0xF0); \
+++ MM_DP_PD_TEST_CASE_WITH(0xF1); \
+++ MM_DP_PD_TEST_CASE_WITH(0xF2); \
+++ MM_DP_PD_TEST_CASE_WITH(0xFF); \
+++ MM_DP_PD_TEST_CASE_WITH(0x10); \
+++ MM_DP_PD_TEST_CASE_WITH(0x11); \
+++ MM_DP_PD_TEST_CASE_WITH(0x12); \
+++ MM_DP_PD_TEST_CASE_WITH(0x13); \
+++ MM_DP_PD_TEST_CASE_WITH(0x00); \
+++ MM_DP_PD_TEST_CASE_WITH(0x01); \
+++ MM_DP_PD_TEST_CASE_WITH(0x02); \
+++ MM_DP_PD_TEST_CASE_WITH(0x03); \
+++ MM_DP_PD_TEST_CASE_WITH(0x20); \
+++ MM_DP_PD_TEST_CASE_WITH(0x21); \
+++ MM_DP_PD_TEST_CASE_WITH(0x22); \
+++ MM_DP_PD_TEST_CASE_WITH(0x23);
+++
+++result_t test_mm_dp_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_DP_PD_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define MM_DP_PS_TEST_CASE_WITH(IMM) \
+++ do { \
+++ const float *_a = impl.mTestFloatPointer1; \
+++ const float *_b = impl.mTestFloatPointer2; \
+++ const int imm = IMM; \
+++ __m128 a = load_m128(_a); \
+++ __m128 b = load_m128(_b); \
+++ __m128 out = _mm_dp_ps(a, b, imm); \
+++ float r[4]; /* the reference */ \
+++ float sum = 0; \
+++ for (size_t i = 0; i < 4; i++) \
+++ sum += ((imm) & (1 << (i + 4))) ? _a[i] * _b[i] : 0; \
+++ for (size_t i = 0; i < 4; i++) \
+++ r[i] = (imm & (1 << i)) ? sum : 0; \
+++ /* the epsilon must be large enough; the dot product accumulates \
+++ * rounding error across four products. */ \
+++ if (validateFloatEpsilon(out, r[0], r[1], r[2], r[3], 2050.0f) != \
+++ TEST_SUCCESS) \
+++ return TEST_FAIL; \
+++ } while (0)
+++
+++#define GENERATE_MM_DP_PS_TEST_CASES \
+++ MM_DP_PS_TEST_CASE_WITH(0xFF); \
+++ MM_DP_PS_TEST_CASE_WITH(0x7F); \
+++ MM_DP_PS_TEST_CASE_WITH(0x9F); \
+++ MM_DP_PS_TEST_CASE_WITH(0x2F); \
+++ MM_DP_PS_TEST_CASE_WITH(0x0F); \
+++ MM_DP_PS_TEST_CASE_WITH(0x23); \
+++ MM_DP_PS_TEST_CASE_WITH(0xB5);
+++
+++result_t test_mm_dp_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_DP_PS_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_extract_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int32_t *_a = (int32_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ int c;
+++
+++#define TEST_IMPL(IDX) \
+++ c = _mm_extract_epi32(a, IDX); \
+++ ASSERT_RETURN(c == *(_a + IDX));
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_extract_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int64_t *_a = (int64_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ __int64 c;
+++
+++#define TEST_IMPL(IDX) \
+++ c = _mm_extract_epi64(a, IDX); \
+++ ASSERT_RETURN(c == *(_a + IDX));
+++
+++ IMM_2_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_extract_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint8_t *_a = (uint8_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++ int c;
+++
+++#define TEST_IMPL(IDX) \
+++ c = _mm_extract_epi8(a, IDX); \
+++ ASSERT_RETURN(c == *(_a + IDX));
+++
+++ IMM_8_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_extract_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = (const float *) impl.mTestFloatPointer1;
+++
+++ __m128 a = _mm_load_ps(_a);
+++ int32_t c;
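+++ // _mm_extract_ps returns the raw bit pattern of the selected float lane,
+++ // so the reference compares against the reinterpreted int32 value.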
+++
+++#define TEST_IMPL(IDX) \
+++ c = _mm_extract_ps(a, IDX); \
+++ ASSERT_RETURN(c == *(const int32_t *) (_a + IDX));
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_floor_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++
+++ double dx = floor(_a[0]);
+++ double dy = floor(_a[1]);
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d ret = _mm_floor_pd(a);
+++
+++ return validateDouble(ret, dx, dy);
+++}
+++
+++result_t test_mm_floor_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ float dx = floorf(_a[0]);
+++ float dy = floorf(_a[1]);
+++ float dz = floorf(_a[2]);
+++ float dw = floorf(_a[3]);
+++
+++ __m128 a = load_m128(_a);
+++ __m128 c = _mm_floor_ps(a);
+++ return validateFloat(c, dx, dy, dz, dw);
+++}
+++
+++result_t test_mm_floor_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (const double *) impl.mTestFloatPointer1;
+++ const double *_b = (const double *) impl.mTestFloatPointer2;
+++
+++ double dx = floor(_b[0]);
+++ double dy = _a[1];
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ __m128d ret = _mm_floor_sd(a, b);
+++
+++ return validateDouble(ret, dx, dy);
+++}
+++
+++result_t test_mm_floor_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer1;
+++
+++ float f0 = floorf(_b[0]);
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ __m128 c = _mm_floor_ss(a, b);
+++
+++ return validateFloat(c, f0, _a[1], _a[2], _a[3]);
+++}
+++
+++result_t test_mm_insert_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t insert = (int32_t) *impl.mTestIntPointer2;
+++ __m128i a, b;
+++
+++#define TEST_IMPL(IDX) \
+++ int32_t d##IDX[4]; \
+++ for (int i = 0; i < 4; i++) { \
+++ d##IDX[i] = _a[i]; \
+++ } \
+++ d##IDX[IDX] = insert; \
+++ \
+++ a = load_m128i(_a); \
+++ b = _mm_insert_epi32(a, (int) insert, IDX); \
+++ CHECK_RESULT(VALIDATE_INT32_M128(b, d##IDX));
+++
+++ IMM_4_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_insert_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ int64_t insert = (int64_t) *impl.mTestIntPointer2;
+++
+++ __m128i a, b;
+++ int64_t d[2];
+++#define TEST_IMPL(IDX) \
+++ d[0] = _a[0]; \
+++ d[1] = _a[1]; \
+++ d[IDX] = insert; \
+++ a = load_m128i(_a); \
+++ b = _mm_insert_epi64(a, insert, IDX); \
+++ CHECK_RESULT(validateInt64(b, d[0], d[1]));
+++
+++ IMM_2_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_insert_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t insert = (int8_t) *impl.mTestIntPointer2;
+++ __m128i a, b;
+++ int8_t d[16];
+++
+++#define TEST_IMPL(IDX) \
+++ for (int i = 0; i < 16; i++) { \
+++ d[i] = _a[i]; \
+++ } \
+++ d[IDX] = insert; \
+++ a = load_m128i(_a); \
+++ b = _mm_insert_epi8(a, insert, IDX); \
+++ CHECK_RESULT(VALIDATE_INT8_M128(b, d));
+++
+++ IMM_16_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_insert_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++
+++ __m128 a, b, c;
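+++ // _mm_insert_ps immediate: bits 7:6 pick the source lane of b, bits 5:4
+++ // the destination lane in a, and bits 3:0 form a zero mask applied last.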
+++#define TEST_IMPL(IDX) \
+++ float d##IDX[4] = {_a[0], _a[1], _a[2], _a[3]}; \
+++ d##IDX[(IDX >> 4) & 0x3] = _b[(IDX >> 6) & 0x3]; \
+++ \
+++ for (int j = 0; j < 4; j++) { \
+++ if (IDX & (1 << j)) { \
+++ d##IDX[j] = 0; \
+++ } \
+++ } \
+++ \
+++ a = _mm_load_ps(_a); \
+++ b = _mm_load_ps(_b); \
+++ c = _mm_insert_ps(a, b, IDX); \
+++ CHECK_RESULT(validateFloat(c, d##IDX[0], d##IDX[1], d##IDX[2], d##IDX[3]));
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_max_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int32_t d[4];
+++ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_max_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_max_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++ int8_t d[16];
+++ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++ d[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+++ d[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+++ d[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+++ d[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+++ d[8] = _a[8] > _b[8] ? _a[8] : _b[8];
+++ d[9] = _a[9] > _b[9] ? _a[9] : _b[9];
+++ d[10] = _a[10] > _b[10] ? _a[10] : _b[10];
+++ d[11] = _a[11] > _b[11] ? _a[11] : _b[11];
+++ d[12] = _a[12] > _b[12] ? _a[12] : _b[12];
+++ d[13] = _a[13] > _b[13] ? _a[13] : _b[13];
+++ d[14] = _a[14] > _b[14] ? _a[14] : _b[14];
+++ d[15] = _a[15] > _b[15] ? _a[15] : _b[15];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ __m128i c = _mm_max_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_max_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+++
+++ uint16_t d[8];
+++ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++ d[4] = _a[4] > _b[4] ? _a[4] : _b[4];
+++ d[5] = _a[5] > _b[5] ? _a[5] : _b[5];
+++ d[6] = _a[6] > _b[6] ? _a[6] : _b[6];
+++ d[7] = _a[7] > _b[7] ? _a[7] : _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_max_epu16(a, b);
+++
+++ return VALIDATE_UINT16_M128(c, d);
+++}
+++
+++result_t test_mm_max_epu32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+++ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+++
+++ uint32_t d[4];
+++ d[0] = _a[0] > _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] > _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] > _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] > _b[3] ? _a[3] : _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_max_epu32(a, b);
+++
+++ return VALIDATE_UINT32_M128(c, d);
+++}
+++
+++result_t test_mm_min_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int32_t d[4];
+++ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_min_epi32(a, b);
+++
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_min_epi8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int8_t *_a = (const int8_t *) impl.mTestIntPointer1;
+++ const int8_t *_b = (const int8_t *) impl.mTestIntPointer2;
+++
+++ int8_t d[16];
+++ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++ d[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+++ d[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+++ d[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+++ d[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+++ d[8] = _a[8] < _b[8] ? _a[8] : _b[8];
+++ d[9] = _a[9] < _b[9] ? _a[9] : _b[9];
+++ d[10] = _a[10] < _b[10] ? _a[10] : _b[10];
+++ d[11] = _a[11] < _b[11] ? _a[11] : _b[11];
+++ d[12] = _a[12] < _b[12] ? _a[12] : _b[12];
+++ d[13] = _a[13] < _b[13] ? _a[13] : _b[13];
+++ d[14] = _a[14] < _b[14] ? _a[14] : _b[14];
+++ d[15] = _a[15] < _b[15] ? _a[15] : _b[15];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++
+++ __m128i c = _mm_min_epi8(a, b);
+++ return VALIDATE_INT8_M128(c, d);
+++}
+++
+++result_t test_mm_min_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint16_t *_a = (const uint16_t *) impl.mTestIntPointer1;
+++ const uint16_t *_b = (const uint16_t *) impl.mTestIntPointer2;
+++
+++ uint16_t d[8];
+++ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++ d[4] = _a[4] < _b[4] ? _a[4] : _b[4];
+++ d[5] = _a[5] < _b[5] ? _a[5] : _b[5];
+++ d[6] = _a[6] < _b[6] ? _a[6] : _b[6];
+++ d[7] = _a[7] < _b[7] ? _a[7] : _b[7];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_min_epu16(a, b);
+++
+++ return VALIDATE_UINT16_M128(c, d);
+++}
+++
+++result_t test_mm_min_epu32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint32_t *_a = (const uint32_t *) impl.mTestIntPointer1;
+++ const uint32_t *_b = (const uint32_t *) impl.mTestIntPointer2;
+++
+++ uint32_t d[4];
+++ d[0] = _a[0] < _b[0] ? _a[0] : _b[0];
+++ d[1] = _a[1] < _b[1] ? _a[1] : _b[1];
+++ d[2] = _a[2] < _b[2] ? _a[2] : _b[2];
+++ d[3] = _a[3] < _b[3] ? _a[3] : _b[3];
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_min_epu32(a, b);
+++
+++ return VALIDATE_UINT32_M128(c, d);
+++}
+++
+++result_t test_mm_minpos_epu16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int16_t *_a = (const int16_t *) impl.mTestIntPointer1;
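+++ // Reference: find the smallest unsigned 16-bit value and the first index
+++ // where it occurs; the intrinsic packs {min, index, 0, ...} into the result.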
+++ uint16_t index = 0, min = (uint16_t) _a[0];
+++ for (int i = 0; i < 8; i++) {
+++ if ((uint16_t) _a[i] < min) {
+++ index = (uint16_t) i;
+++ min = (uint16_t) _a[i];
+++ }
+++ }
+++
+++ uint16_t d[8] = {min, index, 0, 0, 0, 0, 0, 0};
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i ret = _mm_minpos_epu16(a);
+++ return VALIDATE_UINT16_M128(ret, d);
+++}
+++
+++result_t test_mm_mpsadbw_epu8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *_a = (const uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *_b = (const uint8_t *) impl.mTestIntPointer2;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c;
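+++ // _mm_mpsadbw_epu8 immediate: bit 2 selects a 4-byte offset into a and
+++ // bits 1:0 a 4-byte offset into b; each of the eight results is the sum of
+++ // absolute differences of a sliding 4-byte window of a against b's block.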
+++#define TEST_IMPL(IDX) \
+++ uint8_t a_offset##IDX = ((IDX >> 2) & 0x1) * 4; \
+++ uint8_t b_offset##IDX = (IDX & 0x3) * 4; \
+++ \
+++ uint16_t d##IDX[8] = {}; \
+++ for (int i = 0; i < 8; i++) { \
+++ for (int j = 0; j < 4; j++) { \
+++ d##IDX[i] += \
+++ abs(_a[(a_offset##IDX + i) + j] - _b[b_offset##IDX + j]); \
+++ } \
+++ } \
+++ c = _mm_mpsadbw_epu8(a, b, IDX); \
+++ CHECK_RESULT(VALIDATE_UINT16_M128(c, d##IDX));
+++
+++ IMM_8_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_mul_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ int64_t dx = (int64_t) (_a[0]) * (int64_t) (_b[0]);
+++ int64_t dy = (int64_t) (_a[2]) * (int64_t) (_b[2]);
+++
+++ __m128i a = _mm_loadu_si128((const __m128i *) _a);
+++ __m128i b = _mm_loadu_si128((const __m128i *) _b);
+++ __m128i r = _mm_mul_epi32(a, b);
+++
+++ return validateInt64(r, dx, dy);
+++}
+++
+++result_t test_mm_mullo_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ int32_t d[4];
+++
+++ for (int i = 0; i < 4; i++) {
+++ d[i] = (int32_t) ((int64_t) _a[i] * (int64_t) _b[i]);
+++ }
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_mullo_epi32(a, b);
+++ return VALIDATE_INT32_M128(c, d);
+++}
+++
+++result_t test_mm_packus_epi32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint16_t max = UINT16_MAX;
+++ uint16_t min = 0;
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_b = (const int32_t *) impl.mTestIntPointer2;
+++
+++ uint16_t d[8];
+++ for (int i = 0; i < 4; i++) {
+++ if (_a[i] > (int32_t) max)
+++ d[i] = max;
+++ else if (_a[i] < (int32_t) min)
+++ d[i] = min;
+++ else
+++ d[i] = (uint16_t) _a[i];
+++ }
+++ for (int i = 0; i < 4; i++) {
+++ if (_b[i] > (int32_t) max)
+++ d[i + 4] = max;
+++ else if (_b[i] < (int32_t) min)
+++ d[i + 4] = min;
+++ else
+++ d[i + 4] = (uint16_t) _b[i];
+++ }
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i c = _mm_packus_epi32(a, b);
+++
+++ return VALIDATE_UINT16_M128(c, d);
+++}
+++
+++result_t test_mm_round_pd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (double *) impl.mTestFloatPointer1;
+++ double d[2];
+++ __m128d ret;
+++
+++ __m128d a = load_m128d(_a);
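+++ // The low three bits of iter pick the rounding mode: cases 0-3 encode it
+++ // in the immediate, while cases 4-7 set MXCSR and use _MM_FROUND_CUR_DIRECTION.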
+++ switch (iter & 0x7) {
+++ case 0:
+++ d[0] = bankersRounding(_a[0]);
+++ d[1] = bankersRounding(_a[1]);
+++
+++ ret = _mm_round_pd(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+++ break;
+++ case 1:
+++ d[0] = floor(_a[0]);
+++ d[1] = floor(_a[1]);
+++
+++ ret = _mm_round_pd(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 2:
+++ d[0] = ceil(_a[0]);
+++ d[1] = ceil(_a[1]);
+++
+++ ret = _mm_round_pd(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 3:
+++ d[0] = _a[0] > 0 ? floor(_a[0]) : ceil(_a[0]);
+++ d[1] = _a[1] > 0 ? floor(_a[1]) : ceil(_a[1]);
+++
+++ ret = _mm_round_pd(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+++ break;
+++ case 4:
+++ d[0] = bankersRounding(_a[0]);
+++ d[1] = bankersRounding(_a[1]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 5:
+++ d[0] = floor(_a[0]);
+++ d[1] = floor(_a[1]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 6:
+++ d[0] = ceil(_a[0]);
+++ d[1] = ceil(_a[1]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 7:
+++ d[0] = _a[0] > 0 ? floor(_a[0]) : ceil(_a[0]);
+++ d[1] = _a[1] > 0 ? floor(_a[1]) : ceil(_a[1]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ ret = _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ }
+++
+++ return validateDouble(ret, d[0], d[1]);
+++}
+++
+++result_t test_mm_round_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ float f[4];
+++ __m128 ret;
+++
+++ __m128 a = load_m128(_a);
+++ switch (iter & 0x7) {
+++ case 0:
+++ f[0] = bankersRounding(_a[0]);
+++ f[1] = bankersRounding(_a[1]);
+++ f[2] = bankersRounding(_a[2]);
+++ f[3] = bankersRounding(_a[3]);
+++
+++ ret = _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+++ break;
+++ case 1:
+++ f[0] = floorf(_a[0]);
+++ f[1] = floorf(_a[1]);
+++ f[2] = floorf(_a[2]);
+++ f[3] = floorf(_a[3]);
+++
+++ ret = _mm_round_ps(a, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 2:
+++ f[0] = ceilf(_a[0]);
+++ f[1] = ceilf(_a[1]);
+++ f[2] = ceilf(_a[2]);
+++ f[3] = ceilf(_a[3]);
+++
+++ ret = _mm_round_ps(a, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 3:
+++ f[0] = _a[0] > 0 ? floorf(_a[0]) : ceilf(_a[0]);
+++ f[1] = _a[1] > 0 ? floorf(_a[1]) : ceilf(_a[1]);
+++ f[2] = _a[2] > 0 ? floorf(_a[2]) : ceilf(_a[2]);
+++ f[3] = _a[3] > 0 ? floorf(_a[3]) : ceilf(_a[3]);
+++
+++ ret = _mm_round_ps(a, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+++ break;
+++ case 4:
+++ f[0] = bankersRounding(_a[0]);
+++ f[1] = bankersRounding(_a[1]);
+++ f[2] = bankersRounding(_a[2]);
+++ f[3] = bankersRounding(_a[3]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 5:
+++ f[0] = floorf(_a[0]);
+++ f[1] = floorf(_a[1]);
+++ f[2] = floorf(_a[2]);
+++ f[3] = floorf(_a[3]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 6:
+++ f[0] = ceilf(_a[0]);
+++ f[1] = ceilf(_a[1]);
+++ f[2] = ceilf(_a[2]);
+++ f[3] = ceilf(_a[3]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 7:
+++ f[0] = _a[0] > 0 ? floorf(_a[0]) : ceilf(_a[0]);
+++ f[1] = _a[1] > 0 ? floorf(_a[1]) : ceilf(_a[1]);
+++ f[2] = _a[2] > 0 ? floorf(_a[2]) : ceilf(_a[2]);
+++ f[3] = _a[3] > 0 ? floorf(_a[3]) : ceilf(_a[3]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ ret = _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ }
+++
+++ return validateFloat(ret, f[0], f[1], f[2], f[3]);
+++}
+++
+++result_t test_mm_round_sd(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const double *_a = (double *) impl.mTestFloatPointer1;
+++ const double *_b = (double *) impl.mTestFloatPointer2;
+++ double d[2];
+++ __m128d ret;
+++
+++ __m128d a = load_m128d(_a);
+++ __m128d b = load_m128d(_b);
+++ d[1] = _a[1];
+++ switch (iter & 0x7) {
+++ case 0:
+++ d[0] = bankersRounding(_b[0]);
+++
+++ ret = _mm_round_sd(a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+++ break;
+++ case 1:
+++ d[0] = floor(_b[0]);
+++
+++ ret = _mm_round_sd(a, b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 2:
+++ d[0] = ceil(_b[0]);
+++
+++ ret = _mm_round_sd(a, b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 3:
+++ d[0] = _b[0] > 0 ? floor(_b[0]) : ceil(_b[0]);
+++
+++ ret = _mm_round_sd(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+++ break;
+++ case 4:
+++ d[0] = bankersRounding(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 5:
+++ d[0] = floor(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 6:
+++ d[0] = ceil(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 7:
+++ d[0] = _b[0] > 0 ? floor(_b[0]) : ceil(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ ret = _mm_round_sd(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ }
+++
+++ return validateDouble(ret, d[0], d[1]);
+++}
+++
+++result_t test_mm_round_ss(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const float *_a = impl.mTestFloatPointer1;
+++ const float *_b = impl.mTestFloatPointer2;
+++ float f[4];
+++ __m128 ret;
+++
+++ __m128 a = load_m128(_a);
+++ __m128 b = load_m128(_b);
+++ switch (iter & 0x7) {
+++ case 0:
+++ f[0] = bankersRounding(_b[0]);
+++
+++ ret = _mm_round_ss(a, b, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+++ break;
+++ case 1:
+++ f[0] = floorf(_b[0]);
+++
+++ ret = _mm_round_ss(a, b, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 2:
+++ f[0] = ceilf(_b[0]);
+++
+++ ret = _mm_round_ss(a, b, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC);
+++ break;
+++ case 3:
+++ f[0] = _b[0] > 0 ? floorf(_b[0]) : ceilf(_b[0]);
+++
+++ ret = _mm_round_ss(a, b, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
+++ break;
+++ case 4:
+++ f[0] = bankersRounding(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
+++ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 5:
+++ f[0] = floorf(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_DOWN);
+++ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 6:
+++ f[0] = ceilf(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
+++ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ case 7:
+++ f[0] = _b[0] > 0 ? floorf(_b[0]) : ceilf(_b[0]);
+++
+++ _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
+++ ret = _mm_round_ss(a, b, _MM_FROUND_CUR_DIRECTION);
+++ break;
+++ }
+++ f[1] = _a[1];
+++ f[2] = _a[2];
+++ f[3] = _a[3];
+++
+++ return validateFloat(ret, f[0], f[1], f[2], f[3]);
+++}
+++
+++result_t test_mm_stream_load_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ int32_t *addr = impl.mTestIntPointer1;
+++
+++ __m128i ret = _mm_stream_load_si128((__m128i *) addr);
+++
+++ return VALIDATE_INT32_M128(ret, addr);
+++}
+++
+++result_t test_mm_test_all_ones(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ __m128i a = load_m128i(_a);
+++
+++ int32_t d0 = ~_a[0] & (~(uint32_t) 0);
+++ int32_t d1 = ~_a[1] & (~(uint32_t) 0);
+++ int32_t d2 = ~_a[2] & (~(uint32_t) 0);
+++ int32_t d3 = ~_a[3] & (~(uint32_t) 0);
+++ int32_t result = ((d0 | d1 | d2 | d3) == 0) ? 1 : 0;
+++
+++ int32_t ret = _mm_test_all_ones(a);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_test_all_zeros(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_mask = (const int32_t *) impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i mask = load_m128i(_mask);
+++
+++ int32_t d0 = _a[0] & _mask[0];
+++ int32_t d1 = _a[1] & _mask[1];
+++ int32_t d2 = _a[2] & _mask[2];
+++ int32_t d3 = _a[3] & _mask[3];
+++ int32_t result = ((d0 | d1 | d2 | d3) == 0) ? 1 : 0;
+++
+++ int32_t ret = _mm_test_all_zeros(a, mask);
+++
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_test_mix_ones_zeros(const SSE2NEONTestImpl &impl,
+++ uint32_t iter)
+++{
+++ const int32_t *_a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *_mask = (const int32_t *) impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i mask = load_m128i(_mask);
+++
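+++ // Mirror of PTEST: ZF is set when (a & mask) is all zeros, CF when
+++ // (~a & mask) is all zeros; the intrinsic returns 1 only when both are clear.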
+++ int32_t ZF = 1;
+++ int32_t CF = 1;
+++ for (int i = 0; i < 4; i++) {
+++ ZF &= ((_a[i] & _mask[i]) == 0);
+++ CF &= ((~_a[i] & _mask[i]) == 0);
+++ }
+++ int32_t result = (ZF == 0 && CF == 0);
+++
+++ int32_t ret = _mm_test_mix_ones_zeros(a, mask);
+++ return result == ret ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_testc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = _mm_load_si128((const __m128i *) _a);
+++ __m128i b = _mm_load_si128((const __m128i *) _b);
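+++ // _mm_testc_si128 returns the CF flag: 1 iff (~a & b) has no bits set.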
+++ int testc = 1;
+++ for (int i = 0; i < 2; i++) {
+++ if ((~(((SIMDVec *) &a)->m128_u64[i]) &
+++ ((SIMDVec *) &b)->m128_u64[i])) {
+++ testc = 0;
+++ break;
+++ }
+++ }
+++ return _mm_testc_si128(a, b) == testc ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++result_t test_mm_testnzc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return test_mm_test_mix_ones_zeros(impl, iter);
+++}
+++
+++result_t test_mm_testz_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *_a = impl.mTestIntPointer1;
+++ const int32_t *_b = impl.mTestIntPointer2;
+++ __m128i a = _mm_load_si128((const __m128i *) _a);
+++ __m128i b = _mm_load_si128((const __m128i *) _b);
+++ int testz = 1;
+++ for (int i = 0; i < 2; i++) {
+++ if ((((SIMDVec *) &a)->m128_u64[i] & ((SIMDVec *) &b)->m128_u64[i])) {
+++ testz = 0;
+++ break;
+++ }
+++ }
+++ return _mm_testz_si128(a, b) == testz ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++/* SSE4.2 */
+++#define IS_CMPESTRI 1
+++
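+++ // The cmpestr tests are table-driven: each MM_CMPESTR*_TEST_CASES list
+++ // expands to an enum of case indices plus one EVAL block per case, and
+++ // IIF(IM) (a preprocessor conditional defined elsewhere in this file)
+++ // appears to switch between the int-returning cmpestri form and the
+++ // __m128i-returning cmpestrm form.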
+++#define DEF_ENUM_MM_CMPESTRX_VARIANT(c, ...) c,
+++
+++#define EVAL_MM_CMPESTRX_TEST_CASE(c, type, data_type, im, IM) \
+++ do { \
+++ data_type *a = test_mm_##im##_##type##_data[c].a, \
+++ *b = test_mm_##im##_##type##_data[c].b; \
+++ int la = test_mm_##im##_##type##_data[c].la, \
+++ lb = test_mm_##im##_##type##_data[c].lb; \
+++ const int imm8 = IMM_##c; \
+++ IIF(IM) \
+++ (int expect = test_mm_##im##_##type##_data[c].expect, \
+++ data_type *expect = test_mm_##im##_##type##_data[c].expect); \
+++ __m128i ma, mb; \
+++ memcpy(&ma, a, sizeof(ma)); \
+++ memcpy(&mb, b, sizeof(mb)); \
+++ IIF(IM) \
+++ (int res = _mm_##im(ma, la, mb, lb, imm8), \
+++ __m128i res = _mm_##im(ma, la, mb, lb, imm8)); \
+++ if (IIF(IM)(res != expect, memcmp(expect, &res, sizeof(__m128i)))) \
+++ return TEST_FAIL; \
+++ } while (0);
+++
+++#define ENUM_MM_CMPESTRX_TEST_CASES(type, type_lower, data_type, func, FUNC, \
+++ IM) \
+++ enum { MM_##FUNC##_##type##_TEST_CASES(DEF_ENUM_MM_CMPESTRX_VARIANT) }; \
+++ MM_##FUNC##_##type##_TEST_CASES(EVAL_MM_CMPESTRX_TEST_CASE, type_lower, \
+++ data_type, func, IM)
+++
+++#define IMM_UBYTE_EACH_LEAST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UBYTE_EACH_LEAST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_EACH_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_EACH_MOST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UBYTE_EACH_MOST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_EACH_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ANY_LEAST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UBYTE_ANY_LEAST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ANY_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ANY_MOST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UBYTE_ANY_MOST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ANY_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_RANGES_LEAST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UBYTE_RANGES_MOST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UBYTE_RANGES_LEAST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_RANGES_MOST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_RANGES_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_RANGES_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ORDERED_LEAST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UBYTE_ORDERED_LEAST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ORDERED_MOST \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UBYTE_ORDERED_MOST_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ORDERED_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++
+++#define IMM_SBYTE_EACH_LEAST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SBYTE_EACH_LEAST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_EACH_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_EACH_MOST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SBYTE_EACH_MOST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_EACH_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ANY_LEAST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SBYTE_ANY_LEAST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ANY_MOST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SBYTE_ANY_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_RANGES_LEAST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SBYTE_RANGES_LEAST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_RANGES_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_RANGES_MOST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SBYTE_RANGES_MOST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_RANGES_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ORDERED_LEAST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SBYTE_ORDERED_LEAST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ORDERED_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ORDERED_MOST_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ORDERED_MOST \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SBYTE_ORDERED_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++
+++#define IMM_UWORD_RANGES_LEAST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UWORD_RANGES_LEAST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_RANGES_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UWORD_RANGES_MOST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UWORD_RANGES_MOST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_RANGES_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UWORD_EACH_LEAST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UWORD_EACH_MOST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UWORD_EACH_LEAST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_EACH_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UWORD_EACH_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ANY_LEAST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UWORD_ANY_MOST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UWORD_ANY_MOST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ANY_LEAST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ANY_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ORDERED_LEAST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_UWORD_ORDERED_LEAST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ORDERED_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ORDERED_MOST \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_UWORD_ORDERED_MOST_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UWORD_ORDERED_MOST_MASKED_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++
+++#define IMM_SWORD_RANGES_LEAST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SWORD_RANGES_MOST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SWORD_RANGES_LEAST_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SWORD_RANGES_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_RANGES_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_EACH_LEAST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SWORD_EACH_MOST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SWORD_EACH_LEAST_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SWORD_EACH_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_EACH_MOST_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SWORD_EACH_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ANY_LEAST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SWORD_ANY_LEAST_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ANY_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ANY_MOST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SWORD_ANY_MOST_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ANY_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ORDERED_LEAST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT)
+++#define IMM_SWORD_ORDERED_LEAST_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ORDERED_LEAST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_LEAST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SWORD_ORDERED_MOST \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT)
+++#define IMM_SWORD_ORDERED_MOST_MASKED_NEGATIVE \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_MOST_SIGNIFICANT | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++
+++typedef struct {
+++ uint8_t a[16], b[16];
+++ int la, lb;
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpestri_ubyte_data_t;
+++typedef struct {
+++ int8_t a[16], b[16];
+++ int la, lb;
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpestri_sbyte_data_t;
+++typedef struct {
+++ uint16_t a[8], b[8];
+++ int la, lb;
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpestri_uword_data_t;
+++typedef struct {
+++ int16_t a[8], b[8];
+++ int la, lb;
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpestri_sword_data_t;
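+++
+++/*
+++ * Layout of the test records below: operands a and b, their explicit lengths
+++ * la and lb, a control word, and the expected scalar result. The imm8 member
+++ * documents the intended control word, but note that
+++ * EVAL_MM_CMPESTRX_TEST_CASE derives the immediate it actually passes from
+++ * the case name (IMM_<name>). Lengths larger than the element count (16 for
+++ * bytes, 8 for words) are deliberate: the instruction saturates |la|/|lb| to
+++ * the register width, so those entries exercise the clamping path.
+++ */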
+++
+++#define TEST_MM_CMPESTRA_UBYTE_DATA_LEN 3
+++static test_mm_cmpestri_ubyte_data_t
+++ test_mm_cmpestra_ubyte_data[TEST_MM_CMPESTRA_UBYTE_DATA_LEN] = {
+++ {{20, 10, 33, 56, 78},
+++ {20, 10, 34, 98, 127, 20, 10, 32, 20, 10, 32, 11, 3, 20, 10, 31},
+++ 3,
+++ 17,
+++ IMM_UBYTE_ORDERED_MOST,
+++ 1},
+++ {{20, 127, 0, 45, 77, 1, 34, 43, 109},
+++ {2, 127, 0, 54, 6, 43, 12, 110, 100},
+++ 9,
+++ 20,
+++ IMM_UBYTE_EACH_LEAST_NEGATIVE,
+++ 0},
+++ {{22, 33, 90, 1},
+++ {22, 33, 90, 1, 1, 5, 4, 7, 98, 34, 1, 12, 13, 14, 15, 16},
+++ 4,
+++ 11,
+++ IMM_UBYTE_ANY_LEAST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPESTRA_SBYTE_DATA_LEN 3
+++static test_mm_cmpestri_sbyte_data_t
+++ test_mm_cmpestra_sbyte_data[TEST_MM_CMPESTRA_SBYTE_DATA_LEN] = {
+++ {{45, -94, 38, -11, 84, -123, -43, -49, 25, -55, -121, -6, 57, 108, -55,
+++ 69},
+++ {-26, -61, -21, -96, 48, -112, 95, -56, 29, -55, -121, -6, 57, 108,
+++ -55, 69},
+++ 23,
+++ 28,
+++ IMM_SBYTE_RANGES_LEAST,
+++ 0},
+++ {{-12, 8},
+++ {-12, 7, -12, 8, -13, 45, -12, 8},
+++ 2,
+++ 8,
+++ IMM_SBYTE_ORDERED_MOST_NEGATIVE,
+++ 0},
+++ {{-100, -127, 56, 78, 21, -1, 9, 127, 45},
+++ {100, 126, 30, 65, 87, 54, 80, 81, -98, -101, 90, 1, 5, 60, -77, -65},
+++ 10,
+++ 20,
+++ IMM_SBYTE_ANY_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPESTRA_UWORD_DATA_LEN 3
+++static test_mm_cmpestri_uword_data_t
+++ test_mm_cmpestra_uword_data[TEST_MM_CMPESTRA_UWORD_DATA_LEN] = {
+++ {{10000, 20000, 30000, 40000, 50000},
+++ {40001, 50002, 10000, 20000, 30000, 40000, 50000},
+++ 5,
+++ 10,
+++ IMM_UWORD_ORDERED_LEAST,
+++ 0},
+++ {{1001, 9487, 9487, 8000},
+++ {1001, 1002, 1003, 8709, 100, 1, 1000, 999},
+++ 4,
+++ 6,
+++ IMM_UWORD_RANGES_LEAST_MASKED_NEGATIVE,
+++ 0},
+++ {{12, 21, 0, 45, 88, 10001, 10002, 65535},
+++ {22, 13, 3, 54, 888, 10003, 10000, 65530},
+++ 13,
+++ 13,
+++ IMM_UWORD_EACH_MOST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPESTRA_SWORD_DATA_LEN 3
+++static test_mm_cmpestri_sword_data_t
+++ test_mm_cmpestra_sword_data[TEST_MM_CMPESTRA_SWORD_DATA_LEN] = {
+++ {{-100, -80, -5, -1, 10, 1000},
+++ {-100, -99, -80, -2, 11, 789, 889, 999},
+++ 6,
+++ 12,
+++ IMM_SWORD_RANGES_LEAST_NEGATIVE,
+++ 1},
+++ {{-30000, -90, -32766, 1200, 5},
+++ {-30001, 21, 10000, 1201, 888},
+++ 5,
+++ 5,
+++ IMM_SWORD_EACH_MOST,
+++ 0},
+++ {{2001, -1928},
+++ {2000, 1928, 3000, 2289, 4000, 111, 2002, -1928},
+++ 2,
+++ 9,
+++ IMM_SWORD_ANY_LEAST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define MM_CMPESTRA_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ORDERED_MOST, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRA_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPESTRA_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_ORDERED_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_EACH_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRA_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_EACH_MOST, __VA_ARGS__) \
+++ _(SWORD_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRA_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestra, CMPESTRA, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestra, CMPESTRA, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestra, CMPESTRA, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestra, CMPESTRA, \
+++ IS_CMPESTRI)
+++
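+++/*
+++ * _mm_cmpestra returns 1 only when CF is clear (no element of b matched
+++ * after polarity processing) and ZF is clear (|lb| is not smaller than the
+++ * element count), which is the condition encoded in the expect fields above.
+++ */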
+++result_t test_mm_cmpestra(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRA_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define TEST_MM_CMPESTRC_UBYTE_DATA_LEN 4
+++static test_mm_cmpestri_ubyte_data_t
+++ test_mm_cmpestrc_ubyte_data[TEST_MM_CMPESTRC_UBYTE_DATA_LEN] = {
+++ {{66, 3, 3, 65},
+++ {66, 3, 3, 65, 67, 2, 2, 67, 56, 11, 1, 23, 66, 3, 3, 65},
+++ 4,
+++ 16,
+++ IMM_UBYTE_ORDERED_MOST_MASKED_NEGATIVE,
+++ 1},
+++ {{1, 11, 2, 22, 3, 33, 4, 44, 5, 55, 6, 66, 7, 77, 8, 88},
+++ {2, 22, 3, 23, 5, 66, 255, 43, 6, 66, 7, 77, 9, 99, 10, 100},
+++ 16,
+++ 16,
+++ IMM_UBYTE_EACH_MOST,
+++ 0},
+++ {{36, 72, 108}, {12, 24, 48, 96, 77, 84}, 3, 6, IMM_UBYTE_ANY_LEAST, 0},
+++ {{12, 24, 36, 48},
+++ {11, 49, 50, 56, 77, 15, 10},
+++ 4,
+++ 7,
+++ IMM_UBYTE_RANGES_LEAST_NEGATIVE,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPESTRC_SBYTE_DATA_LEN 4
+++static test_mm_cmpestri_sbyte_data_t
+++ test_mm_cmpestrc_sbyte_data[TEST_MM_CMPESTRC_SBYTE_DATA_LEN] = {
+++ {{-22, -30, 40, 45},
+++ {-31, -32, 46, 77},
+++ 4,
+++ 4,
+++ IMM_SBYTE_RANGES_MOST,
+++ 0},
+++ {{-12, -7, 33, 100, 12},
+++ {-12, -7, 33, 100, 11, -11, -7, 33, 100, 12},
+++ 5,
+++ 10,
+++ IMM_SBYTE_ORDERED_MOST_MASKED_NEGATIVE,
+++ 1},
+++ {{1, 2, 3, 4, 5, -1, -2, -3, -4, -5},
+++ {1, 2, 3, 4, 5, -1, -2, -3, -5},
+++ 10,
+++ 9,
+++ IMM_SBYTE_ANY_MOST_MASKED_NEGATIVE,
+++ 0},
+++ {{101, -128, -88, -76, 89, 109, 44, -12, -45, -100, 22, 1, 91},
+++ {102, -120, 88, -76, 98, 107, 33, 12, 45, -100, 22, 10, 19},
+++ 13,
+++ 13,
+++ IMM_SBYTE_EACH_MOST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPESTRC_UWORD_DATA_LEN 4
+++static test_mm_cmpestri_uword_data_t
+++ test_mm_cmpestrc_uword_data[TEST_MM_CMPESTRC_UWORD_DATA_LEN] = {
+++ {{1000, 2000, 4000, 8000, 16000},
+++ {40001, 1000, 2000, 40000, 8000, 16000},
+++ 5,
+++ 6,
+++ IMM_UWORD_ORDERED_LEAST_NEGATIVE,
+++ 1},
+++ {{1111, 1212},
+++ {1110, 1213, 1110, 1214, 1100, 1220, 1000, 1233},
+++ 2,
+++ 8,
+++ IMM_UWORD_RANGES_MOST,
+++ 0},
+++ {{10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000},
+++ {9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000},
+++ 13,
+++ 13,
+++ IMM_UWORD_EACH_LEAST_MASKED_NEGATIVE,
+++ 1},
+++ {{12}, {11, 13, 14, 15, 10}, 1, 5, IMM_UWORD_ANY_MOST, 0},
+++};
+++
+++#define TEST_MM_CMPESTRC_SWORD_DATA_LEN 4
+++static test_mm_cmpestri_sword_data_t
+++ test_mm_cmpestrc_sword_data[TEST_MM_CMPESTRC_SWORD_DATA_LEN] = {
+++ {{-100, -90, -80, -66, 1},
+++ {-101, -102, -1000, 2, 67, 10000},
+++ 5,
+++ 6,
+++ IMM_SWORD_RANGES_LEAST,
+++ 0},
+++ {{12, 13, -700, 888, 44, -987, 19},
+++ {12, 13, -700, 888, 44, -987, 19},
+++ 7,
+++ 7,
+++ IMM_SWORD_EACH_MOST_NEGATIVE,
+++ 0},
+++ {{2001, -1992, 1995, 10007, 2000},
+++ {2000, 1928, 3000, 9822, 5000, 1111, 2002, -1928},
+++ 5,
+++ 9,
+++ IMM_SWORD_ANY_LEAST_NEGATIVE,
+++ 1},
+++ {{13, -26, 39},
+++ {12, -25, 33, 13, -26, 39},
+++ 3,
+++ 6,
+++ IMM_SWORD_ORDERED_MOST,
+++ 1},
+++};
+++
+++#define MM_CMPESTRC_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_EACH_MOST, __VA_ARGS__) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRC_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_RANGES_MOST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_EACH_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRC_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_ORDERED_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_RANGES_MOST, __VA_ARGS__) \
+++ _(UWORD_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_ANY_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRC_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(SWORD_EACH_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_ORDERED_MOST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRC_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrc, CMPESTRC, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrc, CMPESTRC, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrc, CMPESTRC, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrc, CMPESTRC, \
+++ IS_CMPESTRI)
+++
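+++/*
+++ * _mm_cmpestrc reduces the comparison to a single bit: it returns CF, i.e.
+++ * 1 iff at least one result bit survives the polarity step.
+++ */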
+++result_t test_mm_cmpestrc(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRC_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define TEST_MM_CMPESTRI_UBYTE_DATA_LEN 4
+++static test_mm_cmpestri_ubyte_data_t
+++ test_mm_cmpestri_ubyte_data[TEST_MM_CMPESTRI_UBYTE_DATA_LEN] = {
+++ {{23, 89, 255, 0, 90, 45, 67, 12, 1, 56, 200, 141, 3, 4, 2, 76},
+++ {32, 89, 255, 128, 9, 54, 78, 12, 1, 56, 100, 41, 42, 68, 32, 5},
+++ 16,
+++ 16,
+++ IMM_UBYTE_ANY_LEAST_NEGATIVE,
+++ 0},
+++ {{0, 83, 112, 12, 221, 54, 76, 83, 112, 10},
+++ {0, 83, 112, 83, 122, 45, 67, 83, 112, 9},
+++ 10,
+++ 10,
+++ IMM_UBYTE_EACH_LEAST,
+++ 0},
+++ {{34, 78, 12},
+++ {56, 100, 11, 67, 35, 79, 67, 255, 0, 43, 121, 234, 225, 91, 31, 23},
+++ 3,
+++ 16,
+++ IMM_UBYTE_RANGES_LEAST,
+++ 0},
+++ {{13, 10, 9, 32, 105, 103, 110, 111, 114, 101, 32, 116, 104, 105, 115,
+++ 32},
+++ {83, 112, 108, 105, 116, 32, 13, 10, 9, 32, 108, 105, 110, 101, 32,
+++ 32},
+++ 3,
+++ 15,
+++ IMM_UBYTE_ORDERED_LEAST,
+++ 6},
+++};
+++
+++#define TEST_MM_CMPESTRI_SBYTE_DATA_LEN 4
+++static test_mm_cmpestri_sbyte_data_t
+++ test_mm_cmpestri_sbyte_data[TEST_MM_CMPESTRI_SBYTE_DATA_LEN] = {
+++ {{-12, -1, 90, -128, 43, 6, 87, 127},
+++ {-1, -1, 9, -127, 126, 6, 78, 23},
+++ 8,
+++ 8,
+++ IMM_SBYTE_EACH_LEAST,
+++ 1},
+++ {{34, 67, -90, 33, 123, -100, 43, 56},
+++ {43, 76, -90, 44, 20, -100, 54, 56},
+++ 8,
+++ 8,
+++ IMM_SBYTE_ANY_LEAST,
+++ 0},
+++ {{-43, 67, 89},
+++ {-44, -54, -30, -128, 127, 34, 10, -62},
+++ 3,
+++ 7,
+++ IMM_SBYTE_RANGES_LEAST,
+++ 2},
+++ {{90, 34, -32, 0, 5},
+++ {19, 34, -32, 90, 34, -32, 45, 0, 5, 90, 34, -32, 0, 5, 19, 87},
+++ 3,
+++ 16,
+++ IMM_SBYTE_ORDERED_LEAST,
+++ 3},
+++};
+++
+++#define TEST_MM_CMPESTRI_UWORD_DATA_LEN 4
+++static test_mm_cmpestri_uword_data_t
+++ test_mm_cmpestri_uword_data[TEST_MM_CMPESTRI_UWORD_DATA_LEN] = {
+++ {{45, 65535, 0, 87, 1000, 10, 45, 26},
+++ {65534, 0, 0, 78, 1000, 10, 32, 26},
+++ 8,
+++ 8,
+++ IMM_UWORD_EACH_LEAST,
+++ 2},
+++ {{45, 23, 10, 54, 88, 10000, 20000, 100},
+++ {544, 10000, 20000, 1, 0, 2897, 2330, 2892},
+++ 8,
+++ 8,
+++ IMM_UWORD_ANY_LEAST,
+++ 1},
+++ {{10000, 15000},
+++ {12, 45, 67, 899, 10001, 32, 15001, 15000},
+++ 2,
+++ 8,
+++ IMM_UWORD_RANGES_LEAST,
+++ 4},
+++ {{0, 1, 54, 89, 100},
+++ {101, 102, 65535, 0, 1, 54, 89, 100},
+++ 5,
+++ 8,
+++ IMM_UWORD_ORDERED_LEAST,
+++ 3},
+++};
+++
+++#define TEST_MM_CMPESTRI_SWORD_DATA_LEN 4
+++static test_mm_cmpestri_sword_data_t
+++ test_mm_cmpestri_sword_data[TEST_MM_CMPESTRI_SWORD_DATA_LEN] = {
+++ {{13, 6, 5, 4, 3, 2, 1, 3},
+++ {-7, 16, 5, 4, -1, 6, 1, 3},
+++ 10,
+++ 10,
+++ IMM_SWORD_RANGES_MOST,
+++ 7},
+++ {{13, 6, 5, 4, 3, 2, 1, 3},
+++ {-7, 16, 5, 4, -1, 6, 1, 3},
+++ 8,
+++ 8,
+++ IMM_SWORD_EACH_LEAST,
+++ 2},
+++ {{-32768, 90, 455, 67, -1000, -10000, 21, 12},
+++ {-7, 61, 455, 67, -32768, 32767, 11, 888},
+++ 8,
+++ 8,
+++ IMM_SWORD_ANY_LEAST,
+++ 2},
+++ {{-12, -56},
+++ {-7, 16, 555, 554, -12, 61, -16, 3},
+++ 2,
+++ 8,
+++ IMM_SWORD_ORDERED_LEAST,
+++ 8},
+++};
+++
+++#define MM_CMPESTRI_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPESTRI_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPESTRI_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPESTRI_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_MOST, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRI_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestri, CMPESTRI, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestri, CMPESTRI, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestri, CMPESTRI, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestri, CMPESTRI, \
+++ IS_CMPESTRI)
+++
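+++/*
+++ * _mm_cmpestri returns the index of the first (_SIDD_LEAST_SIGNIFICANT) or
+++ * last (_SIDD_MOST_SIGNIFICANT) set result bit, or the element count (16 for
+++ * bytes, 8 for words) when no bit is set -- hence the sword entry above that
+++ * expects 8.
+++ */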
+++result_t test_mm_cmpestri(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRI_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define IS_CMPESTRM 0
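+++/*
+++ * IS_CMPESTRM == 0 routes EVAL_MM_CMPESTRX_TEST_CASE through its second
+++ * branch: the intrinsic result is an __m128i mask and is compared bytewise
+++ * against expect with memcmp instead of as an int.
+++ */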
+++
+++typedef struct {
+++ uint8_t a[16], b[16];
+++ int la, lb;
+++ const int imm8;
+++ uint8_t expect[16];
+++} test_mm_cmpestrm_ubyte_data_t;
+++typedef struct {
+++ int8_t a[16], b[16];
+++ int la, lb;
+++ const int imm8;
+++ int8_t expect[16];
+++} test_mm_cmpestrm_sbyte_data_t;
+++typedef struct {
+++ uint16_t a[8], b[8];
+++ int la, lb;
+++ const int imm8;
+++ uint16_t expect[8];
+++} test_mm_cmpestrm_uword_data_t;
+++typedef struct {
+++ int16_t a[8], b[8];
+++ int la, lb;
+++ const int imm8;
+++ int16_t expect[8];
+++} test_mm_cmpestrm_sword_data_t;
+++
+++#define IMM_UBYTE_EACH_UNIT \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+++#define IMM_UBYTE_EACH_UNIT_NEGATIVE \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK | \
+++ _SIDD_NEGATIVE_POLARITY)
+++#define IMM_UBYTE_ANY_UNIT \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+++#define IMM_UBYTE_ANY_BIT \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK)
+++#define IMM_UBYTE_RANGES_UNIT \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+++#define IMM_UBYTE_ORDERED_UNIT \
+++ (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+++
+++#define IMM_SBYTE_EACH_UNIT \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+++#define IMM_SBYTE_EACH_BIT_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_BIT_MASK | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_ANY_UNIT \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+++#define IMM_SBYTE_ANY_UNIT_MASKED_NEGATIVE \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK | \
+++ _SIDD_MASKED_NEGATIVE_POLARITY)
+++#define IMM_SBYTE_RANGES_UNIT \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+++#define IMM_SBYTE_ORDERED_UNIT \
+++ (_SIDD_SBYTE_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+++
+++#define IMM_UWORD_RANGES_UNIT \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+++#define IMM_UWORD_EACH_UNIT \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+++#define IMM_UWORD_ANY_UNIT \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+++#define IMM_UWORD_ANY_BIT \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK)
+++#define IMM_UWORD_ORDERED_UNIT \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
+++#define IMM_UWORD_ORDERED_UNIT_NEGATIVE \
+++ (_SIDD_UWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK | \
+++ _SIDD_NEGATIVE_POLARITY)
+++
+++#define IMM_SWORD_RANGES_UNIT \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_UNIT_MASK)
+++#define IMM_SWORD_RANGES_BIT \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_RANGES | _SIDD_BIT_MASK)
+++#define IMM_SWORD_EACH_UNIT \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_UNIT_MASK)
+++#define IMM_SWORD_ANY_UNIT \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_UNIT_MASK)
+++#define IMM_SWORD_ORDERED_UNIT \
+++ (_SIDD_SWORD_OPS | _SIDD_CMP_EQUAL_ORDERED | _SIDD_UNIT_MASK)
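+++
+++/*
+++ * _SIDD_UNIT_MASK expands each result bit to a full element (0 or all-ones
+++ * per byte/word), while _SIDD_BIT_MASK leaves the packed bit mask in the low
+++ * element; e.g. the IMM_UWORD_ANY_BIT entry below expects {136, 0, ...}
+++ * because 136 == 0x88 (result bits 3 and 7 set).
+++ */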
+++
+++#define TEST_MM_CMPESTRM_UBYTE_DATA_LEN 4
+++static test_mm_cmpestrm_ubyte_data_t
+++ test_mm_cmpestrm_ubyte_data[TEST_MM_CMPESTRM_UBYTE_DATA_LEN] = {
+++ {{85, 115, 101, 70, 108, 97, 116, 65, 115, 115, 101, 109, 98, 108, 101,
+++ 114},
+++ {85, 115, 105, 110, 103, 65, 110, 65, 115, 115, 101, 109, 98, 108, 101,
+++ 114},
+++ 16,
+++ 16,
+++ IMM_UBYTE_EACH_UNIT_NEGATIVE,
+++ {0, 0, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++ {{97, 101, 105, 111, 117, 121},
+++ {89, 111, 117, 32, 68, 114, 105, 118, 101, 32, 77, 101, 32, 77, 97,
+++ 100},
+++ 6,
+++ 16,
+++ IMM_UBYTE_ANY_UNIT,
+++ {0, 255, 255, 0, 0, 0, 255, 0, 255, 0, 0, 255, 0, 0, 255, 0}},
+++ {{97, 122, 65, 90},
+++ {73, 39, 109, 32, 104, 101, 114, 101, 32, 98, 101, 99, 97, 117, 115,
+++ 101},
+++ 4,
+++ 16,
+++ IMM_UBYTE_RANGES_UNIT,
+++ {255, 0, 255, 0, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255,
+++ 255}},
+++ {{87, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+++ {87, 104, 101, 110, 87, 101, 87, 105, 108, 108, 66, 101, 87, 101, 100,
+++ 33},
+++ 2,
+++ 16,
+++ IMM_UBYTE_ORDERED_UNIT,
+++ {0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0}},
+++};
+++
+++#define TEST_MM_CMPESTRM_SBYTE_DATA_LEN 4
+++static test_mm_cmpestrm_sbyte_data_t
+++ test_mm_cmpestrm_sbyte_data[TEST_MM_CMPESTRM_SBYTE_DATA_LEN] = {
+++ {{-127, -127, 34, 88, 0, 1, -1, 78, 90, 9, 23, 34, 3, -128, 127, 0},
+++ {0, -127, 34, 88, 12, 43, -128, 78, 8, 9, 43, 32, 7, 126, 115, 0},
+++ 16,
+++ 16,
+++ IMM_SBYTE_EACH_UNIT,
+++ {0, -1, -1, -1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, 0, -1}},
+++ {{0, 32, 7, 115, -128, 44, 33},
+++ {0, -127, 34, 88, 12, 43, -128, 78, 8, 9, 43, 32, 7, 126, 115, 0},
+++ 7,
+++ 10,
+++ IMM_SBYTE_ANY_UNIT_MASKED_NEGATIVE,
+++ {0, -1, -1, -1, -1, -1, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0}},
+++ {{-128, -80, -90, 10, 33},
+++ {-126, -93, -80, -77, -56, -23, -10, -1, 0, 3, 10, 12, 13, 33, 34, 56},
+++ 5,
+++ 16,
+++ IMM_SBYTE_RANGES_UNIT,
+++ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0}},
+++ {{104, 9, -12},
+++ {0, 0, 87, 104, 9, -12, 89, -117, 9, 10, -11, 87, -114, 104, 9, -61},
+++ 3,
+++ 16,
+++ IMM_SBYTE_ORDERED_UNIT,
+++ {0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++};
+++
+++#define TEST_MM_CMPESTRM_UWORD_DATA_LEN 4
+++static test_mm_cmpestrm_uword_data_t
+++ test_mm_cmpestrm_uword_data[TEST_MM_CMPESTRM_UWORD_DATA_LEN] = {
+++ {{1, 5, 13, 19, 22},
+++ {12, 60000, 5, 1, 100, 1000, 34, 20},
+++ 5,
+++ 8,
+++ IMM_UWORD_RANGES_UNIT,
+++ {0, 0, 65535, 65535, 0, 0, 0, 0}},
+++ {{65535, 12, 7, 9876, 3456, 12345, 10, 98},
+++ {65535, 0, 10, 9876, 3456, 0, 13, 32},
+++ 8,
+++ 8,
+++ IMM_UWORD_EACH_UNIT,
+++ {65535, 0, 0, 65535, 65535, 0, 0, 0}},
+++ {{100, 0},
+++ {12345, 6766, 234, 0, 1, 34, 89, 100},
+++ 2,
+++ 8,
+++ IMM_UWORD_ANY_BIT,
+++ {136, 0, 0, 0, 0, 0, 0, 0}},
+++ {{123, 67, 890},
+++ {123, 67, 890, 8900, 4, 0, 123, 67},
+++ 3,
+++ 8,
+++ IMM_UWORD_ORDERED_UNIT,
+++ {65535, 0, 0, 0, 0, 0, 65535, 0}},
+++};
+++
+++#define TEST_MM_CMPESTRM_SWORD_DATA_LEN 4
+++static test_mm_cmpestrm_sword_data_t
+++ test_mm_cmpestrm_sword_data[TEST_MM_CMPESTRM_SWORD_DATA_LEN] = {
+++ {{13, 6, 5, 4, 3, 2, 1, 3},
+++ {-7, 16, 5, 4, -1, 6, 1, 3},
+++ 10,
+++ 10,
+++ IMM_SWORD_RANGES_UNIT,
+++ {0, 0, 0, 0, 0, 0, -1, -1}},
+++ {{85, 115, 101, 70, 108, 97, 116, 65},
+++ {85, 115, 105, 110, 103, 65, 110, 65},
+++ 8,
+++ 8,
+++ IMM_SWORD_EACH_UNIT,
+++ {-1, -1, 0, 0, 0, 0, 0, -1}},
+++ {{-32768, 10000, 10, -13},
+++ {-32767, 32767, -32768, 90, 0, -13, 23, 45},
+++ 4,
+++ 8,
+++ IMM_SWORD_ANY_UNIT,
+++ {0, 0, -1, 0, 0, -1, 0, 0}},
+++ {{10, 20, -10, 60},
+++ {0, 0, 0, 10, 20, -10, 60, 10},
+++ 4,
+++ 8,
+++ IMM_SWORD_ORDERED_UNIT,
+++ {0, 0, 0, -1, 0, 0, 0, -1}},
+++};
+++
+++#define MM_CMPESTRM_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_EACH_UNIT_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_ANY_UNIT, __VA_ARGS__) \
+++ _(UBYTE_RANGES_UNIT, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define MM_CMPESTRM_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_UNIT, __VA_ARGS__) \
+++ _(SBYTE_ANY_UNIT_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_RANGES_UNIT, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define MM_CMPESTRM_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_RANGES_UNIT, __VA_ARGS__) \
+++ _(UWORD_EACH_UNIT, __VA_ARGS__) \
+++ _(UWORD_ANY_BIT, __VA_ARGS__) \
+++ _(UWORD_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define MM_CMPESTRM_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_UNIT, __VA_ARGS__) \
+++ _(SWORD_EACH_UNIT, __VA_ARGS__) \
+++ _(SWORD_ANY_UNIT, __VA_ARGS__) \
+++ _(SWORD_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRM_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrm, CMPESTRM, \
+++ IS_CMPESTRM) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrm, CMPESTRM, \
+++ IS_CMPESTRM) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrm, CMPESTRM, \
+++ IS_CMPESTRM) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrm, CMPESTRM, \
+++ IS_CMPESTRM)
+++
+++result_t test_mm_cmpestrm(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRM_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#undef IS_CMPESTRM
+++
+++#define TEST_MM_CMPESTRO_UBYTE_DATA_LEN 4
+++static test_mm_cmpestri_ubyte_data_t
+++ test_mm_cmpestro_ubyte_data[TEST_MM_CMPESTRO_UBYTE_DATA_LEN] = {
+++ {{56, 78, 255, 1, 9},
+++ {56, 78, 43, 255, 1, 6, 9},
+++ 5,
+++ 7,
+++ IMM_UBYTE_ANY_MOST_NEGATIVE,
+++ 0},
+++ {{33, 44, 100, 24, 3, 89, 127, 254, 33, 45, 250},
+++ {33, 44, 100, 22, 3, 98, 125, 254, 33, 4, 243},
+++ 11,
+++ 11,
+++ IMM_UBYTE_EACH_LEAST_MASKED_NEGATIVE,
+++ 0},
+++ {{34, 27, 18, 9}, {}, 4, 16, IMM_UBYTE_RANGES_LEAST_MASKED_NEGATIVE, 1},
+++ {{3, 18, 216},
+++ {3, 18, 222, 3, 17, 216, 3, 18, 216},
+++ 3,
+++ 9,
+++ IMM_UBYTE_ORDERED_LEAST_NEGATIVE,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPESTRO_SBYTE_DATA_LEN 4
+++static test_mm_cmpestri_sbyte_data_t
+++ test_mm_cmpestro_sbyte_data[TEST_MM_CMPESTRO_SBYTE_DATA_LEN] = {
+++ {{23, -23, 24, -24, 25, -25, 26, -26, 27, -27, 28, -28, -29, 29, 30,
+++ 31},
+++ {24, -23, 25, -24, 25, -25, 26, -26, 27, -27, 28, -28, -29, 29, 30,
+++ 31},
+++ 16,
+++ 16,
+++ IMM_SBYTE_EACH_MOST_NEGATIVE,
+++ 1},
+++ {{34, 33, 67, 72, -90, 127, 33, -128, 123, -90, -100, 34, 43, 15, 56,
+++ 3},
+++ {3, 14, 15, 65, 90, -127, 100, 100},
+++ 16,
+++ 8,
+++ IMM_SBYTE_ANY_MOST,
+++ 1},
+++ {{-13, 0, 34},
+++ {-12, -11, 1, 12, 56, 57, 3, 2, -17},
+++ 6,
+++ 9,
+++ IMM_SBYTE_RANGES_MOST_MASKED_NEGATIVE,
+++ 0},
+++ {{1, 2, 3, 4, 5, 6, 7, 8},
+++ {-1, -2, -3, -4, -5, -6, -7, -8, 1, 2, 3, 4, 5, 6, 7, 8},
+++ 8,
+++ 16,
+++ IMM_SBYTE_ORDERED_MOST,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPESTRO_UWORD_DATA_LEN 4
+++static test_mm_cmpestri_uword_data_t
+++ test_mm_cmpestro_uword_data[TEST_MM_CMPESTRO_UWORD_DATA_LEN] = {
+++ {{0, 0, 0, 4, 4, 4, 8, 8},
+++ {0, 0, 0, 3, 3, 16653, 3333, 222},
+++ 8,
+++ 8,
+++ IMM_UWORD_EACH_MOST_MASKED_NEGATIVE,
+++ 0},
+++ {{12, 666, 9456, 10000, 32, 444, 57, 0},
+++ {11, 777, 9999, 32767, 23},
+++ 8,
+++ 5,
+++ IMM_UWORD_ANY_LEAST_MASKED_NEGATIVE,
+++ 1},
+++ {{23, 32, 45, 67},
+++ {10022, 23, 32, 44, 66, 67, 12, 22},
+++ 4,
+++ 8,
+++ IMM_UWORD_RANGES_LEAST_NEGATIVE,
+++ 1},
+++ {{222, 45, 8989},
+++ {221, 222, 45, 8989, 222, 45, 8989},
+++ 3,
+++ 7,
+++ IMM_UWORD_ORDERED_MOST,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPESTRO_SWORD_DATA_LEN 4
+++static test_mm_cmpestri_sword_data_t
+++ test_mm_cmpestro_sword_data[TEST_MM_CMPESTRO_SWORD_DATA_LEN] = {
+++ {{-9999, -9487, -5000, -4433, -3000, -2999, -2000, -1087},
+++ {-32767, -30000, -4998},
+++ 100,
+++ 3,
+++ IMM_SWORD_RANGES_MOST_MASKED_NEGATIVE,
+++ 1},
+++ {{-30, 89, 7777},
+++ {-30, 89, 7777},
+++ 3,
+++ 3,
+++ IMM_SWORD_EACH_MOST_MASKED_NEGATIVE,
+++ 0},
+++ {{8, 9, -100, 1000, -5000, -32000, 32000, 7},
+++ {29999, 32001, 5, 555},
+++ 8,
+++ 4,
+++ IMM_SWORD_ANY_MOST_MASKED_NEGATIVE,
+++ 1},
+++ {{-1, 56, -888, 9000, -23, 12, -1, -1},
+++ {-1, 56, -888, 9000, -23, 12, -1, -1},
+++ 8,
+++ 8,
+++ IMM_SWORD_ORDERED_MOST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define MM_CMPESTRO_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRO_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ANY_MOST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRO_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_ORDERED_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRO_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRO_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestro, CMPESTRO, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestro, CMPESTRO, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestro, CMPESTRO, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestro, CMPESTRO, \
+++ IS_CMPESTRI)
+++
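+++/*
+++ * _mm_cmpestro returns OF, i.e. bit 0 of the post-polarity result: whether
+++ * the very first element of b produced a match.
+++ */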
+++result_t test_mm_cmpestro(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRO_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define TEST_MM_CMPESTRS_UBYTE_DATA_LEN 2
+++static test_mm_cmpestri_ubyte_data_t
+++ test_mm_cmpestrs_ubyte_data[TEST_MM_CMPESTRS_UBYTE_DATA_LEN] = {
+++ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+++ {0},
+++ 16,
+++ 0,
+++ IMM_UBYTE_ANY_MOST,
+++ 0},
+++ {{1, 2, 3}, {1, 2, 3}, 3, 8, IMM_UBYTE_RANGES_MOST, 1},
+++};
+++
+++#define TEST_MM_CMPESTRS_SBYTE_DATA_LEN 2
+++static test_mm_cmpestri_sbyte_data_t
+++ test_mm_cmpestrs_sbyte_data[TEST_MM_CMPESTRS_SBYTE_DATA_LEN] = {
+++ {{-1, -2, -3, -4, -100, 100, 1, 2, 3, 4},
+++ {-90, -80, 111, 67, 88},
+++ 10,
+++ 5,
+++ IMM_SBYTE_EACH_LEAST_MASKED_NEGATIVE,
+++ 1},
+++ {{99, 100, 101, -99, -100, -101, 56, 7},
+++ {-128, -126, 100, 127},
+++ 23,
+++ 4,
+++ IMM_SBYTE_ORDERED_LEAST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPESTRS_UWORD_DATA_LEN 2
+++static test_mm_cmpestri_uword_data_t
+++ test_mm_cmpestrs_uword_data[TEST_MM_CMPESTRS_UWORD_DATA_LEN] = {
+++ {{1},
+++ {90, 65535, 63355, 12, 8, 5, 34, 10000},
+++ 100,
+++ 7,
+++ IMM_UWORD_ANY_MOST_NEGATIVE,
+++ 0},
+++ {{}, {0}, 0, 28, IMM_UWORD_RANGES_MOST_MASKED_NEGATIVE, 1},
+++};
+++
+++#define TEST_MM_CMPESTRS_SWORD_DATA_LEN 2
+++static test_mm_cmpestri_sword_data_t
+++ test_mm_cmpestrs_sword_data[TEST_MM_CMPESTRS_SWORD_DATA_LEN] = {
+++ {{-30000, 2897, 1111, -4455},
+++ {30, 40, 500, 6000, 20, -10, -789, -29999},
+++ 4,
+++ 8,
+++ IMM_SWORD_ORDERED_LEAST_MASKED_NEGATIVE,
+++ 1},
+++ {{34, 56, 789, 1024, 2048, 4096, 8192, -16384},
+++ {3, 9, -27, 81, -216, 1011},
+++ 9,
+++ 6,
+++ IMM_SWORD_EACH_LEAST_NEGATIVE,
+++ 0},
+++};
+++
+++#define MM_CMPESTRS_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_MOST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRS_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRS_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_ANY_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_RANGES_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRS_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST_NEGATIVE, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRS_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrs, CMPESTRS, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrs, CMPESTRS, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrs, CMPESTRS, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrs, CMPESTRS, \
+++ IS_CMPESTRI)
+++
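+++/*
+++ * _mm_cmpestrs and _mm_cmpestrz ignore the comparison result: they return SF
+++ * and ZF respectively, i.e. whether |la| (for s) or |lb| (for z) is smaller
+++ * than the element count. That is why several entries here use deliberately
+++ * out-of-range lengths such as 23 or 100.
+++ */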
+++result_t test_mm_cmpestrs(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRS_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define TEST_MM_CMPESTRZ_UBYTE_DATA_LEN 2
+++static test_mm_cmpestri_ubyte_data_t
+++ test_mm_cmpestrz_ubyte_data[TEST_MM_CMPESTRZ_UBYTE_DATA_LEN] = {
+++ {{0, 1, 2, 3, 4, 5, 6, 7},
+++ {12, 67, 0, 3},
+++ 8,
+++ 4,
+++ IMM_UBYTE_ANY_MOST_MASKED_NEGATIVE,
+++ 1},
+++ {{255, 0, 127, 88},
+++ {1, 2, 4, 8, 16, 32, 64, 128, 254, 233, 209, 41, 66, 77, 90, 100},
+++ 4,
+++ 16,
+++ IMM_UBYTE_RANGES_MOST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPESTRZ_SBYTE_DATA_LEN 2
+++static test_mm_cmpestri_sbyte_data_t
+++ test_mm_cmpestrz_sbyte_data[TEST_MM_CMPESTRZ_SBYTE_DATA_LEN] = {
+++ {{}, {-90, -80, 111, 67, 88}, 0, 18, IMM_SBYTE_EACH_LEAST_NEGATIVE, 0},
+++ {{9, 10, 10, -99, -100, -101, 56, 76},
+++ {-127, 127, -100, -120, 13, 108, 1, -66, -34, 89, -89, 123, 22, -19,
+++ -8},
+++ 7,
+++ 15,
+++ IMM_SBYTE_ORDERED_LEAST_NEGATIVE,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPESTRZ_UWORD_DATA_LEN 2
+++static test_mm_cmpestri_uword_data_t
+++ test_mm_cmpestrz_uword_data[TEST_MM_CMPESTRZ_UWORD_DATA_LEN] = {
+++ {{1},
+++ {9000, 33333, 63333, 120, 8, 55, 34, 100},
+++ 100,
+++ 7,
+++ IMM_UWORD_ANY_LEAST_NEGATIVE,
+++ 1},
+++ {{1, 2, 3},
+++ {1, 10000, 65535, 8964, 9487, 32, 451, 666},
+++ 3,
+++ 8,
+++ IMM_UWORD_RANGES_MOST_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPESTRZ_SWORD_DATA_LEN 2
+++static test_mm_cmpestri_sword_data_t
+++ test_mm_cmpestrz_sword_data[TEST_MM_CMPESTRZ_SWORD_DATA_LEN] = {
+++ {{30000, 28997, 11111, 4455},
+++ {30, 40, 500, 6000, 20, -10, -789, -29999},
+++ 4,
+++ 8,
+++ IMM_SWORD_ORDERED_LEAST_MASKED_NEGATIVE,
+++ 0},
+++ {{789, 1024, 2048, 4096, 8192},
+++ {-3, 9, -27, 18, -217, 10111, 22222},
+++ 5,
+++ 7,
+++ IMM_SWORD_EACH_LEAST_MASKED_NEGATIVE,
+++ 1},
+++};
+++
+++#define MM_CMPESTRZ_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_MOST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_MOST, __VA_ARGS__)
+++
+++#define MM_CMPESTRZ_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRZ_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_RANGES_MOST_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPESTRZ_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_ANY_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPESTRZ_TEST_CASES \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpestrz, CMPESTRZ, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpestrz, CMPESTRZ, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(UWORD, uword, uint16_t, cmpestrz, CMPESTRZ, \
+++ IS_CMPESTRI) \
+++ ENUM_MM_CMPESTRX_TEST_CASES(SWORD, sword, int16_t, cmpestrz, CMPESTRZ, \
+++ IS_CMPESTRI)
+++
+++result_t test_mm_cmpestrz(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPESTRZ_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#undef IS_CMPESTRI
+++
+++result_t test_mm_cmpgt_epi64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int64_t *_a = (const int64_t *) impl.mTestIntPointer1;
+++ const int64_t *_b = (const int64_t *) impl.mTestIntPointer2;
+++
+++ int64_t result[2];
+++ result[0] = _a[0] > _b[0] ? -1 : 0;
+++ result[1] = _a[1] > _b[1] ? -1 : 0;
+++
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
+++ __m128i iret = _mm_cmpgt_epi64(a, b);
+++
+++ return validateInt64(iret, result[0], result[1]);
+++}
+++
+++#define IS_CMPISTRI 1
+++
+++#define DEF_ENUM_MM_CMPISTRX_VARIANT(c, ...) c,
+++
+++#define EVAL_MM_CMPISTRX_TEST_CASE(c, type, data_type, im, IM) \
+++ do { \
+++ data_type *a = test_mm_##im##_##type##_data[c].a, \
+++ *b = test_mm_##im##_##type##_data[c].b; \
+++ const int imm8 = IMM_##c; \
+++ IIF(IM) \
+++ (int expect = test_mm_##im##_##type##_data[c].expect, \
+++ data_type *expect = test_mm_##im##_##type##_data[c].expect); \
+++ __m128i ma, mb; \
+++ memcpy(&ma, a, sizeof(ma)); \
+++ memcpy(&mb, b, sizeof(mb)); \
+++ IIF(IM) \
+++ (int res = _mm_##im(ma, mb, imm8), \
+++ __m128i res = _mm_##im(ma, mb, imm8)); \
+++ if (IIF(IM)(res != expect, memcmp(expect, &res, sizeof(__m128i)))) \
+++ return TEST_FAIL; \
+++ } while (0);
+++
+++#define ENUM_MM_CMPISTRX_TEST_CASES(type, type_lower, data_type, func, FUNC, \
+++ IM) \
+++ enum { MM_##FUNC##_##type##_TEST_CASES(DEF_ENUM_MM_CMPISTRX_VARIANT) }; \
+++ MM_##FUNC##_##type##_TEST_CASES(EVAL_MM_CMPISTRX_TEST_CASE, type_lower, \
+++ data_type, func, IM)
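+++
+++/*
+++ * The implicit-length (cmpistr*) harness mirrors the explicit-length one
+++ * above, minus the la/lb arguments: each operand ends at its first zero
+++ * element, or at the register boundary if no zero is present.
+++ */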
+++
+++typedef struct {
+++ uint8_t a[16], b[16];
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpistri_ubyte_data_t;
+++typedef struct {
+++ int8_t a[16], b[16];
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpistri_sbyte_data_t;
+++typedef struct {
+++ uint16_t a[8], b[8];
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpistri_uword_data_t;
+++typedef struct {
+++ int16_t a[8], b[8];
+++ const int imm8;
+++ int expect;
+++} test_mm_cmpistri_sword_data_t;
+++
+++#define TEST_MM_CMPISTRA_UBYTE_DATA_LEN 4
+++static test_mm_cmpistri_ubyte_data_t
+++ test_mm_cmpistra_ubyte_data[TEST_MM_CMPISTRA_UBYTE_DATA_LEN] = {
+++ {{10, 11, 12, 13, 14, 15, 16, 17, 18, 9, 20, 98, 97, 96, 95, 127},
+++ {1, 2, 3, 4, 5, 6, 7, 8, 99, 100, 101, 102, 103, 104, 105, 106},
+++ IMM_UBYTE_ANY_LEAST,
+++ 1},
+++ {{1, 22, 33, 44, 5, 66, 7, 88, 9, 10, 111, 0},
+++ {2, 23, 34, 21, 6, 65, 8, 84, 99, 100, 11, 112, 123, 14, 15, 6},
+++ IMM_UBYTE_EACH_LEAST,
+++ 1},
+++ {{5, 15, 25, 35, 45, 55, 65, 75, 0},
+++ {4, 6, 14, 16, 24, 26, 34, 36, 44, 46, 54, 56, 74, 76},
+++ IMM_UBYTE_RANGES_LEAST,
+++ 0},
+++ {{4, 14, 64, 84, 0},
+++ {4, 14, 64, 84, 0, 4, 14, 65, 84, 0, 4, 14, 64, 84, 0, 1},
+++ IMM_UBYTE_ORDERED_MOST_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPISTRA_SBYTE_DATA_LEN 4
+++static test_mm_cmpistri_sbyte_data_t
+++ test_mm_cmpistra_sbyte_data[TEST_MM_CMPISTRA_SBYTE_DATA_LEN] = {
+++ {{-11, -13, -43, -50, 66, 77, 87, 98, -128, 127, 126, 99, 1, 2, 3, -5},
+++ {-12, -13, -43, -56, 66, 78, 88, 98, -125, 127, 120, 9, 100, 22, 54,
+++ -10},
+++ IMM_SBYTE_EACH_LEAST,
+++ 0},
+++ {{10, 11, 100, -90, 0},
+++ {8, 9, 10, 11, 0, 8, 9, 10, -90, 0},
+++ IMM_SBYTE_ANY_LEAST_NEGATIVE,
+++ 0},
+++ {{-90, -60, -34, -25, 34, 56, 70, 79, 0},
+++ {-100, -59, -35, -24, -101, 33, 57, 69, 80, 81, -128, 100, 101, 102,
+++ -101, -102},
+++ IMM_SBYTE_RANGES_LEAST,
+++ 1},
+++ {{1, 1, 1, 1, -1, -1, -1, -1, -10, 10, -10, 10, 44, -44, 44, -44},
+++ {1, 1, -1, 1, -1, -1, -1, -1, -10, 10, -10, 10, 44, -44, 44, -44},
+++ IMM_SBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRA_UWORD_DATA_LEN 4
+++static test_mm_cmpistri_uword_data_t
+++ test_mm_cmpistra_uword_data[TEST_MM_CMPISTRA_UWORD_DATA_LEN] = {
+++ {{88, 888, 8888, 31888, 10888, 18088, 10880, 28888},
+++ {888, 88, 8888, 32000, 10888, 18000, 10888, 28888},
+++ IMM_UWORD_EACH_LEAST_NEGATIVE,
+++ 0},
+++ {{3, 4, 555, 6666, 7777, 888, 9, 100},
+++ {1, 2, 333, 4444, 5555, 666, 7, 8},
+++ IMM_UWORD_ANY_LEAST,
+++ 1},
+++ {{1000, 2000, 2002, 3000, 3002, 4000, 5000, 5999},
+++ {999, 2001, 3001, 4001, 4002, 4999, 6000, 6001},
+++ IMM_UWORD_RANGES_LEAST,
+++ 1},
+++ {{55, 66, 77, 888, 0},
+++ {55, 66, 77, 888, 0, 33, 2, 10000},
+++ IMM_UWORD_ORDERED_LEAST,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPISTRA_SWORD_DATA_LEN 4
+++static test_mm_cmpistri_sword_data_t
+++ test_mm_cmpistra_sword_data[TEST_MM_CMPISTRA_SWORD_DATA_LEN] = {
+++ {{-32000, -28000, 0},
+++ {-32001, -29999, -28001, -28000, -27999, -26000, -32768},
+++ IMM_SWORD_RANGES_LEAST_MASKED_NEGATIVE,
+++ 0},
+++ {{-12, -11, -10, -9, -8, -7, 90, 1000},
+++ {-13, -10, 9, -8, -7, 1000, 1000, 90},
+++ IMM_SWORD_EACH_LEAST,
+++ 1},
+++ {{33, 44, 787, 23, 0},
+++ {32, 43, 788, 0, 32, 0, 43, 0},
+++ IMM_SWORD_ANY_LEAST,
+++ 0},
+++ {{18, 78, 999, -56, 0},
+++ {18, 78, 999, 56, 18, 78, 999, 4},
+++ IMM_SWORD_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define MM_CMPISTRA_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_MOST_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPISTRA_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRA_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRA_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRA_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistra, CMPISTRA, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistra, CMPISTRA, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistra, CMPISTRA, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistra, CMPISTRA, \
+++ IS_CMPISTRI)
+++
+++result_t test_mm_cmpistra(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRA_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define TEST_MM_CMPISTRC_UBYTE_DATA_LEN 4
+++static test_mm_cmpistri_ubyte_data_t
+++ test_mm_cmpistrc_ubyte_data[TEST_MM_CMPISTRC_UBYTE_DATA_LEN] = {
+++ {{89, 64, 88, 23, 11, 109, 34, 55, 0},
+++ {2, 64, 87, 32, 1, 110, 43, 66, 0},
+++ IMM_UBYTE_ANY_LEAST,
+++ 1},
+++ {{99, 67, 2, 127, 125, 3, 24, 77, 32, 68, 96, 74, 70, 110, 111, 5},
+++ {98, 88, 67, 125, 111, 4, 56, 88, 33, 69, 99, 79, 123, 11, 10, 6},
+++ IMM_UBYTE_EACH_LEAST,
+++ 0},
+++ {{2, 3, 74, 78, 81, 83, 85, 87, 89, 90, 0},
+++ {86, 90, 74, 85, 87, 81, 2, 3, 3, 3, 75, 76, 77, 78, 82, 85},
+++ IMM_UBYTE_RANGES_MOST_NEGATIVE,
+++ 0},
+++ {{45, 67, 8, 9, 0},
+++ {67, 45, 67, 8, 9, 45, 67, 8, 9, 45, 67, 8, 9, 45, 67, 8},
+++ IMM_UBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRC_SBYTE_DATA_LEN 4
+++static test_mm_cmpistri_sbyte_data_t
+++ test_mm_cmpistrc_sbyte_data[TEST_MM_CMPISTRC_SBYTE_DATA_LEN] = {
+++ {{35, -35, 67, -66, 34, 55, 12, -100, 34, -34, 66, -67, 52, 100, 127,
+++ -128},
+++ {35, -35, 67, -66, 0, 55, 12, -100, 0, -34, 66, -67, 0, 100, 127,
+++ -128},
+++ IMM_SBYTE_EACH_MOST_MASKED_NEGATIVE,
+++ 0},
+++ {{-119, 112, 105, 104, 0},
+++ {119, -112, 105, -104, 104, -34, 112, -119, 0},
+++ IMM_SBYTE_ANY_LEAST,
+++ 1},
+++ {{-79, -69, -40, -35, 34, 45, 67, 88, 0},
+++ {1, 2, 3, 4, 5, 6, 7, 8, 0},
+++ IMM_SBYTE_RANGES_LEAST,
+++ 0},
+++ {{22, -109, 123, 115, -12, 0},
+++ {22, -109, 12, 115, 22, -109, 123, 115, -12, 0},
+++ IMM_SBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRC_UWORD_DATA_LEN 4
+++static test_mm_cmpistri_uword_data_t
+++ test_mm_cmpistrc_uword_data[TEST_MM_CMPISTRC_UWORD_DATA_LEN] = {
+++ {{23, 45, 67, 89, 102, 121, 23, 45},
+++ {23, 45, 67, 89, 102, 121, 23, 44},
+++ IMM_UWORD_EACH_LEAST,
+++ 1},
+++ {{1, 11, 55, 75}, {13, 14, 56, 77, 0}, IMM_UWORD_ANY_LEAST, 0},
+++ {{1, 9, 11, 19, 21, 29, 91, 99},
+++ {10, 29, 30, 40, 50, 60, 70, 80},
+++ IMM_UWORD_RANGES_LEAST,
+++ 1},
+++ {{3, 4, 5, 0},
+++ {0, 3, 4, 5, 3, 4, 5, 0},
+++ IMM_UWORD_ORDERED_LEAST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPISTRC_SWORD_DATA_LEN 4
+++static test_mm_cmpistri_sword_data_t
+++ test_mm_cmpistrc_sword_data[TEST_MM_CMPISTRC_SWORD_DATA_LEN] = {
+++ {{-78, -56, 1000, 1002},
+++ {-79, -55, -12, -13, 999, 1003, -80, 10000},
+++ IMM_SWORD_RANGES_LEAST,
+++ 0},
+++ {{45, 32767, -30000, 2345, -23450, 0},
+++ {45, 32767, -30000, 2346, -23456, 0, 45, 333},
+++ IMM_SWORD_EACH_LEAST,
+++ 1},
+++ {{-10000, -20000, -30000, 10000, 20000, 30000, 0},
+++ {10000, 20000, 30000, -10000, -20000, 20000, -30000, 12},
+++ IMM_SWORD_ANY_MOST_NEGATIVE,
+++ 1},
+++ {{1, 2, -3, -55, -666, -7777, 8888},
+++ {2, -3, -55, -666, -7777, 8888, 1},
+++ IMM_SWORD_ORDERED_LEAST,
+++ 0},
+++};
+++
+++#define MM_CMPISTRC_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRC_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRC_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_LEAST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPISTRC_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRC_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrc, CMPISTRC, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrc, CMPISTRC, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrc, CMPISTRC, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrc, CMPISTRC, \
+++ IS_CMPISTRI)
+++
+++result_t test_mm_cmpistrc(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRC_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define TEST_MM_CMPISTRI_UBYTE_DATA_LEN 4
+++static test_mm_cmpistri_ubyte_data_t
+++ test_mm_cmpistri_ubyte_data[TEST_MM_CMPISTRI_UBYTE_DATA_LEN] = {
+++ {{104, 117, 110, 116, 114, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+++ {33, 64, 35, 36, 37, 94, 38, 42, 40, 41, 91, 93, 58, 59, 60, 62},
+++ IMM_UBYTE_ANY_LEAST,
+++ 16},
+++ {{4, 5, 6, 7, 8, 111, 34, 21, 0, 0, 0, 0, 0, 0, 0, 0},
+++ {5, 6, 7, 8, 8, 111, 43, 12, 0, 0, 0, 0, 0, 0, 0, 0},
+++ IMM_UBYTE_EACH_MOST_MASKED_NEGATIVE,
+++ 15},
+++ {{65, 90, 97, 122, 48, 57, 0},
+++ {47, 46, 43, 44, 42, 43, 45, 41, 40, 123, 124, 125, 126, 127, 1, 2},
+++ IMM_UBYTE_RANGES_LEAST,
+++ 16},
+++ {{111, 222, 22, 0},
+++ {33, 44, 55, 66, 77, 88, 99, 111, 222, 22, 11, 0},
+++ IMM_UBYTE_ORDERED_LEAST,
+++ 7},
+++};
+++
+++#define TEST_MM_CMPISTRI_SBYTE_DATA_LEN 4
+++static test_mm_cmpistri_sbyte_data_t
+++ test_mm_cmpistri_sbyte_data[TEST_MM_CMPISTRI_SBYTE_DATA_LEN] = {
+++ {{1, 2, 3, 4, 5, -99, -128, -100, -1, 49, 0},
+++ {2, 3, 3, 4, 5, -100, -128, -99, 1, 44, 0},
+++ IMM_SBYTE_EACH_LEAST,
+++ 2},
+++ {{99, 100, 23, -90, 0},
+++ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 99, 100, 23, -90, -90, 100},
+++ IMM_SBYTE_ANY_LEAST,
+++ 10},
+++ {{-10, -2, 89, 97, 0},
+++ {-11, -12, -3, 1, 97, 0},
+++ IMM_SBYTE_RANGES_LEAST_NEGATIVE,
+++ 0},
+++ {{-10, -90, -22, 30, 87, 127, 0}, {0}, IMM_SBYTE_ORDERED_LEAST, 16},
+++};
+++
+++#define TEST_MM_CMPISTRI_UWORD_DATA_LEN 4
+++static test_mm_cmpistri_uword_data_t
+++ test_mm_cmpistri_uword_data[TEST_MM_CMPISTRI_UWORD_DATA_LEN] = {
+++ {{38767, 99, 1234, 65535, 2222, 1, 34456, 11},
+++ {38768, 999, 1235, 4444, 2222, 1, 34456, 12},
+++ IMM_UWORD_EACH_LEAST,
+++ 4},
+++ {{22222, 33333, 44444, 55555, 6000, 600, 60, 6},
+++ {0},
+++ IMM_UWORD_ANY_LEAST,
+++ 8},
+++ {{34, 777, 1000, 1004, 0},
+++ {33, 32, 889, 1003, 0},
+++ IMM_UWORD_RANGES_LEAST,
+++ 3},
+++ {{44, 555, 44, 0},
+++ {44, 555, 44, 555, 44, 555, 44, 0},
+++ IMM_UWORD_ORDERED_MOST_NEGATIVE,
+++ 7},
+++};
+++
+++#define TEST_MM_CMPISTRI_SWORD_DATA_LEN 4
+++static test_mm_cmpistri_sword_data_t
+++ test_mm_cmpistri_sword_data[TEST_MM_CMPISTRI_SWORD_DATA_LEN] = {
+++ {{-1, -5, 10, 30, 40, 0},
+++ {13, -2, 7, 80, 11, 0},
+++ IMM_SWORD_RANGES_LEAST,
+++ 0},
+++ {{-12, 12, 6666, 777, 0},
+++ {11, 12, 6666, 777, 0},
+++ IMM_SWORD_EACH_LEAST,
+++ 1},
+++ {{23, 22, 33, 567, 9999, 12345, 0},
+++ {23, 22, 23, 22, 23, 22, 23, 12222},
+++ IMM_SWORD_ANY_MOST,
+++ 6},
+++ {{12, -234, -567, 8888, 0},
+++ {13, -234, -567, 8888, 12, -234, -567, 8889},
+++ IMM_SWORD_ORDERED_LEAST,
+++ 8},
+++};
+++
+++#define MM_CMPISTRI_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_EACH_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRI_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_LEAST_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRI_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_MOST_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPISTRI_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_MOST, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRI_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistri, CMPISTRI, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistri, CMPISTRI, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistri, CMPISTRI, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistri, CMPISTRI, \
+++ IS_CMPISTRI)
+++
+++result_t test_mm_cmpistri(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRI_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#define IS_CMPISTRM 0
+++
+++typedef struct {
+++ uint8_t a[16], b[16];
+++ const int imm8;
+++ uint8_t expect[16];
+++} test_mm_cmpistrm_ubyte_data_t;
+++typedef struct {
+++ int8_t a[16], b[16];
+++ const int imm8;
+++ int8_t expect[16];
+++} test_mm_cmpistrm_sbyte_data_t;
+++typedef struct {
+++ uint16_t a[8], b[8];
+++ const int imm8;
+++ uint16_t expect[8];
+++} test_mm_cmpistrm_uword_data_t;
+++typedef struct {
+++ int16_t a[8], b[8];
+++ const int imm8;
+++ int16_t expect[8];
+++} test_mm_cmpistrm_sword_data_t;
+++
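+++// Each entry is {a, b, imm8 control word, expected mask from _mm_cmpistrm}.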
+++#define TEST_MM_CMPISTRM_UBYTE_DATA_LEN 4
+++static test_mm_cmpistrm_ubyte_data_t
+++ test_mm_cmpistrm_ubyte_data[TEST_MM_CMPISTRM_UBYTE_DATA_LEN] = {
+++ {{88, 89, 90, 91, 92, 93, 0},
+++ {78, 88, 99, 127, 92, 93, 0},
+++ IMM_UBYTE_EACH_UNIT,
+++ {0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+++ 255}},
+++ {{30, 41, 52, 63, 74, 85, 0},
+++ {30, 42, 51, 63, 74, 85, 0},
+++ IMM_UBYTE_ANY_BIT,
+++ {57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++ {{34, 32, 21, 16, 7, 0},
+++ {34, 33, 32, 31, 30, 29, 10, 6, 0},
+++ IMM_UBYTE_RANGES_UNIT,
+++ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++ {{33, 21, 123, 89, 76, 56, 0},
+++ {33, 21, 124, 33, 21, 123, 89, 76, 56, 33, 21, 123, 89, 76, 56, 22},
+++ IMM_UBYTE_ORDERED_UNIT,
+++ {0, 0, 0, 255, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0}},
+++};
+++
+++#define TEST_MM_CMPISTRM_SBYTE_DATA_LEN 4
+++static test_mm_cmpistrm_sbyte_data_t
+++ test_mm_cmpistrm_sbyte_data[TEST_MM_CMPISTRM_SBYTE_DATA_LEN] = {
+++ {{-11, -90, -128, 127, 66, 45, 23, 32, 99, 10, 0},
+++ {-10, -90, -124, 33, 66, 45, 23, 22, 99, 100, 0},
+++ IMM_SBYTE_EACH_BIT_MASKED_NEGATIVE,
+++ {-115, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++ {{13, 14, 55, 1, 32, 100, 101, 102, 103, 97, 23, 21, 45, 54, 55, 56},
+++ {22, 109, 87, 45, 1, 103, 22, 102, 43, 87, 78, 56, 65, 55, 44, 33},
+++ IMM_SBYTE_ANY_UNIT,
+++ {0, 0, 0, -1, -1, -1, 0, -1, 0, 0, 0, -1, 0, -1, 0, 0}},
+++ {{-31, -28, -9, 10, 45, 67, 88, 0},
+++ {-30, -32, -33, -44, 93, 44, 9, 89, 0},
+++ IMM_SBYTE_RANGES_UNIT,
+++ {-1, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++ {{34, -10, 78, -99, -100, 100, 0},
+++ {34, 123, 88, 4, 34, -10, 78, -99, -100, 100, 34, -10, 78, -99, -100,
+++ -100},
+++ IMM_SBYTE_ORDERED_UNIT,
+++ {0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+++};
+++
+++#define TEST_MM_CMPISTRM_UWORD_DATA_LEN 4
+++static test_mm_cmpistrm_uword_data_t
+++ test_mm_cmpistrm_uword_data[TEST_MM_CMPISTRM_UWORD_DATA_LEN] = {
+++ {{1024, 2048, 4096, 5000, 0},
+++ {1023, 1000, 2047, 1596, 5566, 5666, 4477, 9487},
+++ IMM_UWORD_RANGES_UNIT,
+++ {0, 0, 65535, 65535, 0, 0, 65535, 0}},
+++ {{1, 2, 345, 7788, 10000, 0},
+++ {2, 1, 345, 7788, 10000, 0},
+++ IMM_UWORD_EACH_UNIT,
+++ {0, 0, 65535, 65535, 65535, 65535, 65535, 65535}},
+++ {{100, 0},
+++ {12345, 6766, 234, 0, 1, 34, 89, 100},
+++ IMM_UWORD_ANY_UNIT,
+++ {0, 0, 0, 0, 0, 0, 0, 0}},
+++ {{34, 122, 9000, 0},
+++ {34, 122, 9000, 34, 122, 9000, 34, 122},
+++ IMM_UWORD_ORDERED_UNIT_NEGATIVE,
+++ {0, 65535, 65535, 0, 65535, 65535, 0, 65535}},
+++};
+++
+++#define TEST_MM_CMPISTRM_SWORD_DATA_LEN 4
+++static test_mm_cmpistrm_sword_data_t
+++ test_mm_cmpistrm_sword_data[TEST_MM_CMPISTRM_SWORD_DATA_LEN] = {
+++ {{-39, -10, 17, 89, 998, 1000, 1234, 4566},
+++ {-40, -52, -39, -29, 100, 1024, 4565, 4600},
+++ IMM_SWORD_RANGES_BIT,
+++ {0, 0, -1, -1, 0, 0, -1, 0}},
+++ {{345, -1900, -10000, -30000, 50, 6789, 0},
+++ {103, -1901, -10000, 32767, 50, 6780, 0},
+++ IMM_SWORD_EACH_UNIT,
+++ {0, 0, -1, 0, -1, 0, -1, -1}},
+++ {{677, 10001, 1001, 23, 0},
+++ {345, 677, 10001, 1003, 1001, 32, 23, 677},
+++ IMM_SWORD_ANY_UNIT,
+++ {0, -1, -1, 0, -1, 0, -1, -1}},
+++ {{1024, -2288, 3752, -4096, 0},
+++ {1024, 1024, -2288, 3752, -4096, 1024, -2288, 3752},
+++ IMM_SWORD_ORDERED_UNIT,
+++ {0, -1, 0, 0, 0, -1, 0, 0}},
+++};
+++
+++#define MM_CMPISTRM_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_EACH_UNIT, __VA_ARGS__) \
+++ _(UBYTE_ANY_BIT, __VA_ARGS__) \
+++ _(UBYTE_RANGES_UNIT, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define MM_CMPISTRM_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_BIT_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ANY_UNIT, __VA_ARGS__) \
+++ _(SBYTE_RANGES_UNIT, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define MM_CMPISTRM_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_RANGES_UNIT, __VA_ARGS__) \
+++ _(UWORD_EACH_UNIT, __VA_ARGS__) \
+++ _(UWORD_ANY_UNIT, __VA_ARGS__) \
+++ _(UWORD_ORDERED_UNIT_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPISTRM_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_UNIT, __VA_ARGS__) \
+++ _(SWORD_EACH_UNIT, __VA_ARGS__) \
+++ _(SWORD_ANY_UNIT, __VA_ARGS__) \
+++ _(SWORD_ORDERED_UNIT, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRM_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrm, CMPISTRM, \
+++ IS_CMPISTRM) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrm, CMPISTRM, \
+++ IS_CMPISTRM) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrm, CMPISTRM, \
+++ IS_CMPISTRM) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrm, CMPISTRM, \
+++ IS_CMPISTRM)
+++
+++result_t test_mm_cmpistrm(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRM_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
+++#undef IS_CMPISTRM
+++
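+++// The cmpistro tests reuse the cmpistri data layout; the last field holds the
+++// expected output flag (0 or 1) rather than an index.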
+++#define TEST_MM_CMPISTRO_UBYTE_DATA_LEN 4
+++static test_mm_cmpistri_ubyte_data_t
+++ test_mm_cmpistro_ubyte_data[TEST_MM_CMPISTRO_UBYTE_DATA_LEN] = {
+++ {{3, 4, 5, 0}, {5, 5, 5, 4, 3, 0}, IMM_UBYTE_ANY_LEAST, 1},
+++ {{23, 127, 88, 3, 45, 6, 7, 2, 0},
+++ {32, 127, 87, 2, 44, 32, 1, 2, 0},
+++ IMM_UBYTE_EACH_MOST_NEGATIVE,
+++ 1},
+++ {{3, 4, 55, 56, 0},
+++ {2, 3, 4, 5, 43, 54, 55, 56, 0},
+++ IMM_UBYTE_RANGES_LEAST,
+++ 0},
+++ {{55, 66, 77, 11, 23, 0},
+++ {55, 55, 66, 77, 11, 23, 55, 66, 77, 11, 23, 33, 123, 18, 0},
+++ IMM_UBYTE_ORDERED_LEAST,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPISTRO_SBYTE_DATA_LEN 4
+++static test_mm_cmpistri_sbyte_data_t
+++ test_mm_cmpistro_sbyte_data[TEST_MM_CMPISTRO_SBYTE_DATA_LEN] = {
+++ {{33, -33, 23, -32, -1, -1, 23, 46, 78, 34, 54, 100, 90, 91, 92, 101},
+++ {32, 33, 23, -33, -2, -3, 23, 46, -78, 43, 56, 10, 9, 91, 90, 126},
+++ IMM_SBYTE_EACH_LEAST,
+++ 0},
+++ {{-1, -2, -3, -4, -5, -6, -7, -8, 87, 86, 85, 84, 83, 82, 81, 80},
+++ {87, 79, 0},
+++ IMM_SBYTE_ANY_LEAST,
+++ 1},
+++ {{3, 4, 2, 0},
+++ {3, 3, 4, 5, 6, 2, 0},
+++ IMM_SBYTE_RANGES_MOST_NEGATIVE,
+++ 0},
+++ {{23, 66, 1, 13, 17, 1, 13, 17, 0},
+++ {23, 66, 1, 13, 17, 1, 13, 17, 32, 23, 66, 1, 13, 17, 1, 13},
+++ IMM_SBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRO_UWORD_DATA_LEN 4
+++static test_mm_cmpistri_uword_data_t
+++ test_mm_cmpistro_uword_data[TEST_MM_CMPISTRO_UWORD_DATA_LEN] = {
+++ {{3333, 4444, 10000, 20000, 40000, 50000, 65535, 0},
+++ {3332, 4443, 10000, 20001, 40000, 50000, 65534, 0},
+++ IMM_UWORD_EACH_LEAST,
+++ 0},
+++ {{1, 2, 333, 4444, 55555, 7777, 23, 347},
+++ {4444, 7777, 55555, 23, 347, 2, 1, 0},
+++ IMM_UWORD_ANY_LEAST,
+++ 1},
+++ {{356, 380, 320, 456, 0},
+++ {455, 379, 333, 319, 300, 299, 0},
+++ IMM_UWORD_RANGES_LEAST,
+++ 1},
+++ {{3, 1001, 235, 0},
+++ {3, 1001, 235, 0, 3, 1001, 235, 0},
+++ IMM_UWORD_ORDERED_MOST_MASKED_NEGATIVE,
+++ 0},
+++};
+++
+++#define TEST_MM_CMPISTRO_SWORD_DATA_LEN 4
+++static test_mm_cmpistri_sword_data_t
+++ test_mm_cmpistro_sword_data[TEST_MM_CMPISTRO_SWORD_DATA_LEN] = {
+++ {{-10, -5, -100, -90, 45, 56, 1000, 1009},
+++ {54, -1, -5, -6, 1001, 10001, 1009, 1009},
+++ IMM_SWORD_RANGES_LEAST,
+++ 1},
+++ {{456, -32768, 32767, 13, 0},
+++ {455, -32768, 32767, 31, 0},
+++ IMM_SWORD_EACH_LEAST,
+++ 0},
+++ {{23, 46, -44, 32000, 0},
+++ {23, 66, -44, 678, 32000, 0},
+++ IMM_SWORD_ANY_MOST_MASKED_NEGATIVE,
+++ 0},
+++ {{-7900, -101, -34, 666, 345, 0},
+++ {-7900, -101, -34, 666, 345, -7900, -191, -34},
+++ IMM_SWORD_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define MM_CMPISTRO_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_EACH_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRO_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_MOST_NEGATIVE, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRO_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_MOST_MASKED_NEGATIVE, __VA_ARGS__)
+++
+++#define MM_CMPISTRO_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_MOST_MASKED_NEGATIVE, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRO_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistro, CMPISTRO, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistro, CMPISTRO, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistro, CMPISTRO, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistro, CMPISTRO, \
+++ IS_CMPISTRI)
+++
+++result_t test_mm_cmpistro(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRO_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
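+++// For cmpistrs the last field is the expected SF flag: 1 when string a
+++// contains a terminating null element, 0 otherwise.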
+++#define TEST_MM_CMPISTRS_UBYTE_DATA_LEN 4
+++static test_mm_cmpistri_ubyte_data_t
+++ test_mm_cmpistrs_ubyte_data[TEST_MM_CMPISTRS_UBYTE_DATA_LEN] = {
+++ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+++ {1, 2, 3, 4, 5, 0},
+++ IMM_UBYTE_ANY_LEAST,
+++ 0},
+++ {{127, 126, 125, 124, 0},
+++ {127, 1, 34, 43, 54, 0},
+++ IMM_UBYTE_EACH_LEAST,
+++ 1},
+++ {{127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127,
+++ 127, 127},
+++ {56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 0},
+++ IMM_UBYTE_RANGES_LEAST,
+++ 0},
+++ {{33, 44, 55, 78, 99, 100, 101, 102, 0},
+++ {0},
+++ IMM_UBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRS_SBYTE_DATA_LEN 4
+++static test_mm_cmpistri_sbyte_data_t
+++ test_mm_cmpistrs_sbyte_data[TEST_MM_CMPISTRS_SBYTE_DATA_LEN] = {
+++ {{100, 99, 98, 97, -67, -4, -5, -6, -7, -1, -2, -3, -128, -128, -128,
+++ -128},
+++ {0},
+++ IMM_SBYTE_EACH_LEAST,
+++ 0},
+++ {{-128, -128, -128, -128, 127, 127, 127, 127, -128, -128, -128, -128,
+++ 127, 127, 127, 127},
+++ {-1, -2, -11, -98, -12, 0},
+++ IMM_SBYTE_ANY_LEAST,
+++ 0},
+++ {{0, 1, 2, 3, 4, 5, -6, -7},
+++ {0, 1, 2, 3, 4, 5, 6, 7},
+++ IMM_SBYTE_RANGES_LEAST,
+++ 1},
+++ {{0, 1, 0, -1, 0, -2, 0, 0, -3, 4, 0, 0, 5, 6, 7, 8},
+++ {0},
+++ IMM_SBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRS_UWORD_DATA_LEN 4
+++static test_mm_cmpistri_uword_data_t
+++ test_mm_cmpistrs_uword_data[TEST_MM_CMPISTRS_UWORD_DATA_LEN] = {
+++ {{0, 1, 2, 3, 65535, 0, 0, 0},
+++ {9, 8, 7, 6, 5, 4, 3, 2},
+++ IMM_UWORD_EACH_LEAST,
+++ 1},
+++ {{4, 567, 65535, 32, 34, 0}, {0}, IMM_UWORD_ANY_LEAST, 1},
+++ {{65535, 65535, 65535, 65535, 65535, 65535, 65535, 65535},
+++ {1, 2, 3, 4, 900, 7890, 6767, 0},
+++ IMM_UWORD_RANGES_LEAST,
+++ 0},
+++ {{1, 2, 3, 4, 5, 6, 7, 8}, {1, 2, 3, 4, 0}, IMM_UWORD_ORDERED_LEAST, 0},
+++};
+++
+++#define TEST_MM_CMPISTRS_SWORD_DATA_LEN 4
+++static test_mm_cmpistri_sword_data_t
+++ test_mm_cmpistrs_sword_data[TEST_MM_CMPISTRS_SWORD_DATA_LEN] = {
+++ {{-32768, -32768, -32768, -32768, -32768, -32768, -32768, -3276},
+++ {34, 45, 6, 7, 9, 8, 7, 6},
+++ IMM_SWORD_RANGES_LEAST,
+++ 0},
+++ {{1000, 2000, 4000, 8000, 16000, 32000, 32767, 0},
+++ {3, 4, 56, 23, 0},
+++ IMM_SWORD_EACH_LEAST,
+++ 1},
+++ {{0, 1, 3, 4, -32768, 9, 0, 1},
+++ {56, 47, 43, 999, 1111, 0},
+++ IMM_SWORD_ANY_LEAST,
+++ 1},
+++ {{1111, 1212, 831, 2345, 32767, 32767, -32768, 32767},
+++ {0},
+++ IMM_SWORD_ORDERED_LEAST,
+++ 0},
+++};
+++
+++#define MM_CMPISTRS_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRS_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRS_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRS_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRS_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrs, CMPISTRS, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrs, CMPISTRS, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrs, CMPISTRS, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrs, CMPISTRS, \
+++ IS_CMPISTRI)
+++
+++result_t test_mm_cmpistrs(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRS_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
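+++// For cmpistrz the last field is the expected ZF flag: 1 when string b
+++// contains a terminating null element, 0 otherwise.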
+++#define TEST_MM_CMPISTRZ_UBYTE_DATA_LEN 4
+++static test_mm_cmpistri_ubyte_data_t
+++ test_mm_cmpistrz_ubyte_data[TEST_MM_CMPISTRZ_UBYTE_DATA_LEN] = {
+++ {{0},
+++ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+++ 255, 255},
+++ IMM_UBYTE_ANY_LEAST,
+++ 0},
+++ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+++ {1, 1, 1, 1, 2, 2, 2, 2, 4, 5, 6, 7, 89, 89, 89, 89},
+++ IMM_UBYTE_EACH_LEAST,
+++ 0},
+++ {{1, 2, 3, 4, 0}, {}, IMM_UBYTE_RANGES_LEAST, 1},
+++ {{127, 126, 125, 124, 124, 0},
+++ {100, 101, 123, 100, 111, 122, 0},
+++ IMM_UBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRZ_SBYTE_DATA_LEN 4
+++static test_mm_cmpistri_sbyte_data_t
+++ test_mm_cmpistrz_sbyte_data[TEST_MM_CMPISTRZ_SBYTE_DATA_LEN] = {
+++ {{127, 126, 99, -100, 0},
+++ {-128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128,
+++ -128, -128, -128, -128, -128},
+++ IMM_SBYTE_EACH_LEAST,
+++ 0},
+++ {{120, 66, 54, 0}, {3, 4, 5, -99, -6, 0}, IMM_SBYTE_ANY_LEAST, 1},
+++ {{0},
+++ {127, 127, 127, 127, 126, 126, 126, 126, -127, -127, -127, -127, -1,
+++ -1, -1, -1},
+++ IMM_SBYTE_RANGES_LEAST,
+++ 0},
+++ {{12, 3, 4, 5, 6, 7, 8, 0},
+++ {-1, -2, -3, -4, -6, 75, 0},
+++ IMM_SBYTE_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRZ_UWORD_DATA_LEN 4
+++static test_mm_cmpistri_uword_data_t
+++ test_mm_cmpistrz_uword_data[TEST_MM_CMPISTRZ_UWORD_DATA_LEN] = {
+++ {{10000, 20000, 50000, 40000, 0},
+++ {65535, 65533, 60000, 60000, 50000, 123, 1, 2},
+++ IMM_UWORD_EACH_LEAST,
+++ 0},
+++ {{0},
+++ {65528, 65529, 65530, 65531, 65532, 65533, 65534, 65535},
+++ IMM_UWORD_ANY_LEAST,
+++ 0},
+++ {{3, 333, 3333, 33333, 0}, {0}, IMM_UWORD_RANGES_LEAST, 1},
+++ {{123, 456, 7, 890, 0},
+++ {123, 456, 7, 900, 0},
+++ IMM_UWORD_ORDERED_LEAST,
+++ 1},
+++};
+++
+++#define TEST_MM_CMPISTRZ_SWORD_DATA_LEN 4
+++static test_mm_cmpistri_sword_data_t
+++ test_mm_cmpistrz_sword_data[TEST_MM_CMPISTRZ_SWORD_DATA_LEN] = {
+++ {{2, 22, 222, 2222, 22222, -2222, -222, -22},
+++ {-32768, 32767, -32767, 32766, -32766, 32765, -32768, 32767},
+++ IMM_SWORD_RANGES_LEAST,
+++ 0},
+++ {{345, 10000, -10000, -30000, 0},
+++ {1, 2, 3, 4, 5, 6, 7, 0},
+++ IMM_SWORD_EACH_LEAST,
+++ 1},
+++ {{}, {0}, IMM_SWORD_ANY_LEAST, 1},
+++ {{1, 2, -789, -1, -90, 0},
+++ {1, 10, 100, 1000, 10000, -10000, -1000, 1000},
+++ IMM_SWORD_ORDERED_LEAST,
+++ 0},
+++};
+++
+++#define MM_CMPISTRZ_UBYTE_TEST_CASES(_, ...) \
+++ _(UBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(UBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(UBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(UBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRZ_SBYTE_TEST_CASES(_, ...) \
+++ _(SBYTE_EACH_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ANY_LEAST, __VA_ARGS__) \
+++ _(SBYTE_RANGES_LEAST, __VA_ARGS__) \
+++ _(SBYTE_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRZ_UWORD_TEST_CASES(_, ...) \
+++ _(UWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(UWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(UWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(UWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define MM_CMPISTRZ_SWORD_TEST_CASES(_, ...) \
+++ _(SWORD_RANGES_LEAST, __VA_ARGS__) \
+++ _(SWORD_EACH_LEAST, __VA_ARGS__) \
+++ _(SWORD_ANY_LEAST, __VA_ARGS__) \
+++ _(SWORD_ORDERED_LEAST, __VA_ARGS__)
+++
+++#define GENERATE_MM_CMPISTRZ_TEST_CASES \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UBYTE, ubyte, uint8_t, cmpistrz, CMPISTRZ, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SBYTE, sbyte, int8_t, cmpistrz, CMPISTRZ, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(UWORD, uword, uint16_t, cmpistrz, CMPISTRZ, \
+++ IS_CMPISTRI) \
+++ ENUM_MM_CMPISTRX_TEST_CASES(SWORD, sword, int16_t, cmpistrz, CMPISTRZ, \
+++ IS_CMPISTRI)
+++
+++result_t test_mm_cmpistrz(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ GENERATE_MM_CMPISTRZ_TEST_CASES
+++ return TEST_SUCCESS;
+++}
+++
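+++// CRC32: each test compares the intrinsic result against the
+++// canonical_crc32_* reference helpers.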
+++result_t test_mm_crc32_u16(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint32_t crc = *(const uint32_t *) impl.mTestIntPointer1;
+++ uint16_t v = iter;
+++ uint32_t result = _mm_crc32_u16(crc, v);
+++ ASSERT_RETURN(result == canonical_crc32_u16(crc, v));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_crc32_u32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint32_t crc = *(const uint32_t *) impl.mTestIntPointer1;
+++ uint32_t v = *(const uint32_t *) impl.mTestIntPointer2;
+++ uint32_t result = _mm_crc32_u32(crc, v);
+++ ASSERT_RETURN(result == canonical_crc32_u32(crc, v));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_crc32_u64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint64_t crc = *(const uint64_t *) impl.mTestIntPointer1;
+++ uint64_t v = *(const uint64_t *) impl.mTestIntPointer2;
+++ uint64_t result = _mm_crc32_u64(crc, v);
+++ ASSERT_RETURN(result == canonical_crc32_u64(crc, v));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_crc32_u8(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint32_t crc = *(const uint32_t *) impl.mTestIntPointer1;
+++ uint8_t v = iter;
+++ uint32_t result = _mm_crc32_u8(crc, v);
+++ ASSERT_RETURN(result == canonical_crc32_u8(crc, v));
+++ return TEST_SUCCESS;
+++}
+++
+++/* AES */
+++result_t test_mm_aesenc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *a = (int32_t *) impl.mTestIntPointer1;
+++ const int32_t *b = (int32_t *) impl.mTestIntPointer2;
+++ __m128i data = _mm_loadu_si128((const __m128i *) a);
+++ __m128i rk = _mm_loadu_si128((const __m128i *) b);
+++
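+++ // One AES encryption round (ShiftRows, SubBytes, MixColumns, AddRoundKey):
+++ // the C reference model is compared against the intrinsic.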
+++ __m128i resultReference = aesenc_128_reference(data, rk);
+++ __m128i resultIntrinsic = _mm_aesenc_si128(data, rk);
+++
+++ return validate128(resultReference, resultIntrinsic);
+++}
+++
+++result_t test_mm_aesdec_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *a = (int32_t *) impl.mTestIntPointer1;
+++ const int32_t *b = (int32_t *) impl.mTestIntPointer2;
+++ __m128i data = _mm_loadu_si128((const __m128i *) a);
+++ __m128i rk = _mm_loadu_si128((const __m128i *) b);
+++
+++ __m128i resultReference = aesdec_128_reference(data, rk);
+++ __m128i resultIntrinsic = _mm_aesdec_si128(data, rk);
+++
+++ return validate128(resultReference, resultIntrinsic);
+++}
+++
+++result_t test_mm_aesenclast_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const int32_t *a = (const int32_t *) impl.mTestIntPointer1;
+++ const int32_t *b = (const int32_t *) impl.mTestIntPointer2;
+++ __m128i data = _mm_loadu_si128((const __m128i *) a);
+++ __m128i rk = _mm_loadu_si128((const __m128i *) b);
+++
+++ __m128i resultReference = aesenclast_128_reference(data, rk);
+++ __m128i resultIntrinsic = _mm_aesenclast_si128(data, rk);
+++
+++ return validate128(resultReference, resultIntrinsic);
+++}
+++
+++result_t test_mm_aesdeclast_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *a = (uint8_t *) impl.mTestIntPointer1;
+++ const uint8_t *rk = (uint8_t *) impl.mTestIntPointer2;
+++ __m128i _a = _mm_loadu_si128((const __m128i *) a);
+++ __m128i _rk = _mm_loadu_si128((const __m128i *) rk);
+++ uint8_t c[16] = {};
+++
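+++ // Reference for the last decryption round: InvShiftRows and InvSubBytes via
+++ // the inverse S-box lookup, then AddRoundKey (no InvMixColumns).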
+++ uint8_t v[4][4];
+++ for (int i = 0; i < 16; ++i) {
+++ v[((i / 4) + (i % 4)) % 4][i % 4] = crypto_aes_rsbox[a[i]];
+++ }
+++ for (int i = 0; i < 16; ++i) {
+++ c[i] = v[i / 4][i % 4] ^ rk[i];
+++ }
+++
+++ __m128i result_reference = _mm_loadu_si128((const __m128i *) c);
+++ __m128i result_intrinsic = _mm_aesdeclast_si128(_a, _rk);
+++
+++ return validate128(result_reference, result_intrinsic);
+++}
+++
+++result_t test_mm_aesimc_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint8_t *a = (uint8_t *) impl.mTestIntPointer1;
+++ __m128i _a = _mm_loadu_si128((const __m128i *) a);
+++
+++ uint8_t e, f, g, h, v[4][4];
+++ for (int i = 0; i < 16; ++i) {
+++ ((uint8_t *) v)[i] = a[i];
+++ }
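+++ // InvMixColumns: multiply each column by the inverse MDS matrix
+++ // {0x0e, 0x0b, 0x0d, 0x09} over GF(2^8).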
+++ for (int i = 0; i < 4; ++i) {
+++ e = v[i][0];
+++ f = v[i][1];
+++ g = v[i][2];
+++ h = v[i][3];
+++
+++ v[i][0] = MULTIPLY(e, 0x0e) ^ MULTIPLY(f, 0x0b) ^ MULTIPLY(g, 0x0d) ^
+++ MULTIPLY(h, 0x09);
+++ v[i][1] = MULTIPLY(e, 0x09) ^ MULTIPLY(f, 0x0e) ^ MULTIPLY(g, 0x0b) ^
+++ MULTIPLY(h, 0x0d);
+++ v[i][2] = MULTIPLY(e, 0x0d) ^ MULTIPLY(f, 0x09) ^ MULTIPLY(g, 0x0e) ^
+++ MULTIPLY(h, 0x0b);
+++ v[i][3] = MULTIPLY(e, 0x0b) ^ MULTIPLY(f, 0x0d) ^ MULTIPLY(g, 0x09) ^
+++ MULTIPLY(h, 0x0e);
+++ }
+++
+++ __m128i result_reference = _mm_loadu_si128((const __m128i *) v);
+++ __m128i result_intrinsic = _mm_aesimc_si128(_a);
+++
+++ return validate128(result_reference, result_intrinsic);
+++}
+++
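+++// Apply the AES S-box to each byte of a 32-bit word (the SubWord step of the
+++// AES key expansion).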
+++static inline uint32_t sub_word(uint32_t in)
+++{
+++ return (crypto_aes_sbox[(in >> 24) & 0xff] << 24) |
+++ (crypto_aes_sbox[(in >> 16) & 0xff] << 16) |
+++ (crypto_aes_sbox[(in >> 8) & 0xff] << 8) |
+++ (crypto_aes_sbox[in & 0xff]);
+++}
+++
+++// FIXME: improve the test case for AES-256 key expansion.
+++// Reference:
+++// https://github.com/randombit/botan/blob/master/src/lib/block/aes/aes_ni/aes_ni.cpp
+++result_t test_mm_aeskeygenassist_si128(const SSE2NEONTestImpl &impl,
+++ uint32_t iter)
+++{
+++ const uint32_t *a = (uint32_t *) impl.mTestIntPointer1;
+++ __m128i data = load_m128i(a);
+++ uint32_t sub_x1 = sub_word(a[1]);
+++ uint32_t sub_x3 = sub_word(a[3]);
+++ __m128i result_reference;
+++ __m128i result_intrinsic;
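+++// Expected layout per the Intel documentation: {SubWord(X1),
+++// RotWord(SubWord(X1)) ^ IDX, SubWord(X3), RotWord(SubWord(X3)) ^ IDX},
+++// where IDX is the round constant immediate.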
+++#define TEST_IMPL(IDX) \
+++ uint32_t res##IDX[4] = { \
+++ sub_x1, \
+++ rotr(sub_x1, 8) ^ IDX, \
+++ sub_x3, \
+++ rotr(sub_x3, 8) ^ IDX, \
+++ }; \
+++ result_reference = load_m128i(res##IDX); \
+++ result_intrinsic = _mm_aeskeygenassist_si128(data, IDX); \
+++ CHECK_RESULT(validate128(result_reference, result_intrinsic));
+++
+++ IMM_256_ITER
+++#undef TEST_IMPL
+++ return TEST_SUCCESS;
+++}
+++
+++/* Others */
+++result_t test_mm_clmulepi64_si128(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint64_t *_a = (const uint64_t *) impl.mTestIntPointer1;
+++ const uint64_t *_b = (const uint64_t *) impl.mTestIntPointer2;
+++ __m128i a = load_m128i(_a);
+++ __m128i b = load_m128i(_b);
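+++ // imm8 selects the 64-bit halves to multiply: bit 0 picks the half of a,
+++ // bit 4 picks the half of b; all four combinations are validated.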
+++ auto result = clmul_64(_a[0], _b[0]);
+++ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x00), result.first,
+++ result.second))
+++ return TEST_FAIL;
+++ result = clmul_64(_a[1], _b[0]);
+++ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x01), result.first,
+++ result.second))
+++ return TEST_FAIL;
+++ result = clmul_64(_a[0], _b[1]);
+++ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x10), result.first,
+++ result.second))
+++ return TEST_FAIL;
+++ result = clmul_64(_a[1], _b[1]);
+++ if (!validateUInt64(_mm_clmulepi64_si128(a, b, 0x11), result.first,
+++ result.second))
+++ return TEST_FAIL;
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_get_denormals_zero_mode(const SSE2NEONTestImpl &impl,
+++ uint32_t iter)
+++{
+++ int res_denormals_zero_on, res_denormals_zero_off;
+++
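+++ // Toggle the denormals-are-zero mode both ways and check that the getter
+++ // reflects each setting.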
+++ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+++ res_denormals_zero_on =
+++ _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON;
+++
+++ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_OFF);
+++ res_denormals_zero_off =
+++ _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_OFF;
+++
+++ return (res_denormals_zero_on && res_denormals_zero_off) ? TEST_SUCCESS
+++ : TEST_FAIL;
+++}
+++
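+++// Bit-by-bit population count, used as the reference for _mm_popcnt_u32/u64.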
+++static int popcnt_reference(uint64_t a)
+++{
+++ int count = 0;
+++ while (a != 0) {
+++ count += a & 1;
+++ a >>= 1;
+++ }
+++ return count;
+++}
+++
+++result_t test_mm_popcnt_u32(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint64_t *a = (const uint64_t *) impl.mTestIntPointer1;
+++ ASSERT_RETURN(popcnt_reference((uint32_t) a[0]) ==
+++ _mm_popcnt_u32((unsigned int) a[0]));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_popcnt_u64(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ const uint64_t *a = (const uint64_t *) impl.mTestIntPointer1;
+++ ASSERT_RETURN(popcnt_reference(a[0]) == _mm_popcnt_u64(a[0]));
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_mm_set_denormals_zero_mode(const SSE2NEONTestImpl &impl,
+++ uint32_t iter)
+++{
+++ result_t res_set_denormals_zero_on, res_set_denormals_zero_off;
+++ float factor = 2;
+++ float denormal = FLT_MIN / factor;
+++ float denormals[4] = {denormal, denormal, denormal, denormal};
+++ float factors[4] = {factor, factor, factor, factor};
+++ __m128 ret;
+++
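+++ // With DAZ enabled the denormal input is treated as zero, so the product is
+++ // zero; with DAZ disabled the product should be FLT_MIN (except on AArch32,
+++ // see below).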
+++ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
+++ ret = _mm_mul_ps(load_m128(denormals), load_m128(factors));
+++ res_set_denormals_zero_on = validateFloat(ret, 0, 0, 0, 0);
+++
+++ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_OFF);
+++ ret = _mm_mul_ps(load_m128(denormals), load_m128(factors));
+++#if defined(__arm__)
+++ // AArch32 Advanced SIMD arithmetic always uses the Flush-to-zero setting,
+++ // regardless of the value of the FZ bit.
+++ res_set_denormals_zero_off = validateFloat(ret, 0, 0, 0, 0);
+++#else
+++ res_set_denormals_zero_off =
+++ validateFloat(ret, FLT_MIN, FLT_MIN, FLT_MIN, FLT_MIN);
+++#endif
+++
+++ if (res_set_denormals_zero_on == TEST_FAIL ||
+++ res_set_denormals_zero_off == TEST_FAIL)
+++ return TEST_FAIL;
+++ return TEST_SUCCESS;
+++}
+++
+++result_t test_rdtsc(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ uint64_t start = _rdtsc();
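+++ // Busy-wait with a compiler barrier so the loop is not optimized away
+++ // between the two timestamp reads.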
+++ for (int i = 0; i < 100000; i++) {
+++#if defined(_MSC_VER)
+++ _ReadWriteBarrier();
+++#else
+++ __asm__ __volatile__("" ::: "memory");
+++#endif
+++ }
+++ uint64_t end = _rdtsc();
+++ return end > start ? TEST_SUCCESS : TEST_FAIL;
+++}
+++
+++SSE2NEONTestImpl::SSE2NEONTestImpl(void)
+++{
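+++ // Allocate one aligned 16-byte buffer per vector operand and fill the
+++ // float/int pools with random values from a fixed seed.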
+++ mTestFloatPointer1 = (float *) platformAlignedAlloc(sizeof(__m128));
+++ mTestFloatPointer2 = (float *) platformAlignedAlloc(sizeof(__m128));
+++ mTestIntPointer1 = (int32_t *) platformAlignedAlloc(sizeof(__m128i));
+++ mTestIntPointer2 = (int32_t *) platformAlignedAlloc(sizeof(__m128i));
+++ SSE2NEON_INIT_RNG(123456);
+++ for (uint32_t i = 0; i < MAX_TEST_VALUE; i++) {
+++ mTestFloats[i] = ranf(-100000, 100000);
+++ mTestInts[i] = (int32_t) ranf(-100000, 100000);
+++ }
+++}
+++
+++// Dummy function to match the case label in runSingleTest.
+++result_t test_last(const SSE2NEONTestImpl &impl, uint32_t iter)
+++{
+++ return TEST_SUCCESS;
+++}
+++
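+++// Copy eight consecutive random values from the pools into the two aligned
+++// operand buffers consumed by the individual tests.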
+++result_t SSE2NEONTestImpl::loadTestFloatPointers(uint32_t i)
+++{
+++ result_t ret =
+++ do_mm_store_ps(mTestFloatPointer1, mTestFloats[i], mTestFloats[i + 1],
+++ mTestFloats[i + 2], mTestFloats[i + 3]);
+++ if (ret == TEST_SUCCESS) {
+++ ret = do_mm_store_ps(mTestFloatPointer2, mTestFloats[i + 4],
+++ mTestFloats[i + 5], mTestFloats[i + 6],
+++ mTestFloats[i + 7]);
+++ }
+++ return ret;
+++}
+++
+++result_t SSE2NEONTestImpl::loadTestIntPointers(uint32_t i)
+++{
+++ result_t ret =
+++ do_mm_store_ps(mTestIntPointer1, mTestInts[i], mTestInts[i + 1],
+++ mTestInts[i + 2], mTestInts[i + 3]);
+++ if (ret == TEST_SUCCESS) {
+++ ret =
+++ do_mm_store_ps(mTestIntPointer2, mTestInts[i + 4], mTestInts[i + 5],
+++ mTestInts[i + 6], mTestInts[i + 7]);
+++ }
+++
+++ return ret;
+++}
+++
+++result_t SSE2NEONTestImpl::runSingleTest(InstructionTest test, uint32_t i)
+++{
+++ result_t ret = TEST_SUCCESS;
+++
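+++ // X-macro dispatch: INTRIN_LIST expands to one case per intrinsic, each
+++ // calling its matching test_* function.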
+++ switch (test) {
+++#define _(x) \
+++ case it_##x: \
+++ ret = test_##x(*this, i); \
+++ break;
+++ INTRIN_LIST
+++#undef _
+++ }
+++
+++ return ret;
+++}
+++
+++SSE2NEONTest *SSE2NEONTest::create(void)
+++{
+++ SSE2NEONTestImpl *st = new SSE2NEONTestImpl;
+++ return static_cast<SSE2NEONTest *>(st);
+++}
+++
+++} // namespace SSE2NEON
--- /dev/null
--- /dev/null
--- /dev/null
+++#ifndef SSE2NEONTEST_H
+++#define SSE2NEONTEST_H
+++
+++#include "common.h"
+++
+++#define INTRIN_LIST \
+++ /* MMX */ \
+++ _(mm_empty) \
+++ /* SSE */ \
+++ _(mm_add_ps) \
+++ _(mm_add_ss) \
+++ _(mm_and_ps) \
+++ _(mm_andnot_ps) \
+++ _(mm_avg_pu16) \
+++ _(mm_avg_pu8) \
+++ _(mm_cmpeq_ps) \
+++ _(mm_cmpeq_ss) \
+++ _(mm_cmpge_ps) \
+++ _(mm_cmpge_ss) \
+++ _(mm_cmpgt_ps) \
+++ _(mm_cmpgt_ss) \
+++ _(mm_cmple_ps) \
+++ _(mm_cmple_ss) \
+++ _(mm_cmplt_ps) \
+++ _(mm_cmplt_ss) \
+++ _(mm_cmpneq_ps) \
+++ _(mm_cmpneq_ss) \
+++ _(mm_cmpnge_ps) \
+++ _(mm_cmpnge_ss) \
+++ _(mm_cmpngt_ps) \
+++ _(mm_cmpngt_ss) \
+++ _(mm_cmpnle_ps) \
+++ _(mm_cmpnle_ss) \
+++ _(mm_cmpnlt_ps) \
+++ _(mm_cmpnlt_ss) \
+++ _(mm_cmpord_ps) \
+++ _(mm_cmpord_ss) \
+++ _(mm_cmpunord_ps) \
+++ _(mm_cmpunord_ss) \
+++ _(mm_comieq_ss) \
+++ _(mm_comige_ss) \
+++ _(mm_comigt_ss) \
+++ _(mm_comile_ss) \
+++ _(mm_comilt_ss) \
+++ _(mm_comineq_ss) \
+++ _(mm_cvt_pi2ps) \
+++ _(mm_cvt_ps2pi) \
+++ _(mm_cvt_si2ss) \
+++ _(mm_cvt_ss2si) \
+++ _(mm_cvtpi16_ps) \
+++ _(mm_cvtpi32_ps) \
+++ _(mm_cvtpi32x2_ps) \
+++ _(mm_cvtpi8_ps) \
+++ _(mm_cvtps_pi16) \
+++ _(mm_cvtps_pi32) \
+++ _(mm_cvtps_pi8) \
+++ _(mm_cvtpu16_ps) \
+++ _(mm_cvtpu8_ps) \
+++ _(mm_cvtsi32_ss) \
+++ _(mm_cvtsi64_ss) \
+++ _(mm_cvtss_f32) \
+++ _(mm_cvtss_si32) \
+++ _(mm_cvtss_si64) \
+++ _(mm_cvtt_ps2pi) \
+++ _(mm_cvtt_ss2si) \
+++ _(mm_cvttps_pi32) \
+++ _(mm_cvttss_si32) \
+++ _(mm_cvttss_si64) \
+++ _(mm_div_ps) \
+++ _(mm_div_ss) \
+++ _(mm_extract_pi16) \
+++ _(mm_free) \
+++ _(mm_get_flush_zero_mode) \
+++ _(mm_get_rounding_mode) \
+++ _(mm_getcsr) \
+++ _(mm_insert_pi16) \
+++ _(mm_load_ps) \
+++ _(mm_load_ps1) \
+++ _(mm_load_ss) \
+++ _(mm_load1_ps) \
+++ _(mm_loadh_pi) \
+++ _(mm_loadl_pi) \
+++ _(mm_loadr_ps) \
+++ _(mm_loadu_ps) \
+++ _(mm_loadu_si16) \
+++ _(mm_loadu_si64) \
+++ _(mm_malloc) \
+++ _(mm_maskmove_si64) \
+++ _(m_maskmovq) \
+++ _(mm_max_pi16) \
+++ _(mm_max_ps) \
+++ _(mm_max_pu8) \
+++ _(mm_max_ss) \
+++ _(mm_min_pi16) \
+++ _(mm_min_ps) \
+++ _(mm_min_pu8) \
+++ _(mm_min_ss) \
+++ _(mm_move_ss) \
+++ _(mm_movehl_ps) \
+++ _(mm_movelh_ps) \
+++ _(mm_movemask_pi8) \
+++ _(mm_movemask_ps) \
+++ _(mm_mul_ps) \
+++ _(mm_mul_ss) \
+++ _(mm_mulhi_pu16) \
+++ _(mm_or_ps) \
+++ _(m_pavgb) \
+++ _(m_pavgw) \
+++ _(m_pextrw) \
+++ _(m_pinsrw) \
+++ _(m_pmaxsw) \
+++ _(m_pmaxub) \
+++ _(m_pminsw) \
+++ _(m_pminub) \
+++ _(m_pmovmskb) \
+++ _(m_pmulhuw) \
+++ _(mm_prefetch) \
+++ _(m_psadbw) \
+++ _(m_pshufw) \
+++ _(mm_rcp_ps) \
+++ _(mm_rcp_ss) \
+++ _(mm_rsqrt_ps) \
+++ _(mm_rsqrt_ss) \
+++ _(mm_sad_pu8) \
+++ _(mm_set_flush_zero_mode) \
+++ _(mm_set_ps) \
+++ _(mm_set_ps1) \
+++ _(mm_set_rounding_mode) \
+++ _(mm_set_ss) \
+++ _(mm_set1_ps) \
+++ _(mm_setcsr) \
+++ _(mm_setr_ps) \
+++ _(mm_setzero_ps) \
+++ _(mm_sfence) \
+++ _(mm_shuffle_pi16) \
+++ _(mm_shuffle_ps) \
+++ _(mm_sqrt_ps) \
+++ _(mm_sqrt_ss) \
+++ _(mm_store_ps) \
+++ _(mm_store_ps1) \
+++ _(mm_store_ss) \
+++ _(mm_store1_ps) \
+++ _(mm_storeh_pi) \
+++ _(mm_storel_pi) \
+++ _(mm_storer_ps) \
+++ _(mm_storeu_ps) \
+++ _(mm_storeu_si16) \
+++ _(mm_storeu_si64) \
+++ _(mm_stream_pi) \
+++ _(mm_stream_ps) \
+++ _(mm_sub_ps) \
+++ _(mm_sub_ss) \
+++ _(mm_ucomieq_ss) \
+++ _(mm_ucomige_ss) \
+++ _(mm_ucomigt_ss) \
+++ _(mm_ucomile_ss) \
+++ _(mm_ucomilt_ss) \
+++ _(mm_ucomineq_ss) \
+++ _(mm_undefined_ps) \
+++ _(mm_unpackhi_ps) \
+++ _(mm_unpacklo_ps) \
+++ _(mm_xor_ps) \
+++ /* SSE2 */ \
+++ _(mm_add_epi16) \
+++ _(mm_add_epi32) \
+++ _(mm_add_epi64) \
+++ _(mm_add_epi8) \
+++ _(mm_add_pd) \
+++ _(mm_add_sd) \
+++ _(mm_add_si64) \
+++ _(mm_adds_epi16) \
+++ _(mm_adds_epi8) \
+++ _(mm_adds_epu16) \
+++ _(mm_adds_epu8) \
+++ _(mm_and_pd) \
+++ _(mm_and_si128) \
+++ _(mm_andnot_pd) \
+++ _(mm_andnot_si128) \
+++ _(mm_avg_epu16) \
+++ _(mm_avg_epu8) \
+++ _(mm_bslli_si128) \
+++ _(mm_bsrli_si128) \
+++ _(mm_castpd_ps) \
+++ _(mm_castpd_si128) \
+++ _(mm_castps_pd) \
+++ _(mm_castps_si128) \
+++ _(mm_castsi128_pd) \
+++ _(mm_castsi128_ps) \
+++ _(mm_clflush) \
+++ _(mm_cmpeq_epi16) \
+++ _(mm_cmpeq_epi32) \
+++ _(mm_cmpeq_epi8) \
+++ _(mm_cmpeq_pd) \
+++ _(mm_cmpeq_sd) \
+++ _(mm_cmpge_pd) \
+++ _(mm_cmpge_sd) \
+++ _(mm_cmpgt_epi16) \
+++ _(mm_cmpgt_epi32) \
+++ _(mm_cmpgt_epi8) \
+++ _(mm_cmpgt_pd) \
+++ _(mm_cmpgt_sd) \
+++ _(mm_cmple_pd) \
+++ _(mm_cmple_sd) \
+++ _(mm_cmplt_epi16) \
+++ _(mm_cmplt_epi32) \
+++ _(mm_cmplt_epi8) \
+++ _(mm_cmplt_pd) \
+++ _(mm_cmplt_sd) \
+++ _(mm_cmpneq_pd) \
+++ _(mm_cmpneq_sd) \
+++ _(mm_cmpnge_pd) \
+++ _(mm_cmpnge_sd) \
+++ _(mm_cmpngt_pd) \
+++ _(mm_cmpngt_sd) \
+++ _(mm_cmpnle_pd) \
+++ _(mm_cmpnle_sd) \
+++ _(mm_cmpnlt_pd) \
+++ _(mm_cmpnlt_sd) \
+++ _(mm_cmpord_pd) \
+++ _(mm_cmpord_sd) \
+++ _(mm_cmpunord_pd) \
+++ _(mm_cmpunord_sd) \
+++ _(mm_comieq_sd) \
+++ _(mm_comige_sd) \
+++ _(mm_comigt_sd) \
+++ _(mm_comile_sd) \
+++ _(mm_comilt_sd) \
+++ _(mm_comineq_sd) \
+++ _(mm_cvtepi32_pd) \
+++ _(mm_cvtepi32_ps) \
+++ _(mm_cvtpd_epi32) \
+++ _(mm_cvtpd_pi32) \
+++ _(mm_cvtpd_ps) \
+++ _(mm_cvtpi32_pd) \
+++ _(mm_cvtps_epi32) \
+++ _(mm_cvtps_pd) \
+++ _(mm_cvtsd_f64) \
+++ _(mm_cvtsd_si32) \
+++ _(mm_cvtsd_si64) \
+++ _(mm_cvtsd_si64x) \
+++ _(mm_cvtsd_ss) \
+++ _(mm_cvtsi128_si32) \
+++ _(mm_cvtsi128_si64) \
+++ _(mm_cvtsi128_si64x) \
+++ _(mm_cvtsi32_sd) \
+++ _(mm_cvtsi32_si128) \
+++ _(mm_cvtsi64_sd) \
+++ _(mm_cvtsi64_si128) \
+++ _(mm_cvtsi64x_sd) \
+++ _(mm_cvtsi64x_si128) \
+++ _(mm_cvtss_sd) \
+++ _(mm_cvttpd_epi32) \
+++ _(mm_cvttpd_pi32) \
+++ _(mm_cvttps_epi32) \
+++ _(mm_cvttsd_si32) \
+++ _(mm_cvttsd_si64) \
+++ _(mm_cvttsd_si64x) \
+++ _(mm_div_pd) \
+++ _(mm_div_sd) \
+++ _(mm_extract_epi16) \
+++ _(mm_insert_epi16) \
+++ _(mm_lfence) \
+++ _(mm_load_pd) \
+++ _(mm_load_pd1) \
+++ _(mm_load_sd) \
+++ _(mm_load_si128) \
+++ _(mm_load1_pd) \
+++ _(mm_loadh_pd) \
+++ _(mm_loadl_epi64) \
+++ _(mm_loadl_pd) \
+++ _(mm_loadr_pd) \
+++ _(mm_loadu_pd) \
+++ _(mm_loadu_si128) \
+++ _(mm_loadu_si32) \
+++ _(mm_madd_epi16) \
+++ _(mm_maskmoveu_si128) \
+++ _(mm_max_epi16) \
+++ _(mm_max_epu8) \
+++ _(mm_max_pd) \
+++ _(mm_max_sd) \
+++ _(mm_mfence) \
+++ _(mm_min_epi16) \
+++ _(mm_min_epu8) \
+++ _(mm_min_pd) \
+++ _(mm_min_sd) \
+++ _(mm_move_epi64) \
+++ _(mm_move_sd) \
+++ _(mm_movemask_epi8) \
+++ _(mm_movemask_pd) \
+++ _(mm_movepi64_pi64) \
+++ _(mm_movpi64_epi64) \
+++ _(mm_mul_epu32) \
+++ _(mm_mul_pd) \
+++ _(mm_mul_sd) \
+++ _(mm_mul_su32) \
+++ _(mm_mulhi_epi16) \
+++ _(mm_mulhi_epu16) \
+++ _(mm_mullo_epi16) \
+++ _(mm_or_pd) \
+++ _(mm_or_si128) \
+++ _(mm_packs_epi16) \
+++ _(mm_packs_epi32) \
+++ _(mm_packus_epi16) \
+++ _(mm_pause) \
+++ _(mm_sad_epu8) \
+++ _(mm_set_epi16) \
+++ _(mm_set_epi32) \
+++ _(mm_set_epi64) \
+++ _(mm_set_epi64x) \
+++ _(mm_set_epi8) \
+++ _(mm_set_pd) \
+++ _(mm_set_pd1) \
+++ _(mm_set_sd) \
+++ _(mm_set1_epi16) \
+++ _(mm_set1_epi32) \
+++ _(mm_set1_epi64) \
+++ _(mm_set1_epi64x) \
+++ _(mm_set1_epi8) \
+++ _(mm_set1_pd) \
+++ _(mm_setr_epi16) \
+++ _(mm_setr_epi32) \
+++ _(mm_setr_epi64) \
+++ _(mm_setr_epi8) \
+++ _(mm_setr_pd) \
+++ _(mm_setzero_pd) \
+++ _(mm_setzero_si128) \
+++ _(mm_shuffle_epi32) \
+++ _(mm_shuffle_pd) \
+++ _(mm_shufflehi_epi16) \
+++ _(mm_shufflelo_epi16) \
+++ _(mm_sll_epi16) \
+++ _(mm_sll_epi32) \
+++ _(mm_sll_epi64) \
+++ _(mm_slli_epi16) \
+++ _(mm_slli_epi32) \
+++ _(mm_slli_epi64) \
+++ _(mm_slli_si128) \
+++ _(mm_sqrt_pd) \
+++ _(mm_sqrt_sd) \
+++ _(mm_sra_epi16) \
+++ _(mm_sra_epi32) \
+++ _(mm_srai_epi16) \
+++ _(mm_srai_epi32) \
+++ _(mm_srl_epi16) \
+++ _(mm_srl_epi32) \
+++ _(mm_srl_epi64) \
+++ _(mm_srli_epi16) \
+++ _(mm_srli_epi32) \
+++ _(mm_srli_epi64) \
+++ _(mm_srli_si128) \
+++ _(mm_store_pd) \
+++ _(mm_store_pd1) \
+++ _(mm_store_sd) \
+++ _(mm_store_si128) \
+++ _(mm_store1_pd) \
+++ _(mm_storeh_pd) \
+++ _(mm_storel_epi64) \
+++ _(mm_storel_pd) \
+++ _(mm_storer_pd) \
+++ _(mm_storeu_pd) \
+++ _(mm_storeu_si128) \
+++ _(mm_storeu_si32) \
+++ _(mm_stream_pd) \
+++ _(mm_stream_si128) \
+++ _(mm_stream_si32) \
+++ _(mm_stream_si64) \
+++ _(mm_sub_epi16) \
+++ _(mm_sub_epi32) \
+++ _(mm_sub_epi64) \
+++ _(mm_sub_epi8) \
+++ _(mm_sub_pd) \
+++ _(mm_sub_sd) \
+++ _(mm_sub_si64) \
+++ _(mm_subs_epi16) \
+++ _(mm_subs_epi8) \
+++ _(mm_subs_epu16) \
+++ _(mm_subs_epu8) \
+++ _(mm_ucomieq_sd) \
+++ _(mm_ucomige_sd) \
+++ _(mm_ucomigt_sd) \
+++ _(mm_ucomile_sd) \
+++ _(mm_ucomilt_sd) \
+++ _(mm_ucomineq_sd) \
+++ _(mm_undefined_pd) \
+++ _(mm_undefined_si128) \
+++ _(mm_unpackhi_epi16) \
+++ _(mm_unpackhi_epi32) \
+++ _(mm_unpackhi_epi64) \
+++ _(mm_unpackhi_epi8) \
+++ _(mm_unpackhi_pd) \
+++ _(mm_unpacklo_epi16) \
+++ _(mm_unpacklo_epi32) \
+++ _(mm_unpacklo_epi64) \
+++ _(mm_unpacklo_epi8) \
+++ _(mm_unpacklo_pd) \
+++ _(mm_xor_pd) \
+++ _(mm_xor_si128) \
+++ /* SSE3 */ \
+++ _(mm_addsub_pd) \
+++ _(mm_addsub_ps) \
+++ _(mm_hadd_pd) \
+++ _(mm_hadd_ps) \
+++ _(mm_hsub_pd) \
+++ _(mm_hsub_ps) \
+++ _(mm_lddqu_si128) \
+++ _(mm_loaddup_pd) \
+++ _(mm_movedup_pd) \
+++ _(mm_movehdup_ps) \
+++ _(mm_moveldup_ps) \
+++ /* SSSE3 */ \
+++ _(mm_abs_epi16) \
+++ _(mm_abs_epi32) \
+++ _(mm_abs_epi8) \
+++ _(mm_abs_pi16) \
+++ _(mm_abs_pi32) \
+++ _(mm_abs_pi8) \
+++ _(mm_alignr_epi8) \
+++ _(mm_alignr_pi8) \
+++ _(mm_hadd_epi16) \
+++ _(mm_hadd_epi32) \
+++ _(mm_hadd_pi16) \
+++ _(mm_hadd_pi32) \
+++ _(mm_hadds_epi16) \
+++ _(mm_hadds_pi16) \
+++ _(mm_hsub_epi16) \
+++ _(mm_hsub_epi32) \
+++ _(mm_hsub_pi16) \
+++ _(mm_hsub_pi32) \
+++ _(mm_hsubs_epi16) \
+++ _(mm_hsubs_pi16) \
+++ _(mm_maddubs_epi16) \
+++ _(mm_maddubs_pi16) \
+++ _(mm_mulhrs_epi16) \
+++ _(mm_mulhrs_pi16) \
+++ _(mm_shuffle_epi8) \
+++ _(mm_shuffle_pi8) \
+++ _(mm_sign_epi16) \
+++ _(mm_sign_epi32) \
+++ _(mm_sign_epi8) \
+++ _(mm_sign_pi16) \
+++ _(mm_sign_pi32) \
+++ _(mm_sign_pi8) \
+++ /* SSE4.1 */ \
+++ _(mm_blend_epi16) \
+++ _(mm_blend_pd) \
+++ _(mm_blend_ps) \
+++ _(mm_blendv_epi8) \
+++ _(mm_blendv_pd) \
+++ _(mm_blendv_ps) \
+++ _(mm_ceil_pd) \
+++ _(mm_ceil_ps) \
+++ _(mm_ceil_sd) \
+++ _(mm_ceil_ss) \
+++ _(mm_cmpeq_epi64) \
+++ _(mm_cvtepi16_epi32) \
+++ _(mm_cvtepi16_epi64) \
+++ _(mm_cvtepi32_epi64) \
+++ _(mm_cvtepi8_epi16) \
+++ _(mm_cvtepi8_epi32) \
+++ _(mm_cvtepi8_epi64) \
+++ _(mm_cvtepu16_epi32) \
+++ _(mm_cvtepu16_epi64) \
+++ _(mm_cvtepu32_epi64) \
+++ _(mm_cvtepu8_epi16) \
+++ _(mm_cvtepu8_epi32) \
+++ _(mm_cvtepu8_epi64) \
+++ _(mm_dp_pd) \
+++ _(mm_dp_ps) \
+++ _(mm_extract_epi32) \
+++ _(mm_extract_epi64) \
+++ _(mm_extract_epi8) \
+++ _(mm_extract_ps) \
+++ _(mm_floor_pd) \
+++ _(mm_floor_ps) \
+++ _(mm_floor_sd) \
+++ _(mm_floor_ss) \
+++ _(mm_insert_epi32) \
+++ _(mm_insert_epi64) \
+++ _(mm_insert_epi8) \
+++ _(mm_insert_ps) \
+++ _(mm_max_epi32) \
+++ _(mm_max_epi8) \
+++ _(mm_max_epu16) \
+++ _(mm_max_epu32) \
+++ _(mm_min_epi32) \
+++ _(mm_min_epi8) \
+++ _(mm_min_epu16) \
+++ _(mm_min_epu32) \
+++ _(mm_minpos_epu16) \
+++ _(mm_mpsadbw_epu8) \
+++ _(mm_mul_epi32) \
+++ _(mm_mullo_epi32) \
+++ _(mm_packus_epi32) \
+++ _(mm_round_pd) \
+++ _(mm_round_ps) \
+++ _(mm_round_sd) \
+++ _(mm_round_ss) \
+++ _(mm_stream_load_si128) \
+++ _(mm_test_all_ones) \
+++ _(mm_test_all_zeros) \
+++ _(mm_test_mix_ones_zeros) \
+++ _(mm_testc_si128) \
+++ _(mm_testnzc_si128) \
+++ _(mm_testz_si128) \
+++ /* SSE4.2 */ \
+++ _(mm_cmpestra) \
+++ _(mm_cmpestrc) \
+++ _(mm_cmpestri) \
+++ _(mm_cmpestrm) \
+++ _(mm_cmpestro) \
+++ _(mm_cmpestrs) \
+++ _(mm_cmpestrz) \
+++ _(mm_cmpgt_epi64) \
+++ _(mm_cmpistra) \
+++ _(mm_cmpistrc) \
+++ _(mm_cmpistri) \
+++ _(mm_cmpistrm) \
+++ _(mm_cmpistro) \
+++ _(mm_cmpistrs) \
+++ _(mm_cmpistrz) \
+++ _(mm_crc32_u16) \
+++ _(mm_crc32_u32) \
+++ _(mm_crc32_u64) \
+++ _(mm_crc32_u8) \
+++ /* AES */ \
+++ _(mm_aesenc_si128) \
+++ _(mm_aesdec_si128) \
+++ _(mm_aesenclast_si128) \
+++ _(mm_aesdeclast_si128) \
+++ _(mm_aesimc_si128) \
+++ _(mm_aeskeygenassist_si128) \
+++ /* Others */ \
+++ _(mm_clmulepi64_si128) \
+++ _(mm_get_denormals_zero_mode) \
+++ _(mm_popcnt_u32) \
+++ _(mm_popcnt_u64) \
+++ _(mm_set_denormals_zero_mode) \
+++ _(rdtsc) \
+++ _(last) /* This indicates the end of macros */
+++
+++namespace SSE2NEON
+++{
+++// The unit tests work by generating 10,000 random floating-point and integer
+++// vec4 values as sample data.
+++//
+++// For every intrinsic, a short C reference implementation is run against all
+++// of the 10,000 randomized input vectors and compared with the results of the
+++// corresponding SSE intrinsic. When running on ARM, the results are compared
+++// with the (possibly approximate) NEON-based implementation instead.
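+++//
+++// As an illustration only (not part of the suite), every test_* function in
+++// impl.cpp follows roughly this pattern, sketched here for _mm_add_ps using
+++// helpers that appear elsewhere in these sources (load_m128, validateFloat,
+++// mTestFloatPointer1/2):
+++//
+++//     result_t test_mm_add_ps(const SSE2NEONTestImpl &impl, uint32_t iter)
+++//     {
+++//         const float *a = impl.mTestFloatPointer1;
+++//         const float *b = impl.mTestFloatPointer2;
+++//         // Reference result computed in plain C.
+++//         float r[4] = {a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]};
+++//         __m128 c = _mm_add_ps(load_m128(a), load_m128(b));
+++//         return validateFloat(c, r[0], r[1], r[2], r[3]);
+++//     }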
+++extern const char *instructionString[];
+++enum InstructionTest {
+++#define _(x) it_##x,
+++ INTRIN_LIST
+++#undef _
+++};
+++
+++class SSE2NEONTest
+++{
+++public:
+++ static SSE2NEONTest *create(void); // create the test.
+++
+++ // Run the test for this instruction. Return values:
+++ // Passed: TEST_SUCCESS (1)
+++ // Failed: TEST_FAIL (0)
+++ // Unimplemented: TEST_UNIMPL (-1)
+++ virtual result_t runTest(InstructionTest test) = 0;
+++ virtual void release(void) = 0;
+++};
+++
+++} // namespace SSE2NEON
+++
+++#endif
--- /dev/null
--- /dev/null
--- /dev/null
+++#include <stdint.h>
+++#include <stdio.h>
+++#include "impl.h"
+++
+++int main(int /*argc*/, const char ** /*argv*/)
+++{
+++ SSE2NEON::SSE2NEONTest *test = SSE2NEON::SSE2NEONTest::create();
+++ uint32_t passCount = 0;
+++ uint32_t failedCount = 0;
+++ uint32_t ignoreCount = 0;
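+++ // Run every test in INTRIN_LIST order and tally pass/fail/skip counts.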
+++ for (uint32_t i = 0; i < SSE2NEON::it_last; i++) {
+++ SSE2NEON::InstructionTest it = SSE2NEON::InstructionTest(i);
+++ SSE2NEON::result_t ret = test->runTest(it);
+++ // A failed test is only reported here; re-run the binary under a debugger
+++ // and step into the failing intrinsic to figure out why.
+++ if (ret == SSE2NEON::TEST_FAIL) {
+++ printf("Test %-30s failed\n", SSE2NEON::instructionString[it]);
+++ failedCount++;
+++ } else if (ret == SSE2NEON::TEST_UNIMPL) {
+++ printf("Test %-30s skipped\n", SSE2NEON::instructionString[it]);
+++ ignoreCount++;
+++ } else {
+++ printf("Test %-30s passed\n", SSE2NEON::instructionString[it]);
+++ passCount++;
+++ }
+++ }
+++ test->release();
+++ printf(
+++ "SSE2NEONTest Complete!\n"
+++ "Passed: %d\n"
+++ "Failed: %d\n"
+++ "Ignored: %d\n"
+++ "Coverage rate: %.2f%%\n",
+++ passCount, failedCount, ignoreCount,
+++ (float) passCount / (passCount + failedCount + ignoreCount) * 100);
+++
+++ return failedCount ? -1 : 0;
+++}