From 6a73bf101c420c67eef48b280502bf2c0c612143 Mon Sep 17 00:00:00 2001 From: GNU Libc Maintainers Date: Mon, 21 Feb 2022 08:47:11 +0000 Subject: [PATCH] git-updates GIT update of https://sourceware.org/git/glibc.git/release/2.33/master from glibc-2.33 GIT update of https://sourceware.org/git/glibc.git/release/2.33/master from glibc-2.33 Gbp-Pq: Name git-updates.diff --- NEWS | 61 + config.h.in | 6 + dlfcn/dlerror.c | 13 +- elf/Makefile | 41 +- elf/Versions | 2 +- elf/dl-cache.c | 124 +- elf/dl-diagnostics-cpu.c | 24 + elf/dl-diagnostics-kernel.c | 24 + elf/dl-diagnostics.c | 265 +++++ elf/dl-diagnostics.h | 46 + elf/dl-error-skeleton.c | 15 + elf/dl-libc.c | 8 +- elf/dl-load.c | 71 +- elf/dl-main.h | 5 +- elf/dl-open.c | 2 +- elf/dl-tunable-types.h | 21 +- elf/dl-tunables.c | 187 ++- elf/dl-tunables.h | 55 +- elf/dl-tunables.list | 6 + elf/dl-usage.c | 1 + elf/rtld.c | 19 +- elf/tst-dlmopen-dlerror-mod.c | 41 + elf/tst-dlmopen-dlerror.c | 37 + elf/tst-dlmopen-gethostbyname-mod.c | 29 + elf/tst-dlmopen-gethostbyname.c | 31 + elf/tst-dst-static.c | 32 + elf/tst-env-setuid-tunables.c | 118 +- elf/tst-env-setuid.c | 197 +--- elf/tst-rtld-list-tunables.exp | 2 +- iconvdata/Makefile | 5 +- iconvdata/bug-iconv15.c | 60 + iconvdata/iso-2022-jp-3.c | 28 +- include/libc-symbols.h | 26 +- include/sys/un.h | 12 + include/time.h | 5 + io/Makefile | 2 +- io/fstat.c | 6 + io/fstat64.c | 6 + io/tst-stat-lfs.c | 2 + io/tst-stat.c | 102 ++ malloc/malloc.c | 4 +- misc/Makefile | 2 +- misc/tst-select.c | 143 +++ nptl/Makefile | 26 +- nptl/pthreadP.h | 61 + nptl/pthread_create.c | 12 +- nptl/pthread_once.c | 4 +- nptl/tst-once5.cc | 4 +- nptl/tst-pthread-gdb-attach-static.c | 1 + nptl/tst-pthread-gdb-attach.c | 217 ++++ nptl_db/structs.def | 3 +- nptl_db/td_init.c | 15 +- nptl_db/thread_dbP.h | 2 + nscd/netgroupcache.c | 4 +- nss/nss_database.c | 4 +- .../etc/nsswitch.conf | 1 + nss/tst-reload2.c | 35 +- nss/tst-reload2.root/etc/hosts | 1 + nss/tst-reload2.root/etc/nsswitch.conf | 1 + nss/tst-reload2.root/subdir/etc/hosts | 1 + nss/tst-reload2.root/subdir/etc/nsswitch.conf | 1 + posix/bits/unistd.h | 5 +- posix/unistd.h | 3 +- posix/wordexp-test.c | 1 + posix/wordexp.c | 2 +- rt/Makefile | 1 + rt/tst-bz28213.c | 101 ++ socket/Makefile | 6 +- socket/opensock.c | 62 +- socket/sockaddr_un_set.c | 41 + socket/tst-sockaddr_un_set.c | 62 + stdlib/Makefile | 3 +- stdlib/canonicalize.c | 12 +- stdlib/tst-realpath-toolong.c | 53 + stdlib/tst-secure-getenv.c | 199 +--- string/rawmemchr.c | 26 +- string/test-memchr.c | 39 +- string/test-strncat.c | 61 + string/test-strncmp.c | 13 + string/test-strnlen.c | 67 +- sunrpc/Makefile | 5 +- sunrpc/clnt_gen.c | 10 +- sunrpc/svc_unix.c | 11 +- sunrpc/svcauth_des.c | 1 - sunrpc/tst-bug22542.c | 44 + sunrpc/tst-bug28768.c | 42 + support/Makefile | 4 + support/capture_subprocess.h | 6 + support/subprocess.h | 5 + support/support.h | 9 + support/support_capture_subprocess.c | 128 +- support/support_select_modifies_timeout.c | 29 + support/support_select_normalizes_timeout.c | 29 + support/support_subprocess.c | 21 +- support/temp_file.c | 161 ++- support/temp_file.h | 9 + support/test-container.c | 23 +- support/xclone.c | 49 + support/xpthread_kill.c | 26 + support/xsched.h | 34 + support/xthread.h | 2 + sysdeps/generic/ldsodefs.h | 11 + sysdeps/mach/hurd/if_index.c | 6 +- sysdeps/nios2/libm-test-ulps | 11 +- sysdeps/nptl/lowlevellock-futex.h | 14 +- sysdeps/posix/getcwd.c | 7 + sysdeps/powerpc/Makefile | 5 - sysdeps/powerpc/powerpc64/sysdep.h | 15 +- 
.../powerpc64/tst-ucontext-ppc64-vscr.c | 1 + sysdeps/powerpc/tst-set_ppr.c | 3 +- sysdeps/pthread/Makefile | 5 +- sysdeps/pthread/tst-oncey3.c | 1 + sysdeps/pthread/tst-oncey4.c | 1 + sysdeps/pthread/tst-pthread-exit-signal.c | 45 + sysdeps/riscv/rv64/rvd/libm-test-ulps | 24 +- sysdeps/s390/configure | 8 +- sysdeps/s390/configure.ac | 8 +- sysdeps/s390/dl-procinfo.c | 5 +- sysdeps/s390/dl-procinfo.h | 6 +- sysdeps/s390/memmem-arch13.S | 2 +- sysdeps/s390/memmove.c | 2 +- sysdeps/s390/multiarch/ifunc-impl-list.c | 3 +- sysdeps/s390/strstr-arch13.S | 2 +- sysdeps/unix/sysv/linux/Makefile | 7 +- sysdeps/unix/sysv/linux/aarch64/clone.S | 2 + .../unix/sysv/linux/aarch64/cpu-features.c | 2 +- .../unix/sysv/linux/dl-diagnostics-kernel.c | 77 ++ sysdeps/unix/sysv/linux/fstat.c | 6 + sysdeps/unix/sysv/linux/fstat64.c | 12 + sysdeps/unix/sysv/linux/mips/fxstat.c | 4 +- sysdeps/unix/sysv/linux/mips/lxstat.c | 4 +- sysdeps/unix/sysv/linux/mips/xstat.c | 4 +- sysdeps/unix/sysv/linux/mq_notify.c | 26 +- sysdeps/unix/sysv/linux/opensock.c | 114 -- sysdeps/unix/sysv/linux/powerpc/syscall.S | 4 + sysdeps/unix/sysv/linux/s390/bits/hwcap.h | 9 + sysdeps/unix/sysv/linux/s390/opensock.c | 2 - sysdeps/unix/sysv/linux/select.c | 45 +- sysdeps/unix/sysv/linux/sys/prctl.h | 4 - .../unix/sysv/linux/tst-getcwd-smallbuff.c | 259 ++++ sysdeps/unix/sysv/linux/tst-sysvshm-linux.c | 26 +- sysdeps/x86/Makefile | 34 + sysdeps/x86/bits/platform/x86.h | 4 +- sysdeps/x86/cacheinfo.c | 3 + sysdeps/x86/cacheinfo.h | 14 +- sysdeps/x86/configure | 28 + sysdeps/x86/configure.ac | 16 + sysdeps/x86/cpu-features.c | 66 +- sysdeps/x86/cpu-tunables.c | 2 + sysdeps/x86/dl-cacheinfo.h | 25 +- sysdeps/x86/dl-diagnostics-cpu.c | 118 ++ ...cpu-features-preferred_feature_index_1.def | 35 + sysdeps/x86/include/cpu-features.h | 61 +- sysdeps/x86/isa-level.c | 25 +- sysdeps/x86/tst-cpu-features-supports.c | 10 +- sysdeps/x86/tst-get-cpu-features.c | 2 + sysdeps/x86/tst-memchr-rtm.c | 54 + sysdeps/x86/tst-memcmp-rtm.c | 52 + sysdeps/x86/tst-memmove-rtm.c | 53 + sysdeps/x86/tst-memrchr-rtm.c | 54 + sysdeps/x86/tst-memset-rtm.c | 45 + sysdeps/x86/tst-strchr-rtm.c | 54 + sysdeps/x86/tst-strcpy-rtm.c | 53 + sysdeps/x86/tst-string-rtm.h | 72 ++ sysdeps/x86/tst-strlen-rtm.c | 53 + sysdeps/x86/tst-strncmp-rtm.c | 81 ++ sysdeps/x86/tst-strrchr-rtm.c | 53 + .../x86/tst-sysconf-cache-linesize-static.c | 1 + sysdeps/x86/tst-sysconf-cache-linesize.c | 57 + sysdeps/x86/tst-wcsncmp-rtm.c | 21 + sysdeps/x86_64/Makefile | 7 + sysdeps/x86_64/configure | 33 - sysdeps/x86_64/configure.ac | 25 - sysdeps/x86_64/memchr.S | 77 +- sysdeps/x86_64/multiarch/Makefile | 60 +- sysdeps/x86_64/multiarch/ifunc-avx2.h | 18 +- sysdeps/x86_64/multiarch/ifunc-impl-list.c | 402 ++++++- sysdeps/x86_64/multiarch/ifunc-memcmp.h | 17 +- sysdeps/x86_64/multiarch/ifunc-memmove.h | 45 +- sysdeps/x86_64/multiarch/ifunc-memset.h | 49 +- sysdeps/x86_64/multiarch/ifunc-strcpy.h | 17 +- sysdeps/x86_64/multiarch/ifunc-wcslen.h | 52 + sysdeps/x86_64/multiarch/ifunc-wmemset.h | 22 +- sysdeps/x86_64/multiarch/memchr-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/memchr-avx2.S | 494 ++++---- sysdeps/x86_64/multiarch/memchr-evex.S | 478 ++++++++ .../x86_64/multiarch/memcmp-avx2-movbe-rtm.S | 12 + sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S | 28 +- sysdeps/x86_64/multiarch/memcmp-evex-movbe.S | 440 +++++++ .../memmove-avx-unaligned-erms-rtm.S | 17 + .../multiarch/memmove-avx512-unaligned-erms.S | 25 +- .../multiarch/memmove-evex-unaligned-erms.S | 33 + .../multiarch/memmove-vec-unaligned-erms.S | 57 +- 
sysdeps/x86_64/multiarch/memrchr-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/memrchr-avx2.S | 53 +- sysdeps/x86_64/multiarch/memrchr-evex.S | 337 ++++++ .../memset-avx2-unaligned-erms-rtm.S | 10 + .../multiarch/memset-avx2-unaligned-erms.S | 12 +- .../multiarch/memset-avx512-unaligned-erms.S | 16 +- .../multiarch/memset-evex-unaligned-erms.S | 24 + .../multiarch/memset-vec-unaligned-erms.S | 61 +- sysdeps/x86_64/multiarch/rawmemchr-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/rawmemchr-evex.S | 4 + sysdeps/x86_64/multiarch/stpcpy-avx2-rtm.S | 3 + sysdeps/x86_64/multiarch/stpcpy-evex.S | 3 + sysdeps/x86_64/multiarch/stpncpy-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/stpncpy-evex.S | 4 + sysdeps/x86_64/multiarch/strcat-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/strcat-avx2.S | 6 +- sysdeps/x86_64/multiarch/strcat-evex.S | 283 +++++ sysdeps/x86_64/multiarch/strchr-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/strchr-avx2.S | 22 +- sysdeps/x86_64/multiarch/strchr-evex.S | 335 ++++++ sysdeps/x86_64/multiarch/strchr.c | 18 +- sysdeps/x86_64/multiarch/strchrnul-avx2-rtm.S | 3 + sysdeps/x86_64/multiarch/strchrnul-evex.S | 3 + sysdeps/x86_64/multiarch/strcmp-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/strcmp-avx2.S | 65 +- sysdeps/x86_64/multiarch/strcmp-evex.S | 1043 +++++++++++++++++ sysdeps/x86_64/multiarch/strcmp.c | 19 +- sysdeps/x86_64/multiarch/strcpy-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/strcpy-avx2.S | 85 +- sysdeps/x86_64/multiarch/strcpy-evex.S | 1003 ++++++++++++++++ sysdeps/x86_64/multiarch/strlen-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/strlen-avx2.S | 613 ++++++---- sysdeps/x86_64/multiarch/strlen-evex.S | 489 ++++++++ sysdeps/x86_64/multiarch/strlen-sse2.S | 2 +- sysdeps/x86_64/multiarch/strlen-vec.S | 270 +++++ sysdeps/x86_64/multiarch/strncat-avx2-rtm.S | 3 + sysdeps/x86_64/multiarch/strncat-evex.S | 3 + sysdeps/x86_64/multiarch/strncmp-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/strncmp-avx2.S | 1 + sysdeps/x86_64/multiarch/strncmp-evex.S | 3 + sysdeps/x86_64/multiarch/strncmp.c | 19 +- sysdeps/x86_64/multiarch/strncpy-avx2-rtm.S | 3 + sysdeps/x86_64/multiarch/strncpy-evex.S | 3 + sysdeps/x86_64/multiarch/strnlen-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/strnlen-evex.S | 4 + sysdeps/x86_64/multiarch/strrchr-avx2-rtm.S | 12 + sysdeps/x86_64/multiarch/strrchr-avx2.S | 19 +- sysdeps/x86_64/multiarch/strrchr-evex.S | 265 +++++ sysdeps/x86_64/multiarch/wcschr-avx2-rtm.S | 3 + sysdeps/x86_64/multiarch/wcschr-evex.S | 3 + sysdeps/x86_64/multiarch/wcscmp-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/wcscmp-evex.S | 4 + sysdeps/x86_64/multiarch/wcslen-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/wcslen-evex.S | 4 + sysdeps/x86_64/multiarch/wcslen-sse4_1.S | 4 + sysdeps/x86_64/multiarch/wcslen.c | 2 +- sysdeps/x86_64/multiarch/wcsncmp-avx2-rtm.S | 5 + sysdeps/x86_64/multiarch/wcsncmp-avx2.S | 2 +- sysdeps/x86_64/multiarch/wcsncmp-evex.S | 5 + sysdeps/x86_64/multiarch/wcsnlen-avx2-rtm.S | 5 + sysdeps/x86_64/multiarch/wcsnlen-evex.S | 5 + sysdeps/x86_64/multiarch/wcsnlen-sse4_1.S | 2 +- sysdeps/x86_64/multiarch/wcsnlen.c | 22 +- sysdeps/x86_64/multiarch/wcsrchr-avx2-rtm.S | 3 + sysdeps/x86_64/multiarch/wcsrchr-evex.S | 3 + sysdeps/x86_64/multiarch/wmemchr-avx2-rtm.S | 4 + sysdeps/x86_64/multiarch/wmemchr-evex.S | 4 + .../x86_64/multiarch/wmemcmp-avx2-movbe-rtm.S | 4 + sysdeps/x86_64/multiarch/wmemcmp-evex-movbe.S | 4 + sysdeps/x86_64/strlen.S | 243 +--- sysdeps/x86_64/sysdep.h | 22 + sysdeps/x86_64/tst-rsi-strlen.c | 81 ++ sysdeps/x86_64/tst-rsi-wcslen.c | 20 + 266 files changed, 11559 insertions(+), 
2154 deletions(-) create mode 100644 elf/dl-diagnostics-cpu.c create mode 100644 elf/dl-diagnostics-kernel.c create mode 100644 elf/dl-diagnostics.c create mode 100644 elf/dl-diagnostics.h create mode 100644 elf/tst-dlmopen-dlerror-mod.c create mode 100644 elf/tst-dlmopen-dlerror.c create mode 100644 elf/tst-dlmopen-gethostbyname-mod.c create mode 100644 elf/tst-dlmopen-gethostbyname.c create mode 100644 elf/tst-dst-static.c create mode 100644 iconvdata/bug-iconv15.c create mode 100644 io/tst-stat-lfs.c create mode 100644 io/tst-stat.c create mode 100644 misc/tst-select.c create mode 100644 nptl/tst-pthread-gdb-attach-static.c create mode 100644 nptl/tst-pthread-gdb-attach.c create mode 100644 nss/tst-nss-files-hosts-long.root/etc/nsswitch.conf create mode 100644 nss/tst-reload2.root/etc/hosts create mode 100644 nss/tst-reload2.root/subdir/etc/hosts create mode 100644 rt/tst-bz28213.c create mode 100644 socket/sockaddr_un_set.c create mode 100644 socket/tst-sockaddr_un_set.c create mode 100644 stdlib/tst-realpath-toolong.c create mode 100644 sunrpc/tst-bug22542.c create mode 100644 sunrpc/tst-bug28768.c create mode 100644 support/support_select_modifies_timeout.c create mode 100644 support/support_select_normalizes_timeout.c create mode 100644 support/xclone.c create mode 100644 support/xpthread_kill.c create mode 100644 support/xsched.h create mode 100644 sysdeps/pthread/tst-oncey3.c create mode 100644 sysdeps/pthread/tst-oncey4.c create mode 100644 sysdeps/pthread/tst-pthread-exit-signal.c create mode 100644 sysdeps/unix/sysv/linux/dl-diagnostics-kernel.c delete mode 100644 sysdeps/unix/sysv/linux/opensock.c delete mode 100644 sysdeps/unix/sysv/linux/s390/opensock.c create mode 100644 sysdeps/unix/sysv/linux/tst-getcwd-smallbuff.c create mode 100644 sysdeps/x86/dl-diagnostics-cpu.c create mode 100644 sysdeps/x86/include/cpu-features-preferred_feature_index_1.def create mode 100644 sysdeps/x86/tst-memchr-rtm.c create mode 100644 sysdeps/x86/tst-memcmp-rtm.c create mode 100644 sysdeps/x86/tst-memmove-rtm.c create mode 100644 sysdeps/x86/tst-memrchr-rtm.c create mode 100644 sysdeps/x86/tst-memset-rtm.c create mode 100644 sysdeps/x86/tst-strchr-rtm.c create mode 100644 sysdeps/x86/tst-strcpy-rtm.c create mode 100644 sysdeps/x86/tst-string-rtm.h create mode 100644 sysdeps/x86/tst-strlen-rtm.c create mode 100644 sysdeps/x86/tst-strncmp-rtm.c create mode 100644 sysdeps/x86/tst-strrchr-rtm.c create mode 100644 sysdeps/x86/tst-sysconf-cache-linesize-static.c create mode 100644 sysdeps/x86/tst-sysconf-cache-linesize.c create mode 100644 sysdeps/x86/tst-wcsncmp-rtm.c mode change 100644 => 100755 sysdeps/x86_64/configure create mode 100644 sysdeps/x86_64/multiarch/ifunc-wcslen.h create mode 100644 sysdeps/x86_64/multiarch/memchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/memchr-evex.S create mode 100644 sysdeps/x86_64/multiarch/memcmp-avx2-movbe-rtm.S create mode 100644 sysdeps/x86_64/multiarch/memcmp-evex-movbe.S create mode 100644 sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S create mode 100644 sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S create mode 100644 sysdeps/x86_64/multiarch/memrchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/memrchr-evex.S create mode 100644 sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S create mode 100644 sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S create mode 100644 sysdeps/x86_64/multiarch/rawmemchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/rawmemchr-evex.S create mode 100644 
sysdeps/x86_64/multiarch/stpcpy-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/stpcpy-evex.S create mode 100644 sysdeps/x86_64/multiarch/stpncpy-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/stpncpy-evex.S create mode 100644 sysdeps/x86_64/multiarch/strcat-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strcat-evex.S create mode 100644 sysdeps/x86_64/multiarch/strchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strchr-evex.S create mode 100644 sysdeps/x86_64/multiarch/strchrnul-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strchrnul-evex.S create mode 100644 sysdeps/x86_64/multiarch/strcmp-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strcmp-evex.S create mode 100644 sysdeps/x86_64/multiarch/strcpy-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strcpy-evex.S create mode 100644 sysdeps/x86_64/multiarch/strlen-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strlen-evex.S create mode 100644 sysdeps/x86_64/multiarch/strlen-vec.S create mode 100644 sysdeps/x86_64/multiarch/strncat-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strncat-evex.S create mode 100644 sysdeps/x86_64/multiarch/strncmp-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strncmp-evex.S create mode 100644 sysdeps/x86_64/multiarch/strncpy-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strncpy-evex.S create mode 100644 sysdeps/x86_64/multiarch/strnlen-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strnlen-evex.S create mode 100644 sysdeps/x86_64/multiarch/strrchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/strrchr-evex.S create mode 100644 sysdeps/x86_64/multiarch/wcschr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wcschr-evex.S create mode 100644 sysdeps/x86_64/multiarch/wcscmp-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wcscmp-evex.S create mode 100644 sysdeps/x86_64/multiarch/wcslen-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wcslen-evex.S create mode 100644 sysdeps/x86_64/multiarch/wcslen-sse4_1.S create mode 100644 sysdeps/x86_64/multiarch/wcsncmp-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wcsncmp-evex.S create mode 100644 sysdeps/x86_64/multiarch/wcsnlen-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wcsnlen-evex.S create mode 100644 sysdeps/x86_64/multiarch/wcsrchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wcsrchr-evex.S create mode 100644 sysdeps/x86_64/multiarch/wmemchr-avx2-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wmemchr-evex.S create mode 100644 sysdeps/x86_64/multiarch/wmemcmp-avx2-movbe-rtm.S create mode 100644 sysdeps/x86_64/multiarch/wmemcmp-evex-movbe.S create mode 100644 sysdeps/x86_64/tst-rsi-strlen.c create mode 100644 sysdeps/x86_64/tst-rsi-wcslen.c diff --git a/NEWS b/NEWS index 71f5d2032..bd86d8971 100644 --- a/NEWS +++ b/NEWS @@ -5,6 +5,66 @@ See the end for copying conditions. Please send GNU C library bug reports via <https://sourceware.org/bugzilla/> using `glibc' in the "product" field. +Version 2.33.1 + +Major new features: + +* The dynamic linker implements the --list-diagnostics option, printing + a dump of information related to IFUNC resolver operation and + glibc-hwcaps subdirectory selection. + +Security related changes: + + CVE-2021-33574: The mq_notify function has a potential use-after-free + issue when using a notification type of SIGEV_THREAD and a thread + attribute with a non-default affinity mask.
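
To make the CVE-2021-33574 report concrete, the affected call pattern looks roughly like the sketch below. This is illustrative only and not part of the patch; the helper name is hypothetical, error handling is omitted, and older glibc needs -lrt for the mq_* functions. The ingredients are a SIGEV_THREAD notification whose sigev_notify_attributes carries a non-default CPU affinity mask.

    #define _GNU_SOURCE
    #include <mqueue.h>
    #include <pthread.h>
    #include <sched.h>
    #include <signal.h>

    static void
    on_message (union sigval sv)
    {
      /* Runs on a temporary helper thread once a message arrives.  */
    }

    /* Hypothetical helper: register a SIGEV_THREAD notification whose
       thread attribute carries a non-default affinity mask, the
       combination described by CVE-2021-33574.  */
    static int
    register_notification (mqd_t mq)
    {
      pthread_attr_t attr;
      cpu_set_t set;

      pthread_attr_init (&attr);
      CPU_ZERO (&set);
      CPU_SET (0, &set);
      pthread_attr_setaffinity_np (&attr, sizeof (set), &set);

      struct sigevent sev =
        {
          .sigev_notify = SIGEV_THREAD,
          .sigev_notify_function = on_message,
          .sigev_notify_attributes = &attr,
        };
      int ret = mq_notify (mq, &sev);
      pthread_attr_destroy (&attr);
      return ret;
    }
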
+ + CVE-2022-23219: Passing an overlong file name to the clnt_create + legacy function could result in a stack-based buffer overflow when + using the "unix" protocol. Reported by Martin Sebor. + + CVE-2022-23218: Passing an overlong file name to the svcunix_create + legacy function could result in a stack-based buffer overflow. + + CVE-2021-3998: Passing a path longer than PATH_MAX to the realpath + function could result in a memory leak and potential access of + uninitialized memory. Reported by Qualys. + + CVE-2021-3999: Passing a buffer of size exactly 1 byte to the getcwd + function may result in an off-by-one buffer underflow and overflow + when the current working directory is longer than PATH_MAX and also + corresponds to the / directory through an unprivileged mount + namespace. Reported by Qualys. + +The following bugs are resolved with this release: + + [15271] dlfcn function failure after dlmopen terminates process + [18435] pthread_once hangs when init routine throws an exception + [22542] CVE-2022-23219: Buffer overflow in sunrpc clnt_create for "unix" + [23462] Static binary with dynamic string tokens ($LIB, $PLATFORM, $ORIGIN) + crashes + [27304] pthread_cond_destroy does not pass private flag to futex system calls + [27457] vzeroupper use in AVX2 multiarch string functions cause HTM aborts + [27537] test-container: Always copy test-specific support files + [27577] elf/ld.so --help doesn't work + [27646] gethostbyname and NSS crashes after dlmopen + [27648] FAIL: misc/tst-select + [27651] Performance regression after updating to 2.33 + [27706] select fails to update timeout on error + [27744] Support different libpthread/ld.so load orders for gdb -p + [27892] powerpc: scv ABI error handling fails to check IS_ERR_VALUE + [27974] Overflow bug in some implementation of wcsnlen, wmemchr, and wcsncat + [28353] Race condition in __opensock + [28607] Masked signals are delivered on thread exit + [28524] Conversion from ISO-2022-JP-3 with iconv may emit spurious NULs + [28532] powerpc64[le]: CFI for assembly templated syscalls is incorrect + [28755] overflow bug in wcsncmp_avx2 and wcsncmp_evex + [28768] CVE-2022-23218: Buffer overflow in sunrpc svcunix_create + [28769] CVE-2021-3999: Off-by-one buffer overflow/underflow in getcwd() + [28770] CVE-2021-3998: Unexpected return value from realpath() for too long results + [28896] strncmp-avx2-rtm and wcsncmp-avx2-rtm fallback on non-rtm + variants when avoiding overflow + Version 2.33 Major new features: @@ -238,6 +298,7 @@ The following bugs are resolved with this release: [27237] malloc: deadlock in malloc/tst-malloc-stats-cancellation [27256] locale: Assertion failure in ISO-2022-JP-3 gconv module related to combining characters (CVE-2021-3326) + [28784] x86: crash in 32bit memset-sse2.s when the cache size can not be determined Version 2.32 diff --git a/config.h.in b/config.h.in index 06ee8ae26..f21bf04e4 100644 --- a/config.h.in +++ b/config.h.in @@ -275,4 +275,10 @@ /* Define if x86 ISA level should be included in shared libraries. */ #undef INCLUDE_X86_ISA_LEVEL +/* Define if -msahf is enabled by default on x86. */ +#undef HAVE_X86_LAHF_SAHF + +/* Define if -mmovbe is enabled by default on x86. 
*/ +#undef HAVE_X86_MOVBE + #endif diff --git a/dlfcn/dlerror.c b/dlfcn/dlerror.c index 48b4c25be..ff7c7b922 100644 --- a/dlfcn/dlerror.c +++ b/dlfcn/dlerror.c @@ -167,8 +167,17 @@ _dlerror_run (void (*operate) (void *), void *args) result->errstring = NULL; } - result->errcode = _dl_catch_error (&result->objname, &result->errstring, - &result->malloced, operate, args); +#ifdef SHARED + result->errcode = _dl_catch_error_ptr (&result->objname, + &result->errstring, + &result->malloced, + operate, args); +#else + result->errcode = _dl_catch_error (&result->objname, + &result->errstring, + &result->malloced, + operate, args); +#endif /* If no error we mark that no error string is available. */ result->returned = result->errstring == NULL; diff --git a/elf/Makefile b/elf/Makefile index 5d666b1b0..4fc3bd823 100644 --- a/elf/Makefile +++ b/elf/Makefile @@ -66,7 +66,7 @@ elide-routines.os = $(all-dl-routines) dl-support enbl-secure dl-origin \ # interpreter and operating independent of libc. rtld-routines = rtld $(all-dl-routines) dl-sysdep dl-environ dl-minimal \ dl-error-minimal dl-conflict dl-hwcaps dl-hwcaps_split dl-hwcaps-subdirs \ - dl-usage + dl-usage dl-diagnostics dl-diagnostics-kernel dl-diagnostics-cpu all-rtld-routines = $(rtld-routines) $(sysdep-rtld-routines) CFLAGS-dl-runtime.c += -fexceptions -fasynchronous-unwind-tables @@ -164,7 +164,8 @@ tests-static-normal := tst-leaks1-static tst-array1-static tst-array5-static \ tst-dl-iter-static \ tst-tlsalign-static tst-tlsalign-extern-static \ tst-linkall-static tst-env-setuid tst-env-setuid-tunables \ - tst-single_threaded-static tst-single_threaded-pthread-static + tst-single_threaded-static tst-single_threaded-pthread-static \ + tst-dst-static tests-static-internal := tst-tls1-static tst-tls2-static \ tst-ptrguard1-static tst-stackguard1-static \ @@ -225,7 +226,8 @@ tests += restest1 preloadtest loadfail multiload origtest resolvfail \ tst-audit14 tst-audit15 tst-audit16 \ tst-single_threaded tst-single_threaded-pthread \ tst-tls-ie tst-tls-ie-dlmopen argv0test \ - tst-glibc-hwcaps tst-glibc-hwcaps-prepend tst-glibc-hwcaps-mask + tst-glibc-hwcaps tst-glibc-hwcaps-prepend tst-glibc-hwcaps-mask \ + tst-dlmopen-dlerror tst-dlmopen-gethostbyname # reldep9 tests-internal += loadtest unload unload2 circleload1 \ neededtest neededtest2 neededtest3 neededtest4 \ @@ -244,7 +246,7 @@ tests += $(tests-execstack-$(have-z-execstack)) ifeq ($(run-built-tests),yes) tests-special += $(objpfx)tst-leaks1-mem.out \ $(objpfx)tst-leaks1-static-mem.out $(objpfx)noload-mem.out \ - $(objpfx)tst-ldconfig-X.out + $(objpfx)tst-ldconfig-X.out $(objpfx)tst-rtld-help.out endif tlsmod17a-suffixes = 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 tlsmod18a-suffixes = 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 @@ -347,6 +349,7 @@ modules-names = testobj1 testobj2 testobj3 testobj4 testobj5 testobj6 \ libmarkermod2-1 libmarkermod2-2 \ libmarkermod3-1 libmarkermod3-2 libmarkermod3-3 \ libmarkermod4-1 libmarkermod4-2 libmarkermod4-3 libmarkermod4-4 \ + tst-dlmopen-dlerror-mod tst-dlmopen-gethostbyname-mod \ # Most modules build with _ISOMAC defined, but those filtered out # depend on internal headers. 
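
The dlfcn/dlerror.c hunk above makes _dlerror_run dispatch through _dl_catch_error_ptr in the shared build, so that dlfcn failures in a secondary namespace created by dlmopen are reported through dlerror instead of terminating the process (bugs 15271 and 27646, exercised by the tst-dlmopen-dlerror test wired up in this Makefile). A minimal sketch of the now-working pattern follows; it is illustrative only, and libm.so.6 is just a commonly available DSO chosen for the example.

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* Load an object into a fresh link-map namespace.  */
      void *h = dlmopen (LM_ID_NEWLM, "libm.so.6", RTLD_NOW);
      if (h == NULL)
        {
          fprintf (stderr, "dlmopen: %s\n", dlerror ());
          return 1;
        }
      /* Before the fix, a failed lookup in the secondary namespace
         could terminate the process; now it just sets the dlerror
         state.  */
      if (dlsym (h, "no_such_symbol") == NULL)
        printf ("dlsym failed as expected: %s\n", dlerror ());
      dlclose (h);
      return 0;
    }
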
@@ -432,7 +435,8 @@ endif ifeq (yes,$(build-shared)) ifeq ($(run-built-tests),yes) tests-special += $(objpfx)tst-pathopt.out $(objpfx)tst-rtld-load-self.out \ - $(objpfx)tst-rtld-preload.out $(objpfx)argv0test.out + $(objpfx)tst-rtld-preload.out $(objpfx)argv0test.out \ + $(objpfx)tst-rtld-help.out endif tests-special += $(objpfx)check-textrel.out $(objpfx)check-execstack.out \ $(objpfx)check-wx-segment.out \ @@ -678,6 +682,9 @@ CFLAGS-cache.c += $(SYSCONF-FLAGS) CFLAGS-rtld.c += $(SYSCONF-FLAGS) CFLAGS-dl-usage.c += $(SYSCONF-FLAGS) \ -D'RTLD="$(rtlddir)/$(rtld-installed-name)"' +CFLAGS-dl-diagnostics.c += $(SYSCONF-FLAGS) \ + -D'PREFIX="$(prefix)"' \ + -D'RTLD="$(rtlddir)/$(rtld-installed-name)"' cpp-srcs-left := $(all-rtld-routines:=.os) lib := rtld @@ -1578,6 +1585,10 @@ $(objpfx)tst-sonamemove-dlopen.out: \ $(objpfx)tst-sonamemove-runmod1.so \ $(objpfx)tst-sonamemove-runmod2.so +$(objpfx)tst-dlmopen-dlerror: $(libdl) +$(objpfx)tst-dlmopen-dlerror-mod.so: $(libdl) $(libsupport) +$(objpfx)tst-dlmopen-dlerror.out: $(objpfx)tst-dlmopen-dlerror-mod.so + # Override -z defs, so that we can reference an undefined symbol. # Force lazy binding for the same reason. LDFLAGS-tst-latepthreadmod.so = \ @@ -1653,8 +1664,6 @@ $(objpfx)tst-nodelete-dlclose.out: $(objpfx)tst-nodelete-dlclose-dso.so \ tst-env-setuid-ENV = MALLOC_CHECK_=2 MALLOC_MMAP_THRESHOLD_=4096 \ LD_HWCAP_MASK=0x1 -tst-env-setuid-tunables-ENV = \ - GLIBC_TUNABLES=glibc.malloc.check=2:glibc.malloc.mmap_threshold=4096 $(objpfx)tst-debug1: $(libdl) $(objpfx)tst-debug1.out: $(objpfx)tst-debug1mod1.so @@ -1902,3 +1911,21 @@ $(objpfx)list-tunables.out: tst-rtld-list-tunables.sh $(objpfx)ld.so cmp tst-rtld-list-tunables.exp \ $(objpfx)/tst-rtld-list-tunables.out > $@; \ $(evaluate-test) + +tst-dst-static-ENV = LD_LIBRARY_PATH='$$ORIGIN' + +$(objpfx)tst-rtld-help.out: $(objpfx)ld.so + $(test-wrapper) $(rtld-prefix) --help > $@; \ + status=$$?; \ + echo "info: ld.so exit status: $$status" >> $@; \ + if ! grep -q 'Legacy HWCAP subdirectories under library search path directories' $@; then \ + echo "error: missing subdirectory pattern" >> $@; \ + if test $$status -eq 0; then \ + status=1; \ + fi; \ + fi; \ + (exit $$status); \ + $(evaluate-test) + +$(objpfx)tst-dlmopen-gethostbyname: $(libdl) +$(objpfx)tst-dlmopen-gethostbyname.out: $(objpfx)tst-dlmopen-gethostbyname-mod.so diff --git a/elf/Versions b/elf/Versions index be88c48e6..cdfd7b4d2 100644 --- a/elf/Versions +++ b/elf/Versions @@ -72,7 +72,7 @@ ld { # Internal error handling support. Interposed by libc.so. _dl_signal_exception; _dl_catch_exception; - _dl_signal_error; _dl_catch_error; + _dl_signal_error; _dl_catch_error; _dl_catch_error_ptr; # Set value of a tunable. __tunable_get_val; diff --git a/elf/dl-cache.c b/elf/dl-cache.c index 32f3bef5e..2b8da8650 100644 --- a/elf/dl-cache.c +++ b/elf/dl-cache.c @@ -269,81 +269,77 @@ search_cache (const char *string_table, uint32_t string_table_size, if (_dl_cache_check_flags (flags) && _dl_cache_verify_ptr (lib->value, string_table_size)) { - if (best == NULL || flags == GLRO (dl_correct_cache_id)) - { - /* Named/extension hwcaps get slightly different - treatment: We keep searching for a better - match. */ - bool named_hwcap = false; + /* Named/extension hwcaps get slightly different + treatment: We keep searching for a better + match. */ + bool named_hwcap = false; - if (entry_size >= sizeof (struct file_entry_new)) - { - /* The entry is large enough to include - HWCAP data. Check it. 
*/ - struct file_entry_new *libnew - = (struct file_entry_new *) lib; + if (entry_size >= sizeof (struct file_entry_new)) + { + /* The entry is large enough to include + HWCAP data. Check it. */ + struct file_entry_new *libnew + = (struct file_entry_new *) lib; #ifdef SHARED - named_hwcap = dl_cache_hwcap_extension (libnew); - if (named_hwcap - && !dl_cache_hwcap_isa_level_compatible (libnew)) - continue; + named_hwcap = dl_cache_hwcap_extension (libnew); + if (named_hwcap + && !dl_cache_hwcap_isa_level_compatible (libnew)) + continue; #endif - /* The entries with named/extension hwcaps - have been exhausted. Return the best - match encountered so far if there is - one. */ - if (!named_hwcap && best != NULL) - break; + /* The entries with named/extension hwcaps have + been exhausted (they are listed before all + other entries). Return the best match + encountered so far if there is one. */ + if (!named_hwcap && best != NULL) + break; - if ((libnew->hwcap & hwcap_exclude) && !named_hwcap) - continue; - if (GLRO (dl_osversion) - && libnew->osversion > GLRO (dl_osversion)) - continue; - if (_DL_PLATFORMS_COUNT - && (libnew->hwcap & _DL_HWCAP_PLATFORM) != 0 - && ((libnew->hwcap & _DL_HWCAP_PLATFORM) - != platform)) - continue; + if ((libnew->hwcap & hwcap_exclude) && !named_hwcap) + continue; + if (GLRO (dl_osversion) + && libnew->osversion > GLRO (dl_osversion)) + continue; + if (_DL_PLATFORMS_COUNT + && (libnew->hwcap & _DL_HWCAP_PLATFORM) != 0 + && ((libnew->hwcap & _DL_HWCAP_PLATFORM) + != platform)) + continue; #ifdef SHARED - /* For named hwcaps, determine the priority - and see if beats what has been found so - far. */ - if (named_hwcap) - { - uint32_t entry_priority - = glibc_hwcaps_priority (libnew->hwcap); - if (entry_priority == 0) - /* Not usable at all. Skip. */ - continue; - else if (best == NULL - || entry_priority < best_priority) - /* This entry is of higher priority - than the previous one, or it is the - first entry. */ - best_priority = entry_priority; - else - /* An entry has already been found, - but it is a better match. */ - continue; - } -#endif /* SHARED */ + /* For named hwcaps, determine the priority and + see if beats what has been found so far. */ + if (named_hwcap) + { + uint32_t entry_priority + = glibc_hwcaps_priority (libnew->hwcap); + if (entry_priority == 0) + /* Not usable at all. Skip. */ + continue; + else if (best == NULL + || entry_priority < best_priority) + /* This entry is of higher priority + than the previous one, or it is the + first entry. */ + best_priority = entry_priority; + else + /* An entry has already been found, + but it is a better match. */ + continue; } +#endif /* SHARED */ + } - best = string_table + lib->value; + best = string_table + lib->value; - if (flags == GLRO (dl_correct_cache_id) - && !named_hwcap) - /* We've found an exact match for the shared - object and no general `ELF' release. Stop - searching, but not if a named (extension) - hwcap is used. In this case, an entry with - a higher priority may come up later. */ - break; - } + if (!named_hwcap && flags == _DL_CACHE_DEFAULT_ID) + /* With named hwcaps, we need to keep searching to + see if we find a better match. A better match + is also possible if the flags of the current + entry do not match the expected cache flags. + But if the flags match, no better entry will be + found. 
*/ + break; } } while (++middle <= right); diff --git a/elf/dl-diagnostics-cpu.c b/elf/dl-diagnostics-cpu.c new file mode 100644 index 000000000..f7d149764 --- /dev/null +++ b/elf/dl-diagnostics-cpu.c @@ -0,0 +1,24 @@ +/* Print CPU diagnostics data in ld.so. Stub version. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +void +_dl_diagnostics_cpu (void) +{ +} diff --git a/elf/dl-diagnostics-kernel.c b/elf/dl-diagnostics-kernel.c new file mode 100644 index 000000000..831c358f1 --- /dev/null +++ b/elf/dl-diagnostics-kernel.c @@ -0,0 +1,24 @@ +/* Print kernel diagnostics data in ld.so. Stub version. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +void +_dl_diagnostics_kernel (void) +{ +} diff --git a/elf/dl-diagnostics.c b/elf/dl-diagnostics.c new file mode 100644 index 000000000..bef224b36 --- /dev/null +++ b/elf/dl-diagnostics.c @@ -0,0 +1,265 @@ +/* Print diagnostics data in ld.so. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "trusted-dirs.h" +#include "version.h" + +/* Write CH to standard output. */ +static void +_dl_putc (char ch) +{ + _dl_write (STDOUT_FILENO, &ch, 1); +} + +/* Print CH to standard output, quoting it if necessary. 
*/ +static void +print_quoted_char (char ch) +{ + if (ch < ' ' || ch > '~') + { + char buf[4]; + buf[0] = '\\'; + buf[1] = '0' + ((ch >> 6) & 7); + buf[2] = '0' + ((ch >> 3) & 7); + buf[3] = '0' + (ch & 7); + _dl_write (STDOUT_FILENO, buf, 4); + } + else + { + if (ch == '\\' || ch == '"') + _dl_putc ('\\'); + _dl_putc (ch); + } +} + +/* Print S of LEN bytes to standard output, quoting characters as + needed. */ +static void +print_string_length (const char *s, size_t len) +{ + _dl_putc ('"'); + for (size_t i = 0; i < len; ++i) + print_quoted_char (s[i]); + _dl_putc ('"'); +} + +void +_dl_diagnostics_print_string (const char *s) +{ + if (s == NULL) + { + _dl_printf ("0x0"); + return; + } + + _dl_putc ('"'); + while (*s != '\0') + { + print_quoted_char (*s); + ++s; + } + _dl_putc ('"'); +} + +void +_dl_diagnostics_print_labeled_string (const char *label, const char *s) +{ + _dl_printf ("%s=", label); + _dl_diagnostics_print_string (s); + _dl_putc ('\n'); +} + +void +_dl_diagnostics_print_labeled_value (const char *label, uint64_t value) +{ + if (sizeof (value) == sizeof (unsigned long int)) + /* _dl_printf can print 64-bit values directly. */ + _dl_printf ("%s=0x%lx\n", label, (unsigned long int) value); + else + { + uint32_t high = value >> 32; + uint32_t low = value; + if (high == 0) + _dl_printf ("%s=0x%x\n", label, low); + else + _dl_printf ("%s=0x%x%08x\n", label, high, low); + } +} + +/* Return true if ENV is an unfiltered environment variable. */ +static bool +unfiltered_envvar (const char *env, size_t *name_length) +{ + char *env_equal = strchr (env, '='); + if (env_equal == NULL) + { + /* Always dump malformed entries. */ + *name_length = strlen (env); + return true; + } + size_t envname_length = env_equal - env; + *name_length = envname_length; + + /* LC_ and LD_ variables. */ + if (env[0] == 'L' && (env[1] == 'C' || env[1] == 'D') + && env[2] == '_') + return true; + + /* MALLOC_ variables. */ + if (strncmp (env, "MALLOC_", strlen ("MALLOC_")) == 0) + return true; + + static const char unfiltered[] = + "DATEMSK\0" + "GCONV_PATH\0" + "GETCONF_DIR\0" + "GETCONF_DIR\0" + "GLIBC_TUNABLES\0" + "GMON_OUTPUT_PREFIX\0" + "HESIOD_CONFIG\0" + "HES_DOMAIN\0" + "HOSTALIASES\0" + "I18NPATH\0" + "IFS\0" + "LANG\0" + "LOCALDOMAIN\0" + "LOCPATH\0" + "MSGVERB\0" + "NIS_DEFAULTS\0" + "NIS_GROUP\0" + "NIS_PATH\0" + "NLSPATH\0" + "PATH\0" + "POSIXLY_CORRECT\0" + "RESOLV_HOST_CONF\0" + "RES_OPTIONS\0" + "SEV_LEVEL\0" + "TMPDIR\0" + "TZ\0" + "TZDIR\0" + /* Two null bytes at the end to mark the end of the list via an + empty substring. */ + ; + for (const char *candidate = unfiltered; *candidate != '\0'; ) + { + size_t candidate_length = strlen (candidate); + if (candidate_length == envname_length + && memcmp (candidate, env, candidate_length) == 0) + return true; + candidate += candidate_length + 1; + } + + return false; +} + +/* Dump the process environment. */ +static void +print_environ (char **environ) +{ + unsigned int index = 0; + for (char **envp = environ; *envp != NULL; ++envp) + { + char *env = *envp; + size_t name_length; + bool unfiltered = unfiltered_envvar (env, &name_length); + _dl_printf ("env%s[0x%x]=", + unfiltered ? "" : "_filtered", index); + if (unfiltered) + _dl_diagnostics_print_string (env); + else + print_string_length (env, name_length); + _dl_putc ('\n'); + ++index; + } +} + +/* Print configured paths and the built-in search path. 
*/ +static void +print_paths (void) +{ + _dl_diagnostics_print_labeled_string ("path.prefix", PREFIX); + _dl_diagnostics_print_labeled_string ("path.rtld", RTLD); + _dl_diagnostics_print_labeled_string ("path.sysconfdir", SYSCONFDIR); + + unsigned int index = 0; + static const char *system_dirs = SYSTEM_DIRS "\0"; + for (const char *e = system_dirs; *e != '\0'; ) + { + size_t len = strlen (e); + _dl_printf ("path.system_dirs[0x%x]=", index); + print_string_length (e, len); + _dl_putc ('\n'); + ++index; + e += len + 1; + } +} + +/* Print information about the glibc version. */ +static void +print_version (void) +{ + _dl_diagnostics_print_labeled_string ("version.release", RELEASE); + _dl_diagnostics_print_labeled_string ("version.version", VERSION); +} + +void +_dl_print_diagnostics (char **environ) +{ +#ifdef HAVE_DL_DISCOVER_OSVERSION + _dl_diagnostics_print_labeled_value + ("dl_discover_osversion", _dl_discover_osversion ()); +#endif + _dl_diagnostics_print_labeled_string ("dl_dst_lib", DL_DST_LIB); + _dl_diagnostics_print_labeled_value ("dl_hwcap", GLRO (dl_hwcap)); + _dl_diagnostics_print_labeled_value ("dl_hwcap_important", HWCAP_IMPORTANT); + _dl_diagnostics_print_labeled_value ("dl_hwcap2", GLRO (dl_hwcap2)); + _dl_diagnostics_print_labeled_string + ("dl_hwcaps_subdirs", _dl_hwcaps_subdirs); + _dl_diagnostics_print_labeled_value + ("dl_hwcaps_subdirs_active", _dl_hwcaps_subdirs_active ()); + _dl_diagnostics_print_labeled_value ("dl_osversion", GLRO (dl_osversion)); + _dl_diagnostics_print_labeled_value ("dl_pagesize", GLRO (dl_pagesize)); + _dl_diagnostics_print_labeled_string ("dl_platform", GLRO (dl_platform)); + _dl_diagnostics_print_labeled_string + ("dl_profile_output", GLRO (dl_profile_output)); + _dl_diagnostics_print_labeled_value + ("dl_string_platform", _dl_string_platform ( GLRO (dl_platform))); + + _dl_diagnostics_print_labeled_string ("dso.ld", LD_SO); + _dl_diagnostics_print_labeled_string ("dso.libc", LIBC_SO); + + print_environ (environ); + print_paths (); + print_version (); + + _dl_diagnostics_kernel (); + _dl_diagnostics_cpu (); + + _exit (EXIT_SUCCESS); +} diff --git a/elf/dl-diagnostics.h b/elf/dl-diagnostics.h new file mode 100644 index 000000000..27dcb12bc --- /dev/null +++ b/elf/dl-diagnostics.h @@ -0,0 +1,46 @@ +/* Interfaces for printing diagnostics in ld.so. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#ifndef _DL_DIAGNOSTICS_H +#define _DL_DIAGNOSTICS_H + +#include + +/* Write the null-terminated string to standard output, surrounded in + quotation marks. */ +void _dl_diagnostics_print_string (const char *s) attribute_hidden; + +/* Like _dl_diagnostics_print_string, but add a LABEL= prefix, and a + newline character as a suffix. 
*/ +void _dl_diagnostics_print_labeled_string (const char *label, const char *s) + attribute_hidden; + +/* Print LABEL=VALUE to standard output, followed by a newline + character. */ +void _dl_diagnostics_print_labeled_value (const char *label, uint64_t value) + attribute_hidden; + +/* Print diagnostics data for the kernel. Called from + _dl_print_diagnostics. */ +void _dl_diagnostics_kernel (void) attribute_hidden; + +/* Print diagnostics data for the CPU(s). Called from + _dl_print_diagnostics. */ +void _dl_diagnostics_cpu (void) attribute_hidden; + +#endif /* _DL_DIAGNOSTICS_H */ diff --git a/elf/dl-error-skeleton.c b/elf/dl-error-skeleton.c index 2fd62777c..0de505f25 100644 --- a/elf/dl-error-skeleton.c +++ b/elf/dl-error-skeleton.c @@ -248,4 +248,19 @@ _dl_receive_error (receiver_fct fct, void (*operate) (void *), void *args) catch_hook = old_catch; receiver = old_receiver; } + +/* Forwarder used for initializing _dl_catch_error_ptr. */ +int +_rtld_catch_error (const char **objname, const char **errstring, + bool *mallocedp, void (*operate) (void *), + void *args) +{ + /* The reference to _dl_catch_error will eventually be relocated to + point to the implementation in libc.so. */ + return _dl_catch_error (objname, errstring, mallocedp, operate, args); +} + +__typeof (_dl_catch_error) *_dl_catch_error_ptr = _rtld_catch_error; +rtld_hidden_data_def (_dl_catch_error_ptr); + #endif /* DL_ERROR_BOOTSTRAP */ diff --git a/elf/dl-libc.c b/elf/dl-libc.c index ed551f6e5..e2236d87d 100644 --- a/elf/dl-libc.c +++ b/elf/dl-libc.c @@ -43,9 +43,15 @@ dlerror_run (void (*operate) (void *), void *args) const char *last_errstring = NULL; bool malloced; +#ifdef SHARED + int result = (_dl_catch_error_ptr (&objname, &last_errstring, &malloced, + operate, args) + ?: last_errstring != NULL); +#else int result = (_dl_catch_error (&objname, &last_errstring, &malloced, - operate, args) + operate, args) ?: last_errstring != NULL); +#endif if (result && malloced) free ((char *) last_errstring); diff --git a/elf/dl-load.c b/elf/dl-load.c index 9e2089cfa..2f760503c 100644 --- a/elf/dl-load.c +++ b/elf/dl-load.c @@ -758,50 +758,51 @@ _dl_init_paths (const char *llp, const char *source, max_dirnamelen = SYSTEM_DIRS_MAX_LEN; *aelem = NULL; -#ifdef SHARED - /* This points to the map of the main object. */ + /* This points to the map of the main object. If there is no main + object (e.g., under --help, use the dynamic loader itself as a + stand-in. */ l = GL(dl_ns)[LM_ID_BASE]._ns_loaded; - if (l != NULL) +#ifdef SHARED + if (l == NULL) + l = &GL (dl_rtld_map); +#endif + assert (l->l_type != lt_loaded); + + if (l->l_info[DT_RUNPATH]) + { + /* Allocate room for the search path and fill in information + from RUNPATH. */ + decompose_rpath (&l->l_runpath_dirs, + (const void *) (D_PTR (l, l_info[DT_STRTAB]) + + l->l_info[DT_RUNPATH]->d_un.d_val), + l, "RUNPATH"); + /* During rtld init the memory is allocated by the stub malloc, + prevent any attempt to free it by the normal malloc. */ + l->l_runpath_dirs.malloced = 0; + + /* The RPATH is ignored. */ + l->l_rpath_dirs.dirs = (void *) -1; + } + else { - assert (l->l_type != lt_loaded); + l->l_runpath_dirs.dirs = (void *) -1; - if (l->l_info[DT_RUNPATH]) + if (l->l_info[DT_RPATH]) { /* Allocate room for the search path and fill in information - from RUNPATH. */ - decompose_rpath (&l->l_runpath_dirs, + from RPATH. 
*/ + decompose_rpath (&l->l_rpath_dirs, (const void *) (D_PTR (l, l_info[DT_STRTAB]) - + l->l_info[DT_RUNPATH]->d_un.d_val), - l, "RUNPATH"); - /* During rtld init the memory is allocated by the stub malloc, - prevent any attempt to free it by the normal malloc. */ - l->l_runpath_dirs.malloced = 0; - - /* The RPATH is ignored. */ - l->l_rpath_dirs.dirs = (void *) -1; + + l->l_info[DT_RPATH]->d_un.d_val), + l, "RPATH"); + /* During rtld init the memory is allocated by the stub + malloc, prevent any attempt to free it by the normal + malloc. */ + l->l_rpath_dirs.malloced = 0; } else - { - l->l_runpath_dirs.dirs = (void *) -1; - - if (l->l_info[DT_RPATH]) - { - /* Allocate room for the search path and fill in information - from RPATH. */ - decompose_rpath (&l->l_rpath_dirs, - (const void *) (D_PTR (l, l_info[DT_STRTAB]) - + l->l_info[DT_RPATH]->d_un.d_val), - l, "RPATH"); - /* During rtld init the memory is allocated by the stub - malloc, prevent any attempt to free it by the normal - malloc. */ - l->l_rpath_dirs.malloced = 0; - } - else - l->l_rpath_dirs.dirs = (void *) -1; - } + l->l_rpath_dirs.dirs = (void *) -1; } -#endif /* SHARED */ if (llp != NULL && *llp != '\0') { diff --git a/elf/dl-main.h b/elf/dl-main.h index 3a5e13c73..d3820e006 100644 --- a/elf/dl-main.h +++ b/elf/dl-main.h @@ -63,7 +63,7 @@ struct audit_list enum rtld_mode { rtld_mode_normal, rtld_mode_list, rtld_mode_verify, rtld_mode_trace, - rtld_mode_list_tunables, rtld_mode_help, + rtld_mode_list_tunables, rtld_mode_list_diagnostics, rtld_mode_help, }; /* Aggregated state information extracted from environment variables @@ -121,4 +121,7 @@ _Noreturn void _dl_version (void) attribute_hidden; _Noreturn void _dl_help (const char *argv0, struct dl_main_state *state) attribute_hidden; +/* Print a diagnostics dump. */ +_Noreturn void _dl_print_diagnostics (char **environ) attribute_hidden; + #endif /* _DL_MAIN */ diff --git a/elf/dl-open.c b/elf/dl-open.c index ab7aaa345..1b965457c 100644 --- a/elf/dl-open.c +++ b/elf/dl-open.c @@ -881,7 +881,7 @@ no more namespaces available for dlmopen()")); /* Avoid keeping around a dangling reference to the libc.so link map in case it has been cached in libc_map. */ if (!args.libc_already_loaded) - GL(dl_ns)[nsid].libc_map = NULL; + GL(dl_ns)[args.nsid].libc_map = NULL; /* Remove the object from memory. It may be in an inconsistent state if relocation failed, for example. */ diff --git a/elf/dl-tunable-types.h b/elf/dl-tunable-types.h index 3fcc0806f..39bf738d9 100644 --- a/elf/dl-tunable-types.h +++ b/elf/dl-tunable-types.h @@ -38,8 +38,8 @@ typedef enum typedef struct { tunable_type_code_t type_code; - int64_t min; - int64_t max; + tunable_num_t min; + tunable_num_t max; } tunable_type_t; /* Security level for tunables. 
This decides what to do with individual @@ -81,4 +81,21 @@ struct _tunable typedef struct _tunable tunable_t; +static __always_inline bool +unsigned_tunable_type (tunable_type_code_t t) +{ + switch (t) + { + case TUNABLE_TYPE_INT_32: + return false; + case TUNABLE_TYPE_UINT_64: + case TUNABLE_TYPE_SIZE_T: + return true; + case TUNABLE_TYPE_STRING: + default: + break; + } + __builtin_unreachable (); +} + #endif diff --git a/elf/dl-tunables.c b/elf/dl-tunables.c index b1a50b846..8009e54ee 100644 --- a/elf/dl-tunables.c +++ b/elf/dl-tunables.c @@ -93,88 +93,49 @@ get_next_env (char **envp, char **name, size_t *namelen, char **val, return NULL; } -#define TUNABLE_SET_VAL_IF_VALID_RANGE(__cur, __val, __type) \ -({ \ - __type min = (__cur)->type.min; \ - __type max = (__cur)->type.max; \ - \ - if ((__type) (__val) >= min && (__type) (__val) <= max) \ - { \ - (__cur)->val.numval = (__val); \ - (__cur)->initialized = true; \ - } \ -}) - -#define TUNABLE_SET_BOUNDS_IF_VALID(__cur, __minp, __maxp, __type) \ -({ \ - if (__minp != NULL) \ - { \ - /* MIN is specified. */ \ - __type min = *((__type *) __minp); \ - if (__maxp != NULL) \ - { \ - /* Both MIN and MAX are specified. */ \ - __type max = *((__type *) __maxp); \ - if (max >= min \ - && max <= (__cur)->type.max \ - && min >= (__cur)->type.min) \ - { \ - (__cur)->type.min = min; \ - (__cur)->type.max = max; \ - } \ - } \ - else if (min > (__cur)->type.min && min <= (__cur)->type.max) \ - { \ - /* Only MIN is specified. */ \ - (__cur)->type.min = min; \ - } \ - } \ - else if (__maxp != NULL) \ - { \ - /* Only MAX is specified. */ \ - __type max = *((__type *) __maxp); \ - if (max < (__cur)->type.max && max >= (__cur)->type.min) \ - (__cur)->type.max = max; \ - } \ -}) - static void -do_tunable_update_val (tunable_t *cur, const void *valp, - const void *minp, const void *maxp) +do_tunable_update_val (tunable_t *cur, const tunable_val_t *valp, + const tunable_num_t *minp, + const tunable_num_t *maxp) { - uint64_t val; + tunable_num_t val, min, max; - if (cur->type.type_code != TUNABLE_TYPE_STRING) - val = *((int64_t *) valp); + if (cur->type.type_code == TUNABLE_TYPE_STRING) + { + cur->val.strval = valp->strval; + cur->initialized = true; + return; + } - switch (cur->type.type_code) + bool unsigned_cmp = unsigned_tunable_type (cur->type.type_code); + + val = valp->numval; + min = minp != NULL ? *minp : cur->type.min; + max = maxp != NULL ? *maxp : cur->type.max; + + /* We allow only increasingly restrictive bounds. */ + if (tunable_val_lt (min, cur->type.min, unsigned_cmp)) + min = cur->type.min; + + if (tunable_val_gt (max, cur->type.max, unsigned_cmp)) + max = cur->type.max; + + /* Skip both bounds if they're inconsistent. */ + if (tunable_val_gt (min, max, unsigned_cmp)) { - case TUNABLE_TYPE_INT_32: - { - TUNABLE_SET_BOUNDS_IF_VALID (cur, minp, maxp, int64_t); - TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, int64_t); - break; - } - case TUNABLE_TYPE_UINT_64: - { - TUNABLE_SET_BOUNDS_IF_VALID (cur, minp, maxp, uint64_t); - TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, uint64_t); - break; - } - case TUNABLE_TYPE_SIZE_T: - { - TUNABLE_SET_BOUNDS_IF_VALID (cur, minp, maxp, uint64_t); - TUNABLE_SET_VAL_IF_VALID_RANGE (cur, val, uint64_t); - break; - } - case TUNABLE_TYPE_STRING: - { - cur->val.strval = valp; - break; - } - default: - __builtin_unreachable (); + min = cur->type.min; + max = cur->type.max; } + + /* Bail out if the bounds are not valid. 
*/ + if (tunable_val_lt (val, min, unsigned_cmp) + || tunable_val_lt (max, val, unsigned_cmp)) + return; + + cur->val.numval = val; + cur->type.min = min; + cur->type.max = max; + cur->initialized = true; } /* Validate range of the input value and initialize the tunable CUR if it looks @@ -182,24 +143,18 @@ do_tunable_update_val (tunable_t *cur, const void *valp, static void tunable_initialize (tunable_t *cur, const char *strval) { - uint64_t val; - const void *valp; + tunable_val_t val; if (cur->type.type_code != TUNABLE_TYPE_STRING) - { - val = _dl_strtoul (strval, NULL); - valp = &val; - } + val.numval = (tunable_num_t) _dl_strtoul (strval, NULL); else - { - cur->initialized = true; - valp = strval; - } - do_tunable_update_val (cur, valp, NULL, NULL); + val.strval = strval; + do_tunable_update_val (cur, &val, NULL, NULL); } void -__tunable_set_val (tunable_id_t id, void *valp, void *minp, void *maxp) +__tunable_set_val (tunable_id_t id, tunable_val_t *valp, tunable_num_t *minp, + tunable_num_t *maxp) { tunable_t *cur = &tunable_list[id]; @@ -219,6 +174,7 @@ parse_tunables (char *tunestr, char *valstring) return; char *p = tunestr; + size_t off = 0; while (true) { @@ -232,7 +188,11 @@ parse_tunables (char *tunestr, char *valstring) /* If we reach the end of the string before getting a valid name-value pair, bail out. */ if (p[len] == '\0') - return; + { + if (__libc_enable_secure) + tunestr[off] = '\0'; + return; + } /* We did not find a valid name-value pair before encountering the colon. */ @@ -258,35 +218,28 @@ parse_tunables (char *tunestr, char *valstring) if (tunable_is_name (cur->name, name)) { - /* If we are in a secure context (AT_SECURE) then ignore the tunable - unless it is explicitly marked as secure. Tunable values take - precedence over their envvar aliases. */ + /* If we are in a secure context (AT_SECURE) then ignore the + tunable unless it is explicitly marked as secure. Tunable + values take precedence over their envvar aliases. We write + the tunables that are not SXID_ERASE back to TUNESTR, thus + dropping all SXID_ERASE tunables and any invalid or + unrecognized tunables. */ if (__libc_enable_secure) { - if (cur->security_level == TUNABLE_SECLEVEL_SXID_ERASE) + if (cur->security_level != TUNABLE_SECLEVEL_SXID_ERASE) { - if (p[len] == '\0') - { - /* Last tunable in the valstring. Null-terminate and - return. */ - *name = '\0'; - return; - } - else - { - /* Remove the current tunable from the string. We do - this by overwriting the string starting from NAME - (which is where the current tunable begins) with - the remainder of the string. We then have P point - to NAME so that we continue in the correct - position in the valstring. 
*/ - char *q = &p[len + 1]; - p = name; - while (*q != '\0') - *name++ = *q++; - name[0] = '\0'; - len = 0; - } + if (off > 0) + tunestr[off++] = ':'; + + const char *n = cur->name; + + while (*n != '\0') + tunestr[off++] = *n++; + + tunestr[off++] = '='; + + for (size_t j = 0; j < len; j++) + tunestr[off++] = value[j]; } if (cur->security_level != TUNABLE_SECLEVEL_NONE) @@ -299,9 +252,7 @@ parse_tunables (char *tunestr, char *valstring) } } - if (p[len] == '\0') - return; - else + if (p[len] != '\0') p += len + 1; } } diff --git a/elf/dl-tunables.h b/elf/dl-tunables.h index 971376ba8..3880e4aab 100644 --- a/elf/dl-tunables.h +++ b/elf/dl-tunables.h @@ -33,9 +33,11 @@ __tunables_init (char **unused __attribute__ ((unused))) # include # include +typedef intmax_t tunable_num_t; + typedef union { - int64_t numval; + tunable_num_t numval; const char *strval; } tunable_val_t; @@ -52,7 +54,8 @@ typedef void (*tunable_callback_t) (tunable_val_t *); extern void __tunables_init (char **); extern void __tunables_print (void); extern void __tunable_get_val (tunable_id_t, void *, tunable_callback_t); -extern void __tunable_set_val (tunable_id_t, void *, void *, void *); +extern void __tunable_set_val (tunable_id_t, tunable_val_t *, tunable_num_t *, + tunable_num_t *); rtld_hidden_proto (__tunables_init) rtld_hidden_proto (__tunables_print) rtld_hidden_proto (__tunable_get_val) @@ -64,20 +67,18 @@ rtld_hidden_proto (__tunable_set_val) #if defined TOP_NAMESPACE && defined TUNABLE_NAMESPACE # define TUNABLE_GET(__id, __type, __cb) \ TUNABLE_GET_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __type, __cb) -# define TUNABLE_SET(__id, __type, __val) \ - TUNABLE_SET_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __type, __val) -# define TUNABLE_SET_WITH_BOUNDS(__id, __type, __val, __min, __max) \ +# define TUNABLE_SET(__id, __val) \ + TUNABLE_SET_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, __val) +# define TUNABLE_SET_WITH_BOUNDS(__id, __val, __min, __max) \ TUNABLE_SET_WITH_BOUNDS_FULL (TOP_NAMESPACE, TUNABLE_NAMESPACE, __id, \ - __type, __val, __min, __max) + __val, __min, __max) #else # define TUNABLE_GET(__top, __ns, __id, __type, __cb) \ TUNABLE_GET_FULL (__top, __ns, __id, __type, __cb) -# define TUNABLE_SET(__top, __ns, __id, __type, __val) \ - TUNABLE_SET_FULL (__top, __ns, __id, __type, __val) -# define TUNABLE_SET_WITH_BOUNDS(__top, __ns, __id, __type, __val, \ - __min, __max) \ - TUNABLE_SET_WITH_BOUNDS_FULL (__top, __ns, __id, __type, __val, \ - __min, __max) +# define TUNABLE_SET(__top, __ns, __id, __val) \ + TUNABLE_SET_FULL (__top, __ns, __id, __val) +# define TUNABLE_SET_WITH_BOUNDS(__top, __ns, __id, __val, __min, __max) \ + TUNABLE_SET_WITH_BOUNDS_FULL (__top, __ns, __id, __val, __min, __max) #endif /* Get and return a tunable value. If the tunable was set externally and __CB @@ -91,19 +92,19 @@ rtld_hidden_proto (__tunable_set_val) }) /* Set a tunable value. */ -# define TUNABLE_SET_FULL(__top, __ns, __id, __type, __val) \ +# define TUNABLE_SET_FULL(__top, __ns, __id, __val) \ ({ \ __tunable_set_val (TUNABLE_ENUM_NAME (__top, __ns, __id), \ - & (__type) {__val}, NULL, NULL); \ + & (tunable_val_t) {.numval = __val}, NULL, NULL); \ }) /* Set a tunable value together with min/max values. 
*/ -# define TUNABLE_SET_WITH_BOUNDS_FULL(__top, __ns, __id, __type, __val, \ - __min, __max) \ +# define TUNABLE_SET_WITH_BOUNDS_FULL(__top, __ns, __id,__val, __min, __max) \ ({ \ __tunable_set_val (TUNABLE_ENUM_NAME (__top, __ns, __id), \ - & (__type) {__val}, & (__type) {__min}, \ - & (__type) {__max}); \ + & (tunable_val_t) {.numval = __val}, \ + & (tunable_num_t) {__min}, \ + & (tunable_num_t) {__max}); \ }) /* Namespace sanity for callback functions. Use this macro to keep the @@ -114,6 +115,24 @@ rtld_hidden_proto (__tunable_set_val) /* The default value for TUNABLES_FRONTEND. */ # define TUNABLES_FRONTEND_yes TUNABLES_FRONTEND_valstring +static __always_inline bool +tunable_val_lt (tunable_num_t lhs, tunable_num_t rhs, bool unsigned_cmp) +{ + if (unsigned_cmp) + return (uintmax_t) lhs < (uintmax_t) rhs; + else + return lhs < rhs; +} + +static __always_inline bool +tunable_val_gt (tunable_num_t lhs, tunable_num_t rhs, bool unsigned_cmp) +{ + if (unsigned_cmp) + return (uintmax_t) lhs > (uintmax_t) rhs; + else + return lhs > rhs; +} + /* Compare two name strings, bounded by the name hardcoded in glibc. */ static __always_inline bool tunable_is_name (const char *orig, const char *envname) diff --git a/elf/dl-tunables.list b/elf/dl-tunables.list index 3cf0ad83e..8ddd4a231 100644 --- a/elf/dl-tunables.list +++ b/elf/dl-tunables.list @@ -64,6 +64,7 @@ glibc { type: INT_32 env_alias: MALLOC_MMAP_MAX_ security_level: SXID_IGNORE + minval: 0 } arena_max { type: SIZE_T @@ -109,22 +110,27 @@ glibc { skip_lock_busy { type: INT_32 default: 3 + minval: 0 } skip_lock_internal_abort { type: INT_32 default: 3 + minval: 0 } skip_lock_after_retries { type: INT_32 default: 3 + minval: 0 } tries { type: INT_32 default: 3 + minval: 0 } skip_trylock_internal_abort { type: INT_32 default: 3 + minval: 0 } } diff --git a/elf/dl-usage.c b/elf/dl-usage.c index 6e26818bd..5ad3a7255 100644 --- a/elf/dl-usage.c +++ b/elf/dl-usage.c @@ -261,6 +261,7 @@ setting environment variables (which would be inherited by subprocesses).\n\ --list-tunables list all tunables with minimum and maximum values\n" #endif "\ + --list-diagnostics list diagnostics information\n\ --help display this help and exit\n\ --version output version information and exit\n\ \n\ diff --git a/elf/rtld.c b/elf/rtld.c index 596b6ac3d..489e58c55 100644 --- a/elf/rtld.c +++ b/elf/rtld.c @@ -141,6 +141,7 @@ static void dl_main_state_init (struct dl_main_state *state); /* Process all environments variables the dynamic linker must recognize. Since all of them start with `LD_' we are a bit smarter while finding all the entries. */ +extern char **_environ attribute_hidden; static void process_envvars (struct dl_main_state *state); #ifdef DL_ARGV_NOT_RELRO @@ -379,7 +380,6 @@ struct rtld_global_ro _rtld_global_ro attribute_relro = extern struct rtld_global_ro _rtld_local_ro __attribute__ ((alias ("_rtld_global_ro"), visibility ("hidden"))); - static void dl_main (const ElfW(Phdr) *phdr, ElfW(Word) phnum, ElfW(Addr) *user_entry, ElfW(auxv_t) *auxv); @@ -1287,6 +1287,14 @@ dl_main (const ElfW(Phdr) *phdr, ++_dl_argv; } #endif + else if (! 
strcmp (_dl_argv[1], "--list-diagnostics")) + { + state.mode = rtld_mode_list_diagnostics; + + ++_dl_skip_args; + --_dl_argc; + ++_dl_argv; + } else if (strcmp (_dl_argv[1], "--help") == 0) { state.mode = rtld_mode_help; @@ -1315,6 +1323,9 @@ dl_main (const ElfW(Phdr) *phdr, } #endif + if (state.mode == rtld_mode_list_diagnostics) + _dl_print_diagnostics (_environ); + /* If we have no further argument the program was called incorrectly. Grant the user some education. */ if (_dl_argc < 2) @@ -2649,12 +2660,6 @@ a filename can be specified using the LD_DEBUG_OUTPUT environment variable.\n"); } } -/* Process all environments variables the dynamic linker must recognize. - Since all of them start with `LD_' we are a bit smarter while finding - all the entries. */ -extern char **_environ attribute_hidden; - - static void process_envvars (struct dl_main_state *state) { diff --git a/elf/tst-dlmopen-dlerror-mod.c b/elf/tst-dlmopen-dlerror-mod.c new file mode 100644 index 000000000..7e95dcdea --- /dev/null +++ b/elf/tst-dlmopen-dlerror-mod.c @@ -0,0 +1,41 @@ +/* Check that dlfcn errors are reported properly after dlmopen. Test module. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include + +/* Note: This object is not linked into the main program, so we cannot + use delayed test failure reporting via TEST_VERIFY etc., and have + to use FAIL_EXIT1 (or something else that calls exit). */ + +void +call_dlsym (void) +{ + void *ptr = dlsym (NULL, "does not exist"); + if (ptr != NULL) + FAIL_EXIT1 ("dlsym did not fail as expected"); +} + +void +call_dlopen (void) +{ + void *handle = dlopen ("tst-dlmopen-dlerror does not exist", RTLD_NOW); + if (handle != NULL) + FAIL_EXIT1 ("dlopen did not fail as expected"); +} diff --git a/elf/tst-dlmopen-dlerror.c b/elf/tst-dlmopen-dlerror.c new file mode 100644 index 000000000..e864d2fe4 --- /dev/null +++ b/elf/tst-dlmopen-dlerror.c @@ -0,0 +1,37 @@ +/* Check that dlfcn errors are reported properly after dlmopen. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include <stddef.h> +#include <support/check.h> +#include <support/xdlfcn.h> + +static int +do_test (void) +{ + void *handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-dlerror-mod.so", + RTLD_NOW); + void (*call_dlsym) (void) = xdlsym (handle, "call_dlsym"); + void (*call_dlopen) (void) = xdlsym (handle, "call_dlopen"); + + call_dlsym (); + call_dlopen (); + + return 0; +} + +#include <support/test-driver.c> diff --git a/elf/tst-dlmopen-gethostbyname-mod.c b/elf/tst-dlmopen-gethostbyname-mod.c new file mode 100644 index 000000000..9a68ea505 --- /dev/null +++ b/elf/tst-dlmopen-gethostbyname-mod.c @@ -0,0 +1,29 @@ +/* Exercise dlerror_run in elf/dl-libc.c after dlmopen, via NSS. Helper module. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#include <netdb.h> +#include <nss.h> + +void +call_gethostbyname (void) +{ + __nss_configure_lookup ("hosts", "files"); + /* This should not terminate the process due to a missing + _nss_files_getcanonname_r symbol. */ + gethostbyname ("localhost"); +} diff --git a/elf/tst-dlmopen-gethostbyname.c b/elf/tst-dlmopen-gethostbyname.c new file mode 100644 index 000000000..12deb2990 --- /dev/null +++ b/elf/tst-dlmopen-gethostbyname.c @@ -0,0 +1,31 @@ +/* Exercise dlerror_run in elf/dl-libc.c after dlmopen, via NSS (bug 27646). + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#include <support/xdlfcn.h> + +static int +do_test (void) +{ + void *handle = xdlmopen (LM_ID_NEWLM, "tst-dlmopen-gethostbyname-mod.so", + RTLD_NOW); + void (*call_gethostbyname) (void) = xdlsym (handle, "call_gethostbyname"); + call_gethostbyname (); + return 0; +} + +#include <support/test-driver.c> diff --git a/elf/tst-dst-static.c b/elf/tst-dst-static.c new file mode 100644 index 000000000..56eb371c9 --- /dev/null +++ b/elf/tst-dst-static.c @@ -0,0 +1,32 @@ +/* Test DST expansion for static binaries doesn't crash. Bug 23462. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version.
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +/* The purpose of this test is to exercise the code in elf/dl-load.c + (_dl_init_paths) or thereabout and ensure that static binaries + don't crash when expanding DSTs. + + If the dynamic loader code linked into the static binary cannot + handle expanding the DSTs e.g. null-deref on an incomplete link + map, then it will crash before reaching main, so the test harness + is unnecessary. */ + +int +main (void) +{ + return 0; +} diff --git a/elf/tst-env-setuid-tunables.c b/elf/tst-env-setuid-tunables.c index 50bef8683..05619c9ad 100644 --- a/elf/tst-env-setuid-tunables.c +++ b/elf/tst-env-setuid-tunables.c @@ -25,35 +25,76 @@ #include "config.h" #undef _LIBC -#define test_parent test_parent_tunables -#define test_child test_child_tunables - -static int test_child_tunables (void); -static int test_parent_tunables (void); - -#include "tst-env-setuid.c" - -#define CHILD_VALSTRING_VALUE "glibc.malloc.mmap_threshold=4096" -#define PARENT_VALSTRING_VALUE \ - "glibc.malloc.check=2:glibc.malloc.mmap_threshold=4096" +#include <errno.h> +#include <fcntl.h> +#include <stdlib.h> +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <sys/stat.h> +#include <sys/wait.h> +#include <unistd.h> +#include <intprops.h> +#include <array_length.h> + +#include <support/check.h> +#include <support/support.h> +#include <support/test-driver.h> +#include <support/capture_subprocess.h> + +const char *teststrings[] = +{ + "glibc.malloc.check=2:glibc.malloc.mmap_threshold=4096", + "glibc.malloc.check=2:glibc.malloc.check=2:glibc.malloc.mmap_threshold=4096", + "glibc.malloc.check=2:glibc.malloc.mmap_threshold=4096:glibc.malloc.check=2", + "glibc.malloc.perturb=0x800", + "glibc.malloc.perturb=0x800:glibc.malloc.mmap_threshold=4096", + "glibc.malloc.perturb=0x800:not_valid.malloc.check=2:glibc.malloc.mmap_threshold=4096", + "glibc.not_valid.check=2:glibc.malloc.mmap_threshold=4096", + "not_valid.malloc.check=2:glibc.malloc.mmap_threshold=4096", + "glibc.malloc.garbage=2:glibc.maoc.mmap_threshold=4096:glibc.malloc.check=2", + "glibc.malloc.check=4:glibc.malloc.garbage=2:glibc.maoc.mmap_threshold=4096", + ":glibc.malloc.garbage=2:glibc.malloc.check=1", + "glibc.malloc.check=1:glibc.malloc.check=2", + "not_valid.malloc.check=2", + "glibc.not_valid.check=2", +}; + +const char *resultstrings[] = +{ + "glibc.malloc.mmap_threshold=4096", + "glibc.malloc.mmap_threshold=4096", + "glibc.malloc.mmap_threshold=4096", + "glibc.malloc.perturb=0x800", + "glibc.malloc.perturb=0x800:glibc.malloc.mmap_threshold=4096", + "glibc.malloc.perturb=0x800:glibc.malloc.mmap_threshold=4096", + "glibc.malloc.mmap_threshold=4096", + "glibc.malloc.mmap_threshold=4096", + "", + "", + "", + "", + "", + "", +}; static int -test_child_tunables (void) +test_child (int off) { const char *val = getenv ("GLIBC_TUNABLES"); #if HAVE_TUNABLES - if (val != NULL && strcmp (val, CHILD_VALSTRING_VALUE) == 0) + if (val != NULL && strcmp (val, resultstrings[off]) == 0) return 0; if (val != NULL) - printf ("Unexpected GLIBC_TUNABLES VALUE %s\n", val); + printf ("[%d] Unexpected GLIBC_TUNABLES VALUE %s\n", off, val); return 1; #else if (val != NULL) { - printf ("GLIBC_TUNABLES not cleared\n"); + printf ("[%d] GLIBC_TUNABLES not cleared\n", off); return 1; } return 0; @@ -61,15 +102,48 @@ test_child_tunables (void) } static int -test_parent_tunables (void) +do_test (int argc, char **argv) { - const
char *val = getenv ("GLIBC_TUNABLES"); + /* Setgid child process. */ + if (argc == 2) + { + if (getgid () == getegid ()) + /* This can happen if the file system is mounted nosuid. */ + FAIL_UNSUPPORTED ("SGID failed: GID and EGID match (%jd)\n", + (intmax_t) getgid ()); - if (val != NULL && strcmp (val, PARENT_VALSTRING_VALUE) == 0) - return 0; + int ret = test_child (atoi (argv[1])); - if (val != NULL) - printf ("Unexpected GLIBC_TUNABLES VALUE %s\n", val); + if (ret != 0) + exit (1); - return 1; + exit (EXIT_SUCCESS); + } + else + { + int ret = 0; + + /* Spawn tests. */ + for (int i = 0; i < array_length (teststrings); i++) + { + char buf[INT_BUFSIZE_BOUND (int)]; + + printf ("Spawned test for %s (%d)\n", teststrings[i], i); + snprintf (buf, sizeof (buf), "%d\n", i); + if (setenv ("GLIBC_TUNABLES", teststrings[i], 1) != 0) + exit (1); + + int status = support_capture_subprogram_self_sgid (buf); + + /* Bail out early if unsupported. */ + if (WEXITSTATUS (status) == EXIT_UNSUPPORTED) + return EXIT_UNSUPPORTED; + + ret |= status; + } + return ret; + } } + +#define TEST_FUNCTION_ARGV do_test +#include diff --git a/elf/tst-env-setuid.c b/elf/tst-env-setuid.c index 60ae0ca38..49b5e319e 100644 --- a/elf/tst-env-setuid.c +++ b/elf/tst-env-setuid.c @@ -29,173 +29,12 @@ #include #include +#include #include #include +#include static char SETGID_CHILD[] = "setgid-child"; -#define CHILD_STATUS 42 - -/* Return a GID which is not our current GID, but is present in the - supplementary group list. */ -static gid_t -choose_gid (void) -{ - const int count = 64; - gid_t groups[count]; - int ret = getgroups (count, groups); - if (ret < 0) - { - printf ("getgroups: %m\n"); - exit (1); - } - gid_t current = getgid (); - for (int i = 0; i < ret; ++i) - { - if (groups[i] != current) - return groups[i]; - } - return 0; -} - -/* Spawn and execute a program and verify that it returns the CHILD_STATUS. */ -static pid_t -do_execve (char **args) -{ - pid_t kid = vfork (); - - if (kid < 0) - { - printf ("vfork: %m\n"); - return -1; - } - - if (kid == 0) - { - /* Child process. */ - execve (args[0], args, environ); - _exit (-errno); - } - - if (kid < 0) - return 1; - - int status; - - if (waitpid (kid, &status, 0) < 0) - { - printf ("waitpid: %m\n"); - return 1; - } - - if (WEXITSTATUS (status) == EXIT_UNSUPPORTED) - return EXIT_UNSUPPORTED; - - if (!WIFEXITED (status) || WEXITSTATUS (status) != CHILD_STATUS) - { - printf ("Unexpected exit status %d from child process\n", - WEXITSTATUS (status)); - return 1; - } - return 0; -} - -/* Copies the executable into a restricted directory, so that we can - safely make it SGID with the TARGET group ID. Then runs the - executable. 
*/ -static int -run_executable_sgid (gid_t target) -{ - char *dirname = xasprintf ("%s/tst-tunables-setuid.%jd", - test_dir, (intmax_t) getpid ()); - char *execname = xasprintf ("%s/bin", dirname); - int infd = -1; - int outfd = -1; - int ret = 0; - if (mkdir (dirname, 0700) < 0) - { - printf ("mkdir: %m\n"); - goto err; - } - infd = open ("/proc/self/exe", O_RDONLY); - if (infd < 0) - { - printf ("open (/proc/self/exe): %m\n"); - goto err; - } - outfd = open (execname, O_WRONLY | O_CREAT | O_EXCL, 0700); - if (outfd < 0) - { - printf ("open (%s): %m\n", execname); - goto err; - } - char buf[4096]; - for (;;) - { - ssize_t rdcount = read (infd, buf, sizeof (buf)); - if (rdcount < 0) - { - printf ("read: %m\n"); - goto err; - } - if (rdcount == 0) - break; - char *p = buf; - char *end = buf + rdcount; - while (p != end) - { - ssize_t wrcount = write (outfd, buf, end - p); - if (wrcount == 0) - errno = ENOSPC; - if (wrcount <= 0) - { - printf ("write: %m\n"); - goto err; - } - p += wrcount; - } - } - if (fchown (outfd, getuid (), target) < 0) - { - printf ("fchown (%s): %m\n", execname); - goto err; - } - if (fchmod (outfd, 02750) < 0) - { - printf ("fchmod (%s): %m\n", execname); - goto err; - } - if (close (outfd) < 0) - { - printf ("close (outfd): %m\n"); - goto err; - } - if (close (infd) < 0) - { - printf ("close (infd): %m\n"); - goto err; - } - - char *args[] = {execname, SETGID_CHILD, NULL}; - - ret = do_execve (args); - -err: - if (outfd >= 0) - close (outfd); - if (infd >= 0) - close (infd); - if (execname) - { - unlink (execname); - free (execname); - } - if (dirname) - { - rmdir (dirname); - free (dirname); - } - return ret; -} #ifndef test_child static int @@ -256,40 +95,32 @@ do_test (int argc, char **argv) if (argc == 2 && strcmp (argv[1], SETGID_CHILD) == 0) { if (getgid () == getegid ()) - { - /* This can happen if the file system is mounted nosuid. */ - fprintf (stderr, "SGID failed: GID and EGID match (%jd)\n", - (intmax_t) getgid ()); - exit (EXIT_UNSUPPORTED); - } + /* This can happen if the file system is mounted nosuid. */ + FAIL_UNSUPPORTED ("SGID failed: GID and EGID match (%jd)\n", + (intmax_t) getgid ()); int ret = test_child (); if (ret != 0) exit (1); - exit (CHILD_STATUS); + exit (EXIT_SUCCESS); } else { if (test_parent () != 0) exit (1); - /* Try running a setgid program. */ - gid_t target = choose_gid (); - if (target == 0) - { - fprintf (stderr, - "Could not find a suitable GID for user %jd, skipping test\n", - (intmax_t) getuid ()); - exit (0); - } + int status = support_capture_subprogram_self_sgid (SETGID_CHILD); - return run_executable_sgid (target); - } + if (WEXITSTATUS (status) == EXIT_UNSUPPORTED) + return EXIT_UNSUPPORTED; + + if (!WIFEXITED (status)) + FAIL_EXIT1 ("Unexpected exit status %d from child process\n", status); - /* Something went wrong and our argv was corrupted. 
*/ - _exit (1); + return 0; + } } #define TEST_FUNCTION_ARGV do_test diff --git a/elf/tst-rtld-list-tunables.exp b/elf/tst-rtld-list-tunables.exp index 4f3f7ee4e..9f66c5288 100644 --- a/elf/tst-rtld-list-tunables.exp +++ b/elf/tst-rtld-list-tunables.exp @@ -1,7 +1,7 @@ glibc.malloc.arena_max: 0x0 (min: 0x1, max: 0x[f]+) glibc.malloc.arena_test: 0x0 (min: 0x1, max: 0x[f]+) glibc.malloc.check: 0 (min: 0, max: 3) -glibc.malloc.mmap_max: 0 (min: -2147483648, max: 2147483647) +glibc.malloc.mmap_max: 0 (min: 0, max: 2147483647) glibc.malloc.mmap_threshold: 0x0 (min: 0x0, max: 0x[f]+) glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0x[f]+) glibc.malloc.perturb: 0 (min: 0, max: 255) diff --git a/iconvdata/Makefile b/iconvdata/Makefile index 55c527a5f..2612d2557 100644 --- a/iconvdata/Makefile +++ b/iconvdata/Makefile @@ -1,4 +1,5 @@ # Copyright (C) 1997-2021 Free Software Foundation, Inc. +# Copyright (C) The GNU Toolchain Authors. # This file is part of the GNU C Library. # The GNU C Library is free software; you can redistribute it and/or @@ -74,7 +75,7 @@ ifeq (yes,$(build-shared)) tests = bug-iconv1 bug-iconv2 tst-loading tst-e2big tst-iconv4 bug-iconv4 \ tst-iconv6 bug-iconv5 bug-iconv6 tst-iconv7 bug-iconv8 bug-iconv9 \ bug-iconv10 bug-iconv11 bug-iconv12 tst-iconv-big5-hkscs-to-2ucs4 \ - bug-iconv13 bug-iconv14 + bug-iconv13 bug-iconv14 bug-iconv15 ifeq ($(have-thread-library),yes) tests += bug-iconv3 endif @@ -324,6 +325,8 @@ $(objpfx)bug-iconv12.out: $(objpfx)gconv-modules \ $(addprefix $(objpfx),$(modules.so)) $(objpfx)bug-iconv14.out: $(objpfx)gconv-modules \ $(addprefix $(objpfx),$(modules.so)) +$(objpfx)bug-iconv15.out: $(addprefix $(objpfx), $(gconv-modules)) \ + $(addprefix $(objpfx),$(modules.so)) $(objpfx)iconv-test.out: run-iconv-test.sh $(objpfx)gconv-modules \ $(addprefix $(objpfx),$(modules.so)) \ diff --git a/iconvdata/bug-iconv15.c b/iconvdata/bug-iconv15.c new file mode 100644 index 000000000..cc04bd031 --- /dev/null +++ b/iconvdata/bug-iconv15.c @@ -0,0 +1,60 @@ +/* Bug 28524: Conversion from ISO-2022-JP-3 with iconv + may emit spurious NUL character on state reset. + Copyright (C) The GNU Toolchain Authors. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include + +static int +do_test (void) +{ + char in[] = "\x1b(I"; + char *inbuf = in; + size_t inleft = sizeof (in) - 1; + char out[1]; + char *outbuf = out; + size_t outleft = sizeof (out); + iconv_t cd; + + cd = iconv_open ("UTF8", "ISO-2022-JP-3"); + TEST_VERIFY_EXIT (cd != (iconv_t) -1); + + /* First call to iconv should alter internal state. + Now, JISX0201_Kana_set is selected and + state value != ASCII_set. */ + TEST_VERIFY (iconv (cd, &inbuf, &inleft, &outbuf, &outleft) != (size_t) -1); + + /* No bytes should have been added to + the output buffer at this point. 
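+ Both the output pointer and the remaining-space counter must be unchanged.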
*/ + TEST_VERIFY (outbuf == out); + TEST_VERIFY (outleft == sizeof (out)); + + /* Second call shall emit spurious NUL character in unpatched glibc. */ + TEST_VERIFY (iconv (cd, NULL, NULL, &outbuf, &outleft) != (size_t) -1); + + /* No characters are expected to be produced. */ + TEST_VERIFY (outbuf == out); + TEST_VERIFY (outleft == sizeof (out)); + + TEST_VERIFY_EXIT (iconv_close (cd) != -1); + + return 0; +} + +#include diff --git a/iconvdata/iso-2022-jp-3.c b/iconvdata/iso-2022-jp-3.c index c8ba88cdc..5fc0c0f73 100644 --- a/iconvdata/iso-2022-jp-3.c +++ b/iconvdata/iso-2022-jp-3.c @@ -1,5 +1,6 @@ /* Conversion module for ISO-2022-JP-3. Copyright (C) 1998-2021 Free Software Foundation, Inc. + Copyright (C) The GNU Toolchain Authors. This file is part of the GNU C Library. Contributed by Ulrich Drepper , 1998, and Bruno Haible , 2002. @@ -81,20 +82,31 @@ enum the output state to the initial state. This has to be done during the flushing. */ #define EMIT_SHIFT_TO_INIT \ - if (data->__statep->__count != ASCII_set) \ + if ((data->__statep->__count & ~7) != ASCII_set) \ { \ if (FROM_DIRECTION) \ { \ - if (__glibc_likely (outbuf + 4 <= outend)) \ + uint32_t ch = data->__statep->__count >> 6; \ + \ + if (__glibc_unlikely (ch != 0)) \ { \ - /* Write out the last character. */ \ - *((uint32_t *) outbuf) = data->__statep->__count >> 6; \ - outbuf += sizeof (uint32_t); \ - data->__statep->__count = ASCII_set; \ + if (__glibc_likely (outbuf + 4 <= outend)) \ + { \ + /* Write out the last character. */ \ + put32u (outbuf, ch); \ + outbuf += 4; \ + data->__statep->__count &= 7; \ + data->__statep->__count |= ASCII_set; \ + } \ + else \ + /* We don't have enough room in the output buffer. */ \ + status = __GCONV_FULL_OUTPUT; \ } \ else \ - /* We don't have enough room in the output buffer. */ \ - status = __GCONV_FULL_OUTPUT; \ + { \ + data->__statep->__count &= 7; \ + data->__statep->__count |= ASCII_set; \ + } \ } \ else \ { \ diff --git a/include/libc-symbols.h b/include/libc-symbols.h index ea126ae70..c83e550b0 100644 --- a/include/libc-symbols.h +++ b/include/libc-symbols.h @@ -59,6 +59,19 @@ # define IN_MODULE (-1) #endif +/* Use symbol_version_reference to specify the version a symbol + reference should link to. Use symbol_version or + default_symbol_version for the definition of a versioned symbol. + The difference is that the latter is a no-op in non-shared + builds. */ +#ifdef __ASSEMBLER__ +# define symbol_version_reference(real, name, version) \ + .symver real, name##@##version +#else /* !__ASSEMBLER__ */ +# define symbol_version_reference(real, name, version) \ + __asm__ (".symver " #real "," #name "@" #version) +#endif + #ifndef _ISOMAC /* This is defined for the compilation of all C library code. features.h @@ -396,19 +409,6 @@ for linking") past the last element in SET. */ #define symbol_set_end_p(set, ptr) ((ptr) >= (void *const *) &__stop_##set) -/* Use symbol_version_reference to specify the version a symbol - reference should link to. Use symbol_version or - default_symbol_version for the definition of a versioned symbol. - The difference is that the latter is a no-op in non-shared - builds. 
*/ -#ifdef __ASSEMBLER__ -# define symbol_version_reference(real, name, version) \ - .symver real, name##@##version -#else /* !__ASSEMBLER__ */ -# define symbol_version_reference(real, name, version) \ - __asm__ (".symver " #real "," #name "@" #version) -#endif - #ifdef SHARED # define symbol_version(real, name, version) \ symbol_version_reference(real, name, version) diff --git a/include/sys/un.h b/include/sys/un.h index bdbee9998..152afd9fc 100644 --- a/include/sys/un.h +++ b/include/sys/un.h @@ -1 +1,13 @@ #include <socket/sys/un.h> + +#ifndef _ISOMAC + +/* Set ADDR->sun_family to AF_UNIX and ADDR->sun_path to PATHNAME. + Return 0 on success or -1 on failure (due to overlong PATHNAME). + The caller should always use sizeof (struct sockaddr_un) as the + socket address length, disregarding the length of PATHNAME. + Only concrete (non-abstract) pathnames are supported. */ +int __sockaddr_un_set (struct sockaddr_un *addr, const char *pathname) + attribute_hidden; + +#endif /* _ISOMAC */ diff --git a/include/time.h b/include/time.h index caf2af5e7..e0636132a 100644 --- a/include/time.h +++ b/include/time.h @@ -502,6 +502,11 @@ time_now (void) __clock_gettime (TIME_CLOCK_GETTIME_CLOCKID, &ts); return ts.tv_sec; } + +#define NSEC_PER_SEC 1000000000L /* Nanoseconds per second. */ +#define USEC_PER_SEC 1000000L /* Microseconds per second. */ +#define NSEC_PER_USEC 1000L /* Nanoseconds per microsecond. */ + #endif #endif diff --git a/io/Makefile b/io/Makefile index b7bebe923..d145d88f4 100644 --- a/io/Makefile +++ b/io/Makefile @@ -68,7 +68,7 @@ tests := test-utime test-stat test-stat2 test-lfs tst-getcwd \ tst-fts tst-fts-lfs tst-open-tmpfile \ tst-copy_file_range tst-getcwd-abspath tst-lockf \ tst-ftw-lnk tst-file_change_detection tst-lchmod \ - tst-ftw-bz26353 + tst-ftw-bz26353 tst-stat tst-stat-lfs # Likewise for statx, but we do not need static linking here. tests-internal += tst-statx diff --git a/io/fstat.c b/io/fstat.c index dc117361f..17f31bf3b 100644 --- a/io/fstat.c +++ b/io/fstat.c @@ -16,10 +16,16 @@ . */ #include <sys/stat.h> +#include <errno.h> int __fstat (int fd, struct stat *buf) { + if (fd < 0) + { + __set_errno (EBADF); + return -1; + } return __fstatat (fd, "", buf, AT_EMPTY_PATH); } diff --git a/io/fstat64.c b/io/fstat64.c index addf37977..618170695 100644 --- a/io/fstat64.c +++ b/io/fstat64.c @@ -16,10 +16,16 @@ . */ #include <sys/stat.h> +#include <errno.h> int __fstat64 (int fd, struct stat64 *buf) { + if (fd < 0) + { + __set_errno (EBADF); + return -1; + } return __fstatat64 (fd, "", buf, AT_EMPTY_PATH); } hidden_def (__fstat64) diff --git a/io/tst-stat-lfs.c b/io/tst-stat-lfs.c new file mode 100644 index 000000000..b53f460ad --- /dev/null +++ b/io/tst-stat-lfs.c @@ -0,0 +1,2 @@ +#define _FILE_OFFSET_BITS 64 +#include "tst-stat.c" diff --git a/io/tst-stat.c b/io/tst-stat.c new file mode 100644 index 000000000..445ac4176 --- /dev/null +++ b/io/tst-stat.c @@ -0,0 +1,102 @@ +/* Basic tests for stat, lstat, fstat, and fstatat. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details.
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void +stat_check (int fd, const char *path, struct stat *st) +{ + TEST_COMPARE (stat (path, st), 0); +} + +static void +lstat_check (int fd, const char *path, struct stat *st) +{ + TEST_COMPARE (lstat (path, st), 0); +} + +static void +fstat_check (int fd, const char *path, struct stat *st) +{ + /* Test for invalid fstat input (BZ #27559). */ + TEST_COMPARE (fstat (AT_FDCWD, st), -1); + TEST_COMPARE (errno, EBADF); + + TEST_COMPARE (fstat (fd, st), 0); +} + +static void +fstatat_check (int fd, const char *path, struct stat *st) +{ + TEST_COMPARE (fstatat (fd, "", st, 0), -1); + TEST_COMPARE (errno, ENOENT); + + TEST_COMPARE (fstatat (fd, path, st, 0), 0); +} + +typedef void (*test_t)(int, const char *path, struct stat *); + +static int +do_test (void) +{ + char *path; + int fd = create_temp_file ("tst-fstat.", &path); + TEST_VERIFY_EXIT (fd >= 0); + support_write_file_string (path, "abc"); + + struct statx stx; + TEST_COMPARE (statx (fd, path, 0, STATX_BASIC_STATS, &stx), 0); + + test_t tests[] = { stat_check, lstat_check, fstat_check, fstatat_check }; + + for (int i = 0; i < array_length (tests); i++) + { + struct stat st; + tests[i](fd, path, &st); + + TEST_COMPARE (stx.stx_dev_major, major (st.st_dev)); + TEST_COMPARE (stx.stx_dev_minor, minor (st.st_dev)); + TEST_COMPARE (stx.stx_ino, st.st_ino); + TEST_COMPARE (stx.stx_mode, st.st_mode); + TEST_COMPARE (stx.stx_nlink, st.st_nlink); + TEST_COMPARE (stx.stx_uid, st.st_uid); + TEST_COMPARE (stx.stx_gid, st.st_gid); + TEST_COMPARE (stx.stx_rdev_major, major (st.st_rdev)); + TEST_COMPARE (stx.stx_rdev_minor, minor (st.st_rdev)); + TEST_COMPARE (stx.stx_blksize, st.st_blksize); + TEST_COMPARE (stx.stx_blocks, st.st_blocks); + + TEST_COMPARE (stx.stx_ctime.tv_sec, st.st_ctim.tv_sec); + TEST_COMPARE (stx.stx_ctime.tv_nsec, st.st_ctim.tv_nsec); + TEST_COMPARE (stx.stx_mtime.tv_sec, st.st_mtim.tv_sec); + TEST_COMPARE (stx.stx_mtime.tv_nsec, st.st_mtim.tv_nsec); + } + + return 0; +} + +#include diff --git a/malloc/malloc.c b/malloc/malloc.c index 1f4bbd8ed..8f8f12c27 100644 --- a/malloc/malloc.c +++ b/malloc/malloc.c @@ -3446,7 +3446,9 @@ __libc_realloc (void *oldmem, size_t bytes) newp = __libc_malloc (bytes); if (newp != NULL) { - memcpy (newp, oldmem, oldsize - SIZE_SZ); + size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ; + memcpy (newp, oldmem, sz); + (void) TAG_REGION (chunk2rawmem (oldp), sz); _int_free (ar_ptr, oldp, 0); } } diff --git a/misc/Makefile b/misc/Makefile index b08d7c68a..05ad034ba 100644 --- a/misc/Makefile +++ b/misc/Makefile @@ -88,7 +88,7 @@ tests := tst-dirname tst-tsearch tst-fdset tst-mntent tst-hsearch \ tst-preadvwritev tst-preadvwritev64 tst-makedev tst-empty \ tst-preadvwritev2 tst-preadvwritev64v2 tst-warn-wide \ tst-ldbl-warn tst-ldbl-error tst-dbl-efgcvt tst-ldbl-efgcvt \ - tst-mntent-autofs tst-syscalls tst-mntent-escape + tst-mntent-autofs tst-syscalls tst-mntent-escape tst-select # Tests which need libdl. ifeq (yes,$(build-shared)) diff --git a/misc/tst-select.c b/misc/tst-select.c new file mode 100644 index 000000000..52aa26651 --- /dev/null +++ b/misc/tst-select.c @@ -0,0 +1,143 @@ +/* Test for select timeout. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct child_args +{ + int fds[2][2]; + struct timeval tmo; +}; + +static void +alarm_handler (int signum) +{ + /* Do nothing. */ +} + +static void +do_test_child (void *closure) +{ + struct child_args *args = (struct child_args *) closure; + + close (args->fds[0][1]); + close (args->fds[1][0]); + + fd_set rfds; + FD_ZERO (&rfds); + FD_SET (args->fds[0][0], &rfds); + + struct timespec ts = xclock_now (CLOCK_REALTIME); + ts = timespec_add (ts, (struct timespec) { args->tmo.tv_sec, 0 }); + + int r = select (args->fds[0][0] + 1, &rfds, NULL, NULL, &args->tmo); + TEST_COMPARE (r, 0); + + if (support_select_modifies_timeout ()) + { + TEST_COMPARE (args->tmo.tv_sec, 0); + TEST_COMPARE (args->tmo.tv_usec, 0); + } + + TEST_TIMESPEC_NOW_OR_AFTER (CLOCK_REALTIME, ts); + + xwrite (args->fds[1][1], "foo", 3); +} + +static void +do_test_child_alarm (void *closure) +{ + struct sigaction act = { .sa_handler = alarm_handler }; + xsigaction (SIGALRM, &act, NULL); + alarm (1); + + struct timeval tv = { .tv_sec = 10, .tv_usec = 0 }; + int r = select (0, NULL, NULL, NULL, &tv); + TEST_COMPARE (r, -1); + TEST_COMPARE (errno, EINTR); + + if (support_select_modifies_timeout ()) + TEST_VERIFY (tv.tv_sec < 10); +} + +static int +do_test (void) +{ + struct child_args args; + + xpipe (args.fds[0]); + xpipe (args.fds[1]); + + /* The child select should time out and write on its pipe end. */ + args.tmo = (struct timeval) { .tv_sec = 0, .tv_usec = 250000 }; + { + struct support_capture_subprocess result; + result = support_capture_subprocess (do_test_child, &args); + support_capture_subprocess_check (&result, "tst-select-child", 0, + sc_allow_none); + } + + if (support_select_normalizes_timeout ()) + { + /* This is handled as 1 second instead of failing with EINVAL. */ + args.tmo = (struct timeval) { .tv_sec = 0, .tv_usec = 1000000 }; + struct support_capture_subprocess result; + result = support_capture_subprocess (do_test_child, &args); + support_capture_subprocess_check (&result, "tst-select-child", 0, + sc_allow_none); + } + + /* Same as before, but simulating polling.
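+ A zero timeout makes select return immediately instead of blocking.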
*/ + args.tmo = (struct timeval) { .tv_sec = 0, .tv_usec = 0 }; + { + struct support_capture_subprocess result; + result = support_capture_subprocess (do_test_child, &args); + support_capture_subprocess_check (&result, "tst-select-child", 0, + sc_allow_none); + } + + xclose (args.fds[0][0]); + xclose (args.fds[1][1]); + + { + struct support_capture_subprocess result; + result = support_capture_subprocess (do_test_child_alarm, NULL); + support_capture_subprocess_check (&result, "tst-select-child", 0, + sc_allow_none); + } + + { + fd_set rfds; + FD_ZERO (&rfds); + FD_SET (args.fds[1][0], &rfds); + + int r = select (args.fds[1][0] + 1, &rfds, NULL, NULL, &args.tmo); + TEST_COMPARE (r, 1); + } + + return 0; +} + +#include diff --git a/nptl/Makefile b/nptl/Makefile index 0282e0739..a1a8ef254 100644 --- a/nptl/Makefile +++ b/nptl/Makefile @@ -294,7 +294,8 @@ tests = tst-attr2 tst-attr3 tst-default-attr \ tst-thread-affinity-sched \ tst-pthread-defaultattr-free \ tst-pthread-attr-sigmask \ - tst-pthread-timedlock-lockloop + tst-pthread-timedlock-lockloop \ + tst-pthread-gdb-attach tst-pthread-gdb-attach-static tests-container = tst-pthread-getattr @@ -314,10 +315,6 @@ xtests += tst-eintr1 test-srcs = tst-oddstacklimit -# Test expected to fail on most targets (except x86_64) due to bug -# 18435 - pthread_once hangs when init routine throws an exception. -test-xfail-tst-once5 = yes - gen-as-const-headers = unwindbuf.sym \ pthread-pi-defines.sym @@ -344,6 +341,22 @@ CPPFLAGS-test-cond-printers.c := $(CFLAGS-printers-tests) CPPFLAGS-test-rwlockattr-printers.c := $(CFLAGS-printers-tests) CPPFLAGS-test-rwlock-printers.c := $(CFLAGS-printers-tests) +# Reuse the CFLAGS setting for the GDB attaching test. It needs +# debugging information. +CFLAGS-tst-pthread-gdb-attach.c := $(CFLAGS-printers-tests) +CPPFLAGS-tst-pthread-gdb-attach.c := $(CFLAGS-printers-tests) +ifeq ($(build-shared)$(build-hardcoded-path-in-tests),yesno) +CPPFLAGS-tst-pthread-gdb-attach.c += -DDO_ADD_SYMBOL_FILE=1 +else +CPPFLAGS-tst-pthread-gdb-attach.c += -DDO_ADD_SYMBOL_FILE=0 +endif +CFLAGS-tst-pthread-gdb-attach-static.c := $(CFLAGS-printers-tests) +CPPFLAGS-tst-pthread-gdb-attach-static.c := \ + $(CFLAGS-printers-tests) -DDO_ADD_SYMBOL_FILE=0 +# As of version 9.2, GDB cannot attach properly to PIE programs that +# were launched with an explicit ld.so invocation. +tst-pthread-gdb-attach-no-pie = yes + ifeq ($(build-shared),yes) tests-printers-libs := $(shared-thread-library) else @@ -415,7 +428,8 @@ link-libc-static := $(common-objpfx)libc.a $(static-gnulib) \ tests-static += tst-stackguard1-static \ tst-cancel24-static \ tst-mutex8-static tst-mutexpi8-static tst-sem11-static \ - tst-sem12-static tst-cond11-static + tst-sem12-static tst-cond11-static \ + tst-pthread-gdb-attach-static tests += tst-cancel24-static diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h index e5efa2e62..79be1bc70 100644 --- a/nptl/pthreadP.h +++ b/nptl/pthreadP.h @@ -602,6 +602,67 @@ extern void __pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, # undef pthread_cleanup_pop # define pthread_cleanup_pop(execute) \ __pthread_cleanup_pop (&_buffer, (execute)); } + +# if defined __EXCEPTIONS && !defined __cplusplus +/* Structure to hold the cleanup handler information. */ +struct __pthread_cleanup_combined_frame +{ + void (*__cancel_routine) (void *); + void *__cancel_arg; + int __do_it; + struct _pthread_cleanup_buffer __buffer; +}; + +/* Special cleanup macros which register cleanup both using + __pthread_cleanup_{push,pop} and using cleanup attribute. 
This is needed + for pthread_once, so that it supports both throwing exceptions from the + pthread_once callback (only cleanup attribute works there) and cancellation + of the thread running the callback if the callback or some routines it + calls don't have unwind information. */ + +static __always_inline void +__pthread_cleanup_combined_routine (struct __pthread_cleanup_combined_frame + *__frame) +{ + if (__frame->__do_it) + { + __frame->__cancel_routine (__frame->__cancel_arg); + __frame->__do_it = 0; + __pthread_cleanup_pop (&__frame->__buffer, 0); + } +} + +static inline void +__pthread_cleanup_combined_routine_voidptr (void *__arg) +{ + struct __pthread_cleanup_combined_frame *__frame + = (struct __pthread_cleanup_combined_frame *) __arg; + if (__frame->__do_it) + { + __frame->__cancel_routine (__frame->__cancel_arg); + __frame->__do_it = 0; + } +} + +# define pthread_cleanup_combined_push(routine, arg) \ + do { \ + void (*__cancel_routine) (void *) = (routine); \ + struct __pthread_cleanup_combined_frame __clframe \ + __attribute__ ((__cleanup__ (__pthread_cleanup_combined_routine))) \ + = { .__cancel_routine = __cancel_routine, .__cancel_arg = (arg), \ + .__do_it = 1 }; \ + __pthread_cleanup_push (&__clframe.__buffer, \ + __pthread_cleanup_combined_routine_voidptr, \ + &__clframe); + +# define pthread_cleanup_combined_pop(execute) \ + __pthread_cleanup_pop (&__clframe.__buffer, 0); \ + __clframe.__do_it = 0; \ + if (execute) \ + __cancel_routine (__clframe.__cancel_arg); \ + } while (0) + +# endif #endif extern void __pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer, diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c index 6c645aff4..337a7dcba 100644 --- a/nptl/pthread_create.c +++ b/nptl/pthread_create.c @@ -51,6 +51,14 @@ static td_thr_events_t __nptl_threads_events __attribute_used__; /* Pointer to descriptor with the last event. */ static struct pthread *__nptl_last_event __attribute_used__; +#ifdef SHARED +/* This variable is used to access _rtld_global from libthread_db. If + GDB loads libpthread before ld.so, it is not possible to resolve + _rtld_global directly during libpthread initialization. */ +static struct rtld_global *__nptl_rtld_global __attribute_used__ + = &_rtld_global; +#endif + /* Number of threads running. */ unsigned int __nptl_nthreads = 1; @@ -426,8 +434,6 @@ START_THREAD_DEFN unwind_buf.priv.data.prev = NULL; unwind_buf.priv.data.cleanup = NULL; - __libc_signal_restore_set (&pd->sigmask); - /* Allow setxid from now onwards. */ if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2)) futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE); @@ -437,6 +443,8 @@ START_THREAD_DEFN /* Store the new cleanup handler info. */ THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf); + __libc_signal_restore_set (&pd->sigmask); + /* We are either in (a) or (b), and in either case we either own PD already (2) or are about to own PD (1), and so our only restriction would be that we can't free PD until we know we diff --git a/nptl/pthread_once.c b/nptl/pthread_once.c index 28d97097c..7645da222 100644 --- a/nptl/pthread_once.c +++ b/nptl/pthread_once.c @@ -111,11 +111,11 @@ __pthread_once_slow (pthread_once_t *once_control, void (*init_routine) (void)) /* This thread is the first here. Do the initialization. Register a cleanup handler so that in case the thread gets interrupted the initialization can be restarted. 
*/ - pthread_cleanup_push (clear_once_control, once_control); + pthread_cleanup_combined_push (clear_once_control, once_control); init_routine (); - pthread_cleanup_pop (0); + pthread_cleanup_combined_pop (0); /* Mark *once_control as having finished the initialization. We need diff --git a/nptl/tst-once5.cc b/nptl/tst-once5.cc index b797ab356..60fe1ef82 100644 --- a/nptl/tst-once5.cc +++ b/nptl/tst-once5.cc @@ -59,7 +59,7 @@ do_test (void) " throwing an exception", stderr); } catch (OnceException) { - if (1 < niter) + if (niter > 1) fputs ("pthread_once unexpectedly threw", stderr); result = 0; } @@ -75,7 +75,5 @@ do_test (void) return result; } -// The test currently hangs and is XFAILed. Reduce the timeout. -#define TIMEOUT 1 #define TEST_FUNCTION do_test () #include "../test-skeleton.c" diff --git a/nptl/tst-pthread-gdb-attach-static.c b/nptl/tst-pthread-gdb-attach-static.c new file mode 100644 index 000000000..e159632ca --- /dev/null +++ b/nptl/tst-pthread-gdb-attach-static.c @@ -0,0 +1 @@ +#include "tst-pthread-gdb-attach.c" diff --git a/nptl/tst-pthread-gdb-attach.c b/nptl/tst-pthread-gdb-attach.c new file mode 100644 index 000000000..901a12003 --- /dev/null +++ b/nptl/tst-pthread-gdb-attach.c @@ -0,0 +1,217 @@ +/* Smoke testing GDB process attach with thread-local variable access. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* This test runs GDB against a forked copy of itself, to check + whether libthread_db can be loaded, and that access to thread-local + variables works. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Starts out as zero, changed to 1 or 2 by the debugger, depending on + the thread. */ +__thread volatile int altered_by_debugger; + +/* Common prefix between 32-bit and 64-bit ELF. */ +struct elf_prefix +{ + unsigned char e_ident[EI_NIDENT]; + uint16_t e_type; + uint16_t e_machine; + uint32_t e_version; +}; +_Static_assert (sizeof (struct elf_prefix) == EI_NIDENT + 8, + "padding in struct elf_prefix"); + +/* Reads the ELF header from PATH. Returns true if the header can be + read, false if the file is too short. */ +static bool +read_elf_header (const char *path, struct elf_prefix *elf) +{ + int fd = xopen (path, O_RDONLY, 0); + bool result = read (fd, elf, sizeof (*elf)) == sizeof (*elf); + xclose (fd); + return result; +} + +/* Searches for "gdb" alongside the path variable. See execvpe. 
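+ Returns a malloc'd path on success and NULL if no executable gdb candidate is found.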
*/ +static char * +find_gdb (void) +{ + const char *path = getenv ("PATH"); + if (path == NULL) + return NULL; + while (true) + { + const char *colon = strchrnul (path, ':'); + char *candidate = xasprintf ("%.*s/gdb", (int) (colon - path), path); + if (access (candidate, X_OK) == 0) + return candidate; + free (candidate); + if (*colon == '\0') + break; + path = colon + 1; + } + return NULL; +} + +/* Writes the GDB script to run the test to PATH. */ +static void +write_gdbscript (const char *path, int tested_pid) +{ + FILE *fp = xfopen (path, "w"); + fprintf (fp, + "set trace-commands on\n" + "set debug libthread-db 1\n" +#if DO_ADD_SYMBOL_FILE + /* Do not do this unconditionally to work around a GDB + assertion failure: ../../gdb/symtab.c:6404: + internal-error: CORE_ADDR get_msymbol_address(objfile*, + const minimal_symbol*): Assertion `(objf->flags & + OBJF_MAINLINE) == 0' failed. */ + "add-symbol-file %1$s/nptl/tst-pthread-gdb-attach\n" +#endif + "set auto-load safe-path %1$s/nptl_db\n" + "set libthread-db-search-path %1$s/nptl_db\n" + "attach %2$d\n", + support_objdir_root, tested_pid); + fputs ("break debugger_inspection_point\n" + "continue\n" + "thread 1\n" + "print altered_by_debugger\n" + "print altered_by_debugger = 1\n" + "thread 2\n" + "print altered_by_debugger\n" + "print altered_by_debugger = 2\n" + "continue\n", + fp); + xfclose (fp); +} + +/* The test sets a breakpoint on this function and alters the + altered_by_debugger thread-local variable. */ +void __attribute__ ((weak)) +debugger_inspection_point (void) +{ +} + +/* Thread function for the test thread in the subprocess. */ +static void * +subprocess_thread (void *closure) +{ + /* Wait until altered_by_debugger changes the value away from 0. */ + while (altered_by_debugger == 0) + { + usleep (100 * 1000); + debugger_inspection_point (); + } + + TEST_COMPARE (altered_by_debugger, 2); + return NULL; +} + +/* This function implements the subprocess under test. It creates a + second thread, waiting for its value to change to 2, and checks + that the main thread also changed its value to 1. */ +static void +in_subprocess (void) +{ + pthread_t thr = xpthread_create (NULL, subprocess_thread, NULL); + TEST_VERIFY (xpthread_join (thr) == NULL); + TEST_COMPARE (altered_by_debugger, 1); + _exit (0); +} + +static int +do_test (void) +{ + char *gdb_path = find_gdb (); + if (gdb_path == NULL) + FAIL_UNSUPPORTED ("gdb command not found in PATH: %s", getenv ("PATH")); + + /* Check that libthread_db is compatible with the gdb architecture + because gdb loads it via dlopen. */ + { + char *threaddb_path = xasprintf ("%s/nptl_db/libthread_db.so", + support_objdir_root); + struct elf_prefix elf_threaddb; + TEST_VERIFY_EXIT (read_elf_header (threaddb_path, &elf_threaddb)); + struct elf_prefix elf_gdb; + /* If the ELF header cannot be read or "gdb" is not an ELF file, + assume this is a wrapper script that can run. 
*/ + if (read_elf_header (gdb_path, &elf_gdb) + && memcmp (&elf_gdb, ELFMAG, SELFMAG) == 0) + { + if (elf_gdb.e_ident[EI_CLASS] != elf_threaddb.e_ident[EI_CLASS]) + FAIL_UNSUPPORTED ("GDB at %s has wrong class", gdb_path); + if (elf_gdb.e_ident[EI_DATA] != elf_threaddb.e_ident[EI_DATA]) + FAIL_UNSUPPORTED ("GDB at %s has wrong data", gdb_path); + if (elf_gdb.e_machine != elf_threaddb.e_machine) + FAIL_UNSUPPORTED ("GDB at %s has wrong machine", gdb_path); + } + free (threaddb_path); + } + + pid_t tested_pid = xfork (); + if (tested_pid == 0) + in_subprocess (); + char *tested_pid_string = xasprintf ("%d", tested_pid); + + char *gdbscript; + xclose (create_temp_file ("tst-pthread-gdb-attach-", &gdbscript)); + write_gdbscript (gdbscript, tested_pid); + + pid_t gdb_pid = xfork (); + if (gdb_pid == 0) + { + xdup2 (STDOUT_FILENO, STDERR_FILENO); + execl (gdb_path, "gdb", "-nx", "-batch", "-x", gdbscript, NULL); + if (errno == ENOENT) + _exit (EXIT_UNSUPPORTED); + else + _exit (1); + } + + int status; + TEST_COMPARE (xwaitpid (gdb_pid, &status, 0), gdb_pid); + if (WIFEXITED (status) && WEXITSTATUS (status) == EXIT_UNSUPPORTED) + /* gdb is not installed. */ + return EXIT_UNSUPPORTED; + TEST_COMPARE (status, 0); + TEST_COMPARE (xwaitpid (tested_pid, &status, 0), tested_pid); + TEST_COMPARE (status, 0); + + free (tested_pid_string); + free (gdbscript); + free (gdb_path); + return 0; +} + +#include diff --git a/nptl_db/structs.def b/nptl_db/structs.def index 999a9fc35..8a613dd2f 100644 --- a/nptl_db/structs.def +++ b/nptl_db/structs.def @@ -100,8 +100,7 @@ DB_STRUCT_FIELD (pthread, dtvp) #endif #if !(IS_IN (libpthread) && !defined SHARED) -DB_STRUCT (rtld_global) -DB_RTLD_VARIABLE (_rtld_global) +DB_VARIABLE (__nptl_rtld_global) #endif DB_RTLD_GLOBAL_FIELD (dl_tls_dtv_slotinfo_list) DB_RTLD_GLOBAL_FIELD (dl_stack_user) diff --git a/nptl_db/td_init.c b/nptl_db/td_init.c index 1d1568122..06b5adc5c 100644 --- a/nptl_db/td_init.c +++ b/nptl_db/td_init.c @@ -33,13 +33,14 @@ td_init (void) bool __td_ta_rtld_global (td_thragent_t *ta) { - if (ta->ta_addr__rtld_global == 0 - && td_mod_lookup (ta->ph, LD_SO, SYM__rtld_global, - &ta->ta_addr__rtld_global) != PS_OK) + if (ta->ta_addr__rtld_global == 0) { - ta->ta_addr__rtld_global = (void*)-1; - return false; + psaddr_t rtldglobalp; + if (DB_GET_VALUE (rtldglobalp, ta, __nptl_rtld_global, 0) == TD_OK) + ta->ta_addr__rtld_global = rtldglobalp; + else + ta->ta_addr__rtld_global = (void *) -1; } - else - return ta->ta_addr__rtld_global != (void*)-1; + + return ta->ta_addr__rtld_global != (void *)-1; } diff --git a/nptl_db/thread_dbP.h b/nptl_db/thread_dbP.h index 580a70c47..712fa3aeb 100644 --- a/nptl_db/thread_dbP.h +++ b/nptl_db/thread_dbP.h @@ -108,6 +108,8 @@ struct td_thragent # undef DB_SYMBOL # undef DB_VARIABLE + psaddr_t ta_addr__rtld_global; + /* The method of locating a thread's th_unique value. */ enum { diff --git a/nscd/netgroupcache.c b/nscd/netgroupcache.c index dba6ceec1..ad2daddaf 100644 --- a/nscd/netgroupcache.c +++ b/nscd/netgroupcache.c @@ -248,7 +248,7 @@ addgetnetgrentX (struct database_dyn *db, int fd, request_header *req, : NULL); ndomain = (ndomain ? 
newbuf + ndomaindiff : NULL); - buffer = newbuf; + *tofreep = buffer = newbuf; } nhost = memcpy (buffer + bufused, @@ -319,7 +319,7 @@ addgetnetgrentX (struct database_dyn *db, int fd, request_header *req, else if (status == NSS_STATUS_TRYAGAIN && e == ERANGE) { buflen *= 2; - buffer = xrealloc (buffer, buflen); + *tofreep = buffer = xrealloc (buffer, buflen); } else if (status == NSS_STATUS_RETURN || status == NSS_STATUS_NOTFOUND diff --git a/nss/nss_database.c b/nss/nss_database.c index cf0306adc..fb72d0cc0 100644 --- a/nss/nss_database.c +++ b/nss/nss_database.c @@ -398,10 +398,10 @@ nss_database_check_reload_and_get (struct nss_database_state *local, && (str.st_ino != local->root_ino || str.st_dev != local->root_dev))) { - /* Change detected; disable reloading. */ + /* Change detected; disable reloading and return current state. */ atomic_store_release (&local->data.reload_disabled, 1); + *result = local->data.services[database_index]; __libc_lock_unlock (local->lock); - __nss_module_disable_loading (); return true; } local->root_ino = str.st_ino; diff --git a/nss/tst-nss-files-hosts-long.root/etc/nsswitch.conf b/nss/tst-nss-files-hosts-long.root/etc/nsswitch.conf new file mode 100644 index 000000000..5b0c6a419 --- /dev/null +++ b/nss/tst-nss-files-hosts-long.root/etc/nsswitch.conf @@ -0,0 +1 @@ +hosts: files diff --git a/nss/tst-reload2.c b/nss/tst-reload2.c index 5dae16b4f..5ecb032e9 100644 --- a/nss/tst-reload2.c +++ b/nss/tst-reload2.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -48,7 +49,7 @@ static const char *group_4[] = { "alpha", "beta", "gamma", "fred", NULL }; -static struct group group_table_data[] = +static struct group group_table_data1[] = { GRP (4), GRP_LAST () @@ -58,7 +59,7 @@ void _nss_test1_init_hook (test_tables *t) { t->pwd_table = pwd_table1; - t->grp_table = group_table_data; + t->grp_table = group_table_data1; } static struct passwd pwd_table2[] = @@ -68,10 +69,21 @@ static struct passwd pwd_table2[] = PWD_LAST () }; +static const char *group_5[] = { + "fred", NULL +}; + +static struct group group_table_data2[] = + { + GRP (5), + GRP_LAST () + }; + void _nss_test2_init_hook (test_tables *t) { t->pwd_table = pwd_table2; + t->grp_table = group_table_data2; } static int @@ -79,6 +91,7 @@ do_test (void) { struct passwd *pw; struct group *gr; + struct hostent *he; char buf1[PATH_MAX]; char buf2[PATH_MAX]; @@ -99,7 +112,9 @@ do_test (void) TEST_COMPARE (pw->pw_uid, 1234); /* This just loads the test2 DSO. */ - gr = getgrnam ("name4"); + gr = getgrgid (5); + TEST_VERIFY (gr != NULL); + /* Change the root dir. */ @@ -114,15 +129,21 @@ do_test (void) if (pw) TEST_VERIFY (pw->pw_uid != 2468); - /* The "files" DSO should not be loaded. */ - gr = getgrnam ("test3"); - TEST_VERIFY (gr == NULL); - /* We should still be using the old configuration. */ pw = getpwnam ("test1"); TEST_VERIFY (pw != NULL); if (pw) TEST_COMPARE (pw->pw_uid, 1234); + gr = getgrgid (5); + TEST_VERIFY (gr != NULL); + gr = getgrnam ("name4"); + TEST_VERIFY (gr == NULL); + + /* hosts in the outer nsswitch is files; the inner one is test1. + Verify that we're still using the outer nsswitch *and* that we + can load the files DSO. 
*/ + he = gethostbyname ("test2"); + TEST_VERIFY (he != NULL); return 0; } diff --git a/nss/tst-reload2.root/etc/hosts b/nss/tst-reload2.root/etc/hosts new file mode 100644 index 000000000..bbd9e494e --- /dev/null +++ b/nss/tst-reload2.root/etc/hosts @@ -0,0 +1 @@ +1.2.3.4 test1 diff --git a/nss/tst-reload2.root/etc/nsswitch.conf b/nss/tst-reload2.root/etc/nsswitch.conf index 570795ae2..688a58951 100644 --- a/nss/tst-reload2.root/etc/nsswitch.conf +++ b/nss/tst-reload2.root/etc/nsswitch.conf @@ -1,2 +1,3 @@ passwd: test1 group: test2 +hosts: files diff --git a/nss/tst-reload2.root/subdir/etc/hosts b/nss/tst-reload2.root/subdir/etc/hosts new file mode 100644 index 000000000..0a2cbd433 --- /dev/null +++ b/nss/tst-reload2.root/subdir/etc/hosts @@ -0,0 +1 @@ +1.2.3.4 test2 diff --git a/nss/tst-reload2.root/subdir/etc/nsswitch.conf b/nss/tst-reload2.root/subdir/etc/nsswitch.conf index f1d73f876..fea271869 100644 --- a/nss/tst-reload2.root/subdir/etc/nsswitch.conf +++ b/nss/tst-reload2.root/subdir/etc/nsswitch.conf @@ -1,2 +1,3 @@ passwd: test2 group: files +hosts: test1 diff --git a/posix/bits/unistd.h b/posix/bits/unistd.h index f0831386c..622adeb2b 100644 --- a/posix/bits/unistd.h +++ b/posix/bits/unistd.h @@ -199,10 +199,9 @@ __NTH (readlinkat (int __fd, const char *__restrict __path, #endif extern char *__getcwd_chk (char *__buf, size_t __size, size_t __buflen) - __THROW __wur __attr_access ((__write_only__, 1, 2)); + __THROW __wur; extern char *__REDIRECT_NTH (__getcwd_alias, - (char *__buf, size_t __size), getcwd) - __wur __attr_access ((__write_only__, 1, 2)); + (char *__buf, size_t __size), getcwd) __wur; extern char *__REDIRECT_NTH (__getcwd_chk_warn, (char *__buf, size_t __size, size_t __buflen), __getcwd_chk) diff --git a/posix/unistd.h b/posix/unistd.h index 3f2276337..bede49c1f 100644 --- a/posix/unistd.h +++ b/posix/unistd.h @@ -517,8 +517,7 @@ extern int fchdir (int __fd) __THROW __wur; an array is allocated with `malloc'; the array is SIZE bytes long, unless SIZE == 0, in which case it is as big as necessary. */ -extern char *getcwd (char *__buf, size_t __size) __THROW __wur - __attr_access ((__write_only__, 1, 2)); +extern char *getcwd (char *__buf, size_t __size) __THROW __wur; #ifdef __USE_GNU /* Return a malloc'd string containing the current directory name. diff --git a/posix/wordexp-test.c b/posix/wordexp-test.c index f93a546d7..9df02dbbb 100644 --- a/posix/wordexp-test.c +++ b/posix/wordexp-test.c @@ -183,6 +183,7 @@ struct test_case_struct { 0, NULL, "$var", 0, 0, { NULL, }, IFS }, { 0, NULL, "\"\\n\"", 0, 1, { "\\n", }, IFS }, { 0, NULL, "", 0, 0, { NULL, }, IFS }, + { 0, NULL, "${1234567890123456789012}", 0, 0, { NULL, }, IFS }, /* Flags not already covered (testit() has special handling for these) */ { 0, NULL, "one two", WRDE_DOOFFS, 2, { "one", "two", }, IFS }, diff --git a/posix/wordexp.c b/posix/wordexp.c index bcbe96e48..1f3b09f72 100644 --- a/posix/wordexp.c +++ b/posix/wordexp.c @@ -1399,7 +1399,7 @@ envsubst: /* Is it a numeric parameter? */ else if (isdigit (env[0])) { - int n = atoi (env); + unsigned long n = strtoul (env, NULL, 10); if (n >= __libc_argc) /* Substitute NULL. 
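Because strtoul saturates at ULONG_MAX on overflow, digit strings too long for any valid parameter index also take this branch.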
*/ diff --git a/rt/Makefile b/rt/Makefile index 7b374f207..c87d95793 100644 --- a/rt/Makefile +++ b/rt/Makefile @@ -44,6 +44,7 @@ tests := tst-shm tst-timer tst-timer2 \ tst-aio7 tst-aio8 tst-aio9 tst-aio10 \ tst-mqueue1 tst-mqueue2 tst-mqueue3 tst-mqueue4 \ tst-mqueue5 tst-mqueue6 tst-mqueue7 tst-mqueue8 tst-mqueue9 \ + tst-bz28213 \ tst-timer3 tst-timer4 tst-timer5 \ tst-cpuclock2 tst-cputimer1 tst-cputimer2 tst-cputimer3 \ tst-shm-cancel diff --git a/rt/tst-bz28213.c b/rt/tst-bz28213.c new file mode 100644 index 000000000..0c096b5a0 --- /dev/null +++ b/rt/tst-bz28213.c @@ -0,0 +1,101 @@ +/* Bug 28213: test for NULL pointer dereference in mq_notify. + Copyright (C) The GNU Toolchain Authors. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static mqd_t m = -1; +static const char msg[] = "hello"; + +static void +check_bz28213_cb (union sigval sv) +{ + char buf[sizeof (msg)]; + + (void) sv; + + TEST_VERIFY_EXIT ((size_t) mq_receive (m, buf, sizeof (buf), NULL) + == sizeof (buf)); + TEST_VERIFY_EXIT (memcmp (buf, msg, sizeof (buf)) == 0); + + exit (0); +} + +static void +check_bz28213 (void) +{ + struct sigevent sev; + + memset (&sev, '\0', sizeof (sev)); + sev.sigev_notify = SIGEV_THREAD; + sev.sigev_notify_function = check_bz28213_cb; + + /* Step 1: Register & unregister notifier. + Helper thread should receive NOTIFY_REMOVED notification. + In a vulnerable version of glibc, NULL pointer dereference follows. */ + TEST_VERIFY_EXIT (mq_notify (m, &sev) == 0); + TEST_VERIFY_EXIT (mq_notify (m, NULL) == 0); + + /* Step 2: Once again, register notification. + Try to send one message. + Test is considered successful, if the callback does exit (0). */ + TEST_VERIFY_EXIT (mq_notify (m, &sev) == 0); + TEST_VERIFY_EXIT (mq_send (m, msg, sizeof (msg), 1) == 0); + + /* Wait... 
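+ for the notification callback; on success it calls exit (0), so pause does not return.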
*/ + pause (); +} + +static int +do_test (void) +{ + static const char m_name[] = "/bz28213_queue"; + struct mq_attr m_attr; + + memset (&m_attr, '\0', sizeof (m_attr)); + m_attr.mq_maxmsg = 1; + m_attr.mq_msgsize = sizeof (msg); + + m = mq_open (m_name, + O_RDWR | O_CREAT | O_EXCL, + 0600, + &m_attr); + + if (m < 0) + { + if (errno == ENOSYS) + FAIL_UNSUPPORTED ("POSIX message queues are not implemented\n"); + FAIL_EXIT1 ("Failed to create POSIX message queue: %m\n"); + } + + TEST_VERIFY_EXIT (mq_unlink (m_name) == 0); + + check_bz28213 (); + + return 0; +} + +#include diff --git a/socket/Makefile b/socket/Makefile index 600891d5d..4ab67aed6 100644 --- a/socket/Makefile +++ b/socket/Makefile @@ -29,10 +29,14 @@ headers := sys/socket.h sys/un.h bits/sockaddr.h bits/socket.h \ routines := accept bind connect getpeername getsockname getsockopt \ listen recv recvfrom recvmsg send sendmsg sendto \ setsockopt shutdown socket socketpair isfdtype opensock \ - sockatmark accept4 recvmmsg sendmmsg + sockatmark accept4 recvmmsg sendmmsg sockaddr_un_set tests := tst-accept4 +tests-internal := \ + tst-sockaddr_un_set \ + # tests-internal + aux := sa_len include ../Rules diff --git a/socket/opensock.c b/socket/opensock.c index 37148d474..3e35821f9 100644 --- a/socket/opensock.c +++ b/socket/opensock.c @@ -1,4 +1,5 @@ -/* Copyright (C) 1999-2021 Free Software Foundation, Inc. +/* Create socket with an unspecified address family for use with ioctl. + Copyright (C) 1999-2021 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -15,56 +16,27 @@ License along with the GNU C Library; if not, see . */ -#include +#include #include -#include /* Return a socket of any type. The socket can be used in subsequent ioctl calls to talk to the kernel. */ int __opensock (void) { - /* Cache the last AF that worked, to avoid many redundant calls to - socket(). */ - static int sock_af = -1; - int fd = -1; - __libc_lock_define_initialized (static, lock); - - if (sock_af != -1) - { - fd = __socket (sock_af, SOCK_DGRAM, 0); - if (fd != -1) - return fd; - } - - __libc_lock_lock (lock); - - if (sock_af != -1) - fd = __socket (sock_af, SOCK_DGRAM, 0); - - if (fd == -1) - { -#ifdef AF_INET - fd = __socket (sock_af = AF_INET, SOCK_DGRAM, 0); -#endif -#ifdef AF_INET6 - if (fd < 0) - fd = __socket (sock_af = AF_INET6, SOCK_DGRAM, 0); -#endif -#ifdef AF_IPX - if (fd < 0) - fd = __socket (sock_af = AF_IPX, SOCK_DGRAM, 0); -#endif -#ifdef AF_AX25 - if (fd < 0) - fd = __socket (sock_af = AF_AX25, SOCK_DGRAM, 0); -#endif -#ifdef AF_APPLETALK - if (fd < 0) - fd = __socket (sock_af = AF_APPLETALK, SOCK_DGRAM, 0); -#endif - } - - __libc_lock_unlock (lock); + /* SOCK_DGRAM is supported by all address families. */ + int type = SOCK_DGRAM | SOCK_CLOEXEC; + int fd; + + fd = __socket (AF_UNIX, type, 0); + if (fd >= 0) + return fd; + fd = __socket (AF_INET, type, 0); + if (fd >= 0) + return fd; + fd = __socket (AF_INET6, type, 0); + if (fd >= 0) + return fd; + __set_errno (ENOENT); return fd; } diff --git a/socket/sockaddr_un_set.c b/socket/sockaddr_un_set.c new file mode 100644 index 000000000..0bd40dc34 --- /dev/null +++ b/socket/sockaddr_un_set.c @@ -0,0 +1,41 @@ +/* Set the sun_path member of struct sockaddr_un. + Copyright (C) 2022 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include + +int +__sockaddr_un_set (struct sockaddr_un *addr, const char *pathname) +{ + size_t name_length = strlen (pathname); + + /* The kernel supports names of exactly sizeof (addr->sun_path) + bytes, without a null terminator, but userspace does not; see the + SUN_LEN macro. */ + if (name_length >= sizeof (addr->sun_path)) + { + __set_errno (EINVAL); /* Error code used by the kernel. */ + return -1; + } + + addr->sun_family = AF_UNIX; + memcpy (addr->sun_path, pathname, name_length + 1); + return 0; +} diff --git a/socket/tst-sockaddr_un_set.c b/socket/tst-sockaddr_un_set.c new file mode 100644 index 000000000..29c2a81af --- /dev/null +++ b/socket/tst-sockaddr_un_set.c @@ -0,0 +1,62 @@ +/* Test the __sockaddr_un_set function. + Copyright (C) 2022 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* Re-compile the function because the version in libc is not + exported. */ +#include "sockaddr_un_set.c" + +#include + +static int +do_test (void) +{ + struct sockaddr_un sun; + + memset (&sun, 0xcc, sizeof (sun)); + __sockaddr_un_set (&sun, ""); + TEST_COMPARE (sun.sun_family, AF_UNIX); + TEST_COMPARE (__sockaddr_un_set (&sun, ""), 0); + + memset (&sun, 0xcc, sizeof (sun)); + TEST_COMPARE (__sockaddr_un_set (&sun, "/example"), 0); + TEST_COMPARE_STRING (sun.sun_path, "/example"); + + { + char pathname[108]; /* Length of sun_path (ABI constant). 
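For context on __sockaddr_un_set (defined above and exercised by the test in progress here), a hedged sketch of a typical caller; the helper is glibc-internal and not exported, and bind_unix_path is a hypothetical name for illustration only:

#include <sys/socket.h>
#include <sys/un.h>

/* Hypothetical internal caller: bind a UNIX-domain socket, rejecting
   over-long paths with EINVAL instead of silently truncating them.  */
static int
bind_unix_path (int fd, const char *path)
{
  struct sockaddr_un sun;
  if (__sockaddr_un_set (&sun, path) < 0)
    return -1;                        /* errno is set to EINVAL */
  return bind (fd, (struct sockaddr *) &sun, sizeof (sun));
}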
*/ + memset (pathname, 'x', sizeof (pathname)); + pathname[sizeof (pathname) - 1] = '\0'; + memset (&sun, 0xcc, sizeof (sun)); + TEST_COMPARE (__sockaddr_un_set (&sun, pathname), 0); + TEST_COMPARE (sun.sun_family, AF_UNIX); + TEST_COMPARE_STRING (sun.sun_path, pathname); + } + + { + char pathname[109]; + memset (pathname, 'x', sizeof (pathname)); + pathname[sizeof (pathname) - 1] = '\0'; + memset (&sun, 0xcc, sizeof (sun)); + errno = 0; + TEST_COMPARE (__sockaddr_un_set (&sun, pathname), -1); + TEST_COMPARE (errno, EINVAL); + } + + return 0; +} + +#include diff --git a/stdlib/Makefile b/stdlib/Makefile index b3b30ab73..b1eebd568 100644 --- a/stdlib/Makefile +++ b/stdlib/Makefile @@ -86,7 +86,8 @@ tests := tst-strtol tst-strtod testmb testrand testsort testdiv \ tst-makecontext-align test-bz22786 tst-strtod-nan-sign \ tst-swapcontext1 tst-setcontext4 tst-setcontext5 \ tst-setcontext6 tst-setcontext7 tst-setcontext8 \ - tst-setcontext9 tst-bz20544 tst-canon-bz26341 + tst-setcontext9 tst-bz20544 tst-canon-bz26341 \ + tst-realpath-toolong tests-internal := tst-strtod1i tst-strtod3 tst-strtod4 tst-strtod5i \ tst-tls-atexit tst-tls-atexit-nodelete diff --git a/stdlib/canonicalize.c b/stdlib/canonicalize.c index 698f9ede2..e2d4244fc 100644 --- a/stdlib/canonicalize.c +++ b/stdlib/canonicalize.c @@ -400,8 +400,16 @@ realpath_stk (const char *name, char *resolved, error: *dest++ = '\0'; - if (resolved != NULL && dest - rname <= get_path_max ()) - rname = strcpy (resolved, rname); + if (resolved != NULL) + { + if (dest - rname <= get_path_max ()) + rname = strcpy (resolved, rname); + else if (!failed) + { + failed = true; + __set_errno (ENAMETOOLONG); + } + } error_nomem: scratch_buffer_free (&extra_buffer); diff --git a/stdlib/tst-realpath-toolong.c b/stdlib/tst-realpath-toolong.c new file mode 100644 index 000000000..438889029 --- /dev/null +++ b/stdlib/tst-realpath-toolong.c @@ -0,0 +1,53 @@ +/* Verify that realpath returns NULL with ENAMETOOLONG if the result exceeds + NAME_MAX. + Copyright The GNU Toolchain Authors. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BASENAME "tst-realpath-toolong." + +#ifndef PATH_MAX +# define PATH_MAX 1024 +#endif + +int +do_test (void) +{ + char *base = support_create_and_chdir_toolong_temp_directory (BASENAME); + + char buf[PATH_MAX + 1]; + const char *res = realpath (".", buf); + + /* canonicalize.c states that if the real path is >= PATH_MAX, then + realpath returns NULL and sets ENAMETOOLONG. 
*/ + TEST_VERIFY (res == NULL); + TEST_VERIFY (errno == ENAMETOOLONG); + + free (base); + return 0; +} + +#include diff --git a/stdlib/tst-secure-getenv.c b/stdlib/tst-secure-getenv.c index c9ec03866..5567c9ae2 100644 --- a/stdlib/tst-secure-getenv.c +++ b/stdlib/tst-secure-getenv.c @@ -30,167 +30,12 @@ #include #include +#include #include +#include #include static char MAGIC_ARGUMENT[] = "run-actual-test"; -#define MAGIC_STATUS 19 - -/* Return a GID which is not our current GID, but is present in the - supplementary group list. */ -static gid_t -choose_gid (void) -{ - int count = getgroups (0, NULL); - if (count < 0) - { - printf ("getgroups: %m\n"); - exit (1); - } - gid_t *groups; - groups = xcalloc (count, sizeof (*groups)); - int ret = getgroups (count, groups); - if (ret < 0) - { - printf ("getgroups: %m\n"); - exit (1); - } - gid_t current = getgid (); - gid_t not_current = 0; - for (int i = 0; i < ret; ++i) - { - if (groups[i] != current) - { - not_current = groups[i]; - break; - } - } - free (groups); - return not_current; -} - - -/* Copies the executable into a restricted directory, so that we can - safely make it SGID with the TARGET group ID. Then runs the - executable. */ -static int -run_executable_sgid (gid_t target) -{ - char *dirname = xasprintf ("%s/secure-getenv.%jd", - test_dir, (intmax_t) getpid ()); - char *execname = xasprintf ("%s/bin", dirname); - int infd = -1; - int outfd = -1; - int ret = -1; - if (mkdir (dirname, 0700) < 0) - { - printf ("mkdir: %m\n"); - goto err; - } - infd = open ("/proc/self/exe", O_RDONLY); - if (infd < 0) - { - printf ("open (/proc/self/exe): %m\n"); - goto err; - } - outfd = open (execname, O_WRONLY | O_CREAT | O_EXCL, 0700); - if (outfd < 0) - { - printf ("open (%s): %m\n", execname); - goto err; - } - char buf[4096]; - for (;;) - { - ssize_t rdcount = read (infd, buf, sizeof (buf)); - if (rdcount < 0) - { - printf ("read: %m\n"); - goto err; - } - if (rdcount == 0) - break; - char *p = buf; - char *end = buf + rdcount; - while (p != end) - { - ssize_t wrcount = write (outfd, buf, end - p); - if (wrcount == 0) - errno = ENOSPC; - if (wrcount <= 0) - { - printf ("write: %m\n"); - goto err; - } - p += wrcount; - } - } - if (fchown (outfd, getuid (), target) < 0) - { - printf ("fchown (%s): %m\n", execname); - goto err; - } - if (fchmod (outfd, 02750) < 0) - { - printf ("fchmod (%s): %m\n", execname); - goto err; - } - if (close (outfd) < 0) - { - printf ("close (outfd): %m\n"); - goto err; - } - if (close (infd) < 0) - { - printf ("close (infd): %m\n"); - goto err; - } - - int kid = fork (); - if (kid < 0) - { - printf ("fork: %m\n"); - goto err; - } - if (kid == 0) - { - /* Child process. 
*/ - char *args[] = { execname, MAGIC_ARGUMENT, NULL }; - execve (execname, args, environ); - printf ("execve (%s): %m\n", execname); - _exit (1); - } - int status; - if (waitpid (kid, &status, 0) < 0) - { - printf ("waitpid: %m\n"); - goto err; - } - if (!WIFEXITED (status) || WEXITSTATUS (status) != MAGIC_STATUS) - { - printf ("Unexpected exit status %d from child process\n", - status); - goto err; - } - ret = 0; - -err: - if (outfd >= 0) - close (outfd); - if (infd >= 0) - close (infd); - if (execname) - { - unlink (execname); - free (execname); - } - if (dirname) - { - rmdir (dirname); - free (dirname); - } - return ret; -} static int do_test (void) @@ -212,15 +57,15 @@ do_test (void) exit (1); } - gid_t target = choose_gid (); - if (target == 0) - { - fprintf (stderr, - "Could not find a suitable GID for user %jd, skipping test\n", - (intmax_t) getuid ()); - exit (0); - } - return run_executable_sgid (target); + int status = support_capture_subprogram_self_sgid (MAGIC_ARGUMENT); + + if (WEXITSTATUS (status) == EXIT_UNSUPPORTED) + return EXIT_UNSUPPORTED; + + if (!WIFEXITED (status)) + FAIL_EXIT1 ("Unexpected exit status %d from child process\n", status); + + return 0; } static void @@ -229,23 +74,15 @@ alternative_main (int argc, char **argv) if (argc == 2 && strcmp (argv[1], MAGIC_ARGUMENT) == 0) { if (getgid () == getegid ()) - { - /* This can happen if the file system is mounted nosuid. */ - fprintf (stderr, "SGID failed: GID and EGID match (%jd)\n", - (intmax_t) getgid ()); - exit (MAGIC_STATUS); - } + /* This can happen if the file system is mounted nosuid. */ + FAIL_UNSUPPORTED ("SGID failed: GID and EGID match (%jd)\n", + (intmax_t) getgid ()); if (getenv ("PATH") == NULL) - { - printf ("PATH variable not present\n"); - exit (3); - } + FAIL_EXIT (3, "PATH variable not present\n"); if (secure_getenv ("PATH") != NULL) - { - printf ("PATH variable not filtered out\n"); - exit (4); - } - exit (MAGIC_STATUS); + FAIL_EXIT (4, "PATH variable not filtered out\n"); + + exit (EXIT_SUCCESS); } } diff --git a/string/rawmemchr.c b/string/rawmemchr.c index 59bbeeaa4..b8523118e 100644 --- a/string/rawmemchr.c +++ b/string/rawmemchr.c @@ -22,24 +22,28 @@ # define RAWMEMCHR __rawmemchr #endif -/* Find the first occurrence of C in S. */ -void * -RAWMEMCHR (const void *s, int c) -{ - DIAG_PUSH_NEEDS_COMMENT; +/* The pragmata should be nested inside RAWMEMCHR below, but that + triggers GCC PR 98512. */ +DIAG_PUSH_NEEDS_COMMENT; #if __GNUC_PREREQ (7, 0) - /* GCC 8 warns about the size passed to memchr being larger than - PTRDIFF_MAX; the use of SIZE_MAX is deliberate here. */ - DIAG_IGNORE_NEEDS_COMMENT (8, "-Wstringop-overflow="); +/* GCC 8 warns about the size passed to memchr being larger than + PTRDIFF_MAX; the use of SIZE_MAX is deliberate here. */ +DIAG_IGNORE_NEEDS_COMMENT (8, "-Wstringop-overflow="); #endif #if __GNUC_PREREQ (11, 0) - /* Likewise GCC 11, with a different warning option. */ - DIAG_IGNORE_NEEDS_COMMENT (11, "-Wstringop-overread"); +/* Likewise GCC 11, with a different warning option. */ +DIAG_IGNORE_NEEDS_COMMENT (11, "-Wstringop-overread"); #endif + +/* Find the first occurrence of C in S. 
*/ +void * +RAWMEMCHR (const void *s, int c) +{ if (c != '\0') return memchr (s, c, (size_t)-1); - DIAG_POP_NEEDS_COMMENT; return (char *)s + strlen (s); } libc_hidden_def (__rawmemchr) weak_alias (__rawmemchr, rawmemchr) + +DIAG_POP_NEEDS_COMMENT; diff --git a/string/test-memchr.c b/string/test-memchr.c index 665edc32a..ce964284a 100644 --- a/string/test-memchr.c +++ b/string/test-memchr.c @@ -65,8 +65,8 @@ do_one_test (impl_t *impl, const CHAR *s, int c, size_t n, CHAR *exp_res) CHAR *res = CALL (impl, s, c, n); if (res != exp_res) { - error (0, 0, "Wrong result in function %s %p %p", impl->name, - res, exp_res); + error (0, 0, "Wrong result in function %s (%p, %d, %zu) -> %p != %p", + impl->name, s, c, n, res, exp_res); ret = 1; return; } @@ -91,7 +91,7 @@ do_test (size_t align, size_t pos, size_t len, size_t n, int seek_char) } buf[align + len] = 0; - if (pos < len) + if (pos < MIN(n, len)) { buf[align + pos] = seek_char; buf[align + len] = -seek_char; @@ -107,6 +107,38 @@ do_test (size_t align, size_t pos, size_t len, size_t n, int seek_char) do_one_test (impl, (CHAR *) (buf + align), seek_char, n, result); } +static void +do_overflow_tests (void) +{ + size_t i, j, len; + const size_t one = 1; + uintptr_t buf_addr = (uintptr_t) buf1; + + for (i = 0; i < 750; ++i) + { + do_test (0, i, 751, SIZE_MAX - i, BIG_CHAR); + do_test (0, i, 751, i - buf_addr, BIG_CHAR); + do_test (0, i, 751, -buf_addr - i, BIG_CHAR); + do_test (0, i, 751, SIZE_MAX - buf_addr - i, BIG_CHAR); + do_test (0, i, 751, SIZE_MAX - buf_addr + i, BIG_CHAR); + + len = 0; + for (j = 8 * sizeof(size_t) - 1; j ; --j) + { + len |= one << j; + do_test (0, i, 751, len - i, BIG_CHAR); + do_test (0, i, 751, len + i, BIG_CHAR); + do_test (0, i, 751, len - buf_addr - i, BIG_CHAR); + do_test (0, i, 751, len - buf_addr + i, BIG_CHAR); + + do_test (0, i, 751, ~len - i, BIG_CHAR); + do_test (0, i, 751, ~len + i, BIG_CHAR); + do_test (0, i, 751, ~len - buf_addr - i, BIG_CHAR); + do_test (0, i, 751, ~len - buf_addr + i, BIG_CHAR); + } + } +} + static void do_random_tests (void) { @@ -221,6 +253,7 @@ test_main (void) do_test (page_size / 2 - i, i, i, 1, 0x9B); do_random_tests (); + do_overflow_tests (); return ret; } diff --git a/string/test-strncat.c b/string/test-strncat.c index 2ef917b82..37ea26ea0 100644 --- a/string/test-strncat.c +++ b/string/test-strncat.c @@ -134,6 +134,66 @@ do_test (size_t align1, size_t align2, size_t len1, size_t len2, } } +static void +do_overflow_tests (void) +{ + size_t i, j, len; + const size_t one = 1; + CHAR *s1, *s2; + uintptr_t s1_addr; + s1 = (CHAR *) buf1; + s2 = (CHAR *) buf2; + s1_addr = (uintptr_t)s1; + for (j = 0; j < 200; ++j) + s2[j] = 32 + 23 * j % (BIG_CHAR - 32); + s2[200] = 0; + for (i = 0; i < 750; ++i) { + for (j = 0; j < i; ++j) + s1[j] = 32 + 23 * j % (BIG_CHAR - 32); + s1[i] = '\0'; + + FOR_EACH_IMPL (impl, 0) + { + s2[200] = '\0'; + do_one_test (impl, s2, s1, SIZE_MAX - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, i - s1_addr); + s2[200] = '\0'; + do_one_test (impl, s2, s1, -s1_addr - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, SIZE_MAX - s1_addr - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, SIZE_MAX - s1_addr + i); + } + + len = 0; + for (j = 8 * sizeof(size_t) - 1; j ; --j) + { + len |= one << j; + FOR_EACH_IMPL (impl, 0) + { + s2[200] = '\0'; + do_one_test (impl, s2, s1, len - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, len + i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, len - s1_addr - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, len - s1_addr 
+ i); + + s2[200] = '\0'; + do_one_test (impl, s2, s1, ~len - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, ~len + i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, ~len - s1_addr - i); + s2[200] = '\0'; + do_one_test (impl, s2, s1, ~len - s1_addr + i); + } + } + } +} + static void do_random_tests (void) { @@ -316,6 +376,7 @@ test_main (void) } do_random_tests (); + do_overflow_tests (); return ret; } diff --git a/string/test-strncmp.c b/string/test-strncmp.c index 10b34de8d..97e831d88 100644 --- a/string/test-strncmp.c +++ b/string/test-strncmp.c @@ -435,6 +435,18 @@ check3 (void) } } +static void +check4 (void) +{ + const CHAR *s1 = L ("abc"); + CHAR *s2 = STRDUP (s1); + + FOR_EACH_IMPL (impl, 0) + check_result (impl, s1, s2, SIZE_MAX, 0); + + free (s2); +} + int test_main (void) { @@ -445,6 +457,7 @@ test_main (void) check1 (); check2 (); check3 (); + check4 (); printf ("%23s", ""); FOR_EACH_IMPL (impl, 0) diff --git a/string/test-strnlen.c b/string/test-strnlen.c index 61eb521dc..0d9479d15 100644 --- a/string/test-strnlen.c +++ b/string/test-strnlen.c @@ -27,6 +27,7 @@ #ifndef WIDE # define STRNLEN strnlen +# define MEMSET memset # define CHAR char # define BIG_CHAR CHAR_MAX # define MIDDLE_CHAR 127 @@ -34,6 +35,7 @@ #else # include <wchar.h> # define STRNLEN wcsnlen +# define MEMSET wmemset # define CHAR wchar_t # define BIG_CHAR WCHAR_MAX # define MIDDLE_CHAR 1121 @@ -87,6 +89,38 @@ do_test (size_t align, size_t len, size_t maxlen, int max_char) do_one_test (impl, (CHAR *) (buf + align), maxlen, MIN (len, maxlen)); } +static void +do_overflow_tests (void) +{ + size_t i, j, len; + const size_t one = 1; + uintptr_t buf_addr = (uintptr_t) buf1; + + for (i = 0; i < 750; ++i) + { + do_test (0, i, SIZE_MAX - i, BIG_CHAR); + do_test (0, i, i - buf_addr, BIG_CHAR); + do_test (0, i, -buf_addr - i, BIG_CHAR); + do_test (0, i, SIZE_MAX - buf_addr - i, BIG_CHAR); + do_test (0, i, SIZE_MAX - buf_addr + i, BIG_CHAR); + + len = 0; + for (j = 8 * sizeof(size_t) - 1; j ; --j) + { + len |= one << j; + do_test (0, i, len - i, BIG_CHAR); + do_test (0, i, len + i, BIG_CHAR); + do_test (0, i, len - buf_addr - i, BIG_CHAR); + do_test (0, i, len - buf_addr + i, BIG_CHAR); + + do_test (0, i, ~len - i, BIG_CHAR); + do_test (0, i, ~len + i, BIG_CHAR); + do_test (0, i, ~len - buf_addr - i, BIG_CHAR); + do_test (0, i, ~len - buf_addr + i, BIG_CHAR); + } + } +} + static void do_random_tests (void) { @@ -153,7 +187,7 @@ do_page_tests (void) size_t last_offset = (page_size / sizeof (CHAR)) - 1; CHAR *s = (CHAR *) buf2; - memset (s, 65, (last_offset - 1)); + MEMSET (s, 65, (last_offset - 1)); s[last_offset] = 0; /* Place short strings ending at page boundary. */ @@ -196,6 +230,35 @@ do_page_tests (void) } } +/* Tests meant to reveal failures in implementations that access bytes + beyond the maximum length. */ + +static void +do_page_2_tests (void) +{ + size_t i, exp_len, offset; + size_t last_offset = page_size / sizeof (CHAR); + + CHAR *s = (CHAR *) buf2; + MEMSET (s, 65, last_offset); + + /* Place short strings ending at page boundary without the null + byte. */ + offset = last_offset; + for (i = 0; i < 128; i++) + { + /* Decrease offset to stress several sizes and alignments. */ + offset--; + exp_len = last_offset - offset; + FOR_EACH_IMPL (impl, 0) + { + /* If an implementation goes beyond EXP_LEN, it will trigger + the segfault.
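The do_overflow_tests routines added to these string tests all target one bug class: deriving an end pointer as S + MAXLEN wraps around when MAXLEN is near SIZE_MAX, so a vectorized implementation scans from the wrong limit. A hedged sketch (not from the patch; strnlen_clamped is an illustrative name) of the clamp that avoids the wraparound:

#include <stdint.h>
#include <string.h>

/* If MAXLEN exceeds the address space left above S, S + MAXLEN wraps;
   clamping first keeps the end-pointer computation in range.  */
static size_t
strnlen_clamped (const char *s, size_t maxlen)
{
  size_t room = (size_t) (UINTPTR_MAX - (uintptr_t) s);
  return strnlen (s, maxlen < room ? maxlen : room);
}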
*/ + do_one_test (impl, (CHAR *) (s + offset), exp_len, exp_len); + } + } +} + int test_main (void) { @@ -242,6 +305,8 @@ test_main (void) do_random_tests (); do_page_tests (); + do_page_2_tests (); + do_overflow_tests (); return ret; } diff --git a/sunrpc/Makefile b/sunrpc/Makefile index 976158540..a7cd3eafa 100644 --- a/sunrpc/Makefile +++ b/sunrpc/Makefile @@ -65,7 +65,8 @@ shared-only-routines = $(routines) endif tests = tst-xdrmem tst-xdrmem2 test-rpcent tst-udp-error tst-udp-timeout \ - tst-udp-nonblocking + tst-udp-nonblocking tst-bug22542 tst-bug28768 + xtests := tst-getmyaddr ifeq ($(have-thread-library),yes) @@ -111,6 +112,8 @@ $(objpfx)tst-udp-nonblocking: $(common-objpfx)linkobj/libc.so $(objpfx)tst-udp-garbage: \ $(common-objpfx)linkobj/libc.so $(shared-thread-library) +$(objpfx)tst-bug22542: $(common-objpfx)linkobj/libc.so + else # !have-GLIBC_2.31 routines = $(routines-for-nss) diff --git a/sunrpc/clnt_gen.c b/sunrpc/clnt_gen.c index 13ced8994..b44357cd8 100644 --- a/sunrpc/clnt_gen.c +++ b/sunrpc/clnt_gen.c @@ -57,9 +57,13 @@ clnt_create (const char *hostname, u_long prog, u_long vers, if (strcmp (proto, "unix") == 0) { - memset ((char *)&sun, 0, sizeof (sun)); - sun.sun_family = AF_UNIX; - strcpy (sun.sun_path, hostname); + if (__sockaddr_un_set (&sun, hostname) < 0) + { + struct rpc_createerr *ce = &get_rpc_createerr (); + ce->cf_stat = RPC_SYSTEMERROR; + ce->cf_error.re_errno = errno; + return NULL; + } sock = RPC_ANYSOCK; client = clntunix_create (&sun, prog, vers, &sock, 0, 0); if (client == NULL) diff --git a/sunrpc/svc_unix.c b/sunrpc/svc_unix.c index 679fbe9cb..46f8d16fe 100644 --- a/sunrpc/svc_unix.c +++ b/sunrpc/svc_unix.c @@ -154,7 +154,10 @@ svcunix_create (int sock, u_int sendsize, u_int recvsize, char *path) SVCXPRT *xprt; struct unix_rendezvous *r; struct sockaddr_un addr; - socklen_t len = sizeof (struct sockaddr_in); + socklen_t len = sizeof (addr); + + if (__sockaddr_un_set (&addr, path) < 0) + return NULL; if (sock == RPC_ANYSOCK) { @@ -165,12 +168,6 @@ svcunix_create (int sock, u_int sendsize, u_int recvsize, char *path) } madesock = TRUE; } - memset (&addr, '\0', sizeof (addr)); - addr.sun_family = AF_UNIX; - len = strlen (path) + 1; - memcpy (addr.sun_path, path, len); - len += sizeof (addr.sun_family); - __bind (sock, (struct sockaddr *) &addr, len); if (__getsockname (sock, (struct sockaddr *) &addr, &len) != 0 diff --git a/sunrpc/svcauth_des.c b/sunrpc/svcauth_des.c index 7607abc81..25a85c909 100644 --- a/sunrpc/svcauth_des.c +++ b/sunrpc/svcauth_des.c @@ -58,7 +58,6 @@ #define debug(msg) /*printf("svcauth_des: %s\n", msg) */ -#define USEC_PER_SEC ((uint32_t) 1000000L) #define BEFORE(t1, t2) timercmp(t1, t2, <) /* diff --git a/sunrpc/tst-bug22542.c b/sunrpc/tst-bug22542.c new file mode 100644 index 000000000..d6cd79787 --- /dev/null +++ b/sunrpc/tst-bug22542.c @@ -0,0 +1,44 @@ +/* Test to verify that overlong hostname is rejected by clnt_create + and doesn't cause a buffer overflow (bug 22542). + + Copyright (C) 2022 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include + +static int +do_test (void) +{ + /* Create an arbitrary hostname that's longer than fits in sun_path. */ + char name [sizeof ((struct sockaddr_un*)0)->sun_path * 2]; + memset (name, 'x', sizeof name - 1); + name [sizeof name - 1] = '\0'; + + errno = 0; + CLIENT *clnt = clnt_create (name, 0, 0, "unix"); + + TEST_VERIFY (clnt == NULL); + TEST_COMPARE (errno, EINVAL); + return 0; +} + +#include diff --git a/sunrpc/tst-bug28768.c b/sunrpc/tst-bug28768.c new file mode 100644 index 000000000..35a4b7b0b --- /dev/null +++ b/sunrpc/tst-bug28768.c @@ -0,0 +1,42 @@ +/* Test to verify that long path is rejected by svcunix_create (bug 28768). + Copyright (C) 2022 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include + +/* svcunix_create does not have a default version in linkobj/libc.so. */ +compat_symbol_reference (libc, svcunix_create, svcunix_create, GLIBC_2_1); + +static int +do_test (void) +{ + char pathname[109]; + memset (pathname, 'x', sizeof (pathname)); + pathname[sizeof (pathname) - 1] = '\0'; + + errno = 0; + TEST_VERIFY (svcunix_create (RPC_ANYSOCK, 4096, 4096, pathname) == NULL); + TEST_COMPARE (errno, EINVAL); + + return 0; +} + +#include diff --git a/support/Makefile b/support/Makefile index bb9889efb..f631846c0 100644 --- a/support/Makefile +++ b/support/Makefile @@ -67,6 +67,8 @@ libsupport-routines = \ support_quote_string \ support_record_failure \ support_run_diff \ + support_select_modifies_timeout \ + support_select_normalizes_timeout \ support_set_small_thread_stack_size \ support_shared_allocate \ support_small_stack_thread_attribute \ @@ -90,6 +92,7 @@ libsupport-routines = \ xchdir \ xchroot \ xclock_gettime \ + xclone \ xclose \ xchmod \ xconnect \ @@ -139,6 +142,7 @@ libsupport-routines = \ xpthread_join \ xpthread_key_create \ xpthread_key_delete \ + xpthread_kill \ xpthread_mutex_consistent \ xpthread_mutex_destroy \ xpthread_mutex_init \ diff --git a/support/capture_subprocess.h b/support/capture_subprocess.h index 8969d4a99..4be430f09 100644 --- a/support/capture_subprocess.h +++ b/support/capture_subprocess.h @@ -41,6 +41,12 @@ struct support_capture_subprocess support_capture_subprocess struct support_capture_subprocess support_capture_subprogram (const char *file, char *const argv[]); +/* Copy the running program into a setgid binary and run it with CHILD_ID + argument. If execution is successful, return the exit status of the child + program, otherwise return a non-zero failure exit code. 
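Hedged usage sketch for the declaration that follows, mirroring the tst-secure-getenv conversion elsewhere in this patch ("run-actual-test" is that test's child marker; run_sgid_child is an illustrative wrapper name):

#include <support/capture_subprocess.h>
#include <support/check.h>
#include <support/test-driver.h>
#include <sys/wait.h>

static int
run_sgid_child (void)
{
  int status = support_capture_subprogram_self_sgid
    ((char *) "run-actual-test");
  if (WEXITSTATUS (status) == EXIT_UNSUPPORTED)
    return EXIT_UNSUPPORTED;       /* e.g. no usable second GID */
  if (!WIFEXITED (status))
    FAIL_EXIT1 ("child did not exit normally: %d\n", status);
  return 0;
}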
*/ +int support_capture_subprogram_self_sgid + (char *child_id); + /* Deallocate the subprocess data captured by support_capture_subprocess. */ void support_capture_subprocess_free (struct support_capture_subprocess *); diff --git a/support/subprocess.h b/support/subprocess.h index 11cfc6a07..40d82c7e4 100644 --- a/support/subprocess.h +++ b/support/subprocess.h @@ -38,6 +38,11 @@ struct support_subprocess support_subprocess struct support_subprocess support_subprogram (const char *file, char *const argv[]); +/* Invoke program FILE with ARGV arguments by using posix_spawn and wait for it + to complete. Return the program's exit status. */ +int support_subprogram_wait + (const char *file, char *const argv[]); + /* Wait for the subprocess indicated by PROC::PID. Return the status indicate by waitpid call. */ int support_process_wait (struct support_subprocess *proc); diff --git a/support/support.h b/support/support.h index 9cbc45572..8c7890e0a 100644 --- a/support/support.h +++ b/support/support.h @@ -23,6 +23,7 @@ #ifndef SUPPORT_H #define SUPPORT_H +#include <stdbool.h> #include <stddef.h> #include <sys/types.h> /* For mode_t. */ @@ -129,6 +130,14 @@ extern void support_copy_file (const char *from, const char *to); extern ssize_t support_copy_file_range (int, off64_t *, int, off64_t *, size_t, unsigned int); +/* Return true if select modifies the timeout to reflect the amount of time + not slept. */ +extern bool support_select_modifies_timeout (void); + +/* Return true if select normalizes the timeout input by taking into account + tv_usec values larger than 1000000. */ +extern bool support_select_normalizes_timeout (void); + __END_DECLS #endif /* SUPPORT_H */ diff --git a/support/support_capture_subprocess.c b/support/support_capture_subprocess.c index a7afa0e70..27bfd19c9 100644 --- a/support/support_capture_subprocess.c +++ b/support/support_capture_subprocess.c @@ -20,11 +20,14 @@ #include #include +#include #include #include #include #include #include +#include +#include static void transfer (const char *what, struct pollfd *pfd, struct xmemstream *stream) @@ -36,7 +39,7 @@ transfer (const char *what, struct pollfd *pfd, struct xmemstream *stream) if (ret < 0) { support_record_failure (); - printf ("error: reading from subprocess %s: %m", what); - printf ("error: reading from subprocess %s: %m\n", what); pfd->events = 0; pfd->revents = 0; } @@ -102,6 +105,129 @@ support_capture_subprogram (const char *file, char *const argv[]) return result; } +/* Copies the executable into a restricted directory, so that we can + safely make it SGID with the TARGET group ID. Then runs the + executable.
*/ +static int +copy_and_spawn_sgid (char *child_id, gid_t gid) +{ + char *dirname = xasprintf ("%s/tst-tunables-setuid.%jd", + test_dir, (intmax_t) getpid ()); + char *execname = xasprintf ("%s/bin", dirname); + int infd = -1; + int outfd = -1; + int ret = 1, status = 1; + + TEST_VERIFY (mkdir (dirname, 0700) == 0); + if (support_record_failure_is_failed ()) + goto err; + + infd = open ("/proc/self/exe", O_RDONLY); + if (infd < 0) + FAIL_UNSUPPORTED ("unsupported: Cannot read binary from procfs\n"); + + outfd = open (execname, O_WRONLY | O_CREAT | O_EXCL, 0700); + TEST_VERIFY (outfd >= 0); + if (support_record_failure_is_failed ()) + goto err; + + char buf[4096]; + for (;;) + { + ssize_t rdcount = read (infd, buf, sizeof (buf)); + TEST_VERIFY (rdcount >= 0); + if (support_record_failure_is_failed ()) + goto err; + if (rdcount == 0) + break; + char *p = buf; + char *end = buf + rdcount; + while (p != end) + { + ssize_t wrcount = write (outfd, buf, end - p); + if (wrcount == 0) + errno = ENOSPC; + TEST_VERIFY (wrcount > 0); + if (support_record_failure_is_failed ()) + goto err; + p += wrcount; + } + } + TEST_VERIFY (fchown (outfd, getuid (), gid) == 0); + if (support_record_failure_is_failed ()) + goto err; + TEST_VERIFY (fchmod (outfd, 02750) == 0); + if (support_record_failure_is_failed ()) + goto err; + TEST_VERIFY (close (outfd) == 0); + if (support_record_failure_is_failed ()) + goto err; + TEST_VERIFY (close (infd) == 0); + if (support_record_failure_is_failed ()) + goto err; + + /* We have the binary, now spawn the subprocess. Avoid using + support_subprogram because we only want the program exit status, not the + contents. */ + ret = 0; + + char * const args[] = {execname, child_id, NULL}; + + status = support_subprogram_wait (args[0], args); + +err: + if (outfd >= 0) + close (outfd); + if (infd >= 0) + close (infd); + if (execname != NULL) + { + unlink (execname); + free (execname); + } + if (dirname != NULL) + { + rmdir (dirname); + free (dirname); + } + + if (ret != 0) + FAIL_EXIT1("Failed to make sgid executable for test\n"); + + return status; +} + +int +support_capture_subprogram_self_sgid (char *child_id) +{ + gid_t target = 0; + const int count = 64; + gid_t groups[count]; + + /* Get a GID which is not our current GID, but is present in the + supplementary group list. */ + int ret = getgroups (count, groups); + if (ret < 0) + FAIL_UNSUPPORTED("Could not get group list for user %jd\n", + (intmax_t) getuid ()); + + gid_t current = getgid (); + for (int i = 0; i < ret; ++i) + { + if (groups[i] != current) + { + target = groups[i]; + break; + } + } + + if (target == 0) + FAIL_UNSUPPORTED("Could not find a suitable GID for user %jd\n", + (intmax_t) getuid ()); + + return copy_and_spawn_sgid (child_id, target); +} + void support_capture_subprocess_free (struct support_capture_subprocess *p) { diff --git a/support/support_select_modifies_timeout.c b/support/support_select_modifies_timeout.c new file mode 100644 index 000000000..653ea2cc9 --- /dev/null +++ b/support/support_select_modifies_timeout.c @@ -0,0 +1,29 @@ +/* Return whether select modifies the timeout. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +bool +support_select_modifies_timeout (void) +{ +#ifdef __linux__ + return true; +#else + return false; +#endif +} diff --git a/support/support_select_normalizes_timeout.c b/support/support_select_normalizes_timeout.c new file mode 100644 index 000000000..987f9b035 --- /dev/null +++ b/support/support_select_normalizes_timeout.c @@ -0,0 +1,29 @@ +/* Return whether select normalizes the timeout. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include + +bool +support_select_normalizes_timeout (void) +{ +#ifdef __linux__ + return true; +#else + return false; +#endif +} diff --git a/support/support_subprocess.c b/support/support_subprocess.c index 88489a335..89e767ae4 100644 --- a/support/support_subprocess.c +++ b/support/support_subprocess.c @@ -27,7 +27,7 @@ #include static struct support_subprocess -support_suprocess_init (void) +support_subprocess_init (void) { struct support_subprocess result; @@ -48,7 +48,7 @@ support_suprocess_init (void) struct support_subprocess support_subprocess (void (*callback) (void *), void *closure) { - struct support_subprocess result = support_suprocess_init (); + struct support_subprocess result = support_subprocess_init (); result.pid = xfork (); if (result.pid == 0) @@ -71,7 +71,7 @@ support_subprocess (void (*callback) (void *), void *closure) struct support_subprocess support_subprogram (const char *file, char *const argv[]) { - struct support_subprocess result = support_suprocess_init (); + struct support_subprocess result = support_subprocess_init (); posix_spawn_file_actions_t fa; /* posix_spawn_file_actions_init does not fail. 
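The two support_select_* predicates just added let timeout assertions adapt to platform behavior. A hedged sketch of how a select test might use them (the shape is assumed, not copied from the patch):

#include <sys/select.h>
#include <support/check.h>
#include <support/support.h>

static void
check_timeout_writeback (void)
{
  struct timeval tv = { .tv_sec = 0, .tv_usec = 250000 };
  TEST_COMPARE (select (0, NULL, NULL, NULL, &tv), 0);
  /* Only platforms that write back the unslept time (Linux, per the
     helper above) are expected to zero the timeout here.  */
  if (support_select_modifies_timeout ())
    TEST_COMPARE (tv.tv_usec, 0);
}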
*/ @@ -84,7 +84,7 @@ support_subprogram (const char *file, char *const argv[]) xposix_spawn_file_actions_addclose (&fa, result.stdout_pipe[1]); xposix_spawn_file_actions_addclose (&fa, result.stderr_pipe[1]); - result.pid = xposix_spawn (file, &fa, NULL, argv, NULL); + result.pid = xposix_spawn (file, &fa, NULL, argv, environ); xclose (result.stdout_pipe[1]); xclose (result.stderr_pipe[1]); @@ -92,6 +92,19 @@ support_subprogram (const char *file, char *const argv[]) return result; } +int +support_subprogram_wait (const char *file, char *const argv[]) +{ + posix_spawn_file_actions_t fa; + + posix_spawn_file_actions_init (&fa); + struct support_subprocess res = support_subprocess_init (); + + res.pid = xposix_spawn (file, &fa, NULL, argv, environ); + + return support_process_wait (&res); +} + int support_process_wait (struct support_subprocess *proc) { diff --git a/support/temp_file.c b/support/temp_file.c index c6df64187..e41128c2d 100644 --- a/support/temp_file.c +++ b/support/temp_file.c @@ -1,5 +1,6 @@ /* Temporary file handling for tests. - Copyright (C) 1998-2021 Free Software Foundation, Inc. + Copyright (C) 1998-2022 Free Software Foundation, Inc. + Copyright The GNU Tools Authors. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -20,15 +21,17 @@ some 32-bit platforms. */ #define _FILE_OFFSET_BITS 64 +#include #include #include #include +#include #include #include #include #include -#include +#include /* List of temporary files. */ static struct temp_name_list @@ -36,14 +39,20 @@ static struct temp_name_list struct temp_name_list *next; char *name; pid_t owner; + bool toolong; } *temp_name_list; /* Location of the temporary files. Set by the test skeleton via support_set_test_dir. The string is not be freed. */ static const char *test_dir = _PATH_TMP; -void -add_temp_file (const char *name) +/* Name of subdirectories in a too long temporary directory tree. */ +static char toolong_subdir[NAME_MAX + 1]; +static bool toolong_initialized; +static size_t toolong_path_max; + +static void +add_temp_file_internal (const char *name, bool toolong) { struct temp_name_list *newp = (struct temp_name_list *) xcalloc (sizeof (*newp), 1); @@ -53,12 +62,19 @@ add_temp_file (const char *name) newp->name = newname; newp->next = temp_name_list; newp->owner = getpid (); + newp->toolong = toolong; temp_name_list = newp; } else free (newp); } +void +add_temp_file (const char *name) +{ + add_temp_file_internal (name, false); +} + int create_temp_file_in_dir (const char *base, const char *dir, char **filename) { @@ -90,8 +106,8 @@ create_temp_file (const char *base, char **filename) return create_temp_file_in_dir (base, test_dir, filename); } -char * -support_create_temp_directory (const char *base) +static char * +create_temp_directory_internal (const char *base, bool toolong) { char *path = xasprintf ("%s/%sXXXXXX", test_dir, base); if (mkdtemp (path) == NULL) @@ -99,16 +115,132 @@ support_create_temp_directory (const char *base) printf ("error: mkdtemp (\"%s\"): %m", path); exit (1); } - add_temp_file (path); + add_temp_file_internal (path, toolong); return path; } -/* Helper functions called by the test skeleton follow. 
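Hedged usage sketch for the new support_subprogram_wait above (the program path is illustrative only): it spawns via posix_spawn without capturing output and returns what waitpid reported.

#include <support/check.h>
#include <support/subprocess.h>
#include <sys/wait.h>

static void
run_helper (void)
{
  char *const args[] = { (char *) "/bin/true", NULL };  /* illustrative */
  int status = support_subprogram_wait (args[0], args);
  TEST_VERIFY (WIFEXITED (status) && WEXITSTATUS (status) == 0);
}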
*/ +char * +support_create_temp_directory (const char *base) +{ + return create_temp_directory_internal (base, false); +} + +static void +ensure_toolong_initialized (void) +{ + if (!toolong_initialized) + FAIL_EXIT1 ("uninitialized toolong directory tree\n"); +} + +static void +initialize_toolong (const char *base) +{ + long name_max = pathconf (base, _PC_NAME_MAX); + name_max = (name_max < 0 ? 64 + : (name_max < sizeof (toolong_subdir) ? name_max + : sizeof (toolong_subdir) - 1)); + + long path_max = pathconf (base, _PC_PATH_MAX); + path_max = (path_max < 0 ? 1024 + : path_max <= PTRDIFF_MAX ? path_max : PTRDIFF_MAX); + + /* Sanity check to ensure that the test does not create temporary directories + in different filesystems because this API doesn't support it. */ + if (toolong_initialized) + { + if (name_max != strlen (toolong_subdir)) + FAIL_UNSUPPORTED ("name_max: Temporary directories in different" + " filesystems not supported yet\n"); + if (path_max != toolong_path_max) + FAIL_UNSUPPORTED ("path_max: Temporary directories in different" + " filesystems not supported yet\n"); + return; + } + + toolong_path_max = path_max; + + size_t len = name_max; + memset (toolong_subdir, 'X', len); + toolong_initialized = true; +} + +char * +support_create_and_chdir_toolong_temp_directory (const char *basename) +{ + char *base = create_temp_directory_internal (basename, true); + xchdir (base); + + initialize_toolong (base); + + size_t sz = strlen (toolong_subdir); + + /* Create directories and descend into them so that the final path is larger + than PATH_MAX. */ + for (size_t i = 0; i <= toolong_path_max / sz; i++) + { + int ret = mkdir (toolong_subdir, S_IRWXU); + if (ret != 0 && errno == ENAMETOOLONG) + FAIL_UNSUPPORTED ("Filesystem does not support creating too long " + "directory trees\n"); + else if (ret != 0) + FAIL_EXIT1 ("Failed to create directory tree: %m\n"); + xchdir (toolong_subdir); + } + return base; +} void -support_set_test_dir (const char *path) +support_chdir_toolong_temp_directory (const char *base) { - test_dir = path; + ensure_toolong_initialized (); + + xchdir (base); + + size_t sz = strlen (toolong_subdir); + for (size_t i = 0; i <= toolong_path_max / sz; i++) + xchdir (toolong_subdir); +} + +/* Helper functions called by the test skeleton follow. */ + +static void +remove_toolong_subdirs (const char *base) +{ + ensure_toolong_initialized (); + + if (chdir (base) != 0) + { + printf ("warning: toolong cleanup base failed: chdir (\"%s\"): %m\n", + base); + return; + } + + /* Descend. */ + int levels = 0; + size_t sz = strlen (toolong_subdir); + for (levels = 0; levels <= toolong_path_max / sz; levels++) + if (chdir (toolong_subdir) != 0) + { + printf ("warning: toolong cleanup failed: chdir (\"%s\"): %m\n", + toolong_subdir); + break; + } + + /* Ascend and remove. */ + while (--levels >= 0) + { + if (chdir ("..") != 0) + { + printf ("warning: toolong cleanup failed: chdir (\"..\"): %m\n"); + return; + } + if (remove (toolong_subdir) != 0) + { + printf ("warning: could not remove subdirectory: %s: %m\n", + toolong_subdir); + return; + } + } } void @@ -123,6 +255,9 @@ support_delete_temp_files (void) around, to prevent PID reuse.) 
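Putting the new too-long directory API together (this mirrors stdlib/tst-realpath-toolong.c from this patch; exercise_long_cwd is an illustrative name): create a tree deeper than PATH_MAX, run the code under test inside it, and let the skeleton's cleanup unwind it.

#include <stdlib.h>
#include <support/temp_file.h>

static void
exercise_long_cwd (void)
{
  char *base = support_create_and_chdir_toolong_temp_directory ("tst-x.");
  /* ... code under test runs with a cwd longer than PATH_MAX ... */
  support_chdir_toolong_temp_directory (base);   /* descend again */
  free (base);
}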
*/ if (temp_name_list->owner == pid) { + if (temp_name_list->toolong) + remove_toolong_subdirs (temp_name_list->name); + if (remove (temp_name_list->name) != 0) printf ("warning: could not remove temporary file: %s: %m\n", temp_name_list->name); @@ -147,3 +282,9 @@ support_print_temp_files (FILE *f) fprintf (f, ")\n"); } } + +void +support_set_test_dir (const char *path) +{ + test_dir = path; +} diff --git a/support/temp_file.h b/support/temp_file.h index f3a7fb6f9..a22964c6f 100644 --- a/support/temp_file.h +++ b/support/temp_file.h @@ -44,6 +44,15 @@ int create_temp_file_in_dir (const char *base, const char *dir, returns. The caller should free this string. */ char *support_create_temp_directory (const char *base); +/* Create a temporary directory tree that is longer than PATH_MAX and schedule + it for deletion. BASENAME is used as a prefix for the unique directory + name, which the function returns. The caller should free this string. */ +char *support_create_and_chdir_toolong_temp_directory (const char *basename); + +/* Change into the innermost directory of the directory tree BASE, which was + created using support_create_and_chdir_toolong_temp_directory. */ +void support_chdir_toolong_temp_directory (const char *base); + __END_DECLS #endif /* SUPPORT_TEMP_FILE_H */ diff --git a/support/test-container.c b/support/test-container.c index 28cc44d9f..94498d390 100644 --- a/support/test-container.c +++ b/support/test-container.c @@ -481,7 +481,7 @@ need_sync (char *ap, char *bp, struct stat *a, struct stat *b) } static void -rsync_1 (path_buf * src, path_buf * dest, int and_delete) +rsync_1 (path_buf * src, path_buf * dest, int and_delete, int force_copies) { DIR *dir; struct dirent *de; @@ -491,8 +491,9 @@ rsync_1 (path_buf * src, path_buf * dest, int and_delete) r_append ("/", dest); if (verbose) - printf ("sync %s to %s %s\n", src->buf, dest->buf, - and_delete ? "and delete" : ""); + printf ("sync %s to %s%s%s\n", src->buf, dest->buf, + and_delete ? " and delete" : "", + force_copies ? " (forced)" : ""); size_t staillen = src->len; @@ -521,10 +522,10 @@ rsync_1 (path_buf * src, path_buf * dest, int and_delete) missing. */ lstat (dest->buf, &d); - if (! need_sync (src->buf, dest->buf, &s, &d)) + if (! force_copies && ! 
need_sync (src->buf, dest->buf, &s, &d)) { if (S_ISDIR (s.st_mode)) - rsync_1 (src, dest, and_delete); + rsync_1 (src, dest, and_delete, force_copies); continue; } @@ -559,7 +560,7 @@ rsync_1 (path_buf * src, path_buf * dest, int and_delete) if (verbose) printf ("+D %s\n", dest->buf); maybe_xmkdir (dest->buf, (s.st_mode & 0777) | 0700); - rsync_1 (src, dest, and_delete); + rsync_1 (src, dest, and_delete, force_copies); break; case S_IFLNK: @@ -639,12 +640,12 @@ rsync_1 (path_buf * src, path_buf * dest, int and_delete) } static void -rsync (char *src, char *dest, int and_delete) +rsync (char *src, char *dest, int and_delete, int force_copies) { r_setup (src, &spath); r_setup (dest, &dpath); - rsync_1 (&spath, &dpath, and_delete); + rsync_1 (&spath, &dpath, and_delete, force_copies); } @@ -846,11 +847,11 @@ main (int argc, char **argv) do_ldconfig = true; rsync (pristine_root_path, new_root_path, - file_exists (concat (command_root, "/preclean.req", NULL))); + file_exists (concat (command_root, "/preclean.req", NULL)), 0); if (stat (command_root, &st) >= 0 && S_ISDIR (st.st_mode)) - rsync (command_root, new_root_path, 0); + rsync (command_root, new_root_path, 0, 1); new_objdir_path = xstrdup (concat (new_root_path, support_objdir_root, NULL)); @@ -1044,7 +1045,7 @@ main (int argc, char **argv) /* Child has exited, we can post-clean the test root. */ printf("running post-clean rsync\n"); - rsync (pristine_root_path, new_root_path, 1); + rsync (pristine_root_path, new_root_path, 1, 0); if (WIFEXITED (status)) exit (WEXITSTATUS (status)); diff --git a/support/xclone.c b/support/xclone.c new file mode 100644 index 000000000..243eee8b2 --- /dev/null +++ b/support/xclone.c @@ -0,0 +1,49 @@ +/* Auxiliary functions to issue the clone syscall. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#ifdef __linux__ +# include +# include /* For _STACK_GROWS_{UP,DOWN}. */ +# include + +pid_t +xclone (int (*fn) (void *arg), void *arg, void *stack, size_t stack_size, + int flags) +{ + pid_t r = -1; + +# ifdef __ia64__ + extern int __clone2 (int (*fn) (void *arg), void *stack, size_t stack_size, + int flags, void *arg, ...); + r = __clone2 (fn, stack, stack_size, flags, arg, /* ptid */ NULL, + /* tls */ NULL, /* ctid */ NULL); +# else +# if _STACK_GROWS_DOWN + r = clone (fn, stack + stack_size, flags, arg, /* ptid */ NULL, + /* tls */ NULL, /* ctid */ NULL); +# elif _STACK_GROWS_UP + r = clone (fn, stack, flags, arg, /* ptid */ NULL, /* tls */ NULL, NULL); +# endif +# endif + + if (r < 0) + FAIL_EXIT1 ("clone: %m"); + + return r; +} +#endif diff --git a/support/xpthread_kill.c b/support/xpthread_kill.c new file mode 100644 index 000000000..111a75d85 --- /dev/null +++ b/support/xpthread_kill.c @@ -0,0 +1,26 @@ +/* pthread_kill with error checking. + Copyright (C) 2021 Free Software Foundation, Inc. 
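A hedged sketch of driving the new xclone wrapper above (declared in support/xsched.h below); the stack size and the SIGCHLD termination signal are illustrative choices, not requirements from the patch.

#include <signal.h>
#include <stdlib.h>
#include <support/support.h>
#include <support/xsched.h>
#include <sys/wait.h>

static int
child_fn (void *arg)
{
  return 0;
}

static void
run_cloned_child (void)
{
  size_t stack_size = 1024 * 1024;
  char *stack = xmalloc (stack_size);
  /* xclone picks the correct stack end for the architecture; SIGCHLD
     makes the child reapable with waitpid, like a fork child.  */
  pid_t pid = xclone (child_fn, NULL, stack, stack_size, SIGCHLD);
  waitpid (pid, NULL, 0);
  free (stack);
}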
+ This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#include <signal.h> +#include <support/xthread.h> + +void +xpthread_kill (pthread_t thr, int signo) +{ + xpthread_check_return ("pthread_kill", pthread_kill (thr, signo)); +} diff --git a/support/xsched.h b/support/xsched.h new file mode 100644 index 000000000..eefd73194 --- /dev/null +++ b/support/xsched.h @@ -0,0 +1,34 @@ +/* Wrapper for sched.h functions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <https://www.gnu.org/licenses/>. */ + +#ifndef SUPPORT_XSCHED_H +#define SUPPORT_XSCHED_H + +__BEGIN_DECLS + +#include <sched.h> +#include <sys/types.h> + +#ifdef __linux__ +pid_t xclone (int (*fn) (void *arg), void *arg, void *stack, + size_t stack_size, int flags); +#endif + +__END_DECLS + +#endif diff --git a/support/xthread.h b/support/xthread.h index c2086db34..1ba3f133a 100644 --- a/support/xthread.h +++ b/support/xthread.h @@ -75,6 +75,8 @@ void xpthread_attr_setstacksize (pthread_attr_t *attr, void xpthread_attr_setguardsize (pthread_attr_t *attr, size_t guardsize); +void xpthread_kill (pthread_t thr, int signo); + /* Return the stack size used on support_set_small_thread_stack_size. */ size_t support_small_thread_stack_size (void); /* Set the stack size in ATTR to a small value, but still large enough diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h index aab7245e9..d552a7886 100644 --- a/sysdeps/generic/ldsodefs.h +++ b/sysdeps/generic/ldsodefs.h @@ -893,6 +893,17 @@ extern int _dl_catch_error (const char **objname, const char **errstring, void *args); libc_hidden_proto (_dl_catch_error) + +/* libdl in a secondary namespace (after dlopen) must use + _dl_catch_error from the main namespace, so it has to be exported + in some way. Initialized to _rtld_catch_error in rtld.c. Not in + _rtld_global_ro to preserve structure layout. */ +extern __typeof (_dl_catch_error) *_dl_catch_error_ptr attribute_relro; +rtld_hidden_proto (_dl_catch_error_ptr) + +/* Used for initializing _dl_catch_error_ptr. */ +extern __typeof__ (_dl_catch_error) _rtld_catch_error attribute_hidden; + /* Call OPERATE (ARGS). If no error occurs, set *EXCEPTION to zero. Otherwise, store a copy of the raised exception in *EXCEPTION, which has to be freed by _dl_exception_free.
As a special case, if diff --git a/sysdeps/mach/hurd/if_index.c b/sysdeps/mach/hurd/if_index.c index 56e63a4a9..5e566da82 100644 --- a/sysdeps/mach/hurd/if_index.c +++ b/sysdeps/mach/hurd/if_index.c @@ -32,7 +32,7 @@ unsigned int __if_nametoindex (const char *ifname) { struct ifreq ifr; - int fd = __opensock (); + int fd = __socket (AF_INET, SOCK_DGRAM, 0); if (fd < 0) return 0; @@ -84,7 +84,7 @@ __if_nameindex (void) error_t err = 0; char data[2048]; file_t server; - int fd = __opensock (); + int fd = __socket (AF_INET, SOCK_DGRAM, 0); struct ifconf ifc; unsigned int nifs, i; struct if_nameindex *idx = NULL; @@ -169,7 +169,7 @@ char * __if_indextoname (unsigned int ifindex, char *ifname) { struct ifreq ifr; - int fd = __opensock (); + int fd = __socket (AF_INET, SOCK_DGRAM, 0); if (fd < 0) return NULL; diff --git a/sysdeps/nios2/libm-test-ulps b/sysdeps/nios2/libm-test-ulps index 8c3e9df54..9315ba82f 100644 --- a/sysdeps/nios2/libm-test-ulps +++ b/sysdeps/nios2/libm-test-ulps @@ -12,7 +12,7 @@ Function: "asin": float: 1 Function: "asinh": -double: 1 +double: 2 float: 2 Function: "atan": @@ -80,7 +80,7 @@ double: 1 float: 1 Function: "cbrt": -double: 3 +double: 4 float: 1 Function: Real part of "ccos": @@ -127,7 +127,7 @@ double: 1 float: 1 Function: "cosh": -double: 1 +double: 2 float: 2 Function: Real part of "cpow": @@ -177,10 +177,11 @@ double: 1 float: 1 Function: "erfc": -double: 3 +double: 5 float: 3 Function: "exp": +double: 1 float: 1 Function: "exp10": @@ -256,7 +257,7 @@ double: 2 float: 2 Function: "tgamma": -double: 5 +double: 9 float: 8 Function: "y0": diff --git a/sysdeps/nptl/lowlevellock-futex.h b/sysdeps/nptl/lowlevellock-futex.h index ecb729da6..ca96397a4 100644 --- a/sysdeps/nptl/lowlevellock-futex.h +++ b/sysdeps/nptl/lowlevellock-futex.h @@ -50,20 +50,8 @@ #define LLL_SHARED FUTEX_PRIVATE_FLAG #ifndef __ASSEMBLER__ - -# if IS_IN (libc) || IS_IN (rtld) -/* In libc.so or ld.so all futexes are private. */ -# define __lll_private_flag(fl, private) \ - ({ \ - /* Prevent warnings in callers of this macro. */ \ - int __lll_private_flag_priv __attribute__ ((unused)); \ - __lll_private_flag_priv = (private); \ - ((fl) | FUTEX_PRIVATE_FLAG); \ - }) -# else -# define __lll_private_flag(fl, private) \ +# define __lll_private_flag(fl, private) \ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private)) -# endif # define lll_futex_syscall(nargs, futexp, op, ...) \ ({ \ diff --git a/sysdeps/posix/getcwd.c b/sysdeps/posix/getcwd.c index f11644aae..242e4dbe4 100644 --- a/sysdeps/posix/getcwd.c +++ b/sysdeps/posix/getcwd.c @@ -187,6 +187,13 @@ __getcwd_generic (char *buf, size_t size) size_t allocated = size; size_t used; + /* A size of 1 byte is never useful. */ + if (allocated == 1) + { + __set_errno (ERANGE); + return NULL; + } + #if HAVE_MINIMALLY_WORKING_GETCWD /* If AT_FDCWD is not defined, the algorithm below is O(N**2) and this is much slower than the system getcwd (at least on diff --git a/sysdeps/powerpc/Makefile b/sysdeps/powerpc/Makefile index d1c71a0ca..06491b0ef 100644 --- a/sysdeps/powerpc/Makefile +++ b/sysdeps/powerpc/Makefile @@ -62,11 +62,6 @@ ifeq ($(subdir),misc) sysdep_headers += sys/platform/ppc.h tests += test-gettimebase tests += tst-set_ppr - -# This test is expected to run and exit with EXIT_UNSUPPORTED on -# processors that do not implement the Power ISA 2.06 or greater. -# But the test makes use of instructions from Power ISA 2.06 and 2.07. 
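Returning to the sysdeps/posix/getcwd.c change above: a 1-byte buffer cannot hold even "/" plus its terminator, so the generic path now fails immediately. A minimal sketch (not part of the patch) of the visible behavior:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  char tiny[1];
  errno = 0;
  /* Expected after the patch: NULL with errno == ERANGE.  */
  if (getcwd (tiny, sizeof tiny) == NULL && errno == ERANGE)
    puts ("ERANGE, as expected");
  return 0;
}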
-CFLAGS-tst-set_ppr.c += -Wa,-many endif ifeq ($(subdir),wcsmbs) diff --git a/sysdeps/powerpc/powerpc64/sysdep.h b/sysdeps/powerpc/powerpc64/sysdep.h index c57bb1c05..83fdc8e83 100644 --- a/sysdeps/powerpc/powerpc64/sysdep.h +++ b/sysdeps/powerpc/powerpc64/sysdep.h @@ -275,12 +275,14 @@ LT_LABELSUFFIX(name,_name_end): ; \ /* Allocate frame and save register */ #define NVOLREG_SAVE \ stdu r1,-SCV_FRAME_SIZE(r1); \ + cfi_adjust_cfa_offset(SCV_FRAME_SIZE); \ std r31,SCV_FRAME_NVOLREG_SAVE(r1); \ - cfi_adjust_cfa_offset(SCV_FRAME_SIZE); + cfi_rel_offset(r31,SCV_FRAME_NVOLREG_SAVE); /* Restore register and destroy frame */ #define NVOLREG_RESTORE \ ld r31,SCV_FRAME_NVOLREG_SAVE(r1); \ + cfi_restore(r31); \ addi r1,r1,SCV_FRAME_SIZE; \ cfi_adjust_cfa_offset(-SCV_FRAME_SIZE); @@ -331,13 +333,13 @@ LT_LABELSUFFIX(name,_name_end): ; \ #define DO_CALL_SCV \ mflr r9; \ - std r9,FRAME_LR_SAVE(r1); \ - cfi_offset(lr,FRAME_LR_SAVE); \ + std r9,SCV_FRAME_SIZE+FRAME_LR_SAVE(r1); \ + cfi_rel_offset(lr,SCV_FRAME_SIZE+FRAME_LR_SAVE); \ .machine "push"; \ .machine "power9"; \ scv 0; \ .machine "pop"; \ - ld r9,FRAME_LR_SAVE(r1); \ + ld r9,SCV_FRAME_SIZE+FRAME_LR_SAVE(r1); \ mtlr r9; \ cfi_restore(lr); @@ -398,8 +400,9 @@ LT_LABELSUFFIX(name,_name_end): ; \ #endif #define RET_SCV \ - cmpdi r3,0; \ - bgelr+; \ + li r9,-4095; \ + cmpld r3,r9; \ + bltlr+; \ neg r3,r3; #define RET_SC \ diff --git a/sysdeps/powerpc/powerpc64/tst-ucontext-ppc64-vscr.c b/sysdeps/powerpc/powerpc64/tst-ucontext-ppc64-vscr.c index 28c87fcef..d3fc4ab58 100644 --- a/sysdeps/powerpc/powerpc64/tst-ucontext-ppc64-vscr.c +++ b/sysdeps/powerpc/powerpc64/tst-ucontext-ppc64-vscr.c @@ -50,6 +50,7 @@ do_test (void) /* Set SAT bit in VSCR register. */ asm volatile (".machine push;\n" ".machine \"power5\";\n" + ".machine altivec;\n" "vspltisb %0,0;\n" "vspltisb %1,-1;\n" "vpkuwus %0,%0,%1;\n" diff --git a/sysdeps/powerpc/tst-set_ppr.c b/sysdeps/powerpc/tst-set_ppr.c index 7684f5d6e..e80da1532 100644 --- a/sysdeps/powerpc/tst-set_ppr.c +++ b/sysdeps/powerpc/tst-set_ppr.c @@ -44,7 +44,8 @@ get_thread_priority (void) { /* Read the PPR. */ ppr_t ppr; - asm volatile (MFPPR" %0" : "=r"(ppr)); + asm volatile (".machine push; .machine power7; "MFPPR" %0; .machine pop" + : "=r"(ppr)); /* Return the thread priority value. */ return EXTRACT_THREAD_PRIORITY (ppr); } diff --git a/sysdeps/pthread/Makefile b/sysdeps/pthread/Makefile index eeb64f9fb..2df947cb0 100644 --- a/sysdeps/pthread/Makefile +++ b/sysdeps/pthread/Makefile @@ -108,6 +108,7 @@ tests += tst-cnd-basic tst-mtx-trylock tst-cnd-broadcast \ tst-unload \ tst-unwind-thread \ tst-pt-vfork1 tst-pt-vfork2 tst-vfork1x tst-vfork2x \ + tst-pthread-exit-signal \ # Files which must not be linked with libpthread. 
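The RET_SCV change above (sysdeps/powerpc/powerpc64/sysdep.h) replaces a signed "negative means error" test with an unsigned comparison against -4095, matching the Linux convention that only return values in [-4095, -1] encode -errno; a plain signed test misclassifies large valid results such as addresses returned by mmap. A minimal C sketch of that convention (not glibc's actual macro, and assuming the kernel's return lands in an unsigned long):

#include <errno.h>

/* Values in [-4095, -1] are -errno; anything else is a valid result.  */
static inline long int
syscall_ret_to_errno (unsigned long int r3)
{
  if (r3 > -4096UL)		/* r3 is in [-4095, -1]: an error.  */
    {
      errno = (int) -(long int) r3;
      return -1;
    }
  return (long int) r3;
}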
@@ -231,7 +232,7 @@ generated += $(objpfx)tst-atfork2.mtrace \ tests-internal += tst-cancel25 tst-robust8 -tests += tst-oncex3 tst-oncex4 +tests += tst-oncex3 tst-oncex4 tst-oncey3 tst-oncey4 modules-names += tst-join7mod @@ -242,6 +243,8 @@ endif CFLAGS-tst-oncex3.c += -fexceptions CFLAGS-tst-oncex4.c += -fexceptions +CFLAGS-tst-oncey3.c += -fno-exceptions -fno-asynchronous-unwind-tables +CFLAGS-tst-oncey4.c += -fno-exceptions -fno-asynchronous-unwind-tables $(objpfx)tst-join7: $(libdl) $(shared-thread-library) $(objpfx)tst-join7.out: $(objpfx)tst-join7mod.so diff --git a/sysdeps/pthread/tst-oncey3.c b/sysdeps/pthread/tst-oncey3.c new file mode 100644 index 000000000..08225b88d --- /dev/null +++ b/sysdeps/pthread/tst-oncey3.c @@ -0,0 +1 @@ +#include "tst-once3.c" diff --git a/sysdeps/pthread/tst-oncey4.c b/sysdeps/pthread/tst-oncey4.c new file mode 100644 index 000000000..9b4d98f3f --- /dev/null +++ b/sysdeps/pthread/tst-oncey4.c @@ -0,0 +1 @@ +#include "tst-once4.c" diff --git a/sysdeps/pthread/tst-pthread-exit-signal.c b/sysdeps/pthread/tst-pthread-exit-signal.c new file mode 100644 index 000000000..b4526fe66 --- /dev/null +++ b/sysdeps/pthread/tst-pthread-exit-signal.c @@ -0,0 +1,45 @@ +/* Test that pending signals are not delivered on thread exit (bug 28607). + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +/* Due to bug 28607, pthread_kill (or pthread_cancel) restored the + signal mask during thread exit, triggering the delivery of a + blocked pending signal (SIGUSR1 in this test).
*/ + +#include +#include + +static void * +threadfunc (void *closure) +{ + sigset_t sigmask; + sigfillset (&sigmask); + xpthread_sigmask (SIG_SETMASK, &sigmask, NULL); + xpthread_kill (pthread_self (), SIGUSR1); + pthread_exit (NULL); + return NULL; +} + +static int +do_test (void) +{ + pthread_t thr = xpthread_create (NULL, threadfunc, NULL); + xpthread_join (thr); + return 0; +} + +#include diff --git a/sysdeps/riscv/rv64/rvd/libm-test-ulps b/sysdeps/riscv/rv64/rvd/libm-test-ulps index 5b6f121ac..a0085bcdb 100644 --- a/sysdeps/riscv/rv64/rvd/libm-test-ulps +++ b/sysdeps/riscv/rv64/rvd/libm-test-ulps @@ -60,7 +60,7 @@ float: 1 ldouble: 2 Function: "asinh": -double: 1 +double: 2 float: 2 ldouble: 3 @@ -413,7 +413,7 @@ float: 1 ldouble: 2 Function: "cbrt": -double: 3 +double: 4 float: 1 ldouble: 1 @@ -652,17 +652,17 @@ float: 1 ldouble: 2 Function: "cosh": -double: 1 +double: 2 float: 2 ldouble: 1 Function: "cosh_downward": -double: 2 +double: 3 float: 1 ldouble: 2 Function: "cosh_towardzero": -double: 2 +double: 3 float: 1 ldouble: 2 @@ -948,6 +948,7 @@ float: 4 ldouble: 5 Function: "exp": +double: 1 float: 1 ldouble: 1 @@ -1068,7 +1069,7 @@ ldouble: 4 Function: "j0_towardzero": double: 5 float: 6 -ldouble: 2 +ldouble: 4 Function: "j0_upward": double: 4 @@ -1136,6 +1137,7 @@ float: 5 ldouble: 8 Function: "log": +double: 1 ldouble: 1 Function: "log10": @@ -1274,7 +1276,7 @@ float: 3 ldouble: 3 Function: "sinh_towardzero": -double: 2 +double: 3 float: 2 ldouble: 3 @@ -1323,22 +1325,22 @@ float: 3 ldouble: 3 Function: "tgamma": -double: 5 +double: 9 float: 8 ldouble: 4 Function: "tgamma_downward": -double: 5 +double: 8 float: 7 ldouble: 5 Function: "tgamma_towardzero": -double: 5 +double: 9 float: 7 ldouble: 5 Function: "tgamma_upward": -double: 4 +double: 9 float: 8 ldouble: 4 diff --git a/sysdeps/s390/configure b/sysdeps/s390/configure index 5f98640d0..7eaefbabc 100644 --- a/sysdeps/s390/configure +++ b/sysdeps/s390/configure @@ -123,7 +123,9 @@ void testinsn (char *buf) __asm__ (".machine \"arch13\" \n\t" ".machinemode \"zarch_nohighgprs\" \n\t" "lghi %%r0,16 \n\t" - "mvcrl 0(%0),32(%0)" : : "a" (buf) : "memory", "r0"); + "mvcrl 0(%0),32(%0) \n\t" + "vstrs %%v20,%%v20,%%v20,%%v20,0,2" + : : "a" (buf) : "memory", "r0"); } EOF if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS --shared conftest.c @@ -271,7 +273,9 @@ else void testinsn (char *buf) { __asm__ ("lghi %%r0,16 \n\t" - "mvcrl 0(%0),32(%0)" : : "a" (buf) : "memory", "r0"); + "mvcrl 0(%0),32(%0) \n\t" + "vstrs %%v20,%%v20,%%v20,%%v20,0,2" + : : "a" (buf) : "memory", "r0"); } EOF if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS --shared conftest.c diff --git a/sysdeps/s390/configure.ac b/sysdeps/s390/configure.ac index dfe007a77..e6df62491 100644 --- a/sysdeps/s390/configure.ac +++ b/sysdeps/s390/configure.ac @@ -88,7 +88,9 @@ void testinsn (char *buf) __asm__ (".machine \"arch13\" \n\t" ".machinemode \"zarch_nohighgprs\" \n\t" "lghi %%r0,16 \n\t" - "mvcrl 0(%0),32(%0)" : : "a" (buf) : "memory", "r0"); + "mvcrl 0(%0),32(%0) \n\t" + "vstrs %%v20,%%v20,%%v20,%%v20,0,2" + : : "a" (buf) : "memory", "r0"); } EOF dnl test, if assembler supports S390 arch13 instructions @@ -195,7 +197,9 @@ cat > conftest.c <<\EOF void testinsn (char *buf) { __asm__ ("lghi %%r0,16 \n\t" - "mvcrl 0(%0),32(%0)" : : "a" (buf) : "memory", "r0"); + "mvcrl 0(%0),32(%0) \n\t" + "vstrs %%v20,%%v20,%%v20,%%v20,0,2" + : : "a" (buf) : "memory", "r0"); } EOF dnl test, if assembler supports S390 arch13 zarch instructions as default diff --git a/sysdeps/s390/dl-procinfo.c 
b/sysdeps/s390/dl-procinfo.c index 0c334a255..155f0bd99 100644 --- a/sysdeps/s390/dl-procinfo.c +++ b/sysdeps/s390/dl-procinfo.c @@ -46,12 +46,13 @@ #if !defined PROCINFO_DECL && defined SHARED ._dl_s390_cap_flags #else -PROCINFO_CLASS const char _dl_s390_cap_flags[19][9] +PROCINFO_CLASS const char _dl_s390_cap_flags[23][9] #endif #ifndef PROCINFO_DECL = { "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", "edat", "etf3eh", - "highgprs", "te", "vx", "vxd", "vxe", "gs", "vxe2", "vxp", "sort", "dflt" + "highgprs", "te", "vx", "vxd", "vxe", "gs", "vxe2", "vxp", "sort", "dflt", + "vxp2", "nnpa", "pcimio", "sie" } #endif #if !defined SHARED || defined PROCINFO_DECL diff --git a/sysdeps/s390/dl-procinfo.h b/sysdeps/s390/dl-procinfo.h index 9e1a8c7ba..e4e3e334a 100644 --- a/sysdeps/s390/dl-procinfo.h +++ b/sysdeps/s390/dl-procinfo.h @@ -21,7 +21,7 @@ #define _DL_PROCINFO_H 1 #include -#define _DL_HWCAP_COUNT 19 +#define _DL_HWCAP_COUNT 23 #define _DL_PLATFORMS_COUNT 10 @@ -61,6 +61,10 @@ enum HWCAP_S390_VXRS_PDE = 1 << 16, HWCAP_S390_SORT = 1 << 17, HWCAP_S390_DFLT = 1 << 18, + HWCAP_S390_VXRS_PDE2 = 1 << 19, + HWCAP_S390_NNPA = 1 << 20, + HWCAP_S390_PCI_MIO = 1 << 21, + HWCAP_S390_SIE = 1 << 22, }; #define HWCAP_IMPORTANT (HWCAP_S390_ZARCH | HWCAP_S390_LDISP \ diff --git a/sysdeps/s390/memmem-arch13.S b/sysdeps/s390/memmem-arch13.S index c5c8d8c97..58df8cdb1 100644 --- a/sysdeps/s390/memmem-arch13.S +++ b/sysdeps/s390/memmem-arch13.S @@ -41,7 +41,7 @@ ENTRY(MEMMEM_ARCH13) # error The arch13 variant of memmem needs the z13 variant of memmem! # endif clgfi %r5,9 - jh MEMMEM_Z13 + jgh MEMMEM_Z13 aghik %r0,%r5,-1 /* vll needs highest index. */ bc 4,0(%r14) /* cc==1: return if needle-len == 0. */ diff --git a/sysdeps/s390/memmove.c b/sysdeps/s390/memmove.c index f88ea79d9..1a7d3369f 100644 --- a/sysdeps/s390/memmove.c +++ b/sysdeps/s390/memmove.c @@ -43,7 +43,7 @@ extern __typeof (__redirect_memmove) MEMMOVE_ARCH13 attribute_hidden; s390_libc_ifunc_expr (__redirect_memmove, memmove, ({ s390_libc_ifunc_expr_stfle_init (); - (HAVE_MEMMOVE_ARCH13 + (HAVE_MEMMOVE_ARCH13 && (hwcap & HWCAP_S390_VXRS_EXT2) && S390_IS_ARCH13_MIE3 (stfle_bits)) ? MEMMOVE_ARCH13 : (HAVE_MEMMOVE_Z13 && (hwcap & HWCAP_S390_VX)) diff --git a/sysdeps/s390/multiarch/ifunc-impl-list.c b/sysdeps/s390/multiarch/ifunc-impl-list.c index 4b170e445..2ef38b72d 100644 --- a/sysdeps/s390/multiarch/ifunc-impl-list.c +++ b/sysdeps/s390/multiarch/ifunc-impl-list.c @@ -171,7 +171,8 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL (i, name, memmove, # if HAVE_MEMMOVE_ARCH13 IFUNC_IMPL_ADD (array, i, memmove, - S390_IS_ARCH13_MIE3 (stfle_bits), + ((dl_hwcap & HWCAP_S390_VXRS_EXT2) + && S390_IS_ARCH13_MIE3 (stfle_bits)), MEMMOVE_ARCH13) # endif # if HAVE_MEMMOVE_Z13 diff --git a/sysdeps/s390/strstr-arch13.S b/sysdeps/s390/strstr-arch13.S index c7183e627..222a6de91 100644 --- a/sysdeps/s390/strstr-arch13.S +++ b/sysdeps/s390/strstr-arch13.S @@ -49,7 +49,7 @@ ENTRY(STRSTR_ARCH13) # error The arch13 variant of strstr needs the z13 variant of strstr! # endif clgfi %r4,9 - jh STRSTR_Z13 + jgh STRSTR_Z13 /* In case of a partial match, the vstrs instruction returns the index of the partial match in a vector-register. 
Then we have to diff --git a/sysdeps/unix/sysv/linux/Makefile b/sysdeps/unix/sysv/linux/Makefile index 472eab700..9531641f8 100644 --- a/sysdeps/unix/sysv/linux/Makefile +++ b/sysdeps/unix/sysv/linux/Makefile @@ -282,7 +282,12 @@ sysdep_routines += xstatconv internal_statvfs internal_statvfs64 \ sysdep_headers += bits/fcntl-linux.h -tests += tst-fallocate tst-fallocate64 tst-o_path-locks +tests += \ + tst-fallocate \ + tst-fallocate64 \ + tst-getcwd-smallbuff \ + tst-o_path-locks \ +# tests endif ifeq ($(subdir),elf) diff --git a/sysdeps/unix/sysv/linux/aarch64/clone.S b/sysdeps/unix/sysv/linux/aarch64/clone.S index c9e63bae4..fe04bce6b 100644 --- a/sysdeps/unix/sysv/linux/aarch64/clone.S +++ b/sysdeps/unix/sysv/linux/aarch64/clone.S @@ -47,6 +47,8 @@ ENTRY(__clone) /* Sanity check args. */ mov x0, #-EINVAL cbz x10, .Lsyscall_error + /* Align sp. */ + and x1, x1, -16 cbz x1, .Lsyscall_error /* Do the system call. */ diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c index fe52b6308..db6aa3516 100644 --- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c +++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c @@ -104,7 +104,7 @@ init_cpu_features (struct cpu_features *cpu_features) cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0; /* If we lack the MTE feature, disable the tunable, since it will otherwise cause instructions that won't run on this CPU to be used. */ - TUNABLE_SET (glibc, mem, tagging, unsigned, cpu_features->mte_state); + TUNABLE_SET (glibc, mem, tagging, cpu_features->mte_state); # endif if (cpu_features->mte_state & 2) diff --git a/sysdeps/unix/sysv/linux/dl-diagnostics-kernel.c b/sysdeps/unix/sysv/linux/dl-diagnostics-kernel.c new file mode 100644 index 000000000..59f6402c5 --- /dev/null +++ b/sysdeps/unix/sysv/linux/dl-diagnostics-kernel.c @@ -0,0 +1,77 @@ +/* Print kernel diagnostics data in ld.so. Linux version. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include + +/* Dump the auxiliary vector to standard output. */ +static void +print_auxv (void) +{ + /* See _dl_show_auxv. The code below follows the general output + format for diagnostic dumps. */ + unsigned int index = 0; + for (ElfW(auxv_t) *av = GLRO(dl_auxv); av->a_type != AT_NULL; ++av) + { + _dl_printf ("auxv[0x%x].a_type=0x%lx\n" + "auxv[0x%x].a_val=", + index, (unsigned long int) av->a_type, index); + if (av->a_type == AT_EXECFN + || av->a_type == AT_PLATFORM + || av->a_type == AT_BASE_PLATFORM) + /* The address of the strings is not useful at all, so print + the strings themselves. */ + _dl_diagnostics_print_string ((const char *) av->a_un.a_val); + else + _dl_printf ("0x%lx", (unsigned long int) av->a_un.a_val); + _dl_printf ("\n"); + ++index; + } +} + +/* Print one uname entry.
*/ +static void +print_utsname_entry (const char *field, const char *value) +{ + _dl_printf ("uname."); + _dl_diagnostics_print_labeled_string (field, value); +} + +/* Print information from uname, including the kernel version. */ +static void +print_uname (void) +{ + struct utsname uts; + if (__uname (&uts) == 0) + { + print_utsname_entry ("sysname", uts.sysname); + print_utsname_entry ("nodename", uts.nodename); + print_utsname_entry ("release", uts.release); + print_utsname_entry ("version", uts.version); + print_utsname_entry ("machine", uts.machine); + print_utsname_entry ("domainname", uts.domainname); + } +} + +void +_dl_diagnostics_kernel (void) +{ + print_auxv (); + print_uname (); +} diff --git a/sysdeps/unix/sysv/linux/fstat.c b/sysdeps/unix/sysv/linux/fstat.c index fd6436220..31a172dcc 100644 --- a/sysdeps/unix/sysv/linux/fstat.c +++ b/sysdeps/unix/sysv/linux/fstat.c @@ -19,11 +19,17 @@ #include #include #include +#include #if !XSTAT_IS_XSTAT64 int __fstat (int fd, struct stat *buf) { + if (fd < 0) + { + __set_errno (EBADF); + return -1; + } return __fstatat (fd, "", buf, AT_EMPTY_PATH); } diff --git a/sysdeps/unix/sysv/linux/fstat64.c b/sysdeps/unix/sysv/linux/fstat64.c index 993abcb44..46de80b66 100644 --- a/sysdeps/unix/sysv/linux/fstat64.c +++ b/sysdeps/unix/sysv/linux/fstat64.c @@ -22,10 +22,16 @@ #include #include #include +#include int __fstat64_time64 (int fd, struct __stat64_t64 *buf) { + if (fd < 0) + { + __set_errno (EBADF); + return -1; + } return __fstatat64_time64 (fd, "", buf, AT_EMPTY_PATH); } #if __TIMESIZE != 64 @@ -34,6 +40,12 @@ hidden_def (__fstat64_time64) int __fstat64 (int fd, struct stat64 *buf) { + if (fd < 0) + { + __set_errno (EBADF); + return -1; + } + struct __stat64_t64 st_t64; return __fstat64_time64 (fd, &st_t64) ?: __cp_stat64_t64_stat64 (&st_t64, buf); diff --git a/sysdeps/unix/sysv/linux/mips/fxstat.c b/sysdeps/unix/sysv/linux/mips/fxstat.c index 11511d30b..4a6016ff1 100644 --- a/sysdeps/unix/sysv/linux/mips/fxstat.c +++ b/sysdeps/unix/sysv/linux/mips/fxstat.c @@ -35,7 +35,9 @@ __fxstat (int vers, int fd, struct stat *buf) { struct kernel_stat kbuf; int r = INTERNAL_SYSCALL_CALL (fstat, fd, &kbuf); - return r ?: __xstat_conv (vers, &kbuf, buf); + if (r == 0) + return __xstat_conv (vers, &kbuf, buf); + return INLINE_SYSCALL_ERROR_RETURN_VALUE (-r); } } } diff --git a/sysdeps/unix/sysv/linux/mips/lxstat.c b/sysdeps/unix/sysv/linux/mips/lxstat.c index 871fb6c6c..54f990a25 100644 --- a/sysdeps/unix/sysv/linux/mips/lxstat.c +++ b/sysdeps/unix/sysv/linux/mips/lxstat.c @@ -35,7 +35,9 @@ __lxstat (int vers, const char *name, struct stat *buf) { struct kernel_stat kbuf; int r = INTERNAL_SYSCALL_CALL (lstat, name, &kbuf); - return r ?: __xstat_conv (vers, &kbuf, buf); + if (r == 0) + return __xstat_conv (vers, &kbuf, buf); + return INLINE_SYSCALL_ERROR_RETURN_VALUE (-r); } } } diff --git a/sysdeps/unix/sysv/linux/mips/xstat.c b/sysdeps/unix/sysv/linux/mips/xstat.c index 9d810b6f6..86f4dc31a 100644 --- a/sysdeps/unix/sysv/linux/mips/xstat.c +++ b/sysdeps/unix/sysv/linux/mips/xstat.c @@ -35,7 +35,9 @@ __xstat (int vers, const char *name, struct stat *buf) { struct kernel_stat kbuf; int r = INTERNAL_SYSCALL_CALL (stat, name, &kbuf); - return r ?: __xstat_conv (vers, &kbuf, buf); + if (r == 0) + return __xstat_conv (vers, &kbuf, buf); + return INLINE_SYSCALL_ERROR_RETURN_VALUE (-r); } } } diff --git a/sysdeps/unix/sysv/linux/mq_notify.c b/sysdeps/unix/sysv/linux/mq_notify.c index cc575a0cd..1714e1cc5 100644 --- a/sysdeps/unix/sysv/linux/mq_notify.c +++ 
b/sysdeps/unix/sysv/linux/mq_notify.c @@ -132,9 +132,12 @@ helper_thread (void *arg) to wait until it is done with it. */ (void) __pthread_barrier_wait (¬ify_barrier); } - else if (data.raw[NOTIFY_COOKIE_LEN - 1] == NOTIFY_REMOVED) - /* The only state we keep is the copy of the thread attributes. */ - free (data.attr); + else if (data.raw[NOTIFY_COOKIE_LEN - 1] == NOTIFY_REMOVED && data.attr != NULL) + { + /* The only state we keep is the copy of the thread attributes. */ + pthread_attr_destroy (data.attr); + free (data.attr); + } } return NULL; } @@ -255,8 +258,14 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification) if (data.attr == NULL) return -1; - memcpy (data.attr, notification->sigev_notify_attributes, - sizeof (pthread_attr_t)); + int ret = __pthread_attr_copy (data.attr, + notification->sigev_notify_attributes); + if (ret != 0) + { + free (data.attr); + __set_errno (ret); + return -1; + } } /* Construct the new request. */ @@ -269,8 +278,11 @@ mq_notify (mqd_t mqdes, const struct sigevent *notification) int retval = INLINE_SYSCALL (mq_notify, 2, mqdes, &se); /* If it failed, free the allocated memory. */ - if (__glibc_unlikely (retval != 0)) - free (data.attr); + if (retval != 0 && data.attr != NULL) + { + pthread_attr_destroy (data.attr); + free (data.attr); + } return retval; } diff --git a/sysdeps/unix/sysv/linux/opensock.c b/sysdeps/unix/sysv/linux/opensock.c deleted file mode 100644 index e87d6e58b..000000000 --- a/sysdeps/unix/sysv/linux/opensock.c +++ /dev/null @@ -1,114 +0,0 @@ -/* Copyright (C) 1999-2021 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include -#include -#include -#include -#include - -/* Return a socket of any type. The socket can be used in subsequent - ioctl calls to talk to the kernel. */ -int -__opensock (void) -{ - static int last_family; /* Available socket family we will use. */ - static int last_type; - static const struct - { - int family; - const char procname[15]; - } afs[] = - { - { AF_UNIX, "net/unix" }, - { AF_INET, "" }, - { AF_INET6, "net/if_inet6" }, - { AF_AX25, "net/ax25" }, - { AF_NETROM, "net/nr" }, - { AF_ROSE, "net/rose" }, - { AF_IPX, "net/ipx" }, - { AF_APPLETALK, "net/appletalk" }, - { AF_ECONET, "sys/net/econet" }, - { AF_ASH, "sys/net/ash" }, - { AF_X25, "net/x25" }, -#ifdef NEED_AF_IUCV - { AF_IUCV, "net/iucv" } -#endif - }; -#define nafs (sizeof (afs) / sizeof (afs[0])) - char fname[sizeof "/proc/" + 14]; - int result; - int has_proc; - size_t cnt; - - /* We already know which family to use from the last call. Use it - again. */ - if (last_family != 0) - { - assert (last_type != 0); - - result = __socket (last_family, last_type | SOCK_CLOEXEC, 0); - if (result != -1 || errno != EAFNOSUPPORT) - /* Maybe the socket type isn't supported anymore (module is - unloaded). In this case again try to find the type. 
*/ - return result; - - /* Reset the values. They seem not valid anymore. */ - last_family = 0; - last_type = 0; - } - - /* Check whether the /proc filesystem is available. */ - has_proc = __access ("/proc/net", R_OK) != -1; - strcpy (fname, "/proc/"); - - /* Iterate over the interface families and find one which is - available. */ - for (cnt = 0; cnt < nafs; ++cnt) - { - int type = SOCK_DGRAM; - - if (has_proc && afs[cnt].procname[0] != '\0') - { - strcpy (fname + 6, afs[cnt].procname); - if (__access (fname, R_OK) == -1) - /* The /proc entry is not available. I.e., we cannot - create a socket of this type (without loading the - module). Don't look for it since this might trigger - loading the module. */ - continue; - } - - if (afs[cnt].family == AF_NETROM || afs[cnt].family == AF_X25) - type = SOCK_SEQPACKET; - - result = __socket (afs[cnt].family, type | SOCK_CLOEXEC, 0); - if (result != -1) - { - /* Found an available family. */ - last_type = type; - last_family = afs[cnt].family; - return result; - } - } - - /* None of the protocol families is available. It is unclear what kind - of error is returned. ENOENT seems like a reasonable choice. */ - __set_errno (ENOENT); - return -1; -} diff --git a/sysdeps/unix/sysv/linux/powerpc/syscall.S b/sysdeps/unix/sysv/linux/powerpc/syscall.S index d6ec87f00..d06e776ca 100644 --- a/sysdeps/unix/sysv/linux/powerpc/syscall.S +++ b/sysdeps/unix/sysv/linux/powerpc/syscall.S @@ -27,7 +27,11 @@ ENTRY (syscall) mr r8,r9 #if !IS_IN(rtld) && (defined(__PPC64__) || defined(__powerpc64__)) CHECK_SCV_SUPPORT r9 0f + stdu r1,-SCV_FRAME_SIZE(r1) + cfi_adjust_cfa_offset(SCV_FRAME_SIZE) DO_CALL_SCV + addi r1,r1,SCV_FRAME_SIZE + cfi_adjust_cfa_offset(-SCV_FRAME_SIZE) RET_SCV b 1f #endif diff --git a/sysdeps/unix/sysv/linux/s390/bits/hwcap.h b/sysdeps/unix/sysv/linux/s390/bits/hwcap.h index 696616e77..00e73a3e3 100644 --- a/sysdeps/unix/sysv/linux/s390/bits/hwcap.h +++ b/sysdeps/unix/sysv/linux/s390/bits/hwcap.h @@ -22,6 +22,11 @@ /* * The following must match the kernels asm/elf.h. + * Note: The kernel commit 511ad531afd4090625def4d9aba1f5227bd44b8e + * "s390/hwcaps: shorten HWCAP defines" has shortened the prefix of the macros + * from "HWCAP_S390_" to "HWCAP_". For compatibility reasons, we do not + * change the prefix in public glibc header file. + * * Note that these are *not* the same as the STORE FACILITY LIST bits. */ #define HWCAP_S390_ESAN3 1 @@ -46,3 +51,7 @@ #define HWCAP_S390_VXRS_PDE 65536 #define HWCAP_S390_SORT 131072 #define HWCAP_S390_DFLT 262144 +#define HWCAP_S390_VXRS_PDE2 524288 +#define HWCAP_S390_NNPA 1048576 +#define HWCAP_S390_PCI_MIO 2097152 +#define HWCAP_S390_SIE 4194304 diff --git a/sysdeps/unix/sysv/linux/s390/opensock.c b/sysdeps/unix/sysv/linux/s390/opensock.c deleted file mode 100644 index f099d651f..000000000 --- a/sysdeps/unix/sysv/linux/s390/opensock.c +++ /dev/null @@ -1,2 +0,0 @@ -#define NEED_AF_IUCV 1 -#include "../opensock.c" diff --git a/sysdeps/unix/sysv/linux/select.c b/sysdeps/unix/sysv/linux/select.c index 415aa87d3..dc16a816e 100644 --- a/sysdeps/unix/sysv/linux/select.c +++ b/sysdeps/unix/sysv/linux/select.c @@ -33,13 +33,35 @@ int __select64 (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct __timeval64 *timeout) { - struct __timespec64 ts64, *pts64 = NULL; - if (timeout != NULL) + __time64_t s = timeout != NULL ? timeout->tv_sec : 0; + int32_t us = timeout != NULL ? 
timeout->tv_usec : 0; + int32_t ns; + + if (s < 0 || us < 0) + return INLINE_SYSCALL_ERROR_RETURN_VALUE (EINVAL); + + /* Normalize the timeout, as the legacy Linux __NR_select and + __NR__newselect syscalls do. Unlike the syscalls, it also handles + possible overflow. */ + if (us / USEC_PER_SEC > INT64_MAX - s) + { + s = INT64_MAX; + ns = NSEC_PER_SEC - 1; + } + else { - ts64 = timeval64_to_timespec64 (*timeout); - pts64 = &ts64; + s += us / USEC_PER_SEC; + us = us % USEC_PER_SEC; + ns = us * NSEC_PER_USEC; } + struct __timespec64 ts64, *pts64 = NULL; + if (timeout != NULL) + { + ts64.tv_sec = s; + ts64.tv_nsec = ns; + pts64 = &ts64; + } + #ifndef __NR_pselect6_time64 # define __NR_pselect6_time64 __NR_pselect6 #endif @@ -52,10 +74,10 @@ __select64 (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, (though the pselect() glibc call suppresses this behavior). Since select() on Linux has the same behavior as the pselect6 syscall, we update the timeout here. */ - if (r == 0 || errno != ENOSYS) + if (r >= 0 || errno != ENOSYS) { if (timeout != NULL) - TIMEVAL_TO_TIMESPEC (timeout, &ts64); + TIMESPEC_TO_TIMEVAL (timeout, &ts64); return r; } @@ -64,14 +86,15 @@ __select64 (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, #ifndef __ASSUME_TIME64_SYSCALLS struct timespec ts32, *pts32 = NULL; - if (timeout != NULL) + if (pts64 != NULL) { - if (! in_time_t_range (timeout->tv_sec)) + if (! in_time_t_range (pts64->tv_sec)) { __set_errno (EINVAL); return -1; } - ts32 = valid_timespec64_to_timespec (ts64); + ts32.tv_sec = s; + ts32.tv_nsec = ns; + pts32 = &ts32; } # ifndef __ASSUME_PSELECT @@ -84,7 +107,7 @@ __select64 (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, r = SYSCALL_CANCEL (pselect6, nfds, readfds, writefds, exceptfds, pts32, NULL); # endif - if (r >= 0 && timeout != NULL) + if (timeout != NULL) *timeout = valid_timespec_to_timeval64 (ts32); #endif @@ -105,7 +128,7 @@ __select (int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, ptv64 = &tv64; } int r = __select64 (nfds, readfds, writefds, exceptfds, ptv64); - if (r >= 0 && timeout != NULL) + if (timeout != NULL) /* The remaining timeout will always be less than the input TIMEOUT. */ *timeout = valid_timeval64_to_timeval (tv64); return r; diff --git a/sysdeps/unix/sysv/linux/sys/prctl.h b/sysdeps/unix/sysv/linux/sys/prctl.h index 00817ff0f..c9048c7cd 100644 --- a/sysdeps/unix/sysv/linux/sys/prctl.h +++ b/sysdeps/unix/sysv/linux/sys/prctl.h @@ -25,10 +25,6 @@ we're picking up... */ /* Memory tagging control operations (for AArch64). */ -#ifndef PR_TAGGED_ADDR_ENABLE -# define PR_TAGGED_ADDR_ENABLE (1UL << 8) -#endif - #ifndef PR_MTE_TCF_SHIFT # define PR_MTE_TCF_SHIFT 1 # define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) diff --git a/sysdeps/unix/sysv/linux/tst-getcwd-smallbuff.c b/sysdeps/unix/sysv/linux/tst-getcwd-smallbuff.c new file mode 100644 index 000000000..55362f606 --- /dev/null +++ b/sysdeps/unix/sysv/linux/tst-getcwd-smallbuff.c @@ -0,0 +1,259 @@ +/* Verify that getcwd returns ERANGE for size 1 byte and does not underflow + buffer when the CWD is too long and is also a mount target of /. See bug + #28769 or CVE-2021-3999 for more context. + Copyright The GNU Toolchain Authors. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version.
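The __select64 change above folds whole seconds out of tv_usec, converts the remainder to nanoseconds, and saturates at INT64_MAX rather than overflowing tv_sec. A standalone restatement of that normalization, with the constants spelled out (1000000 for USEC_PER_SEC, 1000000000 for NSEC_PER_SEC) and a hypothetical helper name:

#include <stdint.h>

/* Illustrative sketch of the timeout normalization in __select64:
   fold microseconds into seconds, convert the rest to nanoseconds,
   and saturate instead of overflowing.  */
static void
normalize_timeout (int64_t s, int32_t us, int64_t *sec, int32_t *nsec)
{
  if (us / 1000000 > INT64_MAX - s)	/* Would overflow tv_sec.  */
    {
      *sec = INT64_MAX;
      *nsec = 1000000000 - 1;
    }
  else
    {
      *sec = s + us / 1000000;
      *nsec = (us % 1000000) * 1000;
    }
}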
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static char *base; +#define BASENAME "tst-getcwd-smallbuff" +#define MOUNT_NAME "mpoint" +static int sockfd[2]; + +static void +do_cleanup (void) +{ + support_chdir_toolong_temp_directory (base); + TEST_VERIFY_EXIT (rmdir (MOUNT_NAME) == 0); + free (base); +} + +static void +send_fd (const int sock, const int fd) +{ + struct msghdr msg = {0}; + union + { + struct cmsghdr hdr; + char buf[CMSG_SPACE (sizeof (int))]; + } cmsgbuf = {0}; + struct cmsghdr *cmsg; + struct iovec vec; + char ch = 'A'; + ssize_t n; + + msg.msg_control = &cmsgbuf.buf; + msg.msg_controllen = sizeof (cmsgbuf.buf); + + cmsg = CMSG_FIRSTHDR (&msg); + cmsg->cmsg_len = CMSG_LEN (sizeof (int)); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + memcpy (CMSG_DATA (cmsg), &fd, sizeof (fd)); + + vec.iov_base = &ch; + vec.iov_len = 1; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + + while ((n = sendmsg (sock, &msg, 0)) == -1 && errno == EINTR); + + TEST_VERIFY_EXIT (n == 1); +} + +static int +recv_fd (const int sock) +{ + struct msghdr msg = {0}; + union + { + struct cmsghdr hdr; + char buf[CMSG_SPACE(sizeof(int))]; + } cmsgbuf = {0}; + struct cmsghdr *cmsg; + struct iovec vec; + ssize_t n; + char ch = '\0'; + int fd = -1; + + vec.iov_base = &ch; + vec.iov_len = 1; + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + + msg.msg_control = &cmsgbuf.buf; + msg.msg_controllen = sizeof (cmsgbuf.buf); + + while ((n = recvmsg (sock, &msg, 0)) == -1 && errno == EINTR); + if (n != 1 || ch != 'A') + return -1; + + cmsg = CMSG_FIRSTHDR (&msg); + if (cmsg == NULL) + return -1; + if (cmsg->cmsg_type != SCM_RIGHTS) + return -1; + memcpy (&fd, CMSG_DATA (cmsg), sizeof (fd)); + if (fd < 0) + return -1; + return fd; +} + +static int +child_func (void * const arg) +{ + xclose (sockfd[0]); + const int sock = sockfd[1]; + char ch; + + TEST_VERIFY_EXIT (read (sock, &ch, 1) == 1); + TEST_VERIFY_EXIT (ch == '1'); + + if (mount ("/", MOUNT_NAME, NULL, MS_BIND | MS_REC, NULL)) + FAIL_EXIT1 ("mount failed: %m\n"); + const int fd = xopen ("mpoint", + O_RDONLY | O_PATH | O_DIRECTORY | O_NOFOLLOW, 0); + + send_fd (sock, fd); + xclose (fd); + + TEST_VERIFY_EXIT (read (sock, &ch, 1) == 1); + TEST_VERIFY_EXIT (ch == 'a'); + + xclose (sock); + return 0; +} + +static void +update_map (char * const mapping, const char * const map_file) +{ + const size_t map_len = strlen (mapping); + + const int fd = xopen (map_file, O_WRONLY, 0); + xwrite (fd, mapping, map_len); + xclose (fd); +} + +static void +proc_setgroups_write (const long child_pid, const char * const str) +{ + const size_t str_len = strlen(str); + + char setgroups_path[sizeof ("/proc//setgroups") + INT_STRLEN_BOUND (long)]; + + snprintf (setgroups_path, sizeof (setgroups_path), + "/proc/%ld/setgroups", child_pid); + + const int fd = open (setgroups_path, O_WRONLY); + + if (fd < 0) + { + TEST_VERIFY_EXIT (errno == ENOENT); + FAIL_UNSUPPORTED ("/proc/%ld/setgroups not found\n", child_pid); + } + + xwrite (fd, str, str_len); + xclose(fd); +} 
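The two helpers above implement the standard unprivileged user-namespace setup: since Linux 3.19, "deny" must be written to /proc/PID/setgroups before an unprivileged process is permitted to write gid_map, or the gid_map write fails with EPERM; do_test below follows exactly that order. A condensed sketch of the sequence, using a hypothetical write_proc_file helper in the spirit of update_map and proc_setgroups_write:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: write STR to /proc/PID/NAME.  */
static void
write_proc_file (long pid, const char *name, const char *str)
{
  char path[64];
  snprintf (path, sizeof (path), "/proc/%ld/%s", pid, name);
  int fd = open (path, O_WRONLY);
  if (fd >= 0)
    {
      if (write (fd, str, strlen (str)) < 0)
        perror (path);
      close (fd);
    }
}

/* Order matters: setgroups must receive "deny" before gid_map is
   written (Linux 3.19 and later).  */
static void
map_ids (long pid, long uid, long gid)
{
  char buf[64];
  snprintf (buf, sizeof (buf), "0 %ld 1", uid);
  write_proc_file (pid, "uid_map", buf);
  write_proc_file (pid, "setgroups", "deny");
  snprintf (buf, sizeof (buf), "0 %ld 1", gid);
  write_proc_file (pid, "gid_map", buf);
}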
+ +static char child_stack[1024 * 1024]; + +int +do_test (void) +{ + base = support_create_and_chdir_toolong_temp_directory (BASENAME); + + xmkdir (MOUNT_NAME, S_IRWXU); + atexit (do_cleanup); + + /* Check whether user namespaces are supported. */ + { + pid_t pid = xfork (); + if (pid == 0) + { + if (unshare (CLONE_NEWUSER | CLONE_NEWNS) != 0) + _exit (EXIT_UNSUPPORTED); + else + _exit (0); + } + int status; + xwaitpid (pid, &status, 0); + TEST_VERIFY_EXIT (WIFEXITED (status)); + if (WEXITSTATUS (status) != 0) + return WEXITSTATUS (status); + } + + TEST_VERIFY_EXIT (socketpair (AF_UNIX, SOCK_STREAM, 0, sockfd) == 0); + pid_t child_pid = xclone (child_func, NULL, child_stack, + sizeof (child_stack), + CLONE_NEWUSER | CLONE_NEWNS | SIGCHLD); + + xclose (sockfd[1]); + const int sock = sockfd[0]; + + char map_path[sizeof ("/proc//uid_map") + INT_STRLEN_BOUND (long)]; + char map_buf[sizeof ("0 1") + INT_STRLEN_BOUND (long)]; + + snprintf (map_path, sizeof (map_path), "/proc/%ld/uid_map", + (long) child_pid); + snprintf (map_buf, sizeof (map_buf), "0 %ld 1", (long) getuid()); + update_map (map_buf, map_path); + + proc_setgroups_write ((long) child_pid, "deny"); + snprintf (map_path, sizeof (map_path), "/proc/%ld/gid_map", + (long) child_pid); + snprintf (map_buf, sizeof (map_buf), "0 %ld 1", (long) getgid()); + update_map (map_buf, map_path); + + TEST_VERIFY_EXIT (send (sock, "1", 1, MSG_NOSIGNAL) == 1); + const int fd = recv_fd (sock); + TEST_VERIFY_EXIT (fd >= 0); + TEST_VERIFY_EXIT (fchdir (fd) == 0); + + static char buf[2 * 10 + 1]; + memset (buf, 'A', sizeof (buf)); + + /* Finally, call getcwd and check if it resulted in a buffer underflow. */ + char * cwd = getcwd (buf + sizeof (buf) / 2, 1); + TEST_VERIFY (cwd == NULL); + TEST_VERIFY (errno == ERANGE); + + for (int i = 0; i < sizeof (buf); i++) + if (buf[i] != 'A') + { + printf ("buf[%d] = %02x\n", i, (unsigned int) buf[i]); + support_record_failure (); + } + + TEST_VERIFY_EXIT (send (sock, "a", 1, MSG_NOSIGNAL) == 1); + xclose (sock); + TEST_VERIFY_EXIT (xwaitpid (child_pid, NULL, 0) == child_pid); + + return 0; +} + +#define CLEANUP_HANDLER do_cleanup +#include diff --git a/sysdeps/unix/sysv/linux/tst-sysvshm-linux.c b/sysdeps/unix/sysv/linux/tst-sysvshm-linux.c index 2f05f21e4..110a7c143 100644 --- a/sysdeps/unix/sysv/linux/tst-sysvshm-linux.c +++ b/sysdeps/unix/sysv/linux/tst-sysvshm-linux.c @@ -122,18 +122,21 @@ do_test (void) if (shmid == -1) FAIL_EXIT1 ("shmget failed: %m"); + /* It does not check shmmax because the kernel clamps its value to INT_MAX + for: + + 1. Compat symbols with IPC_64, i.e., 32-bit binaries running on 64-bit + kernels. + + 2. The default symbol without IPC_64 (defined as IPC_OLD within Linux); + glibc always uses IPC_64 for 32-bit ABIs (to support 64-bit time_t). + It means that 32-bit binaries running on 32-bit kernels will not see + shmmax being clamped. + + And finding out whether the compat symbol is used would require checking + the underlying kernel against the current ABI. The shmall and shmmni + checks already provide enough coverage. */ + struct test_shminfo tipcinfo; - { - uint64_t v = read_proc_file ("/proc/sys/kernel/shmmax"); -#if LONG_MAX == INT_MAX - /* Kernel explicit clamp the value for shmmax on compat symbol (32-bit - binaries running on 64-bit kernels).
*/ - if (sizeof (__syscall_ulong_t) == sizeof (unsigned long int) - && v > INT_MAX) - v = INT_MAX; -#endif - tipcinfo.shmmax = v; - } tipcinfo.shmall = read_proc_file ("/proc/sys/kernel/shmall"); tipcinfo.shmmni = read_proc_file ("/proc/sys/kernel/shmmni"); @@ -152,7 +155,6 @@ do_test (void) FAIL_EXIT1 ("shmctl with IPC_INFO failed: %m"); TEST_COMPARE (ipcinfo.shmall, tipcinfo.shmall); - TEST_COMPARE (ipcinfo.shmmax, tipcinfo.shmmax); TEST_COMPARE (ipcinfo.shmmni, tipcinfo.shmmni); } diff --git a/sysdeps/x86/Makefile b/sysdeps/x86/Makefile index dd8267434..184660916 100644 --- a/sysdeps/x86/Makefile +++ b/sysdeps/x86/Makefile @@ -73,6 +73,32 @@ endif ifeq ($(subdir),string) sysdep_routines += cacheinfo + +tests += \ + tst-memchr-rtm \ + tst-memcmp-rtm \ + tst-memmove-rtm \ + tst-memrchr-rtm \ + tst-memset-rtm \ + tst-strchr-rtm \ + tst-strcpy-rtm \ + tst-strlen-rtm \ + tst-strncmp-rtm \ + tst-strrchr-rtm \ + tst-wcsncmp-rtm \ +# tests + +CFLAGS-tst-memchr-rtm.c += -mrtm +CFLAGS-tst-memcmp-rtm.c += -mrtm +CFLAGS-tst-memmove-rtm.c += -mrtm +CFLAGS-tst-memrchr-rtm.c += -mrtm +CFLAGS-tst-memset-rtm.c += -mrtm +CFLAGS-tst-strchr-rtm.c += -mrtm +CFLAGS-tst-strcpy-rtm.c += -mrtm +CFLAGS-tst-strlen-rtm.c += -mrtm +CFLAGS-tst-strncmp-rtm.c += -mrtm -Wno-error +CFLAGS-tst-strrchr-rtm.c += -mrtm +CFLAGS-tst-wcsncmp-rtm.c += -mrtm -Wno-error endif ifneq ($(enable-cet),no) @@ -208,3 +234,11 @@ $(objpfx)check-cet.out: $(..)sysdeps/x86/check-cet.awk \ generated += check-cet.out endif endif + +ifeq ($(subdir),posix) +tests += \ + tst-sysconf-cache-linesize \ + tst-sysconf-cache-linesize-static +tests-static += \ + tst-sysconf-cache-linesize-static +endif diff --git a/sysdeps/x86/bits/platform/x86.h b/sysdeps/x86/bits/platform/x86.h index 8f423ae72..1b4d6dab6 100644 --- a/sysdeps/x86/bits/platform/x86.h +++ b/sysdeps/x86/bits/platform/x86.h @@ -210,7 +210,7 @@ enum x86_cpu_AVX512_VP2INTERSECT = x86_cpu_index_7_edx + 8, x86_cpu_INDEX_7_EDX_9 = x86_cpu_index_7_edx + 9, x86_cpu_MD_CLEAR = x86_cpu_index_7_edx + 10, - x86_cpu_INDEX_7_EDX_11 = x86_cpu_index_7_edx + 11, + x86_cpu_RTM_ALWAYS_ABORT = x86_cpu_index_7_edx + 11, x86_cpu_INDEX_7_EDX_12 = x86_cpu_index_7_edx + 12, x86_cpu_INDEX_7_EDX_13 = x86_cpu_index_7_edx + 13, x86_cpu_SERIALIZE = x86_cpu_index_7_edx + 14, @@ -244,7 +244,7 @@ enum x86_cpu_XOP = x86_cpu_index_80000001_ecx + 11, x86_cpu_LWP = x86_cpu_index_80000001_ecx + 15, x86_cpu_FMA4 = x86_cpu_index_80000001_ecx + 16, - x86_cpu_TBM = x86_cpu_index_80000001_ecx + 20, + x86_cpu_TBM = x86_cpu_index_80000001_ecx + 21, x86_cpu_index_80000001_edx = (CPUID_INDEX_80000001 * 8 * 4 * sizeof (unsigned int) diff --git a/sysdeps/x86/cacheinfo.c b/sysdeps/x86/cacheinfo.c index 7b8df45e3..5ea4723ca 100644 --- a/sysdeps/x86/cacheinfo.c +++ b/sysdeps/x86/cacheinfo.c @@ -32,6 +32,9 @@ __cache_sysconf (int name) case _SC_LEVEL1_ICACHE_SIZE: return cpu_features->level1_icache_size; + case _SC_LEVEL1_ICACHE_LINESIZE: + return cpu_features->level1_icache_linesize; + case _SC_LEVEL1_DCACHE_SIZE: return cpu_features->level1_dcache_size; diff --git a/sysdeps/x86/cacheinfo.h b/sysdeps/x86/cacheinfo.h index 68c253542..0f850bdf1 100644 --- a/sysdeps/x86/cacheinfo.h +++ b/sysdeps/x86/cacheinfo.h @@ -63,16 +63,22 @@ init_cacheinfo (void) __x86_raw_data_cache_size = data; /* Round data cache size to multiple of 256 bytes. 
*/ data = data & ~255L; - __x86_data_cache_size_half = data / 2; - __x86_data_cache_size = data; + if (data > 0) + { + __x86_data_cache_size_half = data / 2; + __x86_data_cache_size = data; + } long int shared = cpu_features->shared_cache_size; __x86_raw_shared_cache_size_half = shared / 2; __x86_raw_shared_cache_size = shared; /* Round shared cache size to multiple of 256 bytes. */ shared = shared & ~255L; - __x86_shared_cache_size_half = shared / 2; - __x86_shared_cache_size = shared; + if (shared > 0) + { + __x86_shared_cache_size_half = shared / 2; + __x86_shared_cache_size = shared; + } __x86_shared_non_temporal_threshold = cpu_features->non_temporal_threshold; diff --git a/sysdeps/x86/configure b/sysdeps/x86/configure index 5e32dc62b..ead1295c3 100644 --- a/sysdeps/x86/configure +++ b/sysdeps/x86/configure @@ -126,6 +126,8 @@ cat > conftest2.S <&5 (eval $ac_try) 2>&5 @@ -135,6 +137,24 @@ if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS -nostartfiles -nostdlib -r -o conftest c count=`LC_ALL=C $READELF -n conftest | grep NT_GNU_PROPERTY_TYPE_0 | wc -l` if test "$count" = 1; then libc_cv_include_x86_isa_level=yes + cat > conftest.c <&5 + (eval $ac_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; } | grep -q "\-msahf"; then + libc_cv_have_x86_lahf_sahf=yes + fi + if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS -fverbose-asm -S -o - conftest.c' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 + (eval $ac_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; } | grep -q "\-mmovbe"; then + libc_cv_have_x86_movbe=yes + fi fi fi rm -f conftest* @@ -144,6 +164,14 @@ $as_echo "$libc_cv_include_x86_isa_level" >&6; } if test $libc_cv_include_x86_isa_level = yes; then $as_echo "#define INCLUDE_X86_ISA_LEVEL 1" >>confdefs.h +fi +if test $libc_cv_have_x86_lahf_sahf = yes; then + $as_echo "#define HAVE_X86_LAHF_SAHF 1" >>confdefs.h + +fi +if test $libc_cv_have_x86_movbe = yes; then + $as_echo "#define HAVE_X86_MOVBE 1" >>confdefs.h + fi config_vars="$config_vars enable-x86-isa-level = $libc_cv_include_x86_isa_level" diff --git a/sysdeps/x86/configure.ac b/sysdeps/x86/configure.ac index f94088f37..bca97fdc2 100644 --- a/sysdeps/x86/configure.ac +++ b/sysdeps/x86/configure.ac @@ -98,14 +98,30 @@ cat > conftest2.S < conftest.c < 0xc) + break; + /* Fall through. */ + case 0x4e: + case 0x5e: + { + /* Disable Intel TSX and enable RTM_ALWAYS_ABORT for + processors listed in: + +https://www.intel.com/content/www/us/en/support/articles/000059422/processors.html + */ +disable_tsx: + CPU_FEATURE_UNSET (cpu_features, HLE); + CPU_FEATURE_UNSET (cpu_features, RTM); + CPU_FEATURE_SET (cpu_features, RTM_ALWAYS_ABORT); + } + break; case 0x3f: /* Xeon E7 v3 with stepping >= 4 has working TSX. */ if (stepping >= 4) @@ -520,8 +554,24 @@ init_cpu_features (struct cpu_features *cpu_features) cpu_features->preferred[index_arch_Prefer_No_VZEROUPPER] |= bit_arch_Prefer_No_VZEROUPPER; else - cpu_features->preferred[index_arch_Prefer_No_AVX512] - |= bit_arch_Prefer_No_AVX512; + { + cpu_features->preferred[index_arch_Prefer_No_AVX512] + |= bit_arch_Prefer_No_AVX512; + + /* Avoid RTM abort triggered by VZEROUPPER inside a + transactionally executing RTM region. 
*/ + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + cpu_features->preferred[index_arch_Prefer_No_VZEROUPPER] + |= bit_arch_Prefer_No_VZEROUPPER; + + /* Since to compare 2 32-byte strings, 256-bit EVEX strcmp + requires 2 loads, 3 VPCMPs and 2 KORDs while AVX2 strcmp + requires 1 load, 2 VPCMPEQs, 1 VPMINU and 1 VPMOVMSKB, + AVX2 strcmp is faster than EVEX strcmp. */ + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)) + cpu_features->preferred[index_arch_Prefer_AVX2_STRCMP] + |= bit_arch_Prefer_AVX2_STRCMP; + } } /* This spells out "AuthenticAMD" or "HygonGenuine". */ else if ((ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) diff --git a/sysdeps/x86/cpu-tunables.c b/sysdeps/x86/cpu-tunables.c index 126896f41..a90df39b7 100644 --- a/sysdeps/x86/cpu-tunables.c +++ b/sysdeps/x86/cpu-tunables.c @@ -238,6 +238,8 @@ TUNABLE_CALLBACK (set_hwcaps) (tunable_val_t *valp) CHECK_GLIBC_IFUNC_PREFERRED_BOTH (n, cpu_features, Fast_Copy_Backward, disable, 18); + CHECK_GLIBC_IFUNC_PREFERRED_NEED_BOTH + (n, cpu_features, Prefer_AVX2_STRCMP, AVX2, disable, 18); } break; case 19: diff --git a/sysdeps/x86/dl-cacheinfo.h b/sysdeps/x86/dl-cacheinfo.h index a31fa0783..2ab3acd83 100644 --- a/sysdeps/x86/dl-cacheinfo.h +++ b/sysdeps/x86/dl-cacheinfo.h @@ -707,6 +707,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) long int core; unsigned int threads = 0; unsigned long int level1_icache_size = -1; + unsigned long int level1_icache_linesize = -1; unsigned long int level1_dcache_size = -1; unsigned long int level1_dcache_assoc = -1; unsigned long int level1_dcache_linesize = -1; @@ -726,6 +727,8 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) level1_icache_size = handle_intel (_SC_LEVEL1_ICACHE_SIZE, cpu_features); + level1_icache_linesize + = handle_intel (_SC_LEVEL1_ICACHE_LINESIZE, cpu_features); level1_dcache_size = data; level1_dcache_assoc = handle_intel (_SC_LEVEL1_DCACHE_ASSOC, cpu_features); @@ -753,6 +756,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) shared = handle_zhaoxin (_SC_LEVEL3_CACHE_SIZE); level1_icache_size = handle_zhaoxin (_SC_LEVEL1_ICACHE_SIZE); + level1_icache_linesize = handle_zhaoxin (_SC_LEVEL1_ICACHE_LINESIZE); level1_dcache_size = data; level1_dcache_assoc = handle_zhaoxin (_SC_LEVEL1_DCACHE_ASSOC); level1_dcache_linesize = handle_zhaoxin (_SC_LEVEL1_DCACHE_LINESIZE); @@ -772,6 +776,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) shared = handle_amd (_SC_LEVEL3_CACHE_SIZE); level1_icache_size = handle_amd (_SC_LEVEL1_ICACHE_SIZE); + level1_icache_linesize = handle_amd (_SC_LEVEL1_ICACHE_LINESIZE); level1_dcache_size = data; level1_dcache_assoc = handle_amd (_SC_LEVEL1_DCACHE_ASSOC); level1_dcache_linesize = handle_amd (_SC_LEVEL1_DCACHE_LINESIZE); @@ -833,6 +838,7 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) } cpu_features->level1_icache_size = level1_icache_size; + cpu_features->level1_icache_linesize = level1_icache_linesize; cpu_features->level1_dcache_size = level1_dcache_size; cpu_features->level1_dcache_assoc = level1_dcache_assoc; cpu_features->level1_dcache_linesize = level1_dcache_linesize; @@ -917,17 +923,14 @@ dl_init_cacheinfo (struct cpu_features *cpu_features) rep_stosb_threshold = TUNABLE_GET (x86_rep_stosb_threshold, long int, NULL); - TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, long int, data, - 0, (long int) -1); - TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, long int, shared, - 0, (long int) -1); - TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, long int, - non_temporal_threshold, 0, (long int) 
-1); - TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, long int, - rep_movsb_threshold, - minimum_rep_movsb_threshold, (long int) -1); - TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, long int, - rep_stosb_threshold, 1, (long int) -1); + TUNABLE_SET_WITH_BOUNDS (x86_data_cache_size, data, 0, SIZE_MAX); + TUNABLE_SET_WITH_BOUNDS (x86_shared_cache_size, shared, 0, SIZE_MAX); + TUNABLE_SET_WITH_BOUNDS (x86_non_temporal_threshold, non_temporal_threshold, + 0, SIZE_MAX); + TUNABLE_SET_WITH_BOUNDS (x86_rep_movsb_threshold, rep_movsb_threshold, + minimum_rep_movsb_threshold, SIZE_MAX); + TUNABLE_SET_WITH_BOUNDS (x86_rep_stosb_threshold, rep_stosb_threshold, 1, + SIZE_MAX); #endif cpu_features->data_cache_size = data; diff --git a/sysdeps/x86/dl-diagnostics-cpu.c b/sysdeps/x86/dl-diagnostics-cpu.c new file mode 100644 index 000000000..af8486470 --- /dev/null +++ b/sysdeps/x86/dl-diagnostics-cpu.c @@ -0,0 +1,118 @@ +/* Print CPU diagnostics data in ld.so. x86 version. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include + +static void +print_cpu_features_value (const char *label, uint64_t value) +{ + _dl_printf ("x86.cpu_features."); + _dl_diagnostics_print_labeled_value (label, value); +} + +static void +print_cpu_feature_internal (unsigned int index, const char *kind, + unsigned int reg, uint32_t value) +{ + _dl_printf ("x86.cpu_features.features[0x%x].%s[0x%x]=0x%x\n", + index, kind, reg, value); +} + +static void +print_cpu_feature_preferred (const char *label, unsigned int flag) +{ + _dl_printf("x86.cpu_features.preferred.%s=0x%x\n", label, flag); +} + +void +_dl_diagnostics_cpu (void) +{ + const struct cpu_features *cpu_features = __get_cpu_features (); + + print_cpu_features_value ("basic.kind", cpu_features->basic.kind); + print_cpu_features_value ("basic.max_cpuid", cpu_features->basic.max_cpuid); + print_cpu_features_value ("basic.family", cpu_features->basic.family); + print_cpu_features_value ("basic.model", cpu_features->basic.model); + print_cpu_features_value ("basic.stepping", cpu_features->basic.stepping); + + for (unsigned int index = 0; index < CPUID_INDEX_MAX; ++index) + { + /* The index values are part of the ABI via + , so translating them to strings is not + necessary. */ + for (unsigned int reg = 0; reg < 4; ++reg) + print_cpu_feature_internal + (index, "cpuid", reg, + cpu_features->features[index].cpuid_array[reg]); + for (unsigned int reg = 0; reg < 4; ++reg) + print_cpu_feature_internal + (index, "usable", reg, + cpu_features->features[index].usable_array[reg]); + } + + /* The preferred indicators are not part of the ABI and need to be + translated. 
*/ +#define BIT(x) \ + print_cpu_feature_preferred (#x, CPU_FEATURE_PREFERRED_P (cpu_features, x)); +#include "cpu-features-preferred_feature_index_1.def" +#undef BIT + + print_cpu_features_value ("isa_1", cpu_features->isa_1); + print_cpu_features_value ("xsave_state_size", + cpu_features->xsave_state_size); + print_cpu_features_value ("xsave_state_full_size", + cpu_features->xsave_state_full_size); + print_cpu_features_value ("data_cache_size", cpu_features->data_cache_size); + print_cpu_features_value ("shared_cache_size", + cpu_features->shared_cache_size); + print_cpu_features_value ("non_temporal_threshold", + cpu_features->non_temporal_threshold); + print_cpu_features_value ("rep_movsb_threshold", + cpu_features->rep_movsb_threshold); + print_cpu_features_value ("rep_stosb_threshold", + cpu_features->rep_stosb_threshold); + print_cpu_features_value ("level1_icache_size", + cpu_features->level1_icache_size); + print_cpu_features_value ("level1_icache_linesize", + cpu_features->level1_icache_linesize); + print_cpu_features_value ("level1_dcache_size", + cpu_features->level1_dcache_size); + print_cpu_features_value ("level1_dcache_assoc", + cpu_features->level1_dcache_assoc); + print_cpu_features_value ("level1_dcache_linesize", + cpu_features->level1_dcache_linesize); + print_cpu_features_value ("level2_cache_size", + cpu_features->level2_cache_size); + print_cpu_features_value ("level2_cache_assoc", + cpu_features->level2_cache_assoc); + print_cpu_features_value ("level2_cache_linesize", + cpu_features->level2_cache_linesize); + print_cpu_features_value ("level3_cache_size", + cpu_features->level3_cache_size); + print_cpu_features_value ("level3_cache_assoc", + cpu_features->level3_cache_assoc); + print_cpu_features_value ("level3_cache_linesize", + cpu_features->level3_cache_linesize); + print_cpu_features_value ("level4_cache_size", + cpu_features->level4_cache_size); + _Static_assert (offsetof (struct cpu_features, level4_cache_size) + + sizeof (cpu_features->level4_cache_size) + == sizeof (*cpu_features), + "last cpu_features field has been printed"); +} diff --git a/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def new file mode 100644 index 000000000..133aab19f --- /dev/null +++ b/sysdeps/x86/include/cpu-features-preferred_feature_index_1.def @@ -0,0 +1,35 @@ +/* Bits in the PREFERRED_FEATURE_INDEX_1 bitfield of . + Copyright (C) 2020-2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +BIT (Fast_Rep_String) +BIT (Fast_Copy_Backward) +BIT (Slow_BSF) +BIT (Fast_Unaligned_Load) +BIT (Prefer_PMINUB_for_stringop) +BIT (Fast_Unaligned_Copy) +BIT (I586) +BIT (I686) +BIT (Slow_SSE4_2) +BIT (AVX_Fast_Unaligned_Load) +BIT (Prefer_MAP_32BIT_EXEC) +BIT (Prefer_No_VZEROUPPER) +BIT (Prefer_ERMS) +BIT (Prefer_No_AVX512) +BIT (MathVec_Prefer_No_AVX512) +BIT (Prefer_FSRM) +BIT (Prefer_AVX2_STRCMP) diff --git a/sysdeps/x86/include/cpu-features.h b/sysdeps/x86/include/cpu-features.h index 624736b40..bfe4fe231 100644 --- a/sysdeps/x86/include/cpu-features.h +++ b/sysdeps/x86/include/cpu-features.h @@ -229,7 +229,7 @@ enum #define bit_cpu_AVX512_VP2INTERSECT (1u << 8) #define bit_cpu_INDEX_7_EDX_9 (1u << 9) #define bit_cpu_MD_CLEAR (1u << 10) -#define bit_cpu_INDEX_7_EDX_11 (1u << 11) +#define bit_cpu_RTM_ALWAYS_ABORT (1u << 11) #define bit_cpu_INDEX_7_EDX_12 (1u << 12) #define bit_cpu_INDEX_7_EDX_13 (1u << 13) #define bit_cpu_SERIALIZE (1u << 14) @@ -454,7 +454,7 @@ enum #define index_cpu_AVX512_VP2INTERSECT CPUID_INDEX_7 #define index_cpu_INDEX_7_EDX_9 CPUID_INDEX_7 #define index_cpu_MD_CLEAR CPUID_INDEX_7 -#define index_cpu_INDEX_7_EDX_11 CPUID_INDEX_7 +#define index_cpu_RTM_ALWAYS_ABORT CPUID_INDEX_7 #define index_cpu_INDEX_7_EDX_12 CPUID_INDEX_7 #define index_cpu_INDEX_7_EDX_13 CPUID_INDEX_7 #define index_cpu_SERIALIZE CPUID_INDEX_7 @@ -679,7 +679,7 @@ enum #define reg_AVX512_VP2INTERSECT edx #define reg_INDEX_7_EDX_9 edx #define reg_MD_CLEAR edx -#define reg_INDEX_7_EDX_11 edx +#define reg_RTM_ALWAYS_ABORT edx #define reg_INDEX_7_EDX_12 edx #define reg_INDEX_7_EDX_13 edx #define reg_SERIALIZE edx @@ -757,40 +757,23 @@ enum #define reg_AESKLE ebx #define reg_WIDE_KL ebx -/* PREFERRED_FEATURE_INDEX_1. */ -#define bit_arch_I586 (1u << 0) -#define bit_arch_I686 (1u << 1) -#define bit_arch_Fast_Rep_String (1u << 2) -#define bit_arch_Fast_Copy_Backward (1u << 3) -#define bit_arch_Fast_Unaligned_Load (1u << 4) -#define bit_arch_Fast_Unaligned_Copy (1u << 5) -#define bit_arch_Slow_BSF (1u << 6) -#define bit_arch_Slow_SSE4_2 (1u << 7) -#define bit_arch_AVX_Fast_Unaligned_Load (1u << 8) -#define bit_arch_Prefer_MAP_32BIT_EXEC (1u << 9) -#define bit_arch_Prefer_PMINUB_for_stringop (1u << 10) -#define bit_arch_Prefer_No_VZEROUPPER (1u << 11) -#define bit_arch_Prefer_ERMS (1u << 12) -#define bit_arch_Prefer_FSRM (1u << 13) -#define bit_arch_Prefer_No_AVX512 (1u << 14) -#define bit_arch_MathVec_Prefer_No_AVX512 (1u << 15) - -#define index_arch_Fast_Rep_String PREFERRED_FEATURE_INDEX_1 -#define index_arch_Fast_Copy_Backward PREFERRED_FEATURE_INDEX_1 -#define index_arch_Slow_BSF PREFERRED_FEATURE_INDEX_1 -#define index_arch_Fast_Unaligned_Load PREFERRED_FEATURE_INDEX_1 -#define index_arch_Prefer_PMINUB_for_stringop PREFERRED_FEATURE_INDEX_1 -#define index_arch_Fast_Unaligned_Copy PREFERRED_FEATURE_INDEX_1 -#define index_arch_I586 PREFERRED_FEATURE_INDEX_1 -#define index_arch_I686 PREFERRED_FEATURE_INDEX_1 -#define index_arch_Slow_SSE4_2 PREFERRED_FEATURE_INDEX_1 -#define index_arch_AVX_Fast_Unaligned_Load PREFERRED_FEATURE_INDEX_1 -#define index_arch_Prefer_MAP_32BIT_EXEC PREFERRED_FEATURE_INDEX_1 -#define index_arch_Prefer_No_VZEROUPPER PREFERRED_FEATURE_INDEX_1 -#define index_arch_Prefer_ERMS PREFERRED_FEATURE_INDEX_1 -#define index_arch_Prefer_No_AVX512 PREFERRED_FEATURE_INDEX_1 -#define index_arch_MathVec_Prefer_No_AVX512 PREFERRED_FEATURE_INDEX_1 -#define index_arch_Prefer_FSRM PREFERRED_FEATURE_INDEX_1 +/* PREFERRED_FEATURE_INDEX_1. 
First define the bitindex values + sequentially, then define the bit_arch* and index_arch_* lookup + constants. */ +enum + { +#define BIT(x) _bitindex_arch_##x , +#include "cpu-features-preferred_feature_index_1.def" +#undef BIT + }; +enum + { +#define BIT(x) \ + bit_arch_##x = 1u << _bitindex_arch_##x , \ + index_arch_##x = PREFERRED_FEATURE_INDEX_1, +#include "cpu-features-preferred_feature_index_1.def" +#undef BIT + }; /* XCR0 Feature flags. */ #define bit_XMM_state (1u << 1) @@ -841,6 +824,8 @@ struct cpuid_feature_internal }; }; +/* NB: When adding new fields, update sysdeps/x86/dl-diagnostics-cpu.c + to print them. */ struct cpu_features { struct cpu_features_basic basic; @@ -874,6 +859,8 @@ struct cpu_features unsigned long int rep_stosb_threshold; /* _SC_LEVEL1_ICACHE_SIZE. */ unsigned long int level1_icache_size; + /* _SC_LEVEL1_ICACHE_LINESIZE. */ + unsigned long int level1_icache_linesize; /* _SC_LEVEL1_DCACHE_SIZE. */ unsigned long int level1_dcache_size; /* _SC_LEVEL1_DCACHE_ASSOC. */ diff --git a/sysdeps/x86/isa-level.c b/sysdeps/x86/isa-level.c index aaf524cb5..49ef4aa61 100644 --- a/sysdeps/x86/isa-level.c +++ b/sysdeps/x86/isa-level.c @@ -29,32 +29,35 @@ /* ELF program property for x86 ISA level. */ #ifdef INCLUDE_X86_ISA_LEVEL -# if defined __x86_64__ || defined __FXSR__ || !defined _SOFT_FLOAT \ - || defined __MMX__ || defined __SSE__ || defined __SSE2__ +# if defined __SSE__ && defined __SSE2__ +/* NB: ISAs, excluding MMX, in x86-64 ISA level baseline are used. */ # define ISA_BASELINE GNU_PROPERTY_X86_ISA_1_BASELINE # else # define ISA_BASELINE 0 # endif -# if defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 \ - || (defined __x86_64__ && defined __LAHF_SAHF__) \ - || defined __POPCNT__ || defined __SSE3__ \ - || defined __SSSE3__ || defined __SSE4_1__ || defined __SSE4_2__ +# if ISA_BASELINE && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 \ + && defined HAVE_X86_LAHF_SAHF && defined __POPCNT__ \ + && defined __SSE3__ && defined __SSSE3__ && defined __SSE4_1__ \ + && defined __SSE4_2__ +/* NB: ISAs in x86-64 ISA level v2 are used. */ # define ISA_V2 GNU_PROPERTY_X86_ISA_1_V2 # else # define ISA_V2 0 # endif -# if defined __AVX__ || defined __AVX2__ || defined __F16C__ \ - || defined __FMA__ || defined __LZCNT__ || defined __MOVBE__ \ - || defined __XSAVE__ +# if ISA_V2 && defined __AVX__ && defined __AVX2__ && defined __F16C__ \ + && defined __FMA__ && defined __LZCNT__ && defined HAVE_X86_MOVBE +/* NB: ISAs in x86-64 ISA level v3 are used. */ # define ISA_V3 GNU_PROPERTY_X86_ISA_1_V3 # else # define ISA_V3 0 # endif -# if defined __AVX512F__ || defined __AVX512BW__ || defined __AVX512CD__ \ - || defined __AVX512DQ__ || defined __AVX512VL__ +# if ISA_V3 && defined __AVX512F__ && defined __AVX512BW__ \ + && defined __AVX512CD__ && defined __AVX512DQ__ \ + && defined __AVX512VL__ +/* NB: ISAs in x86-64 ISA level v4 are used. 
@@ -841,6 +824,8 @@ struct cpuid_feature_internal
     };
 };
 
+/* NB: When adding new fields, update sysdeps/x86/dl-diagnostics-cpu.c
+   to print them.  */
 struct cpu_features
 {
   struct cpu_features_basic basic;
@@ -874,6 +859,8 @@ struct cpu_features
   unsigned long int rep_stosb_threshold;
   /* _SC_LEVEL1_ICACHE_SIZE.  */
   unsigned long int level1_icache_size;
+  /* _SC_LEVEL1_ICACHE_LINESIZE.  */
+  unsigned long int level1_icache_linesize;
   /* _SC_LEVEL1_DCACHE_SIZE.  */
   unsigned long int level1_dcache_size;
   /* _SC_LEVEL1_DCACHE_ASSOC.  */
diff --git a/sysdeps/x86/isa-level.c b/sysdeps/x86/isa-level.c
index aaf524cb5..49ef4aa61 100644
--- a/sysdeps/x86/isa-level.c
+++ b/sysdeps/x86/isa-level.c
@@ -29,32 +29,35 @@
 /* ELF program property for x86 ISA level.  */
 #ifdef INCLUDE_X86_ISA_LEVEL
-# if defined __x86_64__ || defined __FXSR__ || !defined _SOFT_FLOAT \
-    || defined __MMX__ || defined __SSE__ || defined __SSE2__
+# if defined __SSE__ && defined __SSE2__
+/* NB: ISAs, excluding MMX, in x86-64 ISA level baseline are used.  */
 #  define ISA_BASELINE GNU_PROPERTY_X86_ISA_1_BASELINE
 # else
 #  define ISA_BASELINE 0
 # endif
 
-# if defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 \
-    || (defined __x86_64__ && defined __LAHF_SAHF__) \
-    || defined __POPCNT__ || defined __SSE3__ \
-    || defined __SSSE3__ || defined __SSE4_1__ || defined __SSE4_2__
+# if ISA_BASELINE && defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 \
+     && defined HAVE_X86_LAHF_SAHF && defined __POPCNT__ \
+     && defined __SSE3__ && defined __SSSE3__ && defined __SSE4_1__ \
+     && defined __SSE4_2__
+/* NB: ISAs in x86-64 ISA level v2 are used.  */
 #  define ISA_V2 GNU_PROPERTY_X86_ISA_1_V2
 # else
 #  define ISA_V2 0
 # endif
 
-# if defined __AVX__ || defined __AVX2__ || defined __F16C__ \
-    || defined __FMA__ || defined __LZCNT__ || defined __MOVBE__ \
-    || defined __XSAVE__
+# if ISA_V2 && defined __AVX__ && defined __AVX2__ && defined __F16C__ \
+     && defined __FMA__ && defined __LZCNT__ && defined HAVE_X86_MOVBE
+/* NB: ISAs in x86-64 ISA level v3 are used.  */
 #  define ISA_V3 GNU_PROPERTY_X86_ISA_1_V3
 # else
 #  define ISA_V3 0
 # endif
 
-# if defined __AVX512F__ || defined __AVX512BW__ || defined __AVX512CD__ \
-    || defined __AVX512DQ__ || defined __AVX512VL__
+# if ISA_V3 && defined __AVX512F__ && defined __AVX512BW__ \
+     && defined __AVX512CD__ && defined __AVX512DQ__ \
+     && defined __AVX512VL__
+/* NB: ISAs in x86-64 ISA level v4 are used.  */
 #  define ISA_V4 GNU_PROPERTY_X86_ISA_1_V4
 # else
 #  define ISA_V4 0
diff --git a/sysdeps/x86/tst-cpu-features-supports.c b/sysdeps/x86/tst-cpu-features-supports.c
index 79d803eb2..e79d19b5a 100644
--- a/sysdeps/x86/tst-cpu-features-supports.c
+++ b/sysdeps/x86/tst-cpu-features-supports.c
@@ -59,9 +59,9 @@ do_test (int argc, char **argv)
   fails += CHECK_SUPPORTS (aes, AES);
 #endif
 #if __GNUC_PREREQ (11, 1)
-  fails += CHECK_SUPPORTS (amx_bf16, AMX_BF16);
-  fails += CHECK_SUPPORTS (amx_int8, AMX_INT8);
-  fails += CHECK_SUPPORTS (amx_tile, AMX_TILE);
+  fails += CHECK_SUPPORTS (amx-bf16, AMX_BF16);
+  fails += CHECK_SUPPORTS (amx-int8, AMX_INT8);
+  fails += CHECK_SUPPORTS (amx-tile, AMX_TILE);
 #endif
   fails += CHECK_SUPPORTS (avx, AVX);
   fails += CHECK_SUPPORTS (avx2, AVX2);
@@ -130,7 +130,7 @@ do_test (int argc, char **argv)
   fails += CHECK_SUPPORTS (gfni, GFNI);
 #endif
 #if __GNUC_PREREQ (11, 0)
-  fails += CHECK_SUPPORTS (hle, HLE);
+  fails += CHECK_CPU_SUPPORTS (hle, HLE);
   fails += CHECK_CPU_SUPPORTS (ibt, IBT);
   fails += CHECK_SUPPORTS (lahf_lm, LAHF64_SAHF64);
   fails += CHECK_CPU_SUPPORTS (lm, LM);
@@ -152,7 +152,7 @@ do_test (int argc, char **argv)
   fails += CHECK_SUPPORTS (rdpid, RDPID);
   fails += CHECK_SUPPORTS (rdrnd, RDRAND);
   fails += CHECK_SUPPORTS (rdseed, RDSEED);
-  fails += CHECK_SUPPORTS (rtm, RTM);
+  fails += CHECK_CPU_SUPPORTS (rtm, RTM);
   fails += CHECK_SUPPORTS (serialize, SERIALIZE);
   fails += CHECK_SUPPORTS (sha, SHA);
   fails += CHECK_CPU_SUPPORTS (shstk, SHSTK);
diff --git a/sysdeps/x86/tst-get-cpu-features.c b/sysdeps/x86/tst-get-cpu-features.c
index b5e7f6e7b..37d9ec9d8 100644
--- a/sysdeps/x86/tst-get-cpu-features.c
+++ b/sysdeps/x86/tst-get-cpu-features.c
@@ -158,6 +158,7 @@ do_test (void)
   CHECK_CPU_FEATURE (UINTR);
   CHECK_CPU_FEATURE (AVX512_VP2INTERSECT);
   CHECK_CPU_FEATURE (MD_CLEAR);
+  CHECK_CPU_FEATURE (RTM_ALWAYS_ABORT);
   CHECK_CPU_FEATURE (SERIALIZE);
   CHECK_CPU_FEATURE (HYBRID);
   CHECK_CPU_FEATURE (TSXLDTRK);
@@ -321,6 +322,7 @@ do_test (void)
   CHECK_CPU_FEATURE_USABLE (FSRM);
   CHECK_CPU_FEATURE_USABLE (AVX512_VP2INTERSECT);
   CHECK_CPU_FEATURE_USABLE (MD_CLEAR);
+  CHECK_CPU_FEATURE_USABLE (RTM_ALWAYS_ABORT);
   CHECK_CPU_FEATURE_USABLE (SERIALIZE);
   CHECK_CPU_FEATURE_USABLE (HYBRID);
   CHECK_CPU_FEATURE_USABLE (TSXLDTRK);
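The amx_* to amx-* renames above track the spelling that GCC's __builtin_cpu_supports recognizes for these features (dashed, starting with GCC 11.1), and hle/rtm move to CHECK_CPU_SUPPORTS, presumably because those features can be present in CPUID yet deliberately unusable (compare the new RTM_ALWAYS_ABORT bit above). A hedged standalone illustration of the builtin, not taken from the patch:

#include <stdio.h>

int
main (void)
{
  /* Long-established feature names have no dash...  */
  if (__builtin_cpu_supports ("avx2"))
    puts ("AVX2 supported");
#if defined __GNUC__ && __GNUC__ >= 11
  /* ...while newer AMX names are dashed: "amx-bf16", not "amx_bf16"
     (assumes GCC 11.1 or later, as the test's own guard does).  */
  if (__builtin_cpu_supports ("amx-bf16"))
    puts ("AMX-BF16 supported");
#endif
  return 0;
}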
diff --git a/sysdeps/x86/tst-memchr-rtm.c b/sysdeps/x86/tst-memchr-rtm.c
new file mode 100644
index 000000000..e47494011
--- /dev/null
+++ b/sysdeps/x86/tst-memchr-rtm.c
@@ -0,0 +1,54 @@
+/* Test case for memchr inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE);
+  string1[100] = 'c';
+  string1[STRING_SIZE - 100] = 'c';
+  char *p = memchr (string1, 'c', STRING_SIZE);
+  if (p == &string1[100])
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  char *p = memchr (string1, 'c', STRING_SIZE);
+  if (p == &string1[100])
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("memchr", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-memcmp-rtm.c b/sysdeps/x86/tst-memcmp-rtm.c
new file mode 100644
index 000000000..e4c8a623b
--- /dev/null
+++ b/sysdeps/x86/tst-memcmp-rtm.c
@@ -0,0 +1,52 @@
+/* Test case for memcmp inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+char string2[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE);
+  memset (string2, 'a', STRING_SIZE);
+  if (memcmp (string1, string2, STRING_SIZE) == 0)
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  if (memcmp (string1, string2, STRING_SIZE) == 0)
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("memcmp", LOOP, prepare, function);
+}
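Note that prepare and function in these tests carry __attribute__ ((noinline, noclone)); plausibly this keeps GCC from inlining or specializing the helpers, so each loop iteration performs a genuine out-of-line call that reaches the IFUNC-resolved string function inside the transaction. A sketch of the shape, with a hypothetical helper name:

#include <stddef.h>
#include <string.h>

/* Without the attributes, the compiler may fold or clone a call this
   small, bypassing the dispatch path the test wants to exercise.  */
__attribute__ ((noinline, noclone))
int
call_memchr_out_of_line (const char *buf, size_t len)
{
  return memchr (buf, 'c', len) != NULL;
}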
diff --git a/sysdeps/x86/tst-memmove-rtm.c b/sysdeps/x86/tst-memmove-rtm.c
new file mode 100644
index 000000000..4bf97ef1e
--- /dev/null
+++ b/sysdeps/x86/tst-memmove-rtm.c
@@ -0,0 +1,53 @@
+/* Test case for memmove inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+char string2[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE);
+  if (memmove (string2, string1, STRING_SIZE) == string2
+      && memcmp (string2, string1, STRING_SIZE) == 0)
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  if (memmove (string2, string1, STRING_SIZE) == string2
+      && memcmp (string2, string1, STRING_SIZE) == 0)
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("memmove", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-memrchr-rtm.c b/sysdeps/x86/tst-memrchr-rtm.c
new file mode 100644
index 000000000..a57a5a8eb
--- /dev/null
+++ b/sysdeps/x86/tst-memrchr-rtm.c
@@ -0,0 +1,54 @@
+/* Test case for memrchr inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE);
+  string1[100] = 'c';
+  string1[STRING_SIZE - 100] = 'c';
+  char *p = memrchr (string1, 'c', STRING_SIZE);
+  if (p == &string1[STRING_SIZE - 100])
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  char *p = memrchr (string1, 'c', STRING_SIZE);
+  if (p == &string1[STRING_SIZE - 100])
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("memrchr", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-memset-rtm.c b/sysdeps/x86/tst-memset-rtm.c
new file mode 100644
index 000000000..bf343a4da
--- /dev/null
+++ b/sysdeps/x86/tst-memset-rtm.c
@@ -0,0 +1,45 @@
+/* Test case for memset inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE);
+  return EXIT_SUCCESS;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  memset (string1, 'a', STRING_SIZE);
+  return 0;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("memset", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-strchr-rtm.c b/sysdeps/x86/tst-strchr-rtm.c
new file mode 100644
index 000000000..a82e29c07
--- /dev/null
+++ b/sysdeps/x86/tst-strchr-rtm.c
@@ -0,0 +1,54 @@
+/* Test case for strchr inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE - 1);
+  string1[100] = 'c';
+  string1[STRING_SIZE - 100] = 'c';
+  char *p = strchr (string1, 'c');
+  if (p == &string1[100])
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  char *p = strchr (string1, 'c');
+  if (p == &string1[100])
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("strchr", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-strcpy-rtm.c b/sysdeps/x86/tst-strcpy-rtm.c
new file mode 100644
index 000000000..2b2a583fb
--- /dev/null
+++ b/sysdeps/x86/tst-strcpy-rtm.c
@@ -0,0 +1,53 @@
+/* Test case for strcpy inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+char string2[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE - 1);
+  if (strcpy (string2, string1) == string2
+      && strcmp (string2, string1) == 0)
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  if (strcpy (string2, string1) == string2
+      && strcmp (string2, string1) == 0)
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("strcpy", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-string-rtm.h b/sysdeps/x86/tst-string-rtm.h
new file mode 100644
index 000000000..d2470afa1
--- /dev/null
+++ b/sysdeps/x86/tst-string-rtm.h
@@ -0,0 +1,72 @@
+/* Test string function in a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <x86intrin.h>
+#include <sys/platform/x86.h>
+#include <string.h>
+#include <stdlib.h>
+#include <support/check.h>
+
+static int
+do_test_1 (const char *name, unsigned int loop, int (*prepare) (void),
+	   int (*function) (void))
+{
+  if (!CPU_FEATURE_USABLE (RTM))
+    return EXIT_UNSUPPORTED;
+
+  int status = prepare ();
+  if (status != EXIT_SUCCESS)
+    return status;
+
+  unsigned int i;
+  unsigned int naborts = 0;
+  unsigned int failed = 0;
+  for (i = 0; i < loop; i++)
+    {
+      failed |= function ();
+      if (_xbegin() == _XBEGIN_STARTED)
+	{
+	  failed |= function ();
+	  _xend();
+	}
+      else
+	{
+	  failed |= function ();
+	  ++naborts;
+	}
+    }
+
+  if (failed)
+    FAIL_EXIT1 ("%s() failed", name);
+
+  if (naborts)
+    {
+      /* NB: Low single digit (<= 5%) noise-level aborts are normal for
+	 TSX.  */
+      double rate = 100 * ((double) naborts) / ((double) loop);
+      if (rate > 5)
+	FAIL_EXIT1 ("TSX abort rate: %.2f%% (%d out of %d)",
+		    rate, naborts, loop);
+    }
+
+  return EXIT_SUCCESS;
+}
+
+static int do_test (void);
+
+#include <support/test-driver.c>
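do_test_1 above runs the string function both inside and outside an RTM transaction and fails only if results differ or the transaction abort rate exceeds the stated noise level. The intrinsics are the standard RTM ones from the x86 intrinsic headers; as a rough standalone illustration, not part of the patch, and assuming RTM hardware plus compilation with -mrtm:

#include <immintrin.h>
#include <stdio.h>

int
main (void)
{
  /* _xbegin() returns _XBEGIN_STARTED inside the new transaction;
     on abort the CPU rolls all memory effects back and control
     resumes here with an abort status instead.  */
  unsigned int status = _xbegin ();
  if (status == _XBEGIN_STARTED)
    {
      /* Transactional region: effects commit atomically at _xend().  */
      _xend ();
      puts ("transaction committed");
    }
  else
    printf ("transaction aborted, status 0x%x\n", status);
  return 0;
}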
diff --git a/sysdeps/x86/tst-strlen-rtm.c b/sysdeps/x86/tst-strlen-rtm.c
new file mode 100644
index 000000000..0dcf14db8
--- /dev/null
+++ b/sysdeps/x86/tst-strlen-rtm.c
@@ -0,0 +1,53 @@
+/* Test case for strlen inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE - 1);
+  string1[STRING_SIZE - 100] = '\0';
+  size_t len = strlen (string1);
+  if (len == STRING_SIZE - 100)
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  size_t len = strlen (string1);
+  if (len == STRING_SIZE - 100)
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("strlen", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-strncmp-rtm.c b/sysdeps/x86/tst-strncmp-rtm.c
new file mode 100644
index 000000000..aef9866cf
--- /dev/null
+++ b/sysdeps/x86/tst-strncmp-rtm.c
@@ -0,0 +1,81 @@
+/* Test case for strncmp inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <stdint.h>
+#include <tst-string-rtm.h>
+
+#ifdef WIDE
+# define CHAR wchar_t
+# define MEMSET wmemset
+# define STRNCMP wcsncmp
+# define TEST_NAME "wcsncmp"
+#else /* !WIDE */
+# define CHAR char
+# define MEMSET memset
+# define STRNCMP strncmp
+# define TEST_NAME "strncmp"
+#endif /* !WIDE */
+
+
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+CHAR string1[STRING_SIZE];
+CHAR string2[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  MEMSET (string1, 'a', STRING_SIZE - 1);
+  MEMSET (string2, 'a', STRING_SIZE - 1);
+  if (STRNCMP (string1, string2, STRING_SIZE) == 0)
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  if (STRNCMP (string1, string2, STRING_SIZE) == 0)
+    return 0;
+  else
+    return 1;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function_overflow (void)
+{
+  if (STRNCMP (string1, string2, SIZE_MAX) == 0)
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  int status = do_test_1 (TEST_NAME, LOOP, prepare, function);
+  if (status != EXIT_SUCCESS)
+    return status;
+  status = do_test_1 (TEST_NAME, LOOP, prepare, function_overflow);
+  return status;
+}
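function_overflow passes SIZE_MAX as the length bound; that is valid for strncmp/wcsncmp as long as both strings terminate first, and it stresses the length arithmetic in the vector implementations, where a huge n must not overflow end-pointer computations. In plain ISO C terms, a small sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char a[] = "abc", b[] = "abc";
  /* n may exceed the buffers: comparison stops at the first NUL or
     the first difference, so SIZE_MAX is a legal bound here.  */
  printf ("%d\n", strncmp (a, b, SIZE_MAX));  /* prints 0 */
  return 0;
}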
diff --git a/sysdeps/x86/tst-strrchr-rtm.c b/sysdeps/x86/tst-strrchr-rtm.c
new file mode 100644
index 000000000..e32bfaf5f
--- /dev/null
+++ b/sysdeps/x86/tst-strrchr-rtm.c
@@ -0,0 +1,53 @@
+/* Test case for strrchr inside a transactionally executing RTM region.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <tst-string-rtm.h>
+
+#define LOOP 3000
+#define STRING_SIZE 1024
+char string1[STRING_SIZE];
+
+__attribute__ ((noinline, noclone))
+static int
+prepare (void)
+{
+  memset (string1, 'a', STRING_SIZE - 1);
+  string1[STRING_SIZE - 100] = 'c';
+  char *p = strrchr (string1, 'c');
+  if (p == &string1[STRING_SIZE - 100])
+    return EXIT_SUCCESS;
+  else
+    return EXIT_FAILURE;
+}
+
+__attribute__ ((noinline, noclone))
+static int
+function (void)
+{
+  char *p = strrchr (string1, 'c');
+  if (p == &string1[STRING_SIZE - 100])
+    return 0;
+  else
+    return 1;
+}
+
+static int
+do_test (void)
+{
+  return do_test_1 ("strrchr", LOOP, prepare, function);
+}
diff --git a/sysdeps/x86/tst-sysconf-cache-linesize-static.c b/sysdeps/x86/tst-sysconf-cache-linesize-static.c
new file mode 100644
index 000000000..152ae6882
--- /dev/null
+++ b/sysdeps/x86/tst-sysconf-cache-linesize-static.c
@@ -0,0 +1 @@
+#include "tst-sysconf-cache-linesize.c"
diff --git a/sysdeps/x86/tst-sysconf-cache-linesize.c b/sysdeps/x86/tst-sysconf-cache-linesize.c
new file mode 100644
index 000000000..642dbde5d
--- /dev/null
+++ b/sysdeps/x86/tst-sysconf-cache-linesize.c
@@ -0,0 +1,57 @@
+/* Test system cache line sizes.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <array_length.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+static struct
+{
+  const char *name;
+  int _SC_val;
+} sc_options[] =
+  {
+#define N(name) { "_SC_"#name, _SC_##name }
+    N (LEVEL1_ICACHE_LINESIZE),
+    N (LEVEL1_DCACHE_LINESIZE),
+    N (LEVEL2_CACHE_LINESIZE)
+  };
+
+static int
+do_test (void)
+{
+  int result = EXIT_SUCCESS;
+
+  for (int i = 0; i < array_length (sc_options); ++i)
+    {
+      long int scret = sysconf (sc_options[i]._SC_val);
+      if (scret < 0)
+	{
+	  printf ("sysconf (%s) returned < 0 (%ld)\n",
+		  sc_options[i].name, scret);
+	  result = EXIT_FAILURE;
+	}
+      else
+	printf ("sysconf (%s): %ld\n", sc_options[i].name, scret);
+    }
+
+  return result;
+}
+
+#include <support/test-driver.c>
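The corresponding runtime query is an ordinary sysconf call; a hedged usage sketch (with this patch applied, glibc derives these values from CPUID data on x86, so a sane system should not report them as unavailable):

#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  long linesize = sysconf (_SC_LEVEL1_DCACHE_LINESIZE);
  if (linesize > 0)
    printf ("L1 dcache line: %ld bytes\n", linesize);
  else
    puts ("L1 dcache line size not available");
  return 0;
}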
diff --git a/sysdeps/x86/tst-wcsncmp-rtm.c b/sysdeps/x86/tst-wcsncmp-rtm.c
new file mode 100644
index 000000000..bad3b8637
--- /dev/null
+++ b/sysdeps/x86/tst-wcsncmp-rtm.c
@@ -0,0 +1,21 @@
+/* Test case for wcsncmp inside a transactionally executing RTM region.
+   Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#define WIDE 1
+#include <wchar.h>
+#include "tst-strncmp-rtm.c"
diff --git a/sysdeps/x86_64/Makefile b/sysdeps/x86_64/Makefile
index d1d7cb9d2..2dd9fd516 100644
--- a/sysdeps/x86_64/Makefile
+++ b/sysdeps/x86_64/Makefile
@@ -20,6 +20,8 @@ endif
 ifeq ($(subdir),string)
 sysdep_routines += strcasecmp_l-nonascii strncase_l-nonascii
 gen-as-const-headers += locale-defines.sym
+tests += \
+  tst-rsi-strlen
 endif
 
 ifeq ($(subdir),elf)
@@ -189,6 +191,11 @@ ifeq ($(subdir),csu)
 gen-as-const-headers += tlsdesc.sym rtld-offsets.sym
 endif
 
+ifeq ($(subdir),wcsmbs)
+tests += \
+  tst-rsi-wcslen
+endif
+
 $(objpfx)x86_64/tst-x86_64mod-1.os: $(objpfx)tst-x86_64mod-1.os
 	$(make-target-directory)
 	rm -f $@
diff --git a/sysdeps/x86_64/configure b/sysdeps/x86_64/configure
old mode 100644
new mode 100755
index 198554d78..75c96d60d
--- a/sysdeps/x86_64/configure
+++ b/sysdeps/x86_64/configure
@@ -107,39 +107,6 @@ if test x"$build_mathvec" = xnotset; then
   build_mathvec=yes
 fi
 
-if test "$static_pie" = yes; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for linker static PIE support" >&5
-$as_echo_n "checking for linker static PIE support... " >&6; }
-if ${libc_cv_ld_static_pie+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat > conftest.s <<\EOF
-	.text
-	.global _start
-	.weak foo
-_start:
-	leaq	foo(%rip), %rax
-EOF
-  libc_cv_pie_option="-Wl,-pie"
-  if { ac_try='${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS -nostartfiles -nostdlib $no_ssp $libc_cv_pie_option -o conftest conftest.s 1>&5'
-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then
-    libc_cv_ld_static_pie=yes
-  else
-    libc_cv_ld_static_pie=no
-  fi
-rm -f conftest*
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_ld_static_pie" >&5
-$as_echo "$libc_cv_ld_static_pie" >&6; }
-  if test "$libc_cv_ld_static_pie" != yes; then
-    as_fn_error $?
"linker support for static PIE needed" "$LINENO" 5 - fi -fi - $as_echo "#define PI_STATIC_AND_HIDDEN 1" >>confdefs.h diff --git a/sysdeps/x86_64/configure.ac b/sysdeps/x86_64/configure.ac index ec776274a..66219e7ce 100644 --- a/sysdeps/x86_64/configure.ac +++ b/sysdeps/x86_64/configure.ac @@ -53,31 +53,6 @@ if test x"$build_mathvec" = xnotset; then build_mathvec=yes fi -dnl Check if linker supports static PIE with the fix for -dnl -dnl https://sourceware.org/bugzilla/show_bug.cgi?id=21782 -dnl -if test "$static_pie" = yes; then - AC_CACHE_CHECK(for linker static PIE support, libc_cv_ld_static_pie, [dnl -cat > conftest.s <<\EOF - .text - .global _start - .weak foo -_start: - leaq foo(%rip), %rax -EOF - libc_cv_pie_option="-Wl,-pie" - if AC_TRY_COMMAND(${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS -nostartfiles -nostdlib $no_ssp $libc_cv_pie_option -o conftest conftest.s 1>&AS_MESSAGE_LOG_FD); then - libc_cv_ld_static_pie=yes - else - libc_cv_ld_static_pie=no - fi -rm -f conftest*]) - if test "$libc_cv_ld_static_pie" != yes; then - AC_MSG_ERROR([linker support for static PIE needed]) - fi -fi - dnl It is always possible to access static and hidden symbols in an dnl position independent way. AC_DEFINE(PI_STATIC_AND_HIDDEN) diff --git a/sysdeps/x86_64/memchr.S b/sysdeps/x86_64/memchr.S index beff2708d..3ddc4655c 100644 --- a/sysdeps/x86_64/memchr.S +++ b/sysdeps/x86_64/memchr.S @@ -21,9 +21,11 @@ #ifdef USE_AS_WMEMCHR # define MEMCHR wmemchr # define PCMPEQ pcmpeqd +# define CHAR_PER_VEC 4 #else # define MEMCHR memchr # define PCMPEQ pcmpeqb +# define CHAR_PER_VEC 16 #endif /* fast SSE2 version with using pmaxub and 64 byte loop */ @@ -33,15 +35,14 @@ ENTRY(MEMCHR) movd %esi, %xmm1 mov %edi, %ecx +#ifdef __ILP32__ + /* Clear the upper 32 bits. */ + movl %edx, %edx +#endif #ifdef USE_AS_WMEMCHR test %RDX_LP, %RDX_LP jz L(return_null) - shl $2, %RDX_LP #else -# ifdef __ILP32__ - /* Clear the upper 32 bits. */ - movl %edx, %edx -# endif punpcklbw %xmm1, %xmm1 test %RDX_LP, %RDX_LP jz L(return_null) @@ -60,13 +61,16 @@ ENTRY(MEMCHR) test %eax, %eax jnz L(matches_1) - sub $16, %rdx + sub $CHAR_PER_VEC, %rdx jbe L(return_null) add $16, %rdi and $15, %ecx and $-16, %rdi +#ifdef USE_AS_WMEMCHR + shr $2, %ecx +#endif add %rcx, %rdx - sub $64, %rdx + sub $(CHAR_PER_VEC * 4), %rdx jbe L(exit_loop) jmp L(loop_prolog) @@ -77,16 +81,21 @@ L(crosscache): movdqa (%rdi), %xmm0 PCMPEQ %xmm1, %xmm0 -/* Check if there is a match. */ + /* Check if there is a match. */ pmovmskb %xmm0, %eax -/* Remove the leading bytes. */ + /* Remove the leading bytes. */ sar %cl, %eax test %eax, %eax je L(unaligned_no_match) -/* Check which byte is a match. */ + /* Check which byte is a match. */ bsf %eax, %eax - +#ifdef USE_AS_WMEMCHR + mov %eax, %esi + shr $2, %esi + sub %rsi, %rdx +#else sub %rax, %rdx +#endif jbe L(return_null) add %rdi, %rax add %rcx, %rax @@ -94,15 +103,18 @@ L(crosscache): .p2align 4 L(unaligned_no_match): - /* "rcx" is less than 16. Calculate "rdx + rcx - 16" by using + /* "rcx" is less than 16. Calculate "rdx + rcx - 16" by using "rdx - (16 - rcx)" instead of "(rdx + rcx) - 16" to void possible addition overflow. 
*/ neg %rcx add $16, %rcx +#ifdef USE_AS_WMEMCHR + shr $2, %ecx +#endif sub %rcx, %rdx jbe L(return_null) add $16, %rdi - sub $64, %rdx + sub $(CHAR_PER_VEC * 4), %rdx jbe L(exit_loop) .p2align 4 @@ -135,7 +147,7 @@ L(loop_prolog): test $0x3f, %rdi jz L(align64_loop) - sub $64, %rdx + sub $(CHAR_PER_VEC * 4), %rdx jbe L(exit_loop) movdqa (%rdi), %xmm0 @@ -167,11 +179,14 @@ L(loop_prolog): mov %rdi, %rcx and $-64, %rdi and $63, %ecx +#ifdef USE_AS_WMEMCHR + shr $2, %ecx +#endif add %rcx, %rdx .p2align 4 L(align64_loop): - sub $64, %rdx + sub $(CHAR_PER_VEC * 4), %rdx jbe L(exit_loop) movdqa (%rdi), %xmm0 movdqa 16(%rdi), %xmm2 @@ -218,7 +233,7 @@ L(align64_loop): .p2align 4 L(exit_loop): - add $32, %edx + add $(CHAR_PER_VEC * 2), %edx jle L(exit_loop_32) movdqa (%rdi), %xmm0 @@ -238,7 +253,7 @@ L(exit_loop): pmovmskb %xmm3, %eax test %eax, %eax jnz L(matches32_1) - sub $16, %edx + sub $CHAR_PER_VEC, %edx jle L(return_null) PCMPEQ 48(%rdi), %xmm1 @@ -250,13 +265,13 @@ L(exit_loop): .p2align 4 L(exit_loop_32): - add $32, %edx + add $(CHAR_PER_VEC * 2), %edx movdqa (%rdi), %xmm0 PCMPEQ %xmm1, %xmm0 pmovmskb %xmm0, %eax test %eax, %eax jnz L(matches_1) - sub $16, %edx + sub $CHAR_PER_VEC, %edx jbe L(return_null) PCMPEQ 16(%rdi), %xmm1 @@ -293,7 +308,13 @@ L(matches32): .p2align 4 L(matches_1): bsf %eax, %eax +#ifdef USE_AS_WMEMCHR + mov %eax, %esi + shr $2, %esi + sub %rsi, %rdx +#else sub %rax, %rdx +#endif jbe L(return_null) add %rdi, %rax ret @@ -301,7 +322,13 @@ L(matches_1): .p2align 4 L(matches16_1): bsf %eax, %eax +#ifdef USE_AS_WMEMCHR + mov %eax, %esi + shr $2, %esi + sub %rsi, %rdx +#else sub %rax, %rdx +#endif jbe L(return_null) lea 16(%rdi, %rax), %rax ret @@ -309,7 +336,13 @@ L(matches16_1): .p2align 4 L(matches32_1): bsf %eax, %eax +#ifdef USE_AS_WMEMCHR + mov %eax, %esi + shr $2, %esi + sub %rsi, %rdx +#else sub %rax, %rdx +#endif jbe L(return_null) lea 32(%rdi, %rax), %rax ret @@ -317,7 +350,13 @@ L(matches32_1): .p2align 4 L(matches48_1): bsf %eax, %eax +#ifdef USE_AS_WMEMCHR + mov %eax, %esi + shr $2, %esi + sub %rsi, %rdx +#else sub %rax, %rdx +#endif jbe L(return_null) lea 48(%rdi, %rax), %rax ret diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile index 9477538af..65fde4eb9 100644 --- a/sysdeps/x86_64/multiarch/Makefile +++ b/sysdeps/x86_64/multiarch/Makefile @@ -39,7 +39,45 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c \ memmove-avx512-unaligned-erms \ memset-sse2-unaligned-erms \ memset-avx2-unaligned-erms \ - memset-avx512-unaligned-erms + memset-avx512-unaligned-erms \ + memchr-avx2-rtm \ + memcmp-avx2-movbe-rtm \ + memmove-avx-unaligned-erms-rtm \ + memrchr-avx2-rtm \ + memset-avx2-unaligned-erms-rtm \ + rawmemchr-avx2-rtm \ + strchr-avx2-rtm \ + strcmp-avx2-rtm \ + strchrnul-avx2-rtm \ + stpcpy-avx2-rtm \ + stpncpy-avx2-rtm \ + strcat-avx2-rtm \ + strcpy-avx2-rtm \ + strlen-avx2-rtm \ + strncat-avx2-rtm \ + strncmp-avx2-rtm \ + strncpy-avx2-rtm \ + strnlen-avx2-rtm \ + strrchr-avx2-rtm \ + memchr-evex \ + memcmp-evex-movbe \ + memmove-evex-unaligned-erms \ + memrchr-evex \ + memset-evex-unaligned-erms \ + rawmemchr-evex \ + stpcpy-evex \ + stpncpy-evex \ + strcat-evex \ + strchr-evex \ + strchrnul-evex \ + strcmp-evex \ + strcpy-evex \ + strlen-evex \ + strncat-evex \ + strncmp-evex \ + strncpy-evex \ + strnlen-evex \ + strrchr-evex CFLAGS-varshift.c += -msse4 CFLAGS-strcspn-c.c += -msse4 CFLAGS-strpbrk-c.c += -msse4 @@ -55,8 +93,24 @@ sysdep_routines += wmemcmp-sse4 wmemcmp-ssse3 wmemcmp-c \ wcscpy-ssse3 wcscpy-c \ wcschr-sse2 
wcschr-avx2 \ wcsrchr-sse2 wcsrchr-avx2 \ - wcsnlen-sse4_1 wcsnlen-c \ - wcslen-sse2 wcslen-avx2 wcsnlen-avx2 + wcslen-sse2 wcslen-sse4_1 wcslen-avx2 \ + wcsnlen-c wcsnlen-sse4_1 wcsnlen-avx2 \ + wcschr-avx2-rtm \ + wcscmp-avx2-rtm \ + wcslen-avx2-rtm \ + wcsncmp-avx2-rtm \ + wcsnlen-avx2-rtm \ + wcsrchr-avx2-rtm \ + wmemchr-avx2-rtm \ + wmemcmp-avx2-movbe-rtm \ + wcschr-evex \ + wcscmp-evex \ + wcslen-evex \ + wcsncmp-evex \ + wcsnlen-evex \ + wcsrchr-evex \ + wmemchr-evex \ + wmemcmp-evex-movbe endif ifeq ($(subdir),debug) diff --git a/sysdeps/x86_64/multiarch/ifunc-avx2.h b/sysdeps/x86_64/multiarch/ifunc-avx2.h index bbaf5dcf1..e3ec62ca5 100644 --- a/sysdeps/x86_64/multiarch/ifunc-avx2.h +++ b/sysdeps/x86_64/multiarch/ifunc-avx2.h @@ -21,16 +21,28 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden; static inline void * IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2); + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW) + && CPU_FEATURE_USABLE_P (cpu_features, BMI2)) + return OPTIMIZE (evex); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2); + } return OPTIMIZE (sse2); } diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c index 1be5dd032..d891f8181 100644 --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c @@ -43,6 +43,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, memchr, CPU_FEATURE_USABLE (AVX2), __memchr_avx2) + IFUNC_IMPL_ADD (array, i, memchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __memchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, memchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __memchr_evex) IFUNC_IMPL_ADD (array, i, memchr, 1, __memchr_sse2)) /* Support sysdeps/x86_64/multiarch/memcmp.c. 
*/ @@ -51,6 +60,16 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, (CPU_FEATURE_USABLE (AVX2) && CPU_FEATURE_USABLE (MOVBE)), __memcmp_avx2_movbe) + IFUNC_IMPL_ADD (array, i, memcmp, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (MOVBE) + && CPU_FEATURE_USABLE (RTM)), + __memcmp_avx2_movbe_rtm) + IFUNC_IMPL_ADD (array, i, memcmp, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (MOVBE)), + __memcmp_evex_movbe) IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSE4_1), __memcmp_sse4_1) IFUNC_IMPL_ADD (array, i, memcmp, CPU_FEATURE_USABLE (SSSE3), @@ -64,10 +83,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX512F), __memmove_chk_avx512_no_vzeroupper) IFUNC_IMPL_ADD (array, i, __memmove_chk, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memmove_chk_avx512_unaligned) IFUNC_IMPL_ADD (array, i, __memmove_chk, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memmove_chk_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, __memmove_chk, CPU_FEATURE_USABLE (AVX), @@ -75,6 +94,20 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, __memmove_chk, CPU_FEATURE_USABLE (AVX), __memmove_chk_avx_unaligned_erms) + IFUNC_IMPL_ADD (array, i, __memmove_chk, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memmove_chk_avx_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, __memmove_chk, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memmove_chk_avx_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, __memmove_chk, + CPU_FEATURE_USABLE (AVX512VL), + __memmove_chk_evex_unaligned) + IFUNC_IMPL_ADD (array, i, __memmove_chk, + CPU_FEATURE_USABLE (AVX512VL), + __memmove_chk_evex_unaligned_erms) IFUNC_IMPL_ADD (array, i, __memmove_chk, CPU_FEATURE_USABLE (SSSE3), __memmove_chk_ssse3_back) @@ -97,14 +130,28 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (AVX), __memmove_avx_unaligned_erms) + IFUNC_IMPL_ADD (array, i, memmove, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memmove_avx_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, memmove, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memmove_avx_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, memmove, + CPU_FEATURE_USABLE (AVX512VL), + __memmove_evex_unaligned) + IFUNC_IMPL_ADD (array, i, memmove, + CPU_FEATURE_USABLE (AVX512VL), + __memmove_evex_unaligned_erms) IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (AVX512F), __memmove_avx512_no_vzeroupper) IFUNC_IMPL_ADD (array, i, memmove, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memmove_avx512_unaligned) IFUNC_IMPL_ADD (array, i, memmove, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memmove_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, memmove, CPU_FEATURE_USABLE (SSSE3), __memmove_ssse3_back) @@ -121,6 +168,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, memrchr, CPU_FEATURE_USABLE (AVX2), __memrchr_avx2) + IFUNC_IMPL_ADD (array, i, memrchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __memrchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, memrchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __memrchr_evex) + IFUNC_IMPL_ADD (array, i, memrchr, 1, __memrchr_sse2)) #ifdef SHARED @@ -139,10 +195,28 @@ __libc_ifunc_impl_list 
(const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX2), __memset_chk_avx2_unaligned_erms) IFUNC_IMPL_ADD (array, i, __memset_chk, - CPU_FEATURE_USABLE (AVX512F), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __memset_chk_avx2_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __memset_chk_avx2_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __memset_chk_evex_unaligned) + IFUNC_IMPL_ADD (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __memset_chk_evex_unaligned_erms) + IFUNC_IMPL_ADD (array, i, __memset_chk, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), __memset_chk_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, __memset_chk, - CPU_FEATURE_USABLE (AVX512F), + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), __memset_chk_avx512_unaligned) IFUNC_IMPL_ADD (array, i, __memset_chk, CPU_FEATURE_USABLE (AVX512F), @@ -164,10 +238,28 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX2), __memset_avx2_unaligned_erms) IFUNC_IMPL_ADD (array, i, memset, - CPU_FEATURE_USABLE (AVX512F), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __memset_avx2_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, memset, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __memset_avx2_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, memset, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __memset_evex_unaligned) + IFUNC_IMPL_ADD (array, i, memset, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __memset_evex_unaligned_erms) + IFUNC_IMPL_ADD (array, i, memset, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), __memset_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, memset, - CPU_FEATURE_USABLE (AVX512F), + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), __memset_avx512_unaligned) IFUNC_IMPL_ADD (array, i, memset, CPU_FEATURE_USABLE (AVX512F), @@ -179,20 +271,51 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, rawmemchr, CPU_FEATURE_USABLE (AVX2), __rawmemchr_avx2) + IFUNC_IMPL_ADD (array, i, rawmemchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __rawmemchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, rawmemchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __rawmemchr_evex) IFUNC_IMPL_ADD (array, i, rawmemchr, 1, __rawmemchr_sse2)) /* Support sysdeps/x86_64/multiarch/strlen.c. */ IFUNC_IMPL (i, name, strlen, IFUNC_IMPL_ADD (array, i, strlen, - CPU_FEATURE_USABLE (AVX2), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2)), __strlen_avx2) + IFUNC_IMPL_ADD (array, i, strlen, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2) + && CPU_FEATURE_USABLE (RTM)), + __strlen_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strlen, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __strlen_evex) IFUNC_IMPL_ADD (array, i, strlen, 1, __strlen_sse2)) /* Support sysdeps/x86_64/multiarch/strnlen.c. 
*/ IFUNC_IMPL (i, name, strnlen, IFUNC_IMPL_ADD (array, i, strnlen, - CPU_FEATURE_USABLE (AVX2), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2)), __strnlen_avx2) + IFUNC_IMPL_ADD (array, i, strnlen, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2) + && CPU_FEATURE_USABLE (RTM)), + __strnlen_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strnlen, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __strnlen_evex) IFUNC_IMPL_ADD (array, i, strnlen, 1, __strnlen_sse2)) /* Support sysdeps/x86_64/multiarch/stpncpy.c. */ @@ -201,6 +324,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, __stpncpy_ssse3) IFUNC_IMPL_ADD (array, i, stpncpy, CPU_FEATURE_USABLE (AVX2), __stpncpy_avx2) + IFUNC_IMPL_ADD (array, i, stpncpy, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __stpncpy_avx2_rtm) + IFUNC_IMPL_ADD (array, i, stpncpy, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __stpncpy_evex) IFUNC_IMPL_ADD (array, i, stpncpy, 1, __stpncpy_sse2_unaligned) IFUNC_IMPL_ADD (array, i, stpncpy, 1, __stpncpy_sse2)) @@ -211,6 +342,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, __stpcpy_ssse3) IFUNC_IMPL_ADD (array, i, stpcpy, CPU_FEATURE_USABLE (AVX2), __stpcpy_avx2) + IFUNC_IMPL_ADD (array, i, stpcpy, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __stpcpy_avx2_rtm) + IFUNC_IMPL_ADD (array, i, stpcpy, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __stpcpy_evex) IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2_unaligned) IFUNC_IMPL_ADD (array, i, stpcpy, 1, __stpcpy_sse2)) @@ -245,6 +384,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL (i, name, strcat, IFUNC_IMPL_ADD (array, i, strcat, CPU_FEATURE_USABLE (AVX2), __strcat_avx2) + IFUNC_IMPL_ADD (array, i, strcat, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strcat_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strcat, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __strcat_evex) IFUNC_IMPL_ADD (array, i, strcat, CPU_FEATURE_USABLE (SSSE3), __strcat_ssse3) IFUNC_IMPL_ADD (array, i, strcat, 1, __strcat_sse2_unaligned) @@ -255,6 +402,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, strchr, CPU_FEATURE_USABLE (AVX2), __strchr_avx2) + IFUNC_IMPL_ADD (array, i, strchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __strchr_evex) IFUNC_IMPL_ADD (array, i, strchr, 1, __strchr_sse2_no_bsf) IFUNC_IMPL_ADD (array, i, strchr, 1, __strchr_sse2)) @@ -263,6 +419,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, strchrnul, CPU_FEATURE_USABLE (AVX2), __strchrnul_avx2) + IFUNC_IMPL_ADD (array, i, strchrnul, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strchrnul_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strchrnul, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __strchrnul_evex) IFUNC_IMPL_ADD (array, i, strchrnul, 1, __strchrnul_sse2)) /* Support sysdeps/x86_64/multiarch/strrchr.c. 
*/ @@ -270,6 +435,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, strrchr, CPU_FEATURE_USABLE (AVX2), __strrchr_avx2) + IFUNC_IMPL_ADD (array, i, strrchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strrchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strrchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __strrchr_evex) IFUNC_IMPL_ADD (array, i, strrchr, 1, __strrchr_sse2)) /* Support sysdeps/x86_64/multiarch/strcmp.c. */ @@ -277,6 +450,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, strcmp, CPU_FEATURE_USABLE (AVX2), __strcmp_avx2) + IFUNC_IMPL_ADD (array, i, strcmp, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strcmp_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strcmp, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __strcmp_evex) IFUNC_IMPL_ADD (array, i, strcmp, CPU_FEATURE_USABLE (SSE4_2), __strcmp_sse42) IFUNC_IMPL_ADD (array, i, strcmp, CPU_FEATURE_USABLE (SSSE3), @@ -288,6 +470,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL (i, name, strcpy, IFUNC_IMPL_ADD (array, i, strcpy, CPU_FEATURE_USABLE (AVX2), __strcpy_avx2) + IFUNC_IMPL_ADD (array, i, strcpy, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strcpy_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strcpy, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __strcpy_evex) IFUNC_IMPL_ADD (array, i, strcpy, CPU_FEATURE_USABLE (SSSE3), __strcpy_ssse3) IFUNC_IMPL_ADD (array, i, strcpy, 1, __strcpy_sse2_unaligned) @@ -331,6 +521,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL (i, name, strncat, IFUNC_IMPL_ADD (array, i, strncat, CPU_FEATURE_USABLE (AVX2), __strncat_avx2) + IFUNC_IMPL_ADD (array, i, strncat, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strncat_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strncat, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __strncat_evex) IFUNC_IMPL_ADD (array, i, strncat, CPU_FEATURE_USABLE (SSSE3), __strncat_ssse3) IFUNC_IMPL_ADD (array, i, strncat, 1, @@ -341,6 +539,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL (i, name, strncpy, IFUNC_IMPL_ADD (array, i, strncpy, CPU_FEATURE_USABLE (AVX2), __strncpy_avx2) + IFUNC_IMPL_ADD (array, i, strncpy, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strncpy_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strncpy, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __strncpy_evex) IFUNC_IMPL_ADD (array, i, strncpy, CPU_FEATURE_USABLE (SSSE3), __strncpy_ssse3) IFUNC_IMPL_ADD (array, i, strncpy, 1, @@ -370,6 +576,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, wcschr, CPU_FEATURE_USABLE (AVX2), __wcschr_avx2) + IFUNC_IMPL_ADD (array, i, wcschr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __wcschr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wcschr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wcschr_evex) IFUNC_IMPL_ADD (array, i, wcschr, 1, __wcschr_sse2)) /* Support sysdeps/x86_64/multiarch/wcsrchr.c. 
*/ @@ -377,6 +592,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, wcsrchr, CPU_FEATURE_USABLE (AVX2), __wcsrchr_avx2) + IFUNC_IMPL_ADD (array, i, wcsrchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __wcsrchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wcsrchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wcsrchr_evex) IFUNC_IMPL_ADD (array, i, wcsrchr, 1, __wcsrchr_sse2)) /* Support sysdeps/x86_64/multiarch/wcscmp.c. */ @@ -384,6 +608,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, wcscmp, CPU_FEATURE_USABLE (AVX2), __wcscmp_avx2) + IFUNC_IMPL_ADD (array, i, wcscmp, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __wcscmp_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wcscmp, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wcscmp_evex) IFUNC_IMPL_ADD (array, i, wcscmp, 1, __wcscmp_sse2)) /* Support sysdeps/x86_64/multiarch/wcsncmp.c. */ @@ -391,6 +624,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, wcsncmp, CPU_FEATURE_USABLE (AVX2), __wcsncmp_avx2) + IFUNC_IMPL_ADD (array, i, wcsncmp, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __wcsncmp_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wcsncmp, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wcsncmp_evex) IFUNC_IMPL_ADD (array, i, wcsncmp, 1, __wcsncmp_sse2)) /* Support sysdeps/x86_64/multiarch/wcscpy.c. */ @@ -402,15 +644,40 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, /* Support sysdeps/x86_64/multiarch/wcslen.c. */ IFUNC_IMPL (i, name, wcslen, IFUNC_IMPL_ADD (array, i, wcslen, - CPU_FEATURE_USABLE (AVX2), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2)), __wcslen_avx2) + IFUNC_IMPL_ADD (array, i, wcslen, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2) + && CPU_FEATURE_USABLE (RTM)), + __wcslen_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wcslen, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wcslen_evex) + IFUNC_IMPL_ADD (array, i, wcslen, + CPU_FEATURE_USABLE (SSE4_1), + __wcslen_sse4_1) IFUNC_IMPL_ADD (array, i, wcslen, 1, __wcslen_sse2)) /* Support sysdeps/x86_64/multiarch/wcsnlen.c. 
*/ IFUNC_IMPL (i, name, wcsnlen, IFUNC_IMPL_ADD (array, i, wcsnlen, - CPU_FEATURE_USABLE (AVX2), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2)), __wcsnlen_avx2) + IFUNC_IMPL_ADD (array, i, wcsnlen, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (BMI2) + && CPU_FEATURE_USABLE (RTM)), + __wcsnlen_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wcsnlen, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wcsnlen_evex) IFUNC_IMPL_ADD (array, i, wcsnlen, CPU_FEATURE_USABLE (SSE4_1), __wcsnlen_sse4_1) @@ -421,6 +688,15 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, wmemchr, CPU_FEATURE_USABLE (AVX2), __wmemchr_avx2) + IFUNC_IMPL_ADD (array, i, wmemchr, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __wmemchr_avx2_rtm) + IFUNC_IMPL_ADD (array, i, wmemchr, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (BMI2)), + __wmemchr_evex) IFUNC_IMPL_ADD (array, i, wmemchr, 1, __wmemchr_sse2)) /* Support sysdeps/x86_64/multiarch/wmemcmp.c. */ @@ -429,6 +705,16 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, (CPU_FEATURE_USABLE (AVX2) && CPU_FEATURE_USABLE (MOVBE)), __wmemcmp_avx2_movbe) + IFUNC_IMPL_ADD (array, i, wmemcmp, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (MOVBE) + && CPU_FEATURE_USABLE (RTM)), + __wmemcmp_avx2_movbe_rtm) + IFUNC_IMPL_ADD (array, i, wmemcmp, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW) + && CPU_FEATURE_USABLE (MOVBE)), + __wmemcmp_evex_movbe) IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSE4_1), __wmemcmp_sse4_1) IFUNC_IMPL_ADD (array, i, wmemcmp, CPU_FEATURE_USABLE (SSSE3), @@ -443,7 +729,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX2), __wmemset_avx2_unaligned) IFUNC_IMPL_ADD (array, i, wmemset, - CPU_FEATURE_USABLE (AVX512F), + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __wmemset_avx2_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, wmemset, + CPU_FEATURE_USABLE (AVX512VL), + __wmemset_evex_unaligned) + IFUNC_IMPL_ADD (array, i, wmemset, + CPU_FEATURE_USABLE (AVX512VL), __wmemset_avx512_unaligned)) #ifdef SHARED @@ -453,10 +746,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX512F), __memcpy_chk_avx512_no_vzeroupper) IFUNC_IMPL_ADD (array, i, __memcpy_chk, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memcpy_chk_avx512_unaligned) IFUNC_IMPL_ADD (array, i, __memcpy_chk, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memcpy_chk_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, __memcpy_chk, CPU_FEATURE_USABLE (AVX), @@ -464,6 +757,20 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, __memcpy_chk, CPU_FEATURE_USABLE (AVX), __memcpy_chk_avx_unaligned_erms) + IFUNC_IMPL_ADD (array, i, __memcpy_chk, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memcpy_chk_avx_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, __memcpy_chk, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memcpy_chk_avx_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, __memcpy_chk, + CPU_FEATURE_USABLE (AVX512VL), + __memcpy_chk_evex_unaligned) + IFUNC_IMPL_ADD (array, i, __memcpy_chk, + CPU_FEATURE_USABLE (AVX512VL), + __memcpy_chk_evex_unaligned_erms) IFUNC_IMPL_ADD (array, i, __memcpy_chk, CPU_FEATURE_USABLE (SSSE3), 
__memcpy_chk_ssse3_back) @@ -486,6 +793,20 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (AVX), __memcpy_avx_unaligned_erms) + IFUNC_IMPL_ADD (array, i, memcpy, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memcpy_avx_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, memcpy, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __memcpy_avx_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, memcpy, + CPU_FEATURE_USABLE (AVX512VL), + __memcpy_evex_unaligned) + IFUNC_IMPL_ADD (array, i, memcpy, + CPU_FEATURE_USABLE (AVX512VL), + __memcpy_evex_unaligned_erms) IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (SSSE3), __memcpy_ssse3_back) IFUNC_IMPL_ADD (array, i, memcpy, CPU_FEATURE_USABLE (SSSE3), @@ -494,10 +815,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX512F), __memcpy_avx512_no_vzeroupper) IFUNC_IMPL_ADD (array, i, memcpy, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memcpy_avx512_unaligned) IFUNC_IMPL_ADD (array, i, memcpy, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __memcpy_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_sse2_unaligned) IFUNC_IMPL_ADD (array, i, memcpy, 1, @@ -511,10 +832,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX512F), __mempcpy_chk_avx512_no_vzeroupper) IFUNC_IMPL_ADD (array, i, __mempcpy_chk, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __mempcpy_chk_avx512_unaligned) IFUNC_IMPL_ADD (array, i, __mempcpy_chk, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __mempcpy_chk_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, __mempcpy_chk, CPU_FEATURE_USABLE (AVX), @@ -522,6 +843,20 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, __mempcpy_chk, CPU_FEATURE_USABLE (AVX), __mempcpy_chk_avx_unaligned_erms) + IFUNC_IMPL_ADD (array, i, __mempcpy_chk, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __mempcpy_chk_avx_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, __mempcpy_chk, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __mempcpy_chk_avx_unaligned_erms_rtm) + IFUNC_IMPL_ADD (array, i, __mempcpy_chk, + CPU_FEATURE_USABLE (AVX512VL), + __mempcpy_chk_evex_unaligned) + IFUNC_IMPL_ADD (array, i, __mempcpy_chk, + CPU_FEATURE_USABLE (AVX512VL), + __mempcpy_chk_evex_unaligned_erms) IFUNC_IMPL_ADD (array, i, __mempcpy_chk, CPU_FEATURE_USABLE (SSSE3), __mempcpy_chk_ssse3_back) @@ -542,10 +877,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, CPU_FEATURE_USABLE (AVX512F), __mempcpy_avx512_no_vzeroupper) IFUNC_IMPL_ADD (array, i, mempcpy, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __mempcpy_avx512_unaligned) IFUNC_IMPL_ADD (array, i, mempcpy, - CPU_FEATURE_USABLE (AVX512F), + CPU_FEATURE_USABLE (AVX512VL), __mempcpy_avx512_unaligned_erms) IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (AVX), @@ -553,6 +888,20 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (AVX), __mempcpy_avx_unaligned_erms) + IFUNC_IMPL_ADD (array, i, mempcpy, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __mempcpy_avx_unaligned_rtm) + IFUNC_IMPL_ADD (array, i, mempcpy, + (CPU_FEATURE_USABLE (AVX) + && CPU_FEATURE_USABLE (RTM)), + __mempcpy_avx_unaligned_erms_rtm) + 
IFUNC_IMPL_ADD (array, i, mempcpy, + CPU_FEATURE_USABLE (AVX512VL), + __mempcpy_evex_unaligned) + IFUNC_IMPL_ADD (array, i, mempcpy, + CPU_FEATURE_USABLE (AVX512VL), + __mempcpy_evex_unaligned_erms) IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (SSSE3), __mempcpy_ssse3_back) IFUNC_IMPL_ADD (array, i, mempcpy, CPU_FEATURE_USABLE (SSSE3), @@ -568,6 +917,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, strncmp, CPU_FEATURE_USABLE (AVX2), __strncmp_avx2) + IFUNC_IMPL_ADD (array, i, strncmp, + (CPU_FEATURE_USABLE (AVX2) + && CPU_FEATURE_USABLE (RTM)), + __strncmp_avx2_rtm) + IFUNC_IMPL_ADD (array, i, strncmp, + (CPU_FEATURE_USABLE (AVX512VL) + && CPU_FEATURE_USABLE (AVX512BW)), + __strncmp_evex) IFUNC_IMPL_ADD (array, i, strncmp, CPU_FEATURE_USABLE (SSE4_2), __strncmp_sse42) IFUNC_IMPL_ADD (array, i, strncmp, CPU_FEATURE_USABLE (SSSE3), @@ -582,6 +939,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, IFUNC_IMPL_ADD (array, i, __wmemset_chk, CPU_FEATURE_USABLE (AVX2), __wmemset_chk_avx2_unaligned) + IFUNC_IMPL_ADD (array, i, __wmemset_chk, + CPU_FEATURE_USABLE (AVX512VL), + __wmemset_chk_evex_unaligned) IFUNC_IMPL_ADD (array, i, __wmemset_chk, CPU_FEATURE_USABLE (AVX512F), __wmemset_chk_avx512_unaligned)) diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmp.h b/sysdeps/x86_64/multiarch/ifunc-memcmp.h index d5df541ec..8bee1aff7 100644 --- a/sysdeps/x86_64/multiarch/ifunc-memcmp.h +++ b/sysdeps/x86_64/multiarch/ifunc-memcmp.h @@ -23,17 +23,28 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (sse4_1) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_movbe_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_movbe) attribute_hidden; static inline void * IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURE_USABLE_P (cpu_features, MOVBE) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2_movbe); + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)) + return OPTIMIZE (evex_movbe); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_movbe_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2_movbe); + } if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1)) return OPTIMIZE (sse4_1); diff --git a/sysdeps/x86_64/multiarch/ifunc-memmove.h b/sysdeps/x86_64/multiarch/ifunc-memmove.h index bf42a555d..a14718a97 100644 --- a/sysdeps/x86_64/multiarch/ifunc-memmove.h +++ b/sysdeps/x86_64/multiarch/ifunc-memmove.h @@ -29,6 +29,14 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3_back) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_rtm) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx_unaligned_erms_rtm) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE 
(evex_unaligned_erms) + attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms) @@ -48,21 +56,42 @@ IFUNC_SELECTOR (void) if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F) && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)) { - if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) - return OPTIMIZE (avx512_no_vzeroupper); + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (avx512_unaligned_erms); - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) - return OPTIMIZE (avx512_unaligned_erms); + return OPTIMIZE (avx512_unaligned); + } - return OPTIMIZE (avx512_unaligned); + return OPTIMIZE (avx512_no_vzeroupper); } if (CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) { - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) - return OPTIMIZE (avx_unaligned_erms); + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (evex_unaligned_erms); + + return OPTIMIZE (evex_unaligned); + } + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (avx_unaligned_erms_rtm); + + return OPTIMIZE (avx_unaligned_rtm); + } + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (avx_unaligned_erms); - return OPTIMIZE (avx_unaligned); + return OPTIMIZE (avx_unaligned); + } } if (!CPU_FEATURE_USABLE_P (cpu_features, SSSE3) diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h index 0ac6b1188..502f946a8 100644 --- a/sysdeps/x86_64/multiarch/ifunc-memset.h +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h @@ -27,6 +27,14 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms) extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms_rtm) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms) + attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms) @@ -45,21 +53,44 @@ IFUNC_SELECTOR (void) if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F) && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)) { - if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) - return OPTIMIZE (avx512_no_vzeroupper); + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (avx512_unaligned_erms); - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) - return OPTIMIZE (avx512_unaligned_erms); + return OPTIMIZE (avx512_unaligned); + } - return OPTIMIZE (avx512_unaligned); + return OPTIMIZE (avx512_no_vzeroupper); } if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)) { - if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) - return OPTIMIZE (avx2_unaligned_erms); - else - return OPTIMIZE (avx2_unaligned); + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)) + { + if (CPU_FEATURE_USABLE_P 
(cpu_features, ERMS)) + return OPTIMIZE (evex_unaligned_erms); + + return OPTIMIZE (evex_unaligned); + } + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (avx2_unaligned_erms_rtm); + + return OPTIMIZE (avx2_unaligned_rtm); + } + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) + return OPTIMIZE (avx2_unaligned_erms); + + return OPTIMIZE (avx2_unaligned); + } } if (CPU_FEATURE_USABLE_P (cpu_features, ERMS)) diff --git a/sysdeps/x86_64/multiarch/ifunc-strcpy.h b/sysdeps/x86_64/multiarch/ifunc-strcpy.h index 1100cd23c..39568f480 100644 --- a/sysdeps/x86_64/multiarch/ifunc-strcpy.h +++ b/sysdeps/x86_64/multiarch/ifunc-strcpy.h @@ -25,16 +25,27 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden; static inline void * IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2); + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)) + return OPTIMIZE (evex); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2); + } if (CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Load)) return OPTIMIZE (sse2_unaligned); diff --git a/sysdeps/x86_64/multiarch/ifunc-wcslen.h b/sysdeps/x86_64/multiarch/ifunc-wcslen.h new file mode 100644 index 000000000..39e334737 --- /dev/null +++ b/sysdeps/x86_64/multiarch/ifunc-wcslen.h @@ -0,0 +1,52 @@ +/* Common definition for ifunc selections for wcslen and wcsnlen + All versions must be listed in ifunc-impl-list.c. + Copyright (C) 2017-2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include + +extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (sse4_1) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden; + +static inline void * +IFUNC_SELECTOR (void) +{ + const struct cpu_features* cpu_features = __get_cpu_features (); + + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) + && CPU_FEATURE_USABLE_P (cpu_features, BMI2) + && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)) + return OPTIMIZE (evex); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2); + } + + if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1)) + return OPTIMIZE (sse4_1); + + return OPTIMIZE (sse2); +} diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h index c1b0c2254..756f0ccdb 100644 --- a/sysdeps/x86_64/multiarch/ifunc-wmemset.h +++ b/sysdeps/x86_64/multiarch/ifunc-wmemset.h @@ -20,6 +20,9 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm) + attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden; static inline void * @@ -27,14 +30,21 @@ IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) { - if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F) - && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)) - return OPTIMIZE (avx512_unaligned); - else + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)) + { + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)) + return OPTIMIZE (avx512_unaligned); + + return OPTIMIZE (evex_unaligned); + } + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_unaligned_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) return OPTIMIZE (avx2_unaligned); } diff --git a/sysdeps/x86_64/multiarch/memchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/memchr-avx2-rtm.S new file mode 100644 index 000000000..87b076c7c --- /dev/null +++ b/sysdeps/x86_64/multiarch/memchr-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef MEMCHR +# define MEMCHR __memchr_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "memchr-avx2.S" diff --git a/sysdeps/x86_64/multiarch/memchr-avx2.S b/sysdeps/x86_64/multiarch/memchr-avx2.S index 77a952316..afdb95650 100644 --- a/sysdeps/x86_64/multiarch/memchr-avx2.S +++ b/sysdeps/x86_64/multiarch/memchr-avx2.S @@ -26,319 +26,407 @@ # ifdef USE_AS_WMEMCHR # define VPCMPEQ vpcmpeqd +# define VPBROADCAST vpbroadcastd +# define CHAR_SIZE 4 # else # define VPCMPEQ vpcmpeqb +# define VPBROADCAST vpbroadcastb +# define CHAR_SIZE 1 +# endif + +# ifdef 
USE_AS_RAWMEMCHR +# define ERAW_PTR_REG ecx +# define RRAW_PTR_REG rcx +# define ALGN_PTR_REG rdi +# else +# define ERAW_PTR_REG edi +# define RRAW_PTR_REG rdi +# define ALGN_PTR_REG rcx # endif # ifndef VZEROUPPER # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + # define VEC_SIZE 32 +# define PAGE_SIZE 4096 +# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE) - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (MEMCHR) # ifndef USE_AS_RAWMEMCHR /* Check for zero length. */ +# ifdef __ILP32__ + /* Clear upper bits. */ + and %RDX_LP, %RDX_LP +# else test %RDX_LP, %RDX_LP +# endif jz L(null) # endif - movl %edi, %ecx - /* Broadcast CHAR to YMM0. */ + /* Broadcast CHAR to YMMMATCH. */ vmovd %esi, %xmm0 -# ifdef USE_AS_WMEMCHR - shl $2, %RDX_LP - vpbroadcastd %xmm0, %ymm0 -# else -# ifdef __ILP32__ - /* Clear the upper 32 bits. */ - movl %edx, %edx -# endif - vpbroadcastb %xmm0, %ymm0 -# endif + VPBROADCAST %xmm0, %ymm0 /* Check if we may cross page boundary with one vector load. */ - andl $(2 * VEC_SIZE - 1), %ecx - cmpl $VEC_SIZE, %ecx - ja L(cros_page_boundary) + movl %edi, %eax + andl $(PAGE_SIZE - 1), %eax + cmpl $(PAGE_SIZE - VEC_SIZE), %eax + ja L(cross_page_boundary) /* Check the first VEC_SIZE bytes. */ - VPCMPEQ (%rdi), %ymm0, %ymm1 + VPCMPEQ (%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax - testl %eax, %eax - # ifndef USE_AS_RAWMEMCHR - jnz L(first_vec_x0_check) - /* Adjust length and check the end of data. */ - subq $VEC_SIZE, %rdx - jbe L(zero) -# else - jnz L(first_vec_x0) + /* If length < CHAR_PER_VEC handle special. */ + cmpq $CHAR_PER_VEC, %rdx + jbe L(first_vec_x0) # endif - - /* Align data for aligned loads in the loop. */ - addq $VEC_SIZE, %rdi - andl $(VEC_SIZE - 1), %ecx - andq $-VEC_SIZE, %rdi + testl %eax, %eax + jz L(aligned_more) + tzcntl %eax, %eax + addq %rdi, %rax + VZEROUPPER_RETURN # ifndef USE_AS_RAWMEMCHR - /* Adjust length. */ - addq %rcx, %rdx + .p2align 5 +L(first_vec_x0): + /* Check if first match was before length. */ + tzcntl %eax, %eax +# ifdef USE_AS_WMEMCHR + /* NB: Multiply length by 4 to get byte count. */ + sall $2, %edx +# endif + xorl %ecx, %ecx + cmpl %eax, %edx + leaq (%rdi, %rax), %rax + cmovle %rcx, %rax + VZEROUPPER_RETURN - subq $(VEC_SIZE * 4), %rdx - jbe L(last_4x_vec_or_less) +L(null): + xorl %eax, %eax + ret # endif - jmp L(more_4x_vec) - .p2align 4 -L(cros_page_boundary): - andl $(VEC_SIZE - 1), %ecx - andq $-VEC_SIZE, %rdi - VPCMPEQ (%rdi), %ymm0, %ymm1 +L(cross_page_boundary): + /* Save pointer before aligning as its original value is + necessary for computer return address if byte is found or + adjusting length if it is not and this is memchr. */ + movq %rdi, %rcx + /* Align data to VEC_SIZE - 1. ALGN_PTR_REG is rcx for memchr + and rdi for rawmemchr. */ + orq $(VEC_SIZE - 1), %ALGN_PTR_REG + VPCMPEQ -(VEC_SIZE - 1)(%ALGN_PTR_REG), %ymm0, %ymm1 vpmovmskb %ymm1, %eax +# ifndef USE_AS_RAWMEMCHR + /* Calculate length until end of page (length checked for a + match). */ + leaq 1(%ALGN_PTR_REG), %rsi + subq %RRAW_PTR_REG, %rsi +# ifdef USE_AS_WMEMCHR + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %esi +# endif +# endif /* Remove the leading bytes. */ - sarl %cl, %eax - testl %eax, %eax - jz L(aligned_more) - tzcntl %eax, %eax + sarxl %ERAW_PTR_REG, %eax, %eax # ifndef USE_AS_RAWMEMCHR /* Check the end of data. 
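A self-contained C model of this page-cross entry may help; it is illustrative only, and vec_eq_mask / memchr_page_cross_model are names invented for the sketch, not glibc symbols:

#include <stddef.h>
#include <stdint.h>

// Scalar stand-in for VPCMPEQ + vpmovmskb over one 32-byte vector.
static uint32_t
vec_eq_mask (const unsigned char *p, unsigned char c)
{
  uint32_t m = 0;
  for (int i = 0; i < 32; i++)
    m |= (uint32_t) (p[i] == c) << i;
  return m;
}

// An aligned 32-byte load cannot fault past the end of the page, so
// align down, take the match mask, and shift out the bits that belong
// to bytes before the real start pointer.
static const void *
memchr_page_cross_model (const unsigned char *s, unsigned char c, size_t n)
{
  const unsigned char *aligned
    = (const unsigned char *) ((uintptr_t) s & ~(uintptr_t) 31);
  uint32_t mask = vec_eq_mask (aligned, c) >> (s - aligned);
  if (mask == 0)
    return NULL;                           // no hit; caller continues aligned
  size_t idx = (size_t) __builtin_ctz (mask);   // tzcnt equivalent
  return idx < n ? s + idx : NULL;         // enforce the length bound
}

(__builtin_ctz is the GCC/Clang builtin standing in for tzcnt here.)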
*/ - cmpq %rax, %rdx - jbe L(zero) + cmpq %rsi, %rdx + jbe L(first_vec_x0) # endif + testl %eax, %eax + jz L(cross_page_continue) + tzcntl %eax, %eax + addq %RRAW_PTR_REG, %rax +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN + + .p2align 4 +L(first_vec_x1): + tzcntl %eax, %eax + incq %rdi addq %rdi, %rax - addq %rcx, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 -L(aligned_more): -# ifndef USE_AS_RAWMEMCHR - /* Calculate "rdx + rcx - VEC_SIZE" with "rdx - (VEC_SIZE - rcx)" - instead of "(rdx + rcx) - VEC_SIZE" to void possible addition - overflow. */ - negq %rcx - addq $VEC_SIZE, %rcx +L(first_vec_x2): + tzcntl %eax, %eax + addq $(VEC_SIZE + 1), %rdi + addq %rdi, %rax + VZEROUPPER_RETURN - /* Check the end of data. */ - subq %rcx, %rdx - jbe L(zero) -# endif + .p2align 4 +L(first_vec_x3): + tzcntl %eax, %eax + addq $(VEC_SIZE * 2 + 1), %rdi + addq %rdi, %rax + VZEROUPPER_RETURN - addq $VEC_SIZE, %rdi -# ifndef USE_AS_RAWMEMCHR - subq $(VEC_SIZE * 4), %rdx - jbe L(last_4x_vec_or_less) -# endif + .p2align 4 +L(first_vec_x4): + tzcntl %eax, %eax + addq $(VEC_SIZE * 3 + 1), %rdi + addq %rdi, %rax + VZEROUPPER_RETURN -L(more_4x_vec): + .p2align 4 +L(aligned_more): /* Check the first 4 * VEC_SIZE. Only one VEC_SIZE at a time since data is only aligned to VEC_SIZE. */ - VPCMPEQ (%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x0) - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1 +# ifndef USE_AS_RAWMEMCHR +L(cross_page_continue): + /* Align data to VEC_SIZE - 1. */ + xorl %ecx, %ecx + subl %edi, %ecx + orq $(VEC_SIZE - 1), %rdi + /* esi is for adjusting length to see if near the end. */ + leal (VEC_SIZE * 4 + 1)(%rdi, %rcx), %esi +# ifdef USE_AS_WMEMCHR + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %esi +# endif +# else + orq $(VEC_SIZE - 1), %rdi +L(cross_page_continue): +# endif + /* Load first VEC regardless. */ + VPCMPEQ 1(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax +# ifndef USE_AS_RAWMEMCHR + /* Adjust length. If near end handle specially. */ + subq %rsi, %rdx + jbe L(last_4x_vec_or_less) +# endif testl %eax, %eax jnz L(first_vec_x1) - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1 + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax testl %eax, %eax jnz L(first_vec_x2) - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1 + VPCMPEQ (VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax testl %eax, %eax jnz L(first_vec_x3) - addq $(VEC_SIZE * 4), %rdi - -# ifndef USE_AS_RAWMEMCHR - subq $(VEC_SIZE * 4), %rdx - jbe L(last_4x_vec_or_less) -# endif - - /* Align data to 4 * VEC_SIZE. */ - movq %rdi, %rcx - andl $(4 * VEC_SIZE - 1), %ecx - andq $-(4 * VEC_SIZE), %rdi + VPCMPEQ (VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + testl %eax, %eax + jnz L(first_vec_x4) # ifndef USE_AS_RAWMEMCHR - /* Adjust length. */ + /* Check if at last VEC_SIZE * 4 length. */ + subq $(CHAR_PER_VEC * 4), %rdx + jbe L(last_4x_vec_or_less_cmpeq) + /* Align data to VEC_SIZE * 4 - 1 for the loop and readjust + length. */ + incq %rdi + movl %edi, %ecx + orq $(VEC_SIZE * 4 - 1), %rdi + andl $(VEC_SIZE * 4 - 1), %ecx +# ifdef USE_AS_WMEMCHR + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %ecx +# endif addq %rcx, %rdx +# else + /* Align data to VEC_SIZE * 4 - 1 for loop. */ + incq %rdi + orq $(VEC_SIZE * 4 - 1), %rdi # endif + /* Compare 4 * VEC at a time forward. */ .p2align 4 L(loop_4x_vec): - /* Compare 4 * VEC at a time forward. 
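The loop body that follows ORs the four compare results together so that a single vpmovmskb plus test answers "any match in these 128 bytes?"; only on a hit does it re-test the individual vectors. A minimal C model of that reduction (illustrative, not glibc code):

// One combined test for four 32-byte vectors; on a hit the asm then
// re-tests each vector separately to locate the match.
static int
any_match_4x_model (const unsigned char *p, unsigned char c)
{
  int any = 0;
  for (int i = 0; i < 4 * 32; i++)   // vpor tree + single vpmovmskb
    any |= (p[i] == c);
  return any;
}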
*/ - VPCMPEQ (%rdi), %ymm0, %ymm1 - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm2 - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm3 - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm4 - + VPCMPEQ 1(%rdi), %ymm0, %ymm1 + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm2 + VPCMPEQ (VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm3 + VPCMPEQ (VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm4 vpor %ymm1, %ymm2, %ymm5 vpor %ymm3, %ymm4, %ymm6 vpor %ymm5, %ymm6, %ymm5 - vpmovmskb %ymm5, %eax - testl %eax, %eax - jnz L(4x_vec_end) - - addq $(VEC_SIZE * 4), %rdi - + vpmovmskb %ymm5, %ecx # ifdef USE_AS_RAWMEMCHR - jmp L(loop_4x_vec) + subq $-(VEC_SIZE * 4), %rdi + testl %ecx, %ecx + jz L(loop_4x_vec) # else - subq $(VEC_SIZE * 4), %rdx - ja L(loop_4x_vec) + testl %ecx, %ecx + jnz L(loop_4x_vec_end) -L(last_4x_vec_or_less): - /* Less than 4 * VEC and aligned to VEC_SIZE. */ - addl $(VEC_SIZE * 2), %edx - jle L(last_2x_vec) + subq $-(VEC_SIZE * 4), %rdi - VPCMPEQ (%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x0) + subq $(CHAR_PER_VEC * 4), %rdx + ja L(loop_4x_vec) - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1 + /* Fall through into less than 4 remaining vectors of length + case. */ + VPCMPEQ (VEC_SIZE * 0 + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax + .p2align 4 +L(last_4x_vec_or_less): +# ifdef USE_AS_WMEMCHR + /* NB: Multiply length by 4 to get byte count. */ + sall $2, %edx +# endif + /* Check if first VEC contained match. */ testl %eax, %eax - jnz L(first_vec_x1) + jnz L(first_vec_x1_check) - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - testl %eax, %eax + /* If remaining length > VEC_SIZE * 2. */ + addl $(VEC_SIZE * 2), %edx + jg L(last_4x_vec) - jnz L(first_vec_x2_check) - subl $VEC_SIZE, %edx - jle L(zero) +L(last_2x_vec): + /* If remaining length < VEC_SIZE. */ + addl $VEC_SIZE, %edx + jle L(zero_end) - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1 + /* Check VEC2 and compare any match with remaining length. */ + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax - testl %eax, %eax - - jnz L(first_vec_x3_check) - xorl %eax, %eax - VZEROUPPER - ret + tzcntl %eax, %eax + cmpl %eax, %edx + jbe L(set_zero_end) + addq $(VEC_SIZE + 1), %rdi + addq %rdi, %rax +L(zero_end): + VZEROUPPER_RETURN .p2align 4 -L(last_2x_vec): - addl $(VEC_SIZE * 2), %edx - VPCMPEQ (%rdi), %ymm0, %ymm1 +L(loop_4x_vec_end): +# endif + /* rawmemchr will fall through into this if match was found in + loop. */ + vpmovmskb %ymm1, %eax testl %eax, %eax + jnz L(last_vec_x1_return) - jnz L(first_vec_x0_check) - subl $VEC_SIZE, %edx - jle L(zero) - - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax + vpmovmskb %ymm2, %eax testl %eax, %eax - jnz L(first_vec_x1_check) - xorl %eax, %eax - VZEROUPPER - ret + jnz L(last_vec_x2_return) - .p2align 4 -L(first_vec_x0_check): - tzcntl %eax, %eax - /* Check the end of data. */ - cmpq %rax, %rdx - jbe L(zero) + vpmovmskb %ymm3, %eax + /* Combine VEC3 matches (eax) with VEC4 matches (ecx). */ + salq $32, %rcx + orq %rcx, %rax + tzcntq %rax, %rax +# ifdef USE_AS_RAWMEMCHR + subq $(VEC_SIZE * 2 - 1), %rdi +# else + subq $-(VEC_SIZE * 2 + 1), %rdi +# endif addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN +# ifndef USE_AS_RAWMEMCHR .p2align 4 L(first_vec_x1_check): tzcntl %eax, %eax - /* Check the end of data. */ - cmpq %rax, %rdx - jbe L(zero) - addq $VEC_SIZE, %rax + /* Adjust length. */ + subl $-(VEC_SIZE * 4), %edx + /* Check if match within remaining length. 
*/ + cmpl %eax, %edx + jbe L(set_zero_end) + incq %rdi addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN + .p2align 4 +L(set_zero_end): + xorl %eax, %eax + VZEROUPPER_RETURN +# endif .p2align 4 -L(first_vec_x2_check): +L(last_vec_x1_return): tzcntl %eax, %eax - /* Check the end of data. */ - cmpq %rax, %rdx - jbe L(zero) - addq $(VEC_SIZE * 2), %rax +# ifdef USE_AS_RAWMEMCHR + subq $(VEC_SIZE * 4 - 1), %rdi +# else + incq %rdi +# endif addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 -L(first_vec_x3_check): +L(last_vec_x2_return): tzcntl %eax, %eax - /* Check the end of data. */ - cmpq %rax, %rdx - jbe L(zero) - addq $(VEC_SIZE * 3), %rax +# ifdef USE_AS_RAWMEMCHR + subq $(VEC_SIZE * 3 - 1), %rdi +# else + subq $-(VEC_SIZE + 1), %rdi +# endif addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN +# ifndef USE_AS_RAWMEMCHR .p2align 4 -L(zero): - VZEROUPPER -L(null): - xorl %eax, %eax - ret -# endif +L(last_4x_vec_or_less_cmpeq): + VPCMPEQ (VEC_SIZE * 4 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax +# ifdef USE_AS_WMEMCHR + /* NB: Multiply length by 4 to get byte count. */ + sall $2, %edx +# endif + subq $-(VEC_SIZE * 4), %rdi + /* Check first VEC regardless. */ + testl %eax, %eax + jnz L(first_vec_x1_check) + /* If remaining length <= CHAR_PER_VEC * 2. */ + addl $(VEC_SIZE * 2), %edx + jle L(last_2x_vec) .p2align 4 -L(first_vec_x0): - tzcntl %eax, %eax - addq %rdi, %rax - VZEROUPPER - ret +L(last_4x_vec): + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + testl %eax, %eax + jnz L(last_vec_x2_return) - .p2align 4 -L(first_vec_x1): - tzcntl %eax, %eax - addq $VEC_SIZE, %rax - addq %rdi, %rax - VZEROUPPER - ret + VPCMPEQ (VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax - .p2align 4 -L(first_vec_x2): + /* Create mask for possible matches within remaining length. */ + movq $-1, %rcx + bzhiq %rdx, %rcx, %rcx + + /* Test matches in data against length match. */ + andl %ecx, %eax + jnz L(last_vec_x3) + + /* if remaining length <= VEC_SIZE * 3 (Note this is after + remaining length was found to be > VEC_SIZE * 2. */ + subl $VEC_SIZE, %edx + jbe L(zero_end2) + + VPCMPEQ (VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + /* Shift remaining length mask for last VEC. */ + shrq $32, %rcx + andl %ecx, %eax + jz L(zero_end2) tzcntl %eax, %eax - addq $(VEC_SIZE * 2), %rax + addq $(VEC_SIZE * 3 + 1), %rdi addq %rdi, %rax - VZEROUPPER - ret +L(zero_end2): + VZEROUPPER_RETURN .p2align 4 -L(4x_vec_end): - vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x0) - vpmovmskb %ymm2, %eax - testl %eax, %eax - jnz L(first_vec_x1) - vpmovmskb %ymm3, %eax - testl %eax, %eax - jnz L(first_vec_x2) - vpmovmskb %ymm4, %eax - testl %eax, %eax -L(first_vec_x3): +L(last_vec_x3): tzcntl %eax, %eax - addq $(VEC_SIZE * 3), %rax + subq $-(VEC_SIZE * 2 + 1), %rdi addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN +# endif END (MEMCHR) #endif diff --git a/sysdeps/x86_64/multiarch/memchr-evex.S b/sysdeps/x86_64/multiarch/memchr-evex.S new file mode 100644 index 000000000..f3fdad4fd --- /dev/null +++ b/sysdeps/x86_64/multiarch/memchr-evex.S @@ -0,0 +1,478 @@ +/* memchr/wmemchr optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# ifndef MEMCHR +# define MEMCHR __memchr_evex +# endif + +# ifdef USE_AS_WMEMCHR +# define VPBROADCAST vpbroadcastd +# define VPMINU vpminud +# define VPCMP vpcmpd +# define VPCMPEQ vpcmpeqd +# define CHAR_SIZE 4 +# else +# define VPBROADCAST vpbroadcastb +# define VPMINU vpminub +# define VPCMP vpcmpb +# define VPCMPEQ vpcmpeqb +# define CHAR_SIZE 1 +# endif + +# ifdef USE_AS_RAWMEMCHR +# define RAW_PTR_REG rcx +# define ALGN_PTR_REG rdi +# else +# define RAW_PTR_REG rdi +# define ALGN_PTR_REG rcx +# endif + +# define XMMZERO xmm23 +# define YMMZERO ymm23 +# define XMMMATCH xmm16 +# define YMMMATCH ymm16 +# define YMM1 ymm17 +# define YMM2 ymm18 +# define YMM3 ymm19 +# define YMM4 ymm20 +# define YMM5 ymm21 +# define YMM6 ymm22 + +# define VEC_SIZE 32 +# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE) +# define PAGE_SIZE 4096 + + .section .text.evex,"ax",@progbits +ENTRY (MEMCHR) +# ifndef USE_AS_RAWMEMCHR + /* Check for zero length. */ + test %RDX_LP, %RDX_LP + jz L(zero) + +# ifdef __ILP32__ + /* Clear the upper 32 bits. */ + movl %edx, %edx +# endif +# endif + /* Broadcast CHAR to YMMMATCH. */ + VPBROADCAST %esi, %YMMMATCH + /* Check if we may cross page boundary with one vector load. */ + movl %edi, %eax + andl $(PAGE_SIZE - 1), %eax + cmpl $(PAGE_SIZE - VEC_SIZE), %eax + ja L(cross_page_boundary) + + /* Check the first VEC_SIZE bytes. */ + VPCMP $0, (%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax +# ifndef USE_AS_RAWMEMCHR + /* If length < CHAR_PER_VEC handle special. */ + cmpq $CHAR_PER_VEC, %rdx + jbe L(first_vec_x0) +# endif + testl %eax, %eax + jz L(aligned_more) + tzcntl %eax, %eax +# ifdef USE_AS_WMEMCHR + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (%rdi, %rax, CHAR_SIZE), %rax +# else + addq %rdi, %rax +# endif + ret + +# ifndef USE_AS_RAWMEMCHR +L(zero): + xorl %eax, %eax + ret + + .p2align 5 +L(first_vec_x0): + /* Check if first match was before length. */ + tzcntl %eax, %eax + xorl %ecx, %ecx + cmpl %eax, %edx + leaq (%rdi, %rax, CHAR_SIZE), %rax + cmovle %rcx, %rax + ret +# else + /* NB: first_vec_x0 is 17 bytes which will leave + cross_page_boundary (which is relatively cold) close enough + to ideal alignment. So only realign L(cross_page_boundary) if + rawmemchr. */ + .p2align 4 +# endif +L(cross_page_boundary): + /* Save pointer before aligning as its original value is + necessary for computer return address if byte is found or + adjusting length if it is not and this is memchr. */ + movq %rdi, %rcx + /* Align data to VEC_SIZE. ALGN_PTR_REG is rcx for memchr and rdi + for rawmemchr. */ + andq $-VEC_SIZE, %ALGN_PTR_REG + VPCMP $0, (%ALGN_PTR_REG), %YMMMATCH, %k0 + kmovd %k0, %r8d +# ifdef USE_AS_WMEMCHR + /* NB: Divide shift count by 4 since each bit in K0 represent 4 + bytes. 
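Put differently, with 4-byte elements each bit of the k-mask stands for one wchar_t, so a bit index scales by 4 on the way to a byte offset, and a byte-based shift count is divided by 4 first. A tiny illustrative helper (wmemchr_hit_model is invented for this sketch; assumes <stdint.h>):

// kmask must be nonzero; pointer arithmetic on uint32_t * scales by 4,
// matching the leaq (..., %rax, CHAR_SIZE) forms used below.
static const uint32_t *
wmemchr_hit_model (const uint32_t *base, uint32_t kmask)
{
  return base + __builtin_ctz (kmask);
}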
*/ + sarl $2, %eax +# endif +# ifndef USE_AS_RAWMEMCHR + movl $(PAGE_SIZE / CHAR_SIZE), %esi + subl %eax, %esi +# endif +# ifdef USE_AS_WMEMCHR + andl $(CHAR_PER_VEC - 1), %eax +# endif + /* Remove the leading bytes. */ + sarxl %eax, %r8d, %eax +# ifndef USE_AS_RAWMEMCHR + /* Check the end of data. */ + cmpq %rsi, %rdx + jbe L(first_vec_x0) +# endif + testl %eax, %eax + jz L(cross_page_continue) + tzcntl %eax, %eax +# ifdef USE_AS_WMEMCHR + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (%RAW_PTR_REG, %rax, CHAR_SIZE), %rax +# else + addq %RAW_PTR_REG, %rax +# endif + ret + + .p2align 4 +L(first_vec_x1): + tzcntl %eax, %eax + leaq VEC_SIZE(%rdi, %rax, CHAR_SIZE), %rax + ret + + .p2align 4 +L(first_vec_x2): + tzcntl %eax, %eax + leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax + ret + + .p2align 4 +L(first_vec_x3): + tzcntl %eax, %eax + leaq (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax + ret + + .p2align 4 +L(first_vec_x4): + tzcntl %eax, %eax + leaq (VEC_SIZE * 4)(%rdi, %rax, CHAR_SIZE), %rax + ret + + .p2align 5 +L(aligned_more): + /* Check the first 4 * VEC_SIZE. Only one VEC_SIZE at a time since data is only aligned to VEC_SIZE. */ + +# ifndef USE_AS_RAWMEMCHR + /* Align data to VEC_SIZE. */ +L(cross_page_continue): + xorl %ecx, %ecx + subl %edi, %ecx + andq $-VEC_SIZE, %rdi + /* esi is for adjusting length to see if near the end. */ + leal (VEC_SIZE * 5)(%rdi, %rcx), %esi +# ifdef USE_AS_WMEMCHR + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %esi +# endif +# else + andq $-VEC_SIZE, %rdi +L(cross_page_continue): +# endif + /* Load first VEC regardless. */ + VPCMP $0, (VEC_SIZE)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax +# ifndef USE_AS_RAWMEMCHR + /* Adjust length. If near end handle specially. */ + subq %rsi, %rdx + jbe L(last_4x_vec_or_less) +# endif + testl %eax, %eax + jnz L(first_vec_x1) + + VPCMP $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x2) + + VPCMP $0, (VEC_SIZE * 3)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x3) + + VPCMP $0, (VEC_SIZE * 4)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x4) + + +# ifndef USE_AS_RAWMEMCHR + /* Check if within the last CHAR_PER_VEC * 4 characters. */ + subq $(CHAR_PER_VEC * 4), %rdx + jbe L(last_4x_vec_or_less_cmpeq) + addq $VEC_SIZE, %rdi + + /* Align data to VEC_SIZE * 4 for the loop and readjust length. */ +# ifdef USE_AS_WMEMCHR + movl %edi, %ecx + andq $-(4 * VEC_SIZE), %rdi + andl $(VEC_SIZE * 4 - 1), %ecx + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %ecx + addq %rcx, %rdx +# else + addq %rdi, %rdx + andq $-(4 * VEC_SIZE), %rdi + subq %rdi, %rdx +# endif +# else + addq $VEC_SIZE, %rdi + andq $-(4 * VEC_SIZE), %rdi +# endif + + vpxorq %XMMZERO, %XMMZERO, %XMMZERO + + /* Compare 4 * VEC at a time forward. */ + .p2align 4 +L(loop_4x_vec): + /* It would be possible to save some instructions using 4x VPCMP but bottleneck on port 5 makes it not worth it. */ + VPCMP $4, (VEC_SIZE * 4)(%rdi), %YMMMATCH, %k1 + /* xor will set bytes that match the CHAR in esi to zero. */ + vpxorq (VEC_SIZE * 5)(%rdi), %YMMMATCH, %YMM2 + vpxorq (VEC_SIZE * 6)(%rdi), %YMMMATCH, %YMM3 + VPCMP $0, (VEC_SIZE * 7)(%rdi), %YMMMATCH, %k3 + /* Reduce VEC2 / VEC3 with min and VEC1 with zero mask.
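A per-lane C model of this combine step may make it easier to follow: lanes where VEC1 matched are forced to zero through the {k1}{z} zero-mask (k1 holds the not-equal mask from VPCMP $4), lanes where VEC2 or VEC3 matched are already zero after the XOR, so one "is any lane zero?" compare covers three vectors, and VEC4 keeps its own mask k3. Illustrative only, not glibc code:

static int
combine_model (const unsigned char v1[32], const unsigned char v2[32],
               const unsigned char v3[32], unsigned char c,
               unsigned char out[32])
{
  int any_zero = 0;
  for (int i = 0; i < 32; i++)
    {
      unsigned char a = v2[i] ^ c;          // 0 iff VEC2 lane matches
      unsigned char b = v3[i] ^ c;          // 0 iff VEC3 lane matches
      unsigned char m = a < b ? a : b;      // VPMINU
      out[i] = (v1[i] != c) ? m : 0;        // {k1}{z} zero-masking
      any_zero |= (out[i] == 0);            // VPCMP $0 against YMMZERO
    }
  return any_zero;                          // kortestd %k2, %k3 adds VEC4
}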
*/ + VPMINU %YMM2, %YMM3, %YMM3{%k1}{z} + VPCMP $0, %YMM3, %YMMZERO, %k2 +# ifdef USE_AS_RAWMEMCHR + subq $-(VEC_SIZE * 4), %rdi + kortestd %k2, %k3 + jz L(loop_4x_vec) +# else + kortestd %k2, %k3 + jnz L(loop_4x_vec_end) + + subq $-(VEC_SIZE * 4), %rdi + + subq $(CHAR_PER_VEC * 4), %rdx + ja L(loop_4x_vec) + + /* Fall through into less than 4 remaining vectors of length case. + */ + VPCMP $0, (VEC_SIZE * 4)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + addq $(VEC_SIZE * 3), %rdi + .p2align 4 +L(last_4x_vec_or_less): + /* Check if first VEC contained match. */ + testl %eax, %eax + jnz L(first_vec_x1_check) + + /* If remaining length > CHAR_PER_VEC * 2. */ + addl $(CHAR_PER_VEC * 2), %edx + jg L(last_4x_vec) + +L(last_2x_vec): + /* If remaining length < CHAR_PER_VEC. */ + addl $CHAR_PER_VEC, %edx + jle L(zero_end) + + /* Check VEC2 and compare any match with remaining length. */ + VPCMP $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + tzcntl %eax, %eax + cmpl %eax, %edx + jbe L(set_zero_end) + leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax +L(zero_end): + ret + + + .p2align 4 +L(first_vec_x1_check): + tzcntl %eax, %eax + /* Adjust length. */ + subl $-(CHAR_PER_VEC * 4), %edx + /* Check if match within remaining length. */ + cmpl %eax, %edx + jbe L(set_zero_end) + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq VEC_SIZE(%rdi, %rax, CHAR_SIZE), %rax + ret +L(set_zero_end): + xorl %eax, %eax + ret + + .p2align 4 +L(loop_4x_vec_end): +# endif + /* rawmemchr will fall through into this if match was found in + loop. */ + + /* k1 has not of matches with VEC1. */ + kmovd %k1, %eax +# ifdef USE_AS_WMEMCHR + subl $((1 << CHAR_PER_VEC) - 1), %eax +# else + incl %eax +# endif + jnz L(last_vec_x1_return) + + VPCMP $0, %YMM2, %YMMZERO, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(last_vec_x2_return) + + kmovd %k2, %eax + testl %eax, %eax + jnz L(last_vec_x3_return) + + kmovd %k3, %eax + tzcntl %eax, %eax +# ifdef USE_AS_RAWMEMCHR + leaq (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax +# else + leaq (VEC_SIZE * 7)(%rdi, %rax, CHAR_SIZE), %rax +# endif + ret + + .p2align 4 +L(last_vec_x1_return): + tzcntl %eax, %eax +# ifdef USE_AS_RAWMEMCHR +# ifdef USE_AS_WMEMCHR + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (%rdi, %rax, CHAR_SIZE), %rax +# else + addq %rdi, %rax +# endif +# else + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (VEC_SIZE * 4)(%rdi, %rax, CHAR_SIZE), %rax +# endif + ret + + .p2align 4 +L(last_vec_x2_return): + tzcntl %eax, %eax +# ifdef USE_AS_RAWMEMCHR + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq VEC_SIZE(%rdi, %rax, CHAR_SIZE), %rax +# else + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (VEC_SIZE * 5)(%rdi, %rax, CHAR_SIZE), %rax +# endif + ret + + .p2align 4 +L(last_vec_x3_return): + tzcntl %eax, %eax +# ifdef USE_AS_RAWMEMCHR + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax +# else + /* NB: Multiply bytes by CHAR_SIZE to get the wchar_t count. */ + leaq (VEC_SIZE * 6)(%rdi, %rax, CHAR_SIZE), %rax +# endif + ret + + +# ifndef USE_AS_RAWMEMCHR +L(last_4x_vec_or_less_cmpeq): + VPCMP $0, (VEC_SIZE * 5)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + subq $-(VEC_SIZE * 4), %rdi + /* Check first VEC regardless. */ + testl %eax, %eax + jnz L(first_vec_x1_check) + + /* If remaining length <= CHAR_PER_VEC * 2. 
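The tail paths below prune match bits that fall beyond the remaining length with bzhi; bzhi keeps only the low LEN bits of its source (and the whole value once LEN reaches the operand width). A small C model of that masking step, illustrative only:

static unsigned int
inbounds_mask_model (unsigned int match_mask, unsigned int len)
{
  unsigned int keep = len >= 32 ? ~0u : (1u << len) - 1;  // bzhi semantics
  return match_mask & keep;      // bits at offsets >= len are discarded
}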
*/ + addl $(CHAR_PER_VEC * 2), %edx + jle L(last_2x_vec) + + .p2align 4 +L(last_4x_vec): + VPCMP $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(last_vec_x2) + + + VPCMP $0, (VEC_SIZE * 3)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + /* Create mask for possible matches within remaining length. */ +# ifdef USE_AS_WMEMCHR + movl $((1 << (CHAR_PER_VEC * 2)) - 1), %ecx + bzhil %edx, %ecx, %ecx +# else + movq $-1, %rcx + bzhiq %rdx, %rcx, %rcx +# endif + /* Test matches in data against length match. */ + andl %ecx, %eax + jnz L(last_vec_x3) + + /* if remaining length <= CHAR_PER_VEC * 3 (Note this is after + remaining length was found to be > CHAR_PER_VEC * 2. */ + subl $CHAR_PER_VEC, %edx + jbe L(zero_end2) + + + VPCMP $0, (VEC_SIZE * 4)(%rdi), %YMMMATCH, %k0 + kmovd %k0, %eax + /* Shift remaining length mask for last VEC. */ +# ifdef USE_AS_WMEMCHR + shrl $CHAR_PER_VEC, %ecx +# else + shrq $CHAR_PER_VEC, %rcx +# endif + andl %ecx, %eax + jz L(zero_end2) + tzcntl %eax, %eax + leaq (VEC_SIZE * 4)(%rdi, %rax, CHAR_SIZE), %rax +L(zero_end2): + ret + +L(last_vec_x2): + tzcntl %eax, %eax + leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax + ret + + .p2align 4 +L(last_vec_x3): + tzcntl %eax, %eax + leaq (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax + ret +# endif + +END (MEMCHR) +#endif diff --git a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe-rtm.S b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe-rtm.S new file mode 100644 index 000000000..cf4eff5d4 --- /dev/null +++ b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe-rtm.S @@ -0,0 +1,12 @@ +#ifndef MEMCMP +# define MEMCMP __memcmp_avx2_movbe_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "memcmp-avx2-movbe.S" diff --git a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S index cf9c9b8c1..ad0fa962a 100644 --- a/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S +++ b/sysdeps/x86_64/multiarch/memcmp-avx2-movbe.S @@ -47,6 +47,10 @@ # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + # define VEC_SIZE 32 # define VEC_MASK ((1 << VEC_SIZE) - 1) @@ -55,7 +59,7 @@ memcmp has to use UNSIGNED comparison for elemnts. 
*/ - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (MEMCMP) # ifdef USE_AS_WMEMCMP shl $2, %RDX_LP @@ -123,8 +127,8 @@ ENTRY (MEMCMP) vptest %ymm0, %ymm5 jnc L(4x_vec_end) xorl %eax, %eax - VZEROUPPER - ret +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN .p2align 4 L(last_2x_vec): @@ -144,8 +148,7 @@ L(last_vec): vpmovmskb %ymm2, %eax subl $VEC_MASK, %eax jnz L(first_vec) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(first_vec): @@ -164,8 +167,7 @@ L(wmemcmp_return): movzbl (%rsi, %rcx), %edx sub %edx, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # ifdef USE_AS_WMEMCMP .p2align 4 @@ -367,8 +369,7 @@ L(last_4x_vec): vpmovmskb %ymm2, %eax subl $VEC_MASK, %eax jnz L(first_vec) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(4x_vec_end): @@ -394,8 +395,7 @@ L(4x_vec_end): movzbl (VEC_SIZE * 3)(%rsi, %rcx), %edx sub %edx, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(first_vec_x1): @@ -410,8 +410,7 @@ L(first_vec_x1): movzbl VEC_SIZE(%rsi, %rcx), %edx sub %edx, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(first_vec_x2): @@ -426,7 +425,6 @@ L(first_vec_x2): movzbl (VEC_SIZE * 2)(%rsi, %rcx), %edx sub %edx, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN END (MEMCMP) #endif diff --git a/sysdeps/x86_64/multiarch/memcmp-evex-movbe.S b/sysdeps/x86_64/multiarch/memcmp-evex-movbe.S new file mode 100644 index 000000000..9c093972e --- /dev/null +++ b/sysdeps/x86_64/multiarch/memcmp-evex-movbe.S @@ -0,0 +1,440 @@ +/* memcmp/wmemcmp optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +/* memcmp/wmemcmp is implemented as: + 1. For size from 2 to 7 bytes, load as big endian with movbe and bswap + to avoid branches. + 2. Use overlapping compare to avoid branch. + 3. Use vector compare when size >= 4 bytes for memcmp or size >= 8 + bytes for wmemcmp. + 4. If size is 8 * VEC_SIZE or less, unroll the loop. + 5. Compare 4 * VEC_SIZE at a time with the aligned first memory + area. + 6. Use 2 vector compares when size is 2 * VEC_SIZE or less. + 7. Use 4 vector compares when size is 4 * VEC_SIZE or less. + 8. Use 8 vector compares when size is 8 * VEC_SIZE or less. */ + +# include + +# ifndef MEMCMP +# define MEMCMP __memcmp_evex_movbe +# endif + +# define VMOVU vmovdqu64 + +# ifdef USE_AS_WMEMCMP +# define VPCMPEQ vpcmpeqd +# else +# define VPCMPEQ vpcmpeqb +# endif + +# define XMM1 xmm17 +# define XMM2 xmm18 +# define YMM1 ymm17 +# define YMM2 ymm18 +# define YMM3 ymm19 +# define YMM4 ymm20 +# define YMM5 ymm21 +# define YMM6 ymm22 + +# define VEC_SIZE 32 +# ifdef USE_AS_WMEMCMP +# define VEC_MASK 0xff +# define XMM_MASK 0xf +# else +# define VEC_MASK 0xffffffff +# define XMM_MASK 0xffff +# endif + +/* Warning! + wmemcmp has to use SIGNED comparison for elements. 
+ memcmp has to use UNSIGNED comparison for elements. +*/ + + .section .text.evex,"ax",@progbits +ENTRY (MEMCMP) +# ifdef USE_AS_WMEMCMP + shl $2, %RDX_LP +# elif defined __ILP32__ + /* Clear the upper 32 bits. */ + movl %edx, %edx +# endif + cmp $VEC_SIZE, %RDX_LP + jb L(less_vec) + + /* From VEC to 2 * VEC. No branch when size == VEC_SIZE. */ + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k1 + kmovd %k1, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + + cmpq $(VEC_SIZE * 2), %rdx + jbe L(last_vec) + + /* More than 2 * VEC. */ + cmpq $(VEC_SIZE * 8), %rdx + ja L(more_8x_vec) + cmpq $(VEC_SIZE * 4), %rdx + jb L(last_4x_vec) + + /* From 4 * VEC to 8 * VEC, inclusively. */ + VMOVU (%rsi), %YMM1 + VPCMPEQ (%rdi), %YMM1, %k1 + + VMOVU VEC_SIZE(%rsi), %YMM2 + VPCMPEQ VEC_SIZE(%rdi), %YMM2, %k2 + + VMOVU (VEC_SIZE * 2)(%rsi), %YMM3 + VPCMPEQ (VEC_SIZE * 2)(%rdi), %YMM3, %k3 + + VMOVU (VEC_SIZE * 3)(%rsi), %YMM4 + VPCMPEQ (VEC_SIZE * 3)(%rdi), %YMM4, %k4 + + kandd %k1, %k2, %k5 + kandd %k3, %k4, %k6 + kandd %k5, %k6, %k6 + + kmovd %k6, %eax + cmpl $VEC_MASK, %eax + jne L(4x_vec_end) + + leaq -(4 * VEC_SIZE)(%rdi, %rdx), %rdi + leaq -(4 * VEC_SIZE)(%rsi, %rdx), %rsi + VMOVU (%rsi), %YMM1 + VPCMPEQ (%rdi), %YMM1, %k1 + + VMOVU VEC_SIZE(%rsi), %YMM2 + VPCMPEQ VEC_SIZE(%rdi), %YMM2, %k2 + kandd %k1, %k2, %k5 + + VMOVU (VEC_SIZE * 2)(%rsi), %YMM3 + VPCMPEQ (VEC_SIZE * 2)(%rdi), %YMM3, %k3 + kandd %k3, %k5, %k5 + + VMOVU (VEC_SIZE * 3)(%rsi), %YMM4 + VPCMPEQ (VEC_SIZE * 3)(%rdi), %YMM4, %k4 + kandd %k4, %k5, %k5 + + kmovd %k5, %eax + cmpl $VEC_MASK, %eax + jne L(4x_vec_end) + xorl %eax, %eax + ret + + .p2align 4 +L(last_2x_vec): + /* From VEC to 2 * VEC. No branch when size == VEC_SIZE. */ + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + +L(last_vec): + /* Use overlapping loads to avoid branches. */ + leaq -VEC_SIZE(%rdi, %rdx), %rdi + leaq -VEC_SIZE(%rsi, %rdx), %rsi + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + ret + + .p2align 4 +L(first_vec): + /* A byte or int32 is different within 16 or 32 bytes. */ + tzcntl %eax, %ecx +# ifdef USE_AS_WMEMCMP + xorl %eax, %eax + movl (%rdi, %rcx, 4), %edx + cmpl (%rsi, %rcx, 4), %edx +L(wmemcmp_return): + setl %al + negl %eax + orl $1, %eax +# else + movzbl (%rdi, %rcx), %eax + movzbl (%rsi, %rcx), %edx + sub %edx, %eax +# endif + ret + +# ifdef USE_AS_WMEMCMP + .p2align 4 +L(4): + xorl %eax, %eax + movl (%rdi), %edx + cmpl (%rsi), %edx + jne L(wmemcmp_return) + ret +# else + .p2align 4 +L(between_4_7): + /* Load as big endian with overlapping movbe to avoid branches. */ + movbe (%rdi), %eax + movbe (%rsi), %ecx + shlq $32, %rax + shlq $32, %rcx + movbe -4(%rdi, %rdx), %edi + movbe -4(%rsi, %rdx), %esi + orq %rdi, %rax + orq %rsi, %rcx + subq %rcx, %rax + je L(exit) + sbbl %eax, %eax + orl $1, %eax + ret + + .p2align 4 +L(exit): + ret + + .p2align 4 +L(between_2_3): + /* Load as big endian to avoid branches. */ + movzwl (%rdi), %eax + movzwl (%rsi), %ecx + shll $8, %eax + shll $8, %ecx + bswap %eax + bswap %ecx + movb -1(%rdi, %rdx), %al + movb -1(%rsi, %rdx), %cl + /* Subtraction is okay because the upper 8 bits are zero. */ + subl %ecx, %eax + ret + + .p2align 4 +L(1): + movzbl (%rdi), %eax + movzbl (%rsi), %ecx + subl %ecx, %eax + ret +# endif + + .p2align 4 +L(zero): + xorl %eax, %eax + ret + + .p2align 4 +L(less_vec): +# ifdef USE_AS_WMEMCMP + /* It can only be 0, 4, 8, 12, 16, 20, 24, 28 bytes.
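(The sizes are multiples of 4 because the entry code scaled the wchar_t count by shl $2.) For the byte variant, L(between_4_7) above is worth a closer look: two overlapping big-endian loads turn the whole range into one 64-bit unsigned comparison. A self-contained C model, with names invented for the sketch:

#include <stdint.h>
#include <string.h>

static uint32_t
load32_be (const void *p)                  // movbe equivalent
{
  uint32_t v;
  memcpy (&v, p, 4);
  return __builtin_bswap32 (v);
}

// Valid for 4 <= n <= 7: the first and last four bytes overlap and
// together cover the buffer; big-endian order makes the numeric
// comparison agree with memcmp's lexicographic order.
static int
memcmp_4to7_model (const void *a, const void *b, size_t n)
{
  uint64_t x = ((uint64_t) load32_be (a) << 32)
               | load32_be ((const char *) a + n - 4);
  uint64_t y = ((uint64_t) load32_be (b) << 32)
               | load32_be ((const char *) b + n - 4);
  return x == y ? 0 : (x < y ? -1 : 1);    // the sbbl/orl idiom in the asm
}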
*/ + cmpb $4, %dl + je L(4) + jb L(zero) +# else + cmpb $1, %dl + je L(1) + jb L(zero) + cmpb $4, %dl + jb L(between_2_3) + cmpb $8, %dl + jb L(between_4_7) +# endif + cmpb $16, %dl + jae L(between_16_31) + /* It is between 8 and 15 bytes. */ + vmovq (%rdi), %XMM1 + vmovq (%rsi), %XMM2 + VPCMPEQ %XMM1, %XMM2, %k2 + kmovw %k2, %eax + subl $XMM_MASK, %eax + jnz L(first_vec) + /* Use overlapping loads to avoid branches. */ + leaq -8(%rdi, %rdx), %rdi + leaq -8(%rsi, %rdx), %rsi + vmovq (%rdi), %XMM1 + vmovq (%rsi), %XMM2 + VPCMPEQ %XMM1, %XMM2, %k2 + kmovw %k2, %eax + subl $XMM_MASK, %eax + jnz L(first_vec) + ret + + .p2align 4 +L(between_16_31): + /* From 16 to 31 bytes. No branch when size == 16. */ + VMOVU (%rsi), %XMM2 + VPCMPEQ (%rdi), %XMM2, %k2 + kmovw %k2, %eax + subl $XMM_MASK, %eax + jnz L(first_vec) + + /* Use overlapping loads to avoid branches. */ + leaq -16(%rdi, %rdx), %rdi + leaq -16(%rsi, %rdx), %rsi + VMOVU (%rsi), %XMM2 + VPCMPEQ (%rdi), %XMM2, %k2 + kmovw %k2, %eax + subl $XMM_MASK, %eax + jnz L(first_vec) + ret + + .p2align 4 +L(more_8x_vec): + /* More than 8 * VEC. Check the first VEC. */ + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + + /* Align the first memory area for aligned loads in the loop. + Compute how much the first memory area is misaligned. */ + movq %rdi, %rcx + andl $(VEC_SIZE - 1), %ecx + /* Get the negative of offset for alignment. */ + subq $VEC_SIZE, %rcx + /* Adjust the second memory area. */ + subq %rcx, %rsi + /* Adjust the first memory area which should be aligned now. */ + subq %rcx, %rdi + /* Adjust length. */ + addq %rcx, %rdx + +L(loop_4x_vec): + /* Compare 4 * VEC at a time forward. */ + VMOVU (%rsi), %YMM1 + VPCMPEQ (%rdi), %YMM1, %k1 + + VMOVU VEC_SIZE(%rsi), %YMM2 + VPCMPEQ VEC_SIZE(%rdi), %YMM2, %k2 + kandd %k2, %k1, %k5 + + VMOVU (VEC_SIZE * 2)(%rsi), %YMM3 + VPCMPEQ (VEC_SIZE * 2)(%rdi), %YMM3, %k3 + kandd %k3, %k5, %k5 + + VMOVU (VEC_SIZE * 3)(%rsi), %YMM4 + VPCMPEQ (VEC_SIZE * 3)(%rdi), %YMM4, %k4 + kandd %k4, %k5, %k5 + + kmovd %k5, %eax + cmpl $VEC_MASK, %eax + jne L(4x_vec_end) + + addq $(VEC_SIZE * 4), %rdi + addq $(VEC_SIZE * 4), %rsi + + subq $(VEC_SIZE * 4), %rdx + cmpq $(VEC_SIZE * 4), %rdx + jae L(loop_4x_vec) + + /* Less than 4 * VEC. */ + cmpq $VEC_SIZE, %rdx + jbe L(last_vec) + cmpq $(VEC_SIZE * 2), %rdx + jbe L(last_2x_vec) + +L(last_4x_vec): + /* From 2 * VEC to 4 * VEC. */ + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + + addq $VEC_SIZE, %rdi + addq $VEC_SIZE, %rsi + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + + /* Use overlapping loads to avoid branches. 
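This overlap idiom recurs throughout the file and deserves a one-line model: for VEC_SIZE <= n <= 2 * VEC_SIZE, comparing the first and the last VEC_SIZE bytes covers every byte without any branch on the exact length. Illustrative C, assuming a VEC_SIZE of 32:

#include <string.h>

static int
memcmp_1to2_vec_model (const char *a, const char *b, size_t n)
{
  int r = memcmp (a, b, 32);                   // first vector
  if (r != 0)
    return r;
  return memcmp (a + n - 32, b + n - 32, 32);  // overlapping last vector
}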
*/ + leaq -(3 * VEC_SIZE)(%rdi, %rdx), %rdi + leaq -(3 * VEC_SIZE)(%rsi, %rdx), %rsi + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + + addq $VEC_SIZE, %rdi + addq $VEC_SIZE, %rsi + VMOVU (%rsi), %YMM2 + VPCMPEQ (%rdi), %YMM2, %k2 + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + ret + + .p2align 4 +L(4x_vec_end): + kmovd %k1, %eax + subl $VEC_MASK, %eax + jnz L(first_vec) + kmovd %k2, %eax + subl $VEC_MASK, %eax + jnz L(first_vec_x1) + kmovd %k3, %eax + subl $VEC_MASK, %eax + jnz L(first_vec_x2) + kmovd %k4, %eax + subl $VEC_MASK, %eax + tzcntl %eax, %ecx +# ifdef USE_AS_WMEMCMP + xorl %eax, %eax + movl (VEC_SIZE * 3)(%rdi, %rcx, 4), %edx + cmpl (VEC_SIZE * 3)(%rsi, %rcx, 4), %edx + jmp L(wmemcmp_return) +# else + movzbl (VEC_SIZE * 3)(%rdi, %rcx), %eax + movzbl (VEC_SIZE * 3)(%rsi, %rcx), %edx + sub %edx, %eax +# endif + ret + + .p2align 4 +L(first_vec_x1): + tzcntl %eax, %ecx +# ifdef USE_AS_WMEMCMP + xorl %eax, %eax + movl VEC_SIZE(%rdi, %rcx, 4), %edx + cmpl VEC_SIZE(%rsi, %rcx, 4), %edx + jmp L(wmemcmp_return) +# else + movzbl VEC_SIZE(%rdi, %rcx), %eax + movzbl VEC_SIZE(%rsi, %rcx), %edx + sub %edx, %eax +# endif + ret + + .p2align 4 +L(first_vec_x2): + tzcntl %eax, %ecx +# ifdef USE_AS_WMEMCMP + xorl %eax, %eax + movl (VEC_SIZE * 2)(%rdi, %rcx, 4), %edx + cmpl (VEC_SIZE * 2)(%rsi, %rcx, 4), %edx + jmp L(wmemcmp_return) +# else + movzbl (VEC_SIZE * 2)(%rdi, %rcx), %eax + movzbl (VEC_SIZE * 2)(%rsi, %rcx), %edx + sub %edx, %eax +# endif + ret +END (MEMCMP) +#endif diff --git a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S new file mode 100644 index 000000000..1ec1962e8 --- /dev/null +++ b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S @@ -0,0 +1,17 @@ +#if IS_IN (libc) +# define VEC_SIZE 32 +# define VEC(i) ymm##i +# define VMOVNT vmovntdq +# define VMOVU vmovdqu +# define VMOVA vmovdqa + +# define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +# define VZEROUPPER_RETURN jmp L(return) + +# define SECTION(p) p##.avx.rtm +# define MEMMOVE_SYMBOL(p,s) p##_avx_##s##_rtm + +# include "memmove-vec-unaligned-erms.S" +#endif diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S index aac1515cf..848848ab3 100644 --- a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S +++ b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S @@ -1,11 +1,32 @@ #if IS_IN (libc) # define VEC_SIZE 64 -# define VEC(i) zmm##i +# define XMM0 xmm16 +# define XMM1 xmm17 +# define YMM0 ymm16 +# define YMM1 ymm17 +# define VEC0 zmm16 +# define VEC1 zmm17 +# define VEC2 zmm18 +# define VEC3 zmm19 +# define VEC4 zmm20 +# define VEC5 zmm21 +# define VEC6 zmm22 +# define VEC7 zmm23 +# define VEC8 zmm24 +# define VEC9 zmm25 +# define VEC10 zmm26 +# define VEC11 zmm27 +# define VEC12 zmm28 +# define VEC13 zmm29 +# define VEC14 zmm30 +# define VEC15 zmm31 +# define VEC(i) VEC##i # define VMOVNT vmovntdq # define VMOVU vmovdqu64 # define VMOVA vmovdqa64 +# define VZEROUPPER -# define SECTION(p) p##.avx512 +# define SECTION(p) p##.evex512 # define MEMMOVE_SYMBOL(p,s) p##_avx512_##s # include "memmove-vec-unaligned-erms.S" diff --git a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S new file mode 100644 index 000000000..0cbce8f94 --- /dev/null +++ 
b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S @@ -0,0 +1,33 @@ +#if IS_IN (libc) +# define VEC_SIZE 32 +# define XMM0 xmm16 +# define XMM1 xmm17 +# define YMM0 ymm16 +# define YMM1 ymm17 +# define VEC0 ymm16 +# define VEC1 ymm17 +# define VEC2 ymm18 +# define VEC3 ymm19 +# define VEC4 ymm20 +# define VEC5 ymm21 +# define VEC6 ymm22 +# define VEC7 ymm23 +# define VEC8 ymm24 +# define VEC9 ymm25 +# define VEC10 ymm26 +# define VEC11 ymm27 +# define VEC12 ymm28 +# define VEC13 ymm29 +# define VEC14 ymm30 +# define VEC15 ymm31 +# define VEC(i) VEC##i +# define VMOVNT vmovntdq +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 +# define VZEROUPPER + +# define SECTION(p) p##.evex +# define MEMMOVE_SYMBOL(p,s) p##_evex_##s + +# include "memmove-vec-unaligned-erms.S" +#endif diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S index 0980c9537..03a2e4dfb 100644 --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S @@ -48,6 +48,14 @@ # define MEMMOVE_CHK_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s) #endif +#ifndef XMM0 +# define XMM0 xmm0 +#endif + +#ifndef YMM0 +# define YMM0 ymm0 +#endif + #ifndef VZEROUPPER # if VEC_SIZE > 16 # define VZEROUPPER vzeroupper @@ -139,11 +147,12 @@ L(last_2x_vec): VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1) VMOVU %VEC(0), (%rdi) VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) - VZEROUPPER #if !defined USE_MULTIARCH || !IS_IN (libc) L(nop): -#endif ret +#else + VZEROUPPER_RETURN +#endif #if defined USE_MULTIARCH && IS_IN (libc) END (MEMMOVE_SYMBOL (__memmove, unaligned)) @@ -236,8 +245,11 @@ L(last_2x_vec): VMOVU %VEC(0), (%rdi) VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx) L(return): - VZEROUPPER +#if VEC_SIZE > 16 + ZERO_UPPER_VEC_REGISTERS_RETURN +#else ret +#endif L(movsb): cmp __x86_shared_non_temporal_threshold(%rip), %RDX_LP @@ -298,21 +310,20 @@ L(less_vec): #if VEC_SIZE > 32 L(between_32_63): /* From 32 to 63. No branch when size == 32. */ - vmovdqu (%rsi), %ymm0 - vmovdqu -32(%rsi,%rdx), %ymm1 - vmovdqu %ymm0, (%rdi) - vmovdqu %ymm1, -32(%rdi,%rdx) - VZEROUPPER - ret + VMOVU (%rsi), %YMM0 + VMOVU -32(%rsi,%rdx), %YMM1 + VMOVU %YMM0, (%rdi) + VMOVU %YMM1, -32(%rdi,%rdx) + VZEROUPPER_RETURN #endif #if VEC_SIZE > 16 /* From 16 to 31. No branch when size == 16. */ L(between_16_31): - vmovdqu (%rsi), %xmm0 - vmovdqu -16(%rsi,%rdx), %xmm1 - vmovdqu %xmm0, (%rdi) - vmovdqu %xmm1, -16(%rdi,%rdx) - ret + VMOVU (%rsi), %XMM0 + VMOVU -16(%rsi,%rdx), %XMM1 + VMOVU %XMM0, (%rdi) + VMOVU %XMM1, -16(%rdi,%rdx) + VZEROUPPER_RETURN #endif L(between_8_15): /* From 8 to 15. No branch when size == 8. */ @@ -365,8 +376,7 @@ L(more_2x_vec): VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi,%rdx) VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi,%rdx) VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi,%rdx) - VZEROUPPER - ret + VZEROUPPER_RETURN L(last_4x_vec): /* Copy from 2 * VEC to 4 * VEC. */ VMOVU (%rsi), %VEC(0) @@ -377,8 +387,7 @@ L(last_4x_vec): VMOVU %VEC(1), VEC_SIZE(%rdi) VMOVU %VEC(2), -VEC_SIZE(%rdi,%rdx) VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi,%rdx) - VZEROUPPER - ret + VZEROUPPER_RETURN L(more_8x_vec): cmpq %rsi, %rdi @@ -434,8 +443,7 @@ L(loop_4x_vec_forward): VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) /* Store the first VEC. */ VMOVU %VEC(4), (%r11) - VZEROUPPER - ret + VZEROUPPER_RETURN L(more_8x_vec_backward): /* Load the first 4 * VEC and last VEC to support overlapping @@ -486,8 +494,7 @@ L(loop_4x_vec_backward): VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) /* Store the last VEC. 
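All of these copy loops lean on the same edge-saving trick: the boundary vectors are loaded into registers before the bulk copy starts and stored last, so an overlapping memmove still observes the original bytes even though the bulk loop stores through aligned addresses that can re-cover the edges. A simplified C model of the forward direction (illustrative; assumes dst < src and n > 8 * V):

#include <stddef.h>
#include <string.h>

enum { V = 32 };                           // one vector in the AVX builds

static void
memmove_fwd_model (unsigned char *dst, const unsigned char *src, size_t n)
{
  unsigned char first[V], last4[4 * V];
  memcpy (first, src, V);                  // save head up front
  memcpy (last4, src + n - 4 * V, 4 * V);  // save last 4 vectors up front
  size_t i = V;
  while (n - i > 4 * V)                    // ascending bulk copy
    {
      for (size_t j = 0; j < 4 * V; j++)   // byte loop keeps the model
        dst[i + j] = src[i + j];           // well-defined under overlap
      i += 4 * V;
    }
  memcpy (dst + n - 4 * V, last4, 4 * V);  // covers the <= 4 * V remainder
  memcpy (dst, first, V);                  // head is stored last
}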
*/ VMOVU %VEC(8), (%r11) - VZEROUPPER - ret + VZEROUPPER_RETURN #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc) L(large_forward): @@ -522,8 +529,7 @@ L(loop_large_forward): VMOVU %VEC(8), -(VEC_SIZE * 3)(%rcx) /* Store the first VEC. */ VMOVU %VEC(4), (%r11) - VZEROUPPER - ret + VZEROUPPER_RETURN L(large_backward): /* Don't use non-temporal store if there is overlap between @@ -557,8 +563,7 @@ L(loop_large_backward): VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi) /* Store the last VEC. */ VMOVU %VEC(8), (%r11) - VZEROUPPER - ret + VZEROUPPER_RETURN #endif END (MEMMOVE_SYMBOL (__memmove, unaligned_erms)) diff --git a/sysdeps/x86_64/multiarch/memrchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/memrchr-avx2-rtm.S new file mode 100644 index 000000000..cea2d2a72 --- /dev/null +++ b/sysdeps/x86_64/multiarch/memrchr-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef MEMRCHR +# define MEMRCHR __memrchr_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "memrchr-avx2.S" diff --git a/sysdeps/x86_64/multiarch/memrchr-avx2.S b/sysdeps/x86_64/multiarch/memrchr-avx2.S index eddede45b..ac7370cb0 100644 --- a/sysdeps/x86_64/multiarch/memrchr-avx2.S +++ b/sysdeps/x86_64/multiarch/memrchr-avx2.S @@ -20,14 +20,22 @@ # include +# ifndef MEMRCHR +# define MEMRCHR __memrchr_avx2 +# endif + # ifndef VZEROUPPER # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + # define VEC_SIZE 32 - .section .text.avx,"ax",@progbits -ENTRY (__memrchr_avx2) + .section SECTION(.text),"ax",@progbits +ENTRY (MEMRCHR) /* Broadcast CHAR to YMM0. */ vmovd %esi, %xmm0 vpbroadcastb %xmm0, %ymm0 @@ -134,8 +142,8 @@ L(loop_4x_vec): vpmovmskb %ymm1, %eax bsrl %eax, %eax addq %rdi, %rax - VZEROUPPER - ret +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN .p2align 4 L(last_4x_vec_or_less): @@ -169,8 +177,7 @@ L(last_4x_vec_or_less): addq %rax, %rdx jl L(zero) addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_2x_vec): @@ -191,31 +198,27 @@ L(last_2x_vec): jl L(zero) addl $(VEC_SIZE * 2), %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_x0): bsrl %eax, %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_x1): bsrl %eax, %eax addl $VEC_SIZE, %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_x2): bsrl %eax, %eax addl $(VEC_SIZE * 2), %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_x3): @@ -232,8 +235,7 @@ L(last_vec_x1_check): jl L(zero) addl $VEC_SIZE, %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_x3_check): @@ -243,12 +245,14 @@ L(last_vec_x3_check): jl L(zero) addl $(VEC_SIZE * 3), %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(zero): - VZEROUPPER + xorl %eax, %eax + VZEROUPPER_RETURN + + .p2align 4 L(null): xorl %eax, %eax ret @@ -273,8 +277,7 @@ L(last_vec_or_less_aligned): bsrl %eax, %eax addq %rdi, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_or_less): @@ -315,8 +318,7 @@ L(last_vec_or_less): bsrl %eax, %eax addq %rdi, %rax addq %r8, %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(last_vec_2x_aligned): @@ -353,7 +355,6 @@ L(last_vec_2x_aligned): bsrl %eax, %eax addq %rdi, %rax addq %r8, %rax - VZEROUPPER - ret -END (__memrchr_avx2) + VZEROUPPER_RETURN +END (MEMRCHR) #endif diff --git 
a/sysdeps/x86_64/multiarch/memrchr-evex.S b/sysdeps/x86_64/multiarch/memrchr-evex.S new file mode 100644 index 000000000..16bf8e02b --- /dev/null +++ b/sysdeps/x86_64/multiarch/memrchr-evex.S @@ -0,0 +1,337 @@ +/* memrchr optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# define VMOVA vmovdqa64 + +# define YMMMATCH ymm16 + +# define VEC_SIZE 32 + + .section .text.evex,"ax",@progbits +ENTRY (__memrchr_evex) + /* Broadcast CHAR to YMMMATCH. */ + vpbroadcastb %esi, %YMMMATCH + + sub $VEC_SIZE, %RDX_LP + jbe L(last_vec_or_less) + + add %RDX_LP, %RDI_LP + + /* Check the last VEC_SIZE bytes. */ + vpcmpb $0, (%rdi), %YMMMATCH, %k1 + kmovd %k1, %eax + testl %eax, %eax + jnz L(last_vec_x0) + + subq $(VEC_SIZE * 4), %rdi + movl %edi, %ecx + andl $(VEC_SIZE - 1), %ecx + jz L(aligned_more) + + /* Align data for aligned loads in the loop. */ + addq $VEC_SIZE, %rdi + addq $VEC_SIZE, %rdx + andq $-VEC_SIZE, %rdi + subq %rcx, %rdx + + .p2align 4 +L(aligned_more): + subq $(VEC_SIZE * 4), %rdx + jbe L(last_4x_vec_or_less) + + /* Check the last 4 * VEC_SIZE. Only one VEC_SIZE at a time + since data is only aligned to VEC_SIZE. */ + vpcmpb $0, (VEC_SIZE * 3)(%rdi), %YMMMATCH, %k1 + kmovd %k1, %eax + testl %eax, %eax + jnz L(last_vec_x3) + + vpcmpb $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k2 + kmovd %k2, %eax + testl %eax, %eax + jnz L(last_vec_x2) + + vpcmpb $0, VEC_SIZE(%rdi), %YMMMATCH, %k3 + kmovd %k3, %eax + testl %eax, %eax + jnz L(last_vec_x1) + + vpcmpb $0, (%rdi), %YMMMATCH, %k4 + kmovd %k4, %eax + testl %eax, %eax + jnz L(last_vec_x0) + + /* Align data to 4 * VEC_SIZE for loop with fewer branches. + There are some overlaps with above if data isn't aligned + to 4 * VEC_SIZE. */ + movl %edi, %ecx + andl $(VEC_SIZE * 4 - 1), %ecx + jz L(loop_4x_vec) + + addq $(VEC_SIZE * 4), %rdi + addq $(VEC_SIZE * 4), %rdx + andq $-(VEC_SIZE * 4), %rdi + subq %rcx, %rdx + + .p2align 4 +L(loop_4x_vec): + /* Compare 4 * VEC at a time forward. */ + subq $(VEC_SIZE * 4), %rdi + subq $(VEC_SIZE * 4), %rdx + jbe L(last_4x_vec_or_less) + + vpcmpb $0, (%rdi), %YMMMATCH, %k1 + vpcmpb $0, VEC_SIZE(%rdi), %YMMMATCH, %k2 + kord %k1, %k2, %k5 + vpcmpb $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k3 + vpcmpb $0, (VEC_SIZE * 3)(%rdi), %YMMMATCH, %k4 + + kord %k3, %k4, %k6 + kortestd %k5, %k6 + jz L(loop_4x_vec) + + /* There is a match. 
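After the kortestd above falls through to this point, the four saved masks are probed from the highest-addressed vector down (k4 first), and bsrl picks the highest set bit, because memrchr must report the last occurrence. The per-vector step can be modeled in C; the function name is illustrative and the mask intrinsics need -mavx512vl -mavx512bw.

    #include <immintrin.h>

    // index of the LAST byte equal to c within one 32-byte block, or -1
    static int
    last_match_in_vec (const unsigned char *p, int c)
    {
      __m256i v = _mm256_loadu_si256 ((const __m256i *) p);
      __mmask32 k = _mm256_cmpeq_epi8_mask (v, _mm256_set1_epi8 ((char) c));
      if (k == 0)
        return -1;
      return 31 - __builtin_clz (k);   // highest set bit, as bsrl yields
    }
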
*/ + kmovd %k4, %eax + testl %eax, %eax + jnz L(last_vec_x3) + + kmovd %k3, %eax + testl %eax, %eax + jnz L(last_vec_x2) + + kmovd %k2, %eax + testl %eax, %eax + jnz L(last_vec_x1) + + kmovd %k1, %eax + bsrl %eax, %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_4x_vec_or_less): + addl $(VEC_SIZE * 4), %edx + cmpl $(VEC_SIZE * 2), %edx + jbe L(last_2x_vec) + + vpcmpb $0, (VEC_SIZE * 3)(%rdi), %YMMMATCH, %k1 + kmovd %k1, %eax + testl %eax, %eax + jnz L(last_vec_x3) + + vpcmpb $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k2 + kmovd %k2, %eax + testl %eax, %eax + jnz L(last_vec_x2) + + vpcmpb $0, VEC_SIZE(%rdi), %YMMMATCH, %k3 + kmovd %k3, %eax + testl %eax, %eax + jnz L(last_vec_x1_check) + cmpl $(VEC_SIZE * 3), %edx + jbe L(zero) + + vpcmpb $0, (%rdi), %YMMMATCH, %k4 + kmovd %k4, %eax + testl %eax, %eax + jz L(zero) + bsrl %eax, %eax + subq $(VEC_SIZE * 4), %rdx + addq %rax, %rdx + jl L(zero) + addq %rdi, %rax + ret + + .p2align 4 +L(last_2x_vec): + vpcmpb $0, (VEC_SIZE * 3)(%rdi), %YMMMATCH, %k1 + kmovd %k1, %eax + testl %eax, %eax + jnz L(last_vec_x3_check) + cmpl $VEC_SIZE, %edx + jbe L(zero) + + vpcmpb $0, (VEC_SIZE * 2)(%rdi), %YMMMATCH, %k1 + kmovd %k1, %eax + testl %eax, %eax + jz L(zero) + bsrl %eax, %eax + subq $(VEC_SIZE * 2), %rdx + addq %rax, %rdx + jl L(zero) + addl $(VEC_SIZE * 2), %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_x0): + bsrl %eax, %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_x1): + bsrl %eax, %eax + addl $VEC_SIZE, %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_x2): + bsrl %eax, %eax + addl $(VEC_SIZE * 2), %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_x3): + bsrl %eax, %eax + addl $(VEC_SIZE * 3), %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_x1_check): + bsrl %eax, %eax + subq $(VEC_SIZE * 3), %rdx + addq %rax, %rdx + jl L(zero) + addl $VEC_SIZE, %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_x3_check): + bsrl %eax, %eax + subq $VEC_SIZE, %rdx + addq %rax, %rdx + jl L(zero) + addl $(VEC_SIZE * 3), %eax + addq %rdi, %rax + ret + + .p2align 4 +L(zero): + xorl %eax, %eax + ret + + .p2align 4 +L(last_vec_or_less_aligned): + movl %edx, %ecx + + vpcmpb $0, (%rdi), %YMMMATCH, %k1 + + movl $1, %edx + /* Support rdx << 32. */ + salq %cl, %rdx + subq $1, %rdx + + kmovd %k1, %eax + + /* Remove the trailing bytes. */ + andl %edx, %eax + testl %eax, %eax + jz L(zero) + + bsrl %eax, %eax + addq %rdi, %rax + ret + + .p2align 4 +L(last_vec_or_less): + addl $VEC_SIZE, %edx + + /* Check for zero length. */ + testl %edx, %edx + jz L(zero) + + movl %edi, %ecx + andl $(VEC_SIZE - 1), %ecx + jz L(last_vec_or_less_aligned) + + movl %ecx, %esi + movl %ecx, %r8d + addl %edx, %esi + andq $-VEC_SIZE, %rdi + + subl $VEC_SIZE, %esi + ja L(last_vec_2x_aligned) + + /* Check the last VEC. */ + vpcmpb $0, (%rdi), %YMMMATCH, %k1 + kmovd %k1, %eax + + /* Remove the leading and trailing bytes. */ + sarl %cl, %eax + movl %edx, %ecx + + movl $1, %edx + sall %cl, %edx + subl $1, %edx + + andl %edx, %eax + testl %eax, %eax + jz L(zero) + + bsrl %eax, %eax + addq %rdi, %rax + addq %r8, %rax + ret + + .p2align 4 +L(last_vec_2x_aligned): + movl %esi, %ecx + + /* Check the last VEC. */ + vpcmpb $0, VEC_SIZE(%rdi), %YMMMATCH, %k1 + + movl $1, %edx + sall %cl, %edx + subl $1, %edx + + kmovd %k1, %eax + + /* Remove the trailing bytes. */ + andl %edx, %eax + + testl %eax, %eax + jnz L(last_vec_x1) + + /* Check the second last VEC. */ + vpcmpb $0, (%rdi), %YMMMATCH, %k1 + + movl %r8d, %ecx + + kmovd %k1, %eax + + /* Remove the leading bytes. 
Must use unsigned right shift for + bsrl below. */ + shrl %cl, %eax + testl %eax, %eax + jz L(zero) + + bsrl %eax, %eax + addq %rdi, %rax + addq %r8, %rax + ret +END (__memrchr_evex) +#endif diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S new file mode 100644 index 000000000..8ac3e479b --- /dev/null +++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S @@ -0,0 +1,10 @@ +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return) + +#define SECTION(p) p##.avx.rtm +#define MEMSET_SYMBOL(p,s) p##_avx2_##s##_rtm +#define WMEMSET_SYMBOL(p,s) p##_avx2_##s##_rtm + +#include "memset-avx2-unaligned-erms.S" diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S index 7ab3d8984..ae0860f36 100644 --- a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S +++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S @@ -14,9 +14,15 @@ movq r, %rax; \ vpbroadcastd %xmm0, %ymm0 -# define SECTION(p) p##.avx -# define MEMSET_SYMBOL(p,s) p##_avx2_##s -# define WMEMSET_SYMBOL(p,s) p##_avx2_##s +# ifndef SECTION +# define SECTION(p) p##.avx +# endif +# ifndef MEMSET_SYMBOL +# define MEMSET_SYMBOL(p,s) p##_avx2_##s +# endif +# ifndef WMEMSET_SYMBOL +# define WMEMSET_SYMBOL(p,s) p##_avx2_##s +# endif # include "memset-vec-unaligned-erms.S" #endif diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S index 0783979ca..22e7b187c 100644 --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S @@ -1,22 +1,22 @@ #if IS_IN (libc) # define VEC_SIZE 64 -# define VEC(i) zmm##i +# define XMM0 xmm16 +# define YMM0 ymm16 +# define VEC0 zmm16 +# define VEC(i) VEC##i # define VMOVU vmovdqu64 # define VMOVA vmovdqa64 +# define VZEROUPPER # define MEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \ - vmovd d, %xmm0; \ movq r, %rax; \ - vpbroadcastb %xmm0, %xmm0; \ - vpbroadcastq %xmm0, %zmm0 + vpbroadcastb d, %VEC0 # define WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \ - vmovd d, %xmm0; \ movq r, %rax; \ - vpbroadcastd %xmm0, %xmm0; \ - vpbroadcastq %xmm0, %zmm0 + vpbroadcastd d, %VEC0 -# define SECTION(p) p##.avx512 +# define SECTION(p) p##.evex512 # define MEMSET_SYMBOL(p,s) p##_avx512_##s # define WMEMSET_SYMBOL(p,s) p##_avx512_##s diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S new file mode 100644 index 000000000..ae0a4d6e4 --- /dev/null +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S @@ -0,0 +1,24 @@ +#if IS_IN (libc) +# define VEC_SIZE 32 +# define XMM0 xmm16 +# define YMM0 ymm16 +# define VEC0 ymm16 +# define VEC(i) VEC##i +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 +# define VZEROUPPER + +# define MEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \ + movq r, %rax; \ + vpbroadcastb d, %VEC0 + +# define WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \ + movq r, %rax; \ + vpbroadcastd d, %VEC0 + +# define SECTION(p) p##.evex +# define MEMSET_SYMBOL(p,s) p##_evex_##s +# define WMEMSET_SYMBOL(p,s) p##_evex_##s + +# include "memset-vec-unaligned-erms.S" +#endif diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S index faa408561..584747f1a 100644 --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S +++ 
b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S @@ -34,20 +34,25 @@ # define WMEMSET_CHK_SYMBOL(p,s) WMEMSET_SYMBOL(p, s) #endif +#ifndef XMM0 +# define XMM0 xmm0 +#endif + +#ifndef YMM0 +# define YMM0 ymm0 +#endif + #ifndef VZEROUPPER # if VEC_SIZE > 16 # define VZEROUPPER vzeroupper +# define VZEROUPPER_SHORT_RETURN vzeroupper; ret # else # define VZEROUPPER # endif #endif #ifndef VZEROUPPER_SHORT_RETURN -# if VEC_SIZE > 16 -# define VZEROUPPER_SHORT_RETURN vzeroupper -# else -# define VZEROUPPER_SHORT_RETURN rep -# endif +# define VZEROUPPER_SHORT_RETURN rep; ret #endif #ifndef MOVQ @@ -67,7 +72,7 @@ ENTRY (__bzero) mov %RDI_LP, %RAX_LP /* Set return value. */ mov %RSI_LP, %RDX_LP /* Set n. */ - pxor %xmm0, %xmm0 + pxor %XMM0, %XMM0 jmp L(entry_from_bzero) END (__bzero) weak_alias (__bzero, bzero) @@ -109,8 +114,7 @@ L(entry_from_bzero): /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */ VMOVU %VEC(0), -VEC_SIZE(%rdi,%rdx) VMOVU %VEC(0), (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN #if defined USE_MULTIARCH && IS_IN (libc) END (MEMSET_SYMBOL (__memset, unaligned)) @@ -133,14 +137,12 @@ ENTRY (__memset_erms) ENTRY (MEMSET_SYMBOL (__memset, erms)) # endif L(stosb): - /* Issue vzeroupper before rep stosb. */ - VZEROUPPER mov %RDX_LP, %RCX_LP movzbl %sil, %eax mov %RDI_LP, %RDX_LP rep stosb mov %RDX_LP, %RAX_LP - ret + VZEROUPPER_RETURN # if VEC_SIZE == 16 END (__memset_erms) # else @@ -167,8 +169,7 @@ ENTRY (MEMSET_SYMBOL (__memset, unaligned_erms)) /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */ VMOVU %VEC(0), -VEC_SIZE(%rdi,%rdx) VMOVU %VEC(0), (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN L(stosb_more_2x_vec): cmp __x86_rep_stosb_threshold(%rip), %RDX_LP @@ -182,8 +183,11 @@ L(more_2x_vec): VMOVU %VEC(0), -VEC_SIZE(%rdi,%rdx) VMOVU %VEC(0), -(VEC_SIZE * 2)(%rdi,%rdx) L(return): - VZEROUPPER +#if VEC_SIZE > 16 + ZERO_UPPER_VEC_REGISTERS_RETURN +#else ret +#endif L(loop_start): leaq (VEC_SIZE * 4)(%rdi), %rcx @@ -209,7 +213,6 @@ L(loop): cmpq %rcx, %rdx jne L(loop) VZEROUPPER_SHORT_RETURN - ret L(less_vec): /* Less than 1 VEC. */ # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64 @@ -223,7 +226,7 @@ L(less_vec): cmpb $16, %dl jae L(between_16_31) # endif - MOVQ %xmm0, %rcx + MOVQ %XMM0, %rcx cmpb $8, %dl jae L(between_8_15) cmpb $4, %dl @@ -233,40 +236,34 @@ L(less_vec): jb 1f movb %cl, (%rdi) 1: - VZEROUPPER - ret + VZEROUPPER_RETURN # if VEC_SIZE > 32 /* From 32 to 63. No branch when size == 32. */ L(between_32_63): - vmovdqu %ymm0, -32(%rdi,%rdx) - vmovdqu %ymm0, (%rdi) - VZEROUPPER - ret + VMOVU %YMM0, -32(%rdi,%rdx) + VMOVU %YMM0, (%rdi) + VZEROUPPER_RETURN # endif # if VEC_SIZE > 16 /* From 16 to 31. No branch when size == 16. */ L(between_16_31): - vmovdqu %xmm0, -16(%rdi,%rdx) - vmovdqu %xmm0, (%rdi) - VZEROUPPER - ret + VMOVU %XMM0, -16(%rdi,%rdx) + VMOVU %XMM0, (%rdi) + VZEROUPPER_RETURN # endif /* From 8 to 15. No branch when size == 8. */ L(between_8_15): movq %rcx, -8(%rdi,%rdx) movq %rcx, (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN L(between_4_7): /* From 4 to 7. No branch when size == 4. */ movl %ecx, -4(%rdi,%rdx) movl %ecx, (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN L(between_2_3): /* From 2 to 3. No branch when size == 2. 
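Every small-size class in this function uses the same overlapping-store trick that this comment describes: one store pinned to the start and one pinned to the end cover any length in the class without branching on the exact size. For the 2-to-3 case just below, an illustrative C rendering:

    #include <stdint.h>
    #include <string.h>

    // branchless memset for 2 <= n <= 3: the two 2-byte stores overlap
    // whenever n < 4, so both sizes are covered by the same pair of stores
    static void
    memset_2_to_3 (unsigned char *p, unsigned char c, size_t n)
    {
      uint16_t w = (uint16_t) (c * 0x0101u);
      memcpy (p, &w, sizeof w);           // bytes [0, 2)
      memcpy (p + n - 2, &w, sizeof w);   // bytes [n-2, n)
    }
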
*/ movw %cx, -2(%rdi,%rdx) movw %cx, (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN END (MEMSET_SYMBOL (__memset, unaligned_erms)) diff --git a/sysdeps/x86_64/multiarch/rawmemchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/rawmemchr-avx2-rtm.S new file mode 100644 index 000000000..acc5f6e2f --- /dev/null +++ b/sysdeps/x86_64/multiarch/rawmemchr-avx2-rtm.S @@ -0,0 +1,4 @@ +#define MEMCHR __rawmemchr_avx2_rtm +#define USE_AS_RAWMEMCHR 1 + +#include "memchr-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/rawmemchr-evex.S b/sysdeps/x86_64/multiarch/rawmemchr-evex.S new file mode 100644 index 000000000..ec942b77b --- /dev/null +++ b/sysdeps/x86_64/multiarch/rawmemchr-evex.S @@ -0,0 +1,4 @@ +#define MEMCHR __rawmemchr_evex +#define USE_AS_RAWMEMCHR 1 + +#include "memchr-evex.S" diff --git a/sysdeps/x86_64/multiarch/stpcpy-avx2-rtm.S b/sysdeps/x86_64/multiarch/stpcpy-avx2-rtm.S new file mode 100644 index 000000000..2b9c07a59 --- /dev/null +++ b/sysdeps/x86_64/multiarch/stpcpy-avx2-rtm.S @@ -0,0 +1,3 @@ +#define USE_AS_STPCPY +#define STRCPY __stpcpy_avx2_rtm +#include "strcpy-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/stpcpy-evex.S b/sysdeps/x86_64/multiarch/stpcpy-evex.S new file mode 100644 index 000000000..7c6f26cd9 --- /dev/null +++ b/sysdeps/x86_64/multiarch/stpcpy-evex.S @@ -0,0 +1,3 @@ +#define USE_AS_STPCPY +#define STRCPY __stpcpy_evex +#include "strcpy-evex.S" diff --git a/sysdeps/x86_64/multiarch/stpncpy-avx2-rtm.S b/sysdeps/x86_64/multiarch/stpncpy-avx2-rtm.S new file mode 100644 index 000000000..60a2ccfe5 --- /dev/null +++ b/sysdeps/x86_64/multiarch/stpncpy-avx2-rtm.S @@ -0,0 +1,4 @@ +#define USE_AS_STPCPY +#define USE_AS_STRNCPY +#define STRCPY __stpncpy_avx2_rtm +#include "strcpy-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/stpncpy-evex.S b/sysdeps/x86_64/multiarch/stpncpy-evex.S new file mode 100644 index 000000000..1570014d1 --- /dev/null +++ b/sysdeps/x86_64/multiarch/stpncpy-evex.S @@ -0,0 +1,4 @@ +#define USE_AS_STPCPY +#define USE_AS_STRNCPY +#define STRCPY __stpncpy_evex +#include "strcpy-evex.S" diff --git a/sysdeps/x86_64/multiarch/strcat-avx2-rtm.S b/sysdeps/x86_64/multiarch/strcat-avx2-rtm.S new file mode 100644 index 000000000..637fb557c --- /dev/null +++ b/sysdeps/x86_64/multiarch/strcat-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef STRCAT +# define STRCAT __strcat_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "strcat-avx2.S" diff --git a/sysdeps/x86_64/multiarch/strcat-avx2.S b/sysdeps/x86_64/multiarch/strcat-avx2.S index 41de8b2b6..4356fa733 100644 --- a/sysdeps/x86_64/multiarch/strcat-avx2.S +++ b/sysdeps/x86_64/multiarch/strcat-avx2.S @@ -30,7 +30,11 @@ /* Number of bytes in a vector register */ # define VEC_SIZE 32 - .section .text.avx,"ax",@progbits +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + + .section SECTION(.text),"ax",@progbits ENTRY (STRCAT) mov %rdi, %r9 # ifdef USE_AS_STRNCAT diff --git a/sysdeps/x86_64/multiarch/strcat-evex.S b/sysdeps/x86_64/multiarch/strcat-evex.S new file mode 100644 index 000000000..97c3d85b6 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strcat-evex.S @@ -0,0 +1,283 @@ +/* strcat with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# ifndef STRCAT +# define STRCAT __strcat_evex +# endif + +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 + +/* zero register */ +# define XMMZERO xmm16 +# define YMMZERO ymm16 +# define YMM0 ymm17 +# define YMM1 ymm18 + +# define USE_AS_STRCAT + +/* Number of bytes in a vector register */ +# define VEC_SIZE 32 + + .section .text.evex,"ax",@progbits +ENTRY (STRCAT) + mov %rdi, %r9 +# ifdef USE_AS_STRNCAT + mov %rdx, %r8 +# endif + + xor %eax, %eax + mov %edi, %ecx + and $((VEC_SIZE * 4) - 1), %ecx + vpxorq %XMMZERO, %XMMZERO, %XMMZERO + cmp $(VEC_SIZE * 3), %ecx + ja L(fourth_vector_boundary) + vpcmpb $0, (%rdi), %YMMZERO, %k0 + kmovd %k0, %edx + test %edx, %edx + jnz L(exit_null_on_first_vector) + mov %rdi, %rax + and $-VEC_SIZE, %rax + jmp L(align_vec_size_start) +L(fourth_vector_boundary): + mov %rdi, %rax + and $-VEC_SIZE, %rax + vpcmpb $0, (%rax), %YMMZERO, %k0 + mov $-1, %r10d + sub %rax, %rcx + shl %cl, %r10d + kmovd %k0, %edx + and %r10d, %edx + jnz L(exit) + +L(align_vec_size_start): + vpcmpb $0, VEC_SIZE(%rax), %YMMZERO, %k0 + kmovd %k0, %edx + test %edx, %edx + jnz L(exit_null_on_second_vector) + + vpcmpb $0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1 + kmovd %k1, %edx + test %edx, %edx + jnz L(exit_null_on_third_vector) + + vpcmpb $0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2 + kmovd %k2, %edx + test %edx, %edx + jnz L(exit_null_on_fourth_vector) + + vpcmpb $0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3 + kmovd %k3, %edx + test %edx, %edx + jnz L(exit_null_on_fifth_vector) + + vpcmpb $0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4 + add $(VEC_SIZE * 4), %rax + kmovd %k4, %edx + test %edx, %edx + jnz L(exit_null_on_second_vector) + + vpcmpb $0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1 + kmovd %k1, %edx + test %edx, %edx + jnz L(exit_null_on_third_vector) + + vpcmpb $0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2 + kmovd %k2, %edx + test %edx, %edx + jnz L(exit_null_on_fourth_vector) + + vpcmpb $0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3 + kmovd %k3, %edx + test %edx, %edx + jnz L(exit_null_on_fifth_vector) + + vpcmpb $0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4 + kmovd %k4, %edx + add $(VEC_SIZE * 4), %rax + test %edx, %edx + jnz L(exit_null_on_second_vector) + + vpcmpb $0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1 + kmovd %k1, %edx + test %edx, %edx + jnz L(exit_null_on_third_vector) + + vpcmpb $0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2 + kmovd %k2, %edx + test %edx, %edx + jnz L(exit_null_on_fourth_vector) + + vpcmpb $0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3 + kmovd %k3, %edx + test %edx, %edx + jnz L(exit_null_on_fifth_vector) + + vpcmpb $0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4 + add $(VEC_SIZE * 4), %rax + kmovd %k4, %edx + test %edx, %edx + jnz L(exit_null_on_second_vector) + + vpcmpb $0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1 + kmovd %k1, %edx + test %edx, %edx + jnz L(exit_null_on_third_vector) + + vpcmpb $0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2 + kmovd %k2, %edx 
+ test %edx, %edx + jnz L(exit_null_on_fourth_vector) + + vpcmpb $0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3 + kmovd %k3, %edx + test %edx, %edx + jnz L(exit_null_on_fifth_vector) + + test $((VEC_SIZE * 4) - 1), %rax + jz L(align_four_vec_loop) + + vpcmpb $0, (VEC_SIZE * 5)(%rax), %YMMZERO, %k4 + add $(VEC_SIZE * 5), %rax + kmovd %k4, %edx + test %edx, %edx + jnz L(exit) + + test $((VEC_SIZE * 4) - 1), %rax + jz L(align_four_vec_loop) + + vpcmpb $0, VEC_SIZE(%rax), %YMMZERO, %k0 + add $VEC_SIZE, %rax + kmovd %k0, %edx + test %edx, %edx + jnz L(exit) + + test $((VEC_SIZE * 4) - 1), %rax + jz L(align_four_vec_loop) + + vpcmpb $0, VEC_SIZE(%rax), %YMMZERO, %k0 + add $VEC_SIZE, %rax + kmovd %k0, %edx + test %edx, %edx + jnz L(exit) + + test $((VEC_SIZE * 4) - 1), %rax + jz L(align_four_vec_loop) + + vpcmpb $0, VEC_SIZE(%rax), %YMMZERO, %k1 + add $VEC_SIZE, %rax + kmovd %k1, %edx + test %edx, %edx + jnz L(exit) + + add $VEC_SIZE, %rax + + .p2align 4 +L(align_four_vec_loop): + VMOVA (%rax), %YMM0 + VMOVA (VEC_SIZE * 2)(%rax), %YMM1 + vpminub VEC_SIZE(%rax), %YMM0, %YMM0 + vpminub (VEC_SIZE * 3)(%rax), %YMM1, %YMM1 + vpminub %YMM0, %YMM1, %YMM0 + /* If K0 != 0, there is a null byte. */ + vpcmpb $0, %YMM0, %YMMZERO, %k0 + add $(VEC_SIZE * 4), %rax + ktestd %k0, %k0 + jz L(align_four_vec_loop) + + vpcmpb $0, -(VEC_SIZE * 4)(%rax), %YMMZERO, %k0 + sub $(VEC_SIZE * 5), %rax + kmovd %k0, %edx + test %edx, %edx + jnz L(exit_null_on_second_vector) + + vpcmpb $0, (VEC_SIZE * 2)(%rax), %YMMZERO, %k1 + kmovd %k1, %edx + test %edx, %edx + jnz L(exit_null_on_third_vector) + + vpcmpb $0, (VEC_SIZE * 3)(%rax), %YMMZERO, %k2 + kmovd %k2, %edx + test %edx, %edx + jnz L(exit_null_on_fourth_vector) + + vpcmpb $0, (VEC_SIZE * 4)(%rax), %YMMZERO, %k3 + kmovd %k3, %edx + sub %rdi, %rax + bsf %rdx, %rdx + add %rdx, %rax + add $(VEC_SIZE * 4), %rax + jmp L(StartStrcpyPart) + + .p2align 4 +L(exit): + sub %rdi, %rax +L(exit_null_on_first_vector): + bsf %rdx, %rdx + add %rdx, %rax + jmp L(StartStrcpyPart) + + .p2align 4 +L(exit_null_on_second_vector): + sub %rdi, %rax + bsf %rdx, %rdx + add %rdx, %rax + add $VEC_SIZE, %rax + jmp L(StartStrcpyPart) + + .p2align 4 +L(exit_null_on_third_vector): + sub %rdi, %rax + bsf %rdx, %rdx + add %rdx, %rax + add $(VEC_SIZE * 2), %rax + jmp L(StartStrcpyPart) + + .p2align 4 +L(exit_null_on_fourth_vector): + sub %rdi, %rax + bsf %rdx, %rdx + add %rdx, %rax + add $(VEC_SIZE * 3), %rax + jmp L(StartStrcpyPart) + + .p2align 4 +L(exit_null_on_fifth_vector): + sub %rdi, %rax + bsf %rdx, %rdx + add %rdx, %rax + add $(VEC_SIZE * 4), %rax + + .p2align 4 +L(StartStrcpyPart): + lea (%r9, %rax), %rdi + mov %rsi, %rcx + mov %r9, %rax /* save result */ + +# ifdef USE_AS_STRNCAT + test %r8, %r8 + jz L(ExitZero) +# define USE_AS_STRNCPY +# endif + +# include "strcpy-evex.S" +#endif diff --git a/sysdeps/x86_64/multiarch/strchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/strchr-avx2-rtm.S new file mode 100644 index 000000000..81f20d1d8 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strchr-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef STRCHR +# define STRCHR __strchr_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "strchr-avx2.S" diff --git a/sysdeps/x86_64/multiarch/strchr-avx2.S b/sysdeps/x86_64/multiarch/strchr-avx2.S index d416558d0..a94d728c7 100644 --- a/sysdeps/x86_64/multiarch/strchr-avx2.S +++ b/sysdeps/x86_64/multiarch/strchr-avx2.S @@ -38,9 +38,13 @@ # define 
VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + # define VEC_SIZE 32 - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (STRCHR) movl %edi, %ecx /* Broadcast CHAR to YMM0. */ @@ -93,8 +97,8 @@ L(cros_page_boundary): cmp (%rax), %CHAR_REG cmovne %rdx, %rax # endif - VZEROUPPER - ret +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN .p2align 4 L(aligned_more): @@ -190,8 +194,7 @@ L(first_vec_x0): cmp (%rax), %CHAR_REG cmovne %rdx, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(first_vec_x1): @@ -205,8 +208,7 @@ L(first_vec_x1): cmp (%rax), %CHAR_REG cmovne %rdx, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(first_vec_x2): @@ -220,8 +222,7 @@ L(first_vec_x2): cmp (%rax), %CHAR_REG cmovne %rdx, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(4x_vec_end): @@ -247,8 +248,7 @@ L(first_vec_x3): cmp (%rax), %CHAR_REG cmovne %rdx, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN END (STRCHR) #endif diff --git a/sysdeps/x86_64/multiarch/strchr-evex.S b/sysdeps/x86_64/multiarch/strchr-evex.S new file mode 100644 index 000000000..ddc86a705 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strchr-evex.S @@ -0,0 +1,335 @@ +/* strchr/strchrnul optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# ifndef STRCHR +# define STRCHR __strchr_evex +# endif + +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 + +# ifdef USE_AS_WCSCHR +# define VPBROADCAST vpbroadcastd +# define VPCMP vpcmpd +# define VPMINU vpminud +# define CHAR_REG esi +# define SHIFT_REG r8d +# else +# define VPBROADCAST vpbroadcastb +# define VPCMP vpcmpb +# define VPMINU vpminub +# define CHAR_REG sil +# define SHIFT_REG ecx +# endif + +# define XMMZERO xmm16 + +# define YMMZERO ymm16 +# define YMM0 ymm17 +# define YMM1 ymm18 +# define YMM2 ymm19 +# define YMM3 ymm20 +# define YMM4 ymm21 +# define YMM5 ymm22 +# define YMM6 ymm23 +# define YMM7 ymm24 +# define YMM8 ymm25 + +# define VEC_SIZE 32 +# define PAGE_SIZE 4096 + + .section .text.evex,"ax",@progbits +ENTRY (STRCHR) + movl %edi, %ecx +# ifndef USE_AS_STRCHRNUL + xorl %edx, %edx +# endif + + /* Broadcast CHAR to YMM0. */ + VPBROADCAST %esi, %YMM0 + + vpxorq %XMMZERO, %XMMZERO, %XMMZERO + + /* Check if we cross page boundary with one vector load. */ + andl $(PAGE_SIZE - 1), %ecx + cmpl $(PAGE_SIZE - VEC_SIZE), %ecx + ja L(cross_page_boundary) + + /* Check the first VEC_SIZE bytes. Search for both CHAR and the + null bytes. */ + VMOVU (%rdi), %YMM1 + + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM2 + VPMINU %YMM2, %YMM1, %YMM2 + /* Each bit in K0 represents a CHAR or a null byte in YMM1. 
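The vpxorq/VPMINU pair above is the combined-search trick: XOR with the broadcast character gives 0 exactly where a byte equals CHAR, and the unsigned minimum with the original data is 0 exactly where a byte equals CHAR or is NUL, so one compare against zero yields a single mask for both terminators. As C intrinsics, with an illustrative name; the mask compare needs -mavx512vl -mavx512bw:

    #include <immintrin.h>

    // one mask whose set bits mark bytes equal to c or equal to NUL
    static __mmask32
    char_or_nul_mask (__m256i v, char c)
    {
      __m256i m = _mm256_xor_si256 (v, _mm256_set1_epi8 (c)); // 0 where v == c
      m = _mm256_min_epu8 (m, v);                             // also 0 where v == 0
      return _mm256_cmpeq_epi8_mask (m, _mm256_setzero_si256 ());
    }
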
*/ + VPCMP $0, %YMMZERO, %YMM2, %k0 + ktestd %k0, %k0 + jz L(more_vecs) + kmovd %k0, %eax + tzcntl %eax, %eax + /* Found CHAR or the null byte. */ +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + leaq (%rdi, %rax, 4), %rax +# else + addq %rdi, %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + + .p2align 4 +L(more_vecs): + /* Align data for aligned loads in the loop. */ + andq $-VEC_SIZE, %rdi +L(aligned_more): + + /* Check the next 4 * VEC_SIZE. Only one VEC_SIZE at a time + since data is only aligned to VEC_SIZE. */ + VMOVA VEC_SIZE(%rdi), %YMM1 + addq $VEC_SIZE, %rdi + + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM2 + VPMINU %YMM2, %YMM1, %YMM2 + /* Each bit in K0 represents a CHAR or a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM2, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x0) + + VMOVA VEC_SIZE(%rdi), %YMM1 + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM2 + VPMINU %YMM2, %YMM1, %YMM2 + /* Each bit in K0 represents a CHAR or a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM2, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x1) + + VMOVA (VEC_SIZE * 2)(%rdi), %YMM1 + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM2 + VPMINU %YMM2, %YMM1, %YMM2 + /* Each bit in K0 represents a CHAR or a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM2, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x2) + + VMOVA (VEC_SIZE * 3)(%rdi), %YMM1 + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM2 + VPMINU %YMM2, %YMM1, %YMM2 + /* Each bit in K0 represents a CHAR or a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM2, %k0 + ktestd %k0, %k0 + jz L(prep_loop_4x) + + kmovd %k0, %eax + tzcntl %eax, %eax + /* Found CHAR or the null byte. */ +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + leaq (VEC_SIZE * 3)(%rdi, %rax, 4), %rax +# else + leaq (VEC_SIZE * 3)(%rdi, %rax), %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + + .p2align 4 +L(first_vec_x0): + tzcntl %eax, %eax + /* Found CHAR or the null byte. */ +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + leaq (%rdi, %rax, 4), %rax +# else + addq %rdi, %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + + .p2align 4 +L(first_vec_x1): + tzcntl %eax, %eax + /* Found CHAR or the null byte. */ +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + leaq VEC_SIZE(%rdi, %rax, 4), %rax +# else + leaq VEC_SIZE(%rdi, %rax), %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + + .p2align 4 +L(first_vec_x2): + tzcntl %eax, %eax + /* Found CHAR or the null byte. */ +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + leaq (VEC_SIZE * 2)(%rdi, %rax, 4), %rax +# else + leaq (VEC_SIZE * 2)(%rdi, %rax), %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + +L(prep_loop_4x): + /* Align data to 4 * VEC_SIZE. */ + andq $-(VEC_SIZE * 4), %rdi + + .p2align 4 +L(loop_4x_vec): + /* Compare 4 * VEC at a time forward. 
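The loop below extends that XOR/min trick across four vectors: the per-vector results are reduced with a min tree so a single VPCMP against zero decides whether CHAR or NUL occurs anywhere in these 4 * VEC_SIZE bytes. A sketch under the assumption that t1..t4 are the per-vector min(v XOR bcast(c), v) values:

    #include <immintrin.h>

    // one compare answers whether any of the 128 bytes needs a closer look
    static int
    any_char_or_nul_4x (__m256i t1, __m256i t2, __m256i t3, __m256i t4)
    {
      __m256i m = _mm256_min_epu8 (_mm256_min_epu8 (t1, t2),
                                   _mm256_min_epu8 (t3, t4));
      return _mm256_cmpeq_epi8_mask (m, _mm256_setzero_si256 ()) != 0;
    }
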
*/ + VMOVA (VEC_SIZE * 4)(%rdi), %YMM1 + VMOVA (VEC_SIZE * 5)(%rdi), %YMM2 + VMOVA (VEC_SIZE * 6)(%rdi), %YMM3 + VMOVA (VEC_SIZE * 7)(%rdi), %YMM4 + + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM5 + vpxorq %YMM2, %YMM0, %YMM6 + vpxorq %YMM3, %YMM0, %YMM7 + vpxorq %YMM4, %YMM0, %YMM8 + + VPMINU %YMM5, %YMM1, %YMM5 + VPMINU %YMM6, %YMM2, %YMM6 + VPMINU %YMM7, %YMM3, %YMM7 + VPMINU %YMM8, %YMM4, %YMM8 + + VPMINU %YMM5, %YMM6, %YMM1 + VPMINU %YMM7, %YMM8, %YMM2 + + VPMINU %YMM1, %YMM2, %YMM1 + + /* Each bit in K0 represents a CHAR or a null byte. */ + VPCMP $0, %YMMZERO, %YMM1, %k0 + + addq $(VEC_SIZE * 4), %rdi + + ktestd %k0, %k0 + jz L(loop_4x_vec) + + /* Each bit in K0 represents a CHAR or a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM5, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x0) + + /* Each bit in K1 represents a CHAR or a null byte in YMM2. */ + VPCMP $0, %YMMZERO, %YMM6, %k1 + kmovd %k1, %eax + testl %eax, %eax + jnz L(first_vec_x1) + + /* Each bit in K2 represents a CHAR or a null byte in YMM3. */ + VPCMP $0, %YMMZERO, %YMM7, %k2 + /* Each bit in K3 represents a CHAR or a null byte in YMM4. */ + VPCMP $0, %YMMZERO, %YMM8, %k3 + +# ifdef USE_AS_WCSCHR + /* NB: Each bit in K2/K3 represents 4-byte element. */ + kshiftlw $8, %k3, %k1 +# else + kshiftlq $32, %k3, %k1 +# endif + + /* Each bit in K1 represents a NULL or a mismatch. */ + korq %k1, %k2, %k1 + kmovq %k1, %rax + + tzcntq %rax, %rax +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + leaq (VEC_SIZE * 2)(%rdi, %rax, 4), %rax +# else + leaq (VEC_SIZE * 2)(%rdi, %rax), %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + + /* Cold case for crossing page with first load. */ + .p2align 4 +L(cross_page_boundary): + andq $-VEC_SIZE, %rdi + andl $(VEC_SIZE - 1), %ecx + + VMOVA (%rdi), %YMM1 + + /* Leaves only CHARS matching esi as 0. */ + vpxorq %YMM1, %YMM0, %YMM2 + VPMINU %YMM2, %YMM1, %YMM2 + /* Each bit in K0 represents a CHAR or a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM2, %k0 + kmovd %k0, %eax + testl %eax, %eax + +# ifdef USE_AS_WCSCHR + /* NB: Divide shift count by 4 since each bit in K1 represent 4 + bytes. */ + movl %ecx, %SHIFT_REG + sarl $2, %SHIFT_REG +# endif + + /* Remove the leading bits. */ + sarxl %SHIFT_REG, %eax, %eax + testl %eax, %eax + + jz L(aligned_more) + tzcntl %eax, %eax + addq %rcx, %rdi +# ifdef USE_AS_WCSCHR + /* NB: Multiply wchar_t count by 4 to get the number of bytes. 
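Stepping back to the loop exit above: when the hit lies in the third or fourth vector, the two 32-bit masks are fused (kshiftlq $32 plus korq) so a single tzcntq locates the first hit across 64 bytes. The same step in C, with an illustrative name:

    #include <stdint.h>

    // models the kshiftlq $32 / korq / tzcntq sequence in the loop exit
    static int
    first_hit_two_vecs (uint32_t k2, uint32_t k3)
    {
      uint64_t k = ((uint64_t) k3 << 32) | k2;
      return k ? (int) __builtin_ctzll (k) : -1;
    }
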
*/ + leaq (%rdi, %rax, 4), %rax +# else + addq %rdi, %rax +# endif +# ifndef USE_AS_STRCHRNUL + cmp (%rax), %CHAR_REG + cmovne %rdx, %rax +# endif + ret + +END (STRCHR) +# endif diff --git a/sysdeps/x86_64/multiarch/strchr.c b/sysdeps/x86_64/multiarch/strchr.c index 583a15279..e73d59581 100644 --- a/sysdeps/x86_64/multiarch/strchr.c +++ b/sysdeps/x86_64/multiarch/strchr.c @@ -29,16 +29,28 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_no_bsf) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden; static inline void * IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2); + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW) + && CPU_FEATURE_USABLE_P (cpu_features, BMI2)) + return OPTIMIZE (evex); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2); + } if (CPU_FEATURES_ARCH_P (cpu_features, Slow_BSF)) return OPTIMIZE (sse2_no_bsf); diff --git a/sysdeps/x86_64/multiarch/strchrnul-avx2-rtm.S b/sysdeps/x86_64/multiarch/strchrnul-avx2-rtm.S new file mode 100644 index 000000000..cdcf818b9 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strchrnul-avx2-rtm.S @@ -0,0 +1,3 @@ +#define STRCHR __strchrnul_avx2_rtm +#define USE_AS_STRCHRNUL 1 +#include "strchr-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/strchrnul-evex.S b/sysdeps/x86_64/multiarch/strchrnul-evex.S new file mode 100644 index 000000000..064fe7ca9 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strchrnul-evex.S @@ -0,0 +1,3 @@ +#define STRCHR __strchrnul_evex +#define USE_AS_STRCHRNUL 1 +#include "strchr-evex.S" diff --git a/sysdeps/x86_64/multiarch/strcmp-avx2-rtm.S b/sysdeps/x86_64/multiarch/strcmp-avx2-rtm.S new file mode 100644 index 000000000..aecd30d97 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strcmp-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef STRCMP +# define STRCMP __strcmp_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "strcmp-avx2.S" diff --git a/sysdeps/x86_64/multiarch/strcmp-avx2.S b/sysdeps/x86_64/multiarch/strcmp-avx2.S index 53cb7a669..fa70c994f 100644 --- a/sysdeps/x86_64/multiarch/strcmp-avx2.S +++ b/sysdeps/x86_64/multiarch/strcmp-avx2.S @@ -55,6 +55,10 @@ # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + /* Warning! wcscmp/wcsncmp have to use SIGNED comparison for elements. strcmp/strncmp have to use UNSIGNED comparison for elements. @@ -75,7 +79,7 @@ the maximum offset is reached before a difference is found, zero is returned. */ - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (STRCMP) # ifdef USE_AS_STRNCMP /* Check for simple cases (0 or 1) in offset. 
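The strcmp-avx2 hunk just below adds a guard for 64-bit wcsncmp: before the length is scaled to bytes with "shl $2", any length with a nonzero top byte is redirected to the unbounded OVERFLOW_STRCMP path, since such a length cannot bound a valid memory region and the shift could wrap. The check reduces to the following C, with an illustrative name:

    #include <stdint.h>

    // mirrors the shrq $56 test: over-approximates "n * 4 might overflow"
    static int
    wcsncmp_length_may_overflow (uint64_t n)
    {
      return (n >> 56) != 0;
    }
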
*/ @@ -83,6 +87,16 @@ ENTRY (STRCMP) je L(char0) jb L(zero) # ifdef USE_AS_WCSCMP +# ifndef __ILP32__ + movq %rdx, %rcx + /* Check if length could overflow when multiplied by + sizeof(wchar_t). Checking top 8 bits will cover all potential + overflow cases as well as redirect cases where its impossible to + length to bound a valid memory region. In these cases just use + 'wcscmp'. */ + shrq $56, %rcx + jnz OVERFLOW_STRCMP +# endif /* Convert units: from wide to byte char. */ shl $2, %RDX_LP # endif @@ -127,8 +141,8 @@ L(return): movzbl (%rsi, %rdx), %edx subl %edx, %eax # endif - VZEROUPPER - ret +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN .p2align 4 L(return_vec_size): @@ -161,8 +175,7 @@ L(return_vec_size): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(return_2_vec_size): @@ -195,8 +208,7 @@ L(return_2_vec_size): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(return_3_vec_size): @@ -229,8 +241,7 @@ L(return_3_vec_size): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(next_3_vectors): @@ -356,8 +367,7 @@ L(back_to_loop): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(test_vec): @@ -400,8 +410,7 @@ L(test_vec): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(test_2_vec): @@ -444,8 +453,7 @@ L(test_2_vec): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(test_3_vec): @@ -486,8 +494,7 @@ L(test_3_vec): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(loop_cross_page): @@ -556,8 +563,7 @@ L(loop_cross_page): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(loop_cross_page_2_vec): @@ -631,8 +637,7 @@ L(loop_cross_page_2_vec): subl %edx, %eax # endif # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # ifdef USE_AS_STRNCMP L(string_nbyte_offset_check): @@ -674,8 +679,7 @@ L(cross_page_loop): # ifndef USE_AS_WCSCMP L(different): # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # ifdef USE_AS_WCSCMP .p2align 4 @@ -685,16 +689,14 @@ L(different): setl %al negl %eax orl $1, %eax - VZEROUPPER - ret + VZEROUPPER_RETURN # endif # ifdef USE_AS_STRNCMP .p2align 4 L(zero): xorl %eax, %eax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(char0): @@ -708,8 +710,7 @@ L(char0): movzbl (%rdi), %eax subl %ecx, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # endif .p2align 4 @@ -734,8 +735,7 @@ L(last_vector): movzbl (%rsi, %rdx), %edx subl %edx, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN /* Comparing on page boundary region requires special treatment: It must done one vector at the time, starting with the wider @@ -856,7 +856,6 @@ L(cross_page_4bytes): testl %eax, %eax jne L(cross_page_loop) subl %ecx, %eax - VZEROUPPER - ret + VZEROUPPER_RETURN END (STRCMP) #endif diff --git a/sysdeps/x86_64/multiarch/strcmp-evex.S b/sysdeps/x86_64/multiarch/strcmp-evex.S new file mode 100644 index 000000000..459eeed09 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strcmp-evex.S @@ -0,0 +1,1043 @@ +/* strcmp/wcscmp/strncmp/wcsncmp optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# ifndef STRCMP +# define STRCMP __strcmp_evex +# endif + +# define PAGE_SIZE 4096 + +/* VEC_SIZE = Number of bytes in a ymm register */ +# define VEC_SIZE 32 + +/* Shift for dividing by (VEC_SIZE * 4). */ +# define DIVIDE_BY_VEC_4_SHIFT 7 +# if (VEC_SIZE * 4) != (1 << DIVIDE_BY_VEC_4_SHIFT) +# error (VEC_SIZE * 4) != (1 << DIVIDE_BY_VEC_4_SHIFT) +# endif + +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 + +# ifdef USE_AS_WCSCMP +/* Compare packed dwords. */ +# define VPCMP vpcmpd +# define SHIFT_REG32 r8d +# define SHIFT_REG64 r8 +/* 1 dword char == 4 bytes. */ +# define SIZE_OF_CHAR 4 +# else +/* Compare packed bytes. */ +# define VPCMP vpcmpb +# define SHIFT_REG32 ecx +# define SHIFT_REG64 rcx +/* 1 byte char == 1 byte. */ +# define SIZE_OF_CHAR 1 +# endif + +# define XMMZERO xmm16 +# define XMM0 xmm17 +# define XMM1 xmm18 + +# define YMMZERO ymm16 +# define YMM0 ymm17 +# define YMM1 ymm18 +# define YMM2 ymm19 +# define YMM3 ymm20 +# define YMM4 ymm21 +# define YMM5 ymm22 +# define YMM6 ymm23 +# define YMM7 ymm24 + +/* Warning! + wcscmp/wcsncmp have to use SIGNED comparison for elements. + strcmp/strncmp have to use UNSIGNED comparison for elements. +*/ + +/* The main idea of the string comparison (byte or dword) using 256-bit + EVEX instructions consists of comparing (VPCMP) two ymm vectors. The + latter can be on either packed bytes or dwords depending on + USE_AS_WCSCMP. In order to check the null char, algorithm keeps the + matched bytes/dwords, requiring 5 EVEX instructions (3 VPCMP and 2 + KORD). In general, the costs of comparing VEC_SIZE bytes (32-bytes) + are 3 VPCMP and 2 KORD instructions, together with VMOVU and ktestd + instructions. Main loop (away from from page boundary) compares 4 + vectors are a time, effectively comparing 4 x VEC_SIZE bytes (128 + bytes) on each loop. + + The routine strncmp/wcsncmp (enabled by defining USE_AS_STRNCMP) logic + is the same as strcmp, except that an a maximum offset is tracked. If + the maximum offset is reached before a difference is found, zero is + returned. */ + + .section .text.evex,"ax",@progbits +ENTRY (STRCMP) +# ifdef USE_AS_STRNCMP + /* Check for simple cases (0 or 1) in offset. */ + cmp $1, %RDX_LP + je L(char0) + jb L(zero) +# ifdef USE_AS_WCSCMP + /* Convert units: from wide to byte char. */ + shl $2, %RDX_LP +# endif + /* Register %r11 tracks the maximum offset. */ + mov %RDX_LP, %R11_LP +# endif + movl %edi, %eax + xorl %edx, %edx + /* Make %XMMZERO (%YMMZERO) all zeros in this function. */ + vpxorq %XMMZERO, %XMMZERO, %XMMZERO + orl %esi, %eax + andl $(PAGE_SIZE - 1), %eax + cmpl $(PAGE_SIZE - (VEC_SIZE * 4)), %eax + jg L(cross_page) + /* Start comparing 4 vectors. */ + VMOVU (%rdi), %YMM0 + VMOVU (%rsi), %YMM1 + + /* Each bit in K0 represents a mismatch in YMM0 and YMM1. */ + VPCMP $4, %YMM0, %YMM1, %k0 + + /* Check for NULL in YMM0. 
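This is the 3-compare / 2-OR pattern described in the header comment above: one VPCMP flags mismatched bytes, two more flag NUL bytes in either operand, and the kord results form a single "difference or end of string" mask. As C intrinsics, with an illustrative name; needs -mavx512vl -mavx512bw:

    #include <immintrin.h>

    // set bit = strings differ at this byte, or either string ends here
    static __mmask32
    diff_or_nul_mask (__m256i a, __m256i b)
    {
      __mmask32 ne = _mm256_cmpneq_epi8_mask (a, b);                      // VPCMP $4
      __mmask32 za = _mm256_cmpeq_epi8_mask (a, _mm256_setzero_si256 ());
      __mmask32 zb = _mm256_cmpeq_epi8_mask (b, _mm256_setzero_si256 ());
      return ne | za | zb;                                                // two kords
    }
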
*/ + VPCMP $0, %YMMZERO, %YMM0, %k1 + /* Check for NULL in YMM1. */ + VPCMP $0, %YMMZERO, %YMM1, %k2 + /* Each bit in K1 represents a NULL in YMM0 or YMM1. */ + kord %k1, %k2, %k1 + + /* Each bit in K1 represents: + 1. A mismatch in YMM0 and YMM1. Or + 2. A NULL in YMM0 or YMM1. + */ + kord %k0, %k1, %k1 + + ktestd %k1, %k1 + je L(next_3_vectors) + kmovd %k1, %ecx + tzcntl %ecx, %edx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %edx +# endif +# ifdef USE_AS_STRNCMP + /* Return 0 if the mismatched index (%rdx) is after the maximum + offset (%r11). */ + cmpq %r11, %rdx + jae L(zero) +# endif +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (%rdi, %rdx), %ecx + cmpl (%rsi, %rdx), %ecx + je L(return) +L(wcscmp_return): + setl %al + negl %eax + orl $1, %eax +L(return): +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %edx + subl %edx, %eax +# endif + ret + + .p2align 4 +L(return_vec_size): + kmovd %k1, %ecx + tzcntl %ecx, %edx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %edx +# endif +# ifdef USE_AS_STRNCMP + /* Return 0 if the mismatched index (%rdx + VEC_SIZE) is after + the maximum offset (%r11). */ + addq $VEC_SIZE, %rdx + cmpq %r11, %rdx + jae L(zero) +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (%rdi, %rdx), %ecx + cmpl (%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl VEC_SIZE(%rdi, %rdx), %ecx + cmpl VEC_SIZE(%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl VEC_SIZE(%rdi, %rdx), %eax + movzbl VEC_SIZE(%rsi, %rdx), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(return_2_vec_size): + kmovd %k1, %ecx + tzcntl %ecx, %edx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %edx +# endif +# ifdef USE_AS_STRNCMP + /* Return 0 if the mismatched index (%rdx + 2 * VEC_SIZE) is + after the maximum offset (%r11). */ + addq $(VEC_SIZE * 2), %rdx + cmpq %r11, %rdx + jae L(zero) +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (%rdi, %rdx), %ecx + cmpl (%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (VEC_SIZE * 2)(%rdi, %rdx), %ecx + cmpl (VEC_SIZE * 2)(%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl (VEC_SIZE * 2)(%rdi, %rdx), %eax + movzbl (VEC_SIZE * 2)(%rsi, %rdx), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(return_3_vec_size): + kmovd %k1, %ecx + tzcntl %ecx, %edx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %edx +# endif +# ifdef USE_AS_STRNCMP + /* Return 0 if the mismatched index (%rdx + 3 * VEC_SIZE) is + after the maximum offset (%r11). 
*/ + addq $(VEC_SIZE * 3), %rdx + cmpq %r11, %rdx + jae L(zero) +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (%rdi, %rdx), %ecx + cmpl (%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (VEC_SIZE * 3)(%rdi, %rdx), %ecx + cmpl (VEC_SIZE * 3)(%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl (VEC_SIZE * 3)(%rdi, %rdx), %eax + movzbl (VEC_SIZE * 3)(%rsi, %rdx), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(next_3_vectors): + VMOVU VEC_SIZE(%rdi), %YMM0 + VMOVU VEC_SIZE(%rsi), %YMM1 + /* Each bit in K0 represents a mismatch in YMM0 and YMM1. */ + VPCMP $4, %YMM0, %YMM1, %k0 + VPCMP $0, %YMMZERO, %YMM0, %k1 + VPCMP $0, %YMMZERO, %YMM1, %k2 + /* Each bit in K1 represents a NULL in YMM0 or YMM1. */ + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + kord %k0, %k1, %k1 + ktestd %k1, %k1 + jne L(return_vec_size) + + VMOVU (VEC_SIZE * 2)(%rdi), %YMM2 + VMOVU (VEC_SIZE * 3)(%rdi), %YMM3 + VMOVU (VEC_SIZE * 2)(%rsi), %YMM4 + VMOVU (VEC_SIZE * 3)(%rsi), %YMM5 + + /* Each bit in K0 represents a mismatch in YMM2 and YMM4. */ + VPCMP $4, %YMM2, %YMM4, %k0 + VPCMP $0, %YMMZERO, %YMM2, %k1 + VPCMP $0, %YMMZERO, %YMM4, %k2 + /* Each bit in K1 represents a NULL in YMM2 or YMM4. */ + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + kord %k0, %k1, %k1 + ktestd %k1, %k1 + jne L(return_2_vec_size) + + /* Each bit in K0 represents a mismatch in YMM3 and YMM5. */ + VPCMP $4, %YMM3, %YMM5, %k0 + VPCMP $0, %YMMZERO, %YMM3, %k1 + VPCMP $0, %YMMZERO, %YMM5, %k2 + /* Each bit in K1 represents a NULL in YMM3 or YMM5. */ + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + kord %k0, %k1, %k1 + ktestd %k1, %k1 + jne L(return_3_vec_size) +L(main_loop_header): + leaq (VEC_SIZE * 4)(%rdi), %rdx + movl $PAGE_SIZE, %ecx + /* Align load via RAX. */ + andq $-(VEC_SIZE * 4), %rdx + subq %rdi, %rdx + leaq (%rdi, %rdx), %rax +# ifdef USE_AS_STRNCMP + /* Starting from this point, the maximum offset, or simply the + 'offset', DECREASES by the same amount when base pointers are + moved forward. Return 0 when: + 1) On match: offset <= the matched vector index. + 2) On mistmach, offset is before the mistmatched index. + */ + subq %rdx, %r11 + jbe L(zero) +# endif + addq %rsi, %rdx + movq %rdx, %rsi + andl $(PAGE_SIZE - 1), %esi + /* Number of bytes before page crossing. */ + subq %rsi, %rcx + /* Number of VEC_SIZE * 4 blocks before page crossing. */ + shrq $DIVIDE_BY_VEC_4_SHIFT, %rcx + /* ESI: Number of VEC_SIZE * 4 blocks before page crossing. */ + movl %ecx, %esi + jmp L(loop_start) + + .p2align 4 +L(loop): +# ifdef USE_AS_STRNCMP + /* Base pointers are moved forward by 4 * VEC_SIZE. Decrease + the maximum offset (%r11) by the same amount. */ + subq $(VEC_SIZE * 4), %r11 + jbe L(zero) +# endif + addq $(VEC_SIZE * 4), %rax + addq $(VEC_SIZE * 4), %rdx +L(loop_start): + testl %esi, %esi + leal -1(%esi), %esi + je L(loop_cross_page) +L(back_to_loop): + /* Main loop, comparing 4 vectors are a time. 
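Because both base pointers advance by VEC_SIZE * 4 per iteration, the strncmp bound in %r11 can shrink by the same constant, and the subtraction in L(loop) doubles as the termination test (jbe catches both zero and underflow). An illustrative C model of that bookkeeping:

    #include <stddef.h>

    // r11 bookkeeping: returns 0 when the strncmp bound is exhausted
    static int
    bound_allows_another_block (size_t *remaining)
    {
      if (*remaining <= 4 * 32)   // subq $(VEC_SIZE * 4), %r11; jbe L(zero)
        return 0;
      *remaining -= 4 * 32;
      return 1;
    }
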
*/ + VMOVA (%rax), %YMM0 + VMOVA VEC_SIZE(%rax), %YMM2 + VMOVA (VEC_SIZE * 2)(%rax), %YMM4 + VMOVA (VEC_SIZE * 3)(%rax), %YMM6 + VMOVU (%rdx), %YMM1 + VMOVU VEC_SIZE(%rdx), %YMM3 + VMOVU (VEC_SIZE * 2)(%rdx), %YMM5 + VMOVU (VEC_SIZE * 3)(%rdx), %YMM7 + + VPCMP $4, %YMM0, %YMM1, %k0 + VPCMP $0, %YMMZERO, %YMM0, %k1 + VPCMP $0, %YMMZERO, %YMM1, %k2 + kord %k1, %k2, %k1 + /* Each bit in K4 represents a NULL or a mismatch in YMM0 and + YMM1. */ + kord %k0, %k1, %k4 + + VPCMP $4, %YMM2, %YMM3, %k0 + VPCMP $0, %YMMZERO, %YMM2, %k1 + VPCMP $0, %YMMZERO, %YMM3, %k2 + kord %k1, %k2, %k1 + /* Each bit in K5 represents a NULL or a mismatch in YMM2 and + YMM3. */ + kord %k0, %k1, %k5 + + VPCMP $4, %YMM4, %YMM5, %k0 + VPCMP $0, %YMMZERO, %YMM4, %k1 + VPCMP $0, %YMMZERO, %YMM5, %k2 + kord %k1, %k2, %k1 + /* Each bit in K6 represents a NULL or a mismatch in YMM4 and + YMM5. */ + kord %k0, %k1, %k6 + + VPCMP $4, %YMM6, %YMM7, %k0 + VPCMP $0, %YMMZERO, %YMM6, %k1 + VPCMP $0, %YMMZERO, %YMM7, %k2 + kord %k1, %k2, %k1 + /* Each bit in K7 represents a NULL or a mismatch in YMM6 and + YMM7. */ + kord %k0, %k1, %k7 + + kord %k4, %k5, %k0 + kord %k6, %k7, %k1 + + /* Test each mask (32 bits) individually because for VEC_SIZE + == 32 is not possible to OR the four masks and keep all bits + in a 64-bit integer register, differing from SSE2 strcmp + where ORing is possible. */ + kortestd %k0, %k1 + je L(loop) + ktestd %k4, %k4 + je L(test_vec) + kmovd %k4, %edi + tzcntl %edi, %ecx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %ecx +# endif +# ifdef USE_AS_STRNCMP + cmpq %rcx, %r11 + jbe L(zero) +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rcx), %edi + cmpl (%rdx, %rcx), %edi + jne L(wcscmp_return) +# else + movzbl (%rax, %rcx), %eax + movzbl (%rdx, %rcx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rcx), %edi + cmpl (%rdx, %rcx), %edi + jne L(wcscmp_return) +# else + movzbl (%rax, %rcx), %eax + movzbl (%rdx, %rcx), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(test_vec): +# ifdef USE_AS_STRNCMP + /* The first vector matched. Return 0 if the maximum offset + (%r11) <= VEC_SIZE. */ + cmpq $VEC_SIZE, %r11 + jbe L(zero) +# endif + ktestd %k5, %k5 + je L(test_2_vec) + kmovd %k5, %ecx + tzcntl %ecx, %edi +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %edi +# endif +# ifdef USE_AS_STRNCMP + addq $VEC_SIZE, %rdi + cmpq %rdi, %r11 + jbe L(zero) +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rdi), %ecx + cmpl (%rdx, %rdi), %ecx + jne L(wcscmp_return) +# else + movzbl (%rax, %rdi), %eax + movzbl (%rdx, %rdi), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl VEC_SIZE(%rsi, %rdi), %ecx + cmpl VEC_SIZE(%rdx, %rdi), %ecx + jne L(wcscmp_return) +# else + movzbl VEC_SIZE(%rax, %rdi), %eax + movzbl VEC_SIZE(%rdx, %rdi), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(test_2_vec): +# ifdef USE_AS_STRNCMP + /* The first 2 vectors matched. Return 0 if the maximum offset + (%r11) <= 2 * VEC_SIZE. */ + cmpq $(VEC_SIZE * 2), %r11 + jbe L(zero) +# endif + ktestd %k6, %k6 + je L(test_3_vec) + kmovd %k6, %ecx + tzcntl %ecx, %edi +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. 
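As the comment in the loop above notes, four 32-bit masks cannot be OR'd into one 64-bit register when VEC_SIZE == 32, so detection and location are split: kortestd on the pairwise ORs decides whether to leave the loop, then k4..k7 are tested individually to find the vector, and tzcnt finds the byte. A C model of the two-phase search, names illustrative:

    #include <stdint.h>

    // phase 1: any hit at all?  phase 2: which vector, and which byte?
    static long
    first_hit_4x (uint32_t k4, uint32_t k5, uint32_t k6, uint32_t k7)
    {
      if ((k4 | k5 | k6 | k7) == 0)
        return -1;                                 // stay in the main loop
      uint32_t k[4] = { k4, k5, k6, k7 };
      for (int v = 0; v < 4; ++v)
        if (k[v])
          return v * 32L + __builtin_ctz (k[v]);   // tzcntl
      __builtin_unreachable ();
    }
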
*/ + sall $2, %edi +# endif +# ifdef USE_AS_STRNCMP + addq $(VEC_SIZE * 2), %rdi + cmpq %rdi, %r11 + jbe L(zero) +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rdi), %ecx + cmpl (%rdx, %rdi), %ecx + jne L(wcscmp_return) +# else + movzbl (%rax, %rdi), %eax + movzbl (%rdx, %rdi), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (VEC_SIZE * 2)(%rsi, %rdi), %ecx + cmpl (VEC_SIZE * 2)(%rdx, %rdi), %ecx + jne L(wcscmp_return) +# else + movzbl (VEC_SIZE * 2)(%rax, %rdi), %eax + movzbl (VEC_SIZE * 2)(%rdx, %rdi), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(test_3_vec): +# ifdef USE_AS_STRNCMP + /* The first 3 vectors matched. Return 0 if the maximum offset + (%r11) <= 3 * VEC_SIZE. */ + cmpq $(VEC_SIZE * 3), %r11 + jbe L(zero) +# endif + kmovd %k7, %esi + tzcntl %esi, %ecx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %ecx +# endif +# ifdef USE_AS_STRNCMP + addq $(VEC_SIZE * 3), %rcx + cmpq %rcx, %r11 + jbe L(zero) +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rcx), %esi + cmpl (%rdx, %rcx), %esi + jne L(wcscmp_return) +# else + movzbl (%rax, %rcx), %eax + movzbl (%rdx, %rcx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (VEC_SIZE * 3)(%rsi, %rcx), %esi + cmpl (VEC_SIZE * 3)(%rdx, %rcx), %esi + jne L(wcscmp_return) +# else + movzbl (VEC_SIZE * 3)(%rax, %rcx), %eax + movzbl (VEC_SIZE * 3)(%rdx, %rcx), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(loop_cross_page): + xorl %r10d, %r10d + movq %rdx, %rcx + /* Align load via RDX. We load the extra ECX bytes which should + be ignored. */ + andl $((VEC_SIZE * 4) - 1), %ecx + /* R10 is -RCX. */ + subq %rcx, %r10 + + /* This works only if VEC_SIZE * 2 == 64. */ +# if (VEC_SIZE * 2) != 64 +# error (VEC_SIZE * 2) != 64 +# endif + + /* Check if the first VEC_SIZE * 2 bytes should be ignored. */ + cmpl $(VEC_SIZE * 2), %ecx + jge L(loop_cross_page_2_vec) + + VMOVU (%rax, %r10), %YMM2 + VMOVU VEC_SIZE(%rax, %r10), %YMM3 + VMOVU (%rdx, %r10), %YMM4 + VMOVU VEC_SIZE(%rdx, %r10), %YMM5 + + VPCMP $4, %YMM4, %YMM2, %k0 + VPCMP $0, %YMMZERO, %YMM2, %k1 + VPCMP $0, %YMMZERO, %YMM4, %k2 + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch in YMM2 and + YMM4. */ + kord %k0, %k1, %k1 + + VPCMP $4, %YMM5, %YMM3, %k3 + VPCMP $0, %YMMZERO, %YMM3, %k4 + VPCMP $0, %YMMZERO, %YMM5, %k5 + kord %k4, %k5, %k4 + /* Each bit in K3 represents a NULL or a mismatch in YMM3 and + YMM5. */ + kord %k3, %k4, %k3 + +# ifdef USE_AS_WCSCMP + /* NB: Each bit in K1/K3 represents 4-byte element. */ + kshiftlw $8, %k3, %k2 + /* NB: Divide shift count by 4 since each bit in K1 represent 4 + bytes. */ + movl %ecx, %SHIFT_REG32 + sarl $2, %SHIFT_REG32 +# else + kshiftlq $32, %k3, %k2 +# endif + + /* Each bit in K1 represents a NULL or a mismatch. */ + korq %k1, %k2, %k1 + kmovq %k1, %rdi + + /* Since ECX < VEC_SIZE * 2, simply skip the first ECX bytes. */ + shrxq %SHIFT_REG64, %rdi, %rdi + testq %rdi, %rdi + je L(loop_cross_page_2_vec) + tzcntq %rdi, %rcx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. 
*/ + sall $2, %ecx +# endif +# ifdef USE_AS_STRNCMP + cmpq %rcx, %r11 + jbe L(zero) +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rcx), %edi + cmpl (%rdx, %rcx), %edi + jne L(wcscmp_return) +# else + movzbl (%rax, %rcx), %eax + movzbl (%rdx, %rcx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rcx), %edi + cmpl (%rdx, %rcx), %edi + jne L(wcscmp_return) +# else + movzbl (%rax, %rcx), %eax + movzbl (%rdx, %rcx), %edx + subl %edx, %eax +# endif +# endif + ret + + .p2align 4 +L(loop_cross_page_2_vec): + /* The first VEC_SIZE * 2 bytes match or are ignored. */ + VMOVU (VEC_SIZE * 2)(%rax, %r10), %YMM0 + VMOVU (VEC_SIZE * 3)(%rax, %r10), %YMM1 + VMOVU (VEC_SIZE * 2)(%rdx, %r10), %YMM2 + VMOVU (VEC_SIZE * 3)(%rdx, %r10), %YMM3 + + VPCMP $4, %YMM0, %YMM2, %k0 + VPCMP $0, %YMMZERO, %YMM0, %k1 + VPCMP $0, %YMMZERO, %YMM2, %k2 + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch in YMM0 and + YMM2. */ + kord %k0, %k1, %k1 + + VPCMP $4, %YMM1, %YMM3, %k3 + VPCMP $0, %YMMZERO, %YMM1, %k4 + VPCMP $0, %YMMZERO, %YMM3, %k5 + kord %k4, %k5, %k4 + /* Each bit in K3 represents a NULL or a mismatch in YMM1 and + YMM3. */ + kord %k3, %k4, %k3 + +# ifdef USE_AS_WCSCMP + /* NB: Each bit in K1/K3 represents 4-byte element. */ + kshiftlw $8, %k3, %k2 +# else + kshiftlq $32, %k3, %k2 +# endif + + /* Each bit in K1 represents a NULL or a mismatch. */ + korq %k1, %k2, %k1 + kmovq %k1, %rdi + + xorl %r8d, %r8d + /* If ECX > VEC_SIZE * 2, skip ECX - (VEC_SIZE * 2) bytes. */ + subl $(VEC_SIZE * 2), %ecx + jle 1f + /* R8 has number of bytes skipped. */ + movl %ecx, %r8d +# ifdef USE_AS_WCSCMP + /* NB: Divide shift count by 4 since each bit in K1 represent 4 + bytes. */ + sarl $2, %ecx +# endif + /* Skip ECX bytes. */ + shrq %cl, %rdi +1: + /* Before jumping back to the loop, set ESI to the number of + VEC_SIZE * 4 blocks before page crossing. */ + movl $(PAGE_SIZE / (VEC_SIZE * 4) - 1), %esi + + testq %rdi, %rdi +# ifdef USE_AS_STRNCMP + /* At this point, if %rdi value is 0, it already tested + VEC_SIZE*4+%r10 byte starting from %rax. This label + checks whether strncmp maximum offset reached or not. */ + je L(string_nbyte_offset_check) +# else + je L(back_to_loop) +# endif + tzcntq %rdi, %rcx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %ecx +# endif + addq %r10, %rcx + /* Adjust for number of bytes skipped. */ + addq %r8, %rcx +# ifdef USE_AS_STRNCMP + addq $(VEC_SIZE * 2), %rcx + subq %rcx, %r11 + jbe L(zero) +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (%rsi, %rcx), %edi + cmpl (%rdx, %rcx), %edi + jne L(wcscmp_return) +# else + movzbl (%rax, %rcx), %eax + movzbl (%rdx, %rcx), %edx + subl %edx, %eax +# endif +# else +# ifdef USE_AS_WCSCMP + movq %rax, %rsi + xorl %eax, %eax + movl (VEC_SIZE * 2)(%rsi, %rcx), %edi + cmpl (VEC_SIZE * 2)(%rdx, %rcx), %edi + jne L(wcscmp_return) +# else + movzbl (VEC_SIZE * 2)(%rax, %rcx), %eax + movzbl (VEC_SIZE * 2)(%rdx, %rcx), %edx + subl %edx, %eax +# endif +# endif + ret + +# ifdef USE_AS_STRNCMP +L(string_nbyte_offset_check): + leaq (VEC_SIZE * 4)(%r10), %r10 + cmpq %r10, %r11 + jbe L(zero) + jmp L(back_to_loop) +# endif + + .p2align 4 +L(cross_page_loop): + /* Check one byte/dword at a time. 
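The scalar loop that follows has roughly this shape in C (byte case; the strncmp bound check is omitted for brevity, and the names are illustrative):

    #include <stddef.h>

    // returns 1 and sets *res once the comparison is decided; returns 0
    // after VEC_SIZE * 4 matching bytes, when the vector loop can resume
    static int
    scalar_cross_page (const unsigned char *s1, const unsigned char *s2,
                       size_t *off, int *res)
    {
      while (*off < 4 * 32)
        {
          unsigned int a = s1[*off], b = s2[*off];
          if (a != b || a == 0)          // mismatch, or both strings end here
            {
              *res = (int) a - (int) b;
              return 1;
            }
          ++*off;
        }
      return 0;
    }
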
*/ +# ifdef USE_AS_WCSCMP + cmpl %ecx, %eax +# else + subl %ecx, %eax +# endif + jne L(different) + addl $SIZE_OF_CHAR, %edx + cmpl $(VEC_SIZE * 4), %edx + je L(main_loop_header) +# ifdef USE_AS_STRNCMP + cmpq %r11, %rdx + jae L(zero) +# endif +# ifdef USE_AS_WCSCMP + movl (%rdi, %rdx), %eax + movl (%rsi, %rdx), %ecx +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %ecx +# endif + /* Check null char. */ + testl %eax, %eax + jne L(cross_page_loop) + /* Since %eax == 0, subtract is OK for both SIGNED and UNSIGNED + comparisons. */ + subl %ecx, %eax +# ifndef USE_AS_WCSCMP +L(different): +# endif + ret + +# ifdef USE_AS_WCSCMP + .p2align 4 +L(different): + /* Use movl to avoid modifying EFLAGS. */ + movl $0, %eax + setl %al + negl %eax + orl $1, %eax + ret +# endif + +# ifdef USE_AS_STRNCMP + .p2align 4 +L(zero): + xorl %eax, %eax + ret + + .p2align 4 +L(char0): +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (%rdi), %ecx + cmpl (%rsi), %ecx + jne L(wcscmp_return) +# else + movzbl (%rsi), %ecx + movzbl (%rdi), %eax + subl %ecx, %eax +# endif + ret +# endif + + .p2align 4 +L(last_vector): + addq %rdx, %rdi + addq %rdx, %rsi +# ifdef USE_AS_STRNCMP + subq %rdx, %r11 +# endif + tzcntl %ecx, %edx +# ifdef USE_AS_WCSCMP + /* NB: Multiply wchar_t count by 4 to get the number of bytes. */ + sall $2, %edx +# endif +# ifdef USE_AS_STRNCMP + cmpq %r11, %rdx + jae L(zero) +# endif +# ifdef USE_AS_WCSCMP + xorl %eax, %eax + movl (%rdi, %rdx), %ecx + cmpl (%rsi, %rdx), %ecx + jne L(wcscmp_return) +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %edx + subl %edx, %eax +# endif + ret + + /* Comparing within the page boundary region requires special treatment: + it must be done one vector at a time, starting with the wider + ymm vector if possible, otherwise with xmm. If fetching 16 bytes + (xmm) would still cross the boundary, byte comparison must be done. + */ + .p2align 4 +L(cross_page): + /* Try one ymm vector at a time. */ + cmpl $(PAGE_SIZE - VEC_SIZE), %eax + jg L(cross_page_1_vector) +L(loop_1_vector): + VMOVU (%rdi, %rdx), %YMM0 + VMOVU (%rsi, %rdx), %YMM1 + + /* Each bit in K0 represents a mismatch in YMM0 and YMM1. */ + VPCMP $4, %YMM0, %YMM1, %k0 + VPCMP $0, %YMMZERO, %YMM0, %k1 + VPCMP $0, %YMMZERO, %YMM1, %k2 + /* Each bit in K1 represents a NULL in YMM0 or YMM1. */ + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + kord %k0, %k1, %k1 + kmovd %k1, %ecx + testl %ecx, %ecx + jne L(last_vector) + + addl $VEC_SIZE, %edx + + addl $VEC_SIZE, %eax +# ifdef USE_AS_STRNCMP + /* Return 0 if the current offset (%rdx) >= the maximum offset + (%r11). */ + cmpq %r11, %rdx + jae L(zero) +# endif + cmpl $(PAGE_SIZE - VEC_SIZE), %eax + jle L(loop_1_vector) +L(cross_page_1_vector): + /* Less than 32 bytes to check, try one xmm vector. */ + cmpl $(PAGE_SIZE - 16), %eax + jg L(cross_page_1_xmm) + VMOVU (%rdi, %rdx), %XMM0 + VMOVU (%rsi, %rdx), %XMM1 + + /* Each bit in K0 represents a mismatch in XMM0 and XMM1. */ + VPCMP $4, %XMM0, %XMM1, %k0 + VPCMP $0, %XMMZERO, %XMM0, %k1 + VPCMP $0, %XMMZERO, %XMM1, %k2 + /* Each bit in K1 represents a NULL in XMM0 or XMM1. */ + korw %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + korw %k0, %k1, %k1 + kmovw %k1, %ecx + testl %ecx, %ecx + jne L(last_vector) + + addl $16, %edx +# ifndef USE_AS_WCSCMP + addl $16, %eax +# endif +# ifdef USE_AS_STRNCMP + /* Return 0 if the current offset (%rdx) >= the maximum offset + (%r11).
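
The ladder of probe widths used by this page-boundary path can be summarized in C (a sketch; safe_probe_width is an illustrative helper, PAGE mirrors PAGE_SIZE, and the code above tracks the page offset in %eax):

enum { PAGE = 4096 };

// Widest load that cannot run past the end of the current page when
// issued at page offset `off`.
static unsigned
safe_probe_width (unsigned off)
{
  if (off <= PAGE - 32) return 32;  // a full ymm vector still fits
  if (off <= PAGE - 16) return 16;  // drop to an xmm vector
  if (off <= PAGE - 8)  return 8;   // 8-byte load
  if (off <= PAGE - 4)  return 4;   // 4-byte load
  return 1;                         // finish byte by byte
}
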
*/ + cmpq %r11, %rdx + jae L(zero) +# endif + +L(cross_page_1_xmm): +# ifndef USE_AS_WCSCMP + /* Less than 16 bytes to check, try 8 byte vector. NB: No need + for wcscmp nor wcsncmp since wide char is 4 bytes. */ + cmpl $(PAGE_SIZE - 8), %eax + jg L(cross_page_8bytes) + vmovq (%rdi, %rdx), %XMM0 + vmovq (%rsi, %rdx), %XMM1 + + /* Each bit in K0 represents a mismatch in XMM0 and XMM1. */ + VPCMP $4, %XMM0, %XMM1, %k0 + VPCMP $0, %XMMZERO, %XMM0, %k1 + VPCMP $0, %XMMZERO, %XMM1, %k2 + /* Each bit in K1 represents a NULL in XMM0 or XMM1. */ + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + kord %k0, %k1, %k1 + kmovd %k1, %ecx + +# ifdef USE_AS_WCSCMP + /* Only last 2 bits are valid. */ + andl $0x3, %ecx +# else + /* Only last 8 bits are valid. */ + andl $0xff, %ecx +# endif + + testl %ecx, %ecx + jne L(last_vector) + + addl $8, %edx + addl $8, %eax +# ifdef USE_AS_STRNCMP + /* Return 0 if the current offset (%rdx) >= the maximum offset + (%r11). */ + cmpq %r11, %rdx + jae L(zero) +# endif + +L(cross_page_8bytes): + /* Less than 8 bytes to check, try 4 byte vector. */ + cmpl $(PAGE_SIZE - 4), %eax + jg L(cross_page_4bytes) + vmovd (%rdi, %rdx), %XMM0 + vmovd (%rsi, %rdx), %XMM1 + + /* Each bit in K0 represents a mismatch in XMM0 and XMM1. */ + VPCMP $4, %XMM0, %XMM1, %k0 + VPCMP $0, %XMMZERO, %XMM0, %k1 + VPCMP $0, %XMMZERO, %XMM1, %k2 + /* Each bit in K1 represents a NULL in XMM0 or XMM1. */ + kord %k1, %k2, %k1 + /* Each bit in K1 represents a NULL or a mismatch. */ + kord %k0, %k1, %k1 + kmovd %k1, %ecx + +# ifdef USE_AS_WCSCMP + /* Only the last bit is valid. */ + andl $0x1, %ecx +# else + /* Only last 4 bits are valid. */ + andl $0xf, %ecx +# endif + + testl %ecx, %ecx + jne L(last_vector) + + addl $4, %edx +# ifdef USE_AS_STRNCMP + /* Return 0 if the current offset (%rdx) >= the maximum offset + (%r11). */ + cmpq %r11, %rdx + jae L(zero) +# endif + +L(cross_page_4bytes): +# endif + /* Less than 4 bytes to check, try one byte/dword at a time. 
*/ +# ifdef USE_AS_STRNCMP + cmpq %r11, %rdx + jae L(zero) +# endif +# ifdef USE_AS_WCSCMP + movl (%rdi, %rdx), %eax + movl (%rsi, %rdx), %ecx +# else + movzbl (%rdi, %rdx), %eax + movzbl (%rsi, %rdx), %ecx +# endif + testl %eax, %eax + jne L(cross_page_loop) + subl %ecx, %eax + ret +END (STRCMP) +#endif diff --git a/sysdeps/x86_64/multiarch/strcmp.c b/sysdeps/x86_64/multiarch/strcmp.c index 6a9dca438..62b7abeee 100644 --- a/sysdeps/x86_64/multiarch/strcmp.c +++ b/sysdeps/x86_64/multiarch/strcmp.c @@ -30,16 +30,29 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden; static inline void * IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2); + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW) + && CPU_FEATURE_USABLE_P (cpu_features, BMI2) + && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_AVX2_STRCMP)) + return OPTIMIZE (evex); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2); + } if (CPU_FEATURES_ARCH_P (cpu_features, Fast_Unaligned_Load)) return OPTIMIZE (sse2_unaligned); diff --git a/sysdeps/x86_64/multiarch/strcpy-avx2-rtm.S b/sysdeps/x86_64/multiarch/strcpy-avx2-rtm.S new file mode 100644 index 000000000..c2c581ecf --- /dev/null +++ b/sysdeps/x86_64/multiarch/strcpy-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef STRCPY +# define STRCPY __strcpy_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "strcpy-avx2.S" diff --git a/sysdeps/x86_64/multiarch/strcpy-avx2.S b/sysdeps/x86_64/multiarch/strcpy-avx2.S index b7629eaf1..5b6506d58 100644 --- a/sysdeps/x86_64/multiarch/strcpy-avx2.S +++ b/sysdeps/x86_64/multiarch/strcpy-avx2.S @@ -37,6 +37,10 @@ # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + /* zero register */ #define xmmZ xmm0 #define ymmZ ymm0 @@ -46,7 +50,7 @@ # ifndef USE_AS_STRCAT - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (STRCPY) # ifdef USE_AS_STRNCPY mov %RDX_LP, %R8_LP @@ -369,8 +373,8 @@ L(CopyVecSizeExit): lea 1(%rdi), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN .p2align 4 L(CopyTwoVecSize1): @@ -553,8 +557,7 @@ L(Exit1): lea 2(%rdi), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Exit2): @@ -569,8 +572,7 @@ L(Exit2): lea 3(%rdi), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Exit3): @@ -584,8 +586,7 @@ L(Exit3): lea 4(%rdi), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Exit4_7): @@ -602,8 +603,7 @@ L(Exit4_7): lea 1(%rdi, 
%rdx), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Exit8_15): @@ -620,8 +620,7 @@ L(Exit8_15): lea 1(%rdi, %rdx), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Exit16_31): @@ -638,8 +637,7 @@ L(Exit16_31): lea 1(%rdi, %rdx), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Exit32_63): @@ -656,8 +654,7 @@ L(Exit32_63): lea 1(%rdi, %rdx), %rdi jnz L(StrncpyFillTailWithZero) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # ifdef USE_AS_STRNCPY @@ -671,8 +668,7 @@ L(StrncpyExit1): # ifdef USE_AS_STRCAT movb $0, 1(%rdi) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit2): @@ -684,8 +680,7 @@ L(StrncpyExit2): # ifdef USE_AS_STRCAT movb $0, 2(%rdi) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit3_4): @@ -699,8 +694,7 @@ L(StrncpyExit3_4): # ifdef USE_AS_STRCAT movb $0, (%rdi, %r8) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit5_8): @@ -714,8 +708,7 @@ L(StrncpyExit5_8): # ifdef USE_AS_STRCAT movb $0, (%rdi, %r8) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit9_16): @@ -729,8 +722,7 @@ L(StrncpyExit9_16): # ifdef USE_AS_STRCAT movb $0, (%rdi, %r8) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit17_32): @@ -744,8 +736,7 @@ L(StrncpyExit17_32): # ifdef USE_AS_STRCAT movb $0, (%rdi, %r8) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit33_64): @@ -760,8 +751,7 @@ L(StrncpyExit33_64): # ifdef USE_AS_STRCAT movb $0, (%rdi, %r8) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(StrncpyExit65): @@ -778,50 +768,43 @@ L(StrncpyExit65): # ifdef USE_AS_STRCAT movb $0, 65(%rdi) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # ifndef USE_AS_STRCAT .p2align 4 L(Fill1): mov %dl, (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Fill2): mov %dx, (%rdi) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Fill3_4): mov %dx, (%rdi) mov %dx, -2(%rdi, %r8) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Fill5_8): mov %edx, (%rdi) mov %edx, -4(%rdi, %r8) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Fill9_16): mov %rdx, (%rdi) mov %rdx, -8(%rdi, %r8) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(Fill17_32): vmovdqu %xmmZ, (%rdi) vmovdqu %xmmZ, -16(%rdi, %r8) - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(CopyVecSizeUnalignedVec2): @@ -898,8 +881,7 @@ L(Fill): cmp $1, %r8d ja L(Fill2) je L(Fill1) - VZEROUPPER - ret + VZEROUPPER_RETURN /* end of ifndef USE_AS_STRCAT */ # endif @@ -929,8 +911,7 @@ L(UnalignedFourVecSizeLeaveCase3): # ifdef USE_AS_STRCAT movb $0, (VEC_SIZE * 4)(%rdi) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(UnalignedFourVecSizeLeaveCase2): @@ -1001,16 +982,14 @@ L(StrncpyExit): # ifdef USE_AS_STRCAT movb $0, (%rdi) # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(ExitZero): # ifndef USE_AS_STRCAT mov %rdi, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN # endif diff --git a/sysdeps/x86_64/multiarch/strcpy-evex.S b/sysdeps/x86_64/multiarch/strcpy-evex.S new file mode 100644 index 000000000..a343a1a69 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strcpy-evex.S @@ -0,0 +1,1003 @@ +/* strcpy with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# ifndef USE_AS_STRCAT +# include + +# ifndef STRCPY +# define STRCPY __strcpy_evex +# endif + +# endif + +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 + +/* Number of bytes in a vector register */ +# ifndef VEC_SIZE +# define VEC_SIZE 32 +# endif + +# define XMM2 xmm18 +# define XMM3 xmm19 + +# define YMM2 ymm18 +# define YMM3 ymm19 +# define YMM4 ymm20 +# define YMM5 ymm21 +# define YMM6 ymm22 +# define YMM7 ymm23 + +# ifndef USE_AS_STRCAT + +/* zero register */ +# define XMMZERO xmm16 +# define YMMZERO ymm16 +# define YMM1 ymm17 + + .section .text.evex,"ax",@progbits +ENTRY (STRCPY) +# ifdef USE_AS_STRNCPY + mov %RDX_LP, %R8_LP + test %R8_LP, %R8_LP + jz L(ExitZero) +# endif + mov %rsi, %rcx +# ifndef USE_AS_STPCPY + mov %rdi, %rax /* save result */ +# endif + + vpxorq %XMMZERO, %XMMZERO, %XMMZERO +# endif + + and $((VEC_SIZE * 4) - 1), %ecx + cmp $(VEC_SIZE * 2), %ecx + jbe L(SourceStringAlignmentLessTwoVecSize) + + and $-VEC_SIZE, %rsi + and $(VEC_SIZE - 1), %ecx + + vpcmpb $0, (%rsi), %YMMZERO, %k0 + kmovd %k0, %edx + shr %cl, %rdx + +# ifdef USE_AS_STRNCPY +# if defined USE_AS_STPCPY || defined USE_AS_STRCAT + mov $VEC_SIZE, %r10 + sub %rcx, %r10 + cmp %r10, %r8 +# else + mov $(VEC_SIZE + 1), %r10 + sub %rcx, %r10 + cmp %r10, %r8 +# endif + jbe L(CopyVecSizeTailCase2OrCase3) +# endif + test %edx, %edx + jnz L(CopyVecSizeTail) + + vpcmpb $0, VEC_SIZE(%rsi), %YMMZERO, %k1 + kmovd %k1, %edx + +# ifdef USE_AS_STRNCPY + add $VEC_SIZE, %r10 + cmp %r10, %r8 + jbe L(CopyTwoVecSizeCase2OrCase3) +# endif + test %edx, %edx + jnz L(CopyTwoVecSize) + + VMOVU (%rsi, %rcx), %YMM2 /* copy VEC_SIZE bytes */ + VMOVU %YMM2, (%rdi) + +/* If source address alignment != destination address alignment */ + .p2align 4 +L(UnalignVecSizeBoth): + sub %rcx, %rdi +# ifdef USE_AS_STRNCPY + add %rcx, %r8 + sbb %rcx, %rcx + or %rcx, %r8 +# endif + mov $VEC_SIZE, %rcx + VMOVA (%rsi, %rcx), %YMM2 + VMOVU %YMM2, (%rdi, %rcx) + VMOVA VEC_SIZE(%rsi, %rcx), %YMM2 + vpcmpb $0, %YMM2, %YMMZERO, %k0 + kmovd %k0, %edx + add $VEC_SIZE, %rcx +# ifdef USE_AS_STRNCPY + sub $(VEC_SIZE * 3), %r8 + jbe L(CopyVecSizeCase2OrCase3) +# endif + test %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec2) +# else + jnz L(CopyVecSize) +# endif + + VMOVU %YMM2, (%rdi, %rcx) + VMOVA VEC_SIZE(%rsi, %rcx), %YMM3 + vpcmpb $0, %YMM3, %YMMZERO, %k0 + kmovd %k0, %edx + add $VEC_SIZE, %rcx +# ifdef USE_AS_STRNCPY + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) +# endif + test %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec3) +# else + jnz L(CopyVecSize) +# endif + + VMOVU %YMM3, (%rdi, %rcx) + VMOVA VEC_SIZE(%rsi, %rcx), %YMM4 + vpcmpb $0, %YMM4, %YMMZERO, %k0 + kmovd %k0, %edx + add $VEC_SIZE, %rcx +# ifdef USE_AS_STRNCPY + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) +# endif + 
test %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec4) +# else + jnz L(CopyVecSize) +# endif + + VMOVU %YMM4, (%rdi, %rcx) + VMOVA VEC_SIZE(%rsi, %rcx), %YMM2 + vpcmpb $0, %YMM2, %YMMZERO, %k0 + kmovd %k0, %edx + add $VEC_SIZE, %rcx +# ifdef USE_AS_STRNCPY + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) +# endif + test %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec2) +# else + jnz L(CopyVecSize) +# endif + + VMOVU %YMM2, (%rdi, %rcx) + VMOVA VEC_SIZE(%rsi, %rcx), %YMM2 + vpcmpb $0, %YMM2, %YMMZERO, %k0 + kmovd %k0, %edx + add $VEC_SIZE, %rcx +# ifdef USE_AS_STRNCPY + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) +# endif + test %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec2) +# else + jnz L(CopyVecSize) +# endif + + VMOVA VEC_SIZE(%rsi, %rcx), %YMM3 + VMOVU %YMM2, (%rdi, %rcx) + vpcmpb $0, %YMM3, %YMMZERO, %k0 + kmovd %k0, %edx + add $VEC_SIZE, %rcx +# ifdef USE_AS_STRNCPY + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) +# endif + test %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec3) +# else + jnz L(CopyVecSize) +# endif + + VMOVU %YMM3, (%rdi, %rcx) + mov %rsi, %rdx + lea VEC_SIZE(%rsi, %rcx), %rsi + and $-(VEC_SIZE * 4), %rsi + sub %rsi, %rdx + sub %rdx, %rdi +# ifdef USE_AS_STRNCPY + lea (VEC_SIZE * 8)(%r8, %rdx), %r8 +# endif +L(UnalignedFourVecSizeLoop): + VMOVA (%rsi), %YMM4 + VMOVA VEC_SIZE(%rsi), %YMM5 + VMOVA (VEC_SIZE * 2)(%rsi), %YMM6 + VMOVA (VEC_SIZE * 3)(%rsi), %YMM7 + vpminub %YMM5, %YMM4, %YMM2 + vpminub %YMM7, %YMM6, %YMM3 + vpminub %YMM2, %YMM3, %YMM2 + /* If K7 != 0, there is a null byte. */ + vpcmpb $0, %YMM2, %YMMZERO, %k7 + kmovd %k7, %edx +# ifdef USE_AS_STRNCPY + sub $(VEC_SIZE * 4), %r8 + jbe L(UnalignedLeaveCase2OrCase3) +# endif + test %edx, %edx + jnz L(UnalignedFourVecSizeLeave) + +L(UnalignedFourVecSizeLoop_start): + add $(VEC_SIZE * 4), %rdi + add $(VEC_SIZE * 4), %rsi + VMOVU %YMM4, -(VEC_SIZE * 4)(%rdi) + VMOVA (%rsi), %YMM4 + VMOVU %YMM5, -(VEC_SIZE * 3)(%rdi) + VMOVA VEC_SIZE(%rsi), %YMM5 + vpminub %YMM5, %YMM4, %YMM2 + VMOVU %YMM6, -(VEC_SIZE * 2)(%rdi) + VMOVA (VEC_SIZE * 2)(%rsi), %YMM6 + VMOVU %YMM7, -VEC_SIZE(%rdi) + VMOVA (VEC_SIZE * 3)(%rsi), %YMM7 + vpminub %YMM7, %YMM6, %YMM3 + vpminub %YMM2, %YMM3, %YMM2 + /* If K7 != 0, there is a null byte. 
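
The reduction is sound because the unsigned byte minimum of several vectors is zero in a lane exactly when one of the inputs is zero there. One lane of the three-vpminub tree, as a scalar C sketch (any_zero4 is an illustrative name):

static int
any_zero4 (unsigned char a, unsigned char b,
           unsigned char c, unsigned char d)
{
  unsigned char ab = a < b ? a : b;     // vpminub %YMM5, %YMM4, %YMM2
  unsigned char cd = c < d ? c : d;     // vpminub %YMM7, %YMM6, %YMM3
  unsigned char m = ab < cd ? ab : cd;  // vpminub %YMM2, %YMM3, %YMM2
  return m == 0;                        // vpcmpb $0 against YMMZERO
}
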
*/ + vpcmpb $0, %YMM2, %YMMZERO, %k7 + kmovd %k7, %edx +# ifdef USE_AS_STRNCPY + sub $(VEC_SIZE * 4), %r8 + jbe L(UnalignedLeaveCase2OrCase3) +# endif + test %edx, %edx + jz L(UnalignedFourVecSizeLoop_start) + +L(UnalignedFourVecSizeLeave): + vpcmpb $0, %YMM4, %YMMZERO, %k1 + kmovd %k1, %edx + test %edx, %edx + jnz L(CopyVecSizeUnaligned_0) + + vpcmpb $0, %YMM5, %YMMZERO, %k2 + kmovd %k2, %ecx + test %ecx, %ecx + jnz L(CopyVecSizeUnaligned_16) + + vpcmpb $0, %YMM6, %YMMZERO, %k3 + kmovd %k3, %edx + test %edx, %edx + jnz L(CopyVecSizeUnaligned_32) + + vpcmpb $0, %YMM7, %YMMZERO, %k4 + kmovd %k4, %ecx + bsf %ecx, %edx + VMOVU %YMM4, (%rdi) + VMOVU %YMM5, VEC_SIZE(%rdi) + VMOVU %YMM6, (VEC_SIZE * 2)(%rdi) +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT +# ifdef USE_AS_STPCPY + lea (VEC_SIZE * 3)(%rdi, %rdx), %rax +# endif + VMOVU %YMM7, (VEC_SIZE * 3)(%rdi) + add $(VEC_SIZE - 1), %r8 + sub %rdx, %r8 + lea ((VEC_SIZE * 3) + 1)(%rdi, %rdx), %rdi + jmp L(StrncpyFillTailWithZero) +# else + add $(VEC_SIZE * 3), %rsi + add $(VEC_SIZE * 3), %rdi + jmp L(CopyVecSizeExit) +# endif + +/* If source address alignment == destination address alignment */ + +L(SourceStringAlignmentLessTwoVecSize): + VMOVU (%rsi), %YMM3 + VMOVU VEC_SIZE(%rsi), %YMM2 + vpcmpb $0, %YMM3, %YMMZERO, %k0 + kmovd %k0, %edx + +# ifdef USE_AS_STRNCPY +# if defined USE_AS_STPCPY || defined USE_AS_STRCAT + cmp $VEC_SIZE, %r8 +# else + cmp $(VEC_SIZE + 1), %r8 +# endif + jbe L(CopyVecSizeTail1Case2OrCase3) +# endif + test %edx, %edx + jnz L(CopyVecSizeTail1) + + VMOVU %YMM3, (%rdi) + vpcmpb $0, %YMM2, %YMMZERO, %k0 + kmovd %k0, %edx + +# ifdef USE_AS_STRNCPY +# if defined USE_AS_STPCPY || defined USE_AS_STRCAT + cmp $(VEC_SIZE * 2), %r8 +# else + cmp $((VEC_SIZE * 2) + 1), %r8 +# endif + jbe L(CopyTwoVecSize1Case2OrCase3) +# endif + test %edx, %edx + jnz L(CopyTwoVecSize1) + + and $-VEC_SIZE, %rsi + and $(VEC_SIZE - 1), %ecx + jmp L(UnalignVecSizeBoth) + +/*------End of main part with loops---------------------*/ + +/* Case1 */ + +# if (!defined USE_AS_STRNCPY) || (defined USE_AS_STRCAT) + .p2align 4 +L(CopyVecSize): + add %rcx, %rdi +# endif +L(CopyVecSizeTail): + add %rcx, %rsi +L(CopyVecSizeTail1): + bsf %edx, %edx +L(CopyVecSizeExit): + cmp $32, %edx + jae L(Exit32_63) + cmp $16, %edx + jae L(Exit16_31) + cmp $8, %edx + jae L(Exit8_15) + cmp $4, %edx + jae L(Exit4_7) + cmp $3, %edx + je L(Exit3) + cmp $1, %edx + ja L(Exit2) + je L(Exit1) + movb $0, (%rdi) +# ifdef USE_AS_STPCPY + lea (%rdi), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub $1, %r8 + lea 1(%rdi), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(CopyTwoVecSize1): + add $VEC_SIZE, %rsi + add $VEC_SIZE, %rdi +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub $VEC_SIZE, %r8 +# endif + jmp L(CopyVecSizeTail1) + + .p2align 4 +L(CopyTwoVecSize): + bsf %edx, %edx + add %rcx, %rsi + add $VEC_SIZE, %edx + sub %ecx, %edx + jmp L(CopyVecSizeExit) + + .p2align 4 +L(CopyVecSizeUnaligned_0): + bsf %edx, %edx +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT +# ifdef USE_AS_STPCPY + lea (%rdi, %rdx), %rax +# endif + VMOVU %YMM4, (%rdi) + add $((VEC_SIZE * 4) - 1), %r8 + sub %rdx, %r8 + lea 1(%rdi, %rdx), %rdi + jmp L(StrncpyFillTailWithZero) +# else + jmp L(CopyVecSizeExit) +# endif + + .p2align 4 +L(CopyVecSizeUnaligned_16): + bsf %ecx, %edx + VMOVU %YMM4, (%rdi) +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT +# ifdef USE_AS_STPCPY + lea VEC_SIZE(%rdi, %rdx), %rax +# endif + VMOVU %YMM5, VEC_SIZE(%rdi) + 
add $((VEC_SIZE * 3) - 1), %r8 + sub %rdx, %r8 + lea (VEC_SIZE + 1)(%rdi, %rdx), %rdi + jmp L(StrncpyFillTailWithZero) +# else + add $VEC_SIZE, %rsi + add $VEC_SIZE, %rdi + jmp L(CopyVecSizeExit) +# endif + + .p2align 4 +L(CopyVecSizeUnaligned_32): + bsf %edx, %edx + VMOVU %YMM4, (%rdi) + VMOVU %YMM5, VEC_SIZE(%rdi) +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT +# ifdef USE_AS_STPCPY + lea (VEC_SIZE * 2)(%rdi, %rdx), %rax +# endif + VMOVU %YMM6, (VEC_SIZE * 2)(%rdi) + add $((VEC_SIZE * 2) - 1), %r8 + sub %rdx, %r8 + lea ((VEC_SIZE * 2) + 1)(%rdi, %rdx), %rdi + jmp L(StrncpyFillTailWithZero) +# else + add $(VEC_SIZE * 2), %rsi + add $(VEC_SIZE * 2), %rdi + jmp L(CopyVecSizeExit) +# endif + +# ifdef USE_AS_STRNCPY +# ifndef USE_AS_STRCAT + .p2align 4 +L(CopyVecSizeUnalignedVec6): + VMOVU %YMM6, (%rdi, %rcx) + jmp L(CopyVecSizeVecExit) + + .p2align 4 +L(CopyVecSizeUnalignedVec5): + VMOVU %YMM5, (%rdi, %rcx) + jmp L(CopyVecSizeVecExit) + + .p2align 4 +L(CopyVecSizeUnalignedVec4): + VMOVU %YMM4, (%rdi, %rcx) + jmp L(CopyVecSizeVecExit) + + .p2align 4 +L(CopyVecSizeUnalignedVec3): + VMOVU %YMM3, (%rdi, %rcx) + jmp L(CopyVecSizeVecExit) +# endif + +/* Case2 */ + + .p2align 4 +L(CopyVecSizeCase2): + add $VEC_SIZE, %r8 + add %rcx, %rdi + add %rcx, %rsi + bsf %edx, %edx + cmp %r8d, %edx + jb L(CopyVecSizeExit) + jmp L(StrncpyExit) + + .p2align 4 +L(CopyTwoVecSizeCase2): + add %rcx, %rsi + bsf %edx, %edx + add $VEC_SIZE, %edx + sub %ecx, %edx + cmp %r8d, %edx + jb L(CopyVecSizeExit) + jmp L(StrncpyExit) + +L(CopyVecSizeTailCase2): + add %rcx, %rsi + bsf %edx, %edx + cmp %r8d, %edx + jb L(CopyVecSizeExit) + jmp L(StrncpyExit) + +L(CopyVecSizeTail1Case2): + bsf %edx, %edx + cmp %r8d, %edx + jb L(CopyVecSizeExit) + jmp L(StrncpyExit) + +/* Case2 or Case3, Case3 */ + + .p2align 4 +L(CopyVecSizeCase2OrCase3): + test %rdx, %rdx + jnz L(CopyVecSizeCase2) +L(CopyVecSizeCase3): + add $VEC_SIZE, %r8 + add %rcx, %rdi + add %rcx, %rsi + jmp L(StrncpyExit) + + .p2align 4 +L(CopyTwoVecSizeCase2OrCase3): + test %rdx, %rdx + jnz L(CopyTwoVecSizeCase2) + add %rcx, %rsi + jmp L(StrncpyExit) + + .p2align 4 +L(CopyVecSizeTailCase2OrCase3): + test %rdx, %rdx + jnz L(CopyVecSizeTailCase2) + add %rcx, %rsi + jmp L(StrncpyExit) + + .p2align 4 +L(CopyTwoVecSize1Case2OrCase3): + add $VEC_SIZE, %rdi + add $VEC_SIZE, %rsi + sub $VEC_SIZE, %r8 +L(CopyVecSizeTail1Case2OrCase3): + test %rdx, %rdx + jnz L(CopyVecSizeTail1Case2) + jmp L(StrncpyExit) +# endif + +/*------------End labels regarding with copying 1-VEC_SIZE bytes--and 1-(VEC_SIZE*2) bytes----*/ + + .p2align 4 +L(Exit1): + movzwl (%rsi), %edx + mov %dx, (%rdi) +# ifdef USE_AS_STPCPY + lea 1(%rdi), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub $2, %r8 + lea 2(%rdi), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(Exit2): + movzwl (%rsi), %ecx + mov %cx, (%rdi) + movb $0, 2(%rdi) +# ifdef USE_AS_STPCPY + lea 2(%rdi), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub $3, %r8 + lea 3(%rdi), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(Exit3): + mov (%rsi), %edx + mov %edx, (%rdi) +# ifdef USE_AS_STPCPY + lea 3(%rdi), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub $4, %r8 + lea 4(%rdi), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(Exit4_7): + mov (%rsi), %ecx + mov %ecx, (%rdi) + mov -3(%rsi, %rdx), %ecx + mov %ecx, -3(%rdi, %rdx) +# ifdef USE_AS_STPCPY + lea (%rdi, %rdx), %rax +# endif +# if defined 
USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub %rdx, %r8 + sub $1, %r8 + lea 1(%rdi, %rdx), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(Exit8_15): + mov (%rsi), %rcx + mov -7(%rsi, %rdx), %r9 + mov %rcx, (%rdi) + mov %r9, -7(%rdi, %rdx) +# ifdef USE_AS_STPCPY + lea (%rdi, %rdx), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub %rdx, %r8 + sub $1, %r8 + lea 1(%rdi, %rdx), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(Exit16_31): + VMOVU (%rsi), %XMM2 + VMOVU -15(%rsi, %rdx), %XMM3 + VMOVU %XMM2, (%rdi) + VMOVU %XMM3, -15(%rdi, %rdx) +# ifdef USE_AS_STPCPY + lea (%rdi, %rdx), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub %rdx, %r8 + sub $1, %r8 + lea 1(%rdi, %rdx), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + + .p2align 4 +L(Exit32_63): + VMOVU (%rsi), %YMM2 + VMOVU -31(%rsi, %rdx), %YMM3 + VMOVU %YMM2, (%rdi) + VMOVU %YMM3, -31(%rdi, %rdx) +# ifdef USE_AS_STPCPY + lea (%rdi, %rdx), %rax +# endif +# if defined USE_AS_STRNCPY && !defined USE_AS_STRCAT + sub %rdx, %r8 + sub $1, %r8 + lea 1(%rdi, %rdx), %rdi + jnz L(StrncpyFillTailWithZero) +# endif + ret + +# ifdef USE_AS_STRNCPY + + .p2align 4 +L(StrncpyExit1): + movzbl (%rsi), %edx + mov %dl, (%rdi) +# ifdef USE_AS_STPCPY + lea 1(%rdi), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, 1(%rdi) +# endif + ret + + .p2align 4 +L(StrncpyExit2): + movzwl (%rsi), %edx + mov %dx, (%rdi) +# ifdef USE_AS_STPCPY + lea 2(%rdi), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, 2(%rdi) +# endif + ret + + .p2align 4 +L(StrncpyExit3_4): + movzwl (%rsi), %ecx + movzwl -2(%rsi, %r8), %edx + mov %cx, (%rdi) + mov %dx, -2(%rdi, %r8) +# ifdef USE_AS_STPCPY + lea (%rdi, %r8), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (%rdi, %r8) +# endif + ret + + .p2align 4 +L(StrncpyExit5_8): + mov (%rsi), %ecx + mov -4(%rsi, %r8), %edx + mov %ecx, (%rdi) + mov %edx, -4(%rdi, %r8) +# ifdef USE_AS_STPCPY + lea (%rdi, %r8), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (%rdi, %r8) +# endif + ret + + .p2align 4 +L(StrncpyExit9_16): + mov (%rsi), %rcx + mov -8(%rsi, %r8), %rdx + mov %rcx, (%rdi) + mov %rdx, -8(%rdi, %r8) +# ifdef USE_AS_STPCPY + lea (%rdi, %r8), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (%rdi, %r8) +# endif + ret + + .p2align 4 +L(StrncpyExit17_32): + VMOVU (%rsi), %XMM2 + VMOVU -16(%rsi, %r8), %XMM3 + VMOVU %XMM2, (%rdi) + VMOVU %XMM3, -16(%rdi, %r8) +# ifdef USE_AS_STPCPY + lea (%rdi, %r8), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (%rdi, %r8) +# endif + ret + + .p2align 4 +L(StrncpyExit33_64): + /* 0/32, 31/16 */ + VMOVU (%rsi), %YMM2 + VMOVU -VEC_SIZE(%rsi, %r8), %YMM3 + VMOVU %YMM2, (%rdi) + VMOVU %YMM3, -VEC_SIZE(%rdi, %r8) +# ifdef USE_AS_STPCPY + lea (%rdi, %r8), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (%rdi, %r8) +# endif + ret + + .p2align 4 +L(StrncpyExit65): + /* 0/32, 32/32, 64/1 */ + VMOVU (%rsi), %YMM2 + VMOVU 32(%rsi), %YMM3 + mov 64(%rsi), %cl + VMOVU %YMM2, (%rdi) + VMOVU %YMM3, 32(%rdi) + mov %cl, 64(%rdi) +# ifdef USE_AS_STPCPY + lea 65(%rdi), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, 65(%rdi) +# endif + ret + +# ifndef USE_AS_STRCAT + + .p2align 4 +L(Fill1): + mov %dl, (%rdi) + ret + + .p2align 4 +L(Fill2): + mov %dx, (%rdi) + ret + + .p2align 4 +L(Fill3_4): + mov %dx, (%rdi) + mov %dx, -2(%rdi, %r8) + ret + + .p2align 4 +L(Fill5_8): + mov %edx, (%rdi) + mov %edx, -4(%rdi, %r8) + ret + + .p2align 4 +L(Fill9_16): + mov %rdx, (%rdi) + mov %rdx, -8(%rdi, %r8) + ret + + .p2align 4 +L(Fill17_32): + 
VMOVU %XMMZERO, (%rdi) + VMOVU %XMMZERO, -16(%rdi, %r8) + ret + + .p2align 4 +L(CopyVecSizeUnalignedVec2): + VMOVU %YMM2, (%rdi, %rcx) + + .p2align 4 +L(CopyVecSizeVecExit): + bsf %edx, %edx + add $(VEC_SIZE - 1), %r8 + add %rcx, %rdi +# ifdef USE_AS_STPCPY + lea (%rdi, %rdx), %rax +# endif + sub %rdx, %r8 + lea 1(%rdi, %rdx), %rdi + + .p2align 4 +L(StrncpyFillTailWithZero): + xor %edx, %edx + sub $VEC_SIZE, %r8 + jbe L(StrncpyFillExit) + + VMOVU %YMMZERO, (%rdi) + add $VEC_SIZE, %rdi + + mov %rdi, %rsi + and $(VEC_SIZE - 1), %esi + sub %rsi, %rdi + add %rsi, %r8 + sub $(VEC_SIZE * 4), %r8 + jb L(StrncpyFillLessFourVecSize) + +L(StrncpyFillLoopVmovdqa): + VMOVA %YMMZERO, (%rdi) + VMOVA %YMMZERO, VEC_SIZE(%rdi) + VMOVA %YMMZERO, (VEC_SIZE * 2)(%rdi) + VMOVA %YMMZERO, (VEC_SIZE * 3)(%rdi) + add $(VEC_SIZE * 4), %rdi + sub $(VEC_SIZE * 4), %r8 + jae L(StrncpyFillLoopVmovdqa) + +L(StrncpyFillLessFourVecSize): + add $(VEC_SIZE * 2), %r8 + jl L(StrncpyFillLessTwoVecSize) + VMOVA %YMMZERO, (%rdi) + VMOVA %YMMZERO, VEC_SIZE(%rdi) + add $(VEC_SIZE * 2), %rdi + sub $VEC_SIZE, %r8 + jl L(StrncpyFillExit) + VMOVA %YMMZERO, (%rdi) + add $VEC_SIZE, %rdi + jmp L(Fill) + + .p2align 4 +L(StrncpyFillLessTwoVecSize): + add $VEC_SIZE, %r8 + jl L(StrncpyFillExit) + VMOVA %YMMZERO, (%rdi) + add $VEC_SIZE, %rdi + jmp L(Fill) + + .p2align 4 +L(StrncpyFillExit): + add $VEC_SIZE, %r8 +L(Fill): + cmp $17, %r8d + jae L(Fill17_32) + cmp $9, %r8d + jae L(Fill9_16) + cmp $5, %r8d + jae L(Fill5_8) + cmp $3, %r8d + jae L(Fill3_4) + cmp $1, %r8d + ja L(Fill2) + je L(Fill1) + ret + +/* end of ifndef USE_AS_STRCAT */ +# endif + + .p2align 4 +L(UnalignedLeaveCase2OrCase3): + test %rdx, %rdx + jnz L(UnalignedFourVecSizeLeaveCase2) +L(UnalignedFourVecSizeLeaveCase3): + lea (VEC_SIZE * 4)(%r8), %rcx + and $-VEC_SIZE, %rcx + add $(VEC_SIZE * 3), %r8 + jl L(CopyVecSizeCase3) + VMOVU %YMM4, (%rdi) + sub $VEC_SIZE, %r8 + jb L(CopyVecSizeCase3) + VMOVU %YMM5, VEC_SIZE(%rdi) + sub $VEC_SIZE, %r8 + jb L(CopyVecSizeCase3) + VMOVU %YMM6, (VEC_SIZE * 2)(%rdi) + sub $VEC_SIZE, %r8 + jb L(CopyVecSizeCase3) + VMOVU %YMM7, (VEC_SIZE * 3)(%rdi) +# ifdef USE_AS_STPCPY + lea (VEC_SIZE * 4)(%rdi), %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (VEC_SIZE * 4)(%rdi) +# endif + ret + + .p2align 4 +L(UnalignedFourVecSizeLeaveCase2): + xor %ecx, %ecx + vpcmpb $0, %YMM4, %YMMZERO, %k1 + kmovd %k1, %edx + add $(VEC_SIZE * 3), %r8 + jle L(CopyVecSizeCase2OrCase3) + test %edx, %edx +# ifndef USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec4) +# else + jnz L(CopyVecSize) +# endif + vpcmpb $0, %YMM5, %YMMZERO, %k2 + kmovd %k2, %edx + VMOVU %YMM4, (%rdi) + add $VEC_SIZE, %rcx + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) + test %edx, %edx +# ifndef USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec5) +# else + jnz L(CopyVecSize) +# endif + + vpcmpb $0, %YMM6, %YMMZERO, %k3 + kmovd %k3, %edx + VMOVU %YMM5, VEC_SIZE(%rdi) + add $VEC_SIZE, %rcx + sub $VEC_SIZE, %r8 + jbe L(CopyVecSizeCase2OrCase3) + test %edx, %edx +# ifndef USE_AS_STRCAT + jnz L(CopyVecSizeUnalignedVec6) +# else + jnz L(CopyVecSize) +# endif + + vpcmpb $0, %YMM7, %YMMZERO, %k4 + kmovd %k4, %edx + VMOVU %YMM6, (VEC_SIZE * 2)(%rdi) + lea VEC_SIZE(%rdi, %rcx), %rdi + lea VEC_SIZE(%rsi, %rcx), %rsi + bsf %edx, %edx + cmp %r8d, %edx + jb L(CopyVecSizeExit) +L(StrncpyExit): + cmp $65, %r8d + je L(StrncpyExit65) + cmp $33, %r8d + jae L(StrncpyExit33_64) + cmp $17, %r8d + jae L(StrncpyExit17_32) + cmp $9, %r8d + jae L(StrncpyExit9_16) + cmp $5, %r8d + jae L(StrncpyExit5_8) + cmp $3, %r8d + jae 
L(StrncpyExit3_4) + cmp $1, %r8d + ja L(StrncpyExit2) + je L(StrncpyExit1) +# ifdef USE_AS_STPCPY + mov %rdi, %rax +# endif +# ifdef USE_AS_STRCAT + movb $0, (%rdi) +# endif + ret + + .p2align 4 +L(ExitZero): +# ifndef USE_AS_STRCAT + mov %rdi, %rax +# endif + ret + +# endif + +# ifndef USE_AS_STRCAT +END (STRCPY) +# else +END (STRCAT) +# endif +#endif diff --git a/sysdeps/x86_64/multiarch/strlen-avx2-rtm.S b/sysdeps/x86_64/multiarch/strlen-avx2-rtm.S new file mode 100644 index 000000000..75b4b7612 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strlen-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef STRLEN +# define STRLEN __strlen_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "strlen-avx2.S" diff --git a/sysdeps/x86_64/multiarch/strlen-avx2.S b/sysdeps/x86_64/multiarch/strlen-avx2.S index caa615970..b282a7561 100644 --- a/sysdeps/x86_64/multiarch/strlen-avx2.S +++ b/sysdeps/x86_64/multiarch/strlen-avx2.S @@ -27,370 +27,531 @@ # ifdef USE_AS_WCSLEN # define VPCMPEQ vpcmpeqd # define VPMINU vpminud +# define CHAR_SIZE 4 # else # define VPCMPEQ vpcmpeqb # define VPMINU vpminub +# define CHAR_SIZE 1 # endif # ifndef VZEROUPPER # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + # define VEC_SIZE 32 +# define PAGE_SIZE 4096 +# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE) - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (STRLEN) # ifdef USE_AS_STRNLEN - /* Check for zero length. */ + /* Check zero length. */ +# ifdef __ILP32__ + /* Clear upper bits. */ + and %RSI_LP, %RSI_LP +# else test %RSI_LP, %RSI_LP - jz L(zero) -# ifdef USE_AS_WCSLEN - shl $2, %RSI_LP -# elif defined __ILP32__ - /* Clear the upper 32 bits. */ - movl %esi, %esi # endif + jz L(zero) + /* Store max len in R8_LP before adjusting if using WCSLEN. */ mov %RSI_LP, %R8_LP # endif - movl %edi, %ecx + movl %edi, %eax movq %rdi, %rdx vpxor %xmm0, %xmm0, %xmm0 - + /* Clear high bits from edi. Only keeping bits relevant to page + cross check. */ + andl $(PAGE_SIZE - 1), %eax /* Check if we may cross page boundary with one vector load. */ - andl $(2 * VEC_SIZE - 1), %ecx - cmpl $VEC_SIZE, %ecx - ja L(cros_page_boundary) + cmpl $(PAGE_SIZE - VEC_SIZE), %eax + ja L(cross_page_boundary) /* Check the first VEC_SIZE bytes. */ - VPCMPEQ (%rdi), %ymm0, %ymm1 + VPCMPEQ (%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax - testl %eax, %eax - # ifdef USE_AS_STRNLEN - jnz L(first_vec_x0_check) - /* Adjust length and check the end of data. */ - subq $VEC_SIZE, %rsi - jbe L(max) -# else - jnz L(first_vec_x0) + /* If length < VEC_SIZE handle special. */ + cmpq $CHAR_PER_VEC, %rsi + jbe L(first_vec_x0) # endif - - /* Align data for aligned loads in the loop. */ - addq $VEC_SIZE, %rdi - andl $(VEC_SIZE - 1), %ecx - andq $-VEC_SIZE, %rdi + /* If empty continue to aligned_more. Otherwise return bit + position of first match. */ + testl %eax, %eax + jz L(aligned_more) + tzcntl %eax, %eax +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %eax +# endif + VZEROUPPER_RETURN # ifdef USE_AS_STRNLEN - /* Adjust length. */ - addq %rcx, %rsi +L(zero): + xorl %eax, %eax + ret - subq $(VEC_SIZE * 4), %rsi - jbe L(last_4x_vec_or_less) + .p2align 4 +L(first_vec_x0): + /* Set bit for max len so that tzcnt will return min of max len + and position of first match. 
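
Equivalently, in C (a minimal sketch; bounded_first_match is an illustrative name, and maxlen must be below 64, which holds here because it is at most CHAR_PER_VEC):

#include <stdint.h>

static unsigned
bounded_first_match (uint64_t match_mask, unsigned maxlen)
{
  match_mask |= (uint64_t) 1 << maxlen;            // btsq %rsi, %rax
  // The lowest set bit is now either a real match below maxlen or
  // the sentinel bit at maxlen itself, i.e. the minimum of the two.
  return (unsigned) __builtin_ctzll (match_mask);  // tzcnt
}
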
*/ +# ifdef USE_AS_WCSLEN + /* NB: Multiply length by 4 to get byte count. */ + sall $2, %esi +# endif + btsq %rsi, %rax + tzcntl %eax, %eax +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %eax +# endif + VZEROUPPER_RETURN # endif - jmp L(more_4x_vec) .p2align 4 -L(cros_page_boundary): - andl $(VEC_SIZE - 1), %ecx - andq $-VEC_SIZE, %rdi - VPCMPEQ (%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - /* Remove the leading bytes. */ - sarl %cl, %eax - testl %eax, %eax - jz L(aligned_more) +L(first_vec_x1): tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ # ifdef USE_AS_STRNLEN - /* Check the end of data. */ - cmpq %rax, %rsi - jbe L(max) + /* Use ecx which was computed earlier to compute correct value. + */ +# ifdef USE_AS_WCSLEN + leal -(VEC_SIZE * 4 + 1)(%rax, %rcx, 4), %eax +# else + subl $(VEC_SIZE * 4 + 1), %ecx + addl %ecx, %eax +# endif +# else + subl %edx, %edi + incl %edi + addl %edi, %eax # endif - addq %rdi, %rax - addq %rcx, %rax - subq %rdx, %rax # ifdef USE_AS_WCSLEN - shrq $2, %rax + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %eax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 -L(aligned_more): +L(first_vec_x2): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ # ifdef USE_AS_STRNLEN - /* "rcx" is less than VEC_SIZE. Calculate "rdx + rcx - VEC_SIZE" - with "rdx - (VEC_SIZE - rcx)" instead of "(rdx + rcx) - VEC_SIZE" - to void possible addition overflow. */ - negq %rcx - addq $VEC_SIZE, %rcx - - /* Check the end of data. */ - subq %rcx, %rsi - jbe L(max) + /* Use ecx which was computed earlier to compute correct value. + */ +# ifdef USE_AS_WCSLEN + leal -(VEC_SIZE * 3 + 1)(%rax, %rcx, 4), %eax +# else + subl $(VEC_SIZE * 3 + 1), %ecx + addl %ecx, %eax +# endif +# else + subl %edx, %edi + addl $(VEC_SIZE + 1), %edi + addl %edi, %eax # endif +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %eax +# endif + VZEROUPPER_RETURN - addq $VEC_SIZE, %rdi + .p2align 4 +L(first_vec_x3): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ +# ifdef USE_AS_STRNLEN + /* Use ecx which was computed earlier to compute correct value. + */ +# ifdef USE_AS_WCSLEN + leal -(VEC_SIZE * 2 + 1)(%rax, %rcx, 4), %eax +# else + subl $(VEC_SIZE * 2 + 1), %ecx + addl %ecx, %eax +# endif +# else + subl %edx, %edi + addl $(VEC_SIZE * 2 + 1), %edi + addl %edi, %eax +# endif +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %eax +# endif + VZEROUPPER_RETURN + .p2align 4 +L(first_vec_x4): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ # ifdef USE_AS_STRNLEN - subq $(VEC_SIZE * 4), %rsi - jbe L(last_4x_vec_or_less) + /* Use ecx which was computed earlier to compute correct value. + */ +# ifdef USE_AS_WCSLEN + leal -(VEC_SIZE * 1 + 1)(%rax, %rcx, 4), %eax +# else + subl $(VEC_SIZE + 1), %ecx + addl %ecx, %eax +# endif +# else + subl %edx, %edi + addl $(VEC_SIZE * 3 + 1), %edi + addl %edi, %eax # endif +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %eax +# endif + VZEROUPPER_RETURN -L(more_4x_vec): + .p2align 5 +L(aligned_more): + /* Align data to VEC_SIZE - 1. This is the same number of + instructions as using andq with -VEC_SIZE but saves 4 bytes of + code on the x4 check. 
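
The relation between the two idioms, as a C sketch (align_and/align_or are illustrative helpers; 31 stands for VEC_SIZE - 1):

#include <stdint.h>

// andq $-VEC_SIZE rounds down to the start of the 32-byte block;
// orq $(VEC_SIZE - 1) points at its last byte instead.  Hence
// align_or (p) + 1 == align_and (p) + 32, which is why the aligned
// loads that follow use a 1(%rdi) displacement after the orq form.
static uintptr_t align_and (uintptr_t p) { return p & ~(uintptr_t) 31; }
static uintptr_t align_or (uintptr_t p) { return p | (uintptr_t) 31; }
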
*/ + orq $(VEC_SIZE - 1), %rdi +L(cross_page_continue): /* Check the first 4 * VEC_SIZE. Only one VEC_SIZE at a time since data is only aligned to VEC_SIZE. */ - VPCMPEQ (%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x0) - - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1 +# ifdef USE_AS_STRNLEN + /* + 1 because rdi is aligned to VEC_SIZE - 1. + CHAR_SIZE + because it simplies the logic in last_4x_vec_or_less. */ + leaq (VEC_SIZE * 4 + CHAR_SIZE + 1)(%rdi), %rcx + subq %rdx, %rcx +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %ecx +# endif +# endif + /* Load first VEC regardless. */ + VPCMPEQ 1(%rdi), %ymm0, %ymm1 +# ifdef USE_AS_STRNLEN + /* Adjust length. If near end handle specially. */ + subq %rcx, %rsi + jb L(last_4x_vec_or_less) +# endif vpmovmskb %ymm1, %eax testl %eax, %eax jnz L(first_vec_x1) - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1 + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax testl %eax, %eax jnz L(first_vec_x2) - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1 + VPCMPEQ (VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax testl %eax, %eax jnz L(first_vec_x3) - addq $(VEC_SIZE * 4), %rdi - -# ifdef USE_AS_STRNLEN - subq $(VEC_SIZE * 4), %rsi - jbe L(last_4x_vec_or_less) -# endif - - /* Align data to 4 * VEC_SIZE. */ - movq %rdi, %rcx - andl $(4 * VEC_SIZE - 1), %ecx - andq $-(4 * VEC_SIZE), %rdi + VPCMPEQ (VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + testl %eax, %eax + jnz L(first_vec_x4) + /* Align data to VEC_SIZE * 4 - 1. */ # ifdef USE_AS_STRNLEN - /* Adjust length. */ + /* Before adjusting length check if at last VEC_SIZE * 4. */ + cmpq $(CHAR_PER_VEC * 4 - 1), %rsi + jbe L(last_4x_vec_or_less_load) + incq %rdi + movl %edi, %ecx + orq $(VEC_SIZE * 4 - 1), %rdi + andl $(VEC_SIZE * 4 - 1), %ecx +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %ecx +# endif + /* Readjust length. */ addq %rcx, %rsi +# else + incq %rdi + orq $(VEC_SIZE * 4 - 1), %rdi # endif - + /* Compare 4 * VEC at a time forward. */ .p2align 4 L(loop_4x_vec): - /* Compare 4 * VEC at a time forward. */ - vmovdqa (%rdi), %ymm1 - vmovdqa VEC_SIZE(%rdi), %ymm2 - vmovdqa (VEC_SIZE * 2)(%rdi), %ymm3 - vmovdqa (VEC_SIZE * 3)(%rdi), %ymm4 - VPMINU %ymm1, %ymm2, %ymm5 - VPMINU %ymm3, %ymm4, %ymm6 - VPMINU %ymm5, %ymm6, %ymm5 - +# ifdef USE_AS_STRNLEN + /* Break if at end of length. */ + subq $(CHAR_PER_VEC * 4), %rsi + jb L(last_4x_vec_or_less_cmpeq) +# endif + /* Save some code size by microfusing VPMINU with the load. + Since the matches in ymm2/ymm4 can only be returned if there + where no matches in ymm1/ymm3 respectively there is no issue + with overlap. */ + vmovdqa 1(%rdi), %ymm1 + VPMINU (VEC_SIZE + 1)(%rdi), %ymm1, %ymm2 + vmovdqa (VEC_SIZE * 2 + 1)(%rdi), %ymm3 + VPMINU (VEC_SIZE * 3 + 1)(%rdi), %ymm3, %ymm4 + + VPMINU %ymm2, %ymm4, %ymm5 VPCMPEQ %ymm5, %ymm0, %ymm5 - vpmovmskb %ymm5, %eax - testl %eax, %eax - jnz L(4x_vec_end) + vpmovmskb %ymm5, %ecx - addq $(VEC_SIZE * 4), %rdi + subq $-(VEC_SIZE * 4), %rdi + testl %ecx, %ecx + jz L(loop_4x_vec) -# ifndef USE_AS_STRNLEN - jmp L(loop_4x_vec) -# else - subq $(VEC_SIZE * 4), %rsi - ja L(loop_4x_vec) -L(last_4x_vec_or_less): - /* Less than 4 * VEC and aligned to VEC_SIZE. 
*/ - addl $(VEC_SIZE * 2), %esi - jle L(last_2x_vec) - - VPCMPEQ (%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x0) - - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1 + VPCMPEQ %ymm1, %ymm0, %ymm1 vpmovmskb %ymm1, %eax + subq %rdx, %rdi testl %eax, %eax - jnz L(first_vec_x1) + jnz L(last_vec_return_x0) - VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax + VPCMPEQ %ymm2, %ymm0, %ymm2 + vpmovmskb %ymm2, %eax testl %eax, %eax + jnz L(last_vec_return_x1) - jnz L(first_vec_x2_check) - subl $VEC_SIZE, %esi - jle L(max) + /* Combine last 2 VEC. */ + VPCMPEQ %ymm3, %ymm0, %ymm3 + vpmovmskb %ymm3, %eax + /* rcx has combined result from all 4 VEC. It will only be used + if the first 3 other VEC all did not contain a match. */ + salq $32, %rcx + orq %rcx, %rax + tzcntq %rax, %rax + subq $(VEC_SIZE * 2 - 1), %rdi + addq %rdi, %rax +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrq $2, %rax +# endif + VZEROUPPER_RETURN - VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1 - vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x3_check) - movq %r8, %rax +# ifdef USE_AS_STRNLEN + .p2align 4 +L(last_4x_vec_or_less_load): + /* Depending on entry adjust rdi / prepare first VEC in ymm1. + */ + subq $-(VEC_SIZE * 4), %rdi +L(last_4x_vec_or_less_cmpeq): + VPCMPEQ 1(%rdi), %ymm0, %ymm1 +L(last_4x_vec_or_less): # ifdef USE_AS_WCSLEN - shrq $2, %rax + /* NB: Multiply length by 4 to get byte count. */ + sall $2, %esi # endif - VZEROUPPER - ret - - .p2align 4 -L(last_2x_vec): - addl $(VEC_SIZE * 2), %esi - VPCMPEQ (%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax + /* If remaining length > VEC_SIZE * 2. This works if esi is off + by VEC_SIZE * 4. */ + testl $(VEC_SIZE * 2), %esi + jnz L(last_4x_vec) + + /* length may have been negative or positive by an offset of + VEC_SIZE * 4 depending on where this was called from. This fixes + that. */ + andl $(VEC_SIZE * 4 - 1), %esi testl %eax, %eax + jnz L(last_vec_x1_check) - jnz L(first_vec_x0_check) subl $VEC_SIZE, %esi - jle L(max) + jb L(max) - VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1 + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax - testl %eax, %eax - jnz L(first_vec_x1_check) - movq %r8, %rax -# ifdef USE_AS_WCSLEN - shrq $2, %rax -# endif - VZEROUPPER - ret - - .p2align 4 -L(first_vec_x0_check): tzcntl %eax, %eax /* Check the end of data. */ - cmpq %rax, %rsi - jbe L(max) + cmpl %eax, %esi + jb L(max) + subq %rdx, %rdi + addl $(VEC_SIZE + 1), %eax addq %rdi, %rax - subq %rdx, %rax # ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN +# endif .p2align 4 -L(first_vec_x1_check): +L(last_vec_return_x0): tzcntl %eax, %eax - /* Check the end of data. */ - cmpq %rax, %rsi - jbe L(max) - addq $VEC_SIZE, %rax + subq $(VEC_SIZE * 4 - 1), %rdi addq %rdi, %rax - subq %rdx, %rax -# ifdef USE_AS_WCSLEN +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax -# endif - VZEROUPPER - ret +# endif + VZEROUPPER_RETURN .p2align 4 -L(first_vec_x2_check): +L(last_vec_return_x1): tzcntl %eax, %eax - /* Check the end of data. */ - cmpq %rax, %rsi - jbe L(max) - addq $(VEC_SIZE * 2), %rax + subq $(VEC_SIZE * 3 - 1), %rdi addq %rdi, %rax - subq %rdx, %rax -# ifdef USE_AS_WCSLEN +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. 
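
The loop-exit dispatch above has this shape in C. A sketch only: locate_in_4vec and its parameters are illustrative, and the real code reuses the combined mask already computed in the loop for the last two vectors rather than recomputing a fourth one:

#include <stdint.h>

// m1..m4 hold one bit per byte, set where vector N contained a NUL.
// Returns the byte offset of the first NUL in the 4-vector block;
// the caller guarantees at least one bit is set somewhere.
static unsigned
locate_in_4vec (uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4)
{
  if (m1) return (unsigned) __builtin_ctz (m1);       // last_vec_return_x0
  if (m2) return 32 + (unsigned) __builtin_ctz (m2);  // last_vec_return_x1
  uint64_t m34 = ((uint64_t) m4 << 32) | m3;          // salq $32 + orq
  return 64 + (unsigned) __builtin_ctzll (m34);       // tzcntq
}
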
*/ shrq $2, %rax -# endif - VZEROUPPER - ret +# endif + VZEROUPPER_RETURN +# ifdef USE_AS_STRNLEN .p2align 4 -L(first_vec_x3_check): +L(last_vec_x1_check): + tzcntl %eax, %eax /* Check the end of data. */ - cmpq %rax, %rsi - jbe L(max) - addq $(VEC_SIZE * 3), %rax + cmpl %eax, %esi + jb L(max) + subq %rdx, %rdi + incl %eax addq %rdi, %rax - subq %rdx, %rax # ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN - .p2align 4 L(max): movq %r8, %rax + VZEROUPPER_RETURN + + .p2align 4 +L(last_4x_vec): + /* Test first 2x VEC normally. */ + testl %eax, %eax + jnz L(last_vec_x1) + + VPCMPEQ (VEC_SIZE + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + testl %eax, %eax + jnz L(last_vec_x2) + + /* Normalize length. */ + andl $(VEC_SIZE * 4 - 1), %esi + VPCMPEQ (VEC_SIZE * 2 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + testl %eax, %eax + jnz L(last_vec_x3) + + subl $(VEC_SIZE * 3), %esi + jb L(max) + + VPCMPEQ (VEC_SIZE * 3 + 1)(%rdi), %ymm0, %ymm1 + vpmovmskb %ymm1, %eax + tzcntl %eax, %eax + /* Check the end of data. */ + cmpl %eax, %esi + jb L(max) + subq %rdx, %rdi + addl $(VEC_SIZE * 3 + 1), %eax + addq %rdi, %rax # ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax # endif - VZEROUPPER - ret + VZEROUPPER_RETURN - .p2align 4 -L(zero): - xorl %eax, %eax - ret -# endif .p2align 4 -L(first_vec_x0): +L(last_vec_x1): + /* essentially duplicates of first_vec_x1 but use 64 bit + instructions. */ tzcntl %eax, %eax + subq %rdx, %rdi + incl %eax addq %rdi, %rax - subq %rdx, %rax -# ifdef USE_AS_WCSLEN +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax -# endif - VZEROUPPER - ret +# endif + VZEROUPPER_RETURN .p2align 4 -L(first_vec_x1): +L(last_vec_x2): + /* essentially duplicates of first_vec_x1 but use 64 bit + instructions. */ tzcntl %eax, %eax - addq $VEC_SIZE, %rax + subq %rdx, %rdi + addl $(VEC_SIZE + 1), %eax addq %rdi, %rax - subq %rdx, %rax -# ifdef USE_AS_WCSLEN +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax -# endif - VZEROUPPER - ret +# endif + VZEROUPPER_RETURN .p2align 4 -L(first_vec_x2): +L(last_vec_x3): tzcntl %eax, %eax - addq $(VEC_SIZE * 2), %rax + subl $(VEC_SIZE * 2), %esi + /* Check the end of data. */ + cmpl %eax, %esi + jb L(max_end) + subq %rdx, %rdi + addl $(VEC_SIZE * 2 + 1), %eax addq %rdi, %rax - subq %rdx, %rax -# ifdef USE_AS_WCSLEN +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ shrq $2, %rax +# endif + VZEROUPPER_RETURN +L(max_end): + movq %r8, %rax + VZEROUPPER_RETURN # endif - VZEROUPPER - ret + /* Cold case for crossing page with first load. */ .p2align 4 -L(4x_vec_end): - VPCMPEQ %ymm1, %ymm0, %ymm1 +L(cross_page_boundary): + /* Align data to VEC_SIZE - 1. */ + orq $(VEC_SIZE - 1), %rdi + VPCMPEQ -(VEC_SIZE - 1)(%rdi), %ymm0, %ymm1 vpmovmskb %ymm1, %eax + /* Remove the leading bytes. sarxl only uses bits [5:0] of COUNT + so no need to manually mod rdx. */ + sarxl %edx, %eax, %eax +# ifdef USE_AS_STRNLEN testl %eax, %eax - jnz L(first_vec_x0) - VPCMPEQ %ymm2, %ymm0, %ymm2 - vpmovmskb %ymm2, %eax - testl %eax, %eax - jnz L(first_vec_x1) - VPCMPEQ %ymm3, %ymm0, %ymm3 - vpmovmskb %ymm3, %eax + jnz L(cross_page_less_vec) + leaq 1(%rdi), %rcx + subq %rdx, %rcx +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get wchar_t count. */ + shrl $2, %ecx +# endif + /* Check length. 
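
The leading-byte removal a few lines up can be modeled in C as below (leading_stripped is an illustrative name). The shift is arithmetic on purpose: if bit 31 of the mask was set, the sign bits smeared in by the shift all sit above that genuine match, so a subsequent tzcnt or zero test is unaffected. Unlike sarxl, C needs the shift count masked explicitly:

#include <stdint.h>

static uint32_t
leading_stripped (uint32_t mask, uintptr_t s)
{
  unsigned mis = (unsigned) s & 31;           // misalignment within the vector
  return (uint32_t) ((int32_t) mask >> mis);  // sarxl %edx, %eax, %eax
}
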
*/ + cmpq %rsi, %rcx + jb L(cross_page_continue) + movq %r8, %rax +# else testl %eax, %eax - jnz L(first_vec_x2) - VPCMPEQ %ymm4, %ymm0, %ymm4 - vpmovmskb %ymm4, %eax -L(first_vec_x3): + jz L(cross_page_continue) tzcntl %eax, %eax - addq $(VEC_SIZE * 3), %rax - addq %rdi, %rax - subq %rdx, %rax -# ifdef USE_AS_WCSLEN - shrq $2, %rax +# ifdef USE_AS_WCSLEN + /* NB: Divide length by 4 to get wchar_t count. */ + shrl $2, %eax +# endif +# endif +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN + +# ifdef USE_AS_STRNLEN + .p2align 4 +L(cross_page_less_vec): + tzcntl %eax, %eax +# ifdef USE_AS_WCSLEN + /* NB: Multiply length by 4 to get byte count. */ + sall $2, %esi +# endif + cmpq %rax, %rsi + cmovb %esi, %eax +# ifdef USE_AS_WCSLEN + shrl $2, %eax +# endif + VZEROUPPER_RETURN # endif - VZEROUPPER - ret END (STRLEN) #endif diff --git a/sysdeps/x86_64/multiarch/strlen-evex.S b/sysdeps/x86_64/multiarch/strlen-evex.S new file mode 100644 index 000000000..4bf6874b8 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strlen-evex.S @@ -0,0 +1,489 @@ +/* strlen/strnlen/wcslen/wcsnlen optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# ifndef STRLEN +# define STRLEN __strlen_evex +# endif + +# define VMOVA vmovdqa64 + +# ifdef USE_AS_WCSLEN +# define VPCMP vpcmpd +# define VPMINU vpminud +# define SHIFT_REG ecx +# define CHAR_SIZE 4 +# else +# define VPCMP vpcmpb +# define VPMINU vpminub +# define SHIFT_REG edx +# define CHAR_SIZE 1 +# endif + +# define XMMZERO xmm16 +# define YMMZERO ymm16 +# define YMM1 ymm17 +# define YMM2 ymm18 +# define YMM3 ymm19 +# define YMM4 ymm20 +# define YMM5 ymm21 +# define YMM6 ymm22 + +# define VEC_SIZE 32 +# define PAGE_SIZE 4096 +# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE) + + .section .text.evex,"ax",@progbits +ENTRY (STRLEN) +# ifdef USE_AS_STRNLEN + /* Check zero length. */ + test %RSI_LP, %RSI_LP + jz L(zero) +# ifdef __ILP32__ + /* Clear the upper 32 bits. */ + movl %esi, %esi +# endif + mov %RSI_LP, %R8_LP +# endif + movl %edi, %eax + vpxorq %XMMZERO, %XMMZERO, %XMMZERO + /* Clear high bits from edi. Only keeping bits relevant to page + cross check. */ + andl $(PAGE_SIZE - 1), %eax + /* Check if we may cross page boundary with one vector load. */ + cmpl $(PAGE_SIZE - VEC_SIZE), %eax + ja L(cross_page_boundary) + + /* Check the first VEC_SIZE bytes. Each bit in K0 represents a + null byte. */ + VPCMP $0, (%rdi), %YMMZERO, %k0 + kmovd %k0, %eax +# ifdef USE_AS_STRNLEN + /* If length < CHAR_PER_VEC handle special. 
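
What the VPCMP/kmovd pair just above yields, in scalar C (a sketch; zero_mask32 is an illustrative helper):

#include <stdint.h>

// Compare each byte of a 32-byte block against zero and collect one
// bit per byte, the same shape the EVEX code reads straight out of a
// mask register.  tzcnt of the result is the index of the first NUL.
static uint32_t
zero_mask32 (const unsigned char *block)
{
  uint32_t mask = 0;
  for (int i = 0; i < 32; i++)
    if (block[i] == 0)
      mask |= (uint32_t) 1 << i;  // VPCMP $0 ... %k0, then kmovd
  return mask;
}
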
*/ + cmpq $CHAR_PER_VEC, %rsi + jbe L(first_vec_x0) +# endif + testl %eax, %eax + jz L(aligned_more) + tzcntl %eax, %eax + ret +# ifdef USE_AS_STRNLEN +L(zero): + xorl %eax, %eax + ret + + .p2align 4 +L(first_vec_x0): + /* Set bit for max len so that tzcnt will return min of max len + and position of first match. */ + btsq %rsi, %rax + tzcntl %eax, %eax + ret +# endif + + .p2align 4 +L(first_vec_x1): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ +# ifdef USE_AS_STRNLEN + /* Use ecx which was computed earlier to compute correct value. + */ + leal -(CHAR_PER_VEC * 4 + 1)(%rcx, %rax), %eax +# else + subl %edx, %edi +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %edi +# endif + leal CHAR_PER_VEC(%rdi, %rax), %eax +# endif + ret + + .p2align 4 +L(first_vec_x2): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ +# ifdef USE_AS_STRNLEN + /* Use ecx which was computed earlier to compute correct value. + */ + leal -(CHAR_PER_VEC * 3 + 1)(%rcx, %rax), %eax +# else + subl %edx, %edi +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %edi +# endif + leal (CHAR_PER_VEC * 2)(%rdi, %rax), %eax +# endif + ret + + .p2align 4 +L(first_vec_x3): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ +# ifdef USE_AS_STRNLEN + /* Use ecx which was computed earlier to compute correct value. + */ + leal -(CHAR_PER_VEC * 2 + 1)(%rcx, %rax), %eax +# else + subl %edx, %edi +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %edi +# endif + leal (CHAR_PER_VEC * 3)(%rdi, %rax), %eax +# endif + ret + + .p2align 4 +L(first_vec_x4): + tzcntl %eax, %eax + /* Safe to use 32 bit instructions as these are only called for + size = [1, 159]. */ +# ifdef USE_AS_STRNLEN + /* Use ecx which was computed earlier to compute correct value. + */ + leal -(CHAR_PER_VEC + 1)(%rcx, %rax), %eax +# else + subl %edx, %edi +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %edi +# endif + leal (CHAR_PER_VEC * 4)(%rdi, %rax), %eax +# endif + ret + + .p2align 5 +L(aligned_more): + movq %rdi, %rdx + /* Align data to VEC_SIZE. */ + andq $-(VEC_SIZE), %rdi +L(cross_page_continue): + /* Check the first 4 * VEC_SIZE. Only one VEC_SIZE at a time + since data is only aligned to VEC_SIZE. */ +# ifdef USE_AS_STRNLEN + /* + CHAR_SIZE because it simplies the logic in + last_4x_vec_or_less. */ + leaq (VEC_SIZE * 5 + CHAR_SIZE)(%rdi), %rcx + subq %rdx, %rcx +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %ecx +# endif +# endif + /* Load first VEC regardless. */ + VPCMP $0, VEC_SIZE(%rdi), %YMMZERO, %k0 +# ifdef USE_AS_STRNLEN + /* Adjust length. If near end handle specially. */ + subq %rcx, %rsi + jb L(last_4x_vec_or_less) +# endif + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x1) + + VPCMP $0, (VEC_SIZE * 2)(%rdi), %YMMZERO, %k0 + kmovd %k0, %eax + test %eax, %eax + jnz L(first_vec_x2) + + VPCMP $0, (VEC_SIZE * 3)(%rdi), %YMMZERO, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x3) + + VPCMP $0, (VEC_SIZE * 4)(%rdi), %YMMZERO, %k0 + kmovd %k0, %eax + testl %eax, %eax + jnz L(first_vec_x4) + + addq $VEC_SIZE, %rdi +# ifdef USE_AS_STRNLEN + /* Check if at last VEC_SIZE * 4 length. 
*/ + cmpq $(CHAR_PER_VEC * 4 - 1), %rsi + jbe L(last_4x_vec_or_less_load) + movl %edi, %ecx + andl $(VEC_SIZE * 4 - 1), %ecx +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarl $2, %ecx +# endif + /* Readjust length. */ + addq %rcx, %rsi +# endif + /* Align data to VEC_SIZE * 4. */ + andq $-(VEC_SIZE * 4), %rdi + + /* Compare 4 * VEC at a time forward. */ + .p2align 4 +L(loop_4x_vec): + /* Load first VEC regardless. */ + VMOVA (VEC_SIZE * 4)(%rdi), %YMM1 +# ifdef USE_AS_STRNLEN + /* Break if at end of length. */ + subq $(CHAR_PER_VEC * 4), %rsi + jb L(last_4x_vec_or_less_cmpeq) +# endif + /* Save some code size by microfusing VPMINU with the load. Since + the matches in ymm2/ymm4 can only be returned if there where no + matches in ymm1/ymm3 respectively there is no issue with overlap. + */ + VPMINU (VEC_SIZE * 5)(%rdi), %YMM1, %YMM2 + VMOVA (VEC_SIZE * 6)(%rdi), %YMM3 + VPMINU (VEC_SIZE * 7)(%rdi), %YMM3, %YMM4 + + VPCMP $0, %YMM2, %YMMZERO, %k0 + VPCMP $0, %YMM4, %YMMZERO, %k1 + subq $-(VEC_SIZE * 4), %rdi + kortestd %k0, %k1 + jz L(loop_4x_vec) + + /* Check if end was in first half. */ + kmovd %k0, %eax + subq %rdx, %rdi +# ifdef USE_AS_WCSLEN + shrq $2, %rdi +# endif + testl %eax, %eax + jz L(second_vec_return) + + VPCMP $0, %YMM1, %YMMZERO, %k2 + kmovd %k2, %edx + /* Combine VEC1 matches (edx) with VEC2 matches (eax). */ +# ifdef USE_AS_WCSLEN + sall $CHAR_PER_VEC, %eax + orl %edx, %eax + tzcntl %eax, %eax +# else + salq $CHAR_PER_VEC, %rax + orq %rdx, %rax + tzcntq %rax, %rax +# endif + addq %rdi, %rax + ret + + +# ifdef USE_AS_STRNLEN + +L(last_4x_vec_or_less_load): + /* Depending on entry adjust rdi / prepare first VEC in YMM1. */ + VMOVA (VEC_SIZE * 4)(%rdi), %YMM1 +L(last_4x_vec_or_less_cmpeq): + VPCMP $0, %YMM1, %YMMZERO, %k0 + addq $(VEC_SIZE * 3), %rdi +L(last_4x_vec_or_less): + kmovd %k0, %eax + /* If remaining length > VEC_SIZE * 2. This works if esi is off by + VEC_SIZE * 4. */ + testl $(CHAR_PER_VEC * 2), %esi + jnz L(last_4x_vec) + + /* length may have been negative or positive by an offset of + CHAR_PER_VEC * 4 depending on where this was called from. This + fixes that. */ + andl $(CHAR_PER_VEC * 4 - 1), %esi + testl %eax, %eax + jnz L(last_vec_x1_check) + + /* Check the end of data. */ + subl $CHAR_PER_VEC, %esi + jb L(max) + + VPCMP $0, (VEC_SIZE * 2)(%rdi), %YMMZERO, %k0 + kmovd %k0, %eax + tzcntl %eax, %eax + /* Check the end of data. */ + cmpl %eax, %esi + jb L(max) + + subq %rdx, %rdi +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarq $2, %rdi +# endif + leaq (CHAR_PER_VEC * 2)(%rdi, %rax), %rax + ret +L(max): + movq %r8, %rax + ret +# endif + + /* Placed here in strnlen so that the jcc L(last_4x_vec_or_less) + in the 4x VEC loop can use 2 byte encoding. */ + .p2align 4 +L(second_vec_return): + VPCMP $0, %YMM3, %YMMZERO, %k0 + /* Combine YMM3 matches (k0) with YMM4 matches (k1). */ +# ifdef USE_AS_WCSLEN + kunpckbw %k0, %k1, %k0 + kmovd %k0, %eax + tzcntl %eax, %eax +# else + kunpckdq %k0, %k1, %k0 + kmovq %k0, %rax + tzcntq %rax, %rax +# endif + leaq (CHAR_PER_VEC * 2)(%rdi, %rax), %rax + ret + + +# ifdef USE_AS_STRNLEN +L(last_vec_x1_check): + tzcntl %eax, %eax + /* Check the end of data. */ + cmpl %eax, %esi + jb L(max) + subq %rdx, %rdi +# ifdef USE_AS_WCSLEN + /* NB: Divide bytes by 4 to get the wchar_t count. */ + sarq $2, %rdi +# endif + leaq (CHAR_PER_VEC)(%rdi, %rax), %rax + ret + + .p2align 4 +L(last_4x_vec): + /* Test first 2x VEC normally. 
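
The andl length fix-up above works because masking is a reduction mod CHAR_PER_VEC * 4 that is indifferent to the sign of the bias. A C sketch for the byte-string case, where the block is 128 characters (normalize_remaining is an illustrative name):

// biased may be the true remainder plus or minus one 4-vector block;
// in two's complement, & (block_size - 1) recovers the remainder.
static unsigned
normalize_remaining (int biased)
{
  return (unsigned) biased & (4 * 32 - 1);  // andl $(CHAR_PER_VEC * 4 - 1)
}
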
+
+	.p2align 4
+L(last_4x_vec):
+	/* Test first 2x VEC normally.  */
+	testl	%eax, %eax
+	jnz	L(last_vec_x1)
+
+	VPCMP	$0, (VEC_SIZE * 2)(%rdi), %YMMZERO, %k0
+	kmovd	%k0, %eax
+	testl	%eax, %eax
+	jnz	L(last_vec_x2)
+
+	/* Normalize length.  */
+	andl	$(CHAR_PER_VEC * 4 - 1), %esi
+	VPCMP	$0, (VEC_SIZE * 3)(%rdi), %YMMZERO, %k0
+	kmovd	%k0, %eax
+	testl	%eax, %eax
+	jnz	L(last_vec_x3)
+
+	/* Check the end of data.  */
+	subl	$(CHAR_PER_VEC * 3), %esi
+	jb	L(max)
+
+	VPCMP	$0, (VEC_SIZE * 4)(%rdi), %YMMZERO, %k0
+	kmovd	%k0, %eax
+	tzcntl	%eax, %eax
+	/* Check the end of data.  */
+	cmpl	%eax, %esi
+	jb	L(max_end)
+
+	subq	%rdx, %rdi
+# ifdef USE_AS_WCSLEN
+	/* NB: Divide bytes by 4 to get the wchar_t count.  */
+	sarq	$2, %rdi
+# endif
+	leaq	(CHAR_PER_VEC * 4)(%rdi, %rax), %rax
+	ret
+
+	.p2align 4
+L(last_vec_x1):
+	tzcntl	%eax, %eax
+	subq	%rdx, %rdi
+# ifdef USE_AS_WCSLEN
+	/* NB: Divide bytes by 4 to get the wchar_t count.  */
+	sarq	$2, %rdi
+# endif
+	leaq	(CHAR_PER_VEC)(%rdi, %rax), %rax
+	ret
+
+	.p2align 4
+L(last_vec_x2):
+	tzcntl	%eax, %eax
+	subq	%rdx, %rdi
+# ifdef USE_AS_WCSLEN
+	/* NB: Divide bytes by 4 to get the wchar_t count.  */
+	sarq	$2, %rdi
+# endif
+	leaq	(CHAR_PER_VEC * 2)(%rdi, %rax), %rax
+	ret
+
+	.p2align 4
+L(last_vec_x3):
+	tzcntl	%eax, %eax
+	subl	$(CHAR_PER_VEC * 2), %esi
+	/* Check the end of data.  */
+	cmpl	%eax, %esi
+	jb	L(max_end)
+	subq	%rdx, %rdi
+# ifdef USE_AS_WCSLEN
+	/* NB: Divide bytes by 4 to get the wchar_t count.  */
+	sarq	$2, %rdi
+# endif
+	leaq	(CHAR_PER_VEC * 3)(%rdi, %rax), %rax
+	ret
+L(max_end):
+	movq	%r8, %rax
+	ret
+# endif
+
+	/* Cold case for crossing page with first load.  */
+	.p2align 4
+L(cross_page_boundary):
+	movq	%rdi, %rdx
+	/* Align data to VEC_SIZE.  */
+	andq	$-VEC_SIZE, %rdi
+	VPCMP	$0, (%rdi), %YMMZERO, %k0
+	kmovd	%k0, %eax
+	/* Remove the leading bytes.  */
+# ifdef USE_AS_WCSLEN
+	/* NB: Divide shift count by 4 since each bit in K0 represents 4
+	   bytes.  */
+	movl	%edx, %ecx
+	shrl	$2, %ecx
+	andl	$(CHAR_PER_VEC - 1), %ecx
+# endif
+	/* SHIFT_REG is ecx for USE_AS_WCSLEN and edx otherwise.  */
+	sarxl	%SHIFT_REG, %eax, %eax
+	testl	%eax, %eax
+# ifndef USE_AS_STRNLEN
+	jz	L(cross_page_continue)
+	tzcntl	%eax, %eax
+	ret
+# else
+	jnz	L(cross_page_less_vec)
+# ifndef USE_AS_WCSLEN
+	movl	%edx, %ecx
+	andl	$(CHAR_PER_VEC - 1), %ecx
+# endif
+	movl	$CHAR_PER_VEC, %eax
+	subl	%ecx, %eax
+	/* Check the end of data.  */
+	cmpq	%rax, %rsi
+	ja	L(cross_page_continue)
+	movl	%esi, %eax
+	ret
+L(cross_page_less_vec):
+	tzcntl	%eax, %eax
+	/* Select min of length and position of first null.  */
+	cmpq	%rax, %rsi
+	cmovb	%esi, %eax
+	ret
+# endif
+
+END (STRLEN)
+#endif
diff --git a/sysdeps/x86_64/multiarch/strlen-sse2.S b/sysdeps/x86_64/multiarch/strlen-sse2.S
index 65769f3c2..f10741c07 100644
--- a/sysdeps/x86_64/multiarch/strlen-sse2.S
+++ b/sysdeps/x86_64/multiarch/strlen-sse2.S
@@ -20,4 +20,4 @@
 # define strlen __strlen_sse2
 #endif
 
-#include "../strlen.S"
+#include "strlen-vec.S"
diff --git a/sysdeps/x86_64/multiarch/strlen-vec.S b/sysdeps/x86_64/multiarch/strlen-vec.S
new file mode 100644
index 000000000..439e486a4
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strlen-vec.S
@@ -0,0 +1,270 @@
+/* SSE2 version of strlen and SSE4.1 version of wcslen.
+   Copyright (C) 2012-2021 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#ifdef AS_WCSLEN
+# define PMINU		pminud
+# define PCMPEQ		pcmpeqd
+# define SHIFT_RETURN	shrq $2, %rax
+#else
+# define PMINU		pminub
+# define PCMPEQ		pcmpeqb
+# define SHIFT_RETURN
+#endif
+
+/* Long lived registers in strlen (s) and strnlen (s, n) are:
+
+	%xmm3 - zero
+	%rdi  - s
+	%r10  - (s+n) & (~(64-1))
+	%r11  - s+n
+*/
+
+
+.text
+ENTRY(strlen)
+
+/* Test 64 bytes from %rax for zero.  Save result as bitmask in %rdx.  */
+#define FIND_ZERO	\
+	PCMPEQ	(%rax), %xmm0;	\
+	PCMPEQ	16(%rax), %xmm1;	\
+	PCMPEQ	32(%rax), %xmm2;	\
+	PCMPEQ	48(%rax), %xmm3;	\
+	pmovmskb	%xmm0, %esi;	\
+	pmovmskb	%xmm1, %edx;	\
+	pmovmskb	%xmm2, %r8d;	\
+	pmovmskb	%xmm3, %ecx;	\
+	salq	$16, %rdx;	\
+	salq	$16, %rcx;	\
+	orq	%rsi, %rdx;	\
+	orq	%r8, %rcx;	\
+	salq	$32, %rcx;	\
+	orq	%rcx, %rdx;
+
+#ifdef AS_STRNLEN
+/* Do not read anything when n==0.  */
+	test	%RSI_LP, %RSI_LP
+	jne	L(n_nonzero)
+	xor	%rax, %rax
+	ret
+L(n_nonzero):
+# ifdef AS_WCSLEN
+/* Check for overflow from maxlen * sizeof(wchar_t).  If it would
+   overflow, the only way this program avoids undefined behavior is if
+   there is a null terminator in valid memory, so wcslen will
+   suffice.  */
+	mov	%RSI_LP, %R10_LP
+	sar	$62, %R10_LP
+	test	%R10_LP, %R10_LP
+	jnz	__wcslen_sse4_1
+	sal	$2, %RSI_LP
+# endif
+
+
+/* Initialize long lived registers.  */
+
+	add	%RDI_LP, %RSI_LP
+# ifdef AS_WCSLEN
+/* Check for overflow again from s + maxlen * sizeof(wchar_t).  */
+	jbe	__wcslen_sse4_1
+# endif
+	mov	%RSI_LP, %R10_LP
+	and	$-64, %R10_LP
+	mov	%RSI_LP, %R11_LP
+#endif
+
+	pxor	%xmm0, %xmm0
+	pxor	%xmm1, %xmm1
+	pxor	%xmm2, %xmm2
+	pxor	%xmm3, %xmm3
+	movq	%rdi, %rax
+	movq	%rdi, %rcx
+	andq	$4095, %rcx
+/* Offsets 4032-4047 will be aligned into 4032 thus fit into the page.  */
+	cmpq	$4047, %rcx
+/* We cannot unify this branching as it would be ~6 cycles slower.  */
+	ja	L(cross_page)
+
+#ifdef AS_STRNLEN
+/* Test if end is among first 64 bytes.  */
+# define STRNLEN_PROLOG	\
+	mov	%r11, %rsi;	\
+	subq	%rax, %rsi;	\
+	andq	$-64, %rax;	\
+	testq	$-64, %rsi;	\
+	je	L(strnlen_ret)
+#else
+# define STRNLEN_PROLOG  andq $-64, %rax;
+#endif
+
+/* Ignore bits in mask that come before start of string.  */
+#define PROLOG(lab)	\
+	movq	%rdi, %rcx;	\
+	xorq	%rax, %rcx;	\
+	STRNLEN_PROLOG;	\
+	sarq	%cl, %rdx;	\
+	test	%rdx, %rdx;	\
+	je	L(lab);	\
+	bsfq	%rdx, %rax;	\
+	SHIFT_RETURN;	\
+	ret
+
+#ifdef AS_STRNLEN
+	andq	$-16, %rax
+	FIND_ZERO
+#else
+	/* Test first 16 bytes unaligned.  */
+	movdqu	(%rax), %xmm4
+	PCMPEQ	%xmm0, %xmm4
+	pmovmskb	%xmm4, %edx
+	test	%edx, %edx
+	je	L(next48_bytes)
+	bsf	%edx, %eax /* If eax is zeroed 16bit bsf can be used.  */
+	SHIFT_RETURN
+	ret
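The AS_WCSLEN prolog above (the sar $62 test and the jbe after the add) guards maxlen * sizeof (wchar_t) against overflow, tail-calling __wcslen_sse4_1 when it would wrap, since a terminator must then exist in valid memory. A rough C rendering of that guard (hypothetical function name; a sketch, not the implementation):

#include <stddef.h>
#include <stdint.h>
#include <wchar.h>

size_t
wcsnlen_overflow_model (const wchar_t *s, size_t maxlen)
{
  /* sar $62: either of the two top bits set means maxlen * 4 wraps.  */
  if (maxlen > (SIZE_MAX >> 2)
      /* jbe after the add: s + maxlen * 4 wrapped around.  */
      || (uintptr_t) s + (maxlen << 2) < (uintptr_t) s)
    /* Avoiding undefined behavior requires a null terminator in
       valid memory, so an unbounded wcslen is sufficient.  */
    return wcslen (s);

  size_t i = 0;
  while (i < maxlen && s[i] != L'\0')	/* bounded scan */
    i++;
  return i;
}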
+
+L(next48_bytes):
+/* Same as FIND_ZERO except we do not check first 16 bytes.  */
+	andq	$-16, %rax
+	PCMPEQ 16(%rax), %xmm1
+	PCMPEQ 32(%rax), %xmm2
+	PCMPEQ 48(%rax), %xmm3
+	pmovmskb	%xmm1, %edx
+	pmovmskb	%xmm2, %r8d
+	pmovmskb	%xmm3, %ecx
+	salq	$16, %rdx
+	salq	$16, %rcx
+	orq	%r8, %rcx
+	salq	$32, %rcx
+	orq	%rcx, %rdx
+#endif
+
+	/* When no zero byte is found xmm1-3 are zero so we do not have to
+	   zero them.  */
+	PROLOG(loop)
+
+	.p2align 4
+L(cross_page):
+	andq	$-64, %rax
+	FIND_ZERO
+	PROLOG(loop_init)
+
+#ifdef AS_STRNLEN
+/* We must do this check to correctly handle strnlen (s, -1).  */
+L(strnlen_ret):
+	bts	%rsi, %rdx
+	sarq	%cl, %rdx
+	test	%rdx, %rdx
+	je	L(loop_init)
+	bsfq	%rdx, %rax
+	SHIFT_RETURN
+	ret
+#endif
+	.p2align 4
+L(loop_init):
+	pxor	%xmm1, %xmm1
+	pxor	%xmm2, %xmm2
+	pxor	%xmm3, %xmm3
+#ifdef AS_STRNLEN
+	.p2align 4
+L(loop):
+
+	addq	$64, %rax
+	cmpq	%rax, %r10
+	je	L(exit_end)
+
+	movdqa	(%rax), %xmm0
+	PMINU	16(%rax), %xmm0
+	PMINU	32(%rax), %xmm0
+	PMINU	48(%rax), %xmm0
+	PCMPEQ	%xmm3, %xmm0
+	pmovmskb	%xmm0, %edx
+	testl	%edx, %edx
+	jne	L(exit)
+	jmp	L(loop)
+
+	.p2align 4
+L(exit_end):
+	cmp	%rax, %r11
+	je	L(first) /* Do not read when end is at page boundary.  */
+	pxor	%xmm0, %xmm0
+	FIND_ZERO
+
+L(first):
+	bts	%r11, %rdx
+	bsfq	%rdx, %rdx
+	addq	%rdx, %rax
+	subq	%rdi, %rax
+	SHIFT_RETURN
+	ret
+
+	.p2align 4
+L(exit):
+	pxor	%xmm0, %xmm0
+	FIND_ZERO
+
+	bsfq	%rdx, %rdx
+	addq	%rdx, %rax
+	subq	%rdi, %rax
+	SHIFT_RETURN
+	ret
+
+#else
+
+	/* Main loop.  Unrolled twice to improve L2 cache performance on
+	   core2.  */
+	.p2align 4
+L(loop):
+
+	movdqa	64(%rax), %xmm0
+	PMINU	80(%rax), %xmm0
+	PMINU	96(%rax), %xmm0
+	PMINU	112(%rax), %xmm0
+	PCMPEQ	%xmm3, %xmm0
+	pmovmskb	%xmm0, %edx
+	testl	%edx, %edx
+	jne	L(exit64)
+
+	subq	$-128, %rax
+
+	movdqa	(%rax), %xmm0
+	PMINU	16(%rax), %xmm0
+	PMINU	32(%rax), %xmm0
+	PMINU	48(%rax), %xmm0
+	PCMPEQ	%xmm3, %xmm0
+	pmovmskb	%xmm0, %edx
+	testl	%edx, %edx
+	jne	L(exit0)
+	jmp	L(loop)
+
+	.p2align 4
+L(exit64):
+	addq	$64, %rax
+L(exit0):
+	pxor	%xmm0, %xmm0
+	FIND_ZERO
+
+	bsfq	%rdx, %rdx
+	addq	%rdx, %rax
+	subq	%rdi, %rax
+	SHIFT_RETURN
+	ret
+
+#endif
+
+END(strlen)
diff --git a/sysdeps/x86_64/multiarch/strncat-avx2-rtm.S b/sysdeps/x86_64/multiarch/strncat-avx2-rtm.S
new file mode 100644
index 000000000..0dcea18db
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncat-avx2-rtm.S
@@ -0,0 +1,3 @@
+#define USE_AS_STRNCAT
+#define STRCAT __strncat_avx2_rtm
+#include "strcat-avx2-rtm.S"
diff --git a/sysdeps/x86_64/multiarch/strncat-evex.S b/sysdeps/x86_64/multiarch/strncat-evex.S
new file mode 100644
index 000000000..8884f0237
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncat-evex.S
@@ -0,0 +1,3 @@
+#define USE_AS_STRNCAT
+#define STRCAT __strncat_evex
+#include "strcat-evex.S"
diff --git a/sysdeps/x86_64/multiarch/strncmp-avx2-rtm.S b/sysdeps/x86_64/multiarch/strncmp-avx2-rtm.S
new file mode 100644
index 000000000..68bad365b
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strncmp-avx2-rtm.S
@@ -0,0 +1,4 @@
+#define STRCMP __strncmp_avx2_rtm
+#define USE_AS_STRNCMP 1
+#define OVERFLOW_STRCMP __strcmp_avx2_rtm
+#include "strcmp-avx2-rtm.S"
diff --git a/sysdeps/x86_64/multiarch/strncmp-avx2.S b/sysdeps/x86_64/multiarch/strncmp-avx2.S
index 1678bcc23..f138e9f1f 100644
--- a/sysdeps/x86_64/multiarch/strncmp-avx2.S
+++ b/sysdeps/x86_64/multiarch/strncmp-avx2.S
@@ -1,3 +1,4 @@
 #define STRCMP __strncmp_avx2
 #define USE_AS_STRNCMP 1
+#define OVERFLOW_STRCMP __strcmp_avx2
 #include "strcmp-avx2.S"
diff --git a/sysdeps/x86_64/multiarch/strncmp-evex.S b/sysdeps/x86_64/multiarch/strncmp-evex.S
new file mode
100644 index 000000000..a1d53e8c9 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strncmp-evex.S @@ -0,0 +1,3 @@ +#define STRCMP __strncmp_evex +#define USE_AS_STRNCMP 1 +#include "strcmp-evex.S" diff --git a/sysdeps/x86_64/multiarch/strncmp.c b/sysdeps/x86_64/multiarch/strncmp.c index a565626ae..60ba0fe35 100644 --- a/sysdeps/x86_64/multiarch/strncmp.c +++ b/sysdeps/x86_64/multiarch/strncmp.c @@ -30,16 +30,29 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (ssse3) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (sse42) attribute_hidden; extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_rtm) attribute_hidden; +extern __typeof (REDIRECT_NAME) OPTIMIZE (evex) attribute_hidden; static inline void * IFUNC_SELECTOR (void) { const struct cpu_features* cpu_features = __get_cpu_features (); - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) + if (CPU_FEATURE_USABLE_P (cpu_features, AVX2) && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2); + { + if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL) + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW) + && CPU_FEATURE_USABLE_P (cpu_features, BMI2) + && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_AVX2_STRCMP)) + return OPTIMIZE (evex); + + if (CPU_FEATURE_USABLE_P (cpu_features, RTM)) + return OPTIMIZE (avx2_rtm); + + if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)) + return OPTIMIZE (avx2); + } if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_2) && !CPU_FEATURES_ARCH_P (cpu_features, Slow_SSE4_2)) diff --git a/sysdeps/x86_64/multiarch/strncpy-avx2-rtm.S b/sysdeps/x86_64/multiarch/strncpy-avx2-rtm.S new file mode 100644 index 000000000..79e708329 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strncpy-avx2-rtm.S @@ -0,0 +1,3 @@ +#define USE_AS_STRNCPY +#define STRCPY __strncpy_avx2_rtm +#include "strcpy-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/strncpy-evex.S b/sysdeps/x86_64/multiarch/strncpy-evex.S new file mode 100644 index 000000000..40e391f0d --- /dev/null +++ b/sysdeps/x86_64/multiarch/strncpy-evex.S @@ -0,0 +1,3 @@ +#define USE_AS_STRNCPY +#define STRCPY __strncpy_evex +#include "strcpy-evex.S" diff --git a/sysdeps/x86_64/multiarch/strnlen-avx2-rtm.S b/sysdeps/x86_64/multiarch/strnlen-avx2-rtm.S new file mode 100644 index 000000000..04f1626a5 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strnlen-avx2-rtm.S @@ -0,0 +1,4 @@ +#define STRLEN __strnlen_avx2_rtm +#define USE_AS_STRNLEN 1 + +#include "strlen-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/strnlen-evex.S b/sysdeps/x86_64/multiarch/strnlen-evex.S new file mode 100644 index 000000000..722022f30 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strnlen-evex.S @@ -0,0 +1,4 @@ +#define STRLEN __strnlen_evex +#define USE_AS_STRNLEN 1 + +#include "strlen-evex.S" diff --git a/sysdeps/x86_64/multiarch/strrchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/strrchr-avx2-rtm.S new file mode 100644 index 000000000..5def14ec1 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strrchr-avx2-rtm.S @@ -0,0 +1,12 @@ +#ifndef STRRCHR +# define STRRCHR __strrchr_avx2_rtm +#endif + +#define ZERO_UPPER_VEC_REGISTERS_RETURN \ + ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST + +#define VZEROUPPER_RETURN jmp L(return_vzeroupper) + +#define SECTION(p) p##.avx.rtm + +#include "strrchr-avx2.S" diff --git a/sysdeps/x86_64/multiarch/strrchr-avx2.S b/sysdeps/x86_64/multiarch/strrchr-avx2.S index 
53ea44530..0deba9711 100644 --- a/sysdeps/x86_64/multiarch/strrchr-avx2.S +++ b/sysdeps/x86_64/multiarch/strrchr-avx2.S @@ -36,9 +36,13 @@ # define VZEROUPPER vzeroupper # endif +# ifndef SECTION +# define SECTION(p) p##.avx +# endif + # define VEC_SIZE 32 - .section .text.avx,"ax",@progbits + .section SECTION(.text),"ax",@progbits ENTRY (STRRCHR) movd %esi, %xmm4 movl %edi, %ecx @@ -166,8 +170,8 @@ L(return_value): # endif bsrl %eax, %eax leaq -VEC_SIZE(%rdi, %rax), %rax - VZEROUPPER - ret +L(return_vzeroupper): + ZERO_UPPER_VEC_REGISTERS_RETURN .p2align 4 L(match): @@ -198,8 +202,7 @@ L(find_nul): jz L(return_value) bsrl %eax, %eax leaq -VEC_SIZE(%rdi, %rax), %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(char_and_nul): @@ -222,14 +225,12 @@ L(char_and_nul_in_first_vec): jz L(return_null) bsrl %eax, %eax leaq -VEC_SIZE(%rdi, %rax), %rax - VZEROUPPER - ret + VZEROUPPER_RETURN .p2align 4 L(return_null): xorl %eax, %eax - VZEROUPPER - ret + VZEROUPPER_RETURN END (STRRCHR) #endif diff --git a/sysdeps/x86_64/multiarch/strrchr-evex.S b/sysdeps/x86_64/multiarch/strrchr-evex.S new file mode 100644 index 000000000..f920b5a58 --- /dev/null +++ b/sysdeps/x86_64/multiarch/strrchr-evex.S @@ -0,0 +1,265 @@ +/* strrchr/wcsrchr optimized with 256-bit EVEX instructions. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#if IS_IN (libc) + +# include + +# ifndef STRRCHR +# define STRRCHR __strrchr_evex +# endif + +# define VMOVU vmovdqu64 +# define VMOVA vmovdqa64 + +# ifdef USE_AS_WCSRCHR +# define VPBROADCAST vpbroadcastd +# define VPCMP vpcmpd +# define SHIFT_REG r8d +# else +# define VPBROADCAST vpbroadcastb +# define VPCMP vpcmpb +# define SHIFT_REG ecx +# endif + +# define XMMZERO xmm16 +# define YMMZERO ymm16 +# define YMMMATCH ymm17 +# define YMM1 ymm18 + +# define VEC_SIZE 32 + + .section .text.evex,"ax",@progbits +ENTRY (STRRCHR) + movl %edi, %ecx + /* Broadcast CHAR to YMMMATCH. */ + VPBROADCAST %esi, %YMMMATCH + + vpxorq %XMMZERO, %XMMZERO, %XMMZERO + + /* Check if we may cross page boundary with one vector load. */ + andl $(2 * VEC_SIZE - 1), %ecx + cmpl $VEC_SIZE, %ecx + ja L(cros_page_boundary) + + VMOVU (%rdi), %YMM1 + + /* Each bit in K0 represents a null byte in YMM1. */ + VPCMP $0, %YMMZERO, %YMM1, %k0 + /* Each bit in K1 represents a CHAR in YMM1. */ + VPCMP $0, %YMMMATCH, %YMM1, %k1 + kmovd %k0, %ecx + kmovd %k1, %eax + + addq $VEC_SIZE, %rdi + + testl %eax, %eax + jnz L(first_vec) + + testl %ecx, %ecx + jnz L(return_null) + + andq $-VEC_SIZE, %rdi + xorl %edx, %edx + jmp L(aligned_loop) + + .p2align 4 +L(first_vec): + /* Check if there is a null byte. */ + testl %ecx, %ecx + jnz L(char_and_nul_in_first_vec) + + /* Remember the match and keep searching. 
   */
+	movl	%eax, %edx
+	movq	%rdi, %rsi
+	andq	$-VEC_SIZE, %rdi
+	jmp	L(aligned_loop)
+
+	.p2align 4
+L(cros_page_boundary):
+	andl	$(VEC_SIZE - 1), %ecx
+	andq	$-VEC_SIZE, %rdi
+
+# ifdef USE_AS_WCSRCHR
+	/* NB: Divide shift count by 4 since each bit in K1 represents 4
+	   bytes.  */
+	movl	%ecx, %SHIFT_REG
+	sarl	$2, %SHIFT_REG
+# endif
+
+	VMOVA	(%rdi), %YMM1
+
+	/* Each bit in K0 represents a null byte in YMM1.  */
+	VPCMP	$0, %YMMZERO, %YMM1, %k0
+	/* Each bit in K1 represents a CHAR in YMM1.  */
+	VPCMP	$0, %YMMMATCH, %YMM1, %k1
+	kmovd	%k0, %edx
+	kmovd	%k1, %eax
+
+	shrxl	%SHIFT_REG, %edx, %edx
+	shrxl	%SHIFT_REG, %eax, %eax
+	addq	$VEC_SIZE, %rdi
+
+	/* Check if there is a CHAR.  */
+	testl	%eax, %eax
+	jnz	L(found_char)
+
+	testl	%edx, %edx
+	jnz	L(return_null)
+
+	jmp	L(aligned_loop)
+
+	.p2align 4
+L(found_char):
+	testl	%edx, %edx
+	jnz	L(char_and_nul)
+
+	/* Remember the match and keep searching.  */
+	movl	%eax, %edx
+	leaq	(%rdi, %rcx), %rsi
+
+	.p2align 4
+L(aligned_loop):
+	VMOVA	(%rdi), %YMM1
+	addq	$VEC_SIZE, %rdi
+
+	/* Each bit in K0 represents a null byte in YMM1.  */
+	VPCMP	$0, %YMMZERO, %YMM1, %k0
+	/* Each bit in K1 represents a CHAR in YMM1.  */
+	VPCMP	$0, %YMMMATCH, %YMM1, %k1
+	kmovd	%k0, %ecx
+	kmovd	%k1, %eax
+	orl	%eax, %ecx
+	jnz	L(char_nor_null)
+
+	VMOVA	(%rdi), %YMM1
+	add	$VEC_SIZE, %rdi
+
+	/* Each bit in K0 represents a null byte in YMM1.  */
+	VPCMP	$0, %YMMZERO, %YMM1, %k0
+	/* Each bit in K1 represents a CHAR in YMM1.  */
+	VPCMP	$0, %YMMMATCH, %YMM1, %k1
+	kmovd	%k0, %ecx
+	kmovd	%k1, %eax
+	orl	%eax, %ecx
+	jnz	L(char_nor_null)
+
+	VMOVA	(%rdi), %YMM1
+	addq	$VEC_SIZE, %rdi
+
+	/* Each bit in K0 represents a null byte in YMM1.  */
+	VPCMP	$0, %YMMZERO, %YMM1, %k0
+	/* Each bit in K1 represents a CHAR in YMM1.  */
+	VPCMP	$0, %YMMMATCH, %YMM1, %k1
+	kmovd	%k0, %ecx
+	kmovd	%k1, %eax
+	orl	%eax, %ecx
+	jnz	L(char_nor_null)
+
+	VMOVA	(%rdi), %YMM1
+	addq	$VEC_SIZE, %rdi
+
+	/* Each bit in K0 represents a null byte in YMM1.  */
+	VPCMP	$0, %YMMZERO, %YMM1, %k0
+	/* Each bit in K1 represents a CHAR in YMM1.  */
+	VPCMP	$0, %YMMMATCH, %YMM1, %k1
+	kmovd	%k0, %ecx
+	kmovd	%k1, %eax
+	orl	%eax, %ecx
+	jz	L(aligned_loop)
+
+	.p2align 4
+L(char_nor_null):
+	/* Find a CHAR or a null byte in a loop.  */
+	testl	%eax, %eax
+	jnz	L(match)
+L(return_value):
+	testl	%edx, %edx
+	jz	L(return_null)
+	movl	%edx, %eax
+	movq	%rsi, %rdi
+	bsrl	%eax, %eax
+# ifdef USE_AS_WCSRCHR
+	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
+	leaq	-VEC_SIZE(%rdi, %rax, 4), %rax
+# else
+	leaq	-VEC_SIZE(%rdi, %rax), %rax
+# endif
+	ret
+
+	.p2align 4
+L(match):
+	/* Find a CHAR.  Check if there is a null byte.  */
+	kmovd	%k0, %ecx
+	testl	%ecx, %ecx
+	jnz	L(find_nul)
+
+	/* Remember the match and keep searching.  */
+	movl	%eax, %edx
+	movq	%rdi, %rsi
+	jmp	L(aligned_loop)
+
+	.p2align 4
+L(find_nul):
+	/* Mask out any matching bits after the null byte.  */
+	movl	%ecx, %r8d
+	subl	$1, %r8d
+	xorl	%ecx, %r8d
+	andl	%r8d, %eax
+	testl	%eax, %eax
+	/* If there is no CHAR here, return the remembered one.  */
+	jz	L(return_value)
+	bsrl	%eax, %eax
+# ifdef USE_AS_WCSRCHR
+	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
+	leaq	-VEC_SIZE(%rdi, %rax, 4), %rax
+# else
+	leaq	-VEC_SIZE(%rdi, %rax), %rax
+# endif
+	ret
+
+	.p2align 4
+L(char_and_nul):
+	/* Find both a CHAR and a null byte.  */
+	addq	%rcx, %rdi
+	movl	%edx, %ecx
+L(char_and_nul_in_first_vec):
+	/* Mask out any matching bits after the null byte.  */
+	movl	%ecx, %r8d
+	subl	$1, %r8d
+	xorl	%ecx, %r8d
+	andl	%r8d, %eax
+	testl	%eax, %eax
+	/* Return null pointer if the null byte comes first.  */
+	jz	L(return_null)
+	bsrl	%eax, %eax
+# ifdef USE_AS_WCSRCHR
+	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
+	leaq	-VEC_SIZE(%rdi, %rax, 4), %rax
+# else
+	leaq	-VEC_SIZE(%rdi, %rax), %rax
+# endif
+	ret
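Both L(find_nul) and L(char_and_nul_in_first_vec) above use the same bit trick: (mask - 1) ^ mask sets every bit up to and including the lowest null bit, so ANDing that with the CHAR matches discards any match past the terminator before bsr picks the last survivor. A small C model (hypothetical helper, not part of the patch):

#include <stdint.h>

/* match_mask: one bit per position where CHAR matched.
   null_mask: one bit per null byte; must be nonzero here.  */
static int
last_match_before_null (uint32_t match_mask, uint32_t null_mask)
{
  uint32_t up_to_null = (null_mask - 1) ^ null_mask; /* bits 0..first null */
  match_mask &= up_to_null;	/* drop matches past the terminator */
  if (match_mask == 0)
    return -1;			/* the null byte comes first */
  return 31 - (int) __builtin_clz (match_mask);	/* bsrl */
}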
+
+	.p2align 4
+L(return_null):
+	xorl	%eax, %eax
+	ret
+
+END (STRRCHR)
+#endif
diff --git a/sysdeps/x86_64/multiarch/wcschr-avx2-rtm.S b/sysdeps/x86_64/multiarch/wcschr-avx2-rtm.S
new file mode 100644
index 000000000..d49dbbf0b
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcschr-avx2-rtm.S
@@ -0,0 +1,3 @@
+#define STRCHR __wcschr_avx2_rtm
+#define USE_AS_WCSCHR 1
+#include "strchr-avx2-rtm.S"
diff --git a/sysdeps/x86_64/multiarch/wcschr-evex.S b/sysdeps/x86_64/multiarch/wcschr-evex.S
new file mode 100644
index 000000000..7cb8f1e41
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcschr-evex.S
@@ -0,0 +1,3 @@
+#define STRCHR __wcschr_evex
+#define USE_AS_WCSCHR 1
+#include "strchr-evex.S"
diff --git a/sysdeps/x86_64/multiarch/wcscmp-avx2-rtm.S b/sysdeps/x86_64/multiarch/wcscmp-avx2-rtm.S
new file mode 100644
index 000000000..d6ca2b806
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcscmp-avx2-rtm.S
@@ -0,0 +1,4 @@
+#define STRCMP __wcscmp_avx2_rtm
+#define USE_AS_WCSCMP 1
+
+#include "strcmp-avx2-rtm.S"
diff --git a/sysdeps/x86_64/multiarch/wcscmp-evex.S b/sysdeps/x86_64/multiarch/wcscmp-evex.S
new file mode 100644
index 000000000..42e73e51e
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcscmp-evex.S
@@ -0,0 +1,4 @@
+#define STRCMP __wcscmp_evex
+#define USE_AS_WCSCMP 1
+
+#include "strcmp-evex.S"
diff --git a/sysdeps/x86_64/multiarch/wcslen-avx2-rtm.S b/sysdeps/x86_64/multiarch/wcslen-avx2-rtm.S
new file mode 100644
index 000000000..35658d736
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcslen-avx2-rtm.S
@@ -0,0 +1,4 @@
+#define STRLEN __wcslen_avx2_rtm
+#define USE_AS_WCSLEN 1
+
+#include "strlen-avx2-rtm.S"
diff --git a/sysdeps/x86_64/multiarch/wcslen-evex.S b/sysdeps/x86_64/multiarch/wcslen-evex.S
new file mode 100644
index 000000000..bdafa83bd
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcslen-evex.S
@@ -0,0 +1,4 @@
+#define STRLEN __wcslen_evex
+#define USE_AS_WCSLEN 1
+
+#include "strlen-evex.S"
diff --git a/sysdeps/x86_64/multiarch/wcslen-sse4_1.S b/sysdeps/x86_64/multiarch/wcslen-sse4_1.S
new file mode 100644
index 000000000..7e62621af
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcslen-sse4_1.S
@@ -0,0 +1,4 @@
+#define AS_WCSLEN
+#define strlen __wcslen_sse4_1
+
+#include "strlen-vec.S"
diff --git a/sysdeps/x86_64/multiarch/wcslen.c b/sysdeps/x86_64/multiarch/wcslen.c
index f89bed42a..3032061d3 100644
--- a/sysdeps/x86_64/multiarch/wcslen.c
+++ b/sysdeps/x86_64/multiarch/wcslen.c
@@ -24,7 +24,7 @@
 # undef __wcslen
 # define SYMBOL_NAME wcslen
 
-# include "ifunc-avx2.h"
+# include "ifunc-wcslen.h"
 
 libc_ifunc_redirected (__redirect_wcslen, __wcslen, IFUNC_SELECTOR ());
 weak_alias (__wcslen, wcslen);
diff --git a/sysdeps/x86_64/multiarch/wcsncmp-avx2-rtm.S b/sysdeps/x86_64/multiarch/wcsncmp-avx2-rtm.S
new file mode 100644
index 000000000..f467582cb
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcsncmp-avx2-rtm.S
@@ -0,0 +1,5 @@
+#define STRCMP __wcsncmp_avx2_rtm
+#define USE_AS_STRNCMP 1
+#define USE_AS_WCSCMP 1
+#define OVERFLOW_STRCMP __wcscmp_avx2_rtm
+#include "strcmp-avx2-rtm.S"
diff --git a/sysdeps/x86_64/multiarch/wcsncmp-avx2.S b/sysdeps/x86_64/multiarch/wcsncmp-avx2.S
index 4fa1de4d3..e9ede522b 100644
--- 
a/sysdeps/x86_64/multiarch/wcsncmp-avx2.S +++ b/sysdeps/x86_64/multiarch/wcsncmp-avx2.S @@ -1,5 +1,5 @@ #define STRCMP __wcsncmp_avx2 #define USE_AS_STRNCMP 1 #define USE_AS_WCSCMP 1 - +#define OVERFLOW_STRCMP __wcscmp_avx2 #include "strcmp-avx2.S" diff --git a/sysdeps/x86_64/multiarch/wcsncmp-evex.S b/sysdeps/x86_64/multiarch/wcsncmp-evex.S new file mode 100644 index 000000000..8a8e31071 --- /dev/null +++ b/sysdeps/x86_64/multiarch/wcsncmp-evex.S @@ -0,0 +1,5 @@ +#define STRCMP __wcsncmp_evex +#define USE_AS_STRNCMP 1 +#define USE_AS_WCSCMP 1 + +#include "strcmp-evex.S" diff --git a/sysdeps/x86_64/multiarch/wcsnlen-avx2-rtm.S b/sysdeps/x86_64/multiarch/wcsnlen-avx2-rtm.S new file mode 100644 index 000000000..7437ebee2 --- /dev/null +++ b/sysdeps/x86_64/multiarch/wcsnlen-avx2-rtm.S @@ -0,0 +1,5 @@ +#define STRLEN __wcsnlen_avx2_rtm +#define USE_AS_WCSLEN 1 +#define USE_AS_STRNLEN 1 + +#include "strlen-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/wcsnlen-evex.S b/sysdeps/x86_64/multiarch/wcsnlen-evex.S new file mode 100644 index 000000000..24773bb4e --- /dev/null +++ b/sysdeps/x86_64/multiarch/wcsnlen-evex.S @@ -0,0 +1,5 @@ +#define STRLEN __wcsnlen_evex +#define USE_AS_WCSLEN 1 +#define USE_AS_STRNLEN 1 + +#include "strlen-evex.S" diff --git a/sysdeps/x86_64/multiarch/wcsnlen-sse4_1.S b/sysdeps/x86_64/multiarch/wcsnlen-sse4_1.S index a8cab0cb0..5fa51fe07 100644 --- a/sysdeps/x86_64/multiarch/wcsnlen-sse4_1.S +++ b/sysdeps/x86_64/multiarch/wcsnlen-sse4_1.S @@ -2,4 +2,4 @@ #define AS_STRNLEN #define strlen __wcsnlen_sse4_1 -#include "../strlen.S" +#include "strlen-vec.S" diff --git a/sysdeps/x86_64/multiarch/wcsnlen.c b/sysdeps/x86_64/multiarch/wcsnlen.c index 81b1a221f..2963fbe05 100644 --- a/sysdeps/x86_64/multiarch/wcsnlen.c +++ b/sysdeps/x86_64/multiarch/wcsnlen.c @@ -24,27 +24,7 @@ # undef __wcsnlen # define SYMBOL_NAME wcsnlen -# include - -extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden; -extern __typeof (REDIRECT_NAME) OPTIMIZE (sse4_1) attribute_hidden; -extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden; - -static inline void * -IFUNC_SELECTOR (void) -{ - const struct cpu_features* cpu_features = __get_cpu_features (); - - if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER) - && CPU_FEATURE_USABLE_P (cpu_features, AVX2) - && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load)) - return OPTIMIZE (avx2); - - if (CPU_FEATURE_USABLE_P (cpu_features, SSE4_1)) - return OPTIMIZE (sse4_1); - - return OPTIMIZE (sse2); -} +# include "ifunc-wcslen.h" libc_ifunc_redirected (__redirect_wcsnlen, __wcsnlen, IFUNC_SELECTOR ()); weak_alias (__wcsnlen, wcsnlen); diff --git a/sysdeps/x86_64/multiarch/wcsrchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/wcsrchr-avx2-rtm.S new file mode 100644 index 000000000..9bf760833 --- /dev/null +++ b/sysdeps/x86_64/multiarch/wcsrchr-avx2-rtm.S @@ -0,0 +1,3 @@ +#define STRRCHR __wcsrchr_avx2_rtm +#define USE_AS_WCSRCHR 1 +#include "strrchr-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/wcsrchr-evex.S b/sysdeps/x86_64/multiarch/wcsrchr-evex.S new file mode 100644 index 000000000..c64602f7d --- /dev/null +++ b/sysdeps/x86_64/multiarch/wcsrchr-evex.S @@ -0,0 +1,3 @@ +#define STRRCHR __wcsrchr_evex +#define USE_AS_WCSRCHR 1 +#include "strrchr-evex.S" diff --git a/sysdeps/x86_64/multiarch/wmemchr-avx2-rtm.S b/sysdeps/x86_64/multiarch/wmemchr-avx2-rtm.S new file mode 100644 index 000000000..58ed21db0 --- /dev/null +++ b/sysdeps/x86_64/multiarch/wmemchr-avx2-rtm.S @@ -0,0 +1,4 @@ +#define MEMCHR __wmemchr_avx2_rtm 
+#define USE_AS_WMEMCHR 1 + +#include "memchr-avx2-rtm.S" diff --git a/sysdeps/x86_64/multiarch/wmemchr-evex.S b/sysdeps/x86_64/multiarch/wmemchr-evex.S new file mode 100644 index 000000000..06cd0f9f5 --- /dev/null +++ b/sysdeps/x86_64/multiarch/wmemchr-evex.S @@ -0,0 +1,4 @@ +#define MEMCHR __wmemchr_evex +#define USE_AS_WMEMCHR 1 + +#include "memchr-evex.S" diff --git a/sysdeps/x86_64/multiarch/wmemcmp-avx2-movbe-rtm.S b/sysdeps/x86_64/multiarch/wmemcmp-avx2-movbe-rtm.S new file mode 100644 index 000000000..31104d121 --- /dev/null +++ b/sysdeps/x86_64/multiarch/wmemcmp-avx2-movbe-rtm.S @@ -0,0 +1,4 @@ +#define MEMCMP __wmemcmp_avx2_movbe_rtm +#define USE_AS_WMEMCMP 1 + +#include "memcmp-avx2-movbe-rtm.S" diff --git a/sysdeps/x86_64/multiarch/wmemcmp-evex-movbe.S b/sysdeps/x86_64/multiarch/wmemcmp-evex-movbe.S new file mode 100644 index 000000000..4726d74aa --- /dev/null +++ b/sysdeps/x86_64/multiarch/wmemcmp-evex-movbe.S @@ -0,0 +1,4 @@ +#define MEMCMP __wmemcmp_evex_movbe +#define USE_AS_WMEMCMP 1 + +#include "memcmp-evex-movbe.S" diff --git a/sysdeps/x86_64/strlen.S b/sysdeps/x86_64/strlen.S index d223ea170..8422c15cc 100644 --- a/sysdeps/x86_64/strlen.S +++ b/sysdeps/x86_64/strlen.S @@ -1,5 +1,5 @@ -/* SSE2 version of strlen/wcslen. - Copyright (C) 2012-2021 Free Software Foundation, Inc. +/* SSE2 version of strlen. + Copyright (C) 2021 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -16,243 +16,6 @@ License along with the GNU C Library; if not, see . */ -#include +#include "multiarch/strlen-vec.S" -#ifdef AS_WCSLEN -# define PMINU pminud -# define PCMPEQ pcmpeqd -# define SHIFT_RETURN shrq $2, %rax -#else -# define PMINU pminub -# define PCMPEQ pcmpeqb -# define SHIFT_RETURN -#endif - -/* Long lived register in strlen(s), strnlen(s, n) are: - - %xmm3 - zero - %rdi - s - %r10 (s+n) & (~(64-1)) - %r11 s+n -*/ - - -.text -ENTRY(strlen) - -/* Test 64 bytes from %rax for zero. Save result as bitmask in %rdx. */ -#define FIND_ZERO \ - PCMPEQ (%rax), %xmm0; \ - PCMPEQ 16(%rax), %xmm1; \ - PCMPEQ 32(%rax), %xmm2; \ - PCMPEQ 48(%rax), %xmm3; \ - pmovmskb %xmm0, %esi; \ - pmovmskb %xmm1, %edx; \ - pmovmskb %xmm2, %r8d; \ - pmovmskb %xmm3, %ecx; \ - salq $16, %rdx; \ - salq $16, %rcx; \ - orq %rsi, %rdx; \ - orq %r8, %rcx; \ - salq $32, %rcx; \ - orq %rcx, %rdx; - -#ifdef AS_STRNLEN -/* Do not read anything when n==0. */ - test %RSI_LP, %RSI_LP - jne L(n_nonzero) - xor %rax, %rax - ret -L(n_nonzero): -# ifdef AS_WCSLEN - shl $2, %RSI_LP -# endif - -/* Initialize long lived registers. */ - - add %RDI_LP, %RSI_LP - mov %RSI_LP, %R10_LP - and $-64, %R10_LP - mov %RSI_LP, %R11_LP -#endif - - pxor %xmm0, %xmm0 - pxor %xmm1, %xmm1 - pxor %xmm2, %xmm2 - pxor %xmm3, %xmm3 - movq %rdi, %rax - movq %rdi, %rcx - andq $4095, %rcx -/* Offsets 4032-4047 will be aligned into 4032 thus fit into page. */ - cmpq $4047, %rcx -/* We cannot unify this branching as it would be ~6 cycles slower. */ - ja L(cross_page) - -#ifdef AS_STRNLEN -/* Test if end is among first 64 bytes. */ -# define STRNLEN_PROLOG \ - mov %r11, %rsi; \ - subq %rax, %rsi; \ - andq $-64, %rax; \ - testq $-64, %rsi; \ - je L(strnlen_ret) -#else -# define STRNLEN_PROLOG andq $-64, %rax; -#endif - -/* Ignore bits in mask that come before start of string. 
   */
-#define PROLOG(lab)	\
-	movq %rdi, %rcx;	\
-	xorq %rax, %rcx;	\
-	STRNLEN_PROLOG;	\
-	sarq %cl, %rdx;	\
-	test %rdx, %rdx;	\
-	je L(lab);	\
-	bsfq %rdx, %rax;	\
-	SHIFT_RETURN;	\
-	ret
-
-#ifdef AS_STRNLEN
-	andq $-16, %rax
-	FIND_ZERO
-#else
-	/* Test first 16 bytes unaligned.  */
-	movdqu (%rax), %xmm4
-	PCMPEQ %xmm0, %xmm4
-	pmovmskb %xmm4, %edx
-	test %edx, %edx
-	je L(next48_bytes)
-	bsf %edx, %eax /* If eax is zeroed 16bit bsf can be used.  */
-	SHIFT_RETURN
-	ret
-
-L(next48_bytes):
-/* Same as FIND_ZERO except we do not check first 16 bytes.  */
-	andq $-16, %rax
-	PCMPEQ 16(%rax), %xmm1
-	PCMPEQ 32(%rax), %xmm2
-	PCMPEQ 48(%rax), %xmm3
-	pmovmskb %xmm1, %edx
-	pmovmskb %xmm2, %r8d
-	pmovmskb %xmm3, %ecx
-	salq $16, %rdx
-	salq $16, %rcx
-	orq %r8, %rcx
-	salq $32, %rcx
-	orq %rcx, %rdx
-#endif
-
-	/* When no zero byte is found xmm1-3 are zero so we do not have to
-	   zero them.  */
-	PROLOG(loop)
-
-	.p2align 4
-L(cross_page):
-	andq $-64, %rax
-	FIND_ZERO
-	PROLOG(loop_init)
-
-#ifdef AS_STRNLEN
-/* We must do this check to correctly handle strnlen (s, -1).  */
-L(strnlen_ret):
-	bts %rsi, %rdx
-	sarq %cl, %rdx
-	test %rdx, %rdx
-	je L(loop_init)
-	bsfq %rdx, %rax
-	SHIFT_RETURN
-	ret
-#endif
-	.p2align 4
-L(loop_init):
-	pxor %xmm1, %xmm1
-	pxor %xmm2, %xmm2
-	pxor %xmm3, %xmm3
-#ifdef AS_STRNLEN
-	.p2align 4
-L(loop):
-
-	addq $64, %rax
-	cmpq %rax, %r10
-	je L(exit_end)
-
-	movdqa (%rax), %xmm0
-	PMINU 16(%rax), %xmm0
-	PMINU 32(%rax), %xmm0
-	PMINU 48(%rax), %xmm0
-	PCMPEQ %xmm3, %xmm0
-	pmovmskb %xmm0, %edx
-	testl %edx, %edx
-	jne L(exit)
-	jmp L(loop)
-
-	.p2align 4
-L(exit_end):
-	cmp %rax, %r11
-	je L(first) /* Do not read when end is at page boundary.  */
-	pxor %xmm0, %xmm0
-	FIND_ZERO
-
-L(first):
-	bts %r11, %rdx
-	bsfq %rdx, %rdx
-	addq %rdx, %rax
-	subq %rdi, %rax
-	SHIFT_RETURN
-	ret
-
-	.p2align 4
-L(exit):
-	pxor %xmm0, %xmm0
-	FIND_ZERO
-
-	bsfq %rdx, %rdx
-	addq %rdx, %rax
-	subq %rdi, %rax
-	SHIFT_RETURN
-	ret
-
-#else
-
-	/* Main loop.  Unrolled twice to improve L2 cache performance on
-	   core2.  */
-	.p2align 4
-L(loop):
-
-	movdqa 64(%rax), %xmm0
-	PMINU 80(%rax), %xmm0
-	PMINU 96(%rax), %xmm0
-	PMINU 112(%rax), %xmm0
-	PCMPEQ %xmm3, %xmm0
-	pmovmskb %xmm0, %edx
-	testl %edx, %edx
-	jne L(exit64)
-
-	subq $-128, %rax
-
-	movdqa (%rax), %xmm0
-	PMINU 16(%rax), %xmm0
-	PMINU 32(%rax), %xmm0
-	PMINU 48(%rax), %xmm0
-	PCMPEQ %xmm3, %xmm0
-	pmovmskb %xmm0, %edx
-	testl %edx, %edx
-	jne L(exit0)
-	jmp L(loop)
-
-	.p2align 4
-L(exit64):
-	addq $64, %rax
-L(exit0):
-	pxor %xmm0, %xmm0
-	FIND_ZERO
-
-	bsfq %rdx, %rdx
-	addq %rdx, %rax
-	subq %rdi, %rax
-	SHIFT_RETURN
-	ret
-
-#endif
-
-END(strlen)
 libc_hidden_builtin_def (strlen)
diff --git a/sysdeps/x86_64/sysdep.h b/sysdeps/x86_64/sysdep.h
index d07b8f0aa..7bebdeb21 100644
--- a/sysdeps/x86_64/sysdep.h
+++ b/sysdeps/x86_64/sysdep.h
@@ -95,6 +95,28 @@ lose: \
 #define R14_LP	r14
 #define R15_LP	r15
 
+/* Zero upper vector registers and return with xtest.  NB: Use
+   VZEROALL to avoid RTM abort triggered by VZEROUPPER inside a
+   transaction.  */
+#define ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST \
+	xtest;	\
+	jz	1f;	\
+	vzeroall;	\
+	ret;	\
+1:	\
+	vzeroupper;	\
+	ret
+
+/* Zero upper vector registers and return.  */
+#ifndef ZERO_UPPER_VEC_REGISTERS_RETURN
+# define ZERO_UPPER_VEC_REGISTERS_RETURN \
+	VZEROUPPER;	\
+	ret
+#endif
+
+#ifndef VZEROUPPER_RETURN
+# define VZEROUPPER_RETURN	VZEROUPPER; ret
+#endif
+
 #else	/* __ASSEMBLER__ */
 
 /* Long and pointer size in bytes.
*/ diff --git a/sysdeps/x86_64/tst-rsi-strlen.c b/sysdeps/x86_64/tst-rsi-strlen.c new file mode 100644 index 000000000..a80c4f85c --- /dev/null +++ b/sysdeps/x86_64/tst-rsi-strlen.c @@ -0,0 +1,81 @@ +/* Test strlen with 0 in the RSI register. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#ifdef WIDE +# define TEST_NAME "wcslen" +#else +# define TEST_NAME "strlen" +#endif /* WIDE */ + +#define TEST_MAIN +#include + +#ifdef WIDE +# include +# define STRLEN wcslen +# define CHAR wchar_t +#else +# define STRLEN strlen +# define CHAR char +#endif /* WIDE */ + +IMPL (STRLEN, 1) + +typedef size_t (*proto_t) (const CHAR *); + +typedef struct +{ + void (*fn) (void); +} parameter_t; + +size_t +__attribute__ ((weak, noinline, noclone)) +do_strlen (parameter_t *a, int zero, const CHAR *str) +{ + return CALL (a, str); +} + +static int +test_main (void) +{ + test_init (); + + size_t size = page_size / sizeof (CHAR) - 1; + CHAR *buf = (CHAR *) buf2; + buf[size] = 0; + + parameter_t a; + + int ret = 0; + FOR_EACH_IMPL (impl, 0) + { + a.fn = impl->fn; + /* NB: Pass 0 in RSI. */ + size_t res = do_strlen (&a, 0, buf); + if (res != size) + { + error (0, 0, "Wrong result in function %s: %zu != %zu", + impl->name, res, size); + ret = 1; + } + } + + return ret ? EXIT_FAILURE : EXIT_SUCCESS; +} + +#include diff --git a/sysdeps/x86_64/tst-rsi-wcslen.c b/sysdeps/x86_64/tst-rsi-wcslen.c new file mode 100644 index 000000000..f45a7dfb5 --- /dev/null +++ b/sysdeps/x86_64/tst-rsi-wcslen.c @@ -0,0 +1,20 @@ +/* Test wcslen with 0 in the RSI register. + Copyright (C) 2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#define WIDE 1 +#include "tst-rsi-strlen.c" -- 2.30.2
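A closing note on the RTM plumbing this patch threads through sysdep.h and the *_rtm string variants: vzeroupper aborts a running RTM transaction while vzeroall does not, so ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST checks xtest before choosing a return path. A minimal C illustration of calling a string routine under RTM, using the standard _xbegin/_xend intrinsics (compile with -mrtm; a sketch for illustration only, not glibc code):

#include <immintrin.h>
#include <string.h>

/* Returns 1 if strlen committed inside the transaction, 0 if the
   transaction aborted and the caller must take a fallback path.  */
int
strlen_in_transaction (const char *s, size_t *len)
{
  if (_xbegin () == _XBEGIN_STARTED)
    {
      /* An AVX2 strlen that returned through a plain vzeroupper
         would abort here every time; the RTM-safe return sequence
         uses xtest and vzeroall instead.  */
      *len = strlen (s);
      _xend ();
      return 1;
    }
  return 0;
}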