bug-envz1 tst-strxfrm2 tst-endian tst-svc2 \
tst-strtok_r bug-strcoll2 tst-cmp tst-xbzero-opt \
test-endian-types test-endian-file-scope \
- test-endian-sign-conversion
+ test-endian-sign-conversion tst-memmove-overflow
# This test allocates a lot of memory and can run for a long time.
xtests = tst-strcoll-overflow
--- /dev/null
+/* Test for signed comparison bug in memmove (bug 25620).
+ Copyright (C) 2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* This test shifts a memory region which is a bit larger than 2 GiB
+ by one byte. In order to make it more likely that the memory
+ allocation succeeds on 32-bit systems, most of the allocation
+ consists of shared pages. Only the portions at the start and the
+ end of the allocation are unshared; they contain a specific
+ non-repeating bit pattern. */
+
+#include <array_length.h>
+#include <libc-diag.h>
+#include <stdint.h>
+#include <string.h>
+#include <support/blob_repeat.h>
+#include <support/check.h>
+#include <support/xunistd.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#define TEST_MAIN
+#define TEST_NAME "memmove"
+#include "test-string.h"
+#include <support/test-driver.h>
+
+IMPL (memmove, 1)
+
+/* Size of the part of the allocation which is not shared, at the
+ start and the end of the overall allocation. 4 MiB. */
+enum { unshared_size = 4U << 20 };
+
+/* The allocation is 2 GiB plus 8 MiB. This should work with all page
+ sizes that occur in practice. */
+enum { allocation_size = (2U << 30) + 2 * unshared_size };
+
+/* Compute the expected byte at the given index. This is used to
+ produce a non-repeating pattern. */
+static inline unsigned char
+expected_value (size_t index)
+{
+ uint32_t randomized = 0x9e3779b9 * index; /* Based on golden ratio. */
+ return randomized >> 25; /* Result is in the range [0, 127]. */
+}
+
+static int
+test_main (void)
+{
+ test_init ();
+
+ FOR_EACH_IMPL (impl, 0)
+ {
+ printf ("info: testing %s\n", impl->name);
+
+ /* Check that the allocation sizes are multiples of the page
+ size. */
+ TEST_COMPARE (allocation_size % xsysconf (_SC_PAGESIZE), 0);
+ TEST_COMPARE (unshared_size % xsysconf (_SC_PAGESIZE), 0);
+
+ /* The repeating pattern has the MSB set in all bytes. */
+ unsigned char repeating_pattern[128];
+ for (unsigned int i = 0; i < array_length (repeating_pattern); ++i)
+ repeating_pattern[i] = 0x80 | i;
+
+ struct support_blob_repeat repeat
+ = support_blob_repeat_allocate_shared (repeating_pattern,
+ sizeof (repeating_pattern),
+ (allocation_size
+ / sizeof (repeating_pattern)));
+ if (repeat.start == NULL)
+ FAIL_UNSUPPORTED ("repeated blob allocation failed: %m");
+ TEST_COMPARE (repeat.size, allocation_size);
+
+ /* Unshare the start and the end of the allocation. */
+ unsigned char *start = repeat.start;
+ xmmap (start, unshared_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1);
+ xmmap (start + allocation_size - unshared_size, unshared_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1);
+
+ /* Initialize the non-repeating pattern. */
+ for (size_t i = 0; i < unshared_size; ++i)
+ start[i] = expected_value (i);
+ for (size_t i = allocation_size - unshared_size; i < allocation_size;
+ ++i)
+ start[i] = expected_value (i);
+
+ /* Make sure that there was really no sharing. */
+ asm volatile ("" ::: "memory");
+ for (size_t i = 0; i < unshared_size; ++i)
+ TEST_COMPARE (start[i], expected_value (i));
+ for (size_t i = allocation_size - unshared_size; i < allocation_size;
+ ++i)
+ TEST_COMPARE (start[i], expected_value (i));
+
+ /* Used for a nicer error diagnostic using
+ TEST_COMPARE_BLOB. */
+ unsigned char expected_start[128];
+ memcpy (expected_start, start + 1, sizeof (expected_start));
+ unsigned char expected_end[128];
+ memcpy (expected_end,
+ start + allocation_size - sizeof (expected_end),
+ sizeof (expected_end));
+
+ /* Move the entire allocation forward by one byte. */
+ DIAG_PUSH_NEEDS_COMMENT;
+#if __GNUC_PREREQ (8, 0)
+ /* GCC 8 warns about string function argument overflows. */
+ DIAG_IGNORE_NEEDS_COMMENT (8, "-Warray-bounds");
+ DIAG_IGNORE_NEEDS_COMMENT (8, "-Wstringop-overflow");
+#endif
+ memmove (start, start + 1, allocation_size - 1);
+ DIAG_POP_NEEDS_COMMENT;
+
+ /* Check that the unshared parts of the memory region have been
+ shifted as expected. The TEST_COMPARE_BLOB checks are
+ redundant, but produce nicer diagnostics. */
+ asm volatile ("" ::: "memory");
+ TEST_COMPARE_BLOB (expected_start, sizeof (expected_start),
+ start, sizeof (expected_start));
+ TEST_COMPARE_BLOB (expected_end, sizeof (expected_end),
+ start + allocation_size - sizeof (expected_end) - 1,
+ sizeof (expected_end));
+ for (size_t i = 0; i < unshared_size - 1; ++i)
+ TEST_COMPARE (start[i], expected_value (i + 1));
+ /* The gap between the checked start and end areas of the mapping
+ has shared mappings at unspecified boundaries, so do not
+ check the expected values in the middle. */
+ for (size_t i = allocation_size - unshared_size; i < allocation_size - 1;
+ ++i)
+ TEST_COMPARE (start[i], expected_value (i + 1));
+
+ support_blob_repeat_free (&repeat);
+ }
+
+ return 0;
+}
+
+#include <support/test-driver.c>
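(The test above relies on two properties of its byte patterns; what follows is not part of the patch, just a stand-alone sketch of the assumed rationale. The multiplicative hash sends adjacent indices to distant values, so a one-byte shift cannot accidentally reproduce the original sequence, and its 7-bit results can never collide with the shared filler bytes, which all have the most significant bit set.)

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same hash as expected_value in the test above. */
static unsigned char
expected_value (size_t index)
{
  uint32_t randomized = 0x9e3779b9 * index; /* Golden-ratio constant. */
  return randomized >> 25; /* Top 7 bits: range [0, 127]. */
}

int
main (void)
{
  /* Adjacent indices map to distant values: 0, 79, 30, 109, ... */
  for (size_t i = 0; i < 4; ++i)
    printf ("%zu -> %u\n", i, (unsigned int) expected_value (i));

  /* Shared filler bytes are 0x80 | i, i.e. always >= 0x80, and thus
     distinguishable from any expected_value result. */
  printf ("filler byte: 0x%x\n", 0x80 | 5);
  return 0;
}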
}
/* Allocations larger than maximum_small_size potentially use mmap
- with alias mappings. */
+ with alias mappings. If SHARED, the alias mappings are created
+ using MAP_SHARED instead of MAP_PRIVATE. */
static struct support_blob_repeat
allocate_big (size_t total_size, const void *element, size_t element_size,
- size_t count)
+ size_t count, bool shared)
{
unsigned long page_size = xsysconf (_SC_PAGESIZE);
size_t stride_size = minimum_stride_size (page_size, element_size);
{
size_t remaining_size = total_size;
char *current = target;
- int flags = MAP_FIXED | MAP_FILE | MAP_PRIVATE;
+ int flags = MAP_FIXED | MAP_FILE;
+ if (shared)
+ flags |= MAP_SHARED;
+ else
+ flags |= MAP_PRIVATE;
#ifdef MAP_NORESERVE
flags |= MAP_NORESERVE;
#endif
}
-struct support_blob_repeat
-support_blob_repeat_allocate (const void *element, size_t element_size,
- size_t count)
+static struct support_blob_repeat
+repeat_allocate (const void *element, size_t element_size,
+ size_t count, bool shared)
{
size_t total_size;
if (check_mul_overflow_size_t (element_size, count, &total_size))
if (total_size <= maximum_small_size)
return allocate_malloc (total_size, element, element_size, count);
else
- return allocate_big (total_size, element, element_size, count);
+ return allocate_big (total_size, element, element_size, count, shared);
+}
+
+struct support_blob_repeat
+support_blob_repeat_allocate (const void *element, size_t element_size,
+ size_t count)
+{
+ return repeat_allocate (element, element_size, count, false);
+}
+
+struct support_blob_repeat
+support_blob_repeat_allocate_shared (const void *element, size_t element_size,
+ size_t count)
+{
+ return repeat_allocate (element, element_size, count, true);
}
void
size_t element_size,
size_t count);
-/* Deallocate the blob created by support_blob_repeat_allocate. */
+/* Like support_blob_repeat_allocate, except that copy-on-write
+ semantics are disabled. This means writing to one part of the blob
+ can affect other parts. It is possible to map non-shared memory
+ over parts of the resulting blob using MAP_ANONYMOUS | MAP_FIXED
+ | MAP_PRIVATE, so that writes to these parts do not affect
+ others. */
+struct support_blob_repeat support_blob_repeat_allocate_shared
+ (const void *element, size_t element_size, size_t count);
+
+/* Deallocate the blob created by support_blob_repeat_allocate or
+ support_blob_repeat_allocate_shared. */
void support_blob_repeat_free (struct support_blob_repeat *);
#endif /* SUPPORT_BLOB_REPEAT_H */
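(Not part of the patch: a condensed usage sketch of the new interface, mirroring what the memmove test above does. The function name "example" and the pattern contents are hypothetical. The idea is to allocate a large, mostly page-shared blob, then map private anonymous memory over a window so that writes to that window stay local.)

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <support/blob_repeat.h>
#include <support/xunistd.h>

void
example (void)
{
  static const unsigned char pattern[64] = { 1, 2, 3 };

  /* 64 MiB of repeated pattern, mostly aliased shared pages. */
  struct support_blob_repeat repeat
    = support_blob_repeat_allocate_shared (pattern, sizeof (pattern),
                                           1024 * 1024);
  if (repeat.start == NULL)
    return; /* Not enough memory or address space; skip. */

  /* A write through one alias would otherwise show up in every other
     alias of the page, so unshare the first page before writing. */
  xmmap (repeat.start, xsysconf (_SC_PAGESIZE),
         PROT_READ | PROT_WRITE,
         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1);
  memset (repeat.start, 0xaa, 16);

  support_blob_repeat_free (&repeat);
}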
<http://www.gnu.org/licenses/>. */
#include <stdio.h>
+#include <string.h>
#include <support/blob_repeat.h>
#include <support/check.h>
}
support_blob_repeat_free (&repeat);
- repeat = support_blob_repeat_allocate ("012345678", 9, 10 * 1000 * 1000);
- if (repeat.start == NULL)
- puts ("warning: not enough memory for large mapping");
- else
+ for (int do_shared = 0; do_shared < 2; ++do_shared)
{
- unsigned char *p = repeat.start;
- for (int i = 0; i < 10 * 1000 * 1000; ++i)
- for (int j = 0; j <= 8; ++j)
- if (p[i * 9 + j] != '0' + j)
- {
- printf ("error: element %d index %d\n", i, j);
- TEST_COMPARE (p[i * 9 + j], '0' + j);
- }
+ if (do_shared)
+ repeat = support_blob_repeat_allocate_shared ("012345678", 9,
+ 10 * 1000 * 1000);
+ else
+ repeat = support_blob_repeat_allocate ("012345678", 9,
+ 10 * 1000 * 1000);
+ if (repeat.start == NULL)
+ puts ("warning: not enough memory for large mapping");
+ else
+ {
+ unsigned char *p = repeat.start;
+ for (int i = 0; i < 10 * 1000 * 1000; ++i)
+ for (int j = 0; j <= 8; ++j)
+ if (p[i * 9 + j] != '0' + j)
+ {
+ printf ("error: element %d index %d\n", i, j);
+ TEST_COMPARE (p[i * 9 + j], '0' + j);
+ }
+
+ enum { total_size = 9 * 10 * 1000 * 1000 };
+ p[total_size - 1] = '\0';
+ asm ("" ::: "memory");
+ if (do_shared)
+ /* The write is repeated in multiple places earlier in the
+ string due to page sharing. */
+ TEST_VERIFY (strlen (repeat.start) < total_size - 1);
+ else
+ TEST_COMPARE (strlen (repeat.start), total_size - 1);
+ }
+ support_blob_repeat_free (&repeat);
}
- support_blob_repeat_free (&repeat);
return 0;
}
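(Not part of the patch: the shared-case strlen check above works because all shared pages of the blob alias the same physical pages, so the '\0' written near the end also appears at the same offset in earlier copies. A minimal stand-alone demonstration of that aliasing, using two MAP_SHARED views of one temporary file; the path is arbitrary and error handling is omitted for brevity.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  char path[] = "/tmp/alias-demo-XXXXXX";
  int fd = mkstemp (path);
  ftruncate (fd, 4096);

  /* Two views of the same file page: writes through one view are
     immediately visible through the other. */
  char *a = mmap (NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  char *b = mmap (NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

  strcpy (a, "012345678");
  b[0] = '\0'; /* Truncates the string in both views. */
  printf ("length seen via a: %zu\n", strlen (a)); /* Prints 0. */

  munmap (a, 4096);
  munmap (b, 4096);
  unlink (path);
  close (fd);
  return 0;
}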
mov dst, dstin /* Preserve dstin, we need to return it. */
cmp count, #64
- bge .Lcpy_not_short
+ bhs .Lcpy_not_short
/* Deal with small copies quickly by dropping straight into the
exit block. */
1:
subs tmp2, count, #64 /* Use tmp2 for count. */
- blt .Ltail63aligned
+ blo .Ltail63aligned
cmp tmp2, #512
- bge .Lcpy_body_long
+ bhs .Lcpy_body_long
.Lcpy_body_medium: /* Count in tmp2. */
#ifdef USE_VFP
add src, src, #64
vstr d1, [dst, #56]
add dst, dst, #64
- bge 1b
+ bhs 1b
tst tmp2, #0x3f
beq .Ldone
ldrd A_l, A_h, [src, #64]!
strd A_l, A_h, [dst, #64]!
subs tmp2, tmp2, #64
- bge 1b
+ bhs 1b
tst tmp2, #0x3f
bne 1f
ldr tmp2,[sp], #FRAME_SIZE
add src, src, #32
subs tmp2, tmp2, #prefetch_lines * 64 * 2
- blt 2f
+ blo 2f
1:
cpy_line_vfp d3, 0
cpy_line_vfp d4, 64
add dst, dst, #2 * 64
add src, src, #2 * 64
subs tmp2, tmp2, #prefetch_lines * 64
- bge 1b
+ bhs 1b
2:
cpy_tail_vfp d3, 0
1:
pld [src, #(3 * 64)]
subs count, count, #64
- ldrmi tmp2, [sp], #FRAME_SIZE
- bmi .Ltail63unaligned
+ ldrlo tmp2, [sp], #FRAME_SIZE
+ blo .Ltail63unaligned
pld [src, #(4 * 64)]
#ifdef USE_NEON
neon_load_multi d0-d3, src
neon_load_multi d4-d7, src
subs count, count, #64
- bmi 2f
+ blo 2f
1:
pld [src, #(4 * 64)]
neon_store_multi d0-d3, dst
neon_store_multi d4-d7, dst
neon_load_multi d4-d7, src
subs count, count, #64
- bpl 1b
+ bhs 1b
2:
neon_store_multi d0-d3, dst
neon_store_multi d4-d7, dst
cfi_remember_state
subs r2, r2, #4
- blt 8f
+ blo 8f
ands ip, r0, #3
PLD( pld [r1, #0] )
bne 9f
cfi_rel_offset (r6, 4)
cfi_rel_offset (r7, 8)
cfi_rel_offset (r8, 12)
- blt 5f
+ blo 5f
CALGN( ands ip, r1, #31 )
CALGN( rsb r3, ip, #32 )
#endif
PLD( pld [r1, #0] )
-2: PLD( subs r2, r2, #96 )
+2: PLD( cmp r2, #96 )
PLD( pld [r1, #28] )
- PLD( blt 4f )
+ PLD( blo 4f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
subs r2, r2, #32
stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
- bge 3b
- PLD( cmn r2, #96 )
- PLD( bge 4b )
+ bhs 3b
5: ands ip, r2, #28
rsb ip, ip, #32
strbge r4, [r0], #1
subs r2, r2, ip
strb lr, [r0], #1
- blt 8b
+ blo 8b
ands ip, r1, #3
beq 1b
.macro forward_copy_shift pull push
subs r2, r2, #28
- blt 14f
+ blo 14f
CALGN( ands ip, r1, #31 )
CALGN( rsb ip, ip, #32 )
cfi_rel_offset (r10, 16)
PLD( pld [r1, #0] )
- PLD( subs r2, r2, #96 )
+ PLD( cmp r2, #96 )
PLD( pld [r1, #28] )
- PLD( blt 13f )
+ PLD( blo 13f )
PLD( pld [r1, #60] )
PLD( pld [r1, #92] )
mov ip, ip, PULL #\pull
orr ip, ip, lr, PUSH #\push
stmia r0!, {r3, r4, r5, r6, r7, r8, r10, ip}
- bge 12b
- PLD( cmn r2, #96 )
- PLD( bge 13b )
+ bhs 12b
pop {r5 - r8, r10}
cfi_adjust_cfa_offset (-20)
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
- blt 8f
+ blo 8f
ands ip, r0, #3
PLD( pld [r1, #-4] )
bne 9f
cfi_rel_offset (r6, 4)
cfi_rel_offset (r7, 8)
cfi_rel_offset (r8, 12)
- blt 5f
+ blo 5f
CALGN( ands ip, r1, #31 )
CALGN( sbcsne r4, ip, r2 ) @ C is always set here
#endif
PLD( pld [r1, #-4] )
-2: PLD( subs r2, r2, #96 )
+2: PLD( cmp r2, #96 )
PLD( pld [r1, #-32] )
- PLD( blt 4f )
+ PLD( blo 4f )
PLD( pld [r1, #-64] )
PLD( pld [r1, #-96] )
4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr}
subs r2, r2, #32
stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr}
- bge 3b
- PLD( cmn r2, #96 )
- PLD( bge 4b )
+ bhs 3b
5: ands ip, r2, #28
rsb ip, ip, #32
strbge r4, [r0, #-1]!
subs r2, r2, ip
strb lr, [r0, #-1]!
- blt 8b
+ blo 8b
ands ip, r1, #3
beq 1b
.macro backward_copy_shift push pull
subs r2, r2, #28
- blt 14f
+ blo 14f
CALGN( ands ip, r1, #31 )
CALGN( rsb ip, ip, #32 )
cfi_rel_offset (r10, 16)
PLD( pld [r1, #-4] )
- PLD( subs r2, r2, #96 )
+ PLD( cmp r2, #96 )
PLD( pld [r1, #-32] )
- PLD( blt 13f )
+ PLD( blo 13f )
PLD( pld [r1, #-64] )
PLD( pld [r1, #-96] )
mov r4, r4, PUSH #\push
orr r4, r4, r3, PULL #\pull
stmdb r0!, {r4 - r8, r10, ip, lr}
- bge 12b
- PLD( cmn r2, #96 )
- PLD( bge 13b )
+ bhs 12b
pop {r5 - r8, r10}
cfi_adjust_cfa_offset (-20)
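(A note on the assembly changes above, not part of the patch. After a cmp or subs, the ARM condition codes bge, blt, bpl and bmi test the signed flags N and V, while bhs and blo test the carry flag, i.e. perform an unsigned comparison. The two disagree exactly when bit 31 of the length is set, which is what bug 25620 is about and what the new test constructs. A stand-alone C illustration:)

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* The test's memmove length: a bit over 2 GiB, so bit 31 is set. */
  uint32_t count = (2U << 30) + (8U << 20) - 1;

  /* "cmp count, #64; bge ..." takes the signed view: the conversion
     is two's complement on ARM, the value reads as negative, and the
     pre-fix code dropped into the short-copy path. */
  printf ("signed >= 64: %d\n", (int32_t) count >= 64); /* 0 */

  /* "cmp count, #64; bhs ..." takes the unsigned view, matching what
     the count register actually holds. */
  printf ("unsigned >= 64: %d\n", count >= 64); /* 1 */
  return 0;
}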