| author | Zack Weinberg <zackw@panix.com> | 2021-02-27 21:23:30 -0500 |
|---|---|---|
| committer | Zack Weinberg <zackw@panix.com> | 2021-03-07 11:00:44 -0500 |
| commit | 578271c3776a442fa55ac5f5ea83c7dc83ede979 (patch) | |
| tree | b3dc74d79ab27e473f707f6dc763073ef7f3244a | /test/explicit-bzero.c |
| parent | 23cf52eb46eb71eb52de7c70316f8c7cd630b50c (diff) | |
Improve fallback implementation of explicit_bzero.

Instead of writes through a volatile pointer, which forces inefficient
code generation and _isn’t_ guaranteed to do what we want in this
context, use a regular call to memset followed by a no-op assembly
insert that is declared to read the memory that we wrote. Also mark
the function as not inlinable, which has no effect now (it’s in a TU
by itself anyway) but may become important if we ever turn on
link-time optimization.
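
For reference, the pattern described above looks roughly like the sketch below. This is a minimal illustration, not the code actually committed (the real implementation lives elsewhere in the tree and its names may differ); it assumes a GCC/Clang-compatible compiler, where an empty asm statement taking the buffer's address as an input and clobbering "memory" keeps the preceding memset from being treated as a dead store.

```c
#include <stddef.h>
#include <string.h>

/* Hypothetical fallback: clear the buffer with an ordinary memset, then
   pretend to read it from an opaque, empty asm insert, so the compiler
   cannot prove the stores are dead and delete them.  The noinline
   attribute mirrors the "not inlinable" marking mentioned above.  */
__attribute__ ((noinline))
static void
fallback_explicit_bzero (void *s, size_t len)
{
  memset (s, 0, len);
  /* Emits no instructions; the "r" input and "memory" clobber tell the
     compiler that the zeroed bytes may be read after this point.  */
  __asm__ __volatile__ ("" : : "r" (s) : "memory");
}
```

The key point is that the asm statement is opaque to the optimizer, so the writes performed by memset appear to have a consumer even though the buffer is about to go out of scope.
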
Also, and perhaps more important, import glibc’s test of
explicit_bzero and apply it to whatever “memory clear that can’t be
optimized out” function we have found.

Diffstat (limited to 'test/explicit-bzero.c')

| -rw-r--r-- | test/explicit-bzero.c | 332 |

1 files changed, 332 insertions, 0 deletions

```diff
diff --git a/test/explicit-bzero.c b/test/explicit-bzero.c
new file mode 100644
index 0000000..b7f5255
--- /dev/null
+++ b/test/explicit-bzero.c
@@ -0,0 +1,332 @@
+/* Test that explicit_bzero block clears are not optimized out.
+   Copyright (C) 2016-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+/* This test is conceptually based on a test designed by Matthew
+   Dempsky for the OpenBSD regression suite:
+   <openbsd>/src/regress/lib/libc/explicit_bzero/explicit_bzero.c.
+   The basic idea is, we have a function that contains a
+   block-clearing operation (not necessarily explicit_bzero), after
+   which the block is dead, in the compiler-jargon sense.  Execute
+   that function while running on a user-allocated alternative
+   stack.  Then we have another pointer to the memory region affected
+   by the block clear -- namely, the original allocation for the
+   alternative stack -- and can find out whether it actually happened.
+
+   The OpenBSD test uses sigaltstack and SIGUSR1 to get onto an
+   alternative stack.  This causes a number of awkward problems; some
+   operating systems (e.g. Solaris and OSX) wipe the signal stack upon
+   returning to the normal stack, there's no way to be sure that other
+   processes running on the same system will not interfere, and the
+   signal stack is very small so it's not safe to call printf there.
+   This implementation instead uses the <ucontext.h> coroutine
+   interface.  The coroutine stack is still too small to safely use
+   printf, but we know the OS won't erase it, so we can do all the
+   checks and printing from the normal stack.  */
+
+#include "crypt-port.h"
+
+#ifndef HAVE_UCONTEXT_H
+/* We can't do this test if we don't have the ucontext API.  */
+int main(void)
+{
+  return 77;
+}
+#else
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <ucontext.h>
+
+#ifdef HAVE_VALGRIND_VALGRIND_H
+# include <valgrind/valgrind.h>
+# include <valgrind/memcheck.h>
+#else
+# define VALGRIND_STACK_REGISTER(start, end) do {} while (0)
+# define VALGRIND_MAKE_MEM_DEFINED(addr, len) do {} while (0)
+#endif
+
+/* A byte pattern that is unlikely to occur by chance: the first 16
+   prime numbers (OEIS A000040).  */
+static const unsigned char test_pattern[16] =
+{
+  2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53
+};
+
+/* Immediately after each subtest returns, we call swapcontext to get
+   back onto the main stack.  That call might itself overwrite the
+   test pattern, so we fill a modest-sized buffer with copies of it
+   and check whether any of them survived.  */
+
+#define PATTERN_SIZE (sizeof test_pattern)
+#define PATTERN_REPS 32
+#define TEST_BUFFER_SIZE (PATTERN_SIZE * PATTERN_REPS)
+
+/* There are three subtests, two of which are sanity checks.
+   Each test follows this sequence:
+
+     main                       coroutine
+     ----                       ---------
+     advance cur_subtest
+     swap
+                                call setup function
+                                  prepare test buffer
+                                  swap
+     verify that buffer
+     was filled in
+     swap
+                                  possibly clear buffer
+                                return
+                                swap
+     check buffer again,
+     according to test
+     expectation
+
+   In the "no_clear" case, we don't do anything to the test buffer
+   between preparing it and letting it go out of scope, and we expect
+   to find it.  This confirms that the test buffer does get filled in
+   and we can find it from the stack buffer.  In the "ordinary_clear"
+   case, we clear it using memset.  Depending on the target, the
+   compiler may not be able to apply dead store elimination to the
+   memset call, so the test does not fail if the memset is not
+   eliminated.  Finally, the "explicit_clear" case uses explicit_bzero
+   and expects _not_ to find the test buffer, which is the real
+   test.  */
+
+static ucontext_t uc_main, uc_co;
+
+static NO_INLINE int
+use_test_buffer (unsigned char *buf)
+{
+  unsigned int sum = 0;
+
+  for (unsigned int i = 0; i < PATTERN_REPS; i++)
+    sum += buf[i * PATTERN_SIZE];
+
+  return (sum == 2 * PATTERN_REPS) ? 0 : 1;
+}
+
+/* Always check the test buffer immediately after filling it; this
+   makes externally visible side effects depend on the buffer existing
+   and having been filled in.  */
+static inline void
+prepare_test_buffer (unsigned char *buf)
+{
+  for (unsigned int i = 0; i < PATTERN_REPS; i++)
+    memcpy (buf + i*PATTERN_SIZE, test_pattern, PATTERN_SIZE);
+
+  if (swapcontext (&uc_co, &uc_main))
+    abort ();
+
+  /* Force the compiler to really copy the pattern to buf.  */
+  if (use_test_buffer (buf))
+    abort ();
+}
+
+static void
+setup_no_clear (void)
+{
+  unsigned char buf[TEST_BUFFER_SIZE];
+  prepare_test_buffer (buf);
+}
+
+static void
+setup_ordinary_clear (void)
+{
+  unsigned char buf[TEST_BUFFER_SIZE];
+  prepare_test_buffer (buf);
+  memset (buf, 0, TEST_BUFFER_SIZE);
+}
+
+static void
+setup_explicit_clear (void)
+{
+  unsigned char buf[TEST_BUFFER_SIZE];
+  prepare_test_buffer (buf);
+  explicit_bzero (buf, TEST_BUFFER_SIZE);
+}
+
+enum test_expectation
+  {
+    EXPECT_NONE = 1,
+    EXPECT_SOME,
+    EXPECT_ALL,
+    NO_EXPECTATIONS,
+    ARRAY_END = 0
+  };
+struct subtest
+{
+  void (*setup_subtest) (void);
+  const char *label;
+  enum test_expectation expected;
+};
+static const struct subtest *cur_subtest;
+
+static const struct subtest subtests[] =
+{
+  { setup_no_clear, "no clear", EXPECT_SOME },
+  /* The memset may happen or not, depending on compiler
+     optimizations.  */
+  { setup_ordinary_clear, "ordinary clear", NO_EXPECTATIONS },
+  { setup_explicit_clear, "explicit clear", EXPECT_NONE },
+  { 0, 0, ARRAY_END }
+};
+
+static void
+test_coroutine (void)
+{
+  while (cur_subtest->setup_subtest)
+    {
+      cur_subtest->setup_subtest ();
+      if (swapcontext (&uc_co, &uc_main))
+        abort ();
+    }
+}
+
+/* All the code above this point runs on the coroutine stack.
+   All the code below this point runs on the main stack.  */
+
+static int test_status;
+static unsigned char *co_stack_buffer;
+static size_t co_stack_size;
+
+static unsigned int
+count_test_patterns (unsigned char *buf, size_t bufsiz)
+{
+  VALGRIND_MAKE_MEM_DEFINED (buf, bufsiz);
+  unsigned char *first = memmem (buf, bufsiz, test_pattern, PATTERN_SIZE);
+  if (!first)
+    return 0;
+  unsigned int cnt = 0;
+  for (unsigned int i = 0; i < PATTERN_REPS; i++)
+    {
+      unsigned char *p = first + i*PATTERN_SIZE;
+      if (p + PATTERN_SIZE - buf > (ptrdiff_t)bufsiz)
+        break;
+      if (memcmp (p, test_pattern, PATTERN_SIZE) == 0)
+        cnt++;
+    }
+  return cnt;
+}
+
+static void
+check_test_buffer (enum test_expectation expected,
+                   const char *label, const char *stage)
+{
+  unsigned int cnt = count_test_patterns (co_stack_buffer, co_stack_size);
+  switch (expected)
+    {
+    case EXPECT_NONE:
+      if (cnt == 0)
+        printf ("PASS: %s/%s: expected 0 got %u\n", label, stage, cnt);
+      else
+        {
+          printf ("FAIL: %s/%s: expected 0 got %u\n", label, stage, cnt);
+          test_status = 1;
+        }
+      break;
+
+    case EXPECT_SOME:
+      if (cnt > 0)
+        printf ("PASS: %s/%s: expected some got %u\n", label, stage, cnt);
+      else
+        {
+          printf ("FAIL: %s/%s: expected some got 0\n", label, stage);
+          test_status = 1;
+        }
+      break;
+
+    case EXPECT_ALL:
+      if (cnt == PATTERN_REPS)
+        printf ("PASS: %s/%s: expected %d got %u\n", label, stage,
+                PATTERN_REPS, cnt);
+      else
+        {
+          printf ("FAIL: %s/%s: expected %d got %u\n", label, stage,
+                  PATTERN_REPS, cnt);
+          test_status = 1;
+        }
+      break;
+
+    case NO_EXPECTATIONS:
+      printf ("INFO: %s/%s: found %u patterns%s\n", label, stage, cnt,
+              cnt == 0 ? " (memset not eliminated)" : "");
+      break;
+
+    default:
+      printf ("ERROR: %s/%s: invalid value for 'expected' = %d\n",
+              label, stage, (int)expected);
+      test_status = 1;
+    }
+}
+
+static void
+test_loop (void)
+{
+  cur_subtest = subtests;
+  while (cur_subtest->setup_subtest)
+    {
+      if (swapcontext (&uc_main, &uc_co))
+        abort ();
+      check_test_buffer (EXPECT_ALL, cur_subtest->label, "prepare");
+      if (swapcontext (&uc_main, &uc_co))
+        abort ();
+      check_test_buffer (cur_subtest->expected, cur_subtest->label, "test");
+      cur_subtest++;
+    }
+  /* Terminate the coroutine.  */
+  if (swapcontext (&uc_main, &uc_co))
+    abort ();
+}
+
+int
+main (void)
+{
+  size_t page_alignment = (size_t) sysconf (_SC_PAGESIZE);
+  if (page_alignment < sizeof (void *))
+    page_alignment = sizeof (void *);
+
+  co_stack_size = SIGSTKSZ + TEST_BUFFER_SIZE;
+  if (co_stack_size < page_alignment * 4)
+    co_stack_size = page_alignment * 4;
+
+  void *p;
+  int err = posix_memalign (&p, page_alignment, co_stack_size);
+  if (err || !p)
+    {
+      printf ("ERROR: allocating alt stack: %s\n", strerror (err));
+      return 2;
+    }
+  co_stack_buffer = p;
+  memset (co_stack_buffer, 0, co_stack_size);
+  VALGRIND_STACK_REGISTER (co_stack_buffer, co_stack_buffer + co_stack_size);
+
+  if (getcontext (&uc_co))
+    {
+      printf ("ERROR: allocating coroutine context: %s\n", strerror (err));
+      return 2;
+    }
+  uc_co.uc_stack.ss_sp = co_stack_buffer;
+  uc_co.uc_stack.ss_size = co_stack_size;
+  uc_co.uc_link = &uc_main;
+  makecontext (&uc_co, test_coroutine, 0);
+
+  test_loop ();
+  return test_status;
+}
+
+#endif /* have ucontext.h */
```
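
As background for the test above (not part of the commit), here is a minimal sketch of the <ucontext.h> coroutine pattern it relies on: getcontext and makecontext set up a context on a caller-supplied stack, and swapcontext switches between that context and the main stack.

```c
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t uc_main, uc_co;
static char co_stack[64 * 1024];        /* caller-supplied coroutine stack */

static void
coroutine (void)
{
  puts ("running on the coroutine stack");
  if (swapcontext (&uc_co, &uc_main))   /* hand control back to main */
    abort ();
  puts ("resumed on the coroutine stack");
  /* Falling off the end resumes uc_main, because uc_link points at it. */
}

int
main (void)
{
  if (getcontext (&uc_co))              /* initialize, then customize */
    abort ();
  uc_co.uc_stack.ss_sp = co_stack;
  uc_co.uc_stack.ss_size = sizeof co_stack;
  uc_co.uc_link = &uc_main;
  makecontext (&uc_co, coroutine, 0);

  if (swapcontext (&uc_main, &uc_co))   /* save main, run coroutine */
    abort ();
  puts ("back on the main stack");
  if (swapcontext (&uc_main, &uc_co))   /* resume the coroutine once more */
    abort ();
  puts ("coroutine finished");
  return 0;
}
```

The imported test follows the same shape: test_loop runs on the main stack, test_coroutine on the user-allocated stack, with VALGRIND_STACK_REGISTER added so Valgrind does not object to the stack switch.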