author     Maxim Ostapenko <m.ostapenko@samsung.com>   2016-03-24 15:11:55 +0300
committer  Maxim Ostapenko <m.ostapenko@samsung.com>   2016-04-25 12:03:05 +0300
commit     213a798e2faade204bed283254e0bc703aa7ad2e (patch)
tree       f963caf88922d0a992da13393fad26be0e6e647b
parent     9587dbfdfb7ac52f4f052ed2529b7ffa7b71287f (diff)
download   linaro-gcc-213a798e2faade204bed283254e0bc703aa7ad2e.tar.gz
           linaro-gcc-213a798e2faade204bed283254e0bc703aa7ad2e.tar.bz2
           linaro-gcc-213a798e2faade204bed283254e0bc703aa7ad2e.zip
Backport sanitizer patches from GCC mainline.
Backport from mainline. Enable Address Sanitizer for AArch64. 2014-09-26 Christophe Lyon <christophe.lyon@linaro.org> gcc/ * config/aarch64/aarch64-linux.h (ASAN_CC1_SPEC): Define. (CC1_SPEC): Define. * config/aarch64/aarch64.c (aarch64_asan_shadow_offset): New function. (TARGET_ASAN_SHADOW_OFFSET): Define. libsanitizer/ * configure.tgt: Enable build on aarch64*-linux. Conflicts: ChangeLog.vd 2014-05-14 Yury Gribov <y.gribov@samsung.com> PR sanitizer/61100 * Makefile.am (nodist_saninclude_HEADERS): Install public headers. * Makefile.in: Regenerate. * c-c++-common/asan/asan-interface-1.c: New test. * lib/asan-dg.exp (asan_include_flags): New function. (asan_init): Call asan_include_flags to obtain path to sanitizer headers. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210413 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/testsuite/c-c++-common/asan/asan-interface-1.c PR sanitizer/61272 * ubsan.c (is_ubsan_builtin_p): Turn assert into a condition. * g++.dg/ubsan/pr61272.C: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210723 138bc75d-0d04-0410-961f-82ee72b054a4 libsanitizer merge from upstream r209283 git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210743 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h * g++.dg/asan/asan_test.C: Add -std=c++11 and -DSANITIZER_USE_DEJAGNU_GTEST=1 to dg-options, remove -DASAN_USE_DEJAGNU_GTEST=1. * g++.dg/asan/asan_mem_test.cc: Updated from upstream r209283. * g++.dg/asan/asan_oob_test.cc: Likewise. * g++.dg/asan/sanitizer_test_utils.h: Likewise. * g++.dg/asan/asan_str_test.cc: Likewise. * g++.dg/asan/asan_test_utils.h: Likewise. * g++.dg/asan/sanitizer_test_config.h: Likewise. * g++.dg/asan/asan_test.cc: Likewise. Allow size 12 for long double. * g++.dg/asan/sanitizer_pthread_wrappers.h: New file. Imported from upstream r209283. * g++.dg/asan/asan_test_config.h: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211090 138bc75d-0d04-0410-961f-82ee72b054a4 2014-04-15 Max Ostapenko <m.ostapenko@partner.samsung.com> * c-c++-common/asan/null-deref-1.c: Change regexp to pass test under qemu-arm. * c-c++-common/ubsan/div-by-zero-1.c: Likewise. * c-c++-common/ubsan/div-by-zero-2.c: Likewise. * c-c++-common/ubsan/div-by-zero-3.c: Likewise. * c-c++-common/ubsan/load-bool-enum.c (foo): Likewise. * c-c++-common/ubsan/null-1.c: Likewise. * c-c++-common/ubsan/null-10.c: Likewise. * c-c++-common/ubsan/null-11.c: Likewise. * c-c++-common/ubsan/null-2.c: Likewise. * c-c++-common/ubsan/null-3.c: Likewise. * c-c++-common/ubsan/null-4.c: Likewise. * c-c++-common/ubsan/null-5.c: Likewise. * c-c++-common/ubsan/null-6.c: Likewise. * c-c++-common/ubsan/null-7.c: Likewise. * c-c++-common/ubsan/null-8.c: Likewise. * c-c++-common/ubsan/null-9.c: Likewise. * c-c++-common/ubsan/overflow-add-2.c: Likewise. * c-c++-common/ubsan/overflow-int128.c: Likewise. * c-c++-common/ubsan/overflow-mul-2.c: Likewise. * c-c++-common/ubsan/overflow-mul-4.c: Likewise. * c-c++-common/ubsan/overflow-negate-1.c: Likewise. * c-c++-common/ubsan/overflow-sub-2.c: Likewise. * c-c++-common/ubsan/pr59333.c: Likewise. * c-c++-common/ubsan/pr59667.c: Likewise. * c-c++-common/ubsan/pr60613-2.c: Likewise. * c-c++-common/ubsan/pr60636.c: Likewise. * c-c++-common/ubsan/shift-1.c: Likewise. * c-c++-common/ubsan/shift-2.c: Likewise. * c-c++-common/ubsan/vla-1.c: Likewise. 
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@209402 138bc75d-0d04-0410-961f-82ee72b054a4 PR middle-end/60281 * asan.c (asan_emit_stack_protection): Force the base to align to appropriate bits if STRICT_ALIGNMENT. Set shadow_mem align to appropriate bits if STRICT_ALIGNMENT. * cfgexpand.c (expand_stack_vars): Set base_align appropriately when asan is on. (expand_used_vars): Leave a space in the stack frame for alignment if STRICT_ALIGNMENT. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@209554 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/60275 * common.opt (fsanitize-recover, fsanitize-undefined-trap-on-error): New options. * gcc.c (sanitize_spec_function): Don't return "" for "undefined" if flag_sanitize_undefined_trap_on_error. * sanitizer.def (BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW_ABORT, BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT, BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE_ABORT, BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT, BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW_ABORT, BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW_ABORT, BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW_ABORT, BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW_ABORT, BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT): New builtins. * ubsan.c (ubsan_instrument_unreachable): Return __builtin_trap () if flag_sanitize_undefined_trap_on_error. (ubsan_expand_null_ifn): Emit __builtin_trap () if flag_sanitize_undefined_trap_on_error and __ubsan_handle_type_mismatch_abort if !flag_sanitize_recover. (ubsan_expand_null_ifn, ubsan_build_overflow_builtin, instrument_bool_enum_load): Emit __builtin_trap () if flag_sanitize_undefined_trap_on_error and __builtin_handle_*_abort () if !flag_sanitize_recover. * doc/invoke.texi (-fsanitize-recover, -fsanitize-undefined-trap-on-error): Document. c-family/ * c-ubsan.c (ubsan_instrument_return): Return __builtin_trap () if flag_sanitize_undefined_trap_on_error. (ubsan_instrument_division, ubsan_instrument_shift, ubsan_instrument_vla): Likewise. Use __ubsan_handle_*_abort () if !flag_sanitize_recover. testsuite/ * g++.dg/ubsan/return-2.C: Revert 2014-03-24 changes, add -fno-sanitize-recover to dg-options. * g++.dg/ubsan/cxx11-shift-1.C: Remove c++11 target restriction, add -std=c++11 to dg-options. * g++.dg/ubsan/cxx11-shift-2.C: Likewise. * g++.dg/ubsan/cxx1y-vla.C: Remove c++1y target restriction, add -std=c++1y to dg-options. * c-c++-common/ubsan/undefined-1.c: Revert 2014-03-24 changes, add -fno-sanitize-recover to dg-options. * c-c++-common/ubsan/overflow-sub-1.c: Likewise. * c-c++-common/ubsan/vla-4.c: Likewise. * c-c++-common/ubsan/pr59503.c: Likewise. * c-c++-common/ubsan/vla-3.c: Likewise. * c-c++-common/ubsan/save-expr-1.c: Likewise. * c-c++-common/ubsan/overflow-add-1.c: Likewise. * c-c++-common/ubsan/shift-3.c: Likewise. * c-c++-common/ubsan/overflow-1.c: Likewise. * c-c++-common/ubsan/overflow-negate-2.c: Likewise. * c-c++-common/ubsan/vla-2.c: Likewise. * c-c++-common/ubsan/overflow-mul-1.c: Likewise. * c-c++-common/ubsan/pr60613-1.c: Likewise. * c-c++-common/ubsan/shift-6.c: Likewise. * c-c++-common/ubsan/overflow-mul-3.c: Likewise. * c-c++-common/ubsan/overflow-add-3.c: New test. * c-c++-common/ubsan/overflow-add-4.c: New test. * c-c++-common/ubsan/div-by-zero-6.c: New test. * c-c++-common/ubsan/div-by-zero-7.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@209672 138bc75d-0d04-0410-961f-82ee72b054a4 Fix ubsan internal-fn.c handling. 
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@209699 138bc75d-0d04-0410-961f-82ee72b054a4 * gcc.c (sanitize_spec_function): Handle SANITIZE_FLOAT_DIVIDE. * builtins.def: Initialize builtins even for SANITIZE_FLOAT_DIVIDE. * flag-types.h (enum sanitize_code): Add SANITIZE_FLOAT_DIVIDE. * opts.c (common_handle_option): Add -fsanitize=float-divide-by-zero. c-family/ * c-ubsan.c (ubsan_instrument_division): Handle REAL_TYPEs. Perform INT_MIN / -1 sanitization only for integer types. c/ * c-typeck.c (build_binary_op): Call ubsan_instrument_division also when SANITIZE_FLOAT_DIVIDE is on. cp/ * typeck.c (cp_build_binary_op): Call ubsan_instrument_division even when SANITIZE_FLOAT_DIVIDE is on. Set doing_div_or_mod even for non-integer types. testsuite/ * c-c++-common/ubsan/div-by-zero-5.c: Fix formatting. * c-c++-common/ubsan/float-div-by-zero-1.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@209927 138bc75d-0d04-0410-961f-82ee72b054a4 * doc/invoke.texi: Describe -fsanitize=float-divide-by-zero. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210002 138bc75d-0d04-0410-961f-82ee72b054a4 * builtins.def: Change SANITIZE_FLOAT_DIVIDE to SANITIZE_NONDEFAULT. * gcc.c (sanitize_spec_function): Likewise. * convert.c (convert_to_integer): Include "ubsan.h". Add floating-point to integer instrumentation. * doc/invoke.texi: Document -fsanitize=float-cast-overflow. * flag-types.h (enum sanitize_code): Add SANITIZE_FLOAT_CAST and SANITIZE_NONDEFAULT. * opts.c (common_handle_option): Handle -fsanitize=float-cast-overflow. * sanitizer.def (BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW, BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW_ABORT): Add. * ubsan.c: Include "realmpfr.h" and "dfp.h". (get_ubsan_type_info_for_type): Handle REAL_TYPEs. (ubsan_type_descriptor): Set tkind to 0xffff for types other than float/double/long double. (ubsan_instrument_float_cast): New function. * ubsan.h (ubsan_instrument_float_cast): Declare. testsuite/ * c-c++-common/ubsan/float-cast-overflow-1.c: New test. * c-c++-common/ubsan/float-cast-overflow-10.c: New test. * c-c++-common/ubsan/float-cast-overflow-2.c: New test. * c-c++-common/ubsan/float-cast-overflow-3.c: New test. * c-c++-common/ubsan/float-cast-overflow-4.c: New test. * c-c++-common/ubsan/float-cast-overflow-5.c: New test. * c-c++-common/ubsan/float-cast-overflow-6.c: New test. * c-c++-common/ubsan/float-cast-overflow-7.c: New test. * c-c++-common/ubsan/float-cast-overflow-7.h: New file. * c-c++-common/ubsan/float-cast-overflow-8.c: New test. * c-c++-common/ubsan/float-cast-overflow-9.c: New test. * c-c++-common/ubsan/float-cast.h: New file. * g++.dg/ubsan/float-cast-overflow-bf.C: New test. * gcc.dg/ubsan/float-cast-overflow-bf.c: New test. libsanitizer/ * ubsan/ubsan_value.cc (getFloatValue): Handle 96-bit floating-point types. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210862 138bc75d-0d04-0410-961f-82ee72b054a4 2014-05-27 Dominique d'Humieres <dominiq@lps.ens.fr> PR testsuite/61319 * c-c++-common/ubsan/float-cast-overflow-1.c: Make the sign of -nan optional. * c-c++-common/ubsan/float-cast-overflow-2.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-4.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210958 138bc75d-0d04-0410-961f-82ee72b054a4 PR testsuite/61319 * c-c++-common/ubsan/float-cast.h: Conditionally define LLONG_MAX, LLONG_MIN, and ULLONG_MAX. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@210970 138bc75d-0d04-0410-961f-82ee72b054a4 * sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream r209879. 
* sanitizer_common/sanitizer_common.h: Likewise. * asan/asan_mapping.h: Likewise. * asan/asan_linux.cc: Likewise. * tsan/tsan_mman.cc: Cherry pick upstream r209744. * sanitizer_common/sanitizer_allocator.h: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211080 138bc75d-0d04-0410-961f-82ee72b054a4 2014-06-11 Richard Biener <rguenther@suse.de> * asan/asan_linux.cc: Cherry pick upstream r210012. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211453 138bc75d-0d04-0410-961f-82ee72b054a4 * asan.c (pass_sanopt::execute): Handle IFN_UBSAN_BOUNDS. * flag-types.h (enum sanitize_code): Add SANITIZE_BOUNDS and or it into SANITIZE_UNDEFINED. * doc/invoke.texi: Describe -fsanitize=bounds. * gimplify.c (gimplify_call_expr): Add gimplification of internal functions created in the FEs. * internal-fn.c: Move "internal-fn.h" after "tree.h". (expand_UBSAN_BOUNDS): New function. * internal-fn.def (UBSAN_BOUNDS): New internal function. * internal-fn.h: Don't define internal functions here. * opts.c (common_handle_option): Add -fsanitize=bounds. * sanitizer.def (BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS, BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS_ABORT): Add. * tree-core.h: Define internal functions here. (struct tree_base): Add ifn field. * tree-pretty-print.c: Include "internal-fn.h". (dump_generic_node): Handle functions without CALL_EXPR_FN. * tree.c (get_callee_fndecl): Likewise. (build_call_expr_internal_loc): New function. * tree.def (CALL_EXPR): Update description. * tree.h (CALL_EXPR_IFN): Define. (build_call_expr_internal_loc): Declare. * ubsan.c (get_ubsan_type_info_for_type): Return 0 for non-arithmetic types. (ubsan_type_descriptor): Change bool parameter to enum ubsan_print_style. Adjust the code. Add handling of UBSAN_PRINT_ARRAY. (ubsan_expand_bounds_ifn): New function. (ubsan_expand_null_ifn): Adjust ubsan_type_descriptor call. (ubsan_build_overflow_builtin): Likewise. (instrument_bool_enum_load): Likewise. (ubsan_instrument_float_cast): Likewise. * ubsan.h (enum ubsan_print_style): New enum. (ubsan_expand_bounds_ifn): Declare. (ubsan_type_descriptor): Adjust declaration. Use a default parameter. c-family/ * c-gimplify.c: Include "c-ubsan.h" and "pointer-set.h". (ubsan_walk_array_refs_r): New function. (c_genericize): Instrument array bounds. * c-ubsan.c: Include "internal-fn.h". (ubsan_instrument_division): Mark instrumented arrays as having side effects. Adjust ubsan_type_descriptor call. (ubsan_instrument_shift): Likewise. (ubsan_instrument_vla): Adjust ubsan_type_descriptor call. (ubsan_instrument_bounds): New function. (ubsan_array_ref_instrumented_p): New function. (ubsan_maybe_instrument_array_ref): New function. * c-ubsan.h (ubsan_instrument_bounds): Declare. (ubsan_array_ref_instrumented_p): Declare. (ubsan_maybe_instrument_array_ref): Declare. testsuite/ * c-c++-common/ubsan/bounds-1.c: New test. * c-c++-common/ubsan/bounds-2.c: New test. * c-c++-common/ubsan/bounds-3.c: New test. * c-c++-common/ubsan/bounds-4.c: New test. * c-c++-common/ubsan/bounds-5.c: New test. * c-c++-common/ubsan/bounds-6.c: New test. Conflicts: gcc/c-family/c-ubsan.c gcc/ubsan.c 2014-06-23 Paolo Carlini <paolo.carlini@oracle.com> * sanitizer_common/sanitizer_common_interceptors.inc: Cherry pick upstream r211008. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211912 138bc75d-0d04-0410-961f-82ee72b054a4 * c-c++-common/ubsan/bounds-2.c: Adjust dg-output. (fn1): Remove store to out-of-bounds location. Add memory barrier. (fn2): Likewise. (fn5): Likewise. (fn6): Likewise. (fn7): Likewise. (fn8): Likewise. 
(fn9): Likewise. (fn11): Likewise. * c-c++-common/ubsan/bounds-5.c (fn1): Remove store to out-of-bounds location. Add memory barrier. (fn2): Likewise. (fn3): Likewise. (fn4): Likewise. (fn5): Likewise. * c-c++-common/ubsan/bounds-7.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212061 138bc75d-0d04-0410-961f-82ee72b054a4 * doc/invoke.texi (-fsanitize=bounds): Move to the table with -fsanitize=undefined suboptions. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212146 138bc75d-0d04-0410-961f-82ee72b054a4 * convert.c (convert_to_integer): Don't instrument conversions if the function has no_sanitize_undefined attribute. * ubsan.c: Don't run the ubsan pass if the function has no_sanitize_undefined attribute. c/ * c-decl.c (grokdeclarator): Don't instrument VLAs if the function has no_sanitize_undefined attribute. cp/ * cp-gimplify.c (cp_genericize): Don't instrument returns if the function has no_sanitize_undefined attribute. * decl.c (compute_array_index_type): Don't instrument VLAs if the function has no_sanitize_undefined attribute. testsuite/ * c-c++-common/ubsan/attrib-2.c: New test. * g++.dg/ubsan/return-3.C: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212148 138bc75d-0d04-0410-961f-82ee72b054a4 * doc/invoke.texi (-fsanitize=bounds): Tweak wording. (-fsanitize=float-divide-by-zero): Move to the table with -fsanitize=undefined suboptions. (-fsanitize=float-cast-overflow): Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212260 138bc75d-0d04-0410-961f-82ee72b054a4 * c-ubsan.c (ubsan_instrument_bounds): Don't instrument if TYPE_MAX_VALUE is NULL. * gcc.dg/ubsan/bounds-1.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212552 138bc75d-0d04-0410-961f-82ee72b054a4 gcc/testsuite/ 2014-07-17 Max Ostapenko <m.ostapenko@partner.samsung.com> * c-c++-common/ubsan/bounds-2.c: Change output pattern. * c-c++-common/ubsan/bounds-5.c: Likewise. * c-c++-common/ubsan/bounds-6.c: Likewise. * c-c++-common/ubsan/bounds-7.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-1.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-10.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-2.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-3.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-4.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-5.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-6.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-8.c: Likewise. * gcc.dg/ubsan/float-cast-overflow-bf.c: Likewise. * g++.dg/ubsan/float-cast-overflow-bf.C: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212755 138bc75d-0d04-0410-961f-82ee72b054a4 * c-c++-common/ubsan/bounds-2.c (fn4): Adjust to check the array size in the structure. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@212933 138bc75d-0d04-0410-961f-82ee72b054a4 * opts.c (common_handle_option): Handle -fsanitize=alignment. * ubsan.h (enum ubsan_null_ckind): Add UBSAN_CTOR_CALL. (ubsan_expand_bounds_ifn, ubsan_expand_null_ifn): Change return type to bool. * stor-layout.h (min_align_of_type): New prototype. * asan.c (pass_sanopt::execute): Don't perform gsi_next if ubsan_expand* told us not to do it. Remove the extra gsi_end_p check. * ubsan.c: Include builtins.h. (ubsan_expand_bounds_ifn): Change return type to bool, always return true. (ubsan_expand_null_ifn): Change return type to bool, change argument to gimple_stmt_iterator *. Handle both null and alignment sanitization, take type from ckind argument's type rather than first argument. (instrument_member_call): Removed. 
(instrument_mem_ref): Remove t argument, add mem and base arguments. Handle both null and alignment sanitization, don't say whole struct access is member access. Build 3 argument IFN_UBSAN_NULL call instead of 2 argument. (instrument_null): Adjust instrument_mem_ref caller. Don't instrument calls here. (pass_ubsan::gate, pass_ubsan::execute): Handle SANITIZE_ALIGNMENT like SANITIZE_NULL. * stor-layout.c (min_align_of_type): New function. * flag-types.h (enum sanitize_code): Add SANITIZE_ALIGNMENT. Or it into SANITIZE_UNDEFINED. * doc/invoke.texi (-fsanitize=alignment): Document. cp/ * cp-gimplify.c (cp_genericize_r): For -fsanitize=null and/or -fsanitize=alignment call ubsan_maybe_instrument_reference for casts to REFERENCE_TYPE and ubsan_maybe_instrument_member_call for calls to member functions. c-family/ * c-common.h (min_align_of_type): Removed prototype. * c-common.c (min_align_of_type): Removed. * c-ubsan.h (ubsan_maybe_instrument_reference, ubsan_maybe_instrument_member_call): New prototypes. * c-ubsan.c: Include stor-layout.h and builtins.h. (ubsan_maybe_instrument_reference_or_call, ubsan_maybe_instrument_reference, ubsan_maybe_instrument_call): New functions. testsuite/ * c-c++-common/ubsan/align-1.c: New test. * c-c++-common/ubsan/align-2.c: New test. * c-c++-common/ubsan/align-3.c: New test. * c-c++-common/ubsan/align-4.c: New test. * c-c++-common/ubsan/align-5.c: New test. * c-c++-common/ubsan/attrib-4.c: New test. * g++.dg/ubsan/align-1.C: New test. * g++.dg/ubsan/align-2.C: New test. * g++.dg/ubsan/align-3.C: New test. * g++.dg/ubsan/attrib-1.C: New test. * g++.dg/ubsan/null-1.C: New test. * g++.dg/ubsan/null-2.C: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@213406 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/asan.c gcc/c-family/c-common.c 2014-08-28 Yury Gribov <y.gribov@samsung.com> * c-c++-common/asan/pr62089.c: Fix test on 32-bit platforms. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@214673 138bc75d-0d04-0410-961f-82ee72b054a4 gcc/ * flag-types.h (enum sanitize_code): Add SANITIZE_NONNULL_ATTRIBUTE and SANITIZE_RETURNS_NONNULL_ATTRIBUTE, or them into SANITIZE_UNDEFINED. * opts.c (common_handle_option): Handle SANITIZE_NONNULL_ATTRIBUTE and SANITIZE_RETURNS_NONNULL_ATTRIBUTE and disable flag_delete_null_pointer_checks for them. * sanitizer.def (BUILT_IN_UBSAN_HANDLE_NONNULL_ARG, BUILT_IN_UBSAN_HANDLE_NONNULL_ARG_ABORT, BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN, BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT): New. * ubsan.c (instrument_bool_enum_load): Set *gsi back to stmt's iterator. (instrument_nonnull_arg, instrument_nonnull_return): New functions. (pass_ubsan::gate): Return true even for SANITIZE_NONNULL_ATTRIBUTE or SANITIZE_RETURNS_NONNULL_ATTRIBUTE. (pass_ubsan::execute): Call instrument_nonnull_{arg,return}. * doc/invoke.texi (-fsanitize=nonnull-attribute, -fsanitize=returns-nonnull-attribute): Document. gcc/testsuite/ * c-c++-common/ubsan/attrib-3.c: New test. * c-c++-common/ubsan/nonnull-1.c: New test. * c-c++-common/ubsan/nonnull-2.c: New test. * c-c++-common/ubsan/nonnull-3.c: New test. * c-c++-common/ubsan/nonnull-4.c: New test. * c-c++-common/ubsan/nonnull-5.c: New test. libsanitizer/ * ubsan/ubsan_handlers.cc, ubsan/ubsan_handlers.h: Cherry pick upstream r215485, r217389, r217391 and r217400. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215118 138bc75d-0d04-0410-961f-82ee72b054a4 gcc/ChangeLog: 2014-09-19 Marat Zakirov <m.zakirov@samsung.com> * asan.c (build_check_stmt): Alignment arg was added. 
(asan_expand_check_ifn): Optimization for alignment >= 8. gcc/testsuite/ChangeLog: 2014-09-19 Marat Zakirov <m.zakirov@samsung.com> * c-c++-common/asan/red-align-1.c: New test. * c-c++-common/asan/red-align-2.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215380 138bc75d-0d04-0410-961f-82ee72b054a4 [libsanitizer merge from upstream r218156] git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215527 138bc75d-0d04-0410-961f-82ee72b054a4 libiberty/ 2014-09-26 Max Ostapenko <m.ostapenko@partner.samsung.com> * pex-common.h (struct pex_funcs): Add new parameter for open_write field. * pex-unix.c (pex_unix_open_write): Add support for new parameter. * pex-djgpp.c (pex_djgpp_open_write): Likewise. * pex-win32.c (pex_win32_open_write): Likewise. * pex-common.c (pex_run_in_environment): Likewise. include/ 2014-09-26 Max Ostapenko <m.ostapenko@partner.samsung.com> * libiberty.h (PEX_STDOUT_APPEND): New flag. (PEX_STDERR_APPEND): Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215632 138bc75d-0d04-0410-961f-82ee72b054a4 gcc/ 2014-09-26 Jakub Jelinek <jakub@redhat.com> Max Ostapenko <m.ostapenko@partner.samsung.com> * common.opt: New option. * doc/invoke.texi: Describe new option. * gcc.c (execute): Don't free first string early, but at the end of the function. Call retry_ice if compiler exited with ICE_EXIT_CODE. (main): Factor out common code. (print_configuration): New function. (files_equal_p): Likewise. (check_repro): Likewise. (run_attempt): Likewise. (do_report_bug): Likewise. (append_text): Likewise. (try_generate_repro): Likewise git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215633 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63316 * asan.c (asan_expand_check_ifn): Fix up align >= 8 optimization. * c-c++-common/asan/pr63316.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215547 138bc75d-0d04-0410-961f-82ee72b054a4 PR c/61405 PR c/53874 gcc/ * asan.c (maybe_instrument_call): Add default case. * ipa-pure-const.c (special_builtin_state): Likewise. * predict.c (expr_expected_value_1): Likewise. * lto-streamer-out.c (write_symbol): Initialize variable. gcc/c-family/ * c-common.h (struct c_common_resword): Don't define CPP_KEYWORD. gcc/c/ * c-parser.c: Don't define CPP_KEYWORD. (c_parser_switch_statement): Pass original type to c_finish_case. * c-tree.h (c_finish_case): Update declaration. * c-typeck.c (c_finish_case): Add TYPE parameter. Pass it conditionally to c_do_switch_warnings. gcc/cp/ * semantics.c (finish_switch_cond): Call unlowered_expr_type. * tree.c (bot_manip): Add default case. * parser.c (cp_parser_primary_expression): Cast the controlling expression of a switch to an int. (cp_parser_unqualified_id): Likewise. gcc/testsuite/ * c-c++-common/pr53874.c: New test. * c-c++-common/pr61405.c: New test. libcpp/ * include/cpplib.h (enum cpp_ttype): Define CPP_KEYWORD. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215559 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/c/c-tree.h 2014-10-03 Yury Gribov <y.gribov@samsung.com> * asan.c (asan_finish_file): Disable __asan_init calls for KASan; don't emit empty ctors. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215851 138bc75d-0d04-0410-961f-82ee72b054a4 * ubsan.h (ubsan_get_source_location): New prototype. * ubsan.c (ubsan_source_location_type): New variable. Function renamed to ... (ubsan_get_source_location_type): ... this. Cache return value in ubsan_source_location_type variable. (ubsan_source_location, ubsan_create_data): Use ubsan_get_source_location_type instead of ubsan_source_location_type. 
* asan.c (asan_protect_global): Don't protect globals with ubsan_get_source_location_type () type. (asan_add_global): Provide global decl location info if possible. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@215916 138bc75d-0d04-0410-961f-82ee72b054a4 * doc/invoke.texi: Document -fsanitize=bool and -fsanitize=enum. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216033 138bc75d-0d04-0410-961f-82ee72b054a4 * asan.c (pass_sanopt::execute): Handle IFN_UBSAN_OBJECT_SIZE. * doc/invoke.texi: Document -fsanitize=object-size. * flag-types.h (enum sanitize_code): Add SANITIZE_OBJECT_SIZE and or it into SANITIZE_UNDEFINED. * gimple-fold.c (gimple_fold_call): Optimize IFN_UBSAN_OBJECT_SIZE. * internal-fn.c (expand_UBSAN_OBJECT_SIZE): New function. * internal-fn.def (UBSAN_OBJECT_SIZE): Define. * opts.c (common_handle_option): Handle -fsanitize=object-size. * ubsan.c: Include tree-object-size.h. (ubsan_type_descriptor): Call tree_to_uhwi instead of tree_to_shwi. (ubsan_expand_bounds_ifn): Use false instead of 0. (ubsan_expand_objsize_ifn): New function. (instrument_object_size): New function. (pass_ubsan::execute): Add object size instrumentation. * ubsan.h (ubsan_expand_objsize_ifn): Declare. testsuite/ * c-c++-common/ubsan/object-size-1.c: New test. * c-c++-common/ubsan/object-size-2.c: New test. * c-c++-common/ubsan/object-size-3.c: New test. * c-c++-common/ubsan/object-size-4.c: New test. * c-c++-common/ubsan/object-size-5.c: New test. * c-c++-common/ubsan/object-size-6.c: New test. * c-c++-common/ubsan/object-size-7.c: New test. * c-c++-common/ubsan/object-size-8.c: New test. * c-c++-common/ubsan/object-size-9.c: New test. * g++.dg/ubsan/object-size-1.C: New test. * gcc.dg/ubsan/object-size-9.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216099 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/internal-fn.c gcc/ChangeLog: 2014-09-19 Marat Zakirov <m.zakirov@samsung.com> * asan.c (instrument_derefs): BIT_FIELD_REF added. gcc/testsuite/ChangeLog: 2014-09-19 Marat Zakirov <m.zakirov@samsung.com> * c-c++-common/asan/bitfield-5.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216144 138bc75d-0d04-0410-961f-82ee72b054a4 * ubsan/Makefile.am (DEFS): Add -DPIC. * ubsan/Makefile.in: Regenerated. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216206 138bc75d-0d04-0410-961f-82ee72b054a4 Fix sanitizer build on sparc. * sanitizer_common/sanitizer_platform_limits_linux.cc (time_t): Define at __kernel_time_t, as needed for sparc. (struct __old_kernel_stat): Don't check if __sparc__ is defined. * libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h (__sanitizer): Define struct___old_kernel_stat_sz, struct_kernel_stat_sz, and struct_kernel_stat64_sz for sparc. (__sanitizer_ipc_perm): Adjust for sparc targets. (__sanitizer_shmid_ds): Likewsie. (__sanitizer_sigaction): Likewsie. (IOC_SIZE): Likewsie. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216224 138bc75d-0d04-0410-961f-82ee72b054a4 * flag-types.h (sanitize_code): Don't assume targets have 32-bit integers. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216349 138bc75d-0d04-0410-961f-82ee72b054a4 New syntax for -fsanitize-recover. 2014-10-22 Jakub Jelinek <jakub@redhat.com> Yury Gribov <y.gribov@samsung.com> gcc/ * common.opt (flag_sanitize_recover): New variable. (fsanitize-recover): Remove Var/Init, deprecate. (fsanitize-recover=): New option. * doc/invoke.texi (fsanitize-recover): Update docs. * opts.c (finish_options): Use opts->x_flag_sanitize instead of flag_sanitize. 
Prohibit -fsanitize-recover for anything besides UBSan. Formatting. (common_handle_option): Handle OPT_fsanitize_recover_ and OPT_fsanitize_recover. Use opts->x_flag_sanitize instead of flag_sanitize. * asan.c (pass_sanopt::execute): Fix up formatting. * ubsan.c (ubsan_expand_bounds_ifn, ubsan_expand_null_ifn, ubsan_expand_objsize_ifn, ubsan_build_overflow_builtin, instrument_bool_enum_load, ubsan_instrument_float_cast, instrument_nonnull_arg, instrument_nonnull_return): Check bits in flag_sanitize_recover bitmask instead of flag_sanitize_recover as bool flag. gcc/c-family/ * c-ubsan.c (ubsan_instrument_division, ubsan_instrument_shift, ubsan_instrument_vla): Check bits in flag_sanitize_recover bitmask instead of flag_sanitize_recover as bool flag. gcc/testsuite/ * c-c++-common/ubsan/align-1.c: Update cmdline options. * c-c++-common/ubsan/align-3.c: Likewise. * c-c++-common/ubsan/bounds-1.c: Likewise. * c-c++-common/ubsan/div-by-zero-7.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-10.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-7.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-8.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-9.c: Likewise. * c-c++-common/ubsan/nonnull-2.c: Likewise. * c-c++-common/ubsan/nonnull-3.c: Likewise. * c-c++-common/ubsan/object-size-3.c: Likewise. * c-c++-common/ubsan/overflow-1.c: Likewise. * c-c++-common/ubsan/overflow-add-1.c: Likewise. * c-c++-common/ubsan/overflow-add-3.c: Likewise. * c-c++-common/ubsan/overflow-mul-1.c: Likewise. * c-c++-common/ubsan/overflow-mul-3.c: Likewise. * c-c++-common/ubsan/overflow-negate-2.c: Likewise. * c-c++-common/ubsan/overflow-sub-1.c: Likewise. * c-c++-common/ubsan/pr59503.c: Likewise. * c-c++-common/ubsan/pr60613-1.c: Likewise. * c-c++-common/ubsan/save-expr-1.c: Likewise. * c-c++-common/ubsan/shift-3.c: Likewise. * c-c++-common/ubsan/shift-6.c: Likewise. * c-c++-common/ubsan/undefined-1.c: Likewise. * c-c++-common/ubsan/vla-2.c: Likewise. * c-c++-common/ubsan/vla-3.c: Likewise. * c-c++-common/ubsan/vla-4.c: Likewise. * g++.dg/ubsan/cxx11-shift-1.C: Likewise. * g++.dg/ubsan/return-2.C: Likewise. * c-c++-common/ubsan/recovery-1.c: New test. * c-c++-common/ubsan/recovery-2.c: New test. * c-c++-common/ubsan/recovery-3.c: New test. * c-c++-common/ubsan/recovery-common.inc: New file. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216552 138bc75d-0d04-0410-961f-82ee72b054a4 * c-ubsan.c (ubsan_instrument_shift): Perform the MINUS_EXPR in unsigned type. * c-c++-common/ubsan/undefined-2.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216593 138bc75d-0d04-0410-961f-82ee72b054a4 Allow to override Asan shadow offset. 2014-10-28 Yury Gribov <y.gribov@samsung.com> gcc/ * asan.c (set_asan_shadow_offset): New function. (asan_shadow_offset): Likewise. (asan_emit_stack_protection): Call asan_shadow_offset. (build_shadow_mem_access): Likewise. * asan.h (set_asan_shadow_offset): Declare. * common.opt (fasan-shadow-offset): New option. (frandom-seed): Fixed parameter name. * doc/invoke.texi (fasan-shadow-offset): Describe new option. (frandom-seed): Fixed parameter name. * opts-global.c (handle_common_deferred_options): Handle -fasan-shadow-offset. * opts.c (common_handle_option): Likewise. gcc/testsuite/ * c-c++-common/asan/shadow-offset-1.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216773 138bc75d-0d04-0410-961f-82ee72b054a4 Add strtoll and strtoull to libiberty. 2014-10-28 Yury Gribov <y.gribov@samsung.com> include/ * libiberty.h (strtol, strtoul, strtoll, strtoull): New prototypes. 
libiberty/ * strtoll.c: New file. * strtoull.c: New file. * configure.ac: Add long long checks. Add harness for strtoll and strtoull. Check decls for strtol, strtoul, strtoll, strtoull. * Makefile.in (CFILES, CONFIGURED_OFILES): Add strtoll and strtoull. * config.in: Regenerate. * configure: Regenerate. * functions.texi: Regenerate. * testsuite/Makefile.in (check-strtol): New rule. (test-strtol): Likewise. (mostlyclean): Clean up strtol test. * testsuite/test-strtol.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216772 138bc75d-0d04-0410-961f-82ee72b054a4 Enable -fsanitize-recover for KASan. 2014-10-28 Yury Gribov <y.gribov@samsung.com> gcc/ * asan.c (report_error_func): Add noabort path. (check_func): Ditto. Formatting. (asan_expand_check_ifn): Handle noabort path. * common.opt (flag_sanitize_recover): Add SANITIZE_KERNEL_ADDRESS to default value. * doc/invoke.texi (-fsanitize-recover=): Mention KASan. * opts.c (finish_options): Reword comment. * sanitizer.def: Add noabort ASan builtins. gcc/testsuite/ * c-c++-common/asan/kasan-recover-1.c: New test. * c-c++-common/asan/kasan-recover-2.c: New test. * c-c++-common/asan/instrument-with-calls-1.c: Get rid of -save-temps. * c-c++-common/asan/instrument-with-calls-2.c: Likewise. * c-c++-common/asan/instrument-with-calls-3.c: Likewise. * c-c++-common/asan/kasan-recover-1.c: Likewise. * c-c++-common/asan/kasan-recover-2.c: Likewise. * c-c++-common/asan/no-asan-globals.c: Likewise. * c-c++-common/asan/no-instrument-reads.c: Likewise. * c-c++-common/asan/no-instrument-writes.c: Likewise. * c-c++-common/asan/no-use-after-return.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216778 138bc75d-0d04-0410-961f-82ee72b054a4 Don't inline GCC memory builtins if ASan is enabled. gcc/ 2014-10-28 Max Ostapenko <m.ostapenko@partner.samsung.com> * asan.h (asan_intercepted_p): New function. * asan.c (asan_mem_ref_hasher::hash): Remove MEM_REF access size from hash value construction. Call iterative_hash_expr instead of explicit hash building. (asan_mem_ref_hasher::equal): Change condition. (has_mem_ref_been_instrumented): Likewise. (update_mem_ref_hash_table): Likewise. (maybe_update_mem_ref_hash_table): New function. (instrument_strlen_call): Removed. (get_mem_refs_of_builtin_call): Handle new parameter. (instrument_builtin_call): Call maybe_update_mem_ref_hash_table instead of instrument_mem_region_access if intercepted_p is true. (instrument_mem_region_access): Instrument only base with len instead of base and end with 1. (build_check_stmt): Remove start_instrumented and end_instrumented parameters. (enum asan_check_flags): Remove ASAN_CHECK_START_INSTRUMENTED and ASAN_CHECK_END_INSTRUMENTED. Change ASAN_CHECK_LAST. (asan_expand_check_ifn): Remove start_instrumented and end_instrumented. * builtins.c (expand_builtin): Include asan.h. Don't expand string/memory builtin functions that have interceptors if ASan is enabled. gcc/testsuite/ * c-c++-common/asan/no-redundant-instrumentation-1.c: Updated test. * c-c++-common/asan/no-redundant-instrumentation-4.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-5.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-6.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-7.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-8.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-2.c: Removed. * c-c++-common/asan/no-redundant-instrumentation-9.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-10.c: New test. 
* c-c++-common/asan/no-redundant-instrumentation-11.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-12.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-13.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-14.c: Likewise. * c-c++-common/asan/no-redundant-instrumentation-15.c: Likewise. * c-c++-common/asan/pr63638.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216783 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63697 * tree-vrp.c (simplify_internal_call_using_ranges): For subcode == MINUS_EXPR, check overflow on vr0.min - vr1.max and vr0.max - vr1.min instead of vr0.min - vr1.min and vr0.max - vr1.max. * c-c++-common/ubsan/overflow-sub-3.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216962 138bc75d-0d04-0410-961f-82ee72b054a4 * ubsan.c (instrument_object_size): Optimize [x & CST] array accesses. testsuite/ * c-c++-common/ubsan/object-size-10.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217071 138bc75d-0d04-0410-961f-82ee72b054a4 * Makefile.in (OBJS): Add sanopt.o. (GTFILES): Add sanopt.c. * asan.h (asan_expand_check_ifn): Declare. * asan.c (asan_expand_check_ifn): No longer static. (class pass_sanopt, pass_sanopt::execute, make_pass_sanopt): Move... * sanopt.c: ...here. New file. testsuite/ * c-c++-common/ubsan/align-2.c: Remove dg-output. * c-c++-common/ubsan/align-4.c: Likewise. * g++.dg/ubsan/null-1.C: Likewise. * g++.dg/ubsan/null-2.C: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217099 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/asan.c * sanopt.c (sanopt_optimize_walker): Limit removal of the checks. Remove vector limit. testsuite/ * c-c++-common/ubsan/align-2.c: Add dg-output. * c-c++-common/ubsan/align-4.c: Likewise. * c-c++-common/ubsan/align-6.c: New test. * c-c++-common/ubsan/align-7.c: New test. * c-c++-common/ubsan/align-8.c: New test. * g++.dg/ubsan/null-1.C: Add dg-output. * g++.dg/ubsan/null-2.C: Likewise. * g++.dg/ubsan/null-3.C: New test. * g++.dg/ubsan/null-4.C: New test. * g++.dg/ubsan/null-5.C: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217189 138bc75d-0d04-0410-961f-82ee72b054a4 * c-c++-common/ubsan/align-7.c: Skip for -flto -fno-fat-lto-objects. * c-c++-common/ubsan/align-8.c: Likewise. * g++.dg/ubsan/null-4.C: Likewise. * g++.dg/ubsan/null-5.C: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217282 138bc75d-0d04-0410-961f-82ee72b054a4 Avoid tail call in c-c++-common/asan/strlen-overflow-1.c PR testsuite/63830 * c-c++-common/asan/strlen-overflow-1.c (main): Avoid tail call. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217417 138bc75d-0d04-0410-961f-82ee72b054a4 * doc/invoke.texi: Document -fsanitize=bool and -fsanitize=enum. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216033 138bc75d-0d04-0410-961f-82ee72b054a4 * fold-const.c (fold_binary_loc): Don't fold if the result is undefined. * match.pd (A + (-B) -> A - B, A - (-B) -> A + B, -(-A) -> A): Likewise. * c-c++-common/ubsan/overflow-sub-4.c: New test. * c-c++-common/ubsan/overflow-sub-2.c: Adjust dg-output. * c-c++-common/ubsan/overflow-int128.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217427 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/match.pd libsanitizer merge from upstream r221802 git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217518 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: libsanitizer/asan/Makefile.in PR sanitizer/63839 * asan.c (ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST, ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST): Define. 
* builtin-attrs.def (ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST): Define. * builtins.c (fold_builtin_0): Don't include ubsan.h. Don't instrument BUILT_IN_UNREACHABLE here. * sanitizer.def (BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE): Make const. * sanopt.c (pass_sanopt::execute): Instrument BUILT_IN_UNREACHABLE. * tree-ssa-ccp.c (optimize_unreachable): Bail out if SANITIZE_UNREACHABLE. * ubsan.c (ubsan_instrument_unreachable): Rewrite for GIMPLE. * ubsan.h (ubsan_instrument_unreachable): Adjust declaration. testsuite/ * c-c++-common/ubsan/pr63839.c: New test. * c-c++-common/ubsan/unreachable-2.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217553 138bc75d-0d04-0410-961f-82ee72b054a4 * fold-const.c (fold_negate_expr): Don't fold INTEGER_CST if that overflows when SANITIZE_SI_OVERFLOW is on. Guard -(-A) folding with TYPE_OVERFLOW_SANITIZED. * c-c++-common/ubsan/overflow-negate-3.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217556 138bc75d-0d04-0410-961f-82ee72b054a4 2014-08-12 Yury Gribov <y.gribov@samsung.com> gcc/ * internal-fn.c (init_internal_fns): Fix off-by-one. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@213848 138bc75d-0d04-0410-961f-82ee72b054a4 * tree-ssa-reassoc.c (optimize_range_tests_diff): Perform MINUS_EXPR in unsigned type to avoid undefined behavior. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@216252 138bc75d-0d04-0410-961f-82ee72b054a4 * sanitizer_common/Makefile.am (AM_CXXFLAGS): Use -std=gnu++11. * asan/Makefile.am (AM_CXXFLAGS): Ditto. * lsan/Makefile.am (AM_CXXFLAGS): Ditto. * interception/Makefile.am (AM_CXXFLAGS): Ditto. * tsan/Makefile.am (AM_CXXFLAGS): Ditto. * libbacktrace/Makefile.am (AM_CXXFLAGS): Ditto. * ubsan/Makefile.am (AM_CXXFLAGS): Ditto. * sanitizer_common/Makefile.in: Regenerate. * asan/Makefile.in: Ditto. * lsan/Makefile.in: Ditto. * interception/Makefile.in: Ditto. * tsan/Makefile.in: Ditto. * libbacktrace/Makefile.in: Ditto. * ubsan/Makefile.in: Ditto. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217562 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: libsanitizer/asan/Makefile.in * sanopt.c: Include tree-ssa-operands.h. (struct sanopt_info): Add has_freeing_call_p, has_freeing_call_computed_p, imm_dom_path_with_freeing_call_p, imm_dom_path_with_freeing_call_computed_p, freeing_call_events, being_visited_p fields. (struct sanopt_ctx): Add asan_check_map field. (imm_dom_path_with_freeing_call, maybe_optimize_ubsan_null_ifn, maybe_optimize_asan_check_ifn): New functions. (sanopt_optimize_walker): Use them, optimize even ASAN_CHECK internal calls. (pass_sanopt::execute): Call sanopt_optimize even for -fsanitize=address. * gimple.c (nonfreeing_call_p): Return true for non-ECF_LEAF internal calls. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217581 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/sanopt.c Export "detect_leaks=0" PR bootstrap/63888 * bootstrap-asan.mk (ASAN_OPTIONS): Export "detect_leaks=0". git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217678 138bc75d-0d04-0410-961f-82ee72b054a4 2014-11-18 Yury Gribov <y.gribov@samsung.com> PR sanitizer/63802 gcc/ * stor-layout.c (min_align_of_type): Respect user alignment more. gcc/testsuite/ * c-c++-common/ubsan/pr63802.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217689 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/stor-layout.c gcc 2014-11-18 Marat Zakirov <m.zakirov@samsung.com> * opts.c (finish_options): Disable aggressive opts for sanitizer. (common_handle_option): Move code to finish_options. 
gcc/testsuite 2014-11-18 Marat Zakirov <m.zakirov@samsung.com> * c-c++-common/asan/aggressive-opts.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217690 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63866 * asan.c (asan_global_struct): Create a TYPE_DECL for "__asan_global", put it into TYPE_NAME and TYPE_STUB_DECL. * ubsan.c (ubsan_type_descriptor_type): New variable. Function renamed to ... (ubsan_get_type_descriptor_type): ... this. Cache return value in ubsan_type_descriptor_type variable. Create a TYPE_DECL for "__ubsan_type_descriptor", put it into TYPE_NAME and TYPE_STUB_DECL. (ubsan_get_source_location_type): Create a TYPE_DECL for "__ubsan_source_location", put it into TYPE_NAME and TYPE_STUB_DECL. (ubsan_type_descriptor, ubsan_create_data): Call ubsan_get_type_descriptor_type instead of ubsan_type_descriptor_type. Create a TYPE_DECL for name, put it into TYPE_NAME and TYPE_STUB_DECL. * c-c++-common/ubsan/pr63866.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217718 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63813 * c-ubsan.c (ubsan_maybe_instrument_reference_or_call): Change type argument to ptype, set type to TREE_TYPE (ptype). Don't call get_pointer_alignment for non-pointers. Use ptype, or if it is reference type, corresponding pointer type, as type of kind argument. (ubsan_maybe_instrument_reference, ubsan_maybe_instrument_member_call): Adjust callers. * g++.dg/ubsan/pr63813.C: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217741 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63913 * ubsan.c: Include tree-eh.h. (instrument_bool_enum_load): Handle loads that can throw. * g++.dg/ubsan/pr63913.C: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217755 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63520 * internal-fn.c (expand_ubsan_result_store): New function. (expand_addsub_overflow, expand_neg_overflow, expand_mul_overflow): Use it instead of just emit_move_insn. * c-c++-common/ubsan/pr63520.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217758 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/internal-fn.c PR sanitizer/63879 * fold-const.c (negate_expr_p) <case NEGATE_EXPR>: Return !TYPE_OVERFLOW_SANITIZED. (fold_negate_expr) <case INTEGER_CST>: Fold when overflow does not trap and when overflow wraps, or when SANITIZE_SI_OVERFLOW is 0. * c-c++-common/ubsan/pr63879-1.c: New test. * c-c++-common/ubsan/pr63879-2.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217766 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/62132 * c-c++-common/asan/misalign-1.c: Pass -fno-omit-frame-pointer on darwin, adjust dg-output. * c-c++-common/asan/misalign-2.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217777 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63690 * ubsan.c (instrument_object_size): Check for MEM_REF. * gcc.dg/ubsan/pr63690.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217778 138bc75d-0d04-0410-961f-82ee72b054a4 Fixing the mess I did with the two previous commits. Sorry! PR sanitizer/62132 * c-c++-common/asan/misalign-1.c: Pass -fno-omit-frame-pointer on darwin, adjust dg-output. * c-c++-common/asan/misalign-2.c: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217779 138bc75d-0d04-0410-961f-82ee72b054a4 Fix up accidentally twice-applied patch. 
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217784 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63939 * c-c++-common/asan/global-overflow-1.c: Allow extra spaces after stack pointer address, to fit darwin output when symbolizer is not present. * c-c++-common/asan/heap-overflow-1.c: Likewise. * c-c++-common/asan/memcmp-1.c: Likewise. * c-c++-common/asan/misalign-1.c: Likewise. * c-c++-common/asan/misalign-2.c: Likewise. * c-c++-common/asan/null-deref-1.c: Likewise. * c-c++-common/asan/stack-overflow-1.c: Likewise. * c-c++-common/asan/strlen-overflow-1.c: Likewise. * c-c++-common/asan/strncpy-overflow-1.c: Likewise. * c-c++-common/asan/use-after-free-1.c: Likewise. * g++.dg/asan/deep-stack-uaf-1.C: Likewise. * g++.dg/asan/deep-tail-call-1.C: Likewise. * g++.dg/asan/large-func-test-1.C: Likewise. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217817 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/64013 * sanitizer_common/sanitizer_linux.cc (FileExists): Cherry pick upstream r222532. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@217921 138bc75d-0d04-0410-961f-82ee72b054a4 PR sanitizer/63788 * asan.c (initialize_sanitizer_builtins): Add BT_FN_SIZE_CONST_PTR_INT var. Conditionally build BUILT_IN_OBJECT_SIZE decl. (ATTR_PURE_NOTHROW_LEAF_LIST): Define. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218084 138bc75d-0d04-0410-961f-82ee72b054a4 * gcc.c (SANITIZER_SPEC): Don't error on -fsanitize=thread without -pie or -shared, error on -fsanitize=thread -static instead. * lib/tsan-dg.exp (check_effective_target_fsanitize_thread, tsan_init): Don't use -fPIE or -pie. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218097 138bc75d-0d04-0410-961f-82ee72b054a4 Conflicts: gcc/testsuite/lib/tsan-dg.exp PR c/63862 c-family/ * c-ubsan.c (ubsan_instrument_shift): Change the type of a MINUS_EXPR to op1_utype. * c-gimplify.c (c_gimplify_expr): Convert right operand of a shift expression to unsigned_type_node. c/ * c-typeck.c (build_binary_op) <RSHIFT_EXPR, LSHIFT_EXPR>: Don't convert the right operand to integer type. cp/ * typeck.c (cp_build_binary_op) <RSHIFT_EXPR, LSHIFT_EXPR>: Don't convert the right operand to integer type. testsuite/ * gcc.c-torture/execute/shiftopt-1.c: Don't XFAIL anymore. * c-c++-common/ubsan/shift-7.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218142 138bc75d-0d04-0410-961f-82ee72b054a4 * c-ubsan.c (ubsan_instrument_shift): Use op1_utype for MINUS_EXPR instead of unsigned_type_node. * c-c++-common/ubsan/shift-8.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218163 138bc75d-0d04-0410-961f-82ee72b054a4 Backport from GCC mainline. 2015-03-16 Max Ostapenko <m.ostapenko@partner.samsung.com> PR sanitizer/64820 gcc/ * cfgexpand.c (align_base): New function. (alloc_stack_frame_space): Call it. (expand_stack_vars): Align prev_frame to be sure data->asan_vec elements aligned properly. gcc/testsuite/ * c-c++-common/asan/pr64820.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221457 138bc75d-0d04-0410-961f-82ee72b054a4 Backport from GCC mainline. 2015-04-01 Max Ostapenko <m.ostapenko@partner.samsung.com> PR target/65624 gcc/ * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_builtin): Increase args array size by one to avoid buffer overflow. gcc/testsuite/ * gcc.target/aarch64/pr65624.c: New test. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221807 138bc75d-0d04-0410-961f-82ee72b054a4 Backport from GCC trunk. 
2015-04-13 Yury Gribov <y.gribov@samsung.com> PR sanitizer/64839 libsanitizer/ * sanitizer_common/sanitizer_platform.h: Cherry pick upstream r234470. * sanitizer_common/sanitizer_platform_limits_posix.cc: Ditto. * configure.ac (RPC_DEFS): Check for precense of RPC headers. * sanitizer_common/Makefile.am (DEFS): Pass info to compiler. * Makefile.in: Regenerate. * asan/Makefile.in: Regenerate. * config.h.in: Regenerate. * configure: Regenerate. * interception/Makefile.in: Regenerate. * libbacktrace/Makefile.in: Regenerate. * lsan/Makefile.in: Regenerate. * sanitizer_common/Makefile.in: Regenerate. * tsan/Makefile.in: Regenerate. * ubsan/Makefile.in: Regenerate. Backport from GCC trunk. 2015-04-17 Yury Gribov <y.gribov@samsung.com> gcc/ * asan.c (set_sanitized_sections): New function. (section_sanitized_p): Ditto. (asan_protect_global): Optionally sanitize user-defined sections. * asan.h (set_sanitized_sections): Declare new function. * common.opt (fsanitize-sections): New option. * doc/invoke.texi (-fsanitize-sections): Document new option. * opts-global.c (handle_common_deferred_options): Handle new option. gcc/testsuite/ * c-c++-common/asan/user-section-1.c: New test. Backport from mainline. PR testsuite/66046 * c-c++-common/ubsan/align-6.c: Don't match trailing newlines in the last dg-output. * c-c++-common/ubsan/align-7.c: Likewise. * c-c++-common/ubsan/bounds-8.c: Likewise. * c-c++-common/ubsan/float-cast-overflow-9.c: Likewise. * c-c++-common/ubsan/load-bool-enum.c: Likewise. * c-c++-common/ubsan/null-1.c: Likewise. * c-c++-common/ubsan/null-10.c: Likewise. * c-c++-common/ubsan/null-11.c: Likewise. * c-c++-common/ubsan/null-2.c: Likewise. * c-c++-common/ubsan/null-3.c: Likewise. * c-c++-common/ubsan/null-4.c: Likewise. * c-c++-common/ubsan/null-5.c: Likewise. * c-c++-common/ubsan/null-6.c: Likewise. * c-c++-common/ubsan/null-7.c: Likewise. * c-c++-common/ubsan/null-8.c: Likewise. * c-c++-common/ubsan/null-9.c: Likewise. * c-c++-common/ubsan/object-size-1.c: Likewise. * c-c++-common/ubsan/object-size-10.c: Likewise. * c-c++-common/ubsan/object-size-4.c: Likewise. * c-c++-common/ubsan/object-size-5.c: Likewise. * c-c++-common/ubsan/object-size-7.c: Likewise. * c-c++-common/ubsan/object-size-8.c: Likewise. * c-c++-common/ubsan/object-size-9.c: Likewise. * c-c++-common/ubsan/overflow-add-2.c: Likewise. * c-c++-common/ubsan/overflow-int128.c: Likewise. * c-c++-common/ubsan/overflow-mul-2.c: Likewise. * c-c++-common/ubsan/overflow-mul-4.c: Likewise. * c-c++-common/ubsan/overflow-negate-1.c: Likewise. * c-c++-common/ubsan/overflow-negate-3.c: Likewise. * c-c++-common/ubsan/overflow-sub-2.c: Likewise. * c-c++-common/ubsan/overflow-sub-4.c: Likewise. * c-c++-common/ubsan/pr59333.c: Likewise. * c-c++-common/ubsan/pr59667.c: Likewise. * c-c++-common/ubsan/pr60613-2.c: Likewise. * c-c++-common/ubsan/pr60636.c: Likewise. * c-c++-common/ubsan/pr63802.c: Likewise. * c-c++-common/ubsan/recovery-1.c: Likewise. * c-c++-common/ubsan/recovery-3.c: Likewise. * c-c++-common/ubsan/shift-1.c: Likewise. * c-c++-common/ubsan/shift-2.c: Likewise. * c-c++-common/ubsan/shift-4.c: Likewise. * c-c++-common/ubsan/shift-7.c: Likewise. * c-c++-common/ubsan/undefined-2.c: Likewise. * c-c++-common/ubsan/vla-1.c: Likewise. * g++.dg/ubsan/null-1.C: Likewise. * g++.dg/ubsan/null-3.C: Likewise. * g++.dg/ubsan/null-4.C: Likewise. * g++.dg/ubsan/vptr-8.C: Likewise. * g++.dg/ubsan/vptr-9.C: Likewise. * gcc.dg/ubsan/bounds-2.c: Likewise. * gcc.dg/ubsan/object-size-9.c: Likewise. 
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@222878 138bc75d-0d04-0410-961f-82ee72b054a4

Backport from mainline.
2015-04-22 Yury Gribov <y.gribov@samsung.com>

Allow wildcards in -fsanitize-sections.

gcc/
* asan.c (set_sanitized_sections): Parse incoming arg.
(section_sanitized_p): Support wildcards.
* doc/invoke.texi (-fsanitize-sections): Update description.

gcc/testsuite/
* c-c++-common/asan/user-section-1.c: New test.
* c-c++-common/asan/user-section-2.c: New test.
* c-c++-common/asan/user-section-3.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@222322 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/63956
* ubsan.c (is_ubsan_builtin_p): Check also built-in class.

cp/
* constexpr.c: Include ubsan.h.
(cxx_eval_call_expression): Bail out for IFN_UBSAN_{NULL,BOUNDS}
internal functions and for ubsan builtins.
* error.c: Include internal-fn.h.
(dump_expr): Add printing of internal functions.

testsuite/
* c-c++-common/ubsan/shift-5.c: Add xfails.
* g++.dg/ubsan/div-by-zero-1.C: Don't use -w. Add xfail.
* g++.dg/ubsan/pr63956.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218221 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    gcc/cp/constexpr.c

PR sanitizer/64121
* ubsan.c (instrument_object_size): Stop searching if the base
occurs in abnormal phi.
* c-c++-common/ubsan/pr64121.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218222 138bc75d-0d04-0410-961f-82ee72b054a4

* constexpr.c (cxx_eval_check_shift_p): New function.
(cxx_eval_binary_expression): Call it. Set NON_CONSTANT_P if it
returns true.
* g++.dg/cpp0x/constexpr-shift1.C: New test.
* g++.dg/cpp1y/constexpr-shift1.C: New test.
* g++.dg/ubsan/pr63956.C: Add dg-errors.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218279 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    gcc/cp/constexpr.c

PR middle-end/56917
* fold-const.c (fold_unary_loc): Perform the negation in A's type
when transforming ~ (A - 1) or ~ (A + -1) to -A.
* c-c++-common/ubsan/pr56917.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218395 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/64289
* c-convert.c: Include ubsan.h.
(convert): For real -> integral casts and -fsanitize=float-cast-overflow
don't call convert_to_integer, but instead instrument the float cast
directly.
* c-c++-common/ubsan/pr64289.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@218811 138bc75d-0d04-0410-961f-82ee72b054a4

* lib/ubsan-dg.exp: Add library path for libstdc++.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219035 138bc75d-0d04-0410-961f-82ee72b054a4

* lib/ubsan-dg.exp (check_effective_target_fsanitize_undefined): Check
if testcases run without errors, not just if they compile.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219084 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    gcc/testsuite/lib/ubsan-dg.exp

PR sanitizer/64344
* ubsan.h (ubsan_instrument_float_cast): Add ARG argument.
* ubsan.c (ubsan_instrument_float_cast): Add ARG argument, pass
it to libubsan handler instead of EXPR. Fold comparisons earlier,
if the result is integer_zerop, return NULL_TREE.
* convert.c (convert_to_integer): Pass expr as ARG.

c/
* c-typeck.c (convert_for_assignment, c_finish_return): For
-fsanitize=float-cast-overflow casts from REAL_TYPE to integer/enum
types also set in_late_binary_op around convert call.
* c-convert.c (convert): For -fsanitize=float-cast-overflow REAL_TYPE
to integral type casts, if not in_late_binary_op, pass c_fully_fold
result on expr as last argument to ubsan_instrument_float_cast, if
in_late_binary_op, don't use c_save_expr but save_expr.

testsuite/
* c-c++-common/ubsan/pr64344-1.c: New test.
* c-c++-common/ubsan/pr64344-2.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219201 138bc75d-0d04-0410-961f-82ee72b054a4

* opts.c (common_handle_option): Add support for
-fno-sanitize=all and -f{,no-}sanitize-recover=all.
* doc/invoke.texi: Document -fno-sanitize=all,
-f{,no-}sanitize-recover=all. Document that
-fsanitize=float-cast-overflow is not enabled by -fsanitize=undefined.
Fix up documentation of -f{,no-}sanitize-recover.
* c-c++-common/asan/sanitize-all-1.c: New test.
* c-c++-common/ubsan/sanitize-all-1.c: New test.
* c-c++-common/ubsan/sanitize-all-2.c: New test.
* c-c++-common/ubsan/sanitize-all-3.c: New test.
* c-c++-common/ubsan/sanitize-all-4.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219241 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/64632
* ubsan/ubsan_type_hash.cc: Cherry pick upstream r224972.
* g++.dg/ubsan/pr64632.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@219912 138bc75d-0d04-0410-961f-82ee72b054a4

2015-01-27 Jakub Jelinek <jakub@redhat.com>
           Yury Gribov <y.gribov@samsung.com>

PR ubsan/64741
* ubsan.c (ubsan_source_location): Refactor code.
(ubsan_type_descriptor): Update type size. Refactor code.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220159 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    gcc/ubsan.c

* g++.dg/ubsan/shift-1.C: New test.
* gcc.dg/ubsan/c-shift-2.c: New test.
* c-c++-common/ubsan/shift-5.c: Remove file.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220631 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/65019
* ubsan.c (ubsan_expand_objsize_ifn): Always return true.
* g++.dg/ubsan/pr65019.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220641 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/64984
* except.c (check_noexcept_r): Return NULL for internal calls.
* g++.dg/ubsan/pr64984.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220649 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/65000
* g++.dg/ubsan/pr65000.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220663 138bc75d-0d04-0410-961f-82ee72b054a4

PR bootstrap/63888
* asan/asan_globals.cc (RegisterGlobal): Disable detect_odr_violation
support until it is rewritten upstream.
* c-c++-common/asan/pr63888.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220919 138bc75d-0d04-0410-961f-82ee72b054a4

* c-c++-common/ubsan/bounds-6.c: Use dg-do run.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221217 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/65280
* doc/invoke.texi: Update description of -fsanitize=bounds.
* c-ubsan.c (ubsan_instrument_bounds): Check for COMPONENT_REF before
trying to figure out whether we have a flexible array member.
* c-c++-common/ubsan/bounds-1.c: Add testing of flexible array
member-like arrays.
* c-c++-common/ubsan/bounds-8.c: New test.
* c-c++-common/ubsan/bounds-9.c: New test.
* gcc.dg/ubsan/bounds-2.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221250 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/65367
* ubsan.c (ubsan_expand_objsize_ifn): Update GSI instead of GSI_ORIG
when only removing the statement. Handle expanding UBSAN_OBJECT_SIZE
separately.
* c-c++-common/ubsan/pr65367.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221325 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/65583
* ubsan.c (ubsan_create_edge): New function.
(instrument_bool_enum_load): Call it.
(instrument_nonnull_arg): Likewise.
(instrument_nonnull_return): Likewise.
(instrument_object_size): Likewise.
* g++.dg/ubsan/pr65583.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221723 138bc75d-0d04-0410-961f-82ee72b054a4

PR tree-optimization/65709
* ubsan.c (instrument_mem_ref): Use TREE_TYPE (base) instead of
TREE_TYPE (TREE_TYPE (t)).
* c-c++-common/ubsan/align-9.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221958 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    gcc/testsuite/ChangeLog

* c-ubsan.c (ubsan_instrument_bounds): Don't skip instrumenting
flexible member array-like members if SANITIZE_BOUNDS_STRICT.
* doc/invoke.texi: Document -fsanitize=bounds-strict.
* flag-types.h (enum sanitize_code): Add SANITIZE_BOUNDS_STRICT, or it
into SANITIZE_NONDEFAULT.
* opts.c (common_handle_option): Handle -fsanitize=bounds-strict.
* c-c++-common/ubsan/bounds-10.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@222871 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    gcc/c-family/ChangeLog
    gcc/flag-types.h

* c-ubsan.c (ubsan_instrument_shift): Use type0.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@223646 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/66452
* toplev.c (check_global_declaration): Don't warn about artificial
decls.
* g++.dg/ubsan/pr66452.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@224216 138bc75d-0d04-0410-961f-82ee72b054a4

* common.opt (fsanitize-undefined-trap-on-error): Add Driver.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@224708 138bc75d-0d04-0410-961f-82ee72b054a4

Backport from mainline.
libstdc++-v3/
* include/bits/regex_compiler.h (_S_cache_size): Multiply the RHS of
the shift-expression by _IsChar::value.

Conditionally include target specific files while building TSAN
2015-01-25 Venkataramanan Kumar <venkataramanan.kumar@linaro.org>

* configure.ac (TSAN_TARGET_DEPENDENT_OBJECTS): Define.
* configure: Regenerate.
* tsan/Makefile.am (EXTRA_libtsan_la_SOURCES): Define.
(libtsan_la_DEPENDENCIES): Likewise.
* Makefile.in: Regenerate.
* asan/Makefile.in: Regenerate.
* interception/Makefile.in: Regenerate.
* libbacktrace/Makefile.in: Regenerate.
* lsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.in: Regenerate.
* tsan/Makefile.in: Regenerate.
* ubsan/Makefile.in: Regenerate.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@220034 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    libsanitizer/configure
    libsanitizer/tsan/Makefile.am
    libsanitizer/tsan/Makefile.in

libsanitizer merge from upstream r250806.

libsanitizer/
2015-10-20 Maxim Ostapenko <m.ostapenko@partner.samsung.com>

* All source files: Merge from upstream r250806.
* configure.ac (link_sanitizer_common): Add -lrt flag.
* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
Set CXX_ABI_NEEDED=true for darwin.
* asan/Makefile.am (asan_files): Add new files.
(DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
* asan/Makefile.in: Regenerate.
* ubsan/Makefile.am (ubsan_files): Add new files.
(DEFS): Add DCAN_SANITIZE_UB=1.
(libubsan_la_LIBADD): Add -lc++abi if CXX_ABI_NEEDED is true.
* ubsan/Makefile.in: Regenerate.
* tsan/Makefile.am (tsan_files): Add new files.
(DEFS): Add DCAN_SANITIZE_UB=0.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
* sanitizer_common/Makefile.in: Regenerate.
* asan/libtool-version: Bump the libasan SONAME.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229111 138bc75d-0d04-0410-961f-82ee72b054a4

libsanitizer merge from upstream r250806, compiler part.

gcc/
* asan.c (asan_emit_stack_protection): Don't pass local stack to
asan_stack_malloc_[n] anymore. Check if asan_stack_malloc_[n] returned
NULL and use local stack than.
(asan_finish_file): Insert __asan_version_mismatch_check_v[n] call in
addition to __asan_init.
* sanitizer.def (BUILT_IN_ASAN_INIT): Rename to __asan_init.
(BUILT_IN_ASAN_VERSION_MISMATCH_CHECK): Add new builtin call.
* asan.h (asan_intercepted_p): Handle new string builtins.
* ubsan.c (ubsan_use_new_style_p): New function.
(ubsan_instrument_float_cast): If location is unknown, assign
input_location to loc. Propagate loc to ubsan_create_data if
ubsan_use_new_style_p returned true.

config/
* bootstrap-asan.mk: Replace ASAN_OPTIONS=detect_leaks with
LSAN_OPTIONS=detect_leaks.

gcc/testsuite/
* c-c++-common/ubsan/float-cast-overflow-10.c: Adjust test.
* c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
* c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
* g++.dg/asan/default-options-1.C: Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229112 138bc75d-0d04-0410-961f-82ee72b054a4

libsanitizer/
PR sanitizer/63958
Reapply:
2014-10-14 David S. Miller <davem@davemloft.net>

* sanitizer_common/sanitizer_platform_limits_linux.cc (time_t):
Define at __kernel_time_t, as needed for sparc.
(struct __old_kernel_stat): Don't check if __sparc__ is defined.
* libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
(__sanitizer): Define struct___old_kernel_stat_sz,
struct_kernel_stat_sz, and struct_kernel_stat64_sz for sparc.
(__sanitizer_ipc_perm): Adjust for sparc targets.
(__sanitizer_shmid_ds): Likewsie.
(__sanitizer_sigaction): Likewise.
(IOC_SIZE): Likewsie.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229113 138bc75d-0d04-0410-961f-82ee72b054a4

libsanitizer/
PR bootstrap/63888
Reapply:
2015-02-20 Jakub Jelinek <jakub@redhat.com>

* asan/asan_globals.cc (RegisterGlobal): Disable detect_odr_violation
support until it is rewritten upstream.
* c-c++-common/asan/pr63888.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229114 138bc75d-0d04-0410-961f-82ee72b054a4

libsanitizer/
* sanitizer_common/sanitizer_stacktrace.cc (GetCanonicFrame): Assume
we compiled code with GCC when extracting the caller PC for ARM if no
valid frame pointer is available.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229115 138bc75d-0d04-0410-961f-82ee72b054a4

libsanitizer/
* HOWTO_MERGE: New file.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229116 138bc75d-0d04-0410-961f-82ee72b054a4

Update HOWTO_MERGE file for libsanitizer.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229215 138bc75d-0d04-0410-961f-82ee72b054a4

Fix ASan output pattern tests on Darwin.

gcc/testsuite/
PR sanitizer/68042
* c-c++-common/asan/memcmp-1.c: Adjust test to pass on Darwin.
* c-c++-common/asan/sanity-check-pure-c-1.c: Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229212 138bc75d-0d04-0410-961f-82ee72b054a4

Fix bootstrap on x86_64-apple-darwin14 after r229119.

libsanitizer/
PR bootstrap/68041
* configure.ac (link_sanitizer_common): Link against librt only if it
contains shm_open, required by sanitizers.
(CXX_ABI_NEEDED): Remove variable.
* configure: Regenerate.
* ubsan/Makefile.am (libubsan_la_LIBADD): Do not add -lc++abi anymore.
* ubsan/Makefile.in: Regenerate.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229168 138bc75d-0d04-0410-961f-82ee72b054a4

libsanitizer merge from upstream r253555.

libsanitizer/
2015-11-23 Maxim Ostapenko <m.ostapenko@partner.samsung.com>

* All source files: Merge from upstream r253555.
* configure.tgt: Enable LSan on aarch64-*-linux* targets. Add new
dependences for TSan for aarch64-*-linux* targets.
* tsan/Makefile.am: Add new source files.
* configure: Regenerate.
* tsan/Makefile.in: Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@230739 138bc75d-0d04-0410-961f-82ee72b054a4
Conflicts:
    libsanitizer/tsan/Makefile.am
    libsanitizer/tsan/Makefile.in

libsanitizer merge from upstream r253555, compiler part.

gcc/
* opts.c (finish_options): Allow -fsanitize-recover=address for
userspace sanitization.
* asan.c (asan_expand_check_ifn): Redefine recover_p.
* doc/invoke.texi (fsanitize-recover): Update documentation.

gcc/testsuite/
* c-c++-common/asan/halt_on_error-1.c: New test.
* c-c++-common/asan/halt_on_error-2.c: Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@230741 138bc75d-0d04-0410-961f-82ee72b054a4

Fix libsanitizer build with old kernel headers on ARM after r230739.

libsanitizer/
* include/system/linux/asm/ptrace.h: New header.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@230790 138bc75d-0d04-0410-961f-82ee72b054a4

Move ptrace.h to appropriate place.

libsanitizer/
* include/system/linux/asm/ptrace.h: Move to ...
* include/system/asm/ptrace.h: ... this.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@230804 138bc75d-0d04-0410-961f-82ee72b054a4

Backport from mainline r219968.
Fix ASan build on Aarch64 42-bit VMA and Glibc < 2.21.

PR sanitizer/64435
* sanitizer_common/sanitizer_platform_limits_posix.cc: Cherry pick
upstream r226637.
* sanitizer_common/sanitizer_posix.cc: Cherry pick upstream r226639.

libbacktrace/
2016-03-02 Maxim Ostapenko <m.ostapenko@partner.samsung.com>

* elf.c (backtrace_initialize): Properly initialize elf_fileline_fn
to avoid possible crash.
(elf_add): Don't set *fileline_fn to elf_nodebug value in case of
missing debug info anymore.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@233911 138bc75d-0d04-0410-961f-82ee72b054a4

Backport from mainline.

gcc/
* config/elfos.h (ASM_DECLARE_OBJECT_NAME): Use decl size instead of
type size.
(ASM_FINISH_DECLARE_OBJECT): Likewise.

gcc/testsuite/
* gcc.target/i386/struct-size.c: New.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@213699 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/66908
* c-ubsan.c: Include gimplify.h.
(ubsan_instrument_division): Unshare OP0 and OP1.
(ubsan_instrument_shift): Likewise.
* c-c++-common/ubsan/pr66908.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@226110 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/66977
* typeck.c (get_member_function_from_ptrfunc): Don't sanitize
RSHIFT_EXPR.
* g++.dg/ubsan/pr66977.C: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@226440 138bc75d-0d04-0410-961f-82ee72b054a4

* c-ubsan.c (ubsan_instrument_division): Use unshare_expr throughout.
(ubsan_instrument_shift): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@226116 138bc75d-0d04-0410-961f-82ee72b054a4

2015-08-21 Yury Gribov <y.gribov@samsung.com>

* sanitizer.def (BUILT_IN_UBSAN_HANDLE_NONNULL_ARG,
BUILT_IN_UBSAN_HANDLE_NONNULL_ARG): Fix builtin types.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@227054 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/67279
* c-typeck.c (build_binary_op): Don't instrument static initializers.
* gcc.dg/ubsan/pr67279.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@227491 138bc75d-0d04-0410-961f-82ee72b054a4

2015-09-17 Bernd Edlinger <bernd.edlinger@hotmail.de>

PR sanitizer/64078
* c-c++-common/ubsan/object-size-9.c (s): Add alignment attribute.
(f2, f3): Make the function static.
* c-c++-common/ubsan/object-size-10.c (a, b): Add alignment attribute.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@227886 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/64906
* c-ubsan.c (ubsan_instrument_division): Also pre-evaluate OP1.
* c-c++-common/ubsan/pr64906.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@228112 138bc75d-0d04-0410-961f-82ee72b054a4

* c-ubsan.c (ubsan_instrument_division): Remove unnecessary code.
(ubsan_instrument_shift): Likewise.
* c-c++-common/ubsan/bounds-11.c: New test.
* c-c++-common/ubsan/bounds-12.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@228114 138bc75d-0d04-0410-961f-82ee72b054a4

2015-10-29 Richard Biener <rguenther@suse.de>

PR middle-end/56956
* fold-const.c (fold_cond_expr_with_comparison): Do not fold
unsigned conditonal negation to ABS_EXPR.
* c-c++-common/ubsan/pr56956.c: New testcase.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@229517 138bc75d-0d04-0410-961f-82ee72b054a4

PR c++/67941
* lambda.c (maybe_add_lambda_conv_op): Mark _FUN as
no_sanitize_undefined.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@230897 138bc75d-0d04-0410-961f-82ee72b054a4

* lambda.c (maybe_add_lambda_conv_op): Only set no_sanitize_undefined
if SANITIZE_NULL.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@230908 138bc75d-0d04-0410-961f-82ee72b054a4

gcc:
PR sanitizer/68418
* c-family/c-ubsan.c (ubsan_instrument_shift): Disable sanitization
of left shifts for wrapping signed types as well.

gcc/testsuite:
PR sanitizer/68418
* gcc.dg/ubsan/c99-wrapv-shift-1.c, gcc.dg/ubsan/c99-wrapv-shift-2.c:
New testcases.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@231582 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/69055
* ubsan.c (ubsan_instrument_float_cast): Call
initialize_sanitizer_builtins.
* gfortran.dg/pr69055.f90: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@232024 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/69099
* c-convert.c (convert) [INTEGER_TYPE]: Drop ARG. Don't pass ARG to
ubsan_instrument_float_cast. Fold EXPR. Use NULL_TREE instead of NULL.
* convert.c (convert_to_integer_1): Adjust call to
ubsan_instrument_float_cast. Use NULL_TREE instead of NULL.
* ubsan.c (ubsan_instrument_float_cast): Drop the ARG parameter. Use
EXPR instead of ARG.
* ubsan.h (ubsan_instrument_float_cast): Adjust declaration.
* gcc.dg/ubsan/float-cast-overflow-atomic.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@232099 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/68824
* tsan/tsan_interceptors.cc (NEED_TLS_GET_ADDR, __tls_get_addr,
InitializeInterceptors): Cherry pick upstream r258119.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@232555 138bc75d-0d04-0410-961f-82ee72b054a4

Fix PR sanitizer/69276
* g++.dg/asan/pr69276.C: New test.

PR sanitizer/PR69276
* asan.c (has_stmt_been_instrumented_p): Instrument gimple calls
that are gimple_store_p.
(maybe_instrument_call): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@233137 138bc75d-0d04-0410-961f-82ee72b054a4

Do not emit red stack zones for a fn with no_sanitize_address

PR sanitizer/69863
* cfgexpand.c (asan_sanitize_stack_p): New function.
(partition_stack_vars): Use the function.
(expand_stack_vars): Likewise.
(defer_stack_allocation): Likewise.
(expand_used_vars): Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@233524 138bc75d-0d04-0410-961f-82ee72b054a4

2016-04-08 Maxim Ostapenko <m.ostapenko@samsung.com>

PR sanitizer/70541
* asan.c (instrument_derefs): If we get unknown location, extract it
with EXPR_LOCATION.
(maybe_instrument_call): Instrument gimple_call's arguments if needed.
* c-c++-common/asan/pr70541.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@234827 138bc75d-0d04-0410-961f-82ee72b054a4

PR sanitizer/70712
* cfgexpand.c (expand_stack_vars): Fix typo.
* c-c++-common/asan/pr70712.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@235384 138bc75d-0d04-0410-961f-82ee72b054a4

Change-Id: Ie47e8a2109bfedb19d2cd756a4683c4fd405f64e
Signed-off-by: Maxim Ostapenko <m.ostapenko@samsung.com>
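Among the backported option changes above, -fsanitize=bounds-strict is the one whose effect is hardest to read off the ChangeLog entries alone. The minimal C sketch below is only an illustration of the intended difference from plain -fsanitize=bounds on a flexible-array-member-like trailing array; it is not one of the bounds-10.c or user-section testcases added by this commit, and the file name, build line and exact diagnostic wording are assumptions, not part of the patch.

/* Minimal illustration, not part of this commit: a trailing one-element
   array is treated like a flexible array member, so plain
   -fsanitize=bounds intentionally does not instrument the access below,
   while -fsanitize=bounds-strict is expected to report it at run time.
   Possible build line: gcc -O0 -fsanitize=bounds-strict bounds-sketch.c  */

struct S
{
  int len;
  int data[1];   /* flexible-array-member-like trailing array */
};

static struct S s;

int
main (void)
{
  volatile int i = 1;           /* keep the index non-constant */
  return s.data[i];             /* index 1 is outside int[1] on purpose */
}

Compiled with plain -fsanitize=bounds the program is expected to run silently; with -fsanitize=bounds-strict a libubsan out-of-bounds report is expected instead, which matches the "Don't skip instrumenting flexible member array-like members" entry above.
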
-rw-r--r--config/ChangeLog10
-rw-r--r--config/bootstrap-asan.mk3
-rwxr-xr-xgcc/ChangeLog808
-rw-r--r--gcc/Makefile.in2
-rw-r--r--gcc/asan.c735
-rw-r--r--gcc/asan.h31
-rw-r--r--gcc/builtin-attrs.def2
-rw-r--r--gcc/builtins.c57
-rw-r--r--gcc/builtins.def2
-rw-r--r--gcc/c-family/ChangeLog132
-rw-r--r--gcc/c-family/c-common.c21
-rw-r--r--gcc/c-family/c-common.h4
-rw-r--r--gcc/c-family/c-gimplify.c84
-rw-r--r--gcc/c-family/c-ubsan.c376
-rw-r--r--gcc/c-family/c-ubsan.h5
-rw-r--r--gcc/c/ChangeLog57
-rw-r--r--gcc/c/c-convert.c21
-rw-r--r--gcc/c/c-decl.c6
-rw-r--r--gcc/c/c-parser.c7
-rw-r--r--gcc/c/c-tree.h2
-rw-r--r--gcc/c/c-typeck.c40
-rw-r--r--gcc/cfgexpand.c49
-rw-r--r--gcc/common.opt32
-rw-r--r--gcc/config/aarch64/aarch64-builtins.c6
-rw-r--r--gcc/config/aarch64/aarch64-linux.h6
-rw-r--r--gcc/config/aarch64/aarch64.c11
-rw-r--r--gcc/config/elfos.h4
-rw-r--r--gcc/convert.c17
-rw-r--r--gcc/cp/ChangeLog81
-rw-r--r--gcc/cp/cp-gimplify.c28
-rw-r--r--gcc/cp/decl.c6
-rw-r--r--gcc/cp/error.c9
-rw-r--r--gcc/cp/except.c2
-rw-r--r--gcc/cp/lambda.c9
-rw-r--r--gcc/cp/parser.c4
-rw-r--r--gcc/cp/semantics.c106
-rw-r--r--gcc/cp/tree.c2
-rw-r--r--gcc/cp/typeck.c30
-rw-r--r--gcc/doc/invoke.texi147
-rw-r--r--gcc/flag-types.h14
-rw-r--r--gcc/fold-const.c42
-rw-r--r--gcc/gcc.c409
-rw-r--r--gcc/gimple-fold.c13
-rw-r--r--gcc/gimple.c3
-rw-r--r--gcc/gimplify.c18
-rw-r--r--gcc/internal-fn.c60
-rw-r--r--gcc/internal-fn.def4
-rw-r--r--gcc/internal-fn.h7
-rw-r--r--gcc/ipa-pure-const.c2
-rw-r--r--gcc/lto-streamer-out.c2
-rw-r--r--gcc/opts-global.c13
-rw-r--r--gcc/opts.c100
-rw-r--r--gcc/params.def2
-rw-r--r--gcc/predict.c2
-rw-r--r--gcc/sanitizer.def137
-rw-r--r--gcc/sanopt.c634
-rw-r--r--gcc/stor-layout.c24
-rw-r--r--gcc/stor-layout.h3
-rwxr-xr-xgcc/testsuite/ChangeLog805
-rw-r--r--gcc/testsuite/c-c++-common/asan/aggressive-opts.c28
-rw-r--r--gcc/testsuite/c-c++-common/asan/asan-interface-1.c1
-rw-r--r--gcc/testsuite/c-c++-common/asan/bitfield-5.c24
-rw-r--r--gcc/testsuite/c-c++-common/asan/global-overflow-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/asan/halt_on_error-1.c23
-rw-r--r--gcc/testsuite/c-c++-common/asan/halt_on_error-2.c24
-rw-r--r--gcc/testsuite/c-c++-common/asan/heap-overflow-1.c6
-rw-r--r--gcc/testsuite/c-c++-common/asan/instrument-with-calls-1.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/instrument-with-calls-2.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/instrument-with-calls-3.c14
-rw-r--r--gcc/testsuite/c-c++-common/asan/kasan-recover-1.c11
-rw-r--r--gcc/testsuite/c-c++-common/asan/kasan-recover-2.c11
-rw-r--r--gcc/testsuite/c-c++-common/asan/memcmp-1.c4
-rw-r--r--gcc/testsuite/c-c++-common/asan/misalign-1.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/misalign-2.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-asan-globals.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-instrument-reads.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-instrument-writes.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-1.c16
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-10.c18
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-11.c20
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-12.c16
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-13.c15
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-14.c15
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-15.c16
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-2.c26
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-4.c10
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-5.c12
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-6.c13
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-7.c22
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-8.c14
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-9.c13
-rw-r--r--gcc/testsuite/c-c++-common/asan/no-use-after-return.c5
-rw-r--r--gcc/testsuite/c-c++-common/asan/null-deref-1.c4
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr63316.c22
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr63888.c34
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr64820.c39
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr70541.c34
-rw-r--r--gcc/testsuite/c-c++-common/asan/pr70712.c32
-rw-r--r--gcc/testsuite/c-c++-common/asan/red-align-1.c20
-rw-r--r--gcc/testsuite/c-c++-common/asan/red-align-2.c20
-rw-r--r--gcc/testsuite/c-c++-common/asan/sanitize-all-1.c12
-rw-r--r--gcc/testsuite/c-c++-common/asan/sanity-check-pure-c-1.c4
-rw-r--r--gcc/testsuite/c-c++-common/asan/shadow-offset-1.c11
-rw-r--r--gcc/testsuite/c-c++-common/asan/stack-overflow-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/asan/strlen-overflow-1.c17
-rw-r--r--gcc/testsuite/c-c++-common/asan/strncpy-overflow-1.c8
-rw-r--r--gcc/testsuite/c-c++-common/asan/use-after-free-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/asan/user-section-1.c11
-rw-r--r--gcc/testsuite/c-c++-common/asan/user-section-2.c11
-rw-r--r--gcc/testsuite/c-c++-common/asan/user-section-3.c11
-rw-r--r--gcc/testsuite/c-c++-common/pr53874.c35
-rw-r--r--gcc/testsuite/c-c++-common/pr61405.c31
-rw-r--r--gcc/testsuite/c-c++-common/tsan/mutexset1.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-1.c41
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-2.c56
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-3.c66
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-4.c14
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-5.c15
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-6.c33
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-7.c33
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-8.c32
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/align-9.c21
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/attrib-2.c71
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/attrib-3.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/attrib-4.c15
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-1.c81
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-10.c16
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-11.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-12.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-2.c167
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-3.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-4.c17
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-5.c113
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-6.c37
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-7.c54
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-8.c14
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/bounds-9.c24
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/div-by-zero-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/div-by-zero-2.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c6
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/div-by-zero-6.c49
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/div-by-zero-7.c41
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-1.c204
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-10.c46
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-2.c73
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-3.c40
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-4.c52
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-5.c40
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-6.c40
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.c196
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.h156
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-8.c143
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-9.c35
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-cast.h39
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c26
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/load-bool-enum.c4
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/nonnull-1.c38
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/nonnull-2.c36
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/nonnull-3.c36
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/nonnull-4.c34
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/nonnull-5.c34
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-1.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-10.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-11.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-3.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-4.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-5.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-6.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-7.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-8.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/null-9.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-1.c125
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-10.c79
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-2.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-3.c56
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-4.c31
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-5.c38
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-6.c9
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-7.c29
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-8.c32
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/object-size-9.c97
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c26
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-add-3.c17
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-add-4.c15
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-int128.c4
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c6
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-mul-3.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-mul-4.c62
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c18
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-negate-2.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-negate-3.c22
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c20
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/overflow-sub-4.c20
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr56917.c34
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr56956.c15
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr59503.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr59667.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr60613-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr60613-2.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr63520.c16
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr63802.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr63839.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr63866.c11
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr63879-1.c23
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr63879-2.c13
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr64121.c16
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr64289.c9
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr64344-1.c9
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr64344-2.c11
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr64906.c12
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr65367.c9
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/pr66908.c15
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/recovery-1.c9
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/recovery-2.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/recovery-3.c9
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/recovery-common.inc19
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/sanitize-all-1.c8
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/sanitize-all-2.c41
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/sanitize-all-3.c42
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/sanitize-all-4.c42
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/save-expr-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-2.c8
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-3.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-4.c2
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-5.c33
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-6.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-7.c28
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/shift-8.c64
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/undefined-1.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/undefined-2.c8
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/unreachable-2.c14
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/vla-1.c28
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/vla-2.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/vla-3.c10
-rw-r--r--gcc/testsuite/c-c++-common/ubsan/vla-4.c10
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_mem_test.cc22
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_oob_test.cc2
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_str_test.cc37
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_test.C2
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_test.cc370
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_test_config.h16
-rw-r--r--gcc/testsuite/g++.dg/asan/asan_test_utils.h24
-rw-r--r--gcc/testsuite/g++.dg/asan/deep-stack-uaf-1.C6
-rw-r--r--gcc/testsuite/g++.dg/asan/deep-tail-call-1.C10
-rw-r--r--gcc/testsuite/g++.dg/asan/default-options-1.C2
-rw-r--r--gcc/testsuite/g++.dg/asan/large-func-test-1.C6
-rw-r--r--gcc/testsuite/g++.dg/asan/pr69276.C38
-rw-r--r--gcc/testsuite/g++.dg/asan/sanitizer_pthread_wrappers.h64
-rw-r--r--gcc/testsuite/g++.dg/asan/sanitizer_test_config.h28
-rw-r--r--gcc/testsuite/g++.dg/asan/sanitizer_test_utils.h74
-rw-r--r--gcc/testsuite/g++.dg/asan/symbolize-callback-1.C21
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-shift1.C73
-rw-r--r--gcc/testsuite/g++.dg/cpp1y/constexpr-shift1.C9
-rw-r--r--gcc/testsuite/g++.dg/ubsan/align-1.C27
-rw-r--r--gcc/testsuite/g++.dg/ubsan/align-2.C45
-rw-r--r--gcc/testsuite/g++.dg/ubsan/align-3.C45
-rw-r--r--gcc/testsuite/g++.dg/ubsan/attrib-1.C27
-rw-r--r--gcc/testsuite/g++.dg/ubsan/cxx11-shift-1.C12
-rw-r--r--gcc/testsuite/g++.dg/ubsan/cxx11-shift-2.C4
-rw-r--r--gcc/testsuite/g++.dg/ubsan/cxx1y-vla.C13
-rw-r--r--gcc/testsuite/g++.dg/ubsan/div-by-zero-1.C10
-rw-r--r--gcc/testsuite/g++.dg/ubsan/float-cast-overflow-bf.C62
-rw-r--r--gcc/testsuite/g++.dg/ubsan/null-1.C30
-rw-r--r--gcc/testsuite/g++.dg/ubsan/null-2.C39
-rw-r--r--gcc/testsuite/g++.dg/ubsan/null-3.C20
-rw-r--r--gcc/testsuite/g++.dg/ubsan/null-4.C20
-rw-r--r--gcc/testsuite/g++.dg/ubsan/null-5.C19
-rw-r--r--gcc/testsuite/g++.dg/ubsan/null-6.C5
-rw-r--r--gcc/testsuite/g++.dg/ubsan/object-size-1.C18
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr63813.C12
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr63956.C172
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr64632.C23
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr64984.C76
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr65000.C14
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr65019.C24
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr65583.C140
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr66452.C16
-rw-r--r--gcc/testsuite/g++.dg/ubsan/pr66977.C27
-rw-r--r--gcc/testsuite/g++.dg/ubsan/return-2.C10
-rw-r--r--gcc/testsuite/g++.dg/ubsan/return-3.C27
-rw-r--r--gcc/testsuite/g++.dg/ubsan/shift-1.C31
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/bounds-1.c10
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/bounds-2.c19
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/c-shift-2.c31
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-1.c9
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-2.c9
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-atomic.c171
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-bf.c72
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/object-size-9.c24
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/pr63690.c9
-rw-r--r--gcc/testsuite/gcc.dg/ubsan/pr67279.c14
-rw-r--r--gcc/testsuite/gcc.target/aarch64/pr65624.c15
-rw-r--r--gcc/testsuite/gfortran.dg/pr69055.f9010
-rw-r--r--gcc/testsuite/lib/prune.exp1
-rw-r--r--gcc/testsuite/lib/tsan-dg.exp14
-rw-r--r--gcc/testsuite/lib/ubsan-dg.exp10
-rw-r--r--gcc/toplev.c1
-rw-r--r--gcc/tree-core.h10
-rw-r--r--gcc/tree-pretty-print.c6
-rw-r--r--gcc/tree-ssa-ccp.c3
-rw-r--r--gcc/tree-ssa-reassoc.c7
-rw-r--r--gcc/tree.c6
-rw-r--r--gcc/tree.def2
-rw-r--r--gcc/tree.h17
-rw-r--r--gcc/ubsan.c1191
-rw-r--r--gcc/ubsan.h26
-rw-r--r--gcc/wide-int.h3169
-rw-r--r--include/ChangeLog9
-rw-r--r--include/libiberty.h32
-rw-r--r--libbacktrace/ChangeLog7
-rw-r--r--libbacktrace/elf.c3
-rw-r--r--libcpp/ChangeLog6
-rw-r--r--libcpp/include/cpplib.h3
-rw-r--r--libiberty/ChangeLog23
-rw-r--r--libiberty/Makefile.in27
-rw-r--r--libiberty/config.in31
-rwxr-xr-xlibiberty/configure122
-rw-r--r--libiberty/configure.ac14
-rw-r--r--libiberty/functions.texi18
-rw-r--r--libiberty/pex-common.c8
-rw-r--r--libiberty/pex-common.h2
-rw-r--r--libiberty/pex-djgpp.c6
-rw-r--r--libiberty/pex-unix.c7
-rw-r--r--libiberty/pex-win32.c6
-rw-r--r--libiberty/strtoll.c175
-rw-r--r--libiberty/strtoull.c122
-rw-r--r--libiberty/testsuite/Makefile.in11
-rw-r--r--libiberty/testsuite/test-strtol.c184
-rw-r--r--libsanitizer/ChangeLog281
-rw-r--r--libsanitizer/HOWTO_MERGE39
-rw-r--r--libsanitizer/MERGE2
-rw-r--r--libsanitizer/Makefile.in3
-rw-r--r--libsanitizer/asan/Makefile.am14
-rw-r--r--libsanitizer/asan/Makefile.in44
-rw-r--r--libsanitizer/asan/asan_activation.cc142
-rw-r--r--libsanitizer/asan/asan_activation.h21
-rw-r--r--libsanitizer/asan/asan_activation_flags.inc33
-rw-r--r--libsanitizer/asan/asan_allocator.cc906
-rw-r--r--libsanitizer/asan/asan_allocator.h89
-rw-r--r--libsanitizer/asan/asan_allocator2.cc811
-rw-r--r--libsanitizer/asan/asan_debugging.cc139
-rw-r--r--libsanitizer/asan/asan_dll_thunk.cc196
-rw-r--r--libsanitizer/asan/asan_fake_stack.cc117
-rw-r--r--libsanitizer/asan/asan_fake_stack.h8
-rw-r--r--libsanitizer/asan/asan_flags.cc177
-rw-r--r--libsanitizer/asan/asan_flags.h92
-rw-r--r--libsanitizer/asan/asan_flags.inc138
-rw-r--r--libsanitizer/asan/asan_globals.cc108
-rw-r--r--libsanitizer/asan/asan_init_version.h32
-rw-r--r--libsanitizer/asan/asan_intercepted_functions.h77
-rw-r--r--libsanitizer/asan/asan_interceptors.cc549
-rw-r--r--libsanitizer/asan/asan_interceptors.h90
-rw-r--r--libsanitizer/asan/asan_interface_internal.h157
-rw-r--r--libsanitizer/asan/asan_internal.h54
-rw-r--r--libsanitizer/asan/asan_linux.cc171
-rw-r--r--libsanitizer/asan/asan_mac.cc290
-rw-r--r--libsanitizer/asan/asan_mac.h57
-rw-r--r--libsanitizer/asan/asan_malloc_linux.cc115
-rw-r--r--libsanitizer/asan/asan_malloc_mac.cc370
-rw-r--r--libsanitizer/asan/asan_malloc_win.cc124
-rw-r--r--libsanitizer/asan/asan_mapping.h144
-rw-r--r--libsanitizer/asan/asan_new_delete.cc74
-rw-r--r--libsanitizer/asan/asan_poisoning.cc188
-rw-r--r--libsanitizer/asan/asan_poisoning.h39
-rw-r--r--libsanitizer/asan/asan_posix.cc109
-rw-r--r--libsanitizer/asan/asan_preinit.cc16
-rw-r--r--libsanitizer/asan/asan_report.cc898
-rw-r--r--libsanitizer/asan/asan_report.h84
-rw-r--r--libsanitizer/asan/asan_rtl.cc623
-rw-r--r--libsanitizer/asan/asan_stack.cc27
-rw-r--r--libsanitizer/asan/asan_stack.h101
-rw-r--r--libsanitizer/asan/asan_stats.cc22
-rw-r--r--libsanitizer/asan/asan_stats.h8
-rw-r--r--libsanitizer/asan/asan_suppressions.cc108
-rw-r--r--libsanitizer/asan/asan_suppressions.h28
-rw-r--r--libsanitizer/asan/asan_thread.cc110
-rw-r--r--libsanitizer/asan/asan_thread.h60
-rw-r--r--libsanitizer/asan/asan_win.cc250
-rw-r--r--libsanitizer/asan/asan_win_dll_thunk.cc425
-rw-r--r--libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc96
-rw-r--r--libsanitizer/asan/libtool-version2
-rw-r--r--libsanitizer/config.h.in6
-rwxr-xr-xlibsanitizer/configure136
-rw-r--r--libsanitizer/configure.ac61
-rw-r--r--libsanitizer/configure.tgt7
-rw-r--r--libsanitizer/include/sanitizer/allocator_interface.h64
-rw-r--r--libsanitizer/include/sanitizer/asan_interface.h116
-rw-r--r--libsanitizer/include/sanitizer/common_interface_defs.h50
-rw-r--r--libsanitizer/include/sanitizer/coverage_interface.h63
-rw-r--r--libsanitizer/include/sanitizer/dfsan_interface.h29
-rw-r--r--libsanitizer/include/sanitizer/lsan_interface.h46
-rw-r--r--libsanitizer/include/sanitizer/msan_interface.h109
-rw-r--r--libsanitizer/include/sanitizer/tsan_interface_atomic.h (renamed from libsanitizer/tsan/tsan_interface_atomic.h)163
-rw-r--r--libsanitizer/include/system/asm/ptrace.h7
-rw-r--r--libsanitizer/interception/Makefile.am1
-rw-r--r--libsanitizer/interception/Makefile.in6
-rw-r--r--libsanitizer/interception/interception.h32
-rw-r--r--libsanitizer/interception/interception_linux.cc6
-rw-r--r--libsanitizer/interception/interception_linux.h24
-rw-r--r--libsanitizer/interception/interception_type_test.cc12
-rw-r--r--libsanitizer/interception/interception_win.cc242
-rw-r--r--libsanitizer/interception/interception_win.h36
-rw-r--r--libsanitizer/libbacktrace/Makefile.am1
-rw-r--r--libsanitizer/libbacktrace/Makefile.in5
-rw-r--r--libsanitizer/lsan/Makefile.am1
-rw-r--r--libsanitizer/lsan/Makefile.in6
-rw-r--r--libsanitizer/lsan/lsan.cc62
-rw-r--r--libsanitizer/lsan/lsan.h20
-rw-r--r--libsanitizer/lsan/lsan_allocator.cc78
-rw-r--r--libsanitizer/lsan/lsan_allocator.h2
-rw-r--r--libsanitizer/lsan/lsan_common.cc547
-rw-r--r--libsanitizer/lsan/lsan_common.h84
-rw-r--r--libsanitizer/lsan/lsan_common_linux.cc72
-rw-r--r--libsanitizer/lsan/lsan_flags.inc41
-rw-r--r--libsanitizer/lsan/lsan_interceptors.cc58
-rw-r--r--libsanitizer/lsan/lsan_preinit.cc6
-rw-r--r--libsanitizer/lsan/lsan_thread.cc10
-rw-r--r--libsanitizer/lsan/lsan_thread.h4
-rw-r--r--libsanitizer/sanitizer_common/Makefile.am25
-rw-r--r--libsanitizer/sanitizer_common/Makefile.in73
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_addrhashmap.h340
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_allocator.cc30
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_allocator.h179
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_allocator_interface.h36
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_allocator_internal.h41
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_asm.h6
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_atomic.h21
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_atomic_clang.h71
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h95
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h114
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h136
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_bitvector.h349
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_bvgraph.h163
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common.cc444
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common.h382
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc3165
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc558
-rwxr-xr-xlibsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc84
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc309
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc121
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc65
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_coverage.cc111
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc956
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc125
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h410
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc188
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc426
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h91
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_flag_parser.cc169
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_flag_parser.h120
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_flags.cc177
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_flags.h66
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_flags.inc192
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_freebsd.h135
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_interface_internal.h59
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_internal_defs.h91
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_lfstack.h8
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_libc.cc73
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_libc.h41
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_libignore.cc55
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_libignore.h9
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_linux.cc950
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_linux.h33
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc471
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_list.h38
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_mac.cc384
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_mac.h39
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc337
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_mutex.h90
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc17
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h70
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_platform.h118
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h305
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc14
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc530
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h557
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_posix.cc305
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_posix.h81
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc266
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_printf.cc67
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_procmaps.h77
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc179
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc86
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc88
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc188
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_quarantine.h24
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_report_decorator.h12
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stackdepot.cc281
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stackdepot.h38
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h176
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stacktrace.cc193
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stacktrace.h98
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc62
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc142
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h62
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stoptheworld.h3
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc317
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_suppressions.cc145
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_suppressions.h39
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer.cc84
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer.h158
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h149
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc130
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h22
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc413
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc182
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h46
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc716
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc263
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc12
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc136
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc2
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_thread_registry.cc20
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_thread_registry.h15
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc144
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h59
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc156
-rw-r--r--libsanitizer/sanitizer_common/sanitizer_win.cc538
-rw-r--r--libsanitizer/tsan/Makefile.am64
-rw-r--r--libsanitizer/tsan/Makefile.in109
-rw-r--r--libsanitizer/tsan/tsan_clock.cc456
-rw-r--r--libsanitizer/tsan/tsan_clock.h95
-rw-r--r--libsanitizer/tsan/tsan_defs.h84
-rw-r--r--libsanitizer/tsan/tsan_dense_alloc.h135
-rw-r--r--libsanitizer/tsan/tsan_fd.cc60
-rw-r--r--libsanitizer/tsan/tsan_fd.h8
-rw-r--r--libsanitizer/tsan/tsan_flags.cc135
-rw-r--r--libsanitizer/tsan/tsan_flags.h77
-rw-r--r--libsanitizer/tsan/tsan_flags.inc76
-rw-r--r--libsanitizer/tsan/tsan_interceptors.cc1643
-rw-r--r--libsanitizer/tsan/tsan_interceptors.h47
-rw-r--r--libsanitizer/tsan/tsan_interface.cc81
-rw-r--r--libsanitizer/tsan/tsan_interface.h27
-rw-r--r--libsanitizer/tsan/tsan_interface_ann.cc53
-rw-r--r--libsanitizer/tsan/tsan_interface_atomic.cc471
-rw-r--r--libsanitizer/tsan/tsan_interface_inl.h32
-rw-r--r--libsanitizer/tsan/tsan_interface_java.cc196
-rw-r--r--libsanitizer/tsan/tsan_interface_java.h15
-rw-r--r--libsanitizer/tsan/tsan_libdispatch_mac.cc70
-rw-r--r--libsanitizer/tsan/tsan_malloc_mac.cc67
-rw-r--r--libsanitizer/tsan/tsan_md5.cc14
-rw-r--r--libsanitizer/tsan/tsan_mman.cc208
-rw-r--r--libsanitizer/tsan/tsan_mman.h12
-rw-r--r--libsanitizer/tsan/tsan_mutex.cc57
-rw-r--r--libsanitizer/tsan/tsan_mutex.h14
-rw-r--r--libsanitizer/tsan/tsan_mutexset.h9
-rw-r--r--libsanitizer/tsan/tsan_new_delete.cc94
-rw-r--r--libsanitizer/tsan/tsan_platform.h411
-rw-r--r--libsanitizer/tsan/tsan_platform_linux.cc288
-rw-r--r--libsanitizer/tsan/tsan_platform_mac.cc152
-rw-r--r--libsanitizer/tsan/tsan_platform_posix.cc122
-rw-r--r--libsanitizer/tsan/tsan_platform_windows.cc12
-rw-r--r--libsanitizer/tsan/tsan_report.cc195
-rw-r--r--libsanitizer/tsan/tsan_report.h40
-rw-r--r--libsanitizer/tsan/tsan_rtl.cc621
-rw-r--r--libsanitizer/tsan/tsan_rtl.h371
-rw-r--r--libsanitizer/tsan/tsan_rtl_aarch64.S204
-rw-r--r--libsanitizer/tsan/tsan_rtl_amd64.S22
-rw-r--r--libsanitizer/tsan/tsan_rtl_mutex.cc293
-rw-r--r--libsanitizer/tsan/tsan_rtl_report.cc554
-rw-r--r--libsanitizer/tsan/tsan_rtl_thread.cc103
-rw-r--r--libsanitizer/tsan/tsan_stack_trace.cc44
-rw-r--r--libsanitizer/tsan/tsan_stack_trace.h37
-rw-r--r--libsanitizer/tsan/tsan_stat.cc370
-rw-r--r--libsanitizer/tsan/tsan_stat.h362
-rw-r--r--libsanitizer/tsan/tsan_suppressions.cc161
-rw-r--r--libsanitizer/tsan/tsan_suppressions.h11
-rw-r--r--libsanitizer/tsan/tsan_symbolize.cc118
-rw-r--r--libsanitizer/tsan/tsan_symbolize.h4
-rw-r--r--libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc191
-rw-r--r--libsanitizer/tsan/tsan_sync.cc467
-rw-r--r--libsanitizer/tsan/tsan_sync.h101
-rw-r--r--libsanitizer/tsan/tsan_trace.h30
-rw-r--r--libsanitizer/tsan/tsan_update_shadow_word_inl.h16
-rw-r--r--libsanitizer/tsan/tsan_vector.h22
-rw-r--r--libsanitizer/ubsan/Makefile.am15
-rw-r--r--libsanitizer/ubsan/Makefile.in34
-rw-r--r--libsanitizer/ubsan/ubsan_checks.inc51
-rw-r--r--libsanitizer/ubsan/ubsan_diag.cc206
-rw-r--r--libsanitizer/ubsan/ubsan_diag.h116
-rw-r--r--libsanitizer/ubsan/ubsan_flags.cc92
-rw-r--r--libsanitizer/ubsan/ubsan_flags.h47
-rw-r--r--libsanitizer/ubsan/ubsan_flags.inc24
-rw-r--r--libsanitizer/ubsan/ubsan_handlers.cc461
-rw-r--r--libsanitizer/ubsan/ubsan_handlers.h51
-rw-r--r--libsanitizer/ubsan/ubsan_handlers_cxx.cc76
-rw-r--r--libsanitizer/ubsan/ubsan_handlers_cxx.h13
-rw-r--r--libsanitizer/ubsan/ubsan_init.cc76
-rw-r--r--libsanitizer/ubsan/ubsan_init.h29
-rw-r--r--libsanitizer/ubsan/ubsan_init_standalone.cc32
-rw-r--r--libsanitizer/ubsan/ubsan_platform.h27
-rw-r--r--libsanitizer/ubsan/ubsan_type_hash.cc234
-rw-r--r--libsanitizer/ubsan/ubsan_type_hash.h5
-rw-r--r--libsanitizer/ubsan/ubsan_type_hash_itanium.cc249
-rw-r--r--libsanitizer/ubsan/ubsan_type_hash_win.cc79
-rw-r--r--libsanitizer/ubsan/ubsan_value.cc14
-rw-r--r--libsanitizer/ubsan/ubsan_value.h7
-rw-r--r--libstdc++-v3/include/bits/regex_compiler.h3
605 files changed, 46624 insertions, 13939 deletions
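The diffstat lists two new halt_on_error tests under c-c++-common/asan; together with the "Allow -fsanitize-recover=address for userspace sanitization" entry in the commit message they cover recoverable ASan. The sketch below is a rough, self-contained illustration of that usage pattern, not one of those tests; the file name, build line and run line are assumptions.

/* Sketch of recoverable ASan in the spirit of the halt_on_error tests
   listed above; this file itself is not part of the commit.
   Possible build line: gcc -g -fsanitize=address -fsanitize-recover=address recover-sketch.c
   Possible run line:   ASAN_OPTIONS=halt_on_error=0 ./a.out
   With recovery compiled in and halt_on_error=0, the bad read is expected
   to be reported while execution continues to the final printf.  */

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  volatile char *p = (volatile char *) malloc (8);
  volatile char c = p[8];       /* heap-buffer-overflow read */
  (void) c;
  free ((void *) p);
  printf ("still running after the report\n");
  return 0;
}
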
diff --git a/config/ChangeLog b/config/ChangeLog
index e569d7aed4b..a816687ba9b 100644
--- a/config/ChangeLog
+++ b/config/ChangeLog
@@ -1,3 +1,13 @@
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * bootstrap-asan.mk: Replace ASAN_OPTIONS=detect_leaks with
+ LSAN_OPTIONS=detect_leaks.
+
+2014-11-17 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR bootstrap/63888
+ * bootstrap-asan.mk (ASAN_OPTIONS): Export "detect_leaks=0".
+
2014-10-30 Release Manager
* GCC 4.9.2 released.
diff --git a/config/bootstrap-asan.mk b/config/bootstrap-asan.mk
index fbef02125dc..70baaf9bb17 100644
--- a/config/bootstrap-asan.mk
+++ b/config/bootstrap-asan.mk
@@ -1,5 +1,8 @@
# This option enables -fsanitize=address for stage2 and stage3.
+# Suppress LeakSanitizer in bootstrap.
+export LSAN_OPTIONS="detect_leaks=0"
+
STAGE2_CFLAGS += -fsanitize=address
STAGE3_CFLAGS += -fsanitize=address
POSTSTAGE1_LDFLAGS += -fsanitize=address -static-libasan \
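
The exported LSAN_OPTIONS above matters because every ASan-instrumented stage2/stage3 tool runs LeakSanitizer at exit, and the compiler deliberately does not free all of its memory, so a leak report would make the tool exit unsuccessfully and break the bootstrap. The following standalone sketch only illustrates the behavior being suppressed; the file and command lines are assumptions and are not part of the patch.

/* leak-sketch.c: illustrative only, not part of the patch.
   Possible build line: gcc -fsanitize=address leak-sketch.c
   A plain ./a.out run is expected to print a LeakSanitizer report at exit
   and return a non-zero status; LSAN_OPTIONS=detect_leaks=0 ./a.out is
   expected to stay silent and exit 0, which is what a bootstrap needs.  */

#include <stdlib.h>
#include <string.h>

int
main (void)
{
  char *p = (char *) malloc (64);
  memset (p, 'x', 64);
  p = NULL;                     /* drop the only reference: a definite leak */
  return 0;
}
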
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 23d5d67c527..e981d6ed6ed 100755
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,811 @@
+2016-04-23 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/70712
+ * cfgexpand.c (expand_stack_vars): Fix typo.
+
+2016-04-08 Maxim Ostapenko <m.ostapenko@samsung.com>
+
+ PR sanitizer/70541
+ * asan.c (instrument_derefs): If we get unknown location, extract it
+ with EXPR_LOCATION.
+ (maybe_instrument_call): Instrument gimple_call's arguments if needed.
+
+2016-02-18 Jakub Jelinek <jakub@redhat.com>
+ Martin Liska <mliska@suse.cz>
+
+ PR sanitizer/69863
+ * cfgexpand.c (asan_sanitize_stack_p): New function.
+ (partition_stack_vars): Use the function.
+ (expand_stack_vars): Likewise.
+ (defer_stack_allocation): Likewise.
+ (expand_used_vars): Likewise.
+
+2016-02-04 Martin Liska <mliska@suse.cz>
+
+ PR sanitizer/69276
+ * asan.c (has_stmt_been_instrumented_p): Instrument gimple calls
+ that are gimple_store_p.
+ (maybe_instrument_call): Likewise.
+
+2016-01-06 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/69099
+ * convert.c (convert_to_integer_1): Adjust call to
+ ubsan_instrument_float_cast. Use NULL_TREE instead of NULL.
+ * ubsan.c (ubsan_instrument_float_cast): Drop the ARG parameter. Use
+ EXPR instead of ARG.
+ * ubsan.h (ubsan_instrument_float_cast): Adjust declaration.
+
+2016-01-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/69055
+ * ubsan.c (ubsan_instrument_float_cast): Call
+ initialize_sanitizer_builtins.
+
+2015-12-12 Paolo Bonzini <bonzini@gnu.org>
+
+ PR sanitizer/68418
+ * c-family/c-ubsan.c (ubsan_instrument_shift): Disable
+ sanitization of left shifts for wrapping signed types as well.
+
+2015-10-29 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/56956
+ * fold-const.c (fold_cond_expr_with_comparison): Do not fold
+ unsigned conditonal negation to ABS_EXPR.
+
+2015-08-21 Yury Gribov <y.gribov@samsung.com>
+
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_NONNULL_ARG,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_ARG): Fix builtin types.
+
+2013-08-07 Ilya Enkovich <ilya.enkovich@intel.com>
+
+ * config/elfos.h (ASM_DECLARE_OBJECT_NAME): Use decl size
+ instead of type size.
+ (ASM_FINISH_DECLARE_OBJECT): Likewise.
+
+2015-11-23 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * opts.c (finish_options): Allow -fsanitize-recover=address for
+ userspace sanitization.
+ * asan.c (asan_expand_check_ifn): Redefine recover_p.
+ * doc/invoke.texi (fsanitize-recover): Update documentation.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * asan.c (asan_emit_stack_protection): Don't pass local stack to
+ asan_stack_malloc_[n] anymore. Check if asan_stack_malloc_[n] returned
+ NULL and use local stack than.
+ (asan_finish_file): Insert __asan_version_mismatch_check_v[n] call
+ in addition to __asan_init.
+ * sanitizer.def (BUILT_IN_ASAN_INIT): Rename to __asan_init.
+ (BUILT_IN_ASAN_VERSION_MISMATCH_CHECK): Add new builtin call.
+ * asan.h (asan_intercepted_p): Handle new string builtins.
+ * ubsan.c (ubsan_use_new_style_p): New function.
+ (ubsan_instrument_float_cast): If location is unknown, assign
+ input_location to loc. Propagate loc to ubsan_create_data if
+ ubsan_use_new_style_p returned true.
+
+2015-06-20 Marek Polacek <polacek@redhat.com>
+
+ * common.opt (fsanitize-undefined-trap-on-error): Add Driver.
+
+2015-06-08 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/66452
+ * toplev.c (check_global_declaration): Don't warn about artificial
+ decls.
+
+2015-05-07 Marek Polacek <polacek@redhat.com>
+ Martin Uecker <uecker@eecs.berkeley.edu>
+
+ * doc/invoke.texi: Document -fsanitize=bounds-strict.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_BOUNDS_STRICT, or it
+ into SANITIZE_NONDEFAULT.
+ * opts.c (common_handle_option): Handle -fsanitize=bounds-strict.
+
+2015-04-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/65709
+ * ubsan.c (instrument_mem_ref): Use TREE_TYPE (base) instead of
+ TREE_TYPE (TREE_TYPE (t)).
+
+2015-03-27 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/65583
+ * ubsan.c (ubsan_create_edge): New function.
+ (instrument_bool_enum_load): Call it.
+ (instrument_nonnull_arg): Likewise.
+ (instrument_nonnull_return): Likewise.
+ (instrument_object_size): Likewise.
+
+2015-03-10 Marek Polacek <polacek@redhat.com>
+ Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/65367
+ * ubsan.c (ubsan_expand_objsize_ifn): Update GSI instead of GSI_ORIG
+ when only removing the statement. Handle expanding UBSAN_OBJECT_SIZE
+ separately.
+
+2015-03-07 Marek Polacek <polacek@redhat.com>
+ Martin Uecker <uecker@eecs.berkeley.edu>
+
+ PR sanitizer/65280
+ * doc/invoke.texi: Update description of -fsanitize=bounds.
+
+2015-02-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/65019
+ * ubsan.c (ubsan_expand_objsize_ifn): Always return true.
+
+2015-01-27 Jakub Jelinek <jakub@redhat.com>
+ Yury Gribov <y.gribov@samsung.com>
+
+ PR ubsan/64741
+ * ubsan.c (ubsan_source_location): Refactor code.
+ (ubsan_type_descriptor): Update type size. Refactor code.
+
+2015-01-06 Jakub Jelinek <jakub@redhat.com>
+
+ * opts.c (common_handle_option): Add support for
+ -fno-sanitize=all and -f{,no-}sanitize-recover=all.
+ * doc/invoke.texi: Document -fno-sanitize=all,
+ -f{,no-}sanitize-recover=all. Document that
+ -fsanitize=float-cast-overflow is not enabled
+ by -fsanitize=undefined. Fix up documentation
+ of -f{,no-}sanitize-recover.
+
+2015-01-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64344
+ * ubsan.h (ubsan_instrument_float_cast): Add ARG argument.
+ * ubsan.c (ubsan_instrument_float_cast): Add ARG argument, pass
+ it to libubsan handler instead of EXPR. Fold comparisons earlier,
+ if the result is integer_zerop, return NULL_TREE.
+ * convert.c (convert_to_integer): Pass expr as ARG.
+
+2014-12-04 Marek Polacek <polacek@redhat.com>
+
+ PR middle-end/56917
+ * fold-const.c (fold_unary_loc): Perform the negation in A's type
+ when transforming ~ (A - 1) or ~ (A + -1) to -A.
+
+2014-12-01 Marek Polacek <polacek@redhat.com>
+ Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64121
+ * ubsan.c (instrument_object_size): Stop searching if the base
+ occurs in abnormal phi.
+
+2014-12-01 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63956
+ * ubsan.c (is_ubsan_builtin_p): Check also built-in class.
+
+2015-04-22 Yury Gribov <y.gribov@samsung.com>
+
+ * doc/invoke.texi (-fsanitize-sections): Update description.
+ * asan.c (set_sanitized_sections): Parse incoming arg.
+ (section_sanitized_p): Support wildcards.
+
+2015-04-17 Yury Gribov <y.gribov@samsung.com>
+
+ * asan.c (set_sanitized_sections): New function.
+ (section_sanitized_p): Ditto.
+ (asan_protect_global): Optionally sanitize user-defined
+ sections.
+ * asan.h (set_sanitized_sections): Declare new function.
+ * common.opt (fsanitize-sections): New option.
+ * doc/invoke.texi (-fsanitize-sections): Document new option.
+ * opts-global.c (handle_common_deferred_options): Handle new
+ option.
+
+2015-04-01 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR target/65624
+ * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_builtin): Increase
+ args array size by one to avoid buffer overflow.
+
+2015-03-16 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR sanitizer/64820
+ * cfgexpand.c (align_base): New function.
+ (alloc_stack_frame_space): Call it.
+ (expand_stack_vars): Align prev_frame to be sure
+ data->asan_vec elements aligned properly.
+
+2014-11-26 Jakub Jelinek <jakub@redhat.com>
+
+ * gcc.c (SANITIZER_SPEC): Don't error on -fsanitize=thread
+ without -pie or -shared, error on -fsanitize=thread -static instead.
+
+2014-11-26 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63788
+ * asan.c (initialize_sanitizer_builtins): Add BT_FN_SIZE_CONST_PTR_INT
+ var. Conditionally build BUILT_IN_OBJECT_SIZE decl.
+ (ATTR_PURE_NOTHROW_LEAF_LIST): Define.
+
+2014-11-19 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63690
+ * ubsan.c (instrument_object_size): Check for MEM_REF.
+
+2014-11-19 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63879
+ * fold-const.c (negate_expr_p) <case NEGATE_EXPR>: Return
+ !TYPE_OVERFLOW_SANITIZED.
+ (fold_negate_expr) <case INTEGER_CST>: Fold when overflow
+ does not trap and when overflow wraps, or when SANITIZE_SI_OVERFLOW
+ is 0.
+
+2014-11-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63520
+ * internal-fn.c (expand_ubsan_result_store): New function.
+ (expand_addsub_overflow, expand_neg_overflow, expand_mul_overflow):
+ Use it instead of just emit_move_insn.
+
+2014-11-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63913
+ * ubsan.c: Include tree-eh.h.
+ (instrument_bool_enum_load): Handle loads that can throw.
+
+2014-11-18 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63866
+ * asan.c (asan_global_struct): Create a TYPE_DECL for "__asan_global",
+ put it into TYPE_NAME and TYPE_STUB_DECL.
+ * ubsan.c (ubsan_type_descriptor_type): New variable.
+ Function renamed to ...
+ (ubsan_get_type_descriptor_type): ... this. Cache
+ return value in ubsan_type_descriptor_type variable.
+ Create a TYPE_DECL for "__ubsan_type_descriptor", put it into
+ TYPE_NAME and TYPE_STUB_DECL.
+ (ubsan_get_source_location_type): Create a TYPE_DECL for
+ "__ubsan_source_location", put it into TYPE_NAME and TYPE_STUB_DECL.
+ (ubsan_type_descriptor, ubsan_create_data): Call
+ ubsan_get_type_descriptor_type instead of ubsan_type_descriptor_type.
+ Create a TYPE_DECL for name, put it into TYPE_NAME and TYPE_STUB_DECL.
+
+2014-11-18 Marat Zakirov <m.zakirov@samsung.com>
+
+ * opts.c (finish_options): Disable aggressive opts for sanitizer.
+ (common_handle_option): Move code to finish_options.
+
+2014-11-18 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/63802
+ * stor-layout.c (min_align_of_type): Respect user alignment
+ more.
+
+2014-11-14 Jakub Jelinek <jakub@redhat.com>
+ Marek Polacek <polacek@redhat.com>
+
+ * sanopt.c: Include tree-ssa-operands.h.
+ (struct sanopt_info): Add has_freeing_call_p,
+ has_freeing_call_computed_p, imm_dom_path_with_freeing_call_p,
+ imm_dom_path_with_freeing_call_computed_p, freeing_call_events,
+ being_visited_p fields.
+ (struct sanopt_ctx): Add asan_check_map field.
+ (imm_dom_path_with_freeing_call, maybe_optimize_ubsan_null_ifn,
+ maybe_optimize_asan_check_ifn): New functions.
+ (sanopt_optimize_walker): Use them, optimize even ASAN_CHECK
+ internal calls.
+ (pass_sanopt::execute): Call sanopt_optimize even for
+ -fsanitize=address.
+ * gimple.c (nonfreeing_call_p): Return true for non-ECF_LEAF
+ internal calls.
+
+2014-10-15 Jakub Jelinek <jakub@redhat.com>
+
+ * tree-ssa-reassoc.c (optimize_range_tests_diff): Perform
+ MINUS_EXPR in unsigned type to avoid undefined behavior.
+
+2014-08-12 Yury Gribov <y.gribov@samsung.com>
+
+ * internal-fn.c (init_internal_fns): Fix off-by-one.
+
+2014-11-14 Marek Polacek <polacek@redhat.com>
+
+ * fold-const.c (fold_negate_expr): Don't fold INTEGER_CST if
+ that overflows when SANITIZE_SI_OVERFLOW is on. Guard -(-A)
+ folding with TYPE_OVERFLOW_SANITIZED.
+
+2014-11-14 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63839
+ * asan.c (ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST,
+ ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST): Define.
+ * builtin-attrs.def (ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST):
+ Define.
+ * builtins.c (fold_builtin_0): Don't include ubsan.h. Don't
+ instrument BUILT_IN_UNREACHABLE here.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE): Make
+ const.
+ * sanopt.c (pass_sanopt::execute): Instrument BUILT_IN_UNREACHABLE.
+ * tree-ssa-ccp.c (optimize_unreachable): Bail out if
+ SANITIZE_UNREACHABLE.
+ * ubsan.c (ubsan_instrument_unreachable): Rewrite for GIMPLE.
+ * ubsan.h (ubsan_instrument_unreachable): Adjust declaration.
+
+2014-11-12 Marek Polacek <polacek@redhat.com>
+
+ * fold-const.c (fold_binary_loc): Don't fold if the result
+ is undefined.
+ * match.pd (A + (-B) -> A - B, A - (-B) -> A + B,
+ -(-A) -> A): Likewise.
+
+2014-10-09 Marek Polacek <polacek@redhat.com>
+
+ * doc/invoke.texi: Document -fsanitize=bool and -fsanitize=enum.
+
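One classic trigger for the -fsanitize=bool instrumentation documented above (a hypothetical test program, not one of the testsuite files): the read of B below is the kind of bool load reported through the load-invalid-value handlers named elsewhere in this log.

    #include <stdbool.h>
    #include <string.h>

    int
    main (void)
    {
      unsigned char raw = 2;        /* not a valid representation of bool */
      bool b;
      memcpy (&b, &raw, sizeof b);
      return b ? 0 : 1;             /* instrumented bool load */
    }
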
+2014-11-06 Marek Polacek <polacek@redhat.com>
+
+ * sanopt.c (sanopt_optimize_walker): Limit removal of the checks.
+ Remove vector limit.
+
+2014-11-04 Marek Polacek <polacek@redhat.com>
+
+ * Makefile.in (OBJS): Add sanopt.o.
+ (GTFILES): Add sanopt.c.
+ * asan.h (asan_expand_check_ifn): Declare.
+ * asan.c (asan_expand_check_ifn): No longer static.
+ (class pass_sanopt, pass_sanopt::execute, make_pass_sanopt): Move...
+ * sanopt.c: ...here. New file.
+
+2014-11-04 Marek Polacek <polacek@redhat.com>
+
+ * ubsan.c (instrument_object_size): Optimize [x & CST] array accesses.
+
+2014-10-31 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63697
+ * tree-vrp.c (simplify_internal_call_using_ranges): For subcode ==
+ MINUS_EXPR, check overflow on vr0.min - vr1.max and vr0.max - vr1.min
+ instead of vr0.min - vr1.min and vr0.max - vr1.max.
+
+2014-10-28 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * asan.h (asan_intercepted_p): New function.
+ * asan.c (asan_mem_ref_hasher::hash): Remove MEM_REF access size from
+ hash value construction. Call iterative_hash_expr instead of explicit
+ hash building.
+ (asan_mem_ref_hasher::equal): Change condition.
+ (has_mem_ref_been_instrumented): Likewise.
+ (update_mem_ref_hash_table): Likewise.
+ (maybe_update_mem_ref_hash_table): New function.
+ (instrument_strlen_call): Removed.
+ (get_mem_refs_of_builtin_call): Handle new parameter.
+ (instrument_builtin_call): Call maybe_update_mem_ref_hash_table instead
+ of instrument_mem_region_access if intercepted_p is true.
+ (instrument_mem_region_access): Instrument only base with len instead of
+ base and end with 1.
+ (build_check_stmt): Remove start_instrumented and end_instrumented
+ parameters.
+ (enum asan_check_flags): Remove ASAN_CHECK_START_INSTRUMENTED and
+ ASAN_CHECK_END_INSTRUMENTED. Change ASAN_CHECK_LAST.
+ (asan_expand_check_ifn): Remove start_instrumented and end_instrumented.
+ * builtins.c (expand_builtin): Include asan.h. Don't expand string/memory
+ builtin functions that have interceptors if ASan is enabled.
+
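The net effect of the hash-table rework above: the cache is keyed on the address expression alone and remembers the widest access already instrumented, so a narrower access at the same address needs no second check.  A toy model of that bookkeeping (a flat array standing in for GCC's hash_table; all names hypothetical):

    #include <string.h>

    struct ref_entry { const char *base; long access_size; };
    static struct ref_entry table[64];
    static int n_entries;

    /* Keep only the widest recorded size per base, like the updated
       update_mem_ref_hash_table.  */
    static void
    record_instrumented (const char *base, long size)
    {
      for (int i = 0; i < n_entries; i++)
        if (strcmp (table[i].base, base) == 0)
          {
            if (table[i].access_size < size)
              table[i].access_size = size;
            return;
          }
      if (n_entries < 64)
        {
          table[n_entries].base = base;
          table[n_entries].access_size = size;
          n_entries++;
        }
    }

    /* An access is redundant if an equal or wider one was already checked,
       like the new has_mem_ref_been_instrumented condition.  */
    static int
    already_instrumented_p (const char *base, long size)
    {
      for (int i = 0; i < n_entries; i++)
        if (strcmp (table[i].base, base) == 0)
          return table[i].access_size >= size;
      return 0;
    }

    int
    main (void)
    {
      record_instrumented ("*p", 8);
      /* Covered by the earlier 8-byte check; a 16-byte access would not be.  */
      return already_instrumented_p ("*p", 4) ? 0 : 1;
    }
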
+2014-10-28 Yury Gribov <y.gribov@samsung.com>
+
+ * asan.c (report_error_func): Add noabort path.
+ (check_func): Ditto. Formatting.
+ (asan_expand_check_ifn): Handle noabort path.
+ * common.opt (flag_sanitize_recover): Add SANITIZE_KERNEL_ADDRESS
+ to default value.
+ * doc/invoke.texi (-fsanitize-recover=): Mention KASan.
+ * opts.c (finish_options): Reword comment.
+ * sanitizer.def: Add noabort ASan builtins.
+
+2014-10-28 Yury Gribov <y.gribov@samsung.com>
+
+ * asan.c (set_asan_shadow_offset): New function.
+ (asan_shadow_offset): Likewise.
+ (asan_emit_stack_protection): Call asan_shadow_offset.
+ (build_shadow_mem_access): Likewise.
+ * asan.h (set_asan_shadow_offset): Declare.
+ * common.opt (fasan-shadow-offset): New option.
+ (frandom-seed): Fixed parameter name.
+ * doc/invoke.texi (fasan-shadow-offset): Describe new option.
+ (frandom-seed): Fixed parameter name.
+ * opts-global.c (handle_common_deferred_options): Handle
+ -fasan-shadow-offset.
+ * opts.c (common_handle_option): Likewise.
+
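-fasan-shadow-offset= only replaces the constant in the shadow-address formula (addr >> ASAN_SHADOW_SHIFT) + offset used throughout asan.c and asan.h below.  A small numeric illustration, assuming the common x86-64 userspace offset 0x7fff8000 purely as an example value:

    #include <stdint.h>
    #include <stdio.h>

    #define ASAN_SHADOW_SHIFT 3                /* as in gcc/asan.h */

    /* Example only; the real value comes from targetm.asan_shadow_offset ()
       or from -fasan-shadow-offset=.  */
    static const uint64_t shadow_offset = 0x7fff8000ULL;

    /* One shadow byte describes 8 application bytes.  */
    static uint64_t
    shadow_of (uint64_t app_addr)
    {
      return (app_addr >> ASAN_SHADOW_SHIFT) + shadow_offset;
    }

    int
    main (void)
    {
      uint64_t p = 0x602000000010ULL;          /* arbitrary example address */
      printf ("shadow byte of %#llx is at %#llx\n",
              (unsigned long long) p, (unsigned long long) shadow_of (p));
      return 0;
    }
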
+2014-10-22 Jakub Jelinek <jakub@redhat.com>
+ Yury Gribov <y.gribov@samsung.com>
+
+ * common.opt (flag_sanitize_recover): New variable.
+ (fsanitize-recover): Remove Var/Init, deprecate.
+ (fsanitize-recover=): New option.
+ * doc/invoke.texi (fsanitize-recover): Update docs.
+ * opts.c (finish_options): Use opts->x_flag_sanitize
+ instead of flag_sanitize. Prohibit -fsanitize-recover
+ for anything besides UBSan. Formatting.
+ (common_handle_option): Handle OPT_fsanitize_recover_
+ and OPT_fsanitize_recover. Use opts->x_flag_sanitize
+ instead of flag_sanitize.
+ * asan.c (pass_sanopt::execute): Fix up formatting.
+ * ubsan.c (ubsan_expand_bounds_ifn, ubsan_expand_null_ifn,
+ ubsan_expand_objsize_ifn, ubsan_build_overflow_builtin,
+ instrument_bool_enum_load, ubsan_instrument_float_cast,
+ instrument_nonnull_arg, instrument_nonnull_return): Check
+ bits in flag_sanitize_recover bitmask instead of
+ flag_sanitize_recover as bool flag.
+
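With this entry -fsanitize-recover= becomes a bitmask parallel to flag_sanitize, so each expansion site tests its own bit, as the asan_expand_check_ifn hunk further down does for SANITIZE_USER_ADDRESS and SANITIZE_KERNEL_ADDRESS.  A schematic with made-up bit values standing in for the real sanitize_code enum:

    #include <stdbool.h>

    /* Made-up bits; the real ones live in gcc/flag-types.h.  */
    enum { X_USER_ADDRESS = 1 << 0, X_KERNEL_ADDRESS = 1 << 1 };

    static unsigned flag_sanitize = X_USER_ADDRESS;
    static unsigned flag_sanitize_recover = X_USER_ADDRESS;

    /* Per-sanitizer recovery decision instead of one global boolean.  */
    static bool
    recover_p (void)
    {
      if (flag_sanitize & X_USER_ADDRESS)
        return (flag_sanitize_recover & X_USER_ADDRESS) != 0;
      return (flag_sanitize_recover & X_KERNEL_ADDRESS) != 0;
    }

    int
    main (void)
    {
      return recover_p () ? 0 : 1;  /* 0: the user-address recover bit is set */
    }
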
+2014-10-16 DJ Delorie <dj@redhat.com>
+
+ * flag-types.h (sanitize_code): Don't assume targets have 32-bit
+ integers.
+
+2014-10-13 Marat Zakirov <m.zakirov@samsung.com>
+
+ * asan.c (instrument_derefs): BIT_FIELD_REF added.
+
+2014-10-10 Marek Polacek <polacek@redhat.com>
+
+ * asan.c (pass_sanopt::execute): Handle IFN_UBSAN_OBJECT_SIZE.
+ * doc/invoke.texi: Document -fsanitize=object-size.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_OBJECT_SIZE and
+ or it into SANITIZE_UNDEFINED.
+ * gimple-fold.c (gimple_fold_call): Optimize IFN_UBSAN_OBJECT_SIZE.
+ * internal-fn.c (expand_UBSAN_OBJECT_SIZE): New function.
+ * internal-fn.def (UBSAN_OBJECT_SIZE): Define.
+ * opts.c (common_handle_option): Handle -fsanitize=object-size.
+ * ubsan.c: Include tree-object-size.h.
+ (ubsan_type_descriptor): Call tree_to_uhwi instead of tree_to_shwi.
+ (ubsan_expand_bounds_ifn): Use false instead of 0.
+ (ubsan_expand_objsize_ifn): New function.
+ (instrument_object_size): New function.
+ (pass_ubsan::execute): Add object size instrumentation.
+ * ubsan.h (ubsan_expand_objsize_ifn): Declare.
+
+2014-10-03 Yury Gribov <y.gribov@samsung.com>
+
+ * asan.c (asan_finish_file): Disable __asan_init calls for KASan;
+ don't emit empty ctors.
+
+2014-09-24 Marek Polacek <polacek@redhat.com>
+
+ PR c/61405
+ PR c/53874
+ * asan.c (maybe_instrument_call): Add default case.
+ * ipa-pure-const.c (special_builtin_state): Likewise.
+ * predict.c (expr_expected_value_1): Likewise.
+ * lto-streamer-out.c (write_symbol): Initialize variable.
+
+2014-09-24 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63316
+ * asan.c (asan_expand_check_ifn): Fix up align >= 8 optimization.
+
+2015-01-23 Jakub Jelinek <jakub@redhat.com>
+
+ PR driver/64737
+ * gcc.c (print_configuration): Don't print a blank line at the end
+ here...
+ (run_attempt): ... but here instead.
+
+2015-01-22 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR driver/64690
+ * gcc.c (insert_comments): New function.
+ (try_generate_repro): Call it.
+ (append_text): Removed.
+
+2014-09-26 Thomas Schwinge <thomas@codesourcery.com>
+
+ * gcc.c (try_generate_repro): Remove argument "prog". Change all
+ users.
+ (run_attempt): Handle errors of "pex_run" invocation.
+
+2014-09-26 Jakub Jelinek <jakub@redhat.com>
+ Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * common.opt: New option.
+ * doc/invoke.texi: Describe new option.
+ * gcc.c (execute): Don't free first string early, but at the end
+ of the function. Call retry_ice if compiler exited with
+ ICE_EXIT_CODE.
+ (main): Factor out common code.
+ (print_configuration): New function.
+ (files_equal_p): Likewise.
+ (check_repro): Likewise.
+ (run_attempt): Likewise.
+ (do_report_bug): Likewise.
+ (append_text): Likewise.
+ (try_generate_repro): Likewise.
+
+2014-08-11 Yury Gribov <y.gribov@samsung.com>
+
+ * gimple.c (gimple_call_fnspec): Support internal functions.
+ (gimple_call_return_flags): Use const.
+ * Makefile.in (GTFILES): Add internal-fn.h to list of GC files.
+ * internal-fn.def: Add fnspec information.
+ * internal-fn.h (internal_fn_fnspec): New function.
+ (init_internal_fns): Declare new function.
+ * internal-fn.c (internal_fn_fnspec_array): New global variable.
+ (init_internal_fns): New function.
+ * tree-core.h: Update macro call.
+ * tree.c (build_common_builtin_nodes): Initialize internal fns.
+
+2014-09-23 Kostya Serebryany <kcc@google.com>
+
+ Update to match the changed asan API.
+ * asan.c (asan_global_struct): Update the __asan_global definition
+ to match the new API.
+ (asan_add_global): Ditto.
+ * sanitizer.def (BUILT_IN_ASAN_INIT): Rename __asan_init_v3
+ to __asan_init_v4.
+
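The v4 ABI bump goes together with the __location field that the asan.c hunks below append to the global descriptor.  A plain C rendering of that descriptor as the runtime would see it after this change (field names come from asan_global_struct; the location layout is illustrative, matching the filename/line/column constructor built in asan_add_global):

    #include <stdint.h>

    typedef uintptr_t uptr;

    /* Source location record referenced by __location (illustrative).  */
    struct asan_global_source_location
    {
      const char *filename;
      uint32_t line;
      uint32_t column;
    };

    /* __asan_global after this change: the six original fields plus
       the trailing __location pointer.  */
    struct asan_global
    {
      uptr __beg;
      uptr __size;
      uptr __size_with_redzone;
      const void *__name;
      const void *__module_name;
      uptr __has_dynamic_init;
      struct asan_global_source_location *__location;
    };
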
+2014-09-19 Marat Zakirov <m.zakirov@samsung.com>
+
+ * asan.c (build_check_stmt): Alignment arg was added.
+ (asan_expand_check_ifn): Optimization for alignment >= 8.
+
+2014-09-10 Jakub Jelinek <jakub@redhat.com>
+
+ * flag-types.h (enum sanitize_code): Add SANITIZE_NONNULL_ATTRIBUTE
+ and SANITIZE_RETURNS_NONNULL_ATTRIBUTE, or them into SANITIZE_UNDEFINED.
+ * opts.c (common_handle_option): Handle SANITIZE_NONNULL_ATTRIBUTE and
+ SANITIZE_RETURNS_NONNULL_ATTRIBUTE and disable
+ flag_delete_null_pointer_checks for them.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_NONNULL_ARG,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_ARG_ABORT,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT): New.
+ * ubsan.c (instrument_bool_enum_load): Set *gsi back to
+ stmt's iterator.
+ (instrument_nonnull_arg, instrument_nonnull_return): New functions.
+ (pass_ubsan::gate): Return true even for SANITIZE_NONNULL_ATTRIBUTE
+ or SANITIZE_RETURNS_NONNULL_ATTRIBUTE.
+ (pass_ubsan::execute): Call instrument_nonnull_{arg,return}.
+ * doc/invoke.texi (-fsanitize=nonnull-attribute,
+ -fsanitize=returns-nonnull-attribute): Document.
+
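A minimal example of the situation -fsanitize=nonnull-attribute diagnoses (hypothetical program; the reporting goes through the __ubsan_handle_nonnull_arg handlers added above):

    #include <stddef.h>

    /* The callee promises it never receives a null pointer.  */
    __attribute__ ((nonnull))
    static int
    length_of (const char *s)
    {
      int n = 0;
      while (s[n])
        n++;
      return n;
    }

    int
    main (void)
    {
      const char *p = NULL;
      return length_of (p);   /* instrumented call: NULL for a nonnull arg */
    }
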
+2014-09-10 Jakub Jelinek <jakub@redhat.com>
+
+ * ubsan.h (struct ubsan_mismatch_data): Removed.
+ (ubsan_create_data): Remove MISMATCH argument, add LOCCNT argument.
+ * ubsan.c (ubsan_source_location): For unknown locations,
+ pass { NULL, 0, 0 } instead of { "<unknown>", x, y }.
+ (ubsan_create_data): Remove MISMATCH argument, add LOCCNT argument.
+ Allow more than one location and arbitrary extra arguments passed
+ in ... instead of through MISMATCH pointer.
+ (ubsan_instrument_unreachable, ubsan_expand_bounds_ifn,
+ ubsan_expand_null_ifn, ubsan_build_overflow_builtin,
+ instrument_bool_enum_load, ubsan_instrument_float_cast): Adjust
+ callers.
+
+2014-08-01 Jakub Jelinek <jakub@redhat.com>
+
+ * opts.c (common_handle_option): Handle -fsanitize=alignment.
+ * ubsan.h (enum ubsan_null_ckind): Add UBSAN_CTOR_CALL.
+ (ubsan_expand_bounds_ifn, ubsan_expand_null_ifn): Change return
+ type to bool.
+ * stor-layout.h (min_align_of_type): New prototype.
+ * asan.c (pass_sanopt::execute): Don't perform gsi_next if
+ ubsan_expand* told us not to do it. Remove the extra gsi_end_p
+ check.
+ * ubsan.c: Include builtins.h.
+ (ubsan_expand_bounds_ifn): Change return type to bool,
+ always return true.
+ (ubsan_expand_null_ifn): Change return type to bool, change
+ argument to gimple_stmt_iterator *. Handle both null and alignment
+ sanitization, take type from ckind argument's type rather than
+ first argument.
+ (instrument_member_call): Removed.
+ (instrument_mem_ref): Remove t argument, add mem and base arguments.
+ Handle both null and alignment sanitization, don't say whole
+ struct access is member access. Build 3 argument IFN_UBSAN_NULL
+ call instead of 2 argument.
+ (instrument_null): Adjust instrument_mem_ref caller. Don't
+ instrument calls here.
+ (pass_ubsan::gate, pass_ubsan::execute): Handle SANITIZE_ALIGNMENT
+ like SANITIZE_NULL.
+ * stor-layout.c (min_align_of_type): New function.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_ALIGNMENT.
+ Or it into SANITIZE_UNDEFINED.
+ * doc/invoke.texi (-fsanitize=alignment): Document.
+
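A minimal program of the kind -fsanitize=alignment flags (illustrative only; the misaligned load is expanded through the same ubsan_expand_null_ifn path described above):

    int
    main (void)
    {
      int storage[2] = { 0, 0 };
      char *bytes = (char *) storage;
      int *p = (int *) (bytes + 1);   /* deliberately misaligned for int */
      volatile int v = *p;            /* the load -fsanitize=alignment reports */
      return v;
    }
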
+2014-08-11 Yury Gribov <y.gribov@samsung.com>
+
+ * asan.c (asan_check_flags): New enum.
+ (build_check_stmt_with_calls): Removed function.
+ (build_check_stmt): Split inlining logic to
+ asan_expand_check_ifn.
+ (instrument_derefs): Rename parameter.
+ (instrument_mem_region_access): Rename parameter.
+ (instrument_strlen_call): Likewise.
+ (asan_expand_check_ifn): New function.
+ (asan_instrument): Remove old code.
+ (pass_sanopt::execute): Change handling of
+ asan-instrumentation-with-call-threshold.
+ (asan_clear_shadow): Fix formatting.
+ (asan_function_start): Likewise.
+ (asan_emit_stack_protection): Likewise.
+ * doc/invoke.texi (asan-instrumentation-with-call-threshold):
+ Update description.
+ * internal-fn.c (expand_ASAN_CHECK): New function.
+ * internal-fn.def (ASAN_CHECK): New internal function.
+ * params.def (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD):
+ Update description.
+ (PARAM_ASAN_USE_AFTER_RETURN): Likewise.
+ * tree.c: Small comment fix.
+
+2014-07-03 Marek Polacek <polacek@redhat.com>
+
+ * doc/invoke.texi (-fsanitize=bounds): Tweak wording.
+ (-fsanitize=float-divide-by-zero): Move to the table with
+ -fsanitize=undefined suboptions.
+ (-fsanitize=float-cast-overflow): Likewise.
+
+2014-06-30 Marek Polacek <polacek@redhat.com>
+
+ * convert.c (convert_to_integer): Don't instrument conversions if the
+ function has no_sanitize_undefined attribute.
+ * ubsan.c: Don't run the ubsan pass if the function has
+ no_sanitize_undefined attribute.
+
+2014-06-30 Jakub Jelinek <jakub@redhat.com>
+
+ * doc/invoke.texi (-fsanitize=bounds): Move to the table with
+ -fsanitize=undefined suboptions.
+
+2014-06-20 Marek Polacek <polacek@redhat.com>
+
+ * asan.c (pass_sanopt::execute): Handle IFN_UBSAN_BOUNDS.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_BOUNDS and or it
+ into SANITIZE_UNDEFINED.
+ * doc/invoke.texi: Describe -fsanitize=bounds.
+ * gimplify.c (gimplify_call_expr): Add gimplification of internal
+ functions created in the FEs.
+ * internal-fn.c: Move "internal-fn.h" after "tree.h".
+ (expand_UBSAN_BOUNDS): New function.
+ * internal-fn.def (UBSAN_BOUNDS): New internal function.
+ * internal-fn.h: Don't define internal functions here.
+ * opts.c (common_handle_option): Add -fsanitize=bounds.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS,
+ BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS_ABORT): Add.
+ * tree-core.h: Define internal functions here.
+ (struct tree_base): Add ifn field.
+ * tree-pretty-print.c: Include "internal-fn.h".
+ (dump_generic_node): Handle functions without CALL_EXPR_FN.
+ * tree.c (get_callee_fndecl): Likewise.
+ (build_call_expr_internal_loc): New function.
+ * tree.def (CALL_EXPR): Update description.
+ * tree.h (CALL_EXPR_IFN): Define.
+ (build_call_expr_internal_loc): Declare.
+ * ubsan.c (get_ubsan_type_info_for_type): Return 0 for non-arithmetic
+ types.
+ (ubsan_type_descriptor): Change bool parameter to enum
+ ubsan_print_style. Adjust the code. Add handling of
+ UBSAN_PRINT_ARRAY.
+ (ubsan_expand_bounds_ifn): New function.
+ (ubsan_expand_null_ifn): Adjust ubsan_type_descriptor call.
+ (ubsan_build_overflow_builtin): Likewise.
+ (instrument_bool_enum_load): Likewise.
+ (ubsan_instrument_float_cast): Likewise.
+ * ubsan.h (enum ubsan_print_style): New enum.
+ (ubsan_expand_bounds_ifn): Declare.
+ (ubsan_type_descriptor): Adjust declaration. Use a default parameter.
+
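A minimal program of the sort -fsanitize=bounds instruments (illustrative only; the out-of-range index is reported via the __ubsan_handle_out_of_bounds handlers added above):

    #include <stdio.h>

    int
    main (void)
    {
      int a[4] = { 1, 2, 3, 4 };
      volatile int i = 7;             /* volatile so the index is not folded */
      printf ("%d\n", a[i]);          /* instrumented array access */
      return 0;
    }
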
+2014-05-23 Marek Polacek <polacek@redhat.com>
+ Jakub Jelinek <jakub@redhat.com>
+
+ * builtins.def: Change SANITIZE_FLOAT_DIVIDE to SANITIZE_NONDEFAULT.
+ * gcc.c (sanitize_spec_function): Likewise.
+ * convert.c (convert_to_integer): Include "ubsan.h". Add
+ floating-point to integer instrumentation.
+ * doc/invoke.texi: Document -fsanitize=float-cast-overflow.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_FLOAT_CAST and
+ SANITIZE_NONDEFAULT.
+ * opts.c (common_handle_option): Handle -fsanitize=float-cast-overflow.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW,
+ BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW_ABORT): Add.
+ * ubsan.c: Include "realmpfr.h" and "dfp.h".
+ (get_ubsan_type_info_for_type): Handle REAL_TYPEs.
+ (ubsan_type_descriptor): Set tkind to 0xffff for types other than
+ float/double/long double.
+ (ubsan_instrument_float_cast): New function.
+ * ubsan.h (ubsan_instrument_float_cast): Declare.
+
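A tiny illustration of the conversions -fsanitize=float-cast-overflow targets (hypothetical program; reporting goes through the __ubsan_handle_float_cast_overflow handlers added above):

    #include <stdio.h>

    int
    main (void)
    {
      volatile double d = 1e30;       /* far outside the range of int */
      volatile int i = (int) d;       /* instrumented conversion */
      printf ("%d\n", i);
      return 0;
    }
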
+2014-05-02 Marek Polacek <polacek@redhat.com>
+
+ * doc/invoke.texi: Describe -fsanitize=float-divide-by-zero.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * gcc.c (sanitize_spec_function): Handle SANITIZE_FLOAT_DIVIDE.
+ * builtins.def: Initialize builtins even for SANITIZE_FLOAT_DIVIDE.
+ * flag-types.h (enum sanitize_code): Add SANITIZE_FLOAT_DIVIDE.
+ * opts.c (common_handle_option): Add -fsanitize=float-divide-by-zero.
+
+2014-04-22 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/60275
+ * common.opt (fsanitize-recover, fsanitize-undefined-trap-on-error):
+ New options.
+ * gcc.c (sanitize_spec_function): Don't return "" for "undefined"
+ if flag_sanitize_undefined_trap_on_error.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW_ABORT,
+ BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT,
+ BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE_ABORT,
+ BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT,
+ BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW_ABORT,
+ BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW_ABORT,
+ BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW_ABORT,
+ BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW_ABORT,
+ BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT): New builtins.
+ * ubsan.c (ubsan_instrument_unreachable): Return
+ __builtin_trap () if flag_sanitize_undefined_trap_on_error.
+ (ubsan_expand_null_ifn): Emit __builtin_trap ()
+ if flag_sanitize_undefined_trap_on_error and
+ __ubsan_handle_type_mismatch_abort if !flag_sanitize_recover.
+ (ubsan_expand_null_ifn, ubsan_build_overflow_builtin,
+ instrument_bool_enum_load): Emit __builtin_trap () if
+ flag_sanitize_undefined_trap_on_error and
+ __ubsan_handle_*_abort () if !flag_sanitize_recover.
+ * doc/invoke.texi (-fsanitize-recover,
+ -fsanitize-undefined-trap-on-error): Document.
+
+2014-04-22 Lin Zuojian <manjian2006@gmail.com>
+
+ PR middle-end/60281
+ * asan.c (asan_emit_stack_protection): Force the base to align to
+ appropriate bits if STRICT_ALIGNMENT. Set shadow_mem align to
+ appropriate bits if STRICT_ALIGNMENT.
+ * cfgexpand.c (expand_stack_vars): Set base_align appropriately
+ when asan is on.
+ (expand_used_vars): Leave a space in the stack frame for alignment
+ if STRICT_ALIGNMENT.
+
+2014-05-21 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/61272
+ * ubsan.c (is_ubsan_builtin_p): Turn assert into a condition.
+
2015-02-06 Jakub Jelinek <jakub@redhat.com>
PR ipa/64896
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index ce80aa257dd..ff73324c498 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1377,6 +1377,7 @@ OBJS = \
asan.o \
tsan.o \
ubsan.o \
+ sanopt.o \
tree-call-cdce.o \
tree-cfg.o \
tree-cfgcleanup.o \
@@ -2286,6 +2287,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/asan.c \
$(srcdir)/ubsan.c \
$(srcdir)/tsan.c \
+ $(srcdir)/sanopt.c \
$(srcdir)/ipa-devirt.c \
$(srcdir)/internal-fn.h \
@all_gtfiles@
diff --git a/gcc/asan.c b/gcc/asan.c
index f6c42a1d0b0..de400369bb1 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -54,6 +54,8 @@ along with GCC; see the file COPYING3. If not see
#include "ubsan.h"
#include "predict.h"
#include "params.h"
+#include "builtins.h"
+#include "fnmatch.h"
/* AddressSanitizer finds out-of-bounds and use-after-free bugs
with <2x slowdown on average.
@@ -228,11 +230,86 @@ along with GCC; see the file COPYING3. If not see
// 1 if it has dynamic initialization, 0 otherwise.
uptr __has_dynamic_init;
+
+ // A pointer to a struct that contains the source location; may be NULL.
+ __asan_global_source_location *__location;
}
A destructor function that calls the runtime asan library function
_asan_unregister_globals is also installed. */
+static unsigned HOST_WIDE_INT asan_shadow_offset_value;
+static bool asan_shadow_offset_computed;
+static vec<char *> sanitized_sections;
+
+/* Sets shadow offset to value in string VAL. */
+
+bool
+set_asan_shadow_offset (const char *val)
+{
+ char *endp;
+
+ errno = 0;
+#ifdef HAVE_LONG_LONG
+ asan_shadow_offset_value = strtoull (val, &endp, 0);
+#else
+ asan_shadow_offset_value = strtoul (val, &endp, 0);
+#endif
+ if (!(*val != '\0' && *endp == '\0' && errno == 0))
+ return false;
+
+ asan_shadow_offset_computed = true;
+
+ return true;
+}
+
+/* Set list of user-defined sections that need to be sanitized. */
+
+void
+set_sanitized_sections (const char *sections)
+{
+ char *pat;
+ unsigned i;
+ FOR_EACH_VEC_ELT (sanitized_sections, i, pat)
+ free (pat);
+ sanitized_sections.truncate (0);
+
+ for (const char *s = sections; *s; )
+ {
+ const char *end;
+ for (end = s; *end && *end != ','; ++end);
+ size_t len = end - s;
+ sanitized_sections.safe_push (xstrndup (s, len));
+ s = *end ? end + 1 : end;
+ }
+}
+
+/* Checks whether section SEC should be sanitized. */
+
+static bool
+section_sanitized_p (const char *sec)
+{
+ char *pat;
+ unsigned i;
+ FOR_EACH_VEC_ELT (sanitized_sections, i, pat)
+ if (fnmatch (pat, sec, FNM_PERIOD) == 0)
+ return true;
+ return false;
+}
+
+/* Returns Asan shadow offset. */
+
+static unsigned HOST_WIDE_INT
+asan_shadow_offset ()
+{
+ if (!asan_shadow_offset_computed)
+ {
+ asan_shadow_offset_computed = true;
+ asan_shadow_offset_value = targetm.asan_shadow_offset ();
+ }
+ return asan_shadow_offset_value;
+}
+
alias_set_type asan_shadow_set = -1;
/* Pointer types to 1 resp. 2 byte integers in shadow memory. A separate
@@ -248,9 +325,7 @@ enum asan_check_flags
ASAN_CHECK_STORE = 1 << 0,
ASAN_CHECK_SCALAR_ACCESS = 1 << 1,
ASAN_CHECK_NON_ZERO_LEN = 1 << 2,
- ASAN_CHECK_START_INSTRUMENTED = 1 << 3,
- ASAN_CHECK_END_INSTRUMENTED = 1 << 4,
- ASAN_CHECK_LAST = 1 << 5
+ ASAN_CHECK_LAST = 1 << 3
};
/* Hashtable support for memory references used by gimple
@@ -347,9 +422,7 @@ struct asan_mem_ref_hasher
inline hashval_t
asan_mem_ref_hasher::hash (const asan_mem_ref *mem_ref)
{
- hashval_t h = iterative_hash_expr (mem_ref->start, 0);
- h = iterative_hash_host_wide_int (mem_ref->access_size, h);
- return h;
+ return iterative_hash_expr (mem_ref->start, 0);
}
/* Compare two memory references. We accept the length of either
@@ -359,8 +432,7 @@ inline bool
asan_mem_ref_hasher::equal (const asan_mem_ref *m1,
const asan_mem_ref *m2)
{
- return (m1->access_size == m2->access_size
- && operand_equal_p (m1->start, m2->start, 0));
+ return operand_equal_p (m1->start, m2->start, 0);
}
static hash_table <asan_mem_ref_hasher> asan_mem_ref_ht;
@@ -411,7 +483,8 @@ has_mem_ref_been_instrumented (tree ref, HOST_WIDE_INT access_size)
asan_mem_ref r;
asan_mem_ref_init (&r, ref, access_size);
- return (get_mem_ref_hash_table ().find (&r) != NULL);
+ asan_mem_ref *saved_ref = get_mem_ref_hash_table ()->find (&r);
+ return saved_ref && saved_ref->access_size >= access_size;
}
/* Return true iff the memory reference REF has been instrumented. */
@@ -428,19 +501,11 @@ has_mem_ref_been_instrumented (const asan_mem_ref *ref)
static bool
has_mem_ref_been_instrumented (const asan_mem_ref *ref, tree len)
{
- /* First let's see if the address of the beginning of REF has been
- instrumented. */
- if (!has_mem_ref_been_instrumented (ref))
- return false;
+ HOST_WIDE_INT size_in_bytes
+ = tree_fits_shwi_p (len) ? tree_to_shwi (len) : -1;
- if (len != 0)
- {
- /* Let's see if the end of the region has been instrumented. */
- if (!has_mem_ref_been_instrumented (asan_mem_ref_get_end (ref, len),
- ref->access_size))
- return false;
- }
- return true;
+ return size_in_bytes != -1
+ && has_mem_ref_been_instrumented (ref->start, size_in_bytes);
}
/* Set REF to the memory reference present in a gimple assignment
@@ -486,7 +551,8 @@ get_mem_refs_of_builtin_call (const gimple call,
asan_mem_ref *dst,
tree *dst_len,
bool *dst_is_store,
- bool *dest_is_deref)
+ bool *dest_is_deref,
+ bool *intercepted_p)
{
gcc_checking_assert (gimple_call_builtin_p (call, BUILT_IN_NORMAL));
@@ -496,6 +562,8 @@ get_mem_refs_of_builtin_call (const gimple call,
bool is_store = true, got_reference_p = false;
HOST_WIDE_INT access_size = 1;
+ *intercepted_p = asan_intercepted_p ((DECL_FUNCTION_CODE (callee)));
+
switch (DECL_FUNCTION_CODE (callee))
{
/* (s, s, n) style memops. */
@@ -828,12 +896,12 @@ has_stmt_been_instrumented_p (gimple stmt)
tree src0_len = NULL_TREE, src1_len = NULL_TREE, dest_len = NULL_TREE;
bool src0_is_store = false, src1_is_store = false,
- dest_is_store = false, dest_is_deref = false;
+ dest_is_store = false, dest_is_deref = false, intercepted_p = true;
if (get_mem_refs_of_builtin_call (stmt,
&src0, &src0_len, &src0_is_store,
&src1, &src1_len, &src1_is_store,
&dest, &dest_len, &dest_is_store,
- &dest_is_deref))
+ &dest_is_deref, &intercepted_p))
{
if (src0.start != NULL_TREE
&& !has_mem_ref_been_instrumented (&src0, src0_len))
@@ -850,6 +918,16 @@ has_stmt_been_instrumented_p (gimple stmt)
return true;
}
}
+ else if (is_gimple_call (stmt) && gimple_store_p (stmt))
+ {
+ asan_mem_ref r;
+ asan_mem_ref_init (&r, NULL, 1);
+
+ r.start = gimple_call_lhs (stmt);
+ r.access_size = int_size_in_bytes (TREE_TYPE (r.start));
+ return has_mem_ref_been_instrumented (&r);
+ }
+
return false;
}
@@ -863,8 +941,8 @@ update_mem_ref_hash_table (tree ref, HOST_WIDE_INT access_size)
asan_mem_ref r;
asan_mem_ref_init (&r, ref, access_size);
- asan_mem_ref **slot = ht.find_slot (&r, INSERT);
- if (*slot == NULL)
+ asan_mem_ref **slot = ht->find_slot (&r, INSERT);
+ if (*slot == NULL || (*slot)->access_size < access_size)
*slot = asan_mem_ref_new (ref, access_size);
}
@@ -1073,12 +1151,16 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
snprintf (buf, sizeof buf, "__asan_stack_malloc_%d",
use_after_return_class);
ret = init_one_libfunc (buf);
- rtx addr = convert_memory_address (ptr_mode, base);
- ret = emit_library_call_value (ret, NULL_RTX, LCT_NORMAL, ptr_mode, 2,
+ ret = emit_library_call_value (ret, NULL_RTX, LCT_NORMAL, ptr_mode, 1,
GEN_INT (asan_frame_size
+ base_align_bias),
- TYPE_MODE (pointer_sized_int_node),
- addr, ptr_mode);
+ TYPE_MODE (pointer_sized_int_node));
+ /* __asan_stack_malloc_[n] returns a pointer to the fake stack if it
+ succeeded and NULL otherwise.  Check whether RET is NULL here and jump
+ over the BASE reassignment in that case.  Otherwise, reassign BASE to RET.  */
+ int very_unlikely = REG_BR_PROB_BASE / 2000 - 1;
+ emit_cmp_and_jump_insns (ret, const0_rtx, EQ, NULL_RTX,
+ VOIDmode, 0, lab, very_unlikely);
ret = convert_memory_address (Pmode, ret);
emit_move_insn (base, ret);
emit_label (lab);
@@ -1114,7 +1196,7 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb,
NULL_RTX, 1, OPTAB_DIRECT);
shadow_base
= plus_constant (Pmode, shadow_base,
- targetm.asan_shadow_offset ()
+ asan_shadow_offset ()
+ (base_align_bias >> ASAN_SHADOW_SHIFT));
gcc_assert (asan_shadow_set != -1
&& (ASAN_RED_ZONE_SIZE >> ASAN_SHADOW_SHIFT) == 4);
@@ -1302,11 +1384,13 @@ asan_protect_global (tree decl)
to be an array of such vars, putting padding in there
breaks this assumption. */
|| (DECL_SECTION_NAME (decl) != NULL_TREE
- && !DECL_HAS_IMPLICIT_SECTION_NAME_P (decl))
+ && !DECL_HAS_IMPLICIT_SECTION_NAME_P (decl)
+ && !section_sanitized_p (TREE_STRING_POINTER (DECL_SECTION_NAME (decl))))
|| DECL_SIZE (decl) == 0
|| ASAN_RED_ZONE_SIZE * BITS_PER_UNIT > MAX_OFILE_ALIGNMENT
|| !valid_constant_size_p (DECL_SIZE_UNIT (decl))
- || DECL_ALIGN_UNIT (decl) > 2 * ASAN_RED_ZONE_SIZE)
+ || DECL_ALIGN_UNIT (decl) > 2 * ASAN_RED_ZONE_SIZE
+ || TREE_TYPE (decl) == ubsan_get_source_location_type ())
return false;
rtl = DECL_RTL (decl);
@@ -1333,44 +1417,72 @@ asan_protect_global (tree decl)
IS_STORE is either 1 (for a store) or 0 (for a load). */
static tree
-report_error_func (bool is_store, HOST_WIDE_INT size_in_bytes, int *nargs)
-{
- static enum built_in_function report[2][6]
- = { { BUILT_IN_ASAN_REPORT_LOAD1, BUILT_IN_ASAN_REPORT_LOAD2,
- BUILT_IN_ASAN_REPORT_LOAD4, BUILT_IN_ASAN_REPORT_LOAD8,
- BUILT_IN_ASAN_REPORT_LOAD16, BUILT_IN_ASAN_REPORT_LOAD_N },
- { BUILT_IN_ASAN_REPORT_STORE1, BUILT_IN_ASAN_REPORT_STORE2,
- BUILT_IN_ASAN_REPORT_STORE4, BUILT_IN_ASAN_REPORT_STORE8,
- BUILT_IN_ASAN_REPORT_STORE16, BUILT_IN_ASAN_REPORT_STORE_N } };
+report_error_func (bool is_store, bool recover_p, HOST_WIDE_INT size_in_bytes,
+ int *nargs)
+{
+ static enum built_in_function report[2][2][6]
+ = { { { BUILT_IN_ASAN_REPORT_LOAD1, BUILT_IN_ASAN_REPORT_LOAD2,
+ BUILT_IN_ASAN_REPORT_LOAD4, BUILT_IN_ASAN_REPORT_LOAD8,
+ BUILT_IN_ASAN_REPORT_LOAD16, BUILT_IN_ASAN_REPORT_LOAD_N },
+ { BUILT_IN_ASAN_REPORT_STORE1, BUILT_IN_ASAN_REPORT_STORE2,
+ BUILT_IN_ASAN_REPORT_STORE4, BUILT_IN_ASAN_REPORT_STORE8,
+ BUILT_IN_ASAN_REPORT_STORE16, BUILT_IN_ASAN_REPORT_STORE_N } },
+ { { BUILT_IN_ASAN_REPORT_LOAD1_NOABORT,
+ BUILT_IN_ASAN_REPORT_LOAD2_NOABORT,
+ BUILT_IN_ASAN_REPORT_LOAD4_NOABORT,
+ BUILT_IN_ASAN_REPORT_LOAD8_NOABORT,
+ BUILT_IN_ASAN_REPORT_LOAD16_NOABORT,
+ BUILT_IN_ASAN_REPORT_LOAD_N_NOABORT },
+ { BUILT_IN_ASAN_REPORT_STORE1_NOABORT,
+ BUILT_IN_ASAN_REPORT_STORE2_NOABORT,
+ BUILT_IN_ASAN_REPORT_STORE4_NOABORT,
+ BUILT_IN_ASAN_REPORT_STORE8_NOABORT,
+ BUILT_IN_ASAN_REPORT_STORE16_NOABORT,
+ BUILT_IN_ASAN_REPORT_STORE_N_NOABORT } } };
if (size_in_bytes == -1)
{
*nargs = 2;
- return builtin_decl_implicit (report[is_store][5]);
+ return builtin_decl_implicit (report[recover_p][is_store][5]);
}
*nargs = 1;
- return builtin_decl_implicit (report[is_store][exact_log2 (size_in_bytes)]);
+ int size_log2 = exact_log2 (size_in_bytes);
+ return builtin_decl_implicit (report[recover_p][is_store][size_log2]);
}
/* Construct a function tree for __asan_{load,store}{1,2,4,8,16,_n}.
IS_STORE is either 1 (for a store) or 0 (for a load). */
static tree
-check_func (bool is_store, int size_in_bytes, int *nargs)
-{
- static enum built_in_function check[2][6]
- = { { BUILT_IN_ASAN_LOAD1, BUILT_IN_ASAN_LOAD2,
- BUILT_IN_ASAN_LOAD4, BUILT_IN_ASAN_LOAD8,
- BUILT_IN_ASAN_LOAD16, BUILT_IN_ASAN_LOADN },
- { BUILT_IN_ASAN_STORE1, BUILT_IN_ASAN_STORE2,
- BUILT_IN_ASAN_STORE4, BUILT_IN_ASAN_STORE8,
- BUILT_IN_ASAN_STORE16, BUILT_IN_ASAN_STOREN } };
+check_func (bool is_store, bool recover_p, HOST_WIDE_INT size_in_bytes,
+ int *nargs)
+{
+ static enum built_in_function check[2][2][6]
+ = { { { BUILT_IN_ASAN_LOAD1, BUILT_IN_ASAN_LOAD2,
+ BUILT_IN_ASAN_LOAD4, BUILT_IN_ASAN_LOAD8,
+ BUILT_IN_ASAN_LOAD16, BUILT_IN_ASAN_LOADN },
+ { BUILT_IN_ASAN_STORE1, BUILT_IN_ASAN_STORE2,
+ BUILT_IN_ASAN_STORE4, BUILT_IN_ASAN_STORE8,
+ BUILT_IN_ASAN_STORE16, BUILT_IN_ASAN_STOREN } },
+ { { BUILT_IN_ASAN_LOAD1_NOABORT,
+ BUILT_IN_ASAN_LOAD2_NOABORT,
+ BUILT_IN_ASAN_LOAD4_NOABORT,
+ BUILT_IN_ASAN_LOAD8_NOABORT,
+ BUILT_IN_ASAN_LOAD16_NOABORT,
+ BUILT_IN_ASAN_LOADN_NOABORT },
+ { BUILT_IN_ASAN_STORE1_NOABORT,
+ BUILT_IN_ASAN_STORE2_NOABORT,
+ BUILT_IN_ASAN_STORE4_NOABORT,
+ BUILT_IN_ASAN_STORE8_NOABORT,
+ BUILT_IN_ASAN_STORE16_NOABORT,
+ BUILT_IN_ASAN_STOREN_NOABORT } } };
if (size_in_bytes == -1)
{
*nargs = 2;
- return builtin_decl_implicit (check[is_store][5]);
+ return builtin_decl_implicit (check[recover_p][is_store][5]);
}
*nargs = 1;
- return builtin_decl_implicit (check[is_store][exact_log2 (size_in_bytes)]);
+ int size_log2 = exact_log2 (size_in_bytes);
+ return builtin_decl_implicit (check[recover_p][is_store][size_log2]);
}
/* Split the current basic block and create a condition statement
@@ -1492,7 +1604,7 @@ insert_if_then_before_iter (gimple cond,
}
/* Build
- (base_addr >> ASAN_SHADOW_SHIFT) + targetm.asan_shadow_offset (). */
+ (base_addr >> ASAN_SHADOW_SHIFT) + asan_shadow_offset (). */
static tree
build_shadow_mem_access (gimple_stmt_iterator *gsi, location_t location,
@@ -1509,7 +1621,7 @@ build_shadow_mem_access (gimple_stmt_iterator *gsi, location_t location,
gimple_set_location (g, location);
gsi_insert_after (gsi, g, GSI_NEW_STMT);
- t = build_int_cst (uintptr_type, targetm.asan_shadow_offset ());
+ t = build_int_cst (uintptr_type, asan_shadow_offset ());
g = gimple_build_assign_with_ops (PLUS_EXPR,
make_ssa_name (uintptr_type, NULL),
gimple_assign_lhs (g), t);
@@ -1597,22 +1709,13 @@ static void
build_check_stmt (location_t loc, tree base, tree len,
HOST_WIDE_INT size_in_bytes, gimple_stmt_iterator *iter,
bool is_non_zero_len, bool before_p, bool is_store,
- bool is_scalar_access, unsigned int align = 0,
- bool start_instrumented = false,
- bool end_instrumented = false)
+ bool is_scalar_access, unsigned int align = 0)
{
gimple_stmt_iterator gsi = *iter;
gimple g;
gcc_assert (!(size_in_bytes > 0 && !is_non_zero_len));
- if (start_instrumented && end_instrumented)
- {
- if (!before_p)
- gsi_next (iter);
- return;
- }
-
gsi = *iter;
base = unshare_expr (base);
@@ -1655,14 +1758,12 @@ build_check_stmt (location_t loc, tree base, tree len,
flags |= ASAN_CHECK_NON_ZERO_LEN;
if (is_scalar_access)
flags |= ASAN_CHECK_SCALAR_ACCESS;
- if (start_instrumented)
- flags |= ASAN_CHECK_START_INSTRUMENTED;
- if (end_instrumented)
- flags |= ASAN_CHECK_END_INSTRUMENTED;
- g = gimple_build_call_internal (IFN_ASAN_CHECK, 3,
+ g = gimple_build_call_internal (IFN_ASAN_CHECK, 4,
build_int_cst (integer_type_node, flags),
- base, len);
+ base, len,
+ build_int_cst (integer_type_node,
+ align / BITS_PER_UNIT));
gimple_set_location (g, loc);
if (before_p)
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
@@ -1689,6 +1790,8 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
tree type, base;
HOST_WIDE_INT size_in_bytes;
+ if (location == UNKNOWN_LOCATION)
+ location = EXPR_LOCATION (t);
type = TREE_TYPE (t);
switch (TREE_CODE (t))
@@ -1698,6 +1801,7 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
case INDIRECT_REF:
case MEM_REF:
case VAR_DECL:
+ case BIT_FIELD_REF:
break;
/* FALLTHRU */
default:
@@ -1770,6 +1874,22 @@ instrument_derefs (gimple_stmt_iterator *iter, tree t,
}
+/* Insert a memory reference into the hash table if access length
+ can be determined at compile time. */
+
+static void
+maybe_update_mem_ref_hash_table (tree base, tree len)
+{
+ if (!POINTER_TYPE_P (TREE_TYPE (base))
+ || !INTEGRAL_TYPE_P (TREE_TYPE (len)))
+ return;
+
+ HOST_WIDE_INT size_in_bytes = tree_fits_shwi_p (len) ? tree_to_shwi (len) : -1;
+
+ if (size_in_bytes != -1)
+ update_mem_ref_hash_table (base, size_in_bytes);
+}
+
/* Instrument an access to a contiguous memory region that starts at
the address pointed to by BASE, over a length of LEN (expressed in
the sizeof (*BASE) bytes). ITER points to the instruction before
@@ -1788,97 +1908,20 @@ instrument_mem_region_access (tree base, tree len,
|| integer_zerop (len))
return;
- /* If the beginning of the memory region has already been
- instrumented, do not instrument it. */
- bool start_instrumented = has_mem_ref_been_instrumented (base, 1);
-
- /* If the end of the memory region has already been instrumented, do
- not instrument it. */
- tree end = asan_mem_ref_get_end (base, len);
- bool end_instrumented = has_mem_ref_been_instrumented (end, 1);
-
HOST_WIDE_INT size_in_bytes = tree_fits_shwi_p (len) ? tree_to_shwi (len) : -1;
- build_check_stmt (location, base, len, size_in_bytes, iter,
- /*is_non_zero_len*/size_in_bytes > 0, /*before_p*/true,
- is_store, /*is_scalar_access*/false, /*align*/0,
- start_instrumented, end_instrumented);
-
- update_mem_ref_hash_table (base, 1);
- if (size_in_bytes != -1)
- update_mem_ref_hash_table (end, 1);
+ if ((size_in_bytes == -1)
+ || !has_mem_ref_been_instrumented (base, size_in_bytes))
+ {
+ build_check_stmt (location, base, len, size_in_bytes, iter,
+ /*is_non_zero_len*/size_in_bytes > 0, /*before_p*/true,
+ is_store, /*is_scalar_access*/false, /*align*/0);
+ }
+ maybe_update_mem_ref_hash_table (base, len);
*iter = gsi_for_stmt (gsi_stmt (*iter));
}
-/* Instrument the call (to the builtin strlen function) pointed to by
- ITER.
-
- This function instruments the access to the first byte of the
- argument, right before the call. After the call it instruments the
- access to the last byte of the argument; it uses the result of the
- call to deduce the offset of that last byte.
-
- Upon completion, iff the call has actually been instrumented, this
- function returns TRUE and *ITER points to the statement logically
- following the built-in strlen function call *ITER was initially
- pointing to. Otherwise, the function returns FALSE and *ITER
- remains unchanged. */
-
-static bool
-instrument_strlen_call (gimple_stmt_iterator *iter)
-{
- gimple g;
- gimple call = gsi_stmt (*iter);
- gcc_assert (is_gimple_call (call));
-
- tree callee = gimple_call_fndecl (call);
- gcc_assert (is_builtin_fn (callee)
- && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (callee) == BUILT_IN_STRLEN);
-
- location_t loc = gimple_location (call);
-
- tree len = gimple_call_lhs (call);
- if (len == NULL)
- /* Some passes might clear the return value of the strlen call;
- bail out in that case. Return FALSE as we are not advancing
- *ITER. */
- return false;
- gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (len)));
-
- len = maybe_cast_to_ptrmode (loc, len, iter, /*before_p*/false);
-
- tree str_arg = gimple_call_arg (call, 0);
- bool start_instrumented = has_mem_ref_been_instrumented (str_arg, 1);
-
- tree cptr_type = build_pointer_type (char_type_node);
- g = gimple_build_assign_with_ops (NOP_EXPR,
- make_ssa_name (cptr_type, NULL),
- str_arg, NULL);
- gimple_set_location (g, loc);
- gsi_insert_before (iter, g, GSI_SAME_STMT);
- str_arg = gimple_assign_lhs (g);
-
- build_check_stmt (loc, str_arg, NULL_TREE, 1, iter,
- /*is_non_zero_len*/true, /*before_p=*/true,
- /*is_store=*/false, /*is_scalar_access*/true, /*align*/0,
- start_instrumented, start_instrumented);
-
- g = gimple_build_assign_with_ops (POINTER_PLUS_EXPR,
- make_ssa_name (cptr_type, NULL),
- str_arg,
- len);
- gimple_set_location (g, loc);
- gsi_insert_after (iter, g, GSI_NEW_STMT);
-
- build_check_stmt (loc, gimple_assign_lhs (g), NULL_TREE, 1, iter,
- /*is_non_zero_len*/true, /*before_p=*/false,
- /*is_store=*/false, /*is_scalar_access*/true, /*align*/0);
-
- return true;
-}
-
/* Instrument the call to a built-in memory access function that is
pointed to by the iterator ITER.
@@ -1896,49 +1939,54 @@ instrument_builtin_call (gimple_stmt_iterator *iter)
gcc_checking_assert (gimple_call_builtin_p (call, BUILT_IN_NORMAL));
- tree callee = gimple_call_fndecl (call);
location_t loc = gimple_location (call);
- if (DECL_FUNCTION_CODE (callee) == BUILT_IN_STRLEN)
- iter_advanced_p = instrument_strlen_call (iter);
- else
- {
- asan_mem_ref src0, src1, dest;
- asan_mem_ref_init (&src0, NULL, 1);
- asan_mem_ref_init (&src1, NULL, 1);
- asan_mem_ref_init (&dest, NULL, 1);
+ asan_mem_ref src0, src1, dest;
+ asan_mem_ref_init (&src0, NULL, 1);
+ asan_mem_ref_init (&src1, NULL, 1);
+ asan_mem_ref_init (&dest, NULL, 1);
- tree src0_len = NULL_TREE, src1_len = NULL_TREE, dest_len = NULL_TREE;
- bool src0_is_store = false, src1_is_store = false,
- dest_is_store = false, dest_is_deref = false;
+ tree src0_len = NULL_TREE, src1_len = NULL_TREE, dest_len = NULL_TREE;
+ bool src0_is_store = false, src1_is_store = false, dest_is_store = false,
+ dest_is_deref = false, intercepted_p = true;
- if (get_mem_refs_of_builtin_call (call,
- &src0, &src0_len, &src0_is_store,
- &src1, &src1_len, &src1_is_store,
- &dest, &dest_len, &dest_is_store,
- &dest_is_deref))
+ if (get_mem_refs_of_builtin_call (call,
+ &src0, &src0_len, &src0_is_store,
+ &src1, &src1_len, &src1_is_store,
+ &dest, &dest_len, &dest_is_store,
+ &dest_is_deref, &intercepted_p))
+ {
+ if (dest_is_deref)
{
- if (dest_is_deref)
- {
- instrument_derefs (iter, dest.start, loc, dest_is_store);
- gsi_next (iter);
- iter_advanced_p = true;
- }
- else if (src0_len || src1_len || dest_len)
- {
- if (src0.start != NULL_TREE)
- instrument_mem_region_access (src0.start, src0_len,
- iter, loc, /*is_store=*/false);
- if (src1.start != NULL_TREE)
- instrument_mem_region_access (src1.start, src1_len,
- iter, loc, /*is_store=*/false);
- if (dest.start != NULL_TREE)
- instrument_mem_region_access (dest.start, dest_len,
- iter, loc, /*is_store=*/true);
- *iter = gsi_for_stmt (call);
- gsi_next (iter);
- iter_advanced_p = true;
- }
+ instrument_derefs (iter, dest.start, loc, dest_is_store);
+ gsi_next (iter);
+ iter_advanced_p = true;
+ }
+ else if (!intercepted_p
+ && (src0_len || src1_len || dest_len))
+ {
+ if (src0.start != NULL_TREE)
+ instrument_mem_region_access (src0.start, src0_len,
+ iter, loc, /*is_store=*/false);
+ if (src1.start != NULL_TREE)
+ instrument_mem_region_access (src1.start, src1_len,
+ iter, loc, /*is_store=*/false);
+ if (dest.start != NULL_TREE)
+ instrument_mem_region_access (dest.start, dest_len,
+ iter, loc, /*is_store=*/true);
+
+ *iter = gsi_for_stmt (call);
+ gsi_next (iter);
+ iter_advanced_p = true;
+ }
+ else
+ {
+ if (src0.start != NULL_TREE)
+ maybe_update_mem_ref_hash_table (src0.start, src0_len);
+ if (src1.start != NULL_TREE)
+ maybe_update_mem_ref_hash_table (src1.start, src1_len);
+ if (dest.start != NULL_TREE)
+ maybe_update_mem_ref_hash_table (dest.start, dest_len);
}
}
return iter_advanced_p;
@@ -2015,6 +2063,8 @@ maybe_instrument_call (gimple_stmt_iterator *iter)
case BUILT_IN_TRAP:
/* Don't instrument these. */
return false;
+ default:
+ break;
}
}
tree decl = builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
@@ -2022,7 +2072,39 @@ maybe_instrument_call (gimple_stmt_iterator *iter)
gimple_set_location (g, gimple_location (stmt));
gsi_insert_before (iter, g, GSI_SAME_STMT);
}
- return false;
+
+ bool instrumented = false;
+ if (gimple_store_p (stmt))
+ {
+ tree ref_expr = gimple_call_lhs (stmt);
+ instrument_derefs (iter, ref_expr,
+ gimple_location (stmt),
+ /*is_store=*/true);
+
+ instrumented = true;
+ }
+
+ /* Walk through gimple_call arguments and check them if needed. */
+ unsigned args_num = gimple_call_num_args (stmt);
+ for (unsigned i = 0; i < args_num; ++i)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ /* If ARG is not a non-aggregate register variable, the compiler in general
+ creates a temporary for it and passes it as an argument to the gimple call.
+ But in some cases, e.g. when we pass by value a small structure that
+ fits in a register, the compiler can avoid the extra overhead by pulling
+ out these temporaries.  In this case, we should check the argument. */
+ if (!is_gimple_reg (arg) && !is_gimple_min_invariant (arg))
+ {
+ instrument_derefs (iter, arg,
+ gimple_location (stmt),
+ /*is_store=*/false);
+ instrumented = true;
+ }
+ }
+ if (instrumented)
+ gsi_next (iter);
+ return instrumented;
}
/* Walk each instruction of all basic block and instrument those that
@@ -2127,19 +2209,20 @@ asan_dynamic_init_call (bool after_p)
const void *__name;
const void *__module_name;
uptr __has_dynamic_init;
+ __asan_global_source_location *__location;
} type. */
static tree
asan_global_struct (void)
{
- static const char *field_names[6]
+ static const char *field_names[7]
= { "__beg", "__size", "__size_with_redzone",
- "__name", "__module_name", "__has_dynamic_init" };
- tree fields[6], ret;
+ "__name", "__module_name", "__has_dynamic_init", "__location"};
+ tree fields[7], ret;
int i;
ret = make_node (RECORD_TYPE);
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 7; i++)
{
fields[i]
= build_decl (UNKNOWN_LOCATION, FIELD_DECL,
@@ -2150,8 +2233,13 @@ asan_global_struct (void)
if (i)
DECL_CHAIN (fields[i - 1]) = fields[i];
}
+ tree type_decl = build_decl (input_location, TYPE_DECL,
+ get_identifier ("__asan_global"), ret);
+ DECL_IGNORED_P (type_decl) = 1;
+ DECL_ARTIFICIAL (type_decl) = 1;
TYPE_FIELDS (ret) = fields[0];
- TYPE_NAME (ret) = get_identifier ("__asan_global");
+ TYPE_NAME (ret) = type_decl;
+ TYPE_STUB_DECL (ret) = type_decl;
layout_type (ret);
return ret;
}
@@ -2211,6 +2299,38 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
int has_dynamic_init = vnode ? vnode->dynamically_initialized : 0;
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
build_int_cst (uptr, has_dynamic_init));
+ tree locptr = NULL_TREE;
+ location_t loc = DECL_SOURCE_LOCATION (decl);
+ expanded_location xloc = expand_location (loc);
+ if (xloc.file != NULL)
+ {
+ static int lasanloccnt = 0;
+ char buf[25];
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LASANLOC", ++lasanloccnt);
+ tree var = build_decl (UNKNOWN_LOCATION, VAR_DECL, get_identifier (buf),
+ ubsan_get_source_location_type ());
+ TREE_STATIC (var) = 1;
+ TREE_PUBLIC (var) = 0;
+ DECL_ARTIFICIAL (var) = 1;
+ DECL_IGNORED_P (var) = 1;
+ pretty_printer filename_pp;
+ pp_string (&filename_pp, xloc.file);
+ tree str = asan_pp_string (&filename_pp);
+ tree ctor = build_constructor_va (TREE_TYPE (var), 3,
+ NULL_TREE, str, NULL_TREE,
+ build_int_cst (unsigned_type_node,
+ xloc.line), NULL_TREE,
+ build_int_cst (unsigned_type_node,
+ xloc.column));
+ TREE_CONSTANT (ctor) = 1;
+ TREE_STATIC (ctor) = 1;
+ DECL_INITIAL (var) = ctor;
+ varpool_node::finalize_decl (var);
+ locptr = fold_convert (uptr, build_fold_addr_expr (var));
+ }
+ else
+ locptr = build_int_cst (uptr, 0);
+ CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, locptr);
init = build_constructor (type, vinner);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
}
@@ -2240,6 +2360,9 @@ initialize_sanitizer_builtins (void)
pointer_sized_int_node, NULL_TREE);
tree BT_FN_VOID_INT
= build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
+ tree BT_FN_SIZE_CONST_PTR_INT
+ = build_function_type_list (size_type_node, const_ptr_type_node,
+ integer_type_node, NULL_TREE);
tree BT_FN_BOOL_VPTR_PTR_IX_INT_INT[5];
tree BT_FN_IX_CONST_VPTR_INT[5];
tree BT_FN_IX_VPTR_IX_INT[5];
@@ -2296,6 +2419,9 @@ initialize_sanitizer_builtins (void)
#define ATTR_TMPURE_NOTHROW_LEAF_LIST ECF_TM_PURE | ATTR_NOTHROW_LEAF_LIST
#undef ATTR_NORETURN_NOTHROW_LEAF_LIST
#define ATTR_NORETURN_NOTHROW_LEAF_LIST ECF_NORETURN | ATTR_NOTHROW_LEAF_LIST
+#undef ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST
+#define ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST \
+ ECF_CONST | ATTR_NORETURN_NOTHROW_LEAF_LIST
#undef ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST
#define ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST \
ECF_TM_PURE | ATTR_NORETURN_NOTHROW_LEAF_LIST
@@ -2305,6 +2431,11 @@ initialize_sanitizer_builtins (void)
#undef ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST
#define ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST \
/* ECF_COLD missing */ ATTR_NORETURN_NOTHROW_LEAF_LIST
+#undef ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST
+#define ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST \
+ /* ECF_COLD missing */ ATTR_CONST_NORETURN_NOTHROW_LEAF_LIST
+#undef ATTR_PURE_NOTHROW_LEAF_LIST
+#define ATTR_PURE_NOTHROW_LEAF_LIST ECF_PURE | ATTR_NOTHROW_LEAF_LIST
#undef DEF_SANITIZER_BUILTIN
#define DEF_SANITIZER_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
decl = add_builtin_function ("__builtin_" NAME, TYPE, ENUM, \
@@ -2314,6 +2445,15 @@ initialize_sanitizer_builtins (void)
#include "sanitizer.def"
+ /* -fsanitize=object-size uses __builtin_object_size, but that might
+ not be available for e.g. Fortran at this point. We use
+ DEF_SANITIZER_BUILTIN here only as a convenience macro. */
+ if ((flag_sanitize & SANITIZE_OBJECT_SIZE)
+ && !builtin_decl_implicit_p (BUILT_IN_OBJECT_SIZE))
+ DEF_SANITIZER_BUILTIN (BUILT_IN_OBJECT_SIZE, "object_size",
+ BT_FN_SIZE_CONST_PTR_INT,
+ ATTR_PURE_NOTHROW_LEAF_LIST)
+
#undef DEF_SANITIZER_BUILTIN
}
@@ -2387,6 +2527,8 @@ asan_finish_file (void)
{
tree fn = builtin_decl_implicit (BUILT_IN_ASAN_INIT);
append_to_statement_list (build_call_expr (fn, 0), &asan_ctor_statements);
+ fn = builtin_decl_implicit (BUILT_IN_ASAN_VERSION_MISMATCH_CHECK);
+ append_to_statement_list (build_call_expr (fn, 0), &asan_ctor_statements);
}
FOR_EACH_DEFINED_VARIABLE (vnode)
if (TREE_ASM_WRITTEN (vnode->decl)
@@ -2447,22 +2589,26 @@ asan_finish_file (void)
/* Expand the ASAN_{LOAD,STORE} builtins. */
-static bool
+bool
asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
{
gimple g = gsi_stmt (*iter);
location_t loc = gimple_location (g);
+ bool recover_p;
+ if (flag_sanitize & SANITIZE_USER_ADDRESS)
+ recover_p = (flag_sanitize_recover & SANITIZE_USER_ADDRESS) != 0;
+ else
+ recover_p = (flag_sanitize_recover & SANITIZE_KERNEL_ADDRESS) != 0;
HOST_WIDE_INT flags = tree_to_shwi (gimple_call_arg (g, 0));
gcc_assert (flags < ASAN_CHECK_LAST);
bool is_scalar_access = (flags & ASAN_CHECK_SCALAR_ACCESS) != 0;
bool is_store = (flags & ASAN_CHECK_STORE) != 0;
bool is_non_zero_len = (flags & ASAN_CHECK_NON_ZERO_LEN) != 0;
- bool start_instrumented = (flags & ASAN_CHECK_START_INSTRUMENTED) != 0;
- bool end_instrumented = (flags & ASAN_CHECK_END_INSTRUMENTED) != 0;
tree base = gimple_call_arg (g, 1);
tree len = gimple_call_arg (g, 2);
+ HOST_WIDE_INT align = tree_to_shwi (gimple_call_arg (g, 3));
HOST_WIDE_INT size_in_bytes
= is_scalar_access && tree_fits_shwi_p (len) ? tree_to_shwi (len) : -1;
@@ -2480,7 +2626,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
tree base_addr = gimple_assign_lhs (g);
int nargs;
- tree fun = check_func (is_store, size_in_bytes, &nargs);
+ tree fun = check_func (is_store, recover_p, size_in_bytes, &nargs);
if (nargs == 1)
g = gimple_build_call (fun, 1, base_addr);
else
@@ -2541,7 +2687,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
basic_block then_bb, else_bb;
gsi = create_cond_insert_point (&gsi, /*before_p*/false,
/*then_more_likely_p=*/false,
- /*create_then_fallthru_edge=*/false,
+ /*create_then_fallthru_edge*/recover_p,
&then_bb,
&else_bb);
@@ -2563,36 +2709,42 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
else
{
/* Slow path for 1, 2 and 4 byte accesses. */
-
- if (!start_instrumented)
+ /* Test (shadow != 0)
+ & (((base_addr & 7) + (real_size_in_bytes - 1)) >= shadow). */
+ tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
+ shadow_ptr_type);
+ gimple shadow_test = build_assign (NE_EXPR, shadow, 0);
+ gimple_seq seq = NULL;
+ gimple_seq_add_stmt (&seq, shadow_test);
+ /* An access aligned to at least 8 bytes can test just
+ (real_size_in_bytes - 1 >= shadow), as base_addr & 7 is known
+ to be 0. */
+ if (align < 8)
{
- /* Test (shadow != 0)
- & ((base_addr & 7) + (real_size_in_bytes - 1)) >= shadow). */
- tree shadow = build_shadow_mem_access (&gsi, loc, base_addr,
- shadow_ptr_type);
- gimple shadow_test = build_assign (NE_EXPR, shadow, 0);
- gimple_seq seq = NULL;
- gimple_seq_add_stmt (&seq, shadow_test);
- gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR, base_addr, 7));
- gimple_seq_add_stmt (&seq, build_type_cast (shadow_type,
- gimple_seq_last (seq)));
+ gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR,
+ base_addr, 7));
+ gimple_seq_add_stmt (&seq,
+ build_type_cast (shadow_type,
+ gimple_seq_last (seq)));
if (real_size_in_bytes > 1)
gimple_seq_add_stmt (&seq,
- build_assign (PLUS_EXPR, gimple_seq_last (seq),
+ build_assign (PLUS_EXPR,
+ gimple_seq_last (seq),
real_size_in_bytes - 1));
- gimple_seq_add_stmt (&seq, build_assign (GE_EXPR,
- gimple_seq_last (seq),
- shadow));
- gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR, shadow_test,
- gimple_seq_last (seq)));
- t = gimple_assign_lhs (gimple_seq_last (seq));
- gimple_seq_set_location (seq, loc);
- gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
+ t = gimple_assign_lhs (gimple_seq_last_stmt (seq));
}
+ else
+ t = build_int_cst (shadow_type, real_size_in_bytes - 1);
+ gimple_seq_add_stmt (&seq, build_assign (GE_EXPR, t, shadow));
+ gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR, shadow_test,
+ gimple_seq_last (seq)));
+ t = gimple_assign_lhs (gimple_seq_last (seq));
+ gimple_seq_set_location (seq, loc);
+ gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
/* For non-constant, misaligned or otherwise weird access sizes,
- check first and last byte. */
- if (size_in_bytes == -1 && !end_instrumented)
+ check first and last byte. */
+ if (size_in_bytes == -1)
{
g = gimple_build_assign_with_ops (MINUS_EXPR,
make_ssa_name (pointer_sized_int_node, NULL),
@@ -2623,9 +2775,8 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
shadow));
gimple_seq_add_stmt (&seq, build_assign (BIT_AND_EXPR, shadow_test,
gimple_seq_last (seq)));
- if (!start_instrumented)
- gimple_seq_add_stmt (&seq, build_assign (BIT_IOR_EXPR, t,
- gimple_seq_last (seq)));
+ gimple_seq_add_stmt (&seq, build_assign (BIT_IOR_EXPR, t,
+ gimple_seq_last (seq)));
t = gimple_assign_lhs (gimple_seq_last (seq));
gimple_seq_set_location (seq, loc);
gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING);
@@ -2640,7 +2791,7 @@ asan_expand_check_ifn (gimple_stmt_iterator *iter, bool use_calls)
/* Generate call to the run-time library (e.g. __asan_report_load8). */
gsi = gsi_start_bb (then_bb);
int nargs;
- tree fun = report_error_func (is_store, size_in_bytes, &nargs);
+ tree fun = report_error_func (is_store, recover_p, size_in_bytes, &nargs);
g = gimple_build_call (fun, nargs, base_addr, len);
gimple_set_location (g, loc);
gsi_insert_after (&gsi, g, GSI_NEW_STMT);
@@ -2755,120 +2906,4 @@ make_pass_asan_O0 (gcc::context *ctxt)
return new pass_asan_O0 (ctxt);
}
-/* Perform optimization of sanitize functions. */
-
-static unsigned int
-execute_sanopt (void)
-{
- basic_block bb;
-
- int asan_num_accesses = 0;
- if (flag_sanitize & SANITIZE_ADDRESS)
- {
- gimple_stmt_iterator gsi;
- FOR_EACH_BB_FN (bb, cfun)
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
- if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)
- && gimple_call_internal_fn (stmt) == IFN_ASAN_CHECK)
- ++asan_num_accesses;
- }
- }
-
- bool use_calls = ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD < INT_MAX
- && (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
- && asan_num_accesses >= ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD;
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- gimple_stmt_iterator gsi;
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
- {
- gimple stmt = gsi_stmt (gsi);
- bool no_next = false;
-
- if (!is_gimple_call (stmt))
- {
- gsi_next (&gsi);
- continue;
- }
-
- if (gimple_call_internal_p (stmt))
- {
- enum internal_fn ifn = gimple_call_internal_fn (stmt);
- switch (ifn)
- {
- case IFN_UBSAN_NULL:
- ubsan_expand_null_ifn (gsi);
- break;
- case IFN_ASAN_CHECK:
- {
- no_next = asan_expand_check_ifn (&gsi, use_calls);
- break;
- }
- default:
- break;
- }
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Optimized\n ");
- print_gimple_stmt (dump_file, stmt, 0, dump_flags);
- fprintf (dump_file, "\n");
- }
-
- if (!no_next)
- gsi_next (&gsi);
- }
- }
- return 0;
-}
-
-static bool
-gate_sanopt (void)
-{
- return flag_sanitize;
-}
-
-namespace {
-
-const pass_data pass_data_sanopt =
-{
- GIMPLE_PASS, /* type */
- "sanopt", /* name */
- OPTGROUP_NONE, /* optinfo_flags */
- true, /* has_gate */
- true, /* has_execute */
- TV_NONE, /* tv_id */
- ( PROP_ssa | PROP_cfg | PROP_gimple_leh ), /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- ( TODO_verify_flow | TODO_verify_stmts
- | TODO_update_ssa ), /* todo_flags_finish */
-};
-
-class pass_sanopt : public gimple_opt_pass
-{
-public:
- pass_sanopt (gcc::context *ctxt)
- : gimple_opt_pass (pass_data_sanopt, ctxt)
- {}
-
- /* opt_pass methods: */
- bool gate () { return gate_sanopt (); }
- unsigned int execute () { return execute_sanopt (); }
-
-}; // class pass_sanopt
-
-} // anon namespace
-
-gimple_opt_pass *
-make_pass_sanopt (gcc::context *ctxt)
-{
- return new pass_sanopt (ctxt);
-}
-
#include "gt-asan.h"
diff --git a/gcc/asan.h b/gcc/asan.h
index 08d5063c9e1..ab96952fd2a 100644
--- a/gcc/asan.h
+++ b/gcc/asan.h
@@ -28,6 +28,7 @@ extern rtx asan_emit_stack_protection (rtx, rtx, unsigned int, HOST_WIDE_INT *,
extern bool asan_protect_global (tree);
extern void initialize_sanitizer_builtins (void);
extern tree asan_dynamic_init_call (bool);
+extern bool asan_expand_check_ifn (gimple_stmt_iterator *, bool);
extern gimple_stmt_iterator create_cond_insert_point
(gimple_stmt_iterator *, bool, bool, bool, basic_block *, basic_block *);
@@ -36,7 +37,7 @@ extern gimple_stmt_iterator create_cond_insert_point
extern alias_set_type asan_shadow_set;
/* Shadow memory is found at
- (address >> ASAN_SHADOW_SHIFT) + targetm.asan_shadow_offset (). */
+ (address >> ASAN_SHADOW_SHIFT) + asan_shadow_offset (). */
#define ASAN_SHADOW_SHIFT 3
/* Red zone size, stack and global variables are padded by ASAN_RED_ZONE_SIZE
@@ -76,4 +77,32 @@ asan_red_zone_size (unsigned int size)
return c ? 2 * ASAN_RED_ZONE_SIZE - c : ASAN_RED_ZONE_SIZE;
}
+extern bool set_asan_shadow_offset (const char *);
+
+extern void set_sanitized_sections (const char *);
+
+/* Return TRUE if builtin with given FCODE will be intercepted by
+ libasan. */
+
+static inline bool
+asan_intercepted_p (enum built_in_function fcode)
+{
+ return fcode == BUILT_IN_INDEX
+ || fcode == BUILT_IN_MEMCHR
+ || fcode == BUILT_IN_MEMCMP
+ || fcode == BUILT_IN_MEMCPY
+ || fcode == BUILT_IN_MEMMOVE
+ || fcode == BUILT_IN_MEMSET
+ || fcode == BUILT_IN_STRCASECMP
+ || fcode == BUILT_IN_STRCAT
+ || fcode == BUILT_IN_STRCHR
+ || fcode == BUILT_IN_STRCMP
+ || fcode == BUILT_IN_STRCPY
+ || fcode == BUILT_IN_STRDUP
+ || fcode == BUILT_IN_STRLEN
+ || fcode == BUILT_IN_STRNCASECMP
+ || fcode == BUILT_IN_STRNCAT
+ || fcode == BUILT_IN_STRNCMP
+ || fcode == BUILT_IN_STRNCPY;
+}
#endif /* TREE_ASAN */
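
asan_intercepted_p above is what lets expand_builtin (see the gcc/builtins.c hunk below) keep these calls out-of-line so libasan's interceptors see them. Illustration only, not a test from this patch, assuming a libasan-linked -fsanitize=address build:

```c++
// Because strcpy is in the intercepted list, the call is not expanded
// inline and libasan's interceptor reports the heap-buffer-overflow.
#include <cstring>
#include <cstdlib>

int main ()
{
  char *p = static_cast<char *> (malloc (8));
  strcpy (p, "0123456789");   // 11 bytes (incl. NUL) into an 8-byte buffer
  free (p);
  return 0;
}
```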
diff --git a/gcc/builtin-attrs.def b/gcc/builtin-attrs.def
index 593df2d3922..1a01be31a2c 100644
--- a/gcc/builtin-attrs.def
+++ b/gcc/builtin-attrs.def
@@ -145,6 +145,8 @@ DEF_ATTR_TREE_LIST (ATTR_SENTINEL_NOTHROW_LIST, ATTR_SENTINEL, \
ATTR_NULL, ATTR_NOTHROW_LIST)
DEF_ATTR_TREE_LIST (ATTR_SENTINEL_NOTHROW_LEAF_LIST, ATTR_SENTINEL, \
ATTR_NULL, ATTR_NOTHROW_LEAF_LIST)
+DEF_ATTR_TREE_LIST (ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST, ATTR_CONST,\
+ ATTR_NULL, ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
/* Functions whose pointer parameter(s) are all nonnull. */
DEF_ATTR_TREE_LIST (ATTR_NONNULL_LIST, ATTR_NONNULL, ATTR_NULL, ATTR_NULL)
diff --git a/gcc/builtins.c b/gcc/builtins.c
index dd57b1ae42a..092ad18af63 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -57,7 +57,7 @@ along with GCC; see the file COPYING3. If not see
#include "value-prof.h"
#include "diagnostic-core.h"
#include "builtins.h"
-#include "ubsan.h"
+#include "asan.h"
#include "cilk.h"
@@ -5794,6 +5794,14 @@ expand_builtin (tree exp, rtx target, rtx subtarget, enum machine_mode mode,
enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
int flags;
+ /* When ASan is enabled, we don't want to expand some memory/string
+ builtins and rely on libsanitizer's hooks. This allows us to avoid
+ redundant checks and be sure, that possible overflow will be detected
+ by ASan. */
+
+ if ((flag_sanitize & SANITIZE_ADDRESS) && asan_intercepted_p (fcode))
+ return expand_call (exp, target, ignore);
+
if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
return targetm.expand_builtin (exp, target, subtarget, mode, ignore);
@@ -10298,14 +10306,6 @@ fold_builtin_0 (location_t loc, tree fndecl, bool ignore ATTRIBUTE_UNUSED)
case BUILT_IN_CLASSIFY_TYPE:
return fold_builtin_classify_type (NULL_TREE);
- case BUILT_IN_UNREACHABLE:
- if (flag_sanitize & SANITIZE_UNREACHABLE
- && (current_function_decl == NULL
- || !lookup_attribute ("no_sanitize_undefined",
- DECL_ATTRIBUTES (current_function_decl))))
- return ubsan_instrument_unreachable (loc);
- break;
-
default:
break;
}
@@ -11313,6 +11313,45 @@ build_call_expr (tree fndecl, int n, ...)
return build_call_expr_loc_array (UNKNOWN_LOCATION, fndecl, n, argarray);
}
+/* Helper function for build_call_* functions; build a CALL_EXPR with
+ indicated RETURN_TYPE, FN, and NARGS, but do not initialize any of
+ the argument slots. */
+
+static tree
+build_call_1 (tree return_type, tree fn, int nargs)
+{
+ tree t;
+
+ t = build_vl_exp (CALL_EXPR, nargs + 3);
+ TREE_TYPE (t) = return_type;
+ CALL_EXPR_FN (t) = fn;
+ CALL_EXPR_STATIC_CHAIN (t) = NULL;
+
+ return t;
+}
+
+/* Build internal call expression. This is just like CALL_EXPR, except
+ its CALL_EXPR_FN is NULL. It will get gimplified later into ordinary
+ internal function. */
+
+tree
+build_call_expr_internal_loc (location_t loc, enum internal_fn ifn,
+ tree type, int n, ...)
+{
+ va_list ap;
+ int i;
+
+ tree fn = build_call_1 (type, NULL_TREE, n);
+ va_start (ap, n);
+ for (i = 0; i < n; i++)
+ CALL_EXPR_ARG (fn, i) = va_arg (ap, tree);
+ va_end (ap);
+ SET_EXPR_LOCATION (fn, loc);
+ CALL_EXPR_IFN (fn) = ifn;
+ return fn;
+}
+
+
/* Construct a CALL_EXPR with type TYPE with FN as the function expression.
N arguments are passed in the array ARGARRAY. */
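
The fold_builtin_0 hunk above drops the folding-time expansion of __builtin_unreachable; with this patch the instrumentation is emitted elsewhere in the middle end (not visible in this excerpt), and the user-visible behavior stays the same. Illustration only, assuming -fsanitize=unreachable:

```c++
// Reaching __builtin_unreachable () is diagnosed at run time instead of
// being silent undefined behavior.
#include <cstdio>

static int sign (int x)
{
  if (x > 0)
    return 1;
  if (x < 0)
    return -1;
  __builtin_unreachable ();   // reached when x == 0
}

int main ()
{
  printf ("%d\n", sign (0));
}
```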
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 5a76ba3291e..4c3d3f7d185 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -169,7 +169,7 @@ along with GCC; see the file COPYING3. If not see
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
true, true, true, ATTRS, true, \
(flag_sanitize & (SANITIZE_ADDRESS | SANITIZE_THREAD \
- | SANITIZE_UNDEFINED)))
+ | SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT)))
#undef DEF_CILKPLUS_BUILTIN
#define DEF_CILKPLUS_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 701d1cef771..ee2b7f1e3ad 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,135 @@
+2015-09-25 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_division): Remove unnecessary code.
+ (ubsan_instrument_shift): Likewise.
+
+2015-09-25 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/64906
+ * c-ubsan.c (ubsan_instrument_division): Also pre-evaluate OP1.
+
+2015-07-23 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_division): Use unshare_expr throughout.
+ (ubsan_instrument_shift): Likewise.
+
+2015-07-23 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/66908
+ * c-ubsan.c: Include gimplify.h.
+ (ubsan_instrument_division): Unshare OP0 and OP1.
+ (ubsan_instrument_shift): Likewise.
+
+2015-05-25 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_shift): Use type0.
+
+2015-05-07 Marek Polacek <polacek@redhat.com>
+ Martin Uecker <uecker@eecs.berkeley.edu>
+
+ * c-ubsan.c (ubsan_instrument_bounds): Don't skip instrumenting
+ flexible member array-like members if SANITIZE_BOUNDS_STRICT.
+
+2015-03-07 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/65280
+ * c-ubsan.c (ubsan_instrument_bounds): Check for COMPONENT_REF
+ before trying to figure out whether we have a flexible array member.
+
+2014-11-28 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_shift): Use op1_utype for MINUS_EXPR
+ instead of unsigned_type_node.
+
+2014-11-28 Marek Polacek <polacek@redhat.com>
+
+ PR c/63862
+ * c-ubsan.c (ubsan_instrument_shift): Change the type of a MINUS_EXPR
+ to op1_utype.
+ * c-gimplify.c (c_gimplify_expr): Convert right operand of a shift
+ expression to unsigned_type_node.
+
+2014-11-18 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63813
+ * c-ubsan.c (ubsan_maybe_instrument_reference_or_call): Change type
+ argument to ptype, set type to TREE_TYPE (ptype). Don't call
+ get_pointer_alignment for non-pointers. Use ptype, or if it is
+ reference type, corresponding pointer type, as type of kind
+ argument.
+ (ubsan_maybe_instrument_reference,
+ ubsan_maybe_instrument_member_call): Adjust callers.
+
+2014-10-23 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_shift): Perform the MINUS_EXPR
+ in unsigned type.
+
+2014-10-22 Jakub Jelinek <jakub@redhat.com>
+ Yury Gribov <y.gribov@samsung.com>
+
+ * c-ubsan.c (ubsan_instrument_division, ubsan_instrument_shift,
+ ubsan_instrument_vla): Check bits in flag_sanitize_recover bitmask
+ instead of flag_sanitize_recover as bool flag.
+
+2014-09-24 Marek Polacek <polacek@redhat.com>
+
+ PR c/61405
+ PR c/53874
+ * c-common.h (struct c_common_resword): Don't define CPP_KEYWORD.
+
+2014-09-10 Jakub Jelinek <jakub@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_division, ubsan_instrument_shift,
+ ubsan_instrument_vla, ubsan_instrument_return): Adjust
+ ubsan_create_data callers.
+ (ubsan_instrument_bounds): Don't emit UBSAN_BOUNDS at all if
+ index is constant or BIT_AND_EXPR with constant mask and is
+ small enough for the bound.
+ * c-gimplify.c (ubsan_walk_array_refs_r): For ADDR_EXPR of
+ ARRAY_REF, make sure the inner ARRAY_REF is not walked again.
+
+2014-08-01 Jakub Jelinek <jakub@redhat.com>
+
+ * c-common.h (min_align_of_type): Removed prototype.
+ * c-common.c (min_align_of_type): Removed.
+ * c-ubsan.h (ubsan_maybe_instrument_reference,
+ ubsan_maybe_instrument_member_call): New prototypes.
+ * c-ubsan.c: Include stor-layout.h and builtins.h.
+ (ubsan_maybe_instrument_reference_or_call,
+ ubsan_maybe_instrument_reference, ubsan_maybe_instrument_call): New
+ functions.
+
+2014-08-01 Jakub Jelinek <jakub@redhat.com>
+
+ * c-common.h (min_align_of_type): Removed prototype.
+ * c-common.c (min_align_of_type): Removed.
+ * c-ubsan.h (ubsan_maybe_instrument_reference,
+ ubsan_maybe_instrument_member_call): New prototypes.
+ * c-ubsan.c: Include stor-layout.h and builtins.h.
+ (ubsan_maybe_instrument_reference_or_call,
+ ubsan_maybe_instrument_reference, ubsan_maybe_instrument_call): New
+ functions.
+
+2014-07-15 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_bounds): Don't instrument if
+ TYPE_MAX_VALUE is NULL.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * c-ubsan.c (ubsan_instrument_division): Handle REAL_TYPEs. Perform
+ INT_MIN / -1 sanitization only for integer types.
+
+2014-04-22 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/60275
+ * c-ubsan.c (ubsan_instrument_return): Return __builtin_trap ()
+ if flag_sanitize_undefined_trap_on_error.
+ (ubsan_instrument_division, ubsan_instrument_shift,
+ ubsan_instrument_vla): Likewise. Use __ubsan_handle_*_abort ()
+ if !flag_sanitize_recover.
+
2015-01-20 Marek Polacek <polacek@redhat.com>
Backport from mainline
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 88567018502..079f218d514 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -4927,27 +4927,6 @@ c_common_get_alias_set (tree t)
return -1;
}
-/* Return the least alignment required for type TYPE. */
-
-unsigned int
-min_align_of_type (tree type)
-{
- unsigned int align = TYPE_ALIGN (type);
- align = MIN (align, BIGGEST_ALIGNMENT);
- if (!TYPE_USER_ALIGN (type))
- {
-#ifdef BIGGEST_FIELD_ALIGNMENT
- align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
-#endif
- unsigned int field_align = align;
-#ifdef ADJUST_FIELD_ALIGN
- tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE, type);
- field_align = ADJUST_FIELD_ALIGN (field, field_align);
-#endif
- align = MIN (align, field_align);
- }
- return align / BITS_PER_UNIT;
-}
/* Compute the value of 'sizeof (TYPE)' or '__alignof__ (TYPE)', where
the IS_SIZEOF parameter indicates which operator is being applied.
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index 06dcb7c2803..1b34be45afc 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -329,9 +329,6 @@ struct c_common_resword
/* Extra cpp_ttype values for C++. */
-/* A token type for keywords, as opposed to ordinary identifiers. */
-#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))
-
/* A token type for template-ids. If a template-id is processed while
parsing tentatively, it is replaced with a CPP_TEMPLATE_ID token;
the value of the CPP_TEMPLATE_ID is whatever was returned by
@@ -758,7 +755,6 @@ extern tree c_wrap_maybe_const (tree, bool);
extern tree c_save_expr (tree);
extern tree c_common_truthvalue_conversion (location_t, tree);
extern void c_apply_type_quals_to_decl (int, tree);
-extern unsigned int min_align_of_type (tree);
extern tree c_sizeof_or_alignof_type (location_t, tree, bool, bool, int);
extern tree c_alignof_expr (location_t, tree);
/* Print an error message for invalid operands to arith operation CODE.
diff --git a/gcc/c-family/c-gimplify.c b/gcc/c-family/c-gimplify.c
index 9308580c7fb..837599b034d 100644
--- a/gcc/c-family/c-gimplify.c
+++ b/gcc/c-family/c-gimplify.c
@@ -45,6 +45,8 @@ along with GCC; see the file COPYING3. If not see
#include "c-pretty-print.h"
#include "cgraph.h"
#include "cilk.h"
+#include "c-ubsan.h"
+#include "pointer-set.h"
/* The gimplification pass converts the language-dependent trees
(ld-trees) emitted by the parser into language-independent trees
@@ -67,6 +69,52 @@ along with GCC; see the file COPYING3. If not see
walk back up, we check that they fit our constraints, and copy them
into temporaries if not. */
+/* Callback for c_genericize. */
+
+static tree
+ubsan_walk_array_refs_r (tree *tp, int *walk_subtrees, void *data)
+{
+ struct pointer_set_t *pset = (struct pointer_set_t *) data;
+
+ /* Since walk_tree doesn't call the callback function on the decls
+ in BIND_EXPR_VARS, we have to walk them manually. */
+ if (TREE_CODE (*tp) == BIND_EXPR)
+ {
+ for (tree decl = BIND_EXPR_VARS (*tp); decl; decl = DECL_CHAIN (decl))
+ {
+ if (TREE_STATIC (decl))
+ {
+ *walk_subtrees = 0;
+ continue;
+ }
+ walk_tree (&DECL_INITIAL (decl), ubsan_walk_array_refs_r, pset,
+ pset);
+ walk_tree (&DECL_SIZE (decl), ubsan_walk_array_refs_r, pset, pset);
+ walk_tree (&DECL_SIZE_UNIT (decl), ubsan_walk_array_refs_r, pset,
+ pset);
+ }
+ }
+ else if (TREE_CODE (*tp) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (*tp, 0)) == ARRAY_REF)
+ {
+ ubsan_maybe_instrument_array_ref (&TREE_OPERAND (*tp, 0), true);
+ /* Make sure ubsan_maybe_instrument_array_ref is not called again
+ on the ARRAY_REF, the above call might not instrument anything
+ as the index might be constant or masked, so ensure it is not
+ walked again and walk its subtrees manually. */
+ tree aref = TREE_OPERAND (*tp, 0);
+ pset->add (aref);
+ *walk_subtrees = 0;
+ walk_tree (&TREE_OPERAND (aref, 0), ubsan_walk_array_refs_r, pset, pset);
+ walk_tree (&TREE_OPERAND (aref, 1), ubsan_walk_array_refs_r, pset, pset);
+ walk_tree (&TREE_OPERAND (aref, 2), ubsan_walk_array_refs_r, pset, pset);
+ walk_tree (&TREE_OPERAND (aref, 3), ubsan_walk_array_refs_r, pset, pset);
+ }
+ else if (TREE_CODE (*tp) == ARRAY_REF)
+ ubsan_maybe_instrument_array_ref (tp, false);
+ return NULL_TREE;
+}
+
/* Gimplification of statement trees. */
/* Convert the tree representation of FNDECL from C frontend trees to
@@ -79,6 +127,14 @@ c_genericize (tree fndecl)
int local_dump_flags;
struct cgraph_node *cgn;
+ if (flag_sanitize & SANITIZE_BOUNDS)
+ {
+ struct pointer_set_t *pset = pointer_set_create ();
+ walk_tree (&DECL_SAVED_TREE (fndecl), ubsan_walk_array_refs_r, pset,
+ pset);
+ pointer_set_destroy (pset);
+ }
+
/* Dump the C-specific tree IR. */
dump_orig = dump_begin (TDI_original, &local_dump_flags);
if (dump_orig)
@@ -179,6 +235,24 @@ c_gimplify_expr (tree *expr_p, gimple_seq *pre_p ATTRIBUTE_UNUSED,
switch (code)
{
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ {
+ /* We used to convert the right operand of a shift-expression
+ to an integer_type_node in the FEs. But it is unnecessary
+ and not desirable for diagnostics and sanitizers. We keep
+ this here to not pessimize the code, but we convert to an
+ unsigned type, because negative shift counts are undefined
+ anyway.
+ We should get rid of this conversion when we have a proper
+ type demotion/promotion pass. */
+ tree *op1_p = &TREE_OPERAND (*expr_p, 1);
+ if (TREE_CODE (TREE_TYPE (*op1_p)) != VECTOR_TYPE
+ && TYPE_MAIN_VARIANT (TREE_TYPE (*op1_p)) != unsigned_type_node)
+ *op1_p = convert (unsigned_type_node, *op1_p);
+ break;
+ }
+
case DECL_EXPR:
/* This is handled mostly by gimplify.c, but we have to deal with
not warning about int x = x; as it is a GCC extension to turn off
@@ -205,16 +279,16 @@ c_gimplify_expr (tree *expr_p, gimple_seq *pre_p ATTRIBUTE_UNUSED,
}
break;
}
-
+
case CILK_SPAWN_STMT:
- gcc_assert
- (fn_contains_cilk_spawn_p (cfun)
+ gcc_assert
+ (fn_contains_cilk_spawn_p (cfun)
&& cilk_detect_spawn_and_unwrap (expr_p));
-
+
/* If errors are seen, then just process it as a CALL_EXPR. */
if (!seen_error ())
return (enum gimplify_status) gimplify_cilk_spawn (expr_p);
-
+
case MODIFY_EXPR:
case INIT_EXPR:
case CALL_EXPR:
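
ubsan_walk_array_refs_r above treats an ARRAY_REF under an ADDR_EXPR specially, so one-past-the-end addresses stay silent while an actual out-of-bounds access is flagged. Illustration only, assuming -fsanitize=bounds:

```c++
#include <cstdio>

int a[4];

int main ()
{
  int *end = &a[4];           // ADDR_EXPR of ARRAY_REF: off-by-one allowed, silent
  volatile int i = 4;
  printf ("%p %d\n", (void *) end, a[i]);  // a[4] load: "index 4 out of bounds"
  return 0;
}
```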
diff --git a/gcc/c-family/c-ubsan.c b/gcc/c-family/c-ubsan.c
index 2c0d009a2a8..1b71d472085 100644
--- a/gcc/c-family/c-ubsan.c
+++ b/gcc/c-family/c-ubsan.c
@@ -30,6 +30,10 @@ along with GCC; see the file COPYING3. If not see
#include "c-family/c-common.h"
#include "c-family/c-ubsan.h"
#include "asan.h"
+#include "internal-fn.h"
+#include "stor-layout.h"
+#include "builtins.h"
+#include "gimplify.h"
/* Instrument division by zero and INT_MIN / -1. If not instrumenting,
return NULL_TREE. */
@@ -46,18 +50,27 @@ ubsan_instrument_division (location_t loc, tree op0, tree op1)
gcc_assert (TYPE_MAIN_VARIANT (TREE_TYPE (op0))
== TYPE_MAIN_VARIANT (TREE_TYPE (op1)));
- /* TODO: REAL_TYPE is not supported yet. */
- if (TREE_CODE (type) != INTEGER_TYPE)
- return NULL_TREE;
+ op0 = unshare_expr (op0);
+ op1 = unshare_expr (op1);
- t = fold_build2 (EQ_EXPR, boolean_type_node,
- op1, build_int_cst (type, 0));
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && (flag_sanitize & SANITIZE_DIVIDE))
+ t = fold_build2 (EQ_EXPR, boolean_type_node,
+ op1, build_int_cst (type, 0));
+ else if (TREE_CODE (type) == REAL_TYPE
+ && (flag_sanitize & SANITIZE_FLOAT_DIVIDE))
+ t = fold_build2 (EQ_EXPR, boolean_type_node,
+ op1, build_real (type, dconst0));
+ else
+ return NULL_TREE;
/* We check INT_MIN / -1 only for signed types. */
- if (!TYPE_UNSIGNED (type))
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && (flag_sanitize & SANITIZE_DIVIDE)
+ && !TYPE_UNSIGNED (type))
{
tree x;
- tt = fold_build2 (EQ_EXPR, boolean_type_node, op1,
+ tt = fold_build2 (EQ_EXPR, boolean_type_node, unshare_expr (op1),
build_int_cst (type, -1));
x = fold_build2 (EQ_EXPR, boolean_type_node, op0,
TYPE_MIN_VALUE (type));
@@ -72,16 +85,27 @@ ubsan_instrument_division (location_t loc, tree op0, tree op1)
/* In case we have a SAVE_EXPR in a conditional context, we need to
make sure it gets evaluated before the condition. */
- t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
- tree data = ubsan_create_data ("__ubsan_overflow_data",
- &loc, NULL,
- ubsan_type_descriptor (type, false),
- NULL_TREE);
- data = build_fold_addr_expr_loc (loc, data);
- tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW);
- tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
- ubsan_encode_value (op1));
- t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_zero_node);
+ t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op0), t);
+ t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op1), t);
+ if (flag_sanitize_undefined_trap_on_error)
+ tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_overflow_data", 1, &loc,
+ ubsan_type_descriptor (type), NULL_TREE,
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_DIVIDE)
+ ? BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW
+ : BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW_ABORT;
+ tt = builtin_decl_explicit (bcode);
+ op0 = unshare_expr (op0);
+ op1 = unshare_expr (op1);
+ tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
+ ubsan_encode_value (op1));
+ }
+ t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);
return t;
}
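
As a standalone illustration (not part of the patch) of what the reworked ubsan_instrument_division catches for integer operands:

```c++
// g++ -fsanitize=integer-divide-by-zero; the program still has undefined
// behavior, the point is the runtime diagnostic emitted before it.
#include <climits>
#include <cstdio>

int main ()
{
  volatile int zero = 0, minus_one = -1, min = INT_MIN;
  printf ("%d\n", min / minus_one);   // INT_MIN / -1: diagnosed
  printf ("%d\n", 1 / zero);          // division by zero: diagnosed
  return 0;
}
```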
@@ -99,38 +123,45 @@ ubsan_instrument_shift (location_t loc, enum tree_code code,
HOST_WIDE_INT op0_prec = TYPE_PRECISION (type0);
tree uprecm1 = build_int_cst (op1_utype, op0_prec - 1);
+ op0 = unshare_expr (op0);
+ op1 = unshare_expr (op1);
+
t = fold_convert_loc (loc, op1_utype, op1);
t = fold_build2 (GT_EXPR, boolean_type_node, t, uprecm1);
+ /* If this is not a signed operation, don't perform overflow checks.
+ Also punt on bit-fields. */
+ if (!INTEGRAL_TYPE_P (type0)
+ || TYPE_OVERFLOW_WRAPS (type0)
+ || GET_MODE_BITSIZE (TYPE_MODE (type0)) != TYPE_PRECISION (type0))
+ ;
+
/* For signed x << y, in C99/C11, the following:
(unsigned) x >> (uprecm1 - y)
if non-zero, is undefined. */
- if (code == LSHIFT_EXPR
- && !TYPE_UNSIGNED (type0)
- && flag_isoc99)
+ else if (code == LSHIFT_EXPR && flag_isoc99 && cxx_dialect < cxx11)
{
- tree x = fold_build2 (MINUS_EXPR, unsigned_type_node, uprecm1,
- fold_convert (op1_utype, op1));
+ tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
+ fold_convert (op1_utype, unshare_expr (op1)));
tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
tt = fold_build2 (NE_EXPR, boolean_type_node, tt,
build_int_cst (TREE_TYPE (tt), 0));
}
- /* For signed x << y, in C++11/C++14, the following:
+ /* For signed x << y, in C++11 and later, the following:
x < 0 || ((unsigned) x >> (uprecm1 - y))
if > 1, is undefined. */
- if (code == LSHIFT_EXPR
- && !TYPE_UNSIGNED (TREE_TYPE (op0))
- && (cxx_dialect == cxx11 || cxx_dialect == cxx1y))
+ else if (code == LSHIFT_EXPR && cxx_dialect >= cxx11)
{
- tree x = fold_build2 (MINUS_EXPR, unsigned_type_node, uprecm1,
- fold_convert (op1_utype, op1));
- tt = fold_convert_loc (loc, unsigned_type_for (type0), op0);
+ tree x = fold_build2 (MINUS_EXPR, op1_utype, uprecm1,
+ fold_convert (op1_utype, unshare_expr (op1)));
+ tt = fold_convert_loc (loc, unsigned_type_for (type0),
+ unshare_expr (op0));
tt = fold_build2 (RSHIFT_EXPR, TREE_TYPE (tt), tt, x);
tt = fold_build2 (GT_EXPR, boolean_type_node, tt,
build_int_cst (TREE_TYPE (tt), 1));
- x = fold_build2 (LT_EXPR, boolean_type_node, op0,
+ x = fold_build2 (LT_EXPR, boolean_type_node, unshare_expr (op0),
build_int_cst (type0, 0));
tt = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, x, tt);
}
@@ -142,21 +173,31 @@ ubsan_instrument_shift (location_t loc, enum tree_code code,
/* In case we have a SAVE_EXPR in a conditional context, we need to
make sure it gets evaluated before the condition. */
- t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), op0, t);
- tree data = ubsan_create_data ("__ubsan_shift_data",
- &loc, NULL,
- ubsan_type_descriptor (type0, false),
- ubsan_type_descriptor (type1, false),
- NULL_TREE);
-
- data = build_fold_addr_expr_loc (loc, data);
-
+ t = fold_build2 (COMPOUND_EXPR, TREE_TYPE (t), unshare_expr (op0), t);
t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t,
tt ? tt : integer_zero_node);
- tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS);
- tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
- ubsan_encode_value (op1));
- t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_zero_node);
+
+ if (flag_sanitize_undefined_trap_on_error)
+ tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_shift_data", 1, &loc,
+ ubsan_type_descriptor (type0),
+ ubsan_type_descriptor (type1), NULL_TREE,
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_SHIFT)
+ ? BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS
+ : BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT;
+ tt = builtin_decl_explicit (bcode);
+ op0 = unshare_expr (op0);
+ op1 = unshare_expr (op1);
+ tt = build_call_expr_loc (loc, tt, 3, data, ubsan_encode_value (op0),
+ ubsan_encode_value (op1));
+ }
+ t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);
return t;
}
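
The shift instrumentation now selects the C99 or C++11 rule by cxx_dialect and skips wrapping types. Illustration only of what it diagnoses:

```c++
// g++ -std=c++11 -fsanitize=shift
#include <cstdio>

int main ()
{
  volatile int one = 1, big = 40, neg = -2;
  printf ("%d\n", one << big);   // shift count 40 >= width of int: diagnosed
  printf ("%d\n", neg << 1);     // shifting a negative value (C++11 rule): diagnosed
  return 0;
}
```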
@@ -170,14 +211,22 @@ ubsan_instrument_vla (location_t loc, tree size)
tree t, tt;
t = fold_build2 (LE_EXPR, boolean_type_node, size, build_int_cst (type, 0));
- tree data = ubsan_create_data ("__ubsan_vla_data",
- &loc, NULL,
- ubsan_type_descriptor (type, false),
- NULL_TREE);
- data = build_fold_addr_expr_loc (loc, data);
- tt = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE);
- tt = build_call_expr_loc (loc, tt, 2, data, ubsan_encode_value (size));
- t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_zero_node);
+ if (flag_sanitize_undefined_trap_on_error)
+ tt = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_vla_data", 1, &loc,
+ ubsan_type_descriptor (type), NULL_TREE,
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_VLA)
+ ? BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE
+ : BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE_ABORT;
+ tt = builtin_decl_explicit (bcode);
+ tt = build_call_expr_loc (loc, tt, 2, data, ubsan_encode_value (size));
+ }
+ t = fold_build3 (COND_EXPR, void_type_node, t, tt, void_node);
return t;
}
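
ubsan_instrument_vla rejects non-positive bounds at run time; per the c-decl.c and cp/decl.c hunks later in this patch, functions marked no_sanitize_undefined are skipped. Illustration only (VLAs are a GNU extension in C++):

```c++
// g++ -fsanitize=vla-bound
#include <cstdio>

int main ()
{
  volatile int n = 0;
  int vla[n];                 // non-positive bound: diagnosed at run time
  printf ("%zu\n", sizeof vla);
  return 0;
}
```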
@@ -187,12 +236,233 @@ ubsan_instrument_vla (location_t loc, tree size)
tree
ubsan_instrument_return (location_t loc)
{
+ if (flag_sanitize_undefined_trap_on_error)
+ return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
/* It is possible that PCH zapped table with definitions of sanitizer
builtins. Reinitialize them if needed. */
initialize_sanitizer_builtins ();
- tree data = ubsan_create_data ("__ubsan_missing_return_data", &loc,
- NULL, NULL_TREE);
+ tree data = ubsan_create_data ("__ubsan_missing_return_data", 1, &loc,
+ NULL_TREE, NULL_TREE);
tree t = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_MISSING_RETURN);
return build_call_expr_loc (loc, t, 1, build_fold_addr_expr_loc (loc, data));
}
+
+/* Instrument array bounds for ARRAY_REFs. We create special builtin,
+ that gets expanded in the sanopt pass, and make an array dimension
+ of it. ARRAY is the array, *INDEX is an index to the array.
+ Return NULL_TREE if no instrumentation is emitted.
+ IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
+
+tree
+ubsan_instrument_bounds (location_t loc, tree array, tree *index,
+ bool ignore_off_by_one)
+{
+ tree type = TREE_TYPE (array);
+ tree domain = TYPE_DOMAIN (type);
+
+ if (domain == NULL_TREE || TYPE_MAX_VALUE (domain) == NULL_TREE)
+ return NULL_TREE;
+
+ tree bound = TYPE_MAX_VALUE (domain);
+ if (ignore_off_by_one)
+ bound = fold_build2 (PLUS_EXPR, TREE_TYPE (bound), bound,
+ build_int_cst (TREE_TYPE (bound), 1));
+
+ /* Detect flexible array members and suchlike, unless
+ -fsanitize=bounds-strict. */
+ tree base = get_base_address (array);
+ if ((flag_sanitize & SANITIZE_BOUNDS_STRICT) == 0
+ && TREE_CODE (array) == COMPONENT_REF
+ && base && (TREE_CODE (base) == INDIRECT_REF
+ || TREE_CODE (base) == MEM_REF))
+ {
+ tree next = NULL_TREE;
+ tree cref = array;
+
+ /* Walk all structs/unions. */
+ while (TREE_CODE (cref) == COMPONENT_REF)
+ {
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
+ for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
+ next && TREE_CODE (next) != FIELD_DECL;
+ next = DECL_CHAIN (next))
+ ;
+ if (next)
+ /* Not a last element. Instrument it. */
+ break;
+ /* Ok, this is the last field of the structure/union. But the
+ aggregate containing the field must be the last field too,
+ recursively. */
+ cref = TREE_OPERAND (cref, 0);
+ }
+ if (!next)
+ /* Don't instrument this flexible array member-like array in non-strict
+ -fsanitize=bounds mode. */
+ return NULL_TREE;
+ }
+
+ /* Don't emit instrumentation in the most common cases. */
+ tree idx = NULL_TREE;
+ if (TREE_CODE (*index) == INTEGER_CST)
+ idx = *index;
+ else if (TREE_CODE (*index) == BIT_AND_EXPR
+ && TREE_CODE (TREE_OPERAND (*index, 1)) == INTEGER_CST)
+ idx = TREE_OPERAND (*index, 1);
+ if (idx
+ && TREE_CODE (bound) == INTEGER_CST
+ && tree_int_cst_sgn (idx) >= 0
+ && tree_int_cst_le (idx, bound))
+ return NULL_TREE;
+
+ *index = save_expr (*index);
+ /* Create a "(T *) 0" tree node to describe the array type. */
+ tree zero_with_type = build_int_cst (build_pointer_type (type), 0);
+ return build_call_expr_internal_loc (loc, IFN_UBSAN_BOUNDS,
+ void_type_node, 3, zero_with_type,
+ *index, bound);
+}
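
Two details of ubsan_instrument_bounds above are worth illustrating: trailing flexible-array-like members are only checked under the stricter bounds mode, and provably in-range constant indices get no check at all. Illustration only:

```c++
// Plain -fsanitize=bounds vs -fsanitize=bounds-strict.
#include <cstdlib>

struct msg { int len; char data[1]; };   // pre-C99 flexible-array idiom

int main ()
{
  msg *m = static_cast<msg *> (malloc (sizeof (msg) + 16));
  m->data[4] = 'x';   // trailing-array access: skipped by -fsanitize=bounds,
                      // reported under -fsanitize=bounds-strict
  int a[8];
  a[3] = 0;           // constant, provably in-range index: no check emitted
  free (m);
  return a[3];
}
```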
+
+/* Return true iff T is an array that was instrumented by SANITIZE_BOUNDS. */
+
+bool
+ubsan_array_ref_instrumented_p (const_tree t)
+{
+ if (TREE_CODE (t) != ARRAY_REF)
+ return false;
+
+ tree op1 = TREE_OPERAND (t, 1);
+ return TREE_CODE (op1) == COMPOUND_EXPR
+ && TREE_CODE (TREE_OPERAND (op1, 0)) == CALL_EXPR
+ && CALL_EXPR_FN (TREE_OPERAND (op1, 0)) == NULL_TREE
+ && CALL_EXPR_IFN (TREE_OPERAND (op1, 0)) == IFN_UBSAN_BOUNDS;
+}
+
+/* Instrument an ARRAY_REF, if it hasn't already been instrumented.
+ IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
+
+void
+ubsan_maybe_instrument_array_ref (tree *expr_p, bool ignore_off_by_one)
+{
+ if (!ubsan_array_ref_instrumented_p (*expr_p)
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (current_function_decl)))
+ {
+ tree op0 = TREE_OPERAND (*expr_p, 0);
+ tree op1 = TREE_OPERAND (*expr_p, 1);
+ tree e = ubsan_instrument_bounds (EXPR_LOCATION (*expr_p), op0, &op1,
+ ignore_off_by_one);
+ if (e != NULL_TREE)
+ {
+ tree t = copy_node (*expr_p);
+ TREE_OPERAND (t, 1) = build2 (COMPOUND_EXPR, TREE_TYPE (op1),
+ e, op1);
+ *expr_p = t;
+ }
+ }
+}
+
+static tree
+ubsan_maybe_instrument_reference_or_call (location_t loc, tree op, tree ptype,
+ enum ubsan_null_ckind ckind)
+{
+ if (current_function_decl == NULL_TREE
+ || lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (current_function_decl)))
+ return NULL_TREE;
+
+ tree type = TREE_TYPE (ptype);
+ tree orig_op = op;
+ bool instrument = false;
+ unsigned int mina = 0;
+
+ if (flag_sanitize & SANITIZE_ALIGNMENT)
+ {
+ mina = min_align_of_type (type);
+ if (mina <= 1)
+ mina = 0;
+ }
+ while ((TREE_CODE (op) == NOP_EXPR
+ || TREE_CODE (op) == NON_LVALUE_EXPR)
+ && TREE_CODE (TREE_TYPE (op)) == POINTER_TYPE)
+ op = TREE_OPERAND (op, 0);
+ if (TREE_CODE (op) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (op)) == REFERENCE_TYPE)
+ {
+ if (mina && mina > min_align_of_type (TREE_TYPE (TREE_TYPE (op))))
+ instrument = true;
+ }
+ else
+ {
+ if ((flag_sanitize & SANITIZE_NULL) && TREE_CODE (op) == ADDR_EXPR)
+ {
+ bool strict_overflow_p = false;
+ /* tree_single_nonzero_warnv_p will not return true for non-weak
+ non-automatic decls with -fno-delete-null-pointer-checks,
+ which is disabled during -fsanitize=null. We don't want to
+ instrument those, just weak vars though. */
+ int save_flag_delete_null_pointer_checks
+ = flag_delete_null_pointer_checks;
+ flag_delete_null_pointer_checks = 1;
+ if (!tree_single_nonzero_warnv_p (op, &strict_overflow_p)
+ || strict_overflow_p)
+ instrument = true;
+ flag_delete_null_pointer_checks
+ = save_flag_delete_null_pointer_checks;
+ }
+ else if (flag_sanitize & SANITIZE_NULL)
+ instrument = true;
+ if (mina && mina > 1)
+ {
+ if (!POINTER_TYPE_P (TREE_TYPE (op))
+ || mina > get_pointer_alignment (op) / BITS_PER_UNIT)
+ instrument = true;
+ }
+ }
+ if (!instrument)
+ return NULL_TREE;
+ op = save_expr (orig_op);
+ gcc_assert (POINTER_TYPE_P (ptype));
+ if (TREE_CODE (ptype) == REFERENCE_TYPE)
+ ptype = build_pointer_type (TREE_TYPE (ptype));
+ tree kind = build_int_cst (ptype, ckind);
+ tree align = build_int_cst (pointer_sized_int_node, mina);
+ tree call
+ = build_call_expr_internal_loc (loc, IFN_UBSAN_NULL, void_type_node,
+ 3, op, kind, align);
+ TREE_SIDE_EFFECTS (call) = 1;
+ return fold_build2 (COMPOUND_EXPR, TREE_TYPE (op), call, op);
+}
+
+/* Instrument a NOP_EXPR to REFERENCE_TYPE if needed. */
+
+void
+ubsan_maybe_instrument_reference (tree stmt)
+{
+ tree op = TREE_OPERAND (stmt, 0);
+ op = ubsan_maybe_instrument_reference_or_call (EXPR_LOCATION (stmt), op,
+ TREE_TYPE (stmt),
+ UBSAN_REF_BINDING);
+ if (op)
+ TREE_OPERAND (stmt, 0) = op;
+}
+
+/* Instrument a CALL_EXPR to a method if needed. */
+
+void
+ubsan_maybe_instrument_member_call (tree stmt, bool is_ctor)
+{
+ if (call_expr_nargs (stmt) == 0)
+ return;
+ tree op = CALL_EXPR_ARG (stmt, 0);
+ if (op == error_mark_node
+ || !POINTER_TYPE_P (TREE_TYPE (op)))
+ return;
+ op = ubsan_maybe_instrument_reference_or_call (EXPR_LOCATION (stmt), op,
+ TREE_TYPE (op),
+ is_ctor ? UBSAN_CTOR_CALL
+ : UBSAN_MEMBER_CALL);
+ if (op)
+ CALL_EXPR_ARG (stmt, 0) = op;
+}
diff --git a/gcc/c-family/c-ubsan.h b/gcc/c-family/c-ubsan.h
index e504b908fc6..7feec45db06 100644
--- a/gcc/c-family/c-ubsan.h
+++ b/gcc/c-family/c-ubsan.h
@@ -25,5 +25,10 @@ extern tree ubsan_instrument_division (location_t, tree, tree);
extern tree ubsan_instrument_shift (location_t, enum tree_code, tree, tree);
extern tree ubsan_instrument_vla (location_t, tree);
extern tree ubsan_instrument_return (location_t);
+extern tree ubsan_instrument_bounds (location_t, tree, tree *, bool);
+extern bool ubsan_array_ref_instrumented_p (const_tree);
+extern void ubsan_maybe_instrument_array_ref (tree *, bool);
+extern void ubsan_maybe_instrument_reference (tree);
+extern void ubsan_maybe_instrument_member_call (tree, bool);
#endif /* GCC_C_UBSAN_H */
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index 57be674171d..d9353c5aac7 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,60 @@
+2016-01-06 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/69099
+ * c-convert.c (convert) [INTEGER_TYPE]: Drop ARG. Don't pass ARG to
+ ubsan_instrument_float_cast. Fold EXPR. Use NULL_TREE instead of
+ NULL.
+
+2015-09-04 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/67279
+ * c-typeck.c (build_binary_op): Don't instrument static initializers.
+
+2015-01-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64344
+ * c-typeck.c (convert_for_assignment, c_finish_return): For
+ -fsanitize=float-cast-overflow casts from REAL_TYPE to integer/enum
+ types also set in_late_binary_op around convert call.
+ * c-convert.c (convert): For -fsanitize=float-cast-overflow REAL_TYPE
+ to integral type casts, if not in_late_binary_op, pass c_fully_fold
+ result on expr as last argument to ubsan_instrument_float_cast,
+ if in_late_binary_op, don't use c_save_expr but save_expr.
+
+2014-12-17 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64289
+ * c-convert.c: Include ubsan.h.
+ (convert): For real -> integral casts and
+ -fsanitize=float-cast-overflow don't call convert_to_integer, but
+ instead instrument the float cast directly.
+
+2014-11-28 Marek Polacek <polacek@redhat.com>
+
+ PR c/63862
+ * c-typeck.c (build_binary_op) <RSHIFT_EXPR, LSHIFT_EXPR>: Don't
+ convert the right operand to integer type.
+
+2014-09-24 Marek Polacek <polacek@redhat.com>
+
+ PR c/61405
+ PR c/53874
+ * c-parser.c: Don't define CPP_KEYWORD.
+ (c_parser_switch_statement): Pass original type to c_finish_case.
+ * c-tree.h (c_finish_case): Update declaration.
+ * c-typeck.c (c_finish_case): Add TYPE parameter. Pass it
+ conditionally to c_do_switch_warnings.
+
+2014-06-30 Marek Polacek <polacek@redhat.com>
+
+ * c-decl.c (grokdeclarator): Don't instrument VLAs if the function
+ has no_sanitize_undefined attribute.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * c-typeck.c (build_binary_op): Call ubsan_instrument_division
+ also when SANITIZE_FLOAT_DIVIDE is on.
+
2015-02-01 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
diff --git a/gcc/c/c-convert.c b/gcc/c/c-convert.c
index 38bacdaa4df..7d59ebf4182 100644
--- a/gcc/c/c-convert.c
+++ b/gcc/c/c-convert.c
@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "c-tree.h"
#include "langhooks.h"
#include "target.h"
+#include "ubsan.h"
/* Change of width--truncation and extension of integers or reals--
is represented with NOP_EXPR. Proper functioning of many things
@@ -110,6 +111,26 @@ convert (tree type, tree expr)
case INTEGER_TYPE:
case ENUMERAL_TYPE:
+ if (flag_sanitize & SANITIZE_FLOAT_CAST
+ && TREE_CODE (TREE_TYPE (expr)) == REAL_TYPE
+ && COMPLETE_TYPE_P (type)
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (current_function_decl)))
+ {
+ if (in_late_binary_op)
+ expr = save_expr (expr);
+ else
+ {
+ expr = c_save_expr (expr);
+ expr = c_fully_fold (expr, false, NULL);
+ }
+ tree check = ubsan_instrument_float_cast (loc, type, expr);
+ expr = fold_build1 (FIX_TRUNC_EXPR, type, expr);
+ if (check == NULL_TREE)
+ return expr;
+ return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
+ }
ret = convert_to_integer (type, e);
goto maybe_fold;
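
The new INTEGER_TYPE/ENUMERAL_TYPE branch instruments real-to-integer conversions. Illustration only, assuming -fsanitize=float-cast-overflow:

```c++
#include <cstdio>

int main ()
{
  volatile double big = 1e30;
  int i = (int) big;            // value not representable in int: diagnosed
  printf ("%d\n", i);
  return 0;
}
```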
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 47907d5ec7f..265fdb2e91b 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -5461,7 +5461,11 @@ grokdeclarator (const struct c_declarator *declarator,
this_size_varies = size_varies = true;
warn_variable_length_array (name, size);
if (flag_sanitize & SANITIZE_VLA
- && decl_context == NORMAL)
+ && decl_context == NORMAL
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES
+ (current_function_decl)))
{
/* Evaluate the array size only once. */
size = c_save_expr (size);
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 3ab01c0d7de..6aa6e47272c 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -125,11 +125,6 @@ c_parse_init (void)
C++). It would then be possible to share more of the C and C++
lexer code, if desired. */
-/* The following local token type is used. */
-
-/* A keyword. */
-#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))
-
/* More information about the type of a CPP_NAME token. */
typedef enum c_id_kind {
/* An ordinary identifier. */
@@ -5190,7 +5185,7 @@ c_parser_switch_statement (c_parser *parser)
save_break = c_break_label;
c_break_label = NULL_TREE;
body = c_parser_c99_block_statement (parser);
- c_finish_case (body);
+ c_finish_case (body, ce.original_type);
if (c_break_label)
{
location_t here = c_parser_peek_token (parser)->location;
diff --git a/gcc/c/c-tree.h b/gcc/c/c-tree.h
index 53768d619b7..44f2db6e056 100644
--- a/gcc/c/c-tree.h
+++ b/gcc/c/c-tree.h
@@ -617,7 +617,7 @@ extern void process_init_element (location_t, struct c_expr, bool,
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
-extern void c_finish_case (tree);
+extern void c_finish_case (tree, tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index bc24a3b31e8..cfb58cc6a39 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -5762,12 +5762,14 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type,
{
tree ret;
bool save = in_late_binary_op;
- if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE)
+ if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE
+ || (coder == REAL_TYPE
+ && (codel == INTEGER_TYPE || codel == ENUMERAL_TYPE)
+ && (flag_sanitize & SANITIZE_FLOAT_CAST)))
in_late_binary_op = true;
ret = convert_and_check (expr_loc != UNKNOWN_LOCATION
? expr_loc : location, type, orig_rhs);
- if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE)
- in_late_binary_op = save;
+ in_late_binary_op = save;
return ret;
}
@@ -9219,7 +9221,11 @@ c_finish_return (location_t loc, tree retval, tree origtype)
save = in_late_binary_op;
if (TREE_CODE (TREE_TYPE (res)) == BOOLEAN_TYPE
- || TREE_CODE (TREE_TYPE (res)) == COMPLEX_TYPE)
+ || TREE_CODE (TREE_TYPE (res)) == COMPLEX_TYPE
+ || (TREE_CODE (TREE_TYPE (t)) == REAL_TYPE
+ && (TREE_CODE (TREE_TYPE (res)) == INTEGER_TYPE
+ || TREE_CODE (TREE_TYPE (res)) == ENUMERAL_TYPE)
+ && (flag_sanitize & SANITIZE_FLOAT_CAST)))
in_late_binary_op = true;
inner = t = convert (TREE_TYPE (res), t);
in_late_binary_op = save;
@@ -9425,10 +9431,11 @@ do_case (location_t loc, tree low_value, tree high_value)
return label;
}
-/* Finish the switch statement. */
+/* Finish the switch statement. TYPE is the original type of the
+ controlling expression of the switch, or NULL_TREE. */
void
-c_finish_case (tree body)
+c_finish_case (tree body, tree type)
{
struct c_switch *cs = c_switch_stack;
location_t switch_location;
@@ -9438,7 +9445,7 @@ c_finish_case (tree body)
/* Emit warnings as needed. */
switch_location = EXPR_LOCATION (cs->switch_expr);
c_do_switch_warnings (cs->cases, switch_location,
- TREE_TYPE (cs->switch_expr),
+ type ? type : TREE_TYPE (cs->switch_expr),
SWITCH_COND (cs->switch_expr));
/* Pop the stack. */
@@ -10425,11 +10432,6 @@ build_binary_op (location_t location, enum tree_code code,
/* Use the type of the value to be shifted. */
result_type = type0;
- /* Convert the non vector shift-count to an integer, regardless
- of size of value being shifted. */
- if (TREE_CODE (TREE_TYPE (op1)) != VECTOR_TYPE
- && TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
- op1 = convert (integer_type_node, op1);
/* Avoid converting op1 to result_type later. */
converted = 1;
}
@@ -10473,11 +10475,6 @@ build_binary_op (location_t location, enum tree_code code,
/* Use the type of the value to be shifted. */
result_type = type0;
- /* Convert the non vector shift-count to an integer, regardless
- of size of value being shifted. */
- if (TREE_CODE (TREE_TYPE (op1)) != VECTOR_TYPE
- && TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
- op1 = convert (integer_type_node, op1);
/* Avoid converting op1 to result_type later. */
converted = 1;
}
@@ -11000,18 +10997,21 @@ build_binary_op (location_t location, enum tree_code code,
return error_mark_node;
}
- if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE))
+ if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE))
&& current_function_decl != 0
&& !lookup_attribute ("no_sanitize_undefined",
DECL_ATTRIBUTES (current_function_decl))
- && (doing_div_or_mod || doing_shift))
+ && (doing_div_or_mod || doing_shift)
+ && !require_constant_value)
{
/* OP0 and/or OP1 might have side-effects. */
op0 = c_save_expr (op0);
op1 = c_save_expr (op1);
op0 = c_fully_fold (op0, false, NULL);
op1 = c_fully_fold (op1, false, NULL);
- if (doing_div_or_mod && (flag_sanitize & SANITIZE_DIVIDE))
+ if (doing_div_or_mod && (flag_sanitize & (SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE)))
instrument_expr = ubsan_instrument_division (location, op0, op1);
else if (doing_shift && (flag_sanitize & SANITIZE_SHIFT))
instrument_expr = ubsan_instrument_shift (location, code, op0, op1);
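
With the SANITIZE_FLOAT_DIVIDE bits added above, floating division by zero can be diagnosed independently of the integer check. Illustration only:

```c++
// g++ -fsanitize=float-divide-by-zero
#include <cstdio>

int main ()
{
  volatile double one = 1.0, zero = 0.0;
  printf ("%f\n", one / zero);  // IEEE infinity, but diagnosed when this
                                // sub-sanitizer is enabled
  return 0;
}
```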
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 010fd177f4a..61673b78a60 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -248,6 +248,15 @@ align_local_variable (tree decl)
return align / BITS_PER_UNIT;
}
+/* Align given offset BASE with ALIGN. Truncate up if ALIGN_UP is true,
+ down otherwise. Return truncated BASE value. */
+
+static inline unsigned HOST_WIDE_INT
+align_base (HOST_WIDE_INT base, unsigned HOST_WIDE_INT align, bool align_up)
+{
+ return align_up ? (base + align - 1) & -align : base & -align;
+}
+
/* Allocate SIZE bytes at byte alignment ALIGN from the stack frame.
Return the frame offset. */
@@ -256,20 +265,17 @@ alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align)
{
HOST_WIDE_INT offset, new_frame_offset;
- new_frame_offset = frame_offset;
if (FRAME_GROWS_DOWNWARD)
{
- new_frame_offset -= size + frame_phase;
- new_frame_offset &= -align;
- new_frame_offset += frame_phase;
+ new_frame_offset
+ = align_base (frame_offset - frame_phase - size,
+ align, false) + frame_phase;
offset = new_frame_offset;
}
else
{
- new_frame_offset -= frame_phase;
- new_frame_offset += align - 1;
- new_frame_offset &= -align;
- new_frame_offset += frame_phase;
+ new_frame_offset
+ = align_base (frame_offset - frame_phase, align, true) + frame_phase;
offset = new_frame_offset;
new_frame_offset += size;
}
@@ -737,6 +743,18 @@ union_stack_vars (size_t a, size_t b)
}
}
+/* Return true if the current function should have its stack frame
+ protected by address sanitizer. */
+
+static inline bool
+asan_sanitize_stack_p (void)
+{
+ return ((flag_sanitize & SANITIZE_ADDRESS)
+ && ASAN_STACK
+ && !lookup_attribute ("no_sanitize_address",
+ DECL_ATTRIBUTES (current_function_decl)));
+}
+
/* A subroutine of expand_used_vars. Binpack the variables into
partitions constrained by the interference graph. The overall
algorithm used is as follows:
@@ -798,7 +816,7 @@ partition_stack_vars (void)
sizes, as the shorter vars wouldn't be adequately protected.
Don't do that for "large" (unsupported) alignment objects,
those aren't protected anyway. */
- if ((flag_sanitize & SANITIZE_ADDRESS) && ASAN_STACK && isize != jsize
+ if (asan_sanitize_stack_p () && isize != jsize
&& ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
break;
@@ -981,15 +999,18 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)
{
base = virtual_stack_vars_rtx;
- if ((flag_sanitize & SANITIZE_ADDRESS) && ASAN_STACK && pred)
+ if (asan_sanitize_stack_p () && pred)
{
- HOST_WIDE_INT prev_offset = frame_offset;
+ HOST_WIDE_INT prev_offset
+ = align_base (frame_offset,
+ MAX (alignb, ASAN_RED_ZONE_SIZE),
+ !FRAME_GROWS_DOWNWARD);
tree repr_decl = NULL_TREE;
-
offset
= alloc_stack_frame_space (stack_vars[i].size
+ ASAN_RED_ZONE_SIZE,
MAX (alignb, ASAN_RED_ZONE_SIZE));
+
data->asan_vec.safe_push (prev_offset);
data->asan_vec.safe_push (offset + stack_vars[i].size);
/* Find best representative of the partition.
@@ -1169,7 +1190,7 @@ defer_stack_allocation (tree var, bool toplevel)
/* If stack protection is enabled, *all* stack variables must be deferred,
so that we can re-order the strings to the top of the frame.
Similarly for Address Sanitizer. */
- if (flag_stack_protect || ((flag_sanitize & SANITIZE_ADDRESS) && ASAN_STACK))
+ if (flag_stack_protect || asan_sanitize_stack_p ())
return true;
/* We handle "large" alignment via dynamic allocation. We want to handle
@@ -1839,7 +1860,7 @@ expand_used_vars (void)
expand_stack_vars (stack_protect_decl_phase_2, &data);
}
- if ((flag_sanitize & SANITIZE_ADDRESS) && ASAN_STACK)
+ if (asan_sanitize_stack_p ())
/* Phase 3, any partitions that need asan protection
in addition to phase 1 and 2. */
expand_stack_vars (asan_decl_phase_3, &data);
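
align_base centralizes the rounding that alloc_stack_frame_space and the ASan red-zone bookkeeping previously open-coded. A standalone sketch of the same arithmetic (ALIGN assumed to be a power of two, as in the stack-frame code):

```c++
#include <cstdint>
#include <cstdio>

static inline uint64_t
align_base (int64_t base, uint64_t align, bool align_up)
{
  return align_up ? (base + align - 1) & -align : base & -align;
}

int main ()
{
  printf ("%llu\n", (unsigned long long) align_base (37, 16, true));   // 48
  printf ("%llu\n", (unsigned long long) align_base (37, 16, false));  // 32
}
```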
diff --git a/gcc/common.opt b/gcc/common.opt
index 2259f29d19e..71fa4815d03 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -211,6 +211,10 @@ bool flag_opts_finished
Variable
unsigned int flag_sanitize
+; What sanitizers should recover from errors
+Variable
+unsigned int flag_sanitize_recover = SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT | SANITIZE_KERNEL_ADDRESS
+
; Flag whether a prefix has been added to dump_base_name
Variable
bool dump_base_name_prefixed = false
@@ -862,6 +866,27 @@ fsanitize=
Common Driver Report Joined
Select what to sanitize
+fasan-shadow-offset=
+Common Joined RejectNegative Var(common_deferred_options) Defer
+-fasan-shadow-offset=<number> Use custom shadow memory offset.
+
+fsanitize-sections=
+Common Joined RejectNegative Var(common_deferred_options) Defer
+-fsanitize-sections=<sec1,sec2,...> Sanitize global variables
+in user-defined sections.
+
+fsanitize-recover=
+Common Report Joined
+After diagnosing undefined behavior attempt to continue execution
+
+fsanitize-recover
+Common Report
+This switch is deprecated; use -fsanitize-recover= instead
+
+fsanitize-undefined-trap-on-error
+Common Driver Report Var(flag_sanitize_undefined_trap_on_error) Init(0)
+Use trap instead of a library function for undefined behavior sanitization
+
fasynchronous-unwind-tables
Common Report Var(flag_asynchronous_unwind_tables) Optimization
Generate unwind tables that are exact at each instruction boundary
@@ -1095,6 +1120,11 @@ fdump-noaddr
Common Report Var(flag_dump_noaddr)
Suppress output of addresses in debugging dumps
+freport-bug
+Common Driver Var(flag_report_bug)
+Collect and dump debug information into temporary file if ICE in C/C++
+compiler occured.
+
fdump-passes
Common Var(flag_dump_passes) Init(0)
Dump optimization passes
@@ -1725,7 +1755,7 @@ Common Var(common_deferred_options) Defer
frandom-seed=
Common Joined RejectNegative Var(common_deferred_options) Defer
--frandom-seed=<string> Make compile reproducible using <string>
+-frandom-seed=<number> Make compile reproducible using <number>
; This switch causes the command line that was used to create an
; object file to be recorded into the object file. The exact format
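
A hedged summary of how the new recover/trap options interact at run time, based on the entries above and the c-ubsan.c changes earlier in the patch; exact handler behavior depends on the libubsan build, and the file name below is a placeholder:

```c++
// The same out-of-range shift under three option sets:
//
//   g++ -fsanitize=shift t.cc
//     default: recovering handler (flag_sanitize_recover covers UBSan by
//     default, per the initializer above); message printed, execution goes on
//   g++ -fsanitize=shift -fno-sanitize-recover=shift t.cc
//     __ubsan_handle_..._abort handler: message printed, program stops
//   g++ -fsanitize=shift -fsanitize-undefined-trap-on-error t.cc
//     no libubsan call at all, just __builtin_trap ()
#include <cstdio>

int main ()
{
  volatile int n = 100;
  printf ("%d\n", 1 << n);   // shift count out of range
  return 0;
}
```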
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 89b705e899c..670857fadd2 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -780,8 +780,8 @@ aarch64_simd_expand_args (rtx target, int icode, int have_retval,
tree exp, builtin_simd_arg *args)
{
rtx pat;
- tree arg[SIMD_MAX_BUILTIN_ARGS];
- rtx op[SIMD_MAX_BUILTIN_ARGS];
+ tree arg[SIMD_MAX_BUILTIN_ARGS + 1];
+ rtx op[SIMD_MAX_BUILTIN_ARGS + 1]; /* First element for result operand. */
enum machine_mode tmode = insn_data[icode].operand[0].mode;
enum machine_mode mode[SIMD_MAX_BUILTIN_ARGS];
int argc = 0;
@@ -901,7 +901,7 @@ aarch64_simd_expand_builtin (int fcode, tree exp, rtx target)
aarch64_simd_builtin_datum *d =
&aarch64_simd_builtin_data[fcode - (AARCH64_SIMD_BUILTIN_BASE + 1)];
enum insn_code icode = d->code;
- builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS];
+ builtin_simd_arg args[SIMD_MAX_BUILTIN_ARGS + 1];
int num_args = insn_data[d->code].n_operands;
int is_void = 0;
int k;
diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h
index dce2f2da10d..d375624ab0a 100644
--- a/gcc/config/aarch64/aarch64-linux.h
+++ b/gcc/config/aarch64/aarch64-linux.h
@@ -23,6 +23,12 @@
#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1"
+#undef ASAN_CC1_SPEC
+#define ASAN_CC1_SPEC "%{%:sanitize(address):-funwind-tables}"
+
+#undef CC1_SPEC
+#define CC1_SPEC GNU_USER_TARGET_CC1_SPEC ASAN_CC1_SPEC
+
#define CPP_SPEC "%{pthread:-D_REENTRANT}"
#define LINUX_TARGET_LINK_SPEC "%{h*} \
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 76a20a61e77..fb25cf01d7e 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -10044,6 +10044,14 @@ aarch64_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
return default_use_by_pieces_infrastructure_p (size, align, op, speed_p);
}
+/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
+
+static unsigned HOST_WIDE_INT
+aarch64_asan_shadow_offset (void)
+{
+ return (HOST_WIDE_INT_1 << 36);
+}
+
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST aarch64_address_cost
@@ -10297,6 +10305,9 @@ aarch64_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
+#undef TARGET_ASAN_SHADOW_OFFSET
+#define TARGET_ASAN_SHADOW_OFFSET aarch64_asan_shadow_offset
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-aarch64.h"
diff --git a/gcc/config/elfos.h b/gcc/config/elfos.h
index c1d5553123d..7929708cbef 100644
--- a/gcc/config/elfos.h
+++ b/gcc/config/elfos.h
@@ -313,7 +313,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
&& (DECL) && DECL_SIZE (DECL)) \
{ \
size_directive_output = 1; \
- size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ size = tree_to_uhwi (DECL_SIZE_UNIT (DECL)); \
ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, size); \
} \
\
@@ -341,7 +341,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
&& !size_directive_output) \
{ \
size_directive_output = 1; \
- size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ size = tree_to_uhwi (DECL_SIZE_UNIT (DECL)); \
ASM_OUTPUT_SIZE_DIRECTIVE (FILE, name, size); \
} \
} \
diff --git a/gcc/convert.c b/gcc/convert.c
index ff7c9ac39d3..bfc4f9e2215 100644
--- a/gcc/convert.c
+++ b/gcc/convert.c
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "target.h"
#include "langhooks.h"
+#include "ubsan.h"
/* Convert EXPR to some pointer or reference type TYPE.
EXPR must be pointer, reference, integer, enumeral, or literal zero;
@@ -403,6 +404,7 @@ convert_to_integer (tree type, tree expr)
tree intype = TREE_TYPE (expr);
unsigned int inprec = element_precision (intype);
unsigned int outprec = element_precision (type);
+ location_t loc = EXPR_LOCATION (expr);
/* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
be. Consider `enum E = { a, b = (enum E) 3 };'. */
@@ -862,7 +864,20 @@ convert_to_integer (tree type, tree expr)
return build1 (CONVERT_EXPR, type, expr);
case REAL_TYPE:
- return build1 (FIX_TRUNC_EXPR, type, expr);
+ if (flag_sanitize & SANITIZE_FLOAT_CAST
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (current_function_decl)))
+ {
+ expr = save_expr (expr);
+ tree check = ubsan_instrument_float_cast (loc, type, expr);
+ expr = build1 (FIX_TRUNC_EXPR, type, expr);
+ if (check == NULL_TREE)
+ return expr;
+ return fold_build2 (COMPOUND_EXPR, TREE_TYPE (expr), check, expr);
+ }
+ else
+ return build1 (FIX_TRUNC_EXPR, type, expr);
case FIXED_POINT_TYPE:
return build1 (FIXED_CONVERT_EXPR, type, expr);
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 83bf91d54bc..91c4667fc83 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,84 @@
+2015-11-25 Jason Merrill <jason@redhat.com>
+
+ * lambda.c (maybe_add_lambda_conv_op): Only set
+ no_sanitize_undefined if SANITIZE_NULL.
+
+2015-11-25 Jason Merrill <jason@redhat.com>
+
+ PR c++/67941
+ * lambda.c (maybe_add_lambda_conv_op): Mark _FUN as
+ no_sanitize_undefined.
+
+2015-07-31 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/66977
+ * typeck.c (get_member_function_from_ptrfunc): Don't sanitize
+ RSHIFT_EXPR.
+
+2015-02-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64984
+ * except.c (check_noexcept_r): Return NULL for internal
+ calls.
+
+2014-12-02 Marek Polacek <polacek@redhat.com>
+
+ * constexpr.c (cxx_eval_check_shift_p): New function.
+ (cxx_eval_binary_expression): Call it. Set NON_CONSTANT_P if it
+ returns true.
+
+2014-12-01 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63956
+ * constexpr.c: Include ubsan.h.
+ (cxx_eval_call_expression): Bail out for IFN_UBSAN_{NULL,BOUNDS}
+ internal functions and for ubsan builtins.
+ * error.c: Include internal-fn.h.
+ (dump_expr): Add printing of internal functions.
+
+2014-11-28 Marek Polacek <polacek@redhat.com>
+
+ PR c/63862
+ * typeck.c (cp_build_binary_op) <RSHIFT_EXPR, LSHIFT_EXPR>: Don't
+ convert the right operand to integer type.
+
+2014-09-24 Marek Polacek <polacek@redhat.com>
+
+ PR c/61405
+ PR c/53874
+ * semantics.c (finish_switch_cond): Call unlowered_expr_type.
+ * tree.c (bot_manip): Add default case.
+ * parser.c (cp_parser_primary_expression): Cast the controlling
+ expression of a switch to an int.
+ (cp_parser_unqualified_id): Likewise.
+
+2014-08-01 Jakub Jelinek <jakub@redhat.com>
+
+ * cp-gimplify.c (cp_genericize_r): For -fsanitize=null and/or
+ -fsanitize=alignment call ubsan_maybe_instrument_reference
+ for casts to REFERENCE_TYPE and ubsan_maybe_instrument_member_call
+ for calls to member functions.
+
+2014-08-01 Jakub Jelinek <jakub@redhat.com>
+
+ * cp-gimplify.c (cp_genericize_r): For -fsanitize=null and/or
+ -fsanitize=alignment call ubsan_maybe_instrument_reference
+ for casts to REFERENCE_TYPE and ubsan_maybe_instrument_member_call
+ for calls to member functions.
+
+2014-06-30 Marek Polacek <polacek@redhat.com>
+
+ * cp-gimplify.c (cp_genericize): Don't instrument returns if the
+ function has no_sanitize_undefined attribute.
+ * decl.c (compute_array_index_type): Don't instrument VLAs if the
+ function has no_sanitize_undefined attribute.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * typeck.c (cp_build_binary_op): Call ubsan_instrument_division
+ even when SANITIZE_FLOAT_DIVIDE is on. Set doing_div_or_mod even
+ for non-integer types.
+
2015-02-04 Ville Voutilainen <ville.voutilainen@gmail.com>
PR c++/64901
diff --git a/gcc/cp/cp-gimplify.c b/gcc/cp/cp-gimplify.c
index 3dc32e6cfe7..b641ddbe385 100644
--- a/gcc/cp/cp-gimplify.c
+++ b/gcc/cp/cp-gimplify.c
@@ -1205,6 +1205,27 @@ cp_genericize_r (tree *stmt_p, int *walk_subtrees, void *data)
*stmt_p = size_one_node;
return NULL;
}
+ else if (flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT))
+ {
+ if (TREE_CODE (stmt) == NOP_EXPR
+ && TREE_CODE (TREE_TYPE (stmt)) == REFERENCE_TYPE)
+ ubsan_maybe_instrument_reference (stmt);
+ else if (TREE_CODE (stmt) == CALL_EXPR)
+ {
+ tree fn = CALL_EXPR_FN (stmt);
+ if (fn != NULL_TREE
+ && !error_operand_p (fn)
+ && POINTER_TYPE_P (TREE_TYPE (fn))
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) == METHOD_TYPE)
+ {
+ bool is_ctor
+ = TREE_CODE (fn) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL
+ && DECL_CONSTRUCTOR_P (TREE_OPERAND (fn, 0));
+ ubsan_maybe_instrument_member_call (stmt, is_ctor);
+ }
+ }
+ }
pointer_set_insert (p_set, *stmt_p);
@@ -1228,7 +1249,7 @@ cp_genericize_tree (tree* t_p)
/* If a function that should end with a return in non-void
function doesn't obviously end with return, add ubsan
- instrmentation code to verify it at runtime. */
+ instrumentation code to verify it at runtime. */
static void
cp_ubsan_maybe_instrument_return (tree fndecl)
@@ -1341,7 +1362,10 @@ cp_genericize (tree fndecl)
walk_tree's hash functionality. */
cp_genericize_tree (&DECL_SAVED_TREE (fndecl));
- if (flag_sanitize & SANITIZE_RETURN)
+ if (flag_sanitize & SANITIZE_RETURN
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (current_function_decl)))
cp_ubsan_maybe_instrument_return (fndecl);
/* Do everything else. */
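As an illustration (a hedged example, not taken from the patch or its testsuite), the member-call instrumentation added above fires for code like the following when built with g++ -fsanitize=null:

#include <cstdio>

struct S
{
  void f () { std::printf ("hello\n"); }
};

int main ()
{
  S *p = 0;
  p->f ();      /* member call on a null object pointer: reported at run time */
  return 0;
}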
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 4cc386e117f..a2999c20b11 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -8433,7 +8433,11 @@ compute_array_index_type (tree name, tree size, tsubst_flags_t complain)
throw_bad_array_length (), void_zero_node);
finish_expr_stmt (comp);
}
- else if (flag_sanitize & SANITIZE_VLA)
+ else if (flag_sanitize & SANITIZE_VLA
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES
+ (current_function_decl)))
{
/* From C++1y onwards, we throw an exception on a negative
length size of an array; see above. */
diff --git a/gcc/cp/error.c b/gcc/cp/error.c
index 87ca4e2a306..aca89708f6d 100644
--- a/gcc/cp/error.c
+++ b/gcc/cp/error.c
@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. If not see
#include "pointer-set.h"
#include "c-family/c-objc.h"
#include "ubsan.h"
+#include "internal-fn.h"
#include <new> // For placement-new.
@@ -1996,6 +1997,14 @@ dump_expr (cxx_pretty_printer *pp, tree t, int flags)
tree fn = CALL_EXPR_FN (t);
bool skipfirst = false;
+ /* Deal with internal functions. */
+ if (fn == NULL_TREE)
+ {
+ pp_string (pp, internal_fn_name (CALL_EXPR_IFN (t)));
+ dump_call_expr_args (pp, t, flags, skipfirst);
+ break;
+ }
+
if (TREE_CODE (fn) == ADDR_EXPR)
fn = TREE_OPERAND (fn, 0);
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index 221971ac956..db95c906634 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -1138,7 +1138,7 @@ check_noexcept_r (tree *tp, int * /*walk_subtrees*/, void * /*data*/)
{
tree t = *tp;
enum tree_code code = TREE_CODE (t);
- if (code == CALL_EXPR
+ if ((code == CALL_EXPR && CALL_EXPR_FN (t))
|| code == AGGR_INIT_EXPR)
{
/* We can only use the exception specification of the called function
diff --git a/gcc/cp/lambda.c b/gcc/cp/lambda.c
index 6acbdd9ee2b..733bfc46a47 100644
--- a/gcc/cp/lambda.c
+++ b/gcc/cp/lambda.c
@@ -1019,6 +1019,15 @@ maybe_add_lambda_conv_op (tree type)
if (generic_lambda_p)
fn = add_inherited_template_parms (fn, DECL_TI_TEMPLATE (callop));
+ if (flag_sanitize & SANITIZE_NULL)
+ {
+ /* Don't UBsan this function; we're deliberately calling op() with a null
+ object argument. */
+ tree attrs = build_tree_list (get_identifier ("no_sanitize_undefined"),
+ NULL_TREE);
+ cplus_decl_attributes (&fn, attrs, 0);
+ }
+
add_method (type, fn, NULL_TREE);
if (nested)
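The conversion operator touched above is the one generated for captureless lambdas; a small sketch of the affected code path (assuming -std=c++11 and -fsanitize=null) is:

int main ()
{
  auto l = [] (int i) { return i + 1; };
  int (*fp) (int) = l;   /* uses the generated conversion operator; its static
                            thunk (_FUN) calls operator() with a null object
                            argument, hence the no_sanitize_undefined marking */
  return fp (1) == 2 ? 0 : 1;
}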
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 93f94d260c1..c4fed2dd2f5 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -4144,7 +4144,7 @@ cp_parser_primary_expression (cp_parser *parser,
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
- switch (token->type)
+ switch ((int) token->type)
{
/* literal:
integer-literal
@@ -4830,7 +4830,7 @@ cp_parser_unqualified_id (cp_parser* parser,
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
- switch (token->type)
+ switch ((int) token->type)
{
case CPP_NAME:
{
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index bbed56fb06e..6454c160ccc 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -49,6 +49,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimplify.h"
#include "bitmap.h"
#include "omp-low.h"
+#include "ubsan.h"
static bool verify_constant (tree, bool, bool *, bool *);
#define VERIFY_CONSTANT(X) \
@@ -1126,7 +1127,8 @@ finish_switch_cond (tree cond, tree switch_stmt)
error ("switch quantity not an integer");
cond = error_mark_node;
}
- orig_type = TREE_TYPE (cond);
+ /* We want unlowered type here to handle enum bit-fields. */
+ orig_type = unlowered_expr_type (cond);
if (cond != error_mark_node)
{
/* [stmt.switch]
@@ -8385,6 +8387,18 @@ cxx_eval_call_expression (const constexpr_call *old_call, tree t,
constexpr_call *entry;
bool depth_ok;
+ if (fun == NULL_TREE)
+ switch (CALL_EXPR_IFN (t))
+ {
+ case IFN_UBSAN_NULL:
+ case IFN_UBSAN_BOUNDS:
+ return void_node;
+ default:
+ error_at (loc, "call to internal function");
+ *non_constant_p = true;
+ return t;
+ }
+
if (TREE_CODE (fun) != FUNCTION_DECL)
{
/* Might be a constexpr function pointer. */
@@ -8403,6 +8417,10 @@ cxx_eval_call_expression (const constexpr_call *old_call, tree t,
}
if (DECL_CLONED_FUNCTION_P (fun))
fun = DECL_CLONED_FUNCTION (fun);
+
+ if (is_ubsan_builtin_p (fun))
+ return void_node;
+
if (is_builtin_fn (fun))
return cxx_eval_builtin_function_call (old_call, t, allow_non_constant,
addr, non_constant_p, overflow_p);
@@ -8610,6 +8628,76 @@ cxx_eval_unary_expression (const constexpr_call *call, tree t,
return r;
}
+/* Check whether the shift operation with code CODE and type TYPE on LHS
+ and RHS is undefined. If it is, give an error with an explanation,
+ and return true; return false otherwise. */
+
+static bool
+cxx_eval_check_shift_p (location_t loc, enum tree_code code, tree type,
+ tree lhs, tree rhs)
+{
+ if ((code != LSHIFT_EXPR && code != RSHIFT_EXPR)
+ || TREE_CODE (lhs) != INTEGER_CST
+ || TREE_CODE (rhs) != INTEGER_CST)
+ return false;
+
+ tree lhstype = TREE_TYPE (lhs);
+ unsigned HOST_WIDE_INT uprec = TYPE_PRECISION (TREE_TYPE (lhs));
+
+ /* [expr.shift] The behavior is undefined if the right operand
+ is negative, or greater than or equal to the length in bits
+ of the promoted left operand. */
+ if (tree_int_cst_sgn (rhs) == -1)
+ {
+ error_at (loc, "right operand of shift expression %q+E is negative",
+ build2_loc (loc, code, type, lhs, rhs));
+ return true;
+ }
+
+ if (compare_tree_int (rhs, uprec) >= 0)
+ {
+ error_at (loc, "right operand of shift expression %q+E is >= than "
+ "the precision of the left operand",
+ build2_loc (loc, code, type, lhs, rhs));
+ return true;
+ }
+
+ /* The value of E1 << E2 is E1 left-shifted E2 bit positions; [...]
+ if E1 has a signed type and non-negative value, and E1 * 2^E2 is
+ representable in the corresponding unsigned type of the result type,
+ then that value, converted to the result type, is the resulting value;
+ otherwise, the behavior is undefined. */
+ if (code == LSHIFT_EXPR && !TYPE_UNSIGNED (lhstype)
+ && (cxx_dialect >= cxx11))
+ {
+ if (tree_int_cst_sgn (lhs) == -1)
+ {
+ error_at (loc, "left operand of shift expression %q+E is negative",
+ build2_loc (loc, code, type, lhs, rhs));
+ return true;
+ }
+ /* For signed x << y the following:
+ (unsigned) x >> ((prec (lhs) - 1) - y)
+ if > 1, is undefined. The right-hand side of this formula
+ is the highest bit of the LHS that can be set (starting from 0),
+ so that the shift doesn't overflow. We then right-shift the LHS
+ to see whether any other bit is set making the original shift
+ undefined -- the result is not representable in the corresponding
+ unsigned type. */
+ tree t = build_int_cst (unsigned_type_node, uprec - 1);
+ t = fold_build2 (MINUS_EXPR, unsigned_type_node, t, rhs);
+ tree ulhs = fold_convert (unsigned_type_for (lhstype), lhs);
+ t = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ulhs), ulhs, t);
+ if (tree_int_cst_lt (integer_one_node, t))
+ {
+ error_at (loc, "shift expression %q+E overflows",
+ build2_loc (loc, code, type, lhs, rhs));
+ return true;
+ }
+ }
+ return false;
+}
+
/* Subroutine of cxx_eval_constant_expression.
Like cxx_eval_unary_expression, except for binary expressions. */
@@ -8630,9 +8718,19 @@ cxx_eval_binary_expression (const constexpr_call *call, tree t,
allow_non_constant, addr,
non_constant_p, overflow_p);
VERIFY_CONSTANT (rhs);
- if (lhs == orig_lhs && rhs == orig_rhs)
- return t;
- r = fold_build2 (TREE_CODE (t), TREE_TYPE (t), lhs, rhs);
+ location_t loc = EXPR_LOCATION (t);
+ enum tree_code code = TREE_CODE (t);
+ tree type = TREE_TYPE (t);
+ r = fold_binary_loc (loc, code, type, lhs, rhs);
+ if (r == NULL_TREE)
+ {
+ if (lhs == orig_lhs && rhs == orig_rhs)
+ r = t;
+ else
+ r = build2_loc (loc, code, type, lhs, rhs);
+ }
+ else if (cxx_eval_check_shift_p (loc, code, type, lhs, rhs))
+ *non_constant_p = true;
VERIFY_CONSTANT (r);
return r;
}
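A short, hypothetical example of what cxx_eval_check_shift_p now rejects during constexpr evaluation (the second initializer is diagnosed and treated as non-constant):

constexpr int shift (int a, int b) { return a << b; }

constexpr int ok  = shift (1, 3);    /* evaluates to 8 */
constexpr int bad = shift (1, 32);   /* shift count >= width of int: error,
                                        not a constant expression */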
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index 2820ba0f976..bc9d972b75b 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -2330,6 +2330,8 @@ bot_manip (tree* tp, int* walk_subtrees, void* data)
case BUILT_IN_FILE:
case BUILT_IN_LINE:
SET_EXPR_LOCATION (*tp, input_location);
+ default:
+ break;
}
}
return t;
diff --git a/gcc/cp/typeck.c b/gcc/cp/typeck.c
index cd8318d92c6..a39d8814344 100644
--- a/gcc/cp/typeck.c
+++ b/gcc/cp/typeck.c
@@ -3262,6 +3262,7 @@ get_member_function_from_ptrfunc (tree *instance_ptrptr, tree function,
idx = build1 (NOP_EXPR, vtable_index_type, e3);
switch (TARGET_PTRMEMFUNC_VBIT_LOCATION)
{
+ int flag_sanitize_save;
case ptrmemfunc_vbit_in_pfn:
e1 = cp_build_binary_op (input_location,
BIT_AND_EXPR, idx, integer_one_node,
@@ -3277,9 +3278,15 @@ get_member_function_from_ptrfunc (tree *instance_ptrptr, tree function,
e1 = cp_build_binary_op (input_location,
BIT_AND_EXPR, delta, integer_one_node,
complain);
+ /* Don't instrument the RSHIFT_EXPR we're about to create because
+ we're going to use DELTA a number of times, and that wouldn't play
+ well with SAVE_EXPRs therein. */
+ flag_sanitize_save = flag_sanitize;
+ flag_sanitize = 0;
delta = cp_build_binary_op (input_location,
RSHIFT_EXPR, delta, integer_one_node,
complain);
+ flag_sanitize = flag_sanitize_save;
if (delta == error_mark_node)
return error_mark_node;
break;
@@ -4112,10 +4119,7 @@ cp_build_binary_op (location_t location,
enum tree_code tcode0 = code0, tcode1 = code1;
tree cop1 = fold_non_dependent_expr_sfinae (op1, tf_none);
cop1 = maybe_constant_value (cop1);
-
- if (tcode0 == INTEGER_TYPE)
- doing_div_or_mod = true;
-
+ doing_div_or_mod = true;
warn_for_div_by_zero (location, cop1);
if (tcode0 == COMPLEX_TYPE || tcode0 == VECTOR_TYPE)
@@ -4155,9 +4159,7 @@ cp_build_binary_op (location_t location,
{
tree cop1 = fold_non_dependent_expr_sfinae (op1, tf_none);
cop1 = maybe_constant_value (cop1);
-
- if (code0 == INTEGER_TYPE)
- doing_div_or_mod = true;
+ doing_div_or_mod = true;
warn_for_div_by_zero (location, cop1);
}
@@ -4234,10 +4236,6 @@ cp_build_binary_op (location_t location,
warning (0, "right shift count >= width of type");
}
}
- /* Convert the shift-count to an integer, regardless of
- size of value being shifted. */
- if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
- op1 = cp_convert (integer_type_node, op1, complain);
/* Avoid converting op1 to result_type later. */
converted = 1;
}
@@ -4282,10 +4280,6 @@ cp_build_binary_op (location_t location,
warning (0, "left shift count >= width of type");
}
}
- /* Convert the shift-count to an integer, regardless of
- size of value being shifted. */
- if (TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
- op1 = cp_convert (integer_type_node, op1, complain);
/* Avoid converting op1 to result_type later. */
converted = 1;
}
@@ -4904,7 +4898,8 @@ cp_build_binary_op (location_t location,
if (build_type == NULL_TREE)
build_type = result_type;
- if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE))
+ if ((flag_sanitize & (SANITIZE_SHIFT | SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE))
&& !processing_template_decl
&& current_function_decl != 0
&& !lookup_attribute ("no_sanitize_undefined",
@@ -4918,7 +4913,8 @@ cp_build_binary_op (location_t location,
tf_none));
op1 = maybe_constant_value (fold_non_dependent_expr_sfinae (op1,
tf_none));
- if (doing_div_or_mod && (flag_sanitize & SANITIZE_DIVIDE))
+ if (doing_div_or_mod && (flag_sanitize & (SANITIZE_DIVIDE
+ | SANITIZE_FLOAT_DIVIDE)))
{
/* For diagnostics we want to use the promoted types without
shorten_binary_op. So convert the arguments to the
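With doing_div_or_mod now set for non-integer operands as well, floating-point division is instrumented too; a minimal example (not from the patch), built with g++ -fsanitize=float-divide-by-zero:

#include <cstdio>

int main ()
{
  volatile double x = 1.0, y = 0.0;
  std::printf ("%f\n", x / y);   /* division by zero reported at run time */
  return 0;
}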
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e0563416a22..d155a7f27c4 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -288,7 +288,9 @@ Objective-C and Objective-C++ Dialects}.
@item Debugging Options
@xref{Debugging Options,,Options for Debugging Your Program or GCC}.
@gccoptlist{-d@var{letters} -dumpspecs -dumpmachine -dumpversion @gol
--fsanitize=@var{style} @gol
+-fsanitize=@var{style} -fsanitize-recover -fsanitize-recover=@var{style} @gol
+-fasan-shadow-offset=@var{number} -fsanitize-sections=@var{s1,s2,...} @gol
+-fsanitize-undefined-trap-on-error @gol
-fdbg-cnt-list -fdbg-cnt=@var{counter-value-list} @gol
-fdisable-ipa-@var{pass_name} @gol
-fdisable-rtl-@var{pass_name} @gol
@@ -333,7 +335,7 @@ Objective-C and Objective-C++ Dialects}.
-fmem-report -fpre-ipa-mem-report -fpost-ipa-mem-report -fprofile-arcs @gol
-fopt-info @gol
-fopt-info-@var{options}@r{[}=@var{file}@r{]} @gol
--frandom-seed=@var{string} -fsched-verbose=@var{n} @gol
+-frandom-seed=@var{number} -fsched-verbose=@var{n} @gol
-fsel-sched-verbose -fsel-sched-dump-cfg -fsel-sched-pipelining-verbose @gol
-fstack-usage -ftest-coverage -ftime-report -fvar-tracking @gol
-fvar-tracking-assignments -fvar-tracking-assignments-toggle @gol
@@ -5322,26 +5324,22 @@ at runtime. Current suboptions are:
@item -fsanitize=shift
@opindex fsanitize=shift
-
This option enables checking that the result of a shift operation is
not undefined. Note that what exactly is considered undefined differs
slightly between C and C++, as well as between ISO C90 and C99, etc.
@item -fsanitize=integer-divide-by-zero
@opindex fsanitize=integer-divide-by-zero
-
Detect integer division by zero as well as @code{INT_MIN / -1} division.
@item -fsanitize=unreachable
@opindex fsanitize=unreachable
-
With this option, the compiler will turn the @code{__builtin_unreachable}
call into a diagnostics message call instead. When reaching the
@code{__builtin_unreachable} call, the behavior is undefined.
@item -fsanitize=vla-bound
@opindex fsanitize=vla-bound
-
This option instructs the compiler to check that the size of a variable
length array is positive. This option does not have any effect in
@option{-std=c++1y} mode, as the standard requires the exception be thrown
@@ -5349,15 +5347,14 @@ instead.
@item -fsanitize=null
@opindex fsanitize=null
-
This option enables pointer checking. Particularly, the application
built with this option turned on will issue an error message when it
tries to dereference a NULL pointer, or if a reference (possibly an
-rvalue reference) is bound to a NULL pointer.
+rvalue reference) is bound to a NULL pointer, or if a method is invoked
+on an object pointed to by a NULL pointer.
@item -fsanitize=return
@opindex fsanitize=return
-
This option enables return statement checking. Programs
built with this option turned on will issue an error message
when the end of a non-void function is reached without actually
@@ -5365,7 +5362,6 @@ returning a value. This option works in C++ only.
@item -fsanitize=signed-integer-overflow
@opindex fsanitize=signed-integer-overflow
-
This option enables signed integer overflow checking. We check that
the result of @code{+}, @code{*}, and both unary and binary @code{-}
does not overflow in the signed arithmetics. Note, integer promotion
@@ -5376,12 +5372,134 @@ signed char a = SCHAR_MAX;
a++;
@end smallexample
+@item -fsanitize=bounds
+@opindex fsanitize=bounds
+This option enables instrumentation of array bounds. Various out of bounds
+accesses are detected. Flexible array members, flexible array member-like
+arrays, and initializers of variables with static storage are not instrumented.
+
+@item -fsanitize=bounds-strict
+@opindex fsanitize=bounds-strict
+This option enables strict instrumentation of array bounds. Most out of bounds
+accesses are detected, including flexible array members and flexible array
+member-like arrays. Initializers of variables with static storage are not
+instrumented.
+
+@item -fsanitize=alignment
+@opindex fsanitize=alignment
+
+This option enables checking of alignment of pointers when they are
+dereferenced, or when a reference is bound to an insufficiently aligned target,
+or when a method or constructor is invoked on an insufficiently aligned object.
+
+@item -fsanitize=object-size
+@opindex fsanitize=object-size
+This option enables instrumentation of memory references using the
+@code{__builtin_object_size} function. Various out of bounds pointer
+accesses are detected.
+
+@item -fsanitize=float-divide-by-zero
+@opindex fsanitize=float-divide-by-zero
+Detect floating-point division by zero. Unlike other similar options,
+@option{-fsanitize=float-divide-by-zero} is not enabled by
+@option{-fsanitize=undefined}, since floating-point division by zero can
+be a legitimate way of obtaining infinities and NaNs.
+
+@item -fsanitize=float-cast-overflow
+@opindex fsanitize=float-cast-overflow
+This option enables floating-point type to integer conversion checking.
+We check that the result of the conversion does not overflow.
+Unlike other similar options, @option{-fsanitize=float-cast-overflow} is
+not enabled by @option{-fsanitize=undefined}.
+This option does not work well with @code{FE_INVALID} exceptions enabled.
+
+@item -fsanitize=nonnull-attribute
+@opindex fsanitize=nonnull-attribute
+
+This option enables instrumentation of calls, checking that null values
+are not passed to arguments marked as requiring a non-null value by the
+@code{nonnull} function attribute.
+
+@item -fsanitize=returns-nonnull-attribute
+@opindex fsanitize=returns-nonnull-attribute
+
+This option enables instrumentation of return statements in functions
+marked with the @code{returns_nonnull} function attribute, to detect
+returning null values from such functions.
+
+@item -fsanitize=bool
+@opindex fsanitize=bool
+
+This option enables instrumentation of loads from bool. If a value other
+than 0/1 is loaded, a run-time error is issued.
+
+@item -fsanitize=enum
+@opindex fsanitize=enum
+
+This option enables instrumentation of loads from an enum type. If
+a value outside the range of values for the enum type is loaded,
+a run-time error is issued.
+
@end table
While @option{-ftrapv} causes traps for signed overflows to be emitted,
@option{-fsanitize=undefined} gives a diagnostic message.
This currently works only for the C family of languages.
+@item -fno-sanitize=all
+@opindex fno-sanitize=all
+
+This option disables all previously enabled sanitizers.
+@option{-fsanitize=all} is not allowed, as some sanitizers cannot be used
+together.
+
+@item -fasan-shadow-offset=@var{number}
+@opindex fasan-shadow-offset
+This option forces GCC to use a custom shadow offset in AddressSanitizer checks.
+It is useful for experimenting with different shadow memory layouts in
+Kernel AddressSanitizer.
+
+@item -fsanitize-sections=@var{s1,s2,...}
+@opindex fsanitize-sections
+Sanitize global variables in selected user-defined sections. @var{si} may
+contain wildcards.
+
+@item -fsanitize-recover@r{[}=@var{opts}@r{]}
+@opindex fsanitize-recover
+@opindex fno-sanitize-recover
+@option{-fsanitize-recover=} controls error recovery mode for sanitizers
+mentioned in a comma-separated list of @var{opts}. Enabling this option
+for a sanitizer component causes it to attempt to continue
+running the program as if no error happened. This means multiple
+runtime errors can be reported in a single program run, and the exit
+code of the program may indicate success even when errors
+have been reported. The @option{-fno-sanitize-recover=} option reverses
+this behavior: only the first detected error is reported
+and the program exits after that with a non-zero exit code.
+
+Currently this feature only works for @option{-fsanitize=undefined} (and its suboptions
+except for @option{-fsanitize=unreachable} and @option{-fsanitize=return}),
+@option{-fsanitize=float-cast-overflow}, @option{-fsanitize=float-divide-by-zero},
+@option{-fsanitize=kernel-address} and @option{-fsanitize=address}.
+For these sanitizers error recovery is turned on by default, except for
+@option{-fsanitize=address}, for which this feature is experimental.
+@option{-fsanitize-recover=all} and @option{-fno-sanitize-recover=all} are also
+accepted: the former enables recovery for all sanitizers that support it,
+the latter disables recovery for all sanitizers that support it.
+
+The syntax without an explicit @var{opts} parameter is deprecated. It is equivalent to
+@option{-fsanitize-recover=undefined,float-cast-overflow,float-divide-by-zero}.
+Similarly, @option{-fno-sanitize-recover} is equivalent to
+@option{-fno-sanitize-recover=undefined,float-cast-overflow,float-divide-by-zero}.
+
+@item -fsanitize-undefined-trap-on-error
+@opindex fsanitize-undefined-trap-on-error
+The @option{-fsanitize-undefined-trap-on-error} option instructs the compiler to
+report undefined behavior using @code{__builtin_trap ()} rather than
+a @code{libubsan} library routine. The advantage of this is that the
+@code{libubsan} library is not needed and will not be linked in, so this
+option is usable even in freestanding environments.
+
@item -fdump-final-insns@r{[}=@var{file}@r{]}
@opindex fdump-final-insns
Dump the final internal representation (RTL) to @var{file}. If the
@@ -6096,6 +6214,11 @@ feasible to use diff on debugging dumps for compiler invocations with
different compiler binaries and/or different
text / bss / data / heap / stack / dso start locations.
+@item -freport-bug
+@opindex freport-bug
+Collect and dump debug information into a temporary file if an internal
+compiler error (ICE) occurs in the C/C++ compiler.
+
@item -fdump-unnumbered
@opindex fdump-unnumbered
When doing debugging dumps, suppress instruction numbers and address output.
@@ -6509,7 +6632,7 @@ the first option takes effect and the subsequent options are
ignored. Thus only the @file{vec.miss} is produced which contains
dumps from the vectorizer about missed opportunities.
-@item -frandom-seed=@var{string}
+@item -frandom-seed=@var{number}
@opindex frandom-seed
This option provides a seed that GCC uses in place of
random numbers in generating certain symbol names
@@ -6518,7 +6641,7 @@ place unique stamps in coverage data files and the object files that
produce them. You can use the @option{-frandom-seed} option to produce
reproducibly identical object files.
-The @var{string} should be different for every file you compile.
+The @var{number} should be different for every file you compile.
@item -fsched-verbose=@var{n}
@opindex fsched-verbose
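To make the new option descriptions above concrete, here is a small hypothetical example; built with g++ -fsanitize=bounds (or -fsanitize=undefined), the out-of-bounds access is reported, and with the default -fsanitize-recover setting execution continues after the report:

#include <cstdio>

int main (int argc, char **)
{
  int a[4] = { 0, 1, 2, 3 };
  std::printf ("%d\n", a[argc + 3]);   /* index 4 when argc == 1: out of bounds */
  return 0;
}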
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index 1dd1b3ea575..5830c6da78a 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -220,9 +220,23 @@ enum sanitize_code {
SANITIZE_SI_OVERFLOW = 1 << 11,
SANITIZE_BOOL = 1 << 12,
SANITIZE_ENUM = 1 << 13,
+ SANITIZE_FLOAT_DIVIDE = 1 << 14,
+ SANITIZE_FLOAT_CAST = 1 << 15,
+ SANITIZE_BOUNDS = 1UL << 16,
+ SANITIZE_ALIGNMENT = 1UL << 17,
+ SANITIZE_NONNULL_ATTRIBUTE = 1UL << 18,
+ SANITIZE_RETURNS_NONNULL_ATTRIBUTE = 1UL << 19,
+ SANITIZE_OBJECT_SIZE = 1UL << 20,
+ SANITIZE_BOUNDS_STRICT = 1UL << 21,
SANITIZE_UNDEFINED = SANITIZE_SHIFT | SANITIZE_DIVIDE | SANITIZE_UNREACHABLE
| SANITIZE_VLA | SANITIZE_NULL | SANITIZE_RETURN
| SANITIZE_SI_OVERFLOW | SANITIZE_BOOL | SANITIZE_ENUM
+ | SANITIZE_BOUNDS | SANITIZE_ALIGNMENT
+ | SANITIZE_NONNULL_ATTRIBUTE
+ | SANITIZE_RETURNS_NONNULL_ATTRIBUTE
+ | SANITIZE_OBJECT_SIZE,
+ SANITIZE_NONDEFAULT = SANITIZE_FLOAT_DIVIDE | SANITIZE_FLOAT_CAST
+ | SANITIZE_BOUNDS_STRICT
};
/* flag_vtable_verify initialization levels. */
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index f96427f411c..5d38aabf312 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -418,9 +418,11 @@ negate_expr_p (tree t)
&& TYPE_OVERFLOW_WRAPS (type));
case FIXED_CST:
- case NEGATE_EXPR:
return true;
+ case NEGATE_EXPR:
+ return !TYPE_OVERFLOW_SANITIZED (type);
+
case REAL_CST:
/* We want to canonicalize to positive real constants. Pretend
that only negative ones can be easily negated. */
@@ -566,7 +568,9 @@ fold_negate_expr (location_t loc, tree t)
case INTEGER_CST:
tem = fold_negate_const (t, type);
if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
- || !TYPE_OVERFLOW_TRAPS (type))
+ || (!TYPE_OVERFLOW_TRAPS (type)
+ && TYPE_OVERFLOW_WRAPS (type))
+ || (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0)
return tem;
break;
@@ -623,7 +627,9 @@ fold_negate_expr (location_t loc, tree t)
break;
case NEGATE_EXPR:
- return TREE_OPERAND (t, 0);
+ if (!TYPE_OVERFLOW_SANITIZED (type))
+ return TREE_OPERAND (t, 0);
+ break;
case PLUS_EXPR:
if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
@@ -4732,8 +4738,7 @@ fold_cond_expr_with_comparison (location_t loc, tree type,
case GE_EXPR:
case GT_EXPR:
if (TYPE_UNSIGNED (TREE_TYPE (arg1)))
- arg1 = fold_convert_loc (loc, signed_type_for
- (TREE_TYPE (arg1)), arg1);
+ break;
tem = fold_build1_loc (loc, ABS_EXPR, TREE_TYPE (arg1), arg1);
return pedantic_non_lvalue_loc (loc, fold_convert_loc (loc, type, tem));
case UNLE_EXPR:
@@ -4743,8 +4748,7 @@ fold_cond_expr_with_comparison (location_t loc, tree type,
case LE_EXPR:
case LT_EXPR:
if (TYPE_UNSIGNED (TREE_TYPE (arg1)))
- arg1 = fold_convert_loc (loc, signed_type_for
- (TREE_TYPE (arg1)), arg1);
+ break;
tem = fold_build1_loc (loc, ABS_EXPR, TREE_TYPE (arg1), arg1);
return negate_expr (fold_convert_loc (loc, type, tem));
default:
@@ -8322,9 +8326,14 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
&& integer_onep (TREE_OPERAND (arg0, 1)))
|| (TREE_CODE (arg0) == PLUS_EXPR
&& integer_all_onesp (TREE_OPERAND (arg0, 1)))))
- return fold_build1_loc (loc, NEGATE_EXPR, type,
- fold_convert_loc (loc, type,
- TREE_OPERAND (arg0, 0)));
+ {
+ /* Perform the negation in ARG0's type and only then convert
+ to TYPE so as to avoid introducing undefined behavior. */
+ tree t = fold_build1_loc (loc, NEGATE_EXPR,
+ TREE_TYPE (TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 0));
+ return fold_convert_loc (loc, type, t);
+ }
/* Convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
else if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& (tem = fold_unary_loc (loc, BIT_NOT_EXPR, type,
@@ -10870,14 +10879,20 @@ fold_binary_loc (location_t loc,
}
}
/* A - (-B) -> A + B */
- if (TREE_CODE (arg1) == NEGATE_EXPR)
+ if (TREE_CODE (arg1) == NEGATE_EXPR
+ && (!INTEGRAL_TYPE_P (type)
+ || TYPE_OVERFLOW_WRAPS (type)
+ || (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0))
return fold_build2_loc (loc, PLUS_EXPR, type, op0,
fold_convert_loc (loc, type,
TREE_OPERAND (arg1, 0)));
/* (-A) - B -> (-B) - A where B is easily negated and we can swap. */
if (TREE_CODE (arg0) == NEGATE_EXPR
&& negate_expr_p (arg1)
- && reorder_operands_p (arg0, arg1))
+ && reorder_operands_p (arg0, arg1)
+ && (!INTEGRAL_TYPE_P (type)
+ || TYPE_OVERFLOW_WRAPS (type)
+ || (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0))
return fold_build2_loc (loc, MINUS_EXPR, type,
fold_convert_loc (loc, type,
negate_expr (arg1)),
@@ -11021,6 +11036,9 @@ fold_binary_loc (location_t loc,
/* A - B -> A + (-B) if B is easily negatable. */
if (negate_expr_p (arg1)
+ && (!INTEGRAL_TYPE_P (type)
+ || TYPE_OVERFLOW_WRAPS (type)
+ || (flag_sanitize & SANITIZE_SI_OVERFLOW) == 0)
&& ((FLOAT_TYPE_P (type)
/* Avoid this transformation if B is a positive REAL_CST. */
&& (TREE_CODE (arg1) != REAL_CST
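The guards added to these folders matter for -fsanitize=signed-integer-overflow: folding A - (-B) into A + B, for example, would hide the overflow of the inner negation. A minimal illustration (hypothetical, not from the testsuite):

#include <climits>

int sub_neg (int a, int b)
{
  return a - (-b);   /* for b == INT_MIN the negation overflows; folding this
                        to a + b would silently lose the ubsan report */
}

int main ()
{
  return sub_neg (0, INT_MIN) != 0;
}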
diff --git a/gcc/gcc.c b/gcc/gcc.c
index c7ce64d8fc8..c3d44b1c030 100644
--- a/gcc/gcc.c
+++ b/gcc/gcc.c
@@ -253,6 +253,7 @@ static void init_gcc_specs (struct obstack *, const char *, const char *,
static const char *convert_filename (const char *, int, int);
#endif
+static void try_generate_repro (const char **argv);
static const char *getenv_spec_function (int, const char **);
static const char *if_exists_spec_function (int, const char **);
static const char *if_exists_else_spec_function (int, const char **);
@@ -736,7 +737,7 @@ proper position among the other output files. */
%{!nostdlib:%{!nodefaultlibs:%{%:sanitize(address):" LIBASAN_SPEC "\
%{static:%ecannot specify -static with -fsanitize=address}}\
%{%:sanitize(thread):" LIBTSAN_SPEC "\
- %{!pie:%{!shared:%e-fsanitize=thread linking must be done with -pie or -shared}}}\
+ %{static:%ecannot specify -static with -fsanitize=thread}}\
%{%:sanitize(undefined):" LIBUBSAN_SPEC "}\
%{%:sanitize(leak):" LIBLSAN_SPEC "}}}"
#endif
@@ -2803,7 +2804,7 @@ execute (void)
}
}
- if (string != commands[i].prog)
+ if (i && string != commands[i].prog)
free (CONST_CAST (char *, string));
}
@@ -2856,6 +2857,15 @@ execute (void)
else if (WIFEXITED (status)
&& WEXITSTATUS (status) >= MIN_FATAL_STATUS)
{
+ /* For ICEs in cc1, cc1obj, cc1plus see if it is
+ reproducible or not. */
+ const char *p;
+ if (flag_report_bug
+ && WEXITSTATUS (status) == ICE_EXIT_CODE
+ && i == 0
+ && (p = strrchr (commands[0].argv[0], DIR_SEPARATOR))
+ && ! strncmp (p + 1, "cc1", 3))
+ try_generate_repro (commands[0].argv);
if (WEXITSTATUS (status) > greatest_status)
greatest_status = WEXITSTATUS (status);
ret_code = -1;
@@ -2913,6 +2923,9 @@ execute (void)
}
}
+ if (commands[0].argv[0] != commands[0].prog)
+ free (CONST_CAST (char *, commands[0].argv[0]));
+
return ret_code;
}
}
@@ -6104,6 +6117,359 @@ give_switch (int switchnum, int omit_first_word)
switches[switchnum].validated = true;
}
+/* Print GCC configuration (e.g. version, thread model, target,
+ configuration_arguments) to a given FILE. */
+
+static void
+print_configuration (FILE *file)
+{
+ int n;
+ const char *thrmod;
+
+ fnotice (file, "Target: %s\n", spec_machine);
+ fnotice (file, "Configured with: %s\n", configuration_arguments);
+
+#ifdef THREAD_MODEL_SPEC
+ /* We could have defined THREAD_MODEL_SPEC to "%*" by default,
+ but there's no point in doing all this processing just to get
+ thread_model back. */
+ obstack_init (&obstack);
+ do_spec_1 (THREAD_MODEL_SPEC, 0, thread_model);
+ obstack_1grow (&obstack, '\0');
+ thrmod = XOBFINISH (&obstack, const char *);
+#else
+ thrmod = thread_model;
+#endif
+
+ fnotice (file, "Thread model: %s\n", thrmod);
+
+ /* compiler_version is truncated at the first space when initialized
+ from version string, so truncate version_string at the first space
+ before comparing. */
+ for (n = 0; version_string[n]; n++)
+ if (version_string[n] == ' ')
+ break;
+
+ if (! strncmp (version_string, compiler_version, n)
+ && compiler_version[n] == 0)
+ fnotice (file, "gcc version %s %s\n", version_string,
+ pkgversion_string);
+ else
+ fnotice (file, "gcc driver version %s %sexecuting gcc version %s\n",
+ version_string, pkgversion_string, compiler_version);
+
+}
+
+#define RETRY_ICE_ATTEMPTS 3
+
+/* Return true if FILE1 and FILE2 contain equivalent data, false otherwise. */
+
+static bool
+files_equal_p (char *file1, char *file2)
+{
+ struct stat st1, st2;
+ off_t n, len;
+ int fd1, fd2;
+ const int bufsize = 8192;
+ char *buf = XNEWVEC (char, bufsize);
+
+ fd1 = open (file1, O_RDONLY);
+ fd2 = open (file2, O_RDONLY);
+
+ if (fd1 < 0 || fd2 < 0)
+ goto error;
+
+ if (fstat (fd1, &st1) < 0 || fstat (fd2, &st2) < 0)
+ goto error;
+
+ if (st1.st_size != st2.st_size)
+ goto error;
+
+ for (n = st1.st_size; n; n -= len)
+ {
+ len = n;
+ if ((int) len > bufsize / 2)
+ len = bufsize / 2;
+
+ if (read (fd1, buf, len) != (int) len
+ || read (fd2, buf + bufsize / 2, len) != (int) len)
+ {
+ goto error;
+ }
+
+ if (memcmp (buf, buf + bufsize / 2, len) != 0)
+ goto error;
+ }
+
+ free (buf);
+ close (fd1);
+ close (fd2);
+
+ return 1;
+
+error:
+ free (buf);
+ close (fd1);
+ close (fd2);
+ return 0;
+}
+
+/* Check that compiler's output doesn't differ across runs.
+ TEMP_STDOUT_FILES and TEMP_STDERR_FILES are arrays of files, containing
+ stdout and stderr for each compiler run. Return true if all of
+ TEMP_STDOUT_FILES and TEMP_STDERR_FILES are equivalent. */
+
+static bool
+check_repro (char **temp_stdout_files, char **temp_stderr_files)
+{
+ int i;
+ for (i = 0; i < RETRY_ICE_ATTEMPTS - 2; ++i)
+ {
+ if (!files_equal_p (temp_stdout_files[i], temp_stdout_files[i + 1])
+ || !files_equal_p (temp_stderr_files[i], temp_stderr_files[i + 1]))
+ {
+ fnotice (stderr, "The bug is not reproducible, so it is"
+ " likely a hardware or OS problem.\n");
+ break;
+ }
+ }
+ return i == RETRY_ICE_ATTEMPTS - 2;
+}
+
+enum attempt_status {
+ ATTEMPT_STATUS_FAIL_TO_RUN,
+ ATTEMPT_STATUS_SUCCESS,
+ ATTEMPT_STATUS_ICE
+};
+
+
+/* Run compiler with arguments NEW_ARGV to reproduce the ICE, storing stdout
+ to OUT_TEMP and stderr to ERR_TEMP. If APPEND is TRUE, append to OUT_TEMP
+ and ERR_TEMP instead of truncating. If EMIT_SYSTEM_INFO is TRUE, also write
+ GCC configuration into to ERR_TEMP. Return ATTEMPT_STATUS_FAIL_TO_RUN if
+ compiler failed to run, ATTEMPT_STATUS_ICE if compiled ICE-ed and
+ ATTEMPT_STATUS_SUCCESS otherwise. */
+
+static enum attempt_status
+run_attempt (const char **new_argv, const char *out_temp,
+ const char *err_temp, int emit_system_info, int append)
+{
+
+ if (emit_system_info)
+ {
+ FILE *file_out = fopen (err_temp, "a");
+ print_configuration (file_out);
+ fputs ("\n", file_out);
+ fclose (file_out);
+ }
+
+ int exit_status;
+ const char *errmsg;
+ struct pex_obj *pex;
+ int err;
+ int pex_flags = PEX_USE_PIPES | PEX_LAST;
+ enum attempt_status status = ATTEMPT_STATUS_FAIL_TO_RUN;
+
+ if (append)
+ pex_flags |= PEX_STDOUT_APPEND | PEX_STDERR_APPEND;
+
+ pex = pex_init (PEX_USE_PIPES, new_argv[0], NULL);
+ if (!pex)
+ fatal_error ("pex_init failed: %m");
+
+ errmsg = pex_run (pex, pex_flags, new_argv[0],
+ CONST_CAST2 (char *const *, const char **, &new_argv[1]), out_temp,
+ err_temp, &err);
+ if (errmsg != NULL)
+ {
+ if (err == 0)
+ fatal_error (errmsg);
+ else
+ {
+ errno = err;
+ pfatal_with_name (errmsg);
+ }
+ }
+
+ if (!pex_get_status (pex, 1, &exit_status))
+ goto out;
+
+ switch (WEXITSTATUS (exit_status))
+ {
+ case ICE_EXIT_CODE:
+ status = ATTEMPT_STATUS_ICE;
+ break;
+
+ case SUCCESS_EXIT_CODE:
+ status = ATTEMPT_STATUS_SUCCESS;
+ break;
+
+ default:
+ ;
+ }
+
+out:
+ pex_free (pex);
+ return status;
+}
+
+/* This routine reads lines from FILE_IN, adds C++-style comments
+ at the beginning of each line and writes the result into FILE_OUT. */
+
+static void
+insert_comments (const char *file_in, const char *file_out)
+{
+ FILE *in = fopen (file_in, "rb");
+ FILE *out = fopen (file_out, "wb");
+ char line[256];
+
+ bool add_comment = true;
+ while (fgets (line, sizeof (line), in))
+ {
+ if (add_comment)
+ fputs ("// ", out);
+ fputs (line, out);
+ add_comment = strchr (line, '\n') != NULL;
+ }
+
+ fclose (in);
+ fclose (out);
+}
+
+/* This routine adds preprocessed source code into the given ERR_FILE.
+ To do this, it adds "-E" to NEW_ARGV and executes the RUN_ATTEMPT routine to
+ add the information to the report file. RUN_ATTEMPT should return
+ ATTEMPT_STATUS_SUCCESS; otherwise the report cannot be generated. */
+
+static void
+do_report_bug (const char **new_argv, const int nargs,
+ char **out_file, char **err_file)
+{
+ int i, status;
+ int fd = open (*out_file, O_RDWR | O_APPEND);
+ if (fd < 0)
+ return;
+ write (fd, "\n//", 3);
+ for (i = 0; i < nargs; i++)
+ {
+ write (fd, " ", 1);
+ write (fd, new_argv[i], strlen (new_argv[i]));
+ }
+ write (fd, "\n\n", 2);
+ close (fd);
+ new_argv[nargs] = "-E";
+ new_argv[nargs + 1] = NULL;
+
+ status = run_attempt (new_argv, *out_file, *err_file, 0, 1);
+
+ if (status == ATTEMPT_STATUS_SUCCESS)
+ {
+ fnotice (stderr, "Preprocessed source stored into %s file,"
+ " please attach this to your bugreport.\n", *out_file);
+ /* Make sure it is not deleted. */
+ free (*out_file);
+ *out_file = NULL;
+ }
+}
+
+/* Try to reproduce the ICE. If the bug is reproducible, generate a report
+ .err file containing the GCC configuration, backtrace, compiler command line
+ options and preprocessed source code. */
+
+static void
+try_generate_repro (const char **argv)
+{
+ int i, nargs, out_arg = -1, quiet = 0, attempt;
+ const char **new_argv;
+ char *temp_files[RETRY_ICE_ATTEMPTS * 2];
+ char **temp_stdout_files = &temp_files[0];
+ char **temp_stderr_files = &temp_files[RETRY_ICE_ATTEMPTS];
+
+ if (gcc_input_filename == NULL || ! strcmp (gcc_input_filename, "-"))
+ return;
+
+ for (nargs = 0; argv[nargs] != NULL; ++nargs)
+ /* Only retry compiler ICEs, not preprocessor ones. */
+ if (! strcmp (argv[nargs], "-E"))
+ return;
+ else if (argv[nargs][0] == '-' && argv[nargs][1] == 'o')
+ {
+ if (out_arg == -1)
+ out_arg = nargs;
+ else
+ return;
+ }
+ /* If the compiler is going to output any time information,
+ it might vary between invocations. */
+ else if (! strcmp (argv[nargs], "-quiet"))
+ quiet = 1;
+ else if (! strcmp (argv[nargs], "-ftime-report"))
+ return;
+
+ if (out_arg == -1 || !quiet)
+ return;
+
+ memset (temp_files, '\0', sizeof (temp_files));
+ new_argv = XALLOCAVEC (const char *, nargs + 4);
+ memcpy (new_argv, argv, (nargs + 1) * sizeof (const char *));
+ new_argv[nargs++] = "-frandom-seed=0";
+ new_argv[nargs++] = "-fdump-noaddr";
+ new_argv[nargs] = NULL;
+ if (new_argv[out_arg][2] == '\0')
+ new_argv[out_arg + 1] = "-";
+ else
+ new_argv[out_arg] = "-o-";
+
+ int status;
+ for (attempt = 0; attempt < RETRY_ICE_ATTEMPTS; ++attempt)
+ {
+ int emit_system_info = 0;
+ int append = 0;
+ temp_stdout_files[attempt] = make_temp_file (".out");
+ temp_stderr_files[attempt] = make_temp_file (".err");
+
+ if (attempt == RETRY_ICE_ATTEMPTS - 1)
+ {
+ append = 1;
+ emit_system_info = 1;
+ }
+
+ status = run_attempt (new_argv, temp_stdout_files[attempt],
+ temp_stderr_files[attempt], emit_system_info,
+ append);
+
+ if (status != ATTEMPT_STATUS_ICE)
+ {
+ fnotice (stderr, "The bug is not reproducible, so it is"
+ " likely a hardware or OS problem.\n");
+ goto out;
+ }
+ }
+
+ if (!check_repro (temp_stdout_files, temp_stderr_files))
+ goto out;
+
+ {
+ /* Insert commented out backtrace into report file. */
+ char **stderr_commented = &temp_stdout_files[RETRY_ICE_ATTEMPTS - 1];
+ insert_comments (temp_stderr_files[RETRY_ICE_ATTEMPTS - 1],
+ *stderr_commented);
+
+ /* In the final attempt we append the compiler options and preprocessed code
+ to the last generated .out file with the configuration and backtrace. */
+ char **output = &temp_stdout_files[RETRY_ICE_ATTEMPTS - 1];
+ do_report_bug (new_argv, nargs, stderr_commented, output);
+ }
+
+out:
+ for (i = 0; i < RETRY_ICE_ATTEMPTS * 2; i++)
+ if (temp_files[i])
+ {
+ unlink (temp_stdout_files[i]);
+ free (temp_stdout_files[i]);
+ }
+}
+
/* Search for a file named NAME trying various prefixes including the
user's -B prefix and some standard ones.
Return the absolute file name found. If nothing is found, return NAME. */
@@ -6873,41 +7239,7 @@ warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n\n"
if (verbose_flag)
{
- int n;
- const char *thrmod;
-
- fnotice (stderr, "Target: %s\n", spec_machine);
- fnotice (stderr, "Configured with: %s\n", configuration_arguments);
-
-#ifdef THREAD_MODEL_SPEC
- /* We could have defined THREAD_MODEL_SPEC to "%*" by default,
- but there's no point in doing all this processing just to get
- thread_model back. */
- obstack_init (&obstack);
- do_spec_1 (THREAD_MODEL_SPEC, 0, thread_model);
- obstack_1grow (&obstack, '\0');
- thrmod = XOBFINISH (&obstack, const char *);
-#else
- thrmod = thread_model;
-#endif
-
- fnotice (stderr, "Thread model: %s\n", thrmod);
-
- /* compiler_version is truncated at the first space when initialized
- from version string, so truncate version_string at the first space
- before comparing. */
- for (n = 0; version_string[n]; n++)
- if (version_string[n] == ' ')
- break;
-
- if (! strncmp (version_string, compiler_version, n)
- && compiler_version[n] == 0)
- fnotice (stderr, "gcc version %s %s\n", version_string,
- pkgversion_string);
- else
- fnotice (stderr, "gcc driver version %s %sexecuting gcc version %s\n",
- version_string, pkgversion_string, compiler_version);
-
+ print_configuration (stderr);
if (n_infiles == 0)
return (0);
}
@@ -8178,7 +8510,8 @@ sanitize_spec_function (int argc, const char **argv)
if (strcmp (argv[0], "thread") == 0)
return (flag_sanitize & SANITIZE_THREAD) ? "" : NULL;
if (strcmp (argv[0], "undefined") == 0)
- return (flag_sanitize & SANITIZE_UNDEFINED) ? "" : NULL;
+ return ((flag_sanitize & (SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT))
+ && !flag_sanitize_undefined_trap_on_error) ? "" : NULL;
if (strcmp (argv[0], "leak") == 0)
return ((flag_sanitize
& (SANITIZE_ADDRESS | SANITIZE_LEAK | SANITIZE_THREAD))
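For readers less familiar with the driver internals, the reproducibility check above boils down to a byte-wise comparison of the outputs of repeated cc1 runs. A rough stand-alone C++ sketch of the files_equal_p idea (an illustration only; the driver itself uses stat/read on a shared buffer and no C++ library):

#include <algorithm>
#include <fstream>
#include <iterator>

/* Compare two files byte by byte.  The four-iterator std::equal (C++14)
   also verifies that the lengths match.  */
bool
files_equal (const char *a, const char *b)
{
  std::ifstream fa (a, std::ios::binary), fb (b, std::ios::binary);
  if (!fa || !fb)
    return false;
  return std::equal (std::istreambuf_iterator<char> (fa),
                     std::istreambuf_iterator<char> (),
                     std::istreambuf_iterator<char> (fb),
                     std::istreambuf_iterator<char> ());
}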
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 1bc41f060b6..51a623959e6 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -1202,6 +1202,19 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace)
gimple_call_arg (stmt, 1),
gimple_call_arg (stmt, 2));
break;
+ case IFN_UBSAN_OBJECT_SIZE:
+ if (integer_all_onesp (gimple_call_arg (stmt, 2))
+ || (TREE_CODE (gimple_call_arg (stmt, 1)) == INTEGER_CST
+ && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST
+ && tree_int_cst_le (gimple_call_arg (stmt, 1),
+ gimple_call_arg (stmt, 2))))
+ {
+ gsi_replace (gsi, gimple_build_nop (), true);
+ unlink_stmt_vdef (stmt);
+ release_defs (stmt);
+ return true;
+ }
+ break;
case IFN_UBSAN_CHECK_ADD:
subcode = PLUS_EXPR;
break;
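The new IFN_UBSAN_OBJECT_SIZE folding removes checks that are provably in bounds at compile time; for instance (a hypothetical case, assuming -fsanitize=object-size), the store below needs no runtime check because both the offset and the object size are known constants:

int f ()
{
  char buf[16];
  buf[3] = 1;        /* access at constant offset 3 into a 16-byte object:
                        the UBSAN_OBJECT_SIZE check folds away */
  return buf[3];
}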
diff --git a/gcc/gimple.c b/gcc/gimple.c
index 30d16530e39..86b4ab345f8 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -2505,6 +2505,9 @@ nonfreeing_call_p (gimple call)
default:
return true;
}
+ else if (gimple_call_internal_p (call)
+ && gimple_call_flags (call) & ECF_LEAF)
+ return true;
return false;
}
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 89e7334e8cd..e3a574a33bc 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -2245,6 +2245,24 @@ gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
if (! EXPR_HAS_LOCATION (*expr_p))
SET_EXPR_LOCATION (*expr_p, input_location);
+ /* Gimplify internal functions created in the FEs. */
+ if (CALL_EXPR_FN (*expr_p) == NULL_TREE)
+ {
+ nargs = call_expr_nargs (*expr_p);
+ enum internal_fn ifn = CALL_EXPR_IFN (*expr_p);
+ auto_vec<tree> vargs (nargs);
+
+ for (i = 0; i < nargs; i++)
+ {
+ gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
+ EXPR_LOCATION (*expr_p));
+ vargs.quick_push (CALL_EXPR_ARG (*expr_p, i));
+ }
+ gimple call = gimple_build_call_internal_vec (ifn, vargs);
+ gimplify_seq_add_stmt (pre_p, call);
+ return GS_ALL_DONE;
+ }
+
/* This may be a call to a builtin function.
Builtin function calls may be transformed into different
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index d64e20da6fd..6008bc0fb21 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -20,8 +20,8 @@ along with GCC; see the file COPYING3. If not see
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "internal-fn.h"
#include "tree.h"
+#include "internal-fn.h"
#include "stor-layout.h"
#include "expr.h"
#include "optabs.h"
@@ -181,6 +181,38 @@ expand_ASAN_CHECK (gimple stmt ATTRIBUTE_UNUSED)
gcc_unreachable ();
}
+/* This should get expanded in the sanopt pass. */
+
+static void
+expand_UBSAN_BOUNDS (gimple stmt ATTRIBUTE_UNUSED)
+{
+ gcc_unreachable ();
+}
+
+/* This should get expanded in the sanopt pass. */
+
+static void
+expand_UBSAN_OBJECT_SIZE (gimple stmt ATTRIBUTE_UNUSED)
+{
+ gcc_unreachable ();
+}
+
+
+/* Helper for expand_*_overflow. Store RES into TARGET. */
+
+static void
+expand_ubsan_result_store (rtx target, rtx res)
+{
+ if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
+ /* If this is a scalar in a register that is stored in a wider mode
+ than the declared mode, compute the result into its declared mode
+ and then convert to the wider mode. Our value is the computed
+ expression. */
+ convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
+ else
+ emit_move_insn (target, res);
+}
+
/* Add sub/add overflow checking to the statement STMT.
CODE says whether the operation is +, or -. */
@@ -327,7 +359,7 @@ ubsan_expand_si_overflow_addsub_check (tree_code code, gimple stmt)
emit_label (done_label);
if (lhs)
- emit_move_insn (target, res);
+ expand_ubsan_result_store (target, res);
}
/* Add negate overflow checking to the statement STMT. */
@@ -403,7 +435,7 @@ ubsan_expand_si_overflow_neg_check (gimple stmt)
emit_label (done_label);
if (lhs)
- emit_move_insn (target, res);
+ expand_ubsan_result_store (target, res);
}
/* Add mul overflow checking to the statement STMT. */
@@ -500,7 +532,7 @@ ubsan_expand_si_overflow_mul_check (gimple stmt)
rtx do_overflow = gen_label_rtx ();
rtx hipart_different = gen_label_rtx ();
- int hprec = GET_MODE_PRECISION (hmode);
+ unsigned int hprec = GET_MODE_PRECISION (hmode);
rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
NULL_RTX, 0);
hipart0 = gen_lowpart (hmode, hipart0);
@@ -535,12 +567,11 @@ ubsan_expand_si_overflow_mul_check (gimple stmt)
double_int arg0_min, arg0_max;
if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
{
- if (arg0_max.sle (double_int::max_value (hprec, false))
- && double_int::min_value (hprec, false).sle (arg0_min))
+ unsigned int mprec0 = wi::min_precision (arg0_min, SIGNED);
+ unsigned int mprec1 = wi::min_precision (arg0_max, SIGNED);
+ if (mprec0 <= hprec && mprec1 <= hprec)
op0_small_p = true;
- else if (arg0_max.sle (double_int::max_value (hprec, true))
- && (~double_int::max_value (hprec,
- true)).sle (arg0_min))
+ else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
op0_medium_p = true;
if (!arg0_min.is_negative ())
op0_sign = 0;
@@ -553,12 +584,11 @@ ubsan_expand_si_overflow_mul_check (gimple stmt)
double_int arg1_min, arg1_max;
if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
{
- if (arg1_max.sle (double_int::max_value (hprec, false))
- && double_int::min_value (hprec, false).sle (arg1_min))
+ unsigned int mprec0 = wi::min_precision (arg1_min, SIGNED);
+ unsigned int mprec1 = wi::min_precision (arg1_max, SIGNED);
+ if (mprec0 <= hprec && mprec1 <= hprec)
op1_small_p = true;
- else if (arg1_max.sle (double_int::max_value (hprec, true))
- && (~double_int::max_value (hprec,
- true)).sle (arg1_min))
+ else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
op1_medium_p = true;
if (!arg1_min.is_negative ())
op1_sign = 0;
@@ -798,7 +828,7 @@ ubsan_expand_si_overflow_mul_check (gimple stmt)
emit_label (done_label);
if (lhs)
- emit_move_insn (target, res);
+ expand_ubsan_result_store (target, res);
}
/* Expand UBSAN_CHECK_ADD call STMT. */
diff --git a/gcc/internal-fn.def b/gcc/internal-fn.def
index 2156e24ec83..b8e457c5090 100644
--- a/gcc/internal-fn.def
+++ b/gcc/internal-fn.def
@@ -49,9 +49,11 @@ DEF_INTERNAL_FN (MASK_LOAD, ECF_PURE | ECF_LEAF, NULL)
DEF_INTERNAL_FN (MASK_STORE, ECF_LEAF, NULL)
DEF_INTERNAL_FN (ANNOTATE, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (UBSAN_NULL, ECF_LEAF | ECF_NOTHROW, ".W.")
+DEF_INTERNAL_FN (UBSAN_BOUNDS, ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (UBSAN_CHECK_ADD, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (UBSAN_CHECK_SUB, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (UBSAN_CHECK_MUL, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
+DEF_INTERNAL_FN (UBSAN_OBJECT_SIZE, ECF_LEAF | ECF_NOTHROW, NULL)
DEF_INTERNAL_FN (ABNORMAL_DISPATCHER, ECF_NORETURN, NULL)
DEF_INTERNAL_FN (BUILTIN_EXPECT, ECF_CONST | ECF_LEAF | ECF_NOTHROW, NULL)
-DEF_INTERNAL_FN (ASAN_CHECK, ECF_TM_PURE | ECF_LEAF | ECF_NOTHROW, ".W..")
+DEF_INTERNAL_FN (ASAN_CHECK, ECF_TM_PURE | ECF_LEAF | ECF_NOTHROW, ".W...")
diff --git a/gcc/internal-fn.h b/gcc/internal-fn.h
index af93e154ab1..14573aa767b 100644
--- a/gcc/internal-fn.h
+++ b/gcc/internal-fn.h
@@ -20,12 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_INTERNAL_FN_H
#define GCC_INTERNAL_FN_H
-enum internal_fn {
-#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) IFN_##CODE,
-#include "internal-fn.def"
-#undef DEF_INTERNAL_FN
- IFN_LAST
-};
+#include "coretypes.h"
/* Initialize internal function tables. */
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index 8f1f5b1ff92..d5bd9ff1f47 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -452,6 +452,8 @@ special_builtin_state (enum pure_const_state_e *state, bool *looping,
*looping = true;
*state = IPA_CONST;
return true;
+ default:
+ break;
}
return false;
}
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 6b5df1a611c..8422b0a235c 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -2231,7 +2231,7 @@ write_symbol (struct streamer_tree_cache_d *cache,
{
const char *name;
enum gcc_plugin_symbol_kind kind;
- enum gcc_plugin_symbol_visibility visibility;
+ enum gcc_plugin_symbol_visibility visibility = GCCPV_DEFAULT;
unsigned slot_num;
unsigned HOST_WIDEST_INT size;
const char *comdat;
diff --git a/gcc/opts-global.c b/gcc/opts-global.c
index 111884bb5a6..0cb03740ff1 100644
--- a/gcc/opts-global.c
+++ b/gcc/opts-global.c
@@ -42,6 +42,7 @@ along with GCC; see the file COPYING3. If not see
#include "toplev.h"
#include "tree-pass.h"
#include "context.h"
+#include "asan.h"
typedef const char *const_char_p; /* For DEF_VEC_P. */
@@ -426,6 +427,18 @@ handle_common_deferred_options (void)
stack_limit_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (opt->arg));
break;
+ case OPT_fasan_shadow_offset_:
+ if (!(flag_sanitize & SANITIZE_KERNEL_ADDRESS))
+ error ("-fasan-shadow-offset should only be used "
+ "with -fsanitize=kernel-address");
+ if (!set_asan_shadow_offset (opt->arg))
+ error ("unrecognized shadow offset %qs", opt->arg);
+ break;
+
+ case OPT_fsanitize_sections_:
+ set_sanitized_sections (opt->arg);
+ break;
+
default:
gcc_unreachable ();
}
diff --git a/gcc/opts.c b/gcc/opts.c
index fbdebd79b1e..3bd7adba84d 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -869,19 +869,42 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
if (opts->x_dwarf_split_debug_info)
opts->x_debug_generate_pub_sections = 1;
- /* Userspace and kernel ASan conflict with each other and with TSan. */
+ /* Userspace and kernel ASan conflict with each other. */
- if ((flag_sanitize & SANITIZE_USER_ADDRESS)
- && (flag_sanitize & SANITIZE_KERNEL_ADDRESS))
+ if ((opts->x_flag_sanitize & SANITIZE_USER_ADDRESS)
+ && (opts->x_flag_sanitize & SANITIZE_KERNEL_ADDRESS))
error_at (loc,
- "-fsanitize=address is incompatible with "
- "-fsanitize=kernel-address");
+ "-fsanitize=address is incompatible with "
+ "-fsanitize=kernel-address");
- if ((flag_sanitize & SANITIZE_ADDRESS)
- && (flag_sanitize & SANITIZE_THREAD))
+ /* And with TSan. */
+
+ if ((opts->x_flag_sanitize & SANITIZE_ADDRESS)
+ && (opts->x_flag_sanitize & SANITIZE_THREAD))
error_at (loc,
- "-fsanitize=address and -fsanitize=kernel-address "
- "are incompatible with -fsanitize=thread");
+ "-fsanitize=address and -fsanitize=kernel-address "
+ "are incompatible with -fsanitize=thread");
+
+ /* Error recovery is not allowed for LSan and TSan. */
+
+ if (opts->x_flag_sanitize_recover & SANITIZE_THREAD)
+ error_at (loc, "-fsanitize-recover=thread is not supported");
+
+ if (opts->x_flag_sanitize_recover & SANITIZE_LEAK)
+ error_at (loc, "-fsanitize-recover=leak is not supported");
+
+ /* When instrumenting the pointers, we don't want to remove
+ the null pointer checks. */
+ if (opts->x_flag_sanitize & (SANITIZE_NULL | SANITIZE_NONNULL_ATTRIBUTE
+ | SANITIZE_RETURNS_NONNULL_ATTRIBUTE))
+ opts->x_flag_delete_null_pointer_checks = 0;
+
+ /* Aggressive compiler optimizations may cause false negatives. */
+ if (opts->x_flag_sanitize)
+ {
+ opts->x_flag_aggressive_loop_optimizations = 0;
+ opts->x_flag_strict_overflow = 0;
+ }
}
#define LEFT_COLUMN 27
@@ -1456,8 +1479,12 @@ common_handle_option (struct gcc_options *opts,
break;
case OPT_fsanitize_:
+ case OPT_fsanitize_recover_:
{
const char *p = arg;
+ unsigned int *flag
+ = code == OPT_fsanitize_ ? &opts->x_flag_sanitize
+ : &opts->x_flag_sanitize_recover;
while (*p != 0)
{
static const struct
@@ -1486,6 +1513,22 @@ common_handle_option (struct gcc_options *opts,
sizeof "signed-integer-overflow" -1 },
{ "bool", SANITIZE_BOOL, sizeof "bool" - 1 },
{ "enum", SANITIZE_ENUM, sizeof "enum" - 1 },
+ { "float-divide-by-zero", SANITIZE_FLOAT_DIVIDE,
+ sizeof "float-divide-by-zero" - 1 },
+ { "float-cast-overflow", SANITIZE_FLOAT_CAST,
+ sizeof "float-cast-overflow" - 1 },
+ { "bounds", SANITIZE_BOUNDS, sizeof "bounds" - 1 },
+ { "bounds-strict", SANITIZE_BOUNDS | SANITIZE_BOUNDS_STRICT,
+ sizeof "bounds-strict" - 1 },
+ { "alignment", SANITIZE_ALIGNMENT, sizeof "alignment" - 1 },
+ { "nonnull-attribute", SANITIZE_NONNULL_ATTRIBUTE,
+ sizeof "nonnull-attribute" - 1 },
+ { "returns-nonnull-attribute",
+ SANITIZE_RETURNS_NONNULL_ATTRIBUTE,
+ sizeof "returns-nonnull-attribute" - 1 },
+ { "object-size", SANITIZE_OBJECT_SIZE,
+ sizeof "object-size" - 1 },
+ { "all", ~0, sizeof "all" - 1 },
{ NULL, 0, 0 }
};
const char *comma;
@@ -1509,32 +1552,38 @@ common_handle_option (struct gcc_options *opts,
&& memcmp (p, spec[i].name, len) == 0)
{
/* Handle both -fsanitize and -fno-sanitize cases. */
- if (value)
- flag_sanitize |= spec[i].flag;
+ if (value && spec[i].flag == ~0U)
+ {
+ if (code == OPT_fsanitize_)
+ error_at (loc, "-fsanitize=all option is not valid");
+ else
+ *flag |= ~(SANITIZE_USER_ADDRESS | SANITIZE_THREAD
+ | SANITIZE_LEAK);
+ }
+ else if (value)
+ *flag |= spec[i].flag;
else
- flag_sanitize &= ~spec[i].flag;
+ *flag &= ~spec[i].flag;
found = true;
break;
}
if (! found)
error_at (loc,
- "unrecognized argument to -fsanitize= option: %q.*s",
- (int) len, p);
+ "unrecognized argument to -fsanitize%s= option: %q.*s",
+ code == OPT_fsanitize_ ? "" : "-recover", (int) len, p);
if (comma == NULL)
break;
p = comma + 1;
}
- /* When instrumenting the pointers, we don't want to remove
- the null pointer checks. */
- if (flag_sanitize & SANITIZE_NULL)
- opts->x_flag_delete_null_pointer_checks = 0;
+ if (code != OPT_fsanitize_)
+ break;
/* Kernel ASan implies normal ASan but does not yet support
all features. */
- if (flag_sanitize & SANITIZE_KERNEL_ADDRESS)
+ if (opts->x_flag_sanitize & SANITIZE_KERNEL_ADDRESS)
{
maybe_set_param_value (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD, 0,
opts->x_param_values,
@@ -1553,6 +1602,19 @@ common_handle_option (struct gcc_options *opts,
break;
}
+ case OPT_fasan_shadow_offset_:
+ /* Deferred. */
+ break;
+
+ case OPT_fsanitize_recover:
+ if (value)
+ opts->x_flag_sanitize_recover
+ |= SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT;
+ else
+ opts->x_flag_sanitize_recover
+ &= ~(SANITIZE_UNDEFINED | SANITIZE_NONDEFAULT);
+ break;
+
case OPT_O:
case OPT_Os:
case OPT_Ofast:
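With -fsanitize-recover now parsed per component, a program can report several distinct runtime errors in one run; a small hypothetical demonstration, compiled with g++ -fsanitize=signed-integer-overflow (recovery is on by default for ubsan):

#include <climits>

int main ()
{
  volatile int x = INT_MAX, y = INT_MAX;
  ++x;   /* first overflow report */
  ++y;   /* second report; with -fno-sanitize-recover the program would have
            exited with a non-zero status after the first one */
  return 0;
}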
diff --git a/gcc/params.def b/gcc/params.def
index 04a13232695..2af66e7a0ce 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -1088,7 +1088,7 @@ DEFPARAM (PARAM_ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD,
"asan-instrumentation-with-call-threshold",
"Use callbacks instead of inline code if number of accesses "
"in function becomes greater or equal to this number",
- INT_MAX, 0, INT_MAX)
+ 7000, 0, INT_MAX)
DEFPARAM (PARAM_UNINIT_CONTROL_DEP_ATTEMPTS,
"uninit-control-dep-attempts",
diff --git a/gcc/predict.c b/gcc/predict.c
index 249433f9129..e535975036f 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1923,6 +1923,8 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
if (predictor)
*predictor = PRED_COMPARE_AND_SWAP;
return boolean_true_node;
+ default:
+ break;
}
}
diff --git a/gcc/sanitizer.def b/gcc/sanitizer.def
index b1e6f0497d9..ae3352df899 100644
--- a/gcc/sanitizer.def
+++ b/gcc/sanitizer.def
@@ -27,7 +27,10 @@ along with GCC; see the file COPYING3. If not see
for other FEs by asan.c. */
/* Address Sanitizer */
-DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v3",
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init",
+ BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_VERSION_MISMATCH_CHECK,
+ "__asan_version_mismatch_check_v6",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
/* Do not reorder the BUILT_IN_ASAN_{REPORT,CHECK}* builtins, e.g. cfgcleanup.c
relies on this order. */
@@ -57,6 +60,44 @@ DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE16, "__asan_report_store16",
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE_N, "__asan_report_store_n",
BT_FN_VOID_PTR_PTRMODE,
ATTR_TMPURE_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD1_NOABORT,
+ "__asan_report_load1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD2_NOABORT,
+ "__asan_report_load2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD4_NOABORT,
+ "__asan_report_load4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD8_NOABORT,
+ "__asan_report_load8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD16_NOABORT,
+ "__asan_report_load16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_LOAD_N_NOABORT,
+ "__asan_report_load_n_noabort",
+ BT_FN_VOID_PTR_PTRMODE,
+ ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE1_NOABORT,
+ "__asan_report_store1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE2_NOABORT,
+ "__asan_report_store2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE4_NOABORT,
+ "__asan_report_store4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE8_NOABORT,
+ "__asan_report_store8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE16_NOABORT,
+ "__asan_report_store16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REPORT_STORE_N_NOABORT,
+ "__asan_report_store_n_noabort",
+ BT_FN_VOID_PTR_PTRMODE,
+ ATTR_TMPURE_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD1, "__asan_load1",
BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD2, "__asan_load2",
@@ -81,6 +122,30 @@ DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE16, "__asan_store16",
BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STOREN, "__asan_storeN",
BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD1_NOABORT, "__asan_load1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD2_NOABORT, "__asan_load2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD4_NOABORT, "__asan_load4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD8_NOABORT, "__asan_load8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOAD16_NOABORT, "__asan_load16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_LOADN_NOABORT, "__asan_loadN_noabort",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE1_NOABORT, "__asan_store1_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE2_NOABORT, "__asan_store2_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE4_NOABORT, "__asan_store4_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE8_NOABORT, "__asan_store8_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STORE16_NOABORT, "__asan_store16_noabort",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_STOREN_NOABORT, "__asan_storeN_noabort",
+ BT_FN_VOID_PTR_PTRMODE, ATTR_TMPURE_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_REGISTER_GLOBALS,
"__asan_register_globals",
BT_FN_VOID_PTR_PTRMODE, ATTR_NOTHROW_LEAF_LIST)
@@ -332,7 +397,7 @@ DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS,
DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE,
"__ubsan_handle_builtin_unreachable",
BT_FN_VOID_PTR,
- ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+ ATTR_COLD_CONST_NORETURN_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_MISSING_RETURN,
"__ubsan_handle_missing_return",
BT_FN_VOID_PTR,
@@ -365,3 +430,71 @@ DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE,
"__ubsan_handle_load_invalid_value",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_DIVREM_OVERFLOW_ABORT,
+ "__ubsan_handle_divrem_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SHIFT_OUT_OF_BOUNDS_ABORT,
+ "__ubsan_handle_shift_out_of_bounds_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_VLA_BOUND_NOT_POSITIVE_ABORT,
+ "__ubsan_handle_vla_bound_not_positive_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT,
+ "__ubsan_handle_type_mismatch_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW_ABORT,
+ "__ubsan_handle_add_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW_ABORT,
+ "__ubsan_handle_sub_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW_ABORT,
+ "__ubsan_handle_mul_overflow_abort",
+ BT_FN_VOID_PTR_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW_ABORT,
+ "__ubsan_handle_negate_overflow_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT,
+ "__ubsan_handle_load_invalid_value_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW,
+ "__ubsan_handle_float_cast_overflow",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW_ABORT,
+ "__ubsan_handle_float_cast_overflow_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS,
+ "__ubsan_handle_out_of_bounds",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS_ABORT,
+ "__ubsan_handle_out_of_bounds_abort",
+ BT_FN_VOID_PTR_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_ARG,
+ "__ubsan_handle_nonnull_arg",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_ARG_ABORT,
+ "__ubsan_handle_nonnull_arg_abort",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN,
+ "__ubsan_handle_nonnull_return",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NOTHROW_LEAF_LIST)
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT,
+ "__ubsan_handle_nonnull_return_abort",
+ BT_FN_VOID_PTR,
+ ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
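
[Editor's note: every __ubsan_handle_* routine above now has a sibling ending in _abort, declared cold and noreturn. Which one the compiler calls depends on whether recovery is enabled for that particular check via -fsanitize-recover=; the selection itself happens in gcc/ubsan.c. The sketch below only illustrates that rule, with an invented helper and simplified flag bits.]

#include <stdio.h>

/* Illustration only -- simplified stand-ins, not the real SANITIZE_* values.  */
enum { SANITIZE_SHIFT_BIT = 1 << 0, SANITIZE_DIVIDE_BIT = 1 << 1 };

/* Recoverable checks call the plain handler and keep running;
   non-recoverable ones call the noreturn _abort variant.  */
static const char *
choose_ubsan_handler (unsigned int recover_mask, unsigned int check,
                      const char *name, const char *name_abort)
{
  return (recover_mask & check) ? name : name_abort;
}

int
main (void)
{
  unsigned int recover_mask = SANITIZE_SHIFT_BIT;  /* e.g. -fsanitize-recover=shift */

  /* Division overflow is not in the recover mask, so the _abort handler is
     chosen; prints "__ubsan_handle_divrem_overflow_abort".  */
  puts (choose_ubsan_handler (recover_mask, SANITIZE_DIVIDE_BIT,
                              "__ubsan_handle_divrem_overflow",
                              "__ubsan_handle_divrem_overflow_abort"));
  return 0;
}
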
diff --git a/gcc/sanopt.c b/gcc/sanopt.c
new file mode 100644
index 00000000000..5a35166d546
--- /dev/null
+++ b/gcc/sanopt.c
@@ -0,0 +1,634 @@
+/* Optimize and expand sanitizer functions.
+ Copyright (C) 2014 Free Software Foundation, Inc.
+ Contributed by Marek Polacek <polacek@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "hash-table.h"
+#include "predict.h"
+#include "vec.h"
+#include "hashtab.h"
+#include "hash-set.h"
+#include "tm.h"
+#include "hard-reg-set.h"
+#include "function.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "basic-block.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-expr.h"
+#include "is-a.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "gimple-iterator.h"
+#include "hash-map.h"
+#include "plugin-api.h"
+#include "tree-pass.h"
+#include "asan.h"
+#include "gimple-pretty-print.h"
+#include "tm_p.h"
+#include "langhooks.h"
+#include "ubsan.h"
+#include "params.h"
+#include "tree-ssa-operands.h"
+
+
+/* This is used to carry information about basic blocks. It is
+ attached to the AUX field of the standard CFG block. */
+
+struct sanopt_info
+{
+ /* True if this BB might call (directly or indirectly) free/munmap
+ or similar operation. */
+ bool has_freeing_call_p;
+
+ /* True if HAS_FREEING_CALL_P flag has been computed. */
+ bool has_freeing_call_computed_p;
+
+ /* True if there is a block with HAS_FREEING_CALL_P flag set
+ on any path between an immediate dominator of BB, denoted
+ imm(BB), and BB. */
+ bool imm_dom_path_with_freeing_call_p;
+
+ /* True if IMM_DOM_PATH_WITH_FREEING_CALL_P has been computed. */
+ bool imm_dom_path_with_freeing_call_computed_p;
+
+ /* Number of possibly freeing calls encountered in this bb
+ (so far). */
+ uint64_t freeing_call_events;
+
+ /* True if BB is currently being visited during computation
+ of IMM_DOM_PATH_WITH_FREEING_CALL_P flag. */
+ bool being_visited_p;
+
+ /* True if this BB has been visited in the dominator walk. */
+ bool visited_p;
+};
+
+/* This is used to carry various hash maps and variables used
+ in sanopt_optimize_walker. */
+
+struct sanopt_ctx
+{
+ /* This map maps a pointer (the first argument of UBSAN_NULL) to
+ a vector of UBSAN_NULL call statements that check this pointer. */
+ hash_map<tree, auto_vec<gimple> > null_check_map;
+
+ /* This map maps a pointer (the second argument of ASAN_CHECK) to
+ a vector of ASAN_CHECK call statements that check the access. */
+ hash_map<tree, auto_vec<gimple> > asan_check_map;
+
+ /* Number of IFN_ASAN_CHECK statements. */
+ int asan_num_accesses;
+};
+
+
+/* Return true if there might be any call to free/munmap operation
+ on any path in between DOM (which should be imm(BB)) and BB. */
+
+static bool
+imm_dom_path_with_freeing_call (basic_block bb, basic_block dom)
+{
+ sanopt_info *info = (sanopt_info *) bb->aux;
+ edge e;
+ edge_iterator ei;
+
+ if (info->imm_dom_path_with_freeing_call_computed_p)
+ return info->imm_dom_path_with_freeing_call_p;
+
+ info->being_visited_p = true;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ sanopt_info *pred_info = (sanopt_info *) e->src->aux;
+
+ if (e->src == dom)
+ continue;
+
+ if ((pred_info->imm_dom_path_with_freeing_call_computed_p
+ && pred_info->imm_dom_path_with_freeing_call_p)
+ || (pred_info->has_freeing_call_computed_p
+ && pred_info->has_freeing_call_p))
+ {
+ info->imm_dom_path_with_freeing_call_computed_p = true;
+ info->imm_dom_path_with_freeing_call_p = true;
+ info->being_visited_p = false;
+ return true;
+ }
+ }
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ sanopt_info *pred_info = (sanopt_info *) e->src->aux;
+
+ if (e->src == dom)
+ continue;
+
+ if (pred_info->has_freeing_call_computed_p)
+ continue;
+
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_bb (e->src); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+
+ if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
+ {
+ pred_info->has_freeing_call_p = true;
+ break;
+ }
+ }
+
+ pred_info->has_freeing_call_computed_p = true;
+ if (pred_info->has_freeing_call_p)
+ {
+ info->imm_dom_path_with_freeing_call_computed_p = true;
+ info->imm_dom_path_with_freeing_call_p = true;
+ info->being_visited_p = false;
+ return true;
+ }
+ }
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->src == dom)
+ continue;
+
+ basic_block src;
+ for (src = e->src; src != dom; )
+ {
+ sanopt_info *pred_info = (sanopt_info *) src->aux;
+ if (pred_info->being_visited_p)
+ break;
+ basic_block imm = get_immediate_dominator (CDI_DOMINATORS, src);
+ if (imm_dom_path_with_freeing_call (src, imm))
+ {
+ info->imm_dom_path_with_freeing_call_computed_p = true;
+ info->imm_dom_path_with_freeing_call_p = true;
+ info->being_visited_p = false;
+ return true;
+ }
+ src = imm;
+ }
+ }
+
+ info->imm_dom_path_with_freeing_call_computed_p = true;
+ info->imm_dom_path_with_freeing_call_p = false;
+ info->being_visited_p = false;
+ return false;
+}
+
+/* Optimize away redundant UBSAN_NULL calls. */
+
+static bool
+maybe_optimize_ubsan_null_ifn (struct sanopt_ctx *ctx, gimple stmt)
+{
+ gcc_assert (gimple_call_num_args (stmt) == 3);
+ tree ptr = gimple_call_arg (stmt, 0);
+ tree cur_align = gimple_call_arg (stmt, 2);
+ gcc_assert (TREE_CODE (cur_align) == INTEGER_CST);
+ bool remove = false;
+
+ auto_vec<gimple> &v = ctx->null_check_map.get_or_insert (ptr);
+ if (v.is_empty ())
+ {
+ /* For this PTR we don't have any UBSAN_NULL stmts recorded, so there's
+ nothing to optimize yet. */
+ v.safe_push (stmt);
+ return false;
+ }
+
+ /* We already have recorded a UBSAN_NULL check for this pointer. Perhaps we
+ can drop this one. But only if this check doesn't specify stricter
+ alignment. */
+ while (!v.is_empty ())
+ {
+ gimple g = v.last ();
+ /* Remove statements for BBs that have been already processed. */
+ sanopt_info *si = (sanopt_info *) gimple_bb (g)->aux;
+ if (si->visited_p)
+ {
+ v.pop ();
+ continue;
+ }
+
+ /* At this point we shouldn't have any statements that aren't dominating
+ the current BB. */
+ tree align = gimple_call_arg (g, 2);
+ int kind = tree_to_shwi (gimple_call_arg (g, 1));
+      /* If this is a NULL pointer check for an access that would have
+	 faulted (SIGSEGV) anyway, we can remove it.  */
+ if (integer_zerop (align)
+ && (kind == UBSAN_LOAD_OF
+ || kind == UBSAN_STORE_OF
+ || kind == UBSAN_MEMBER_ACCESS))
+ remove = true;
+ /* Otherwise remove the check in non-recovering mode, or if the
+ stmts have same location. */
+ else if (integer_zerop (align))
+ remove = (flag_sanitize_recover & SANITIZE_NULL) == 0
+ || flag_sanitize_undefined_trap_on_error
+ || gimple_location (g) == gimple_location (stmt);
+ else if (tree_int_cst_le (cur_align, align))
+ remove = (flag_sanitize_recover & SANITIZE_ALIGNMENT) == 0
+ || flag_sanitize_undefined_trap_on_error
+ || gimple_location (g) == gimple_location (stmt);
+ if (!remove && gimple_bb (g) == gimple_bb (stmt)
+ && tree_int_cst_compare (cur_align, align) == 0)
+ v.pop ();
+ break;
+ }
+
+ if (!remove)
+ v.safe_push (stmt);
+ return remove;
+}
+
+/* Optimize away redundant ASAN_CHECK calls. */
+
+static bool
+maybe_optimize_asan_check_ifn (struct sanopt_ctx *ctx, gimple stmt)
+{
+ gcc_assert (gimple_call_num_args (stmt) == 4);
+ tree ptr = gimple_call_arg (stmt, 1);
+ tree len = gimple_call_arg (stmt, 2);
+ basic_block bb = gimple_bb (stmt);
+ sanopt_info *info = (sanopt_info *) bb->aux;
+
+ if (TREE_CODE (len) != INTEGER_CST)
+ return false;
+ if (integer_zerop (len))
+ return false;
+
+ gimple_set_uid (stmt, info->freeing_call_events);
+
+ auto_vec<gimple> &v = ctx->asan_check_map.get_or_insert (ptr);
+ if (v.is_empty ())
+ {
+ /* For this PTR we don't have any ASAN_CHECK stmts recorded, so there's
+ nothing to optimize yet. */
+ v.safe_push (stmt);
+ return false;
+ }
+
+  /* We have already recorded an ASAN_CHECK check for this pointer.  Perhaps
+     we can drop this one.  But only if this check doesn't specify a larger
+     size.  */
+ while (!v.is_empty ())
+ {
+ gimple g = v.last ();
+ /* Remove statements for BBs that have been already processed. */
+ sanopt_info *si = (sanopt_info *) gimple_bb (g)->aux;
+ if (si->visited_p)
+ v.pop ();
+ else
+ break;
+ }
+
+ unsigned int i;
+ gimple g;
+ gimple to_pop = NULL;
+ bool remove = false;
+ basic_block last_bb = bb;
+ bool cleanup = false;
+
+ FOR_EACH_VEC_ELT_REVERSE (v, i, g)
+ {
+ basic_block gbb = gimple_bb (g);
+ sanopt_info *si = (sanopt_info *) gbb->aux;
+ if (gimple_uid (g) < si->freeing_call_events)
+ {
+	  /* If there is a potentially freeing call after g in gbb, we should
+	     remove it from the vector; it can't be used in the optimization.  */
+ cleanup = true;
+ continue;
+ }
+
+ if (TREE_CODE (len) != INTEGER_CST)
+ {
+	      /* If the current bb already has some stmts for ptr that are not
+		 followed by a freeing call event, there is no need to insert
+		 anything.  A non-constant len is treated just like length 1.  */
+ if (gbb == bb)
+ return false;
+ break;
+ }
+
+ tree glen = gimple_call_arg (g, 2);
+ /* If we've checked only smaller length than we want to check now,
+ we can't remove the current stmt. If g is in the same basic block,
+ we want to remove it though, as the current stmt is better. */
+ if (tree_int_cst_lt (glen, len))
+ {
+ if (gbb == bb)
+ {
+ to_pop = g;
+ cleanup = true;
+ }
+ continue;
+ }
+
+ while (last_bb != gbb)
+ {
+ /* Paths from last_bb to bb have been checked before.
+ gbb is necessarily a dominator of last_bb, but not necessarily
+ immediate dominator. */
+ if (((sanopt_info *) last_bb->aux)->freeing_call_events)
+ break;
+
+ basic_block imm = get_immediate_dominator (CDI_DOMINATORS, last_bb);
+ gcc_assert (imm);
+ if (imm_dom_path_with_freeing_call (last_bb, imm))
+ break;
+
+ last_bb = imm;
+ }
+ if (last_bb == gbb)
+ remove = true;
+ break;
+ }
+
+ if (cleanup)
+ {
+ unsigned int j = 0, l = v.length ();
+ for (i = 0; i < l; i++)
+ if (v[i] != to_pop
+ && (gimple_uid (v[i])
+ == ((sanopt_info *)
+ gimple_bb (v[i])->aux)->freeing_call_events))
+ {
+ if (i != j)
+ v[j] = v[i];
+ j++;
+ }
+ v.truncate (j);
+ }
+
+ if (!remove)
+ v.safe_push (stmt);
+ return remove;
+}
+
+/* Try to optimize away redundant UBSAN_NULL and ASAN_CHECK calls.
+
+ We walk blocks in the CFG via a depth first search of the dominator
+ tree; we push unique UBSAN_NULL or ASAN_CHECK statements into a vector
+ in the NULL_CHECK_MAP or ASAN_CHECK_MAP hash maps as we enter the
+ blocks. When leaving a block, we mark the block as visited; then
+ when checking the statements in the vector, we ignore statements that
+ are coming from already visited blocks, because these cannot dominate
+ anything anymore. CTX is a sanopt context. */
+
+static void
+sanopt_optimize_walker (basic_block bb, struct sanopt_ctx *ctx)
+{
+ basic_block son;
+ gimple_stmt_iterator gsi;
+ sanopt_info *info = (sanopt_info *) bb->aux;
+ bool asan_check_optimize
+ = (flag_sanitize & SANITIZE_ADDRESS)
+ && ((flag_sanitize & flag_sanitize_recover
+ & SANITIZE_KERNEL_ADDRESS) == 0);
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
+ {
+ gimple stmt = gsi_stmt (gsi);
+ bool remove = false;
+
+ if (!is_gimple_call (stmt))
+ {
+	    /* Handle asm volatile or asm with "memory" clobber
+	       the same as a potentially freeing call.  */
+ if (gimple_code (stmt) == GIMPLE_ASM
+ && asan_check_optimize
+ && (gimple_asm_clobbers_memory_p (stmt)
+ || gimple_asm_volatile_p (stmt)))
+ info->freeing_call_events++;
+ gsi_next (&gsi);
+ continue;
+ }
+
+ if (asan_check_optimize && !nonfreeing_call_p (stmt))
+ info->freeing_call_events++;
+
+ if (gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_UBSAN_NULL:
+ remove = maybe_optimize_ubsan_null_ifn (ctx, stmt);
+ break;
+ case IFN_ASAN_CHECK:
+ if (asan_check_optimize)
+ remove = maybe_optimize_asan_check_ifn (ctx, stmt);
+ if (!remove)
+ ctx->asan_num_accesses++;
+ break;
+ default:
+ break;
+ }
+
+ if (remove)
+ {
+ /* Drop this check. */
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Optimizing out\n ");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ fprintf (dump_file, "\n");
+ }
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi, true);
+ }
+ else
+ gsi_next (&gsi);
+ }
+
+ if (asan_check_optimize)
+ {
+ info->has_freeing_call_p = info->freeing_call_events != 0;
+ info->has_freeing_call_computed_p = true;
+ }
+
+ for (son = first_dom_son (CDI_DOMINATORS, bb);
+ son;
+ son = next_dom_son (CDI_DOMINATORS, son))
+ sanopt_optimize_walker (son, ctx);
+
+ /* We're leaving this BB, so mark it to that effect. */
+ info->visited_p = true;
+}
+
+/* Try to remove redundant sanitizer checks in function FUN. */
+
+static int
+sanopt_optimize (function *fun)
+{
+ struct sanopt_ctx ctx;
+ ctx.asan_num_accesses = 0;
+
+ /* Set up block info for each basic block. */
+ alloc_aux_for_blocks (sizeof (sanopt_info));
+
+ /* We're going to do a dominator walk, so ensure that we have
+ dominance information. */
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ /* Recursively walk the dominator tree optimizing away
+ redundant checks. */
+ sanopt_optimize_walker (ENTRY_BLOCK_PTR_FOR_FN (cfun), &ctx);
+
+ free_aux_for_blocks ();
+
+ return ctx.asan_num_accesses;
+}
+
+/* Perform optimization of sanitize functions. */
+
+namespace {
+
+const pass_data pass_data_sanopt =
+{
+ GIMPLE_PASS, /* type */
+ "sanopt", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ true, /* has_gate */
+ true, /* has_execute */
+ TV_NONE, /* tv_id */
+ ( PROP_ssa | PROP_cfg | PROP_gimple_leh ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa, /* todo_flags_finish */
+};
+
+class pass_sanopt : public gimple_opt_pass
+{
+public:
+ pass_sanopt (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_sanopt, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual bool gate () { return flag_sanitize; }
+ virtual unsigned int execute ();
+
+}; // class pass_sanopt
+
+unsigned int
+pass_sanopt::execute ()
+{
+ basic_block bb;
+ int asan_num_accesses = 0;
+
+ /* Try to remove redundant checks. */
+ if (optimize
+ && (flag_sanitize
+ & (SANITIZE_NULL | SANITIZE_ALIGNMENT | SANITIZE_ADDRESS)))
+ asan_num_accesses = sanopt_optimize (fun);
+ else if (flag_sanitize & SANITIZE_ADDRESS)
+ {
+ gimple_stmt_iterator gsi;
+ FOR_EACH_BB_FN (bb, cfun)
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_call (stmt) && gimple_call_internal_p (stmt)
+ && gimple_call_internal_fn (stmt) == IFN_ASAN_CHECK)
+ ++asan_num_accesses;
+ }
+ }
+
+ bool use_calls = ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD < INT_MAX
+ && asan_num_accesses >= ASAN_INSTRUMENTATION_WITH_CALL_THRESHOLD;
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
+ {
+ gimple stmt = gsi_stmt (gsi);
+ bool no_next = false;
+
+ if (!is_gimple_call (stmt))
+ {
+ gsi_next (&gsi);
+ continue;
+ }
+
+ if (gimple_call_internal_p (stmt))
+ {
+ enum internal_fn ifn = gimple_call_internal_fn (stmt);
+ switch (ifn)
+ {
+ case IFN_UBSAN_NULL:
+ no_next = ubsan_expand_null_ifn (&gsi);
+ break;
+ case IFN_UBSAN_BOUNDS:
+ no_next = ubsan_expand_bounds_ifn (&gsi);
+ break;
+ case IFN_UBSAN_OBJECT_SIZE:
+ no_next = ubsan_expand_objsize_ifn (&gsi);
+ break;
+ case IFN_ASAN_CHECK:
+ no_next = asan_expand_check_ifn (&gsi, use_calls);
+ break;
+ default:
+ break;
+ }
+ }
+ else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
+ {
+ tree callee = gimple_call_fndecl (stmt);
+ switch (DECL_FUNCTION_CODE (callee))
+ {
+ case BUILT_IN_UNREACHABLE:
+ if (flag_sanitize & SANITIZE_UNREACHABLE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (cfun->decl)))
+ no_next = ubsan_instrument_unreachable (&gsi);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Expanded\n ");
+ print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+ fprintf (dump_file, "\n");
+ }
+
+ if (!no_next)
+ gsi_next (&gsi);
+ }
+ }
+ return 0;
+}
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_sanopt (gcc::context *ctxt)
+{
+ return new pass_sanopt (ctxt);
+}
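
[Editor's note: to make the effect of the new pass concrete, here is a small input of the kind it targets. This is a sketch, not one of the testcases added below, and the dump-flag spelling is inferred from the pass name registered above, so verify it locally. With -fsanitize=null both loads through p receive a UBSAN_NULL check, and the second one, dominated by an identical check on the same pointer, should show up as "Optimizing out" in the sanopt dump.]

/* redundant-null.c -- suggested:
     gcc -O2 -fsanitize=null -fdump-tree-sanopt-details -c redundant-null.c  */

int
sum_twice (int *p)
{
  int a = *p;   /* first NULL check on p is kept */
  int b = *p;   /* dominated duplicate check should be removed */
  return a + b;
}
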
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 9dc10d454fd..bb5f5803c36 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2312,6 +2312,30 @@ layout_type (tree type)
gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}
+/* Return the least alignment, in bytes, required for type TYPE.  */
+
+unsigned int
+min_align_of_type (tree type)
+{
+ unsigned int align = TYPE_ALIGN (type);
+ if (!TYPE_USER_ALIGN (type))
+ {
+ align = MIN (align, BIGGEST_ALIGNMENT);
+#ifdef BIGGEST_FIELD_ALIGNMENT
+ align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
+#endif
+ unsigned int field_align = align;
+#ifdef ADJUST_FIELD_ALIGN
+ tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
+ type);
+ field_align = ADJUST_FIELD_ALIGN (field, field_align);
+ ggc_free (field);
+#endif
+ align = MIN (align, field_align);
+ }
+ return align / BITS_PER_UNIT;
+}
+
/* Vector types need to re-check the target flags each time we report
the machine mode. We need to do this because attribute target can
change the result of vector_mode_supported_p and have_regs_of_mode
diff --git a/gcc/stor-layout.h b/gcc/stor-layout.h
index 74a2c6c4412..e2d6b104e43 100644
--- a/gcc/stor-layout.h
+++ b/gcc/stor-layout.h
@@ -59,6 +59,9 @@ extern void layout_decl (tree, unsigned);
node, does nothing except for the first time. */
extern void layout_type (tree);
+/* Return the least alignment in bytes required for type TYPE. */
+extern unsigned int min_align_of_type (tree);
+
/* Construct various nodes representing fract or accum data types. */
extern tree make_fract_type (int, int, int);
extern tree make_accum_type (int, int, int);
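
[Editor's note: min_align_of_type, declared above, reports the smallest alignment in bytes that an object of TYPE is guaranteed to have once field-layout rules are applied. The example below is a rough user-level analogue for illustration only; it ignores the BIGGEST_FIELD_ALIGNMENT and ADJUST_FIELD_ALIGN target hooks that the real function honours.]

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct packed_double { char c; double d; } __attribute__ ((packed));

int
main (void)
{
  /* For plain scalars the minimum field alignment usually equals _Alignof;
     packed or attribute-laden aggregates are where the compiler-internal
     computation can legitimately return something smaller.  */
  printf ("alignof (double) = %zu bytes\n", alignof (double));
  printf ("offsetof (struct packed_double, d) = %zu\n",
          offsetof (struct packed_double, d));
  return 0;
}
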
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index b6739983827..88e7ab919f8 100755
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,808 @@
+2016-04-23 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/70712
+ * c-c++-common/asan/pr70712.c: New test.
+
+2016-04-08 Maxim Ostapenko <m.ostapenko@samsung.com>
+
+ PR sanitizer/70541
+ * c-c++-common/asan/pr70541.c: New test.
+
+2016-02-04 Martin Liska <mliska@suse.cz>
+
+ * g++.dg/asan/pr69276.C: New test.
+
+2016-01-06 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/69099
+ * gcc.dg/ubsan/float-cast-overflow-atomic.c: New test.
+
+2016-01-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/69055
+ * gfortran.dg/pr69055.f90: New test.
+
+2015-12-12 Paolo Bonzini <bonzini@gnu.org>
+
+ PR sanitizer/68418
+ * gcc.dg/ubsan/c99-wrapv-shift-1.c,
+ gcc.dg/ubsan/c99-wrapv-shift-2.c: New testcases.
+
+2015-10-29 Richard Biener <rguenther@suse.de>
+
+ PR middle-end/56956
+ * c-c++-common/ubsan/pr56956.c: New testcase.
+
+2015-09-25 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/bounds-11.c: New test.
+ * c-c++-common/ubsan/bounds-12.c: New test.
+
+2015-09-25 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/64906
+ * c-c++-common/ubsan/pr64906.c: New test.
+
+2015-09-17 Bernd Edlinger <bernd.edlinger@hotmail.de>
+
+ PR sanitizer/64078
+ * c-c++-common/ubsan/object-size-9.c (s): Add alignment attribute.
+ (f2, f3): Make the function static.
+ * c-c++-common/ubsan/object-size-10.c (a, b): Add alignment attribute.
+
+2015-09-04 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/67279
+ * gcc.dg/ubsan/pr67279.c: New test.
+
+2015-07-31 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/66977
+ * g++.dg/ubsan/pr66977.C: New test.
+
+2015-07-23 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/66908
+ * c-c++-common/ubsan/pr66908.c: New test.
+
+2013-08-07 Ilya Enkovich <ilya.enkovich@intel.com>
+
+ * gcc.target/i386/struct-size.c: New.
+
+2015-11-23 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/asan/halt_on_error-1.c: New test.
+ * c-c++-common/asan/halt_on_error-2.c: Likewise.
+
+2015-10-23 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR sanitizer/68042
+ * c-c++-common/asan/memcmp-1.c: Adjust test to pass on Darwin.
+ * c-c++-common/asan/sanity-check-pure-c-1.c: Likewise.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/ubsan/float-cast-overflow-10.c: Adjust test.
+ * c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
+ * g++.dg/asan/default-options-1.C: Likewise.
+
+2015-06-08 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/66452
+ * g++.dg/ubsan/pr66452.C: New test.
+
+2015-05-07 Marek Polacek <polacek@redhat.com>
+ Martin Uecker <uecker@eecs.berkeley.edu>
+
+ * c-c++-common/ubsan/bounds-10.c: New test.
+
+2015-04-09 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/65709
+ * c-c++-common/ubsan/align-9.c: New test.
+
+2015-03-27 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/65583
+ * g++.dg/ubsan/pr65583.C: New test.
+
+2015-03-10 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/65367
+ * c-c++-common/ubsan/pr65367.c: New test.
+
+2015-03-07 Marek Polacek <polacek@redhat.com>
+ Martin Uecker <uecker@eecs.berkeley.edu>
+
+ PR sanitizer/65280
+ * c-c++-common/ubsan/bounds-1.c: Add testing of flexible array
+ member-like arrays.
+ * c-c++-common/ubsan/bounds-8.c: New test.
+ * c-c++-common/ubsan/bounds-9.c: New test.
+ * gcc.dg/ubsan/bounds-2.c: New test.
+
+2015-03-05 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/bounds-6.c: Use dg-do run.
+
+2015-02-20 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/63888
+ * c-c++-common/asan/pr63888.c: New test.
+
+2015-02-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/65000
+ * g++.dg/ubsan/pr65000.C: New test.
+
+2015-02-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64984
+ * g++.dg/ubsan/pr64984.C: New test.
+
+2015-02-12 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/65019
+ * g++.dg/ubsan/pr65019.C: New test.
+
+2015-02-11 Marek Polacek <polacek@redhat.com>
+
+ * g++.dg/ubsan/shift-1.C: New test.
+ * gcc.dg/ubsan/c-shift-2.c: New test.
+ * c-c++-common/ubsan/shift-5.c: Remove file.
+
+2015-01-20 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64632
+ * g++.dg/ubsan/pr64632.C: New test.
+
+2015-01-06 Jakub Jelinek <jakub@redhat.com>
+
+ * c-c++-common/asan/sanitize-all-1.c: New test.
+ * c-c++-common/ubsan/sanitize-all-1.c: New test.
+ * c-c++-common/ubsan/sanitize-all-2.c: New test.
+ * c-c++-common/ubsan/sanitize-all-3.c: New test.
+ * c-c++-common/ubsan/sanitize-all-4.c: New test.
+
+2015-01-05 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64344
+ * c-c++-common/ubsan/pr64344-1.c: New test.
+ * c-c++-common/ubsan/pr64344-2.c: New test.
+
+2014-12-27 Segher Boessenkool <segher@kernel.crashing.org>
+
+ * lib/ubsan-dg.exp (check_effective_target_fsanitize_undefined):
+ Check if testcases run without errors, not just if they compile.
+
+2014-12-22 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ * lib/ubsan-dg.exp: Add library path for libstdc++.
+
+2014-12-17 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64289
+ * c-c++-common/ubsan/pr64289.c: New test.
+
+2014-12-04 Marek Polacek <polacek@redhat.com>
+
+ PR middle-end/56917
+ * c-c++-common/ubsan/pr56917.c: New test.
+
+2014-12-02 Marek Polacek <polacek@redhat.com>
+
+ * g++.dg/cpp0x/constexpr-shift1.C: New test.
+ * g++.dg/cpp1y/constexpr-shift1.C: New test.
+ * g++.dg/ubsan/pr63956.C: Add dg-errors.
+
+2014-12-01 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/64121
+ * c-c++-common/ubsan/pr64121.c: New test.
+
+2014-12-01 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63956
+ * c-c++-common/ubsan/shift-5.c: Add xfails.
+ * g++.dg/ubsan/div-by-zero-1.C: Don't use -w. Add xfail.
+ * g++.dg/ubsan/pr63956.C: New test.
+
+2015-04-22 Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/asan/user-section-1.c: New test.
+ * c-c++-common/asan/user-section-2.c: New test.
+ * c-c++-common/asan/user-section-3.c: New test.
+
+2015-05-07 Marek Polacek <polacek@redhat.com>
+
+ PR testsuite/66046
+ * c-c++-common/ubsan/align-6.c: Don't match trailing newlines in the
+ last dg-output.
+ * c-c++-common/ubsan/align-7.c: Likewise.
+ * c-c++-common/ubsan/bounds-8.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
+ * c-c++-common/ubsan/load-bool-enum.c: Likewise.
+ * c-c++-common/ubsan/null-1.c: Likewise.
+ * c-c++-common/ubsan/null-10.c: Likewise.
+ * c-c++-common/ubsan/null-11.c: Likewise.
+ * c-c++-common/ubsan/null-2.c: Likewise.
+ * c-c++-common/ubsan/null-3.c: Likewise.
+ * c-c++-common/ubsan/null-4.c: Likewise.
+ * c-c++-common/ubsan/null-5.c: Likewise.
+ * c-c++-common/ubsan/null-6.c: Likewise.
+ * c-c++-common/ubsan/null-7.c: Likewise.
+ * c-c++-common/ubsan/null-8.c: Likewise.
+ * c-c++-common/ubsan/null-9.c: Likewise.
+ * c-c++-common/ubsan/object-size-1.c: Likewise.
+ * c-c++-common/ubsan/object-size-10.c: Likewise.
+ * c-c++-common/ubsan/object-size-4.c: Likewise.
+ * c-c++-common/ubsan/object-size-5.c: Likewise.
+ * c-c++-common/ubsan/object-size-7.c: Likewise.
+ * c-c++-common/ubsan/object-size-8.c: Likewise.
+ * c-c++-common/ubsan/object-size-9.c: Likewise.
+ * c-c++-common/ubsan/overflow-add-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-int128.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-4.c: Likewise.
+ * c-c++-common/ubsan/overflow-negate-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-negate-3.c: Likewise.
+ * c-c++-common/ubsan/overflow-sub-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-sub-4.c: Likewise.
+ * c-c++-common/ubsan/pr59333.c: Likewise.
+ * c-c++-common/ubsan/pr59667.c: Likewise.
+ * c-c++-common/ubsan/pr60613-2.c: Likewise.
+ * c-c++-common/ubsan/pr60636.c: Likewise.
+ * c-c++-common/ubsan/pr63802.c: Likewise.
+ * c-c++-common/ubsan/recovery-1.c: Likewise.
+ * c-c++-common/ubsan/recovery-3.c: Likewise.
+ * c-c++-common/ubsan/shift-1.c: Likewise.
+ * c-c++-common/ubsan/shift-2.c: Likewise.
+ * c-c++-common/ubsan/shift-4.c: Likewise.
+ * c-c++-common/ubsan/shift-7.c: Likewise.
+ * c-c++-common/ubsan/undefined-2.c: Likewise.
+ * c-c++-common/ubsan/vla-1.c: Likewise.
+ * g++.dg/ubsan/null-1.C: Likewise.
+ * g++.dg/ubsan/null-3.C: Likewise.
+ * g++.dg/ubsan/null-4.C: Likewise.
+ * g++.dg/ubsan/vptr-8.C: Likewise.
+ * g++.dg/ubsan/vptr-9.C: Likewise.
+ * gcc.dg/ubsan/bounds-2.c: Likewise.
+ * gcc.dg/ubsan/object-size-9.c: Likewise.
+
+2015-04-17 Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/asan/user-section-1.c: New test.
+
+2015-04-01 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR target/65624
+ * gcc.target/aarch64/pr65624.c: New test.
+
+2015-03-16 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR sanitizer/64820
+ * c-c++-common/asan/pr64820.c: New test.
+
+2014-11-28 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/shift-8.c: New test.
+
+2014-11-28 Marek Polacek <polacek@redhat.com>
+
+ PR c/63862
+ * gcc.c-torture/execute/shiftopt-1.c: Don't XFAIL anymore.
+ * c-c++-common/ubsan/shift-7.c: New test.
+
+2014-11-26 Jakub Jelinek <jakub@redhat.com>
+
+ * lib/tsan-dg.exp (check_effective_target_fsanitize_thread,
+ tsan_init): Don't use -fPIE or -pie.
+
+2014-11-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ PR sanitizer/63939
+ * c-c++-common/asan/global-overflow-1.c: Allow extra spaces after
+ stack pointer address, to fit darwin output when symbolizer is not
+ present.
+ * c-c++-common/asan/heap-overflow-1.c: Likewise.
+ * c-c++-common/asan/memcmp-1.c: Likewise.
+ * c-c++-common/asan/misalign-1.c: Likewise.
+ * c-c++-common/asan/misalign-2.c: Likewise.
+ * c-c++-common/asan/null-deref-1.c: Likewise.
+ * c-c++-common/asan/stack-overflow-1.c: Likewise.
+ * c-c++-common/asan/strlen-overflow-1.c: Likewise.
+ * c-c++-common/asan/strncpy-overflow-1.c: Likewise.
+ * c-c++-common/asan/use-after-free-1.c: Likewise.
+ * g++.dg/asan/deep-stack-uaf-1.C: Likewise.
+ * g++.dg/asan/deep-tail-call-1.C: Likewise.
+ * g++.dg/asan/large-func-test-1.C: Likewise.
+
+2014-11-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ PR sanitizer/63939
+	* g++.dg/asan/large-func-test-1.C: Adjust dg-output.
+
+2014-11-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ PR sanitizer/63939
+	* c-c++-common/asan/heap-overflow-1.c: Adjust dg-output.
+ * c-c++-common/asan/memcmp-1.c: Likewise.
+ * c-c++-common/asan/strncpy-overflow-1.c: Likewise.
+ * c-c++-common/asan/use-after-free-1.c: Likewise.
+
+2014-11-19 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63690
+ * gcc.dg/ubsan/pr63690.c: New test.
+
+2014-11-19 Francois-Xavier Coudert <fxcoudert@gcc.gnu.org>
+
+ PR sanitizer/62132
+ * c-c++-common/asan/misalign-1.c: Pass -fno-omit-frame-pointer on
+ darwin, adjust dg-output.
+ * c-c++-common/asan/misalign-2.c: Likewise.
+
+2014-11-19 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63879
+ * c-c++-common/ubsan/pr63879-1.c: New test.
+ * c-c++-common/ubsan/pr63879-2.c: New test.
+
+2014-11-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63520
+ * c-c++-common/ubsan/pr63520.c: New test.
+
+2014-11-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63913
+ * g++.dg/ubsan/pr63913.C: New test.
+
+2014-11-18 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63813
+ * g++.dg/ubsan/pr63813.C: New test.
+
+2014-11-18 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63866
+ * c-c++-common/ubsan/pr63866.c: New test.
+
+2014-11-18 Marat Zakirov <m.zakirov@samsung.com>
+
+ * c-c++-common/asan/aggressive-opts.c: New test.
+
+2014-11-18 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/63802
+ * c-c++-common/ubsan/pr63802.c: New test.
+
+2014-11-14 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/overflow-negate-3.c: New test.
+
+2014-11-14 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/63839
+ * c-c++-common/ubsan/pr63839.c: New test.
+ * c-c++-common/ubsan/unreachable-2.c: New test.
+
+2014-11-12 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/overflow-sub-4.c: New test.
+ * c-c++-common/ubsan/overflow-sub-2.c: Adjust dg-output.
+ * c-c++-common/ubsan/overflow-int128.c: Likewise.
+
+2014-11-12 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR testsuite/63830
+ * c-c++-common/asan/strlen-overflow-1.c (main): Avoid tail call.
+
+2014-11-10 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/align-7.c: Skip for -flto -fno-fat-lto-objects.
+ * c-c++-common/ubsan/align-8.c: Likewise.
+ * g++.dg/ubsan/null-4.C: Likewise.
+ * g++.dg/ubsan/null-5.C: Likewise.
+
+2014-11-06 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/align-2.c: Add dg-output.
+ * c-c++-common/ubsan/align-4.c: Likewise.
+ * c-c++-common/ubsan/align-6.c: New test.
+ * c-c++-common/ubsan/align-7.c: New test.
+ * c-c++-common/ubsan/align-8.c: New test.
+ * g++.dg/ubsan/null-1.C: Add dg-output.
+ * g++.dg/ubsan/null-2.C: Likewise.
+ * g++.dg/ubsan/null-3.C: New test.
+ * g++.dg/ubsan/null-4.C: New test.
+ * g++.dg/ubsan/null-5.C: New test.
+
+2014-11-04 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/align-2.c: Remove dg-output.
+ * c-c++-common/ubsan/align-4.c: Likewise.
+ * g++.dg/ubsan/null-1.C: Likewise.
+ * g++.dg/ubsan/null-2.C: Likewise.
+
+2014-11-04 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/object-size-10.c: New test.
+
+2014-10-31 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63697
+ * c-c++-common/ubsan/overflow-sub-3.c: New test.
+
+2014-10-28 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/asan/no-redundant-instrumentation-1.c: Updated test.
+ * c-c++-common/asan/no-redundant-instrumentation-4.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-5.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-6.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-7.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-8.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-2.c: Removed.
+ * c-c++-common/asan/no-redundant-instrumentation-9.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-10.c: New test.
+ * c-c++-common/asan/no-redundant-instrumentation-11.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-12.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-13.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-14.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-15.c: Likewise.
+ * c-c++-common/asan/pr63638.c: Likewise.
+
+2014-10-28 Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/asan/kasan-recover-1.c: New test.
+ * c-c++-common/asan/kasan-recover-2.c: New test.
+ * c-c++-common/asan/instrument-with-calls-1.c: Get rid of -save-temps.
+ * c-c++-common/asan/instrument-with-calls-2.c: Likewise.
+ * c-c++-common/asan/instrument-with-calls-3.c: Likewise.
+ * c-c++-common/asan/kasan-recover-1.c: Likewise.
+ * c-c++-common/asan/kasan-recover-2.c: Likewise.
+ * c-c++-common/asan/no-asan-globals.c: Likewise.
+ * c-c++-common/asan/no-instrument-reads.c: Likewise.
+ * c-c++-common/asan/no-instrument-writes.c: Likewise.
+ * c-c++-common/asan/no-use-after-return.c: Likewise.
+
+2014-10-28 Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/asan/shadow-offset-1.c: New test.
+
+2014-10-23 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/undefined-2.c: New test.
+
+2014-10-22 Jakub Jelinek <jakub@redhat.com>
+ Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/ubsan/align-1.c: Update cmdline options.
+ * c-c++-common/ubsan/align-3.c: Likewise.
+ * c-c++-common/ubsan/bounds-1.c: Likewise.
+ * c-c++-common/ubsan/div-by-zero-7.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-10.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-7.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
+ * c-c++-common/ubsan/nonnull-2.c: Likewise.
+ * c-c++-common/ubsan/nonnull-3.c: Likewise.
+ * c-c++-common/ubsan/object-size-3.c: Likewise.
+ * c-c++-common/ubsan/overflow-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-add-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-add-3.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-3.c: Likewise.
+ * c-c++-common/ubsan/overflow-negate-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-sub-1.c: Likewise.
+ * c-c++-common/ubsan/pr59503.c: Likewise.
+ * c-c++-common/ubsan/pr60613-1.c: Likewise.
+ * c-c++-common/ubsan/save-expr-1.c: Likewise.
+ * c-c++-common/ubsan/shift-3.c: Likewise.
+ * c-c++-common/ubsan/shift-6.c: Likewise.
+ * c-c++-common/ubsan/undefined-1.c: Likewise.
+ * c-c++-common/ubsan/vla-2.c: Likewise.
+ * c-c++-common/ubsan/vla-3.c: Likewise.
+ * c-c++-common/ubsan/vla-4.c: Likewise.
+ * g++.dg/ubsan/cxx11-shift-1.C: Likewise.
+ * g++.dg/ubsan/return-2.C: Likewise.
+ * c-c++-common/ubsan/recovery-1.c: New test.
+ * c-c++-common/ubsan/recovery-2.c: New test.
+ * c-c++-common/ubsan/recovery-3.c: New test.
+ * c-c++-common/ubsan/recovery-common.inc: New file.
+
+2014-09-19 Marat Zakirov <m.zakirov@samsung.com>
+
+ * c-c++-common/asan/bitfield-5.c: New test.
+
+2014-10-10 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/object-size-1.c: New test.
+ * c-c++-common/ubsan/object-size-2.c: New test.
+ * c-c++-common/ubsan/object-size-3.c: New test.
+ * c-c++-common/ubsan/object-size-4.c: New test.
+ * c-c++-common/ubsan/object-size-5.c: New test.
+ * c-c++-common/ubsan/object-size-6.c: New test.
+ * c-c++-common/ubsan/object-size-7.c: New test.
+ * c-c++-common/ubsan/object-size-8.c: New test.
+ * c-c++-common/ubsan/object-size-9.c: New test.
+ * g++.dg/ubsan/object-size-1.C: New test.
+ * gcc.dg/ubsan/object-size-9.c: New test.
+
+2014-09-24 Marek Polacek <polacek@redhat.com>
+
+ PR c/61405
+ PR c/53874
+ * c-c++-common/pr53874.c: New test.
+ * c-c++-common/pr61405.c: New test.
+
+2014-09-24 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/63316
+ * c-c++-common/asan/pr63316.c: New test.
+
+2014-09-19 Marat Zakirov <m.zakirov@samsung.com>
+
+ * c-c++-common/asan/red-align-1.c: New test.
+ * c-c++-common/asan/red-align-2.c: New test.
+
+2014-09-10 Jakub Jelinek <jakub@redhat.com>
+
+ * c-c++-common/ubsan/attrib-3.c: New test.
+ * c-c++-common/ubsan/nonnull-1.c: New test.
+ * c-c++-common/ubsan/nonnull-2.c: New test.
+ * c-c++-common/ubsan/nonnull-3.c: New test.
+ * c-c++-common/ubsan/nonnull-4.c: New test.
+ * c-c++-common/ubsan/nonnull-5.c: New test.
+
+2014-08-28 Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/asan/pr62089.c: Fix test on 32-bit platforms.
+
+2014-08-01 Jakub Jelinek <jakub@redhat.com>
+
+ * c-c++-common/ubsan/align-1.c: New test.
+ * c-c++-common/ubsan/align-2.c: New test.
+ * c-c++-common/ubsan/align-3.c: New test.
+ * c-c++-common/ubsan/align-4.c: New test.
+ * c-c++-common/ubsan/align-5.c: New test.
+ * c-c++-common/ubsan/attrib-4.c: New test.
+ * g++.dg/ubsan/align-1.C: New test.
+ * g++.dg/ubsan/align-2.C: New test.
+ * g++.dg/ubsan/align-3.C: New test.
+ * g++.dg/ubsan/attrib-1.C: New test.
+ * g++.dg/ubsan/null-1.C: New test.
+ * g++.dg/ubsan/null-2.C: New test.
+
+2014-08-11 Yury Gribov <y.gribov@samsung.com>
+
+ * c-c++-common/asan/inc.c: Update test.
+ * c-c++-common/asan/instrument-with-calls-2.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-1.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-2.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-3.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-4.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-5.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-6.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-7.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-8.c: Likewise.
+ * c-c++-common/asan/no-redundant-instrumentation-9.c: Likewise.
+
+2014-07-23 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/bounds-2.c (fn4): Adjust to check the array size
+ in the structure.
+
+2014-07-17 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/ubsan/bounds-2.c: Change output pattern.
+ * c-c++-common/ubsan/bounds-5.c: Likewise.
+ * c-c++-common/ubsan/bounds-6.c: Likewise.
+ * c-c++-common/ubsan/bounds-7.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-1.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-10.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-2.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-3.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-4.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-5.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-6.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
+ * gcc.dg/ubsan/float-cast-overflow-bf.c: Likewise.
+ * g++.dg/ubsan/float-cast-overflow-bf.C: Likewise.
+
+2014-07-15 Marek Polacek <polacek@redhat.com>
+
+ * gcc.dg/ubsan/bounds-1.c: New test.
+
+2014-06-30 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/attrib-2.c: New test.
+ * g++.dg/ubsan/return-3.C: New test.
+
+2014-06-27 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/bounds-2.c: Adjust dg-output.
+ (fn1): Remove store to out-of-bounds location. Add memory barrier.
+ (fn2): Likewise.
+ (fn5): Likewise.
+ (fn6): Likewise.
+ (fn7): Likewise.
+ (fn8): Likewise.
+ (fn9): Likewise.
+ (fn11): Likewise.
+ * c-c++-common/ubsan/bounds-5.c (fn1): Remove store to out-of-bounds
+ location. Add memory barrier.
+ (fn2): Likewise.
+ (fn3): Likewise.
+ (fn4): Likewise.
+ (fn5): Likewise.
+ * c-c++-common/ubsan/bounds-7.c: New test.
+
+2014-06-20 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/bounds-1.c: New test.
+ * c-c++-common/ubsan/bounds-2.c: New test.
+ * c-c++-common/ubsan/bounds-3.c: New test.
+ * c-c++-common/ubsan/bounds-4.c: New test.
+ * c-c++-common/ubsan/bounds-5.c: New test.
+ * c-c++-common/ubsan/bounds-6.c: New test.
+
+2014-05-27 Marek Polacek <polacek@redhat.com>
+
+ PR testsuite/61319
+ * c-c++-common/ubsan/float-cast.h: Conditionally define LLONG_MAX,
+ LLONG_MIN, and ULLONG_MAX.
+
+2014-05-27 Dominique d'Humieres <dominiq@lps.ens.fr>
+
+ PR testsuite/61319
+ * c-c++-common/ubsan/float-cast-overflow-1.c: Make the sign of
+ -nan optional.
+ * c-c++-common/ubsan/float-cast-overflow-2.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-4.c: Likewise.
+
+2014-05-23 Marek Polacek <polacek@redhat.com>
+ Jakub Jelinek <jakub@redhat.com>
+
+ * c-c++-common/ubsan/float-cast-overflow-1.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-10.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-2.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-3.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-4.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-5.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-6.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-7.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-7.h: New file.
+ * c-c++-common/ubsan/float-cast-overflow-8.c: New test.
+ * c-c++-common/ubsan/float-cast-overflow-9.c: New test.
+ * c-c++-common/ubsan/float-cast.h: New file.
+ * g++.dg/ubsan/float-cast-overflow-bf.C: New test.
+ * gcc.dg/ubsan/float-cast-overflow-bf.c: New test.
+
+2014-04-30 Marek Polacek <polacek@redhat.com>
+
+ * c-c++-common/ubsan/div-by-zero-5.c: Fix formatting.
+ * c-c++-common/ubsan/float-div-by-zero-1.c: New test.
+
+2014-04-22 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/60275
+ * g++.dg/ubsan/return-2.C: Revert 2014-03-24 changes, add
+ -fno-sanitize-recover to dg-options.
+ * g++.dg/ubsan/cxx11-shift-1.C: Remove c++11 target restriction,
+ add -std=c++11 to dg-options.
+ * g++.dg/ubsan/cxx11-shift-2.C: Likewise.
+ * g++.dg/ubsan/cxx1y-vla.C: Remove c++1y target restriction,
+ add -std=c++1y to dg-options.
+ * c-c++-common/ubsan/undefined-1.c: Revert 2014-03-24 changes, add
+ -fno-sanitize-recover to dg-options.
+ * c-c++-common/ubsan/overflow-sub-1.c: Likewise.
+ * c-c++-common/ubsan/vla-4.c: Likewise.
+ * c-c++-common/ubsan/pr59503.c: Likewise.
+ * c-c++-common/ubsan/vla-3.c: Likewise.
+ * c-c++-common/ubsan/save-expr-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-add-1.c: Likewise.
+ * c-c++-common/ubsan/shift-3.c: Likewise.
+ * c-c++-common/ubsan/overflow-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-negate-2.c: Likewise.
+ * c-c++-common/ubsan/vla-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-1.c: Likewise.
+ * c-c++-common/ubsan/pr60613-1.c: Likewise.
+ * c-c++-common/ubsan/shift-6.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-3.c: Likewise.
+ * c-c++-common/ubsan/overflow-add-3.c: New test.
+ * c-c++-common/ubsan/overflow-add-4.c: New test.
+ * c-c++-common/ubsan/div-by-zero-6.c: New test.
+ * c-c++-common/ubsan/div-by-zero-7.c: New test.
+
+2014-04-15 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * c-c++-common/asan/null-deref-1.c: Change regexp to pass test
+ under qemu-arm.
+ * c-c++-common/ubsan/div-by-zero-1.c: Likewise.
+ * c-c++-common/ubsan/div-by-zero-2.c: Likewise.
+ * c-c++-common/ubsan/div-by-zero-3.c: Likewise.
+ * c-c++-common/ubsan/load-bool-enum.c (foo): Likewise.
+ * c-c++-common/ubsan/null-1.c: Likewise.
+ * c-c++-common/ubsan/null-10.c: Likewise.
+ * c-c++-common/ubsan/null-11.c: Likewise.
+ * c-c++-common/ubsan/null-2.c: Likewise.
+ * c-c++-common/ubsan/null-3.c: Likewise.
+ * c-c++-common/ubsan/null-4.c: Likewise.
+ * c-c++-common/ubsan/null-5.c: Likewise.
+ * c-c++-common/ubsan/null-6.c: Likewise.
+ * c-c++-common/ubsan/null-7.c: Likewise.
+ * c-c++-common/ubsan/null-8.c: Likewise.
+ * c-c++-common/ubsan/null-9.c: Likewise.
+ * c-c++-common/ubsan/overflow-add-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-int128.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-2.c: Likewise.
+ * c-c++-common/ubsan/overflow-mul-4.c: Likewise.
+ * c-c++-common/ubsan/overflow-negate-1.c: Likewise.
+ * c-c++-common/ubsan/overflow-sub-2.c: Likewise.
+ * c-c++-common/ubsan/pr59333.c: Likewise.
+ * c-c++-common/ubsan/pr59667.c: Likewise.
+ * c-c++-common/ubsan/pr60613-2.c: Likewise.
+ * c-c++-common/ubsan/pr60636.c: Likewise.
+ * c-c++-common/ubsan/shift-1.c: Likewise.
+ * c-c++-common/ubsan/shift-2.c: Likewise.
+ * c-c++-common/ubsan/vla-1.c: Likewise.
+
+2014-05-30 Jakub Jelinek <jakub@redhat.com>
+
+ * g++.dg/asan/asan_test.C: Add -std=c++11 and
+ -DSANITIZER_USE_DEJAGNU_GTEST=1 to dg-options, remove
+ -DASAN_USE_DEJAGNU_GTEST=1.
+ * g++.dg/asan/asan_mem_test.cc: Updated from upstream
+ r209283.
+ * g++.dg/asan/asan_oob_test.cc: Likewise.
+ * g++.dg/asan/sanitizer_test_utils.h: Likewise.
+ * g++.dg/asan/asan_str_test.cc: Likewise.
+ * g++.dg/asan/asan_test_utils.h: Likewise.
+ * g++.dg/asan/sanitizer_test_config.h: Likewise.
+ * g++.dg/asan/asan_test.cc: Likewise. Allow size 12
+ for long double.
+ * g++.dg/asan/sanitizer_pthread_wrappers.h: New file.
+ Imported from upstream r209283.
+ * g++.dg/asan/asan_test_config.h: Likewise.
+
+2014-05-22 Kostya Serebryany <kcc@google.com>
+
+ * c-c++-common/tsan/mutexset1.c: Update the test to match
+ upstream r209283.
+ * g++.dg/asan/symbolize-callback-1.C: Delete the deprecated test.
+
+2014-05-21 Marek Polacek <polacek@redhat.com>
+
+ PR sanitizer/61272
+ * g++.dg/ubsan/pr61272.C: New test.
+
+2014-05-14 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/61100
+ * c-c++-common/asan/asan-interface-1.c: New test.
+ * lib/asan-dg.exp (asan_include_flags): New function.
+ (asan_init): Call asan_include_flags to obtain path
+ to sanitizer headers.
+
2015-02-06 Jakub Jelinek <jakub@redhat.com>
PR ipa/64896
diff --git a/gcc/testsuite/c-c++-common/asan/aggressive-opts.c b/gcc/testsuite/c-c++-common/asan/aggressive-opts.c
new file mode 100644
index 00000000000..979b6118d1d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/aggressive-opts.c
@@ -0,0 +1,28 @@
+/* { dg-options "-fdump-tree-asan" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O3" } } */
+
+int ext;
+
+int
+Perl_do_sv_dump()
+{
+ int freq[10];
+ int i;
+ int max = 0;
+
+ if (max < ext)
+ max = ext;
+
+ for (i = 0; i <= max; i++)
+ if (freq[i])
+ ext = 0;
+
+ if (i > 20)
+ return freq[i];
+ else
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "ASAN_CHECK" 2 "asan1" } } */
+/* { dg-final { cleanup-tree-dump "asan1" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/asan-interface-1.c b/gcc/testsuite/c-c++-common/asan/asan-interface-1.c
index 8cd80caaa38..33ed1b0e845 100644
--- a/gcc/testsuite/c-c++-common/asan/asan-interface-1.c
+++ b/gcc/testsuite/c-c++-common/asan/asan-interface-1.c
@@ -2,7 +2,6 @@
/* { dg-do run { target { *-*-linux* } } } */
-#include <stdbool.h>
#include <sanitizer/asan_interface.h>
int main() {
diff --git a/gcc/testsuite/c-c++-common/asan/bitfield-5.c b/gcc/testsuite/c-c++-common/asan/bitfield-5.c
new file mode 100644
index 00000000000..eb5e9e9fe07
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/bitfield-5.c
@@ -0,0 +1,24 @@
+/* Check BIT_FIELD_REF. */
+
+/* { dg-do run } */
+/* { dg-shouldfail "asan" } */
+
+struct A
+{
+ int y : 20;
+ int x : 13;
+};
+
+int __attribute__ ((noinline, noclone))
+f (void *p) {
+ return ((struct A *)p)->x != 0;
+}
+
+int
+main ()
+{
+ int a = 0;
+ return f (&a);
+}
+
+/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow" } */
diff --git a/gcc/testsuite/c-c++-common/asan/global-overflow-1.c b/gcc/testsuite/c-c++-common/asan/global-overflow-1.c
index 66e91479cf0..8dd75df10bf 100644
--- a/gcc/testsuite/c-c++-common/asan/global-overflow-1.c
+++ b/gcc/testsuite/c-c++-common/asan/global-overflow-1.c
@@ -23,6 +23,6 @@ int main() {
}
/* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0.*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*global-overflow-1.c:20|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*global-overflow-1.c:20|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r).*" } */
/* { dg-output "0x\[0-9a-f\]+ is located 0 bytes to the right of global variable" } */
/* { dg-output ".*YYY\[^\n\r]* of size 10\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/halt_on_error-1.c b/gcc/testsuite/c-c++-common/asan/halt_on_error-1.c
new file mode 100644
index 00000000000..b015e9684bf
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/halt_on_error-1.c
@@ -0,0 +1,23 @@
+/* Test recovery mode. */
+/* { dg-do run } */
+/* { dg-options "-fsanitize-recover=address" } */
+/* { dg-set-target-env-var ASAN_OPTIONS "halt_on_error=false" } */
+
+#include <string.h>
+
+volatile int ten = 10;
+
+int main() {
+ char x[10];
+ memset(x, 0, ten + 1);
+ asm volatile ("" : : : "memory");
+ volatile int res = x[ten];
+ x[ten] = res + 3;
+ res = x[ten];
+ return 0;
+}
+
+/* { dg-output "WRITE of size 11 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-output "\[^\n\r]*READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-output "\[^\n\r]*WRITE of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-output "\[^\n\r]*READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
diff --git a/gcc/testsuite/c-c++-common/asan/halt_on_error-2.c b/gcc/testsuite/c-c++-common/asan/halt_on_error-2.c
new file mode 100644
index 00000000000..4527889af14
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/halt_on_error-2.c
@@ -0,0 +1,24 @@
+/* Test recovery mode. */
+/* { dg-do run } */
+/* { dg-options "-fsanitize-recover=address" } */
+/* { dg-set-target-env-var ASAN_OPTIONS "halt_on_error=true" } */
+/* { dg-shouldfail "asan" } */
+
+#include <string.h>
+
+volatile int ten = 10;
+
+int main() {
+ char x[10];
+ memset(x, 0, ten + 1);
+ asm volatile ("" : : : "memory");
+ volatile int res = x[ten];
+ x[ten] = res + 3;
+ res = x[ten];
+ return 0;
+}
+
+/* { dg-output "WRITE of size 11 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-prune-output "\[^\n\r]*READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-prune-output "\[^\n\r]*WRITE of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
+/* { dg-prune-output "\[^\n\r]*READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r).*" } */
diff --git a/gcc/testsuite/c-c++-common/asan/heap-overflow-1.c b/gcc/testsuite/c-c++-common/asan/heap-overflow-1.c
index 996909d3c8e..0377a6cf468 100644
--- a/gcc/testsuite/c-c++-common/asan/heap-overflow-1.c
+++ b/gcc/testsuite/c-c++-common/asan/heap-overflow-1.c
@@ -24,8 +24,8 @@ int main(int argc, char **argv) {
}
/* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0.*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*heap-overflow-1.c:21|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*heap-overflow-1.c:21|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*0x\[0-9a-f\]+ is located 0 bytes to the right of 10-byte region\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*allocated by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*(interceptor_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*heap-overflow-1.c:19|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*heap-overflow-1.c:19|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/instrument-with-calls-1.c b/gcc/testsuite/c-c++-common/asan/instrument-with-calls-1.c
index 32e32a600cb..a08b98a47db 100644
--- a/gcc/testsuite/c-c++-common/asan/instrument-with-calls-1.c
+++ b/gcc/testsuite/c-c++-common/asan/instrument-with-calls-1.c
@@ -1,5 +1,5 @@
-/* { dg-do assemble } */
-/* { dg-options "-fno-sanitize=address -fsanitize=kernel-address --param asan-instrumentation-with-call-threshold=0 -save-temps" } */
+/* { dg-do compile } */
+/* { dg-options "--param asan-instrumentation-with-call-threshold=0" } */
void f(char *a, int *b) {
*b = *a;
@@ -7,4 +7,3 @@ void f(char *a, int *b) {
/* { dg-final { scan-assembler "__asan_load1" } } */
/* { dg-final { scan-assembler "__asan_store4" } } */
-/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/c-c++-common/asan/instrument-with-calls-2.c b/gcc/testsuite/c-c++-common/asan/instrument-with-calls-2.c
index 1b361e6270d..217410c32be 100644
--- a/gcc/testsuite/c-c++-common/asan/instrument-with-calls-2.c
+++ b/gcc/testsuite/c-c++-common/asan/instrument-with-calls-2.c
@@ -1,5 +1,5 @@
-/* { dg-do assemble } */
-/* { dg-options "-fno-sanitize=address -fsanitize=kernel-address --param asan-instrumentation-with-call-threshold=1 -save-temps" } */
+/* { dg-do compile } */
+/* { dg-options "--param asan-instrumentation-with-call-threshold=1" } */
int x;
@@ -13,4 +13,3 @@ void f(int *a, int *b) {
/* { dg-final { scan-assembler-not "__asan_report_store4" } } */
/* { dg-final { scan-assembler "__asan_load4" } } */
/* { dg-final { scan-assembler-not "__asan_report_load4" } } */
-/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/c-c++-common/asan/instrument-with-calls-3.c b/gcc/testsuite/c-c++-common/asan/instrument-with-calls-3.c
new file mode 100644
index 00000000000..6dbd54951db
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/instrument-with-calls-3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "--param asan-instrumentation-with-call-threshold=0" } */
+
+struct A {
+ char x[7];
+};
+
+void f(struct A *x, struct A *y) {
+ *x = *y;
+}
+
+/* { dg-final { scan-assembler "__asan_loadN" } } */
+/* { dg-final { scan-assembler "__asan_storeN" } } */
+
diff --git a/gcc/testsuite/c-c++-common/asan/kasan-recover-1.c b/gcc/testsuite/c-c++-common/asan/kasan-recover-1.c
new file mode 100644
index 00000000000..b7d3dda9f7d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/kasan-recover-1.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fno-sanitize=address -fsanitize=kernel-address --param asan-instrumentation-with-call-threshold=100" } */
+
+void
+foo (int *p)
+{
+ *p = 0;
+}
+
+/* { dg-final { scan-assembler "__asan_report_store4_noabort" } } */
+
diff --git a/gcc/testsuite/c-c++-common/asan/kasan-recover-2.c b/gcc/testsuite/c-c++-common/asan/kasan-recover-2.c
new file mode 100644
index 00000000000..03f29c1ff62
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/kasan-recover-2.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fno-sanitize=address -fsanitize=kernel-address" } */
+
+void
+foo (int *p)
+{
+ *p = 0;
+}
+
+/* { dg-final { scan-assembler "__asan_store4_noabort" } } */
+
diff --git a/gcc/testsuite/c-c++-common/asan/memcmp-1.c b/gcc/testsuite/c-c++-common/asan/memcmp-1.c
index 03f32e92a31..31321827405 100644
--- a/gcc/testsuite/c-c++-common/asan/memcmp-1.c
+++ b/gcc/testsuite/c-c++-common/asan/memcmp-1.c
@@ -16,5 +16,5 @@ main ()
}
/* { dg-output "ERROR: AddressSanitizer: stack-buffer-overflow.*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*(interceptor_|)memcmp |\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)memcmp|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-1.c b/gcc/testsuite/c-c++-common/asan/misalign-1.c
index 0c5b6e0c754..f1cca1665cc 100644
--- a/gcc/testsuite/c-c++-common/asan/misalign-1.c
+++ b/gcc/testsuite/c-c++-common/asan/misalign-1.c
@@ -1,5 +1,6 @@
/* { dg-do run { target { ilp32 || lp64 } } } */
/* { dg-options "-O2" } */
+/* { dg-additional-options "-fno-omit-frame-pointer" { target *-*-darwin* } } */
/* { dg-shouldfail "asan" } */
struct S { int i; } __attribute__ ((packed));
@@ -38,5 +39,5 @@ main ()
/* { dg-output "ERROR: AddressSanitizer:\[^\n\r]*on address\[^\n\r]*" } */
/* { dg-output "0x\[0-9a-f\]+ at pc 0x\[0-9a-f\]+ bp 0x\[0-9a-f\]+ sp 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*READ of size 4 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*foo(\[^\n\r]*misalign-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*misalign-1.c:34|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*foo(\[^\n\r]*misalign-1.c:1\[01]|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*misalign-1.c:3\[45]|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/misalign-2.c b/gcc/testsuite/c-c++-common/asan/misalign-2.c
index 7fbe299cc54..9f400b4c1f2 100644
--- a/gcc/testsuite/c-c++-common/asan/misalign-2.c
+++ b/gcc/testsuite/c-c++-common/asan/misalign-2.c
@@ -1,5 +1,6 @@
/* { dg-do run { target { ilp32 || lp64 } } } */
/* { dg-options "-O2" } */
+/* { dg-additional-options "-fno-omit-frame-pointer" { target *-*-darwin* } } */
/* { dg-shouldfail "asan" } */
struct S { int i; } __attribute__ ((packed));
@@ -38,5 +39,5 @@ main ()
/* { dg-output "ERROR: AddressSanitizer:\[^\n\r]*on address\[^\n\r]*" } */
/* { dg-output "0x\[0-9a-f\]+ at pc 0x\[0-9a-f\]+ bp 0x\[0-9a-f\]+ sp 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*READ of size 4 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*baz(\[^\n\r]*misalign-2.c:22|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*misalign-2.c:34|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*baz(\[^\n\r]*misalign-2.c:2\[23]|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*misalign-2.c:3\[45]|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-asan-globals.c b/gcc/testsuite/c-c++-common/asan/no-asan-globals.c
index 70a1f95a3a3..9bed29e5b67 100644
--- a/gcc/testsuite/c-c++-common/asan/no-asan-globals.c
+++ b/gcc/testsuite/c-c++-common/asan/no-asan-globals.c
@@ -1,5 +1,5 @@
-/* { dg-do assemble } */
-/* { dg-options "-save-temps --param asan-globals=0" } */
+/* { dg-do compile } */
+/* { dg-options "--param asan-globals=0" } */
volatile int ten = 10;
@@ -10,4 +10,3 @@ int main() {
}
/* { dg-final { scan-assembler-not "__asan_register_globals" } } */
-/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-instrument-reads.c b/gcc/testsuite/c-c++-common/asan/no-instrument-reads.c
index df75878de2f..00e96df054b 100644
--- a/gcc/testsuite/c-c++-common/asan/no-instrument-reads.c
+++ b/gcc/testsuite/c-c++-common/asan/no-instrument-reads.c
@@ -1,5 +1,5 @@
-/* { dg-do assemble } */
-/* { dg-options "--param asan-instrument-reads=0 -save-temps" } */
+/* { dg-do compile } */
+/* { dg-options "--param asan-instrument-reads=0" } */
volatile int ten = 10;
@@ -10,4 +10,3 @@ int main() {
}
/* { dg-final { scan-assembler-not "__asan_load" } } */
-/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-instrument-writes.c b/gcc/testsuite/c-c++-common/asan/no-instrument-writes.c
index c1500b9fb32..45370a2e56c 100644
--- a/gcc/testsuite/c-c++-common/asan/no-instrument-writes.c
+++ b/gcc/testsuite/c-c++-common/asan/no-instrument-writes.c
@@ -1,5 +1,5 @@
-/* { dg-do assemble } */
-/* { dg-options "--param asan-instrument-writes=0 -save-temps" } */
+/* { dg-do compile } */
+/* { dg-options "--param asan-instrument-writes=0" } */
volatile int ten = 10;
@@ -10,4 +10,3 @@ int main() {
}
/* { dg-final { scan-assembler-not "__asan_store" } } */
-/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-1.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-1.c
index 028f8d7152d..baacb1e536e 100644
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-1.c
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-1.c
@@ -6,7 +6,7 @@
/* { dg-do compile } */
/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
-extern char tab[4];
+extern char tab[6];
static int
test0 ()
@@ -35,17 +35,10 @@ test1 (int i)
the initialization. */
foo[i] = 1;
- /*__builtin___asan_report_store_n called once here to instrument
- the store to the memory region of tab. */
+ /* Instrument tab memory region. */
__builtin_memset (tab, 3, sizeof (tab));
- /* There is no instrumentation for the two memset calls below. */
- __builtin_memset (tab, 4, sizeof (tab));
- __builtin_memset (tab, 5, sizeof (tab));
-
- /* There is a call to __builtin___asan_report_store_n and a call
- to __builtin___asan_report_load_n to instrument the store to
- (subset of) the memory region of tab. */
+ /* Instrument tab[1] with access size 3. */
__builtin_memcpy (&tab[1], foo + i, 3);
/* This should not generate a __builtin___asan_report_load1 because
@@ -63,6 +56,5 @@ main ()
}
/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store1" 3 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store_n" 2 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load" 1 "sanopt" } } */
+/* { dg-final { scan-tree-dump-not "__builtin___asan_report_load1" "sanopt" } } */
/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-10.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-10.c
new file mode 100644
index 00000000000..24dfcfec977
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-10.c
@@ -0,0 +1,18 @@
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
+
+extern __UINT32_TYPE__ a;
+
+void
+foo ()
+{
+ /* Instrument a with access size 3. */
+ int d = __builtin_memcmp (&a, "123", 3);
+ /* This should generate a __builtin___asan_report_store4, because
+ the reference to a has been instrumented above with access size 3. */
+ a = 1;
+}
+
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store4" 1 "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-11.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-11.c
new file mode 100644
index 00000000000..4082f32694b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-11.c
@@ -0,0 +1,20 @@
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
+
+extern __UINT32_TYPE__ a;
+
+void
+foo ()
+{
+ /* Instrument a with access size 5. */
+ int d = __builtin_memcmp (&a, "12345", 4);
+ /* This should not generate a __builtin___asan_report_store4 because
+ the reference to a has been already instrumented above with access
+ size 5. */
+ a = 1;
+}
+
+/* { dg-final { scan-tree-dump-not "& 7" "sanopt" } } */
+/* { dg-final { scan-tree-dump-not "__builtin___asan_report_store" "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-12.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-12.c
new file mode 100644
index 00000000000..65e1d96f95d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-12.c
@@ -0,0 +1,16 @@
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+void
+foo (char *p)
+{
+ volatile int zero = 0;
+ __builtin_memcpy (p, "abc", zero);
+ /* This generates a __builtin___asan_report_store1 because we pass volatile
+ zero length into memcpy. */
+ p[0] = 'd';
+}
+
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store1" 1 "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-13.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-13.c
new file mode 100644
index 00000000000..c04be064188
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-13.c
@@ -0,0 +1,15 @@
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
+
+void
+foo (char *p)
+{
+ __builtin_memcpy (p, "abc", 0);
+ /* This generates a __builtin___asan_report_store1 because we didn't access
+ any byte in the previous memcpy, due to its zero length parameter. */
+ p[0] = 'd';
+}
+
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store1" 1 "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-14.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-14.c
new file mode 100644
index 00000000000..fadce906f55
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-14.c
@@ -0,0 +1,15 @@
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
+
+void
+foo (char *p)
+{
+ __builtin_memcpy (p, "abc", 2);
+ /* This doesn't generate a __builtin___asan_report_store1 because we
+ verified p[0] and p[1] are writable in the previous memcpy call. */
+ p[0] = 'd';
+}
+
+/* { dg-final { scan-tree-dump-not "__builtin___asan_report_store1" "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-15.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-15.c
new file mode 100644
index 00000000000..00676dad614
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-15.c
@@ -0,0 +1,16 @@
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+void
+foo (char *p)
+{
+ volatile int two = 2;
+ __builtin_memcpy (p, "abc", two);
+ /* This generates a __builtin___asan_report_store1 because we don't
+ optimize the previous memcpy call, whose length is volatile. */
+ p[0] = 'd';
+}
+
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store1" 1 "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-2.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-2.c
deleted file mode 100644
index a58411c3af3..00000000000
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-2.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/* This tests that when faced with two references to the same memory
- location in the same basic block, the second reference should not
- be instrumented by the Address Sanitizer. But in case of access to
- overlapping regions we must be precise. */
-
-/* { dg-options "-fdump-tree-sanopt" } */
-/* { dg-do compile } */
-/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
-
-int
-main ()
-{
- char tab[5];
-
- /* Here, we instrument the access at offset 0 and access at offset
- 4. */
- __builtin_memset (tab, 1, sizeof (tab));
- /* We instrumented access at offset 0 above already, so only access
- at offset 3 is instrumented. */
- __builtin_memset (tab, 1, 3);
-}
-
-/* { dg-final { scan-tree-dump-times "& 7" 3 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store_n" 2 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report" 2 "sanopt" } } */
-/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-4.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-4.c
index c3632aa3d2d..a613b92cbce 100644
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-4.c
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-4.c
@@ -5,13 +5,13 @@
void
foo (int *a, char *b, char *c)
{
- /* One check for c[0], one check for a[], one check for c, two checks for b. */
+ /* One check for c[0], one check for a[]. */
__builtin_memmove (c, b, a[c[0]]);
- /* For a total of 5 checks. */
+ /* For a total of 2 checks. */
+ int d = c[0] == 1;
}
-/* { dg-final { scan-tree-dump-times "& 7" 5 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "& 7" 2 "sanopt" } } */
/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load1" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load_n" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store_n" 1 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load4" 1 "sanopt" } } */
/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-5.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-5.c
index 077ea34d028..f4ca603f0c0 100644
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-5.c
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-5.c
@@ -5,14 +5,12 @@
void
foo (int *a, char *b, char *c)
{
- /* One check for b[0], one check for a[], 2 checks for c and one checks for b. */
- __builtin_memmove (c, b, a[b[0]]);
- /* For a total of 5 checks. */
+ /* One check for a[]. */
+ __builtin_memmove (c, b, a[0]);
+ /* For a total of 1 check. */
+ int d = a[0] == 0;
}
-/* { dg-final { scan-tree-dump-times "& 7" 5 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load1" 1 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "& 7" 1 "sanopt" } } */
/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load4" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load_n" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store_n" 1 "sanopt" } } */
/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-6.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-6.c
index 6d87104aac2..757f0eeb622 100644
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-6.c
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-6.c
@@ -5,16 +5,15 @@
void
foo (int *a, char *b, char *c)
{
- /* One check for c[0], one check for a[], one check for c and 2 checks for b. */
+ /* One check for c[0], one check for a[]. */
__builtin_memmove (c, b, a[c[0]]);
- /* One check for a[], one check for c and one check for b. */
+ /* One check for b[0], one check for a[]. */
__builtin_memmove (c, b, a[b[0]]);
- /* For a total of 8 checks. */
+ /* For a total of 4 checks. */
+ int d = c[0] == b[0];
}
-/* { dg-final { scan-tree-dump-times "& 7" 8 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load1" 1 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "& 7" 4 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load1" 2 "sanopt" } } */
/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load4" 2 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load_n" 2 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store_n" 2 "sanopt" } } */
/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-7.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-7.c
index 5baa10dcd7d..0c501451281 100644
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-7.c
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-7.c
@@ -2,26 +2,22 @@
/* { dg-do compile } */
/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
-char e[200];
+char e[5];
-struct S
+extern struct S
{
- char a[100];
- char b[100];
+ int a;
+ char b;
} s;
int
foo (int *a, char *b, char *c)
{
- /* 2 checks for s.a, 2 checks for e. */
- int d = __builtin_memcmp (s.a, e, 100);
- /* One check for s.a and one check for e. */
- d += __builtin_memcmp (s.a, e, 200);
- /* For a total of 6 checks. */
- return d;
+ int d = __builtin_memcmp (&s.a, e, 4);
+ /* No check because s.a was instrumented above with access size 4. */
+ return s.a;
}
-/* { dg-final { scan-tree-dump-times "& 7" 6 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load_n" 4 "sanopt" } } */
-/* { dg-final { scan-tree-dump-not "__builtin___asan_report_store" "sanopt" } } */
+/* { dg-final { scan-tree-dump-not "& 7" "sanopt" } } */
+/* { dg-final { scan-tree-dump-not "__builtin___asan_report_load4" "sanopt" } } */
/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-8.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-8.c
index 2a4c0812f39..4eeedcedc98 100644
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-8.c
+++ b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-8.c
@@ -5,16 +5,16 @@
char
foo (int *a, char *b, char *c)
{
- /* One check for b[0], one check for a[], two checks for c and one check for b. */
+ /* One check for b[0], one check for a[]. */
__builtin_memmove (c, b, a[b[0]]);
+ /* One check for c[0], one check for a[]. */
+ __builtin_memmove (b, c, a[c[0]]);
/* No checks here. */
return c[0] + b[0];
- /* For a total of 5 checks. */
+ /* For a total of 4 checks. */
}
-/* { dg-final { scan-tree-dump-times "& 7" 5 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load1" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load4" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load_n" 1 "sanopt" } } */
-/* { dg-final { scan-tree-dump-times "__builtin___asan_report_store_n" 1 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "& 7" 4 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load1" 2 "sanopt" } } */
+/* { dg-final { scan-tree-dump-times "__builtin___asan_report_load4" 2 "sanopt" } } */
/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-9.c b/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-9.c
deleted file mode 100644
index 9449de57884..00000000000
--- a/gcc/testsuite/c-c++-common/asan/no-redundant-instrumentation-9.c
+++ /dev/null
@@ -1,13 +0,0 @@
-/* { dg-options "-fdump-tree-sanopt" } */
-/* { dg-do compile } */
-/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
-
-__SIZE_TYPE__
-f (char *a)
-{
- a[0] = '1';
- return __builtin_strlen (a);
-}
-
-/* { dg-final { scan-tree-dump-times "__asan_report_load1" 1 "sanopt" } } */
-/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/no-use-after-return.c b/gcc/testsuite/c-c++-common/asan/no-use-after-return.c
index f326e0caee4..afbc112db38 100644
--- a/gcc/testsuite/c-c++-common/asan/no-use-after-return.c
+++ b/gcc/testsuite/c-c++-common/asan/no-use-after-return.c
@@ -1,5 +1,5 @@
-/* { dg-do assemble } */
-/* { dg-options "--param asan-use-after-return=0 -save-temps" } */
+/* { dg-do compile } */
+/* { dg-options "--param asan-use-after-return=0" } */
extern void f(char *);
@@ -10,4 +10,3 @@ int main() {
}
/* { dg-final { scan-assembler-not "__asan_option_detect_stack_use_after_return" } } */
-/* { dg-final { cleanup-saved-temps } } */
diff --git a/gcc/testsuite/c-c++-common/asan/null-deref-1.c b/gcc/testsuite/c-c++-common/asan/null-deref-1.c
index 6aea9d295f3..45d35ac2de3 100644
--- a/gcc/testsuite/c-c++-common/asan/null-deref-1.c
+++ b/gcc/testsuite/c-c++-common/asan/null-deref-1.c
@@ -18,5 +18,5 @@ int main()
/* { dg-output "ERROR: AddressSanitizer:? SEGV on unknown address\[^\n\r]*" } */
/* { dg-output "0x\[0-9a-f\]+ \[^\n\r]*pc 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in \[^\n\r]*NullDeref\[^\n\r]* (\[^\n\r]*null-deref-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*null-deref-1.c:15|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* #0 0x\[0-9a-f\]+ +(in \[^\n\r]*NullDeref\[^\n\r]* (\[^\n\r]*null-deref-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*null-deref-1.c:15|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/pr63316.c b/gcc/testsuite/c-c++-common/asan/pr63316.c
new file mode 100644
index 00000000000..d07093ac7f2
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/pr63316.c
@@ -0,0 +1,22 @@
+/* PR sanitizer/63316 */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=address -O2" } */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern void *malloc (__SIZE_TYPE__);
+extern void free (void *);
+#ifdef __cplusplus
+}
+#endif
+
+int
+main ()
+{
+ int *p = (int *) malloc (sizeof (int));
+ *p = 3;
+ asm volatile ("" : : "r" (p) : "memory");
+ free (p);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/asan/pr63888.c b/gcc/testsuite/c-c++-common/asan/pr63888.c
new file mode 100644
index 00000000000..a88c860d60b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/pr63888.c
@@ -0,0 +1,34 @@
+/* PR bootstrap/63888 */
+/* { dg-do run } */
+
+__attribute__((noinline, noclone)) int
+foo (int x)
+{
+ int v = 0;
+ switch (x)
+ {
+ case 11: v = 67; break;
+ case 12: v = 68; break;
+ case 13: v = 69; break;
+ }
+ return v;
+}
+
+__attribute__((noinline, noclone)) int
+bar (int x)
+{
+ int v = 0;
+ switch (x)
+ {
+ case 18: v = 67; break;
+ case 19: v = 68; break;
+ case 20: v = 69; break;
+ }
+ return v;
+}
+
+int
+main ()
+{
+ return foo (11) - 67 + bar (19) - 68;
+}
diff --git a/gcc/testsuite/c-c++-common/asan/pr64820.c b/gcc/testsuite/c-c++-common/asan/pr64820.c
new file mode 100644
index 00000000000..583ca0a60b6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/pr64820.c
@@ -0,0 +1,39 @@
+/* { dg-do run } */
+/* { dg-require-effective-target fstack_protector } */
+/* { dg-options "-fstack-protector-strong" } */
+/* { dg-shouldfail "asan" } */
+
+#ifdef __cplusplus
+extern "C"
+#endif
+const char *
+__asan_default_options ()
+{
+ return "detect_stack_use_after_return=1";
+}
+
+__attribute__((noinline))
+char *Ident(char *x) {
+ return x;
+}
+
+__attribute__((noinline))
+char *Func1() {
+ char local[1 << 12];
+ return Ident(local);
+}
+
+__attribute__((noinline))
+void Func2(char *x) {
+ *x = 1;
+}
+int main(int argc, char **argv) {
+ Func2(Func1());
+ return 0;
+}
+
+/* { dg-output "AddressSanitizer: stack-use-after-return on address 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "WRITE of size 1 at .* thread T0.*" } */
+/* { dg-output " #0.*(Func2)?.*pr64820.(c:28)?.*" } */
+/* { dg-output "is located in stack of thread T0 at offset.*" } */
+/* { dg-output "\'local\' <== Memory access at offset 32 is inside this variable" } */
diff --git a/gcc/testsuite/c-c++-common/asan/pr70541.c b/gcc/testsuite/c-c++-common/asan/pr70541.c
new file mode 100644
index 00000000000..b2a4bd5e079
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/pr70541.c
@@ -0,0 +1,34 @@
+/* { dg-do run } */
+/* { dg-options "-fno-builtin-malloc -fno-builtin-free" } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O0" } } */
+/* { dg-shouldfail "asan" } */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct Simple {
+ int value;
+};
+
+int f(struct Simple simple) {
+ return simple.value;
+}
+
+int main() {
+ struct Simple *psimple = (struct Simple *) malloc(sizeof(struct Simple));
+ psimple->value = 42;
+ free(psimple);
+ printf("%d\n", f(*psimple));
+ return 0;
+}
+
+/* { dg-output "ERROR: AddressSanitizer:? heap-use-after-free on address\[^\n\r]*" } */
+/* { dg-output "0x\[0-9a-f\]+ at pc 0x\[0-9a-f\]+ bp 0x\[0-9a-f\]+ sp 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*READ of size 4 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*pr70541.c:21|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*freed by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)free|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*pr70541.c:20|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*previously allocated by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*pr70541.c:18|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/pr70712.c b/gcc/testsuite/c-c++-common/asan/pr70712.c
new file mode 100644
index 00000000000..74a6a75f20c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/pr70712.c
@@ -0,0 +1,32 @@
+/* PR sanitizer/70712 */
+/* { dg-do run } */
+
+struct __attribute__((aligned (64))) S
+{
+ char s[4];
+};
+
+struct T
+{
+ char t[8];
+ char u[480];
+
+};
+
+__attribute__((noinline, noclone)) void
+foo (struct T *p, struct S *q)
+{
+ __builtin_memset (p->t, '\0', sizeof (p->t));
+ __builtin_memset (p->u, '\0', sizeof (p->u));
+ __builtin_memset (q->s, '\0', sizeof (q->s));
+}
+
+int
+main ()
+{
+ struct S s;
+ struct T t;
+ foo (&t, &s);
+ asm volatile ("" : : "r" (&t), "r" (&s) : "memory");
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/asan/red-align-1.c b/gcc/testsuite/c-c++-common/asan/red-align-1.c
new file mode 100644
index 00000000000..afd74677207
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/red-align-1.c
@@ -0,0 +1,20 @@
+/* This tests alignment propagation to a structure element and
+ the absence of a redundant & 7. */
+
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+struct st {
+ int a;
+ int b;
+ int c;
+} __attribute__((aligned(16)));
+
+int foo (struct st * s_p)
+{
+ return s_p->a;
+}
+
+/* { dg-final { scan-tree-dump-times "& 7" 0 "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/red-align-2.c b/gcc/testsuite/c-c++-common/asan/red-align-2.c
new file mode 100644
index 00000000000..d30b1ad9f0c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/red-align-2.c
@@ -0,0 +1,20 @@
+/* This tests alignment propagation to a structure element and
+ the absence of a redundant & 7. */
+
+/* { dg-options "-fdump-tree-sanopt" } */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+struct st {
+ int a;
+ int b;
+ int c;
+} __attribute__((aligned(16)));
+
+int foo (struct st * s_p)
+{
+ return s_p->b;
+}
+
+/* { dg-final { scan-tree-dump-times "& 7" 1 "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/sanitize-all-1.c b/gcc/testsuite/c-c++-common/asan/sanitize-all-1.c
new file mode 100644
index 00000000000..58e4079b0ee
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/sanitize-all-1.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fno-sanitize=all" } */
+
+volatile int ten = 10;
+
+int main() {
+ volatile char x[10];
+ x[ten];
+ return 0;
+}
+
+/* { dg-final { scan-assembler-not "__asan_load" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/sanity-check-pure-c-1.c b/gcc/testsuite/c-c++-common/asan/sanity-check-pure-c-1.c
index 78d15e1aad3..219fcf8ac32 100644
--- a/gcc/testsuite/c-c++-common/asan/sanity-check-pure-c-1.c
+++ b/gcc/testsuite/c-c++-common/asan/sanity-check-pure-c-1.c
@@ -10,7 +10,7 @@ int main() {
}
/* { dg-output "heap-use-after-free.*(\n|\r\n|\r)" } */
-/* { dg-output " #0 \[^\n\r]*(in _*(interceptor_|)free|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 \[^\n\r]*(in _*(interceptor_|wrap_)free|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output " #1 \[^\n\r]*(in _*main (\[^\n\r]*sanity-check-pure-c-1.c:8|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
-/* { dg-output " #0 \[^\n\r]*(in _*(interceptor_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 \[^\n\r]*(in _*(interceptor_|wrap_)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output " #1 \[^\n\r]*(in _*main (\[^\n\r]*sanity-check-pure-c-1.c:7|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/shadow-offset-1.c b/gcc/testsuite/c-c++-common/asan/shadow-offset-1.c
new file mode 100644
index 00000000000..2ca0fd641f2
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/shadow-offset-1.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fno-sanitize=address -fsanitize=kernel-address --param asan-instrumentation-with-call-threshold=100 -fasan-shadow-offset=12345 -fdump-tree-sanopt" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int f (int *p)
+{
+ return *p;
+}
+
+/* { dg-final { scan-tree-dump "12345" "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c b/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c
index c7176509710..34d6aab48e6 100644
--- a/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c
+++ b/gcc/testsuite/c-c++-common/asan/stack-overflow-1.c
@@ -18,6 +18,6 @@ int main() {
}
/* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*stack-overflow-1.c:16|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*stack-overflow-1.c:16|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is located in stack of thread T0.*(\n|\r\n|\r)" */
/* { dg-output "\[^\n\r]*in main.*stack-overflow-1.c.*(\n|\r\n|\r)" */
diff --git a/gcc/testsuite/c-c++-common/asan/strlen-overflow-1.c b/gcc/testsuite/c-c++-common/asan/strlen-overflow-1.c
index 4833dc77891..679dfa5747e 100644
--- a/gcc/testsuite/c-c++-common/asan/strlen-overflow-1.c
+++ b/gcc/testsuite/c-c++-common/asan/strlen-overflow-1.c
@@ -10,21 +10,16 @@ char a[2] = "0";
#ifdef __cplusplus
extern "C"
#endif
-
-__attribute__((no_sanitize_address, noinline)) __SIZE_TYPE__
-strlen (const char *p) {
-
- __SIZE_TYPE__ n = 0;
- for (; *p; ++n, ++p);
- return n;
-}
+__SIZE_TYPE__
+strlen (const char *p);
int main () {
char *p = &a[0];
asm ("" : "+r"(p));
__asan_poison_memory_region ((char *)&a[1], 1);
- return __builtin_strlen (a);
+ return __builtin_strlen (a) + 1;
}
-/* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0.*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*strlen-overflow-1.c:26|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output "READ of size 2 at 0x\[0-9a-f\]+ thread T0.*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*strlen-overflow-1.c:20|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*0x\[0-9a-f\]+ is located 0 bytes to the right of global variable" } */
diff --git a/gcc/testsuite/c-c++-common/asan/strncpy-overflow-1.c b/gcc/testsuite/c-c++-common/asan/strncpy-overflow-1.c
index 3ed9fd6dd5c..45759ae8e95 100644
--- a/gcc/testsuite/c-c++-common/asan/strncpy-overflow-1.c
+++ b/gcc/testsuite/c-c++-common/asan/strncpy-overflow-1.c
@@ -13,9 +13,9 @@ int main(int argc, char **argv) {
}
/* { dg-output "WRITE of size \[0-9\]* at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*(interceptor_|)strncpy|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*strncpy-overflow-1.c:11|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)strncpy|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*strncpy-overflow-1.c:11|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*0x\[0-9a-f\]+ is located 0 bytes to the right of 9-byte region\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*allocated by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*(interceptor_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*strncpy-overflow-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*strncpy-overflow-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/use-after-free-1.c b/gcc/testsuite/c-c++-common/asan/use-after-free-1.c
index 25e9be827f0..75d8f9faa2d 100644
--- a/gcc/testsuite/c-c++-common/asan/use-after-free-1.c
+++ b/gcc/testsuite/c-c++-common/asan/use-after-free-1.c
@@ -12,11 +12,11 @@ int main() {
/* { dg-output "ERROR: AddressSanitizer:? heap-use-after-free on address\[^\n\r]*" } */
/* { dg-output "0x\[0-9a-f\]+ at pc 0x\[0-9a-f\]+ bp 0x\[0-9a-f\]+ sp 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*use-after-free-1.c:9|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*use-after-free-1.c:9|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*0x\[0-9a-f\]+ is located 5 bytes inside of 10-byte region .0x\[0-9a-f\]+,0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*freed by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*(interceptor_|)free|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*use-after-free-1.c:8|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)free|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*use-after-free-1.c:8|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*previously allocated by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #0 0x\[0-9a-f\]+ (in _*(interceptor_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*use-after-free-1.c:7|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +(in _*(interceptor_|wrap_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " #1 0x\[0-9a-f\]+ +(in _*main (\[^\n\r]*use-after-free-1.c:7|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/asan/user-section-1.c b/gcc/testsuite/c-c++-common/asan/user-section-1.c
new file mode 100644
index 00000000000..51e2b9966d6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/user-section-1.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=address -fsanitize-sections=.xxx,.yyy -fdump-tree-sanopt" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int x __attribute__((section(".xxx"))) = 1;
+int y __attribute__((section(".yyy"))) = 1;
+int z __attribute__((section(".zzz"))) = 1;
+
+/* { dg-final { scan-tree-dump "__builtin___asan_unregister_globals \\(.*, 2\\);" "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
+
diff --git a/gcc/testsuite/c-c++-common/asan/user-section-2.c b/gcc/testsuite/c-c++-common/asan/user-section-2.c
new file mode 100644
index 00000000000..f602116fe2d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/user-section-2.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=address -fsanitize-sections=.x* -fdump-tree-sanopt" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int x __attribute__((section(".x1"))) = 1;
+int y __attribute__((section(".x2"))) = 1;
+int z __attribute__((section(".x3"))) = 1;
+
+/* { dg-final { scan-tree-dump "__builtin___asan_unregister_globals \\(.*, 3\\);" "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
+
diff --git a/gcc/testsuite/c-c++-common/asan/user-section-3.c b/gcc/testsuite/c-c++-common/asan/user-section-3.c
new file mode 100644
index 00000000000..66e5f9ac770
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/asan/user-section-3.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=address -fsanitize-sections=.x* -fsanitize-sections=.y* -fdump-tree-sanopt" } */
+/* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */
+
+int x __attribute__((section(".x1"))) = 1;
+int y __attribute__((section(".x2"))) = 1;
+int z __attribute__((section(".y1"))) = 1;
+
+/* { dg-final { scan-tree-dump "__builtin___asan_unregister_globals \\(.*, 1\\);" "sanopt" } } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
+
diff --git a/gcc/testsuite/c-c++-common/pr53874.c b/gcc/testsuite/c-c++-common/pr53874.c
new file mode 100644
index 00000000000..153f997c6e9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/pr53874.c
@@ -0,0 +1,35 @@
+/* PR c/53874 */
+/* { dg-do compile } */
+/* { dg-options "-Wswitch-enum" } */
+
+enum E { A, B, C };
+struct S { enum E e:2; };
+typedef struct S TS;
+
+int
+fn0 (struct S *s)
+{
+ switch (s->e) /* { dg-warning "enumeration value .C. not handled in switch" } */
+ {
+ case A:
+ return 1;
+ case B:
+ return 2;
+ default:
+ return 0;
+ }
+}
+
+int
+fn1 (TS *s)
+{
+ switch (s->e) /* { dg-warning "enumeration value .C. not handled in switch" } */
+ {
+ case A:
+ return 1;
+ case B:
+ return 2;
+ default:
+ return 0;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/pr61405.c b/gcc/testsuite/c-c++-common/pr61405.c
new file mode 100644
index 00000000000..9c05a84764a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/pr61405.c
@@ -0,0 +1,31 @@
+/* PR c/61405 */
+/* { dg-do compile } */
+/* { dg-options "-Wswitch" } */
+
+enum E { A, B, C };
+struct S { enum E e:2; };
+typedef struct S TS;
+
+int
+fn0 (struct S *s)
+{
+ switch (s->e) /* { dg-warning "enumeration value .C. not handled in switch" } */
+ {
+ case A:
+ return 1;
+ case B:
+ return 2;
+ }
+}
+
+int
+fn1 (TS *s)
+{
+ switch (s->e) /* { dg-warning "enumeration value .C. not handled in switch" } */
+ {
+ case A:
+ return 1;
+ case B:
+ return 2;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tsan/mutexset1.c b/gcc/testsuite/c-c++-common/tsan/mutexset1.c
index 084f5141dea..3462ec457d6 100644
--- a/gcc/testsuite/c-c++-common/tsan/mutexset1.c
+++ b/gcc/testsuite/c-c++-common/tsan/mutexset1.c
@@ -38,6 +38,6 @@ int main() {
/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
/* { dg-output " Read of size 4 at 0x\[0-9a-f\]+ by thread T1 \\(mutexes: write M\[0-9\]\\):.*" } */
/* { dg-output " Previous write of size 4 at 0x\[0-9a-f\]+ by thread T2:.*" } */
-/* { dg-output " Mutex M\[0-9\] created at:.*" } */
+/* { dg-output " Mutex M\[0-9\] \\(0x.*\\) created at:.*" } */
/* { dg-output " #0 pthread_mutex_init.*" } */
/* { dg-output " #1 main (.*mutexset1.c|\\?{2}):\[0-9]+.*" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-1.c b/gcc/testsuite/c-c++-common/ubsan/align-1.c
new file mode 100644
index 00000000000..b2ccb30ec1a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-1.c
@@ -0,0 +1,41 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
+
+struct S { int a; char b; long long c; short d[10]; };
+struct T { char a; long long b; };
+struct U { char a; int b; int c; long long d; struct S e; struct T f; };
+struct V { long long a; struct S b; struct T c; struct U u; } v;
+
+__attribute__((noinline, noclone)) void
+f1 (int *p, int *q, char *r, long long *s)
+{
+ *p = *q + *r + *s;
+}
+
+
+__attribute__((noinline, noclone)) int
+f2 (struct S *p)
+{
+ return p->a;
+}
+
+__attribute__((noinline, noclone)) long long
+f3 (struct S *p, int i)
+{
+ return p->c + p->d[1] + p->d[i];
+}
+
+__attribute__((noinline, noclone)) long long
+f4 (long long *p)
+{
+ return *p;
+}
+
+int
+main ()
+{
+ f1 (&v.u.b, &v.u.c, &v.u.a, &v.u.d);
+ if (f2 (&v.u.e) + f3 (&v.u.e, 4) + f4 (&v.u.f.b) != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-2.c b/gcc/testsuite/c-c++-common/ubsan/align-2.c
new file mode 100644
index 00000000000..071de8c202a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-2.c
@@ -0,0 +1,56 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-fsanitize=alignment" } */
+
+struct S { int a; char b; long long c; short d[10]; };
+struct T { char a; long long b; };
+struct U { char a; int b; int c; long long d; struct S e; struct T f; } __attribute__((packed));
+struct V { long long a; struct S b; struct T c; struct U u; } v;
+
+__attribute__((noinline, noclone)) void
+f1 (int *p, int *q, char *r, long long *s)
+{
+ *p =
+ *q
+ + *r
+ + *s;
+}
+
+
+__attribute__((noinline, noclone)) int
+f2 (struct S *p)
+{
+ return p->a;
+}
+
+__attribute__((noinline, noclone)) long long
+f3 (struct S *p, int i)
+{
+ return p->c
+ + p->d[1]
+ + p->d[i];
+}
+
+__attribute__((noinline, noclone)) long long
+f4 (long long *p)
+{
+ return *p;
+}
+
+int
+main ()
+{
+ f1 (&v.u.b, &v.u.c, &v.u.a, &v.u.d);
+ if (f2 (&v.u.e) + f3 (&v.u.e, 4) + f4 (&v.u.f.b) != 0)
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-output "\.c:(14|15):\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" } */
+/* { dg-output "\.c:16:\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'long long int', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:(13|16):\[0-9]*: \[^\n\r]*store to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" } */
+/* { dg-output "\.c:23:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:(29|30):\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:30:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:31:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:37:\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'long long int', which requires \[48] byte alignment" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-3.c b/gcc/testsuite/c-c++-common/ubsan/align-3.c
new file mode 100644
index 00000000000..bbacc4299c6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-3.c
@@ -0,0 +1,66 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
+
+int c;
+
+__attribute__((noinline, noclone)) void
+f1 (int *a, char *b)
+{
+ __builtin_memcpy (a, b, sizeof (*a));
+}
+
+__attribute__((noinline, noclone)) void
+f2 (int *a, char *b)
+{
+ __builtin_memcpy (b, a, sizeof (*a));
+}
+
+__attribute__((noinline, noclone)) void
+f3 (char *b)
+{
+ __builtin_memcpy (&c, b, sizeof (c));
+}
+
+__attribute__((noinline, noclone)) void
+f4 (char *b)
+{
+ __builtin_memcpy (b, &c, sizeof (c));
+}
+
+struct T
+{
+ char a;
+ short b;
+ int c;
+ long d;
+ long long e;
+ short f;
+ float g;
+ double h;
+ long double i;
+} __attribute__((packed));
+
+__attribute__((noinline, noclone)) int
+f5 (struct T *p)
+{
+ return p->a + p->b + p->c + p->d + p->e + p->f + p->g + p->h + p->i;
+}
+
+int
+main ()
+{
+ struct S { int a; char b[sizeof (int) + 1]; } s;
+ s.a = 6;
+ f2 (&s.a, &s.b[1]);
+ f1 (&s.a, &s.b[1]);
+ c = s.a + 1;
+ f4 (&s.b[1]);
+ f3 (&s.b[1]);
+ if (c != 7 || s.a != 6)
+ __builtin_abort ();
+ struct U { long long a; long double b; char c; struct T d; } u;
+ __builtin_memset (&u, 0, sizeof (u));
+ if (f5 (&u.d) != 0)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-4.c b/gcc/testsuite/c-c++-common/ubsan/align-4.c
new file mode 100644
index 00000000000..3252595d330
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-4.c
@@ -0,0 +1,14 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-fsanitize=null,alignment" } */
+
+#include "align-2.c"
+
+/* { dg-output "\.c:(14|15):\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:16:\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'long long int', which requires \[48] byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:(13|16):\[0-9]*: \[^\n\r]*store to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:23:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:(29|30):\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:30:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:31:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\[^\n\r]*\.c:37:\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'long long int', which requires \[48] byte alignment" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-5.c b/gcc/testsuite/c-c++-common/ubsan/align-5.c
new file mode 100644
index 00000000000..b94e167bb67
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-5.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fno-sanitize=null -fsanitize=alignment -O2" } */
+/* Check that, when optimizing, if the alignment is known to be correct
+ and -fsanitize=null instrumentation is not enabled, we don't
+ emit the alignment check. */
+
+__attribute__((noinline, noclone)) int
+foo (char *p)
+{
+ p = (char *) __builtin_assume_aligned (p, __alignof__(int));
+ int *q = (int *) p;
+ return *q;
+}
+
+/* { dg-final { scan-assembler-not "__ubsan_handle" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-6.c b/gcc/testsuite/c-c++-common/ubsan/align-6.c
new file mode 100644
index 00000000000..3364746fb27
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-6.c
@@ -0,0 +1,33 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-O -fsanitize=alignment -fsanitize-recover=alignment" } */
+
+struct S { int a; char b; long long c; short d[10]; };
+struct T { char a; long long b; };
+struct U { char a; int b; int c; long long d; struct S e; struct T f; } __attribute__((packed));
+struct V { long long a; struct S b; struct T c; struct U u; } v;
+
+__attribute__((noinline, noclone)) int
+foo (struct S *p)
+{
+ volatile int i;
+ i = p->a;
+ i = p->a;
+ i = p->a;
+ i = p->a;
+ return p->a;
+}
+
+int
+main ()
+{
+ if (foo (&v.u.e))
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-output "\.c:14:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:15:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:16:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:17:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:18:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-7.c b/gcc/testsuite/c-c++-common/ubsan/align-7.c
new file mode 100644
index 00000000000..b441ef627db
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-7.c
@@ -0,0 +1,33 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-O -fsanitize=alignment -fno-sanitize-recover=alignment -fdump-tree-sanopt-details" } */
+/* { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } } */
+/* { dg-shouldfail "ubsan" } */
+
+struct S { int a; char b; long long c; short d[10]; };
+struct T { char a; long long b; };
+struct U { char a; int b; int c; long long d; struct S e; struct T f; } __attribute__((packed));
+struct V { long long a; struct S b; struct T c; struct U u; } v;
+
+__attribute__((noinline, noclone)) int
+foo (struct S *p)
+{
+ volatile int i;
+ i = p->a;
+ i = p->a;
+ i = p->a;
+ i = p->a;
+ return p->a;
+}
+
+int
+main ()
+{
+ if (foo (&v.u.e))
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-output "\.c:16:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct S', which requires \[48] byte alignment" } */
+/* { dg-final { scan-tree-dump-times "Optimizing" 4 "sanopt"} } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-8.c b/gcc/testsuite/c-c++-common/ubsan/align-8.c
new file mode 100644
index 00000000000..4e43a09ff3a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-8.c
@@ -0,0 +1,32 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-O -fsanitize=alignment -fsanitize-undefined-trap-on-error -fdump-tree-sanopt-details" } */
+/* { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } } */
+/* { dg-shouldfail "ubsan" } */
+
+struct S { int a; char b; long long c; short d[10]; };
+struct T { char a; long long b; };
+struct U { char a; int b; int c; long long d; struct S e; struct T f; } __attribute__((packed));
+struct V { long long a; struct S b; struct T c; struct U u; } v;
+
+__attribute__((noinline, noclone)) int
+foo (struct S *p)
+{
+ volatile int i;
+ i = p->a;
+ i = p->a;
+ i = p->a;
+ i = p->a;
+ return p->a;
+}
+
+int
+main ()
+{
+ if (foo (&v.u.e))
+ __builtin_abort ();
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "Optimizing" 4 "sanopt"} } */
+/* { dg-final { cleanup-tree-dump "sanopt" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/align-9.c b/gcc/testsuite/c-c++-common/ubsan/align-9.c
new file mode 100644
index 00000000000..24cba94f277
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/align-9.c
@@ -0,0 +1,21 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-O2 -fsanitize=alignment -fsanitize-recover=alignment" } */
+
+__attribute__((noinline, noclone)) void
+foo (void *p, const void *q)
+{
+ *(long int *) p = *(const long int *) q;
+}
+
+int
+main ()
+{
+ struct S { long c; char f[64]; char d; char e[2 * sizeof (long)]; char g[64]; } s;
+ __builtin_memset (&s, '\0', sizeof s);
+ foo (&s.e[0], &s.e[sizeof (long)]);
+ return 0;
+}
+
+/* { dg-output "\.c:8:\[0-9]*: \[^\n\r]*load of misaligned address 0x\[0-9a-fA-F]* for type 'const long int', which requires \[48] byte alignment.*" } */
+/* { dg-output "\.c:8:\[0-9]*: \[^\n\r]*store to misaligned address 0x\[0-9a-fA-F]* for type 'long int', which requires \[48] byte alignment" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/attrib-2.c b/gcc/testsuite/c-c++-common/ubsan/attrib-2.c
new file mode 100644
index 00000000000..71f2e58ea67
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/attrib-2.c
@@ -0,0 +1,71 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow" } */
+
+/* Test that we don't instrument functions marked with the
+ no_sanitize_undefined attribute. */
+
+#ifndef __cplusplus
+#define bool _Bool
+#endif
+enum A { B = -3, C = 2 } a;
+bool b;
+
+__attribute__((no_sanitize_undefined))
+static void
+vla_bound (void)
+{
+ int i = -1;
+ volatile int a[i];
+}
+
+__attribute__((no_sanitize_undefined))
+static void
+si_overflow (void)
+{
+ int x = 123, y = 267;
+ volatile int z1 = x + y;
+ volatile int z2 = x - y;
+ volatile int z3 = x * y;
+ volatile int z4 = x / y;
+}
+
+__attribute__((no_sanitize_undefined))
+static void
+null (int *p)
+{
+ *p = 42;
+}
+
+__attribute__((no_sanitize_undefined))
+static void
+retrn (int *p)
+{
+ *p = 42;
+}
+
+__attribute__((no_sanitize_undefined))
+static enum A
+bool_enum (bool *p)
+{
+ *p = b;
+ return a;
+}
+
+__attribute__((no_sanitize_undefined))
+static void
+float_zero (void)
+{
+ volatile float a = 4.2f, b = 0.0f, c;
+ c = a / b;
+}
+
+__attribute__((no_sanitize_undefined))
+static void
+float_cast (void)
+{
+ volatile double d = 300;
+ volatile signed char c;
+ c = d;
+}
+
+/* { dg-final { scan-assembler-not "__ubsan_handle" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/attrib-3.c b/gcc/testsuite/c-c++-common/ubsan/attrib-3.c
new file mode 100644
index 00000000000..3aaf49d85a5
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/attrib-3.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+
+/* Test that we don't instrument functions marked with the
+ no_sanitize_undefined attribute. */
+
+__attribute__((no_sanitize_undefined, returns_nonnull))
+char *
+foo (char *x)
+{
+ return x;
+}
+
+__attribute__((nonnull)) void bar (char *, int, char *);
+
+__attribute__((no_sanitize_undefined))
+void
+baz (char *x, int y, char *z)
+{
+ bar (x, y, z);
+}
+
+/* { dg-final { scan-assembler-not "__ubsan_handle" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/attrib-4.c b/gcc/testsuite/c-c++-common/ubsan/attrib-4.c
new file mode 100644
index 00000000000..ba0f00cfb7f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/attrib-4.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+
+/* Test that we don't instrument functions marked with the
+ no_sanitize_undefined attribute. */
+
+struct S { int a[16]; };
+
+__attribute__((no_sanitize_undefined)) long long
+foo (int *a, long long *b, struct S *c)
+{
+ return a[1] + *b + c->a[a[0]];
+}
+
+/* { dg-final { scan-assembler-not "__ubsan_handle" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-1.c b/gcc/testsuite/c-c++-common/ubsan/bounds-1.c
new file mode 100644
index 00000000000..5014f6f1f7e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-1.c
@@ -0,0 +1,81 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds -fno-sanitize-recover=bounds -Wall" } */
+
+/* Don't fail on valid uses. */
+
+struct S { int a[10]; };
+struct T { int l; int a[]; };
+struct U { int l; int a[0]; };
+struct V { int l; int a[1]; };
+
+__attribute__ ((noinline, noclone))
+void
+fn_p (int p)
+{
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn_a (volatile int a[])
+{
+ /* This is not instrumented. */
+ a[4] = 5;
+}
+
+__attribute__ ((noinline, noclone))
+int
+foo_i (int i)
+{
+ int a[5] = { };
+ int k = i ? a[i] : i;
+ return k;
+}
+
+int
+main (void)
+{
+ volatile int a[5];
+ a[4] = 1;
+ a[2] = a[3];
+ fn_p (a[4]);
+ fn_a (a);
+
+ int i = 4;
+ a[i] = 1;
+ a[2] = a[i];
+ fn_p (a[i]);
+ foo_i (i);
+
+ const int n = 5;
+ volatile int b[n];
+ b[4] = 1;
+ b[2] = b[3];
+ fn_p (b[4]);
+ fn_a (b);
+
+ volatile int c[n][n][n];
+ c[2][2][2] = 2;
+ i = c[4][4][4];
+
+ volatile struct S s;
+ s.a[9] = 1;
+ i = s.a[9];
+
+ /* Don't instrument flexible array members. */
+ struct T *t = (struct T *) __builtin_malloc (sizeof (struct T) + 10);
+ t->a[1] = 1;
+
+ /* Don't instrument zero-sized arrays (GNU extension). */
+ struct U *u = (struct U *) __builtin_malloc (sizeof (struct U) + 10);
+ u->a[1] = 1;
+
+ /* Don't instrument last array in a struct. */
+ struct V *v = (struct V *) __builtin_malloc (sizeof (struct V) + 10);
+ v->a[1] = 1;
+
+ long int *d[10][5];
+ d[9][0] = (long int *) 0;
+ d[8][3] = d[9][0];
+
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-10.c b/gcc/testsuite/c-c++-common/ubsan/bounds-10.c
new file mode 100644
index 00000000000..a6187b543e8
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-10.c
@@ -0,0 +1,16 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds-strict" } */
+
+struct V { int l; int a[1]; };
+
+int
+main (void)
+{
+ /* For strict, do instrument last array in a struct. */
+ struct V *v = (struct V *) __builtin_malloc (sizeof (struct V) + 10);
+ v->a[1] = 1;
+
+ return 0;
+}
+
+/* { dg-output "index 1 out of bounds for type 'int \\\[1\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-11.c b/gcc/testsuite/c-c++-common/ubsan/bounds-11.c
new file mode 100644
index 00000000000..c3e0f220dc4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-11.c
@@ -0,0 +1,23 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds" } */
+
+struct S
+{
+ unsigned long a[1];
+ int l;
+};
+
+static inline unsigned long
+fn (const struct S *s, int i)
+{
+ return s->a[i] / i;
+}
+
+int
+main ()
+{
+ struct S s;
+ fn (&s, 1);
+}
+
+/* { dg-output "index 1 out of bounds for type 'long unsigned int \\\[1\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-12.c b/gcc/testsuite/c-c++-common/ubsan/bounds-12.c
new file mode 100644
index 00000000000..3cd3a4abb49
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-12.c
@@ -0,0 +1,23 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds" } */
+
+struct S
+{
+ unsigned long a[1];
+ int l;
+};
+
+static inline unsigned long
+fn (const struct S *s, int i)
+{
+ return s->a[i] << i;
+}
+
+int
+main ()
+{
+ struct S s;
+ fn (&s, 1);
+}
+
+/* { dg-output "index 1 out of bounds for type 'long unsigned int \\\[1\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-2.c b/gcc/testsuite/c-c++-common/ubsan/bounds-2.c
new file mode 100644
index 00000000000..10642c7a16e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-2.c
@@ -0,0 +1,167 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds -Wall -Wextra -Wno-unused -Wno-array-bounds" } */
+
+/* Test runtime errors. */
+
+struct S { int a[10]; };
+struct T { int a[5]; int s[2]; };
+
+int
+foo_5 (void)
+{
+ return 5;
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn_p (int p)
+{
+ (void) p;
+}
+
+static void __attribute__ ((noinline, noclone))
+fn1 (void)
+{
+ volatile int a[5];
+ asm ("" : : "r" (&a) : "memory");
+ a[2] = a[5];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn2 (void)
+{
+ volatile int a[5];
+ volatile int j;
+ int i = 5;
+ int *p = &i;
+ asm ("" : : "r" (&a) : "memory");
+ j = a[*p];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn3 (void)
+{
+ volatile int a[5];
+ fn_p (a[5]);
+}
+
+static void __attribute__ ((noinline, noclone))
+fn4 (void)
+{
+ struct T t;
+ asm ("" : : "r" (&t.a) : "memory");
+ t.a[foo_5 ()] = 1;
+}
+
+static void __attribute__ ((noinline, noclone))
+fn5 (void)
+{
+ int i = 5;
+ volatile int a[i];
+ asm ("" : : "r" (&a) : "memory");
+ a[2] = a[i];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn6 (void)
+{
+ int i = 5;
+ volatile int a[i];
+ volatile int j;
+ fn_p (a[i]);
+ asm ("" : : "r" (&a) : "memory");
+ j = a[foo_5 ()];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn7 (void)
+{
+ int n = 5;
+ volatile int i;
+ volatile int c[n][n][n];
+ asm ("" : : "r" (&c[5]) : "memory");
+ i = c[5][2][2];
+ asm ("" : : "r" (&c[2]) : "memory");
+ i = c[2][5][2];
+ asm ("" : : "r" (&c[2]) : "memory");
+ i = c[2][2][5];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn8 (void)
+{
+ volatile int i;
+ volatile struct S s;
+ asm ("" : : "r" (&s.a) : "memory");
+ i = s.a[10];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn9 (void)
+{
+ long int *volatile d[10][5];
+ asm ("" : : "r" (&d[10]) : "memory");
+ d[8][3] = d[10][0];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn10 (void)
+{
+ /* Beware of side-effects. */
+ volatile int x = 10;
+ volatile int e[20];
+ e[x++] = 3;
+ if (x != 11)
+ __builtin_abort ();
+ e[x--] = 3;
+ if (x != 10)
+ __builtin_abort ();
+}
+
+static void __attribute__ ((noinline, noclone))
+fn11 (void)
+{
+ char ***volatile f[5];
+ asm ("" : : "r" (&f) : "memory");
+ f[2] = f[5];
+}
+
+static void __attribute__ ((noinline, noclone))
+fn12 (int i)
+{
+ volatile int a[5] = { };
+ int k = i ? a[i] : i;
+}
+
+int
+main (void)
+{
+ fn1 ();
+ fn2 ();
+ fn3 ();
+ fn4 ();
+ fn5 ();
+ fn6 ();
+ fn7 ();
+ fn8 ();
+ fn9 ();
+ fn10 ();
+ fn11 ();
+ fn12 (5);
+ return 0;
+}
+
+/* { dg-output "index 5 out of bounds for type 'int \\\[5\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[5\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[5\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[5\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[\\\*\\\]\\\[\\\*\\\]\\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[\\\*\\\]\\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 10 out of bounds for type 'int \\\[10\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 10 out of bounds for type 'long int \\\*\\\[10\\\]\\\[5\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'char \\\*\\\*\\\*\\\[5\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 5 out of bounds for type 'int \\\[5\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-3.c b/gcc/testsuite/c-c++-common/ubsan/bounds-3.c
new file mode 100644
index 00000000000..fcf71a3bf8a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-3.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=bounds -Wall -Wextra" } */
+
+/* Do not generate invalid diagnostics. */
+
+extern const int a[10];
+extern int bar (int);
+void
+foo (int i, int j)
+{
+ bar (a[i] >> j);
+ bar ((unsigned long) a[i] >> j);
+ bar ((short int) (unsigned long) a[i] >> j);
+ bar (j >> a[i]);
+ bar (j >> (unsigned long) a[i]);
+ bar (j >> (short int) (unsigned long) a[i]);
+ bar (a[i] / j);
+ bar ((unsigned long) a[i] / j);
+ bar ((short int) (unsigned long) a[i] / j);
+ bar (j / a[i]);
+ bar (j / (unsigned long) a[i]);
+ bar (j / (short int) (unsigned long) a[i]);
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-4.c b/gcc/testsuite/c-c++-common/ubsan/bounds-4.c
new file mode 100644
index 00000000000..7748780884c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-4.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=bounds -Wall -Wextra -Wno-unused" } */
+
+/* Initializers of TREE_STATIC variables aren't instrumented.
+ But don't ICE on them. */
+
+int A[2];
+int *gp = &A[4];
+int *gpi;
+
+int
+main (void)
+{
+ gpi = &A[4];
+ static int *pi = &A[4];
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-5.c b/gcc/testsuite/c-c++-common/ubsan/bounds-5.c
new file mode 100644
index 00000000000..c82287a561c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-5.c
@@ -0,0 +1,113 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds -Wall -Wextra -Wno-unused -Wno-array-bounds" } */
+
+/* Test flexible-array-member-like arrays; proper flexible array members
+ are tested in bounds-1.c.  This tests the non-strict mode. */
+
+__attribute__ ((noinline, noclone))
+void
+fn1 (void)
+{
+ volatile struct S { char a[1]; char b; } s;
+ volatile int i;
+ asm ("" : : "r" (&s.a) : "memory");
+ i = s.a[0]; // OK
+ asm ("" : : "r" (&s.a) : "memory");
+ i = s.a[1]; // error
+ volatile struct S *p = &s;
+ asm ("" : : "r" (&p->a) : "memory");
+ i = p->a[0]; // OK
+ asm ("" : : "r" (&p->a) : "memory");
+ i = p->a[1]; // error
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn2 (void)
+{
+ struct S { int c; char d[4]; };
+ volatile struct T { int e; struct S f; int g; } t;
+ volatile int i;
+ asm ("" : : "r" (&t.f.d) : "memory");
+ i = t.f.d[3]; // OK
+ asm ("" : : "r" (&t.f.d) : "memory");
+ i = t.f.d[4]; // error
+ volatile struct T *p = &t;
+ asm ("" : : "r" (&p->f.d) : "memory");
+ i = p->f.d[3]; // OK
+ asm ("" : : "r" (&p->f.d) : "memory");
+ i = p->f.d[4]; // error
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn3 (void)
+{
+ volatile struct S { char b; char a[1]; } s;
+ volatile int i;
+ asm ("" : : "r" (&s.a) : "memory");
+ i = s.a[0]; // OK
+ asm ("" : : "r" (&s.a) : "memory");
+ i = s.a[1]; // error
+ volatile struct S *p = &s;
+ asm ("" : : "r" (&p->a) : "memory");
+ i = p->a[0]; // OK
+ asm ("" : : "r" (&p->a) : "memory");
+ i = p->a[1]; // error in strict mode
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn4 (void)
+{
+ volatile struct S { char b; char a[1]; } s;
+ volatile struct T { struct S s; int i; } t;
+ volatile int i;
+ asm ("" : : "r" (&t.s.a) : "memory");
+ i = t.s.a[0]; // OK
+ asm ("" : : "r" (&t.s.a) : "memory");
+ i = t.s.a[1]; // error
+ volatile struct T *pt = &t;
+ asm ("" : : "r" (&pt->s.a) : "memory");
+ i = pt->s.a[0]; // OK
+ asm ("" : : "r" (&pt->s.a) : "memory");
+ i = pt->s.a[1]; // error
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn5 (void)
+{
+ volatile struct S { char b; char a[1]; } s;
+ volatile struct U { int a; struct S s; } u;
+ volatile int i;
+ asm ("" : : "r" (&u.s.a) : "memory");
+ i = u.s.a[0]; // OK
+ asm ("" : : "r" (&u.s.a) : "memory");
+ i = u.s.a[1]; // error
+ volatile struct U *pu = &u;
+ asm ("" : : "r" (&pu->s.a) : "memory");
+ i = pu->s.a[0]; // OK
+ asm ("" : : "r" (&pu->s.a) : "memory");
+ i = pu->s.a[1]; // error in strict mode
+}
+
+int
+main (void)
+{
+ fn1 ();
+ fn2 ();
+ fn3 ();
+ fn4 ();
+ fn5 ();
+ return 0;
+}
+
+/* { dg-output "index 1 out of bounds for type 'char \\\[1\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 1 out of bounds for type 'char \\\[1\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'char \\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'char \\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 1 out of bounds for type 'char \\\[1\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 1 out of bounds for type 'char \\\[1\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 1 out of bounds for type 'char \\\[1\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 1 out of bounds for type 'char \\\[1\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-6.c b/gcc/testsuite/c-c++-common/ubsan/bounds-6.c
new file mode 100644
index 00000000000..aef2055fb3c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-6.c
@@ -0,0 +1,37 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds -Wall -Wextra" } */
+
+/* Test off-by-one. */
+
+struct S { int a; int b; } s[4], *t;
+struct U { int a[10]; } u[4], *v;
+volatile int *a, *b, *c;
+volatile void *d;
+volatile int e[4][4];
+
+int
+main (void)
+{
+ t = &s[4]; // OK
+ a = &s[4].a; // Error
+ b = &s[4].b; // Error
+ d = &e[4]; // OK
+ c = &e[4][0]; // Error
+ c = &e[3][4]; // OK
+ c = &e[3][3]; // OK
+
+ a = &u[4].a[9]; // Error
+ a = &u[4].a[10]; // Error
+ a = &u[3].a[9]; // OK
+ a = &u[3].a[10]; // OK
+ a = &u[3].a[11]; // Error
+
+ return 0;
+}
+
+/* { dg-output "index 4 out of bounds for type 'S \\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'S \\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'int \\\[4\\\]\\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'U \\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'U \\\[4\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 11 out of bounds for type 'int \\\[10\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-7.c b/gcc/testsuite/c-c++-common/ubsan/bounds-7.c
new file mode 100644
index 00000000000..e08a3fd80ec
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-7.c
@@ -0,0 +1,54 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds" } */
+
+/* Test negative bounds. */
+
+struct S { int a[10]; };
+
+__attribute__ ((noinline, noclone))
+void
+fn1 (void)
+{
+ volatile int i;
+ int m = -1;
+ volatile int a[7];
+ asm ("" : : "r" (&a) : "memory");
+ i = a[-1];
+ asm ("" : : "r" (&a) : "memory");
+ i = a[m];
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn2 (void)
+{
+ volatile int i;
+ int m = 7;
+ volatile int a[m];
+ asm ("" : : "r" (&a) : "memory");
+ i = a[-1];
+}
+
+__attribute__ ((noinline, noclone))
+void
+fn3 (void)
+{
+ volatile int i;
+ volatile struct S s;
+ asm ("" : : "r" (&s.a) : "memory");
+ i = s.a[-1];
+}
+
+int
+main (void)
+{
+ fn1 ();
+ fn2 ();
+ fn3 ();
+ return 0;
+}
+
+/* { dg-output "index -1 out of bounds for type 'int \\\[7\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index -1 out of bounds for type 'int \\\[7\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index -1 out of bounds for type 'int \\\[\\\*\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index -1 out of bounds for type 'int \\\[10\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-8.c b/gcc/testsuite/c-c++-common/ubsan/bounds-8.c
new file mode 100644
index 00000000000..8ab946b41ea
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-8.c
@@ -0,0 +1,14 @@
+/* PR sanitizer/65280 */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds" } */
+
+int
+main (void)
+{
+ int *t = (int *) __builtin_malloc (sizeof (int) * 10);
+ int (*a)[1] = (int (*)[1]) t;
+ (*a)[2] = 1;
+ return 0;
+}
+
+/* { dg-output "index 2 out of bounds for type 'int \\\[1\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/bounds-9.c b/gcc/testsuite/c-c++-common/ubsan/bounds-9.c
new file mode 100644
index 00000000000..eab2ede7220
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/bounds-9.c
@@ -0,0 +1,24 @@
+/* PR sanitizer/65280 */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds" } */
+/* Origin: Martin Uecker <uecker@eecs.berkeley.edu> */
+
+void
+foo (volatile int (*a)[3])
+{
+ (*a)[3] = 1; // error
+ a[0][0] = 1; // ok
+ a[1][0] = 1; // ok
+ a[1][4] = 1; // error
+}
+
+int
+main ()
+{
+ volatile int a[20];
+ foo ((volatile int (*)[3]) &a);
+ return 0;
+}
+
+/* { dg-output "index 3 out of bounds for type 'int \\\[3\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*index 4 out of bounds for type 'int \\\[3\\\]'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-1.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-1.c
index ec391e40be2..479ced038fb 100644
--- a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-1.c
@@ -17,8 +17,8 @@ main (void)
return 0;
}
-/* { dg-output "division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero" } */
+/* { dg-output "division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-2.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-2.c
index c8820fa9466..d1eb95f4362 100644
--- a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-2.c
@@ -16,8 +16,8 @@ main (void)
return 0;
}
-/* { dg-output "division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division by zero" } */
+/* { dg-output "division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c
index 399071ee7aa..266423aa49f 100644
--- a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-3.c
@@ -16,6 +16,6 @@ main (void)
return 0;
}
-/* { dg-output "division of -2147483648 by -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type 'int'" } */
+/* { dg-output "division of -2147483648 by -1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division of -2147483648 by -1 cannot be represented in type 'int'\[^\n\r]*" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c
index 7a28bacd14b..bb391c5b36d 100644
--- a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-5.c
@@ -1,4 +1,4 @@
-/* { dg-do compile} */
+/* { dg-do compile } */
/* { dg-options "-fsanitize=integer-divide-by-zero" } */
void
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-6.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-6.c
new file mode 100644
index 00000000000..27a18bb096e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-6.c
@@ -0,0 +1,49 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=integer-divide-by-zero -Wno-div-by-zero" } */
+
+#include <stdio.h>
+
+int x;
+
+__attribute__((noinline, noclone))
+void
+barrier (void)
+{
+ asm volatile ("" : : : "memory");
+ if (x)
+ __builtin_exit (1);
+}
+
+int
+main (void)
+{
+ volatile int a = 0;
+ volatile long long int b = 0;
+ volatile unsigned int c = 1;
+
+ barrier (); fputs ("1st\n", stderr); barrier ();
+ a / b;
+ barrier (); fputs ("2nd\n", stderr); barrier ();
+ 0 / 0;
+ barrier (); fputs ("3rd\n", stderr); barrier ();
+ a / 0;
+ barrier (); fputs ("4th\n", stderr); barrier ();
+ 0 / b;
+ barrier (); fputs ("5th\n", stderr); barrier ();
+ 2 / --c;
+ barrier (); fputs ("6th\n", stderr); barrier ();
+
+ return 0;
+}
+
+/* { dg-output "1st(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "2nd(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "3rd(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "4th(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "5th(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "6th" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/div-by-zero-7.c b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-7.c
new file mode 100644
index 00000000000..5f53bef74ea
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/div-by-zero-7.c
@@ -0,0 +1,41 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=integer-divide-by-zero -Wno-div-by-zero -fno-sanitize-recover=integer-divide-by-zero" } */
+/* { dg-shouldfail "ubsan" } */
+
+#include <stdio.h>
+
+int x;
+
+__attribute__((noinline, noclone))
+void
+barrier (void)
+{
+ asm volatile ("" : : : "memory");
+ if (++x == 3)
+ __builtin_exit (0);
+}
+
+int
+main (void)
+{
+ volatile int a = 0;
+ volatile long long int b = 0;
+ volatile unsigned int c = 1;
+
+ barrier (); fputs ("1st\n", stderr); barrier ();
+ a / b;
+ barrier (); fputs ("2nd\n", stderr); barrier ();
+ 0 / 0;
+ barrier (); fputs ("3rd\n", stderr); barrier ();
+ a / 0;
+ barrier (); fputs ("4th\n", stderr); barrier ();
+ 0 / b;
+ barrier (); fputs ("5th\n", stderr); barrier ();
+ 2 / --c;
+ barrier (); fputs ("6th\n", stderr); barrier ();
+
+ return 0;
+}
+
+/* { dg-output "1st(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-1.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-1.c
new file mode 100644
index 00000000000..cd6941c9d30
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-1.c
@@ -0,0 +1,204 @@
+/* { dg-do run { target { lp64 || ilp32 } } } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+/* { dg-additional-options "-msse2 -mfpmath=sse" { target { sse2_runtime && ia32 } } } */
+
+#include <limits.h>
+#include "float-cast.h"
+
+int
+main (void)
+{
+ const double inf = __builtin_inf ();
+ const double nan = __builtin_nan ("");
+ volatile double d;
+
+ volatile signed char sc;
+ d = SCHAR_MIN;
+ CHECK_BOUNDARY (sc, d);
+ d = 0.0;
+ CHECK_BOUNDARY (sc, d);
+ d = SCHAR_MAX;
+ CHECK_BOUNDARY (sc, d);
+ CHECK_NONNUMBERS (sc);
+
+ volatile unsigned char uc;
+ d = UCHAR_MAX;
+ CHECK_BOUNDARY (uc, d);
+ d = 0.0;
+ CHECK_BOUNDARY (uc, d);
+ CHECK_NONNUMBERS (uc);
+
+ volatile short int s;
+ d = SHRT_MIN;
+ CHECK_BOUNDARY (s, d);
+ d = 0.0;
+ CHECK_BOUNDARY (s, d);
+ d = SHRT_MAX;
+ CHECK_BOUNDARY (s, d);
+ CHECK_NONNUMBERS (s);
+
+ volatile unsigned short int us;
+ d = USHRT_MAX;
+ CHECK_BOUNDARY (us, d);
+ d = 0.0;
+ CHECK_BOUNDARY (us, d);
+ CHECK_NONNUMBERS (us);
+
+ volatile int i;
+ d = INT_MIN;
+ CHECK_BOUNDARY (i, d);
+ d = 0.0;
+ CHECK_BOUNDARY (i, d);
+ d = INT_MAX;
+ CHECK_BOUNDARY (i, d);
+ CHECK_NONNUMBERS (i);
+
+ volatile unsigned int u;
+ d = UINT_MAX;
+ CHECK_BOUNDARY (u, d);
+ d = 0.0;
+ CHECK_BOUNDARY (u, d);
+ CHECK_NONNUMBERS (u);
+
+ volatile long l;
+ /* Dealing with 64-bit vs. 32-bit longs causes too much of a headache. */
+ d = 0.0;
+ CHECK_BOUNDARY (l, d);
+ CHECK_NONNUMBERS (l);
+
+ volatile unsigned long ul;
+ d = 0.0;
+ CHECK_BOUNDARY (ul, d);
+ CHECK_NONNUMBERS (ul);
+
+ volatile long long ll;
+ d = LLONG_MIN;
+ CHECK_BOUNDARY (ll, d);
+ d = 0.0;
+ CHECK_BOUNDARY (ll, d);
+ d = LLONG_MAX;
+ CHECK_BOUNDARY (ll, d);
+ CHECK_NONNUMBERS (ll);
+
+ volatile unsigned long long ull;
+ d = ULLONG_MAX;
+ CHECK_BOUNDARY (ull, d);
+ d = 0.0;
+ CHECK_BOUNDARY (ull, d);
+ CHECK_NONNUMBERS (ull);
+
+ return 0;
+}
+
+/* { dg-output "value -133 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -129.5 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 128 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 128.5 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 132 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 256 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 256.5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 260 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32773 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32769.5 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 32768 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 32768.5 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 32772 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 65536 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 65536.5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 65540 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long long unsigned int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-10.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-10.c
new file mode 100644
index 00000000000..a54a838870b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-10.c
@@ -0,0 +1,46 @@
+/* { dg-do run { target dfp } } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=float-cast-overflow -fsanitize-recover=float-cast-overflow" } */
+/* { dg-additional-options "-DUSE_INT128" { target int128 } } */
+/* FIXME: When _DecimalXX <-> {signed, unsigned} __int128 conversions are
+ supported, -DBROKEN_DECIMAL_INT128 can be removed. */
+/* { dg-additional-options "-DUSE_DFP -DBROKEN_DECIMAL_INT128" } */
+
+#include "float-cast-overflow-8.c"
+
+/* _Decimal32 */
+/* { dg-output "value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* _Decimal64 */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* _Decimal128 */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-2.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-2.c
new file mode 100644
index 00000000000..b25e312b61b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-2.c
@@ -0,0 +1,73 @@
+/* { dg-do run } */
+/* { dg-require-effective-target int128 } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+#include "float-cast.h"
+
+int
+main (void)
+{
+ const double inf = __builtin_inf ();
+ const double nan = __builtin_nan ("");
+ volatile double d;
+
+ __int128 i;
+ d = INT128_MIN;
+ CHECK_BOUNDARY (i, d);
+ d = 0.0;
+ CHECK_BOUNDARY (i, d);
+ d = INT128_MAX;
+ CHECK_BOUNDARY (i, d);
+ CHECK_NONNUMBERS (i);
+
+ unsigned __int128 u;
+ d = UINT128_MAX;
+ CHECK_BOUNDARY (u, d);
+ d = 0.0;
+ CHECK_BOUNDARY (u, d);
+ CHECK_NONNUMBERS (u);
+
+ return 0;
+}
+
+/* { dg-output "runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value nan is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -?nan is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value inf is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -inf is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -5 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -1.5 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value nan is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -?nan is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value inf is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: value -inf is outside the range of representable values of type '__int128 unsigned'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-3.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-3.c
new file mode 100644
index 00000000000..ba82111a4df
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-3.c
@@ -0,0 +1,40 @@
+/* { dg-do run { target { lp64 || ilp32 } } } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+#include <limits.h>
+#include "float-cast.h"
+
+int
+main (void)
+{
+ volatile float f;
+
+ volatile signed char s;
+ f = SCHAR_MIN;
+ CHECK_BOUNDARY (s, f);
+ f = 0.0;
+ CHECK_BOUNDARY (s, f);
+ f = SCHAR_MAX;
+ CHECK_BOUNDARY (s, f);
+
+ volatile unsigned char u;
+ f = UCHAR_MAX;
+ CHECK_BOUNDARY (u, f);
+ f = 0.0;
+ CHECK_BOUNDARY (u, f);
+
+ return 0;
+}
+
+/* { dg-output "value -133* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -129.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 128 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 128.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 132 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 256 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 256.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 260 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-4.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-4.c
new file mode 100644
index 00000000000..af76e4a3343
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-4.c
@@ -0,0 +1,52 @@
+/* { dg-do run { target { lp64 } } } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+#include <limits.h>
+#include "float-cast.h"
+
+int
+main (void)
+{
+ const long double inf = __builtin_infl ();
+ const long double nan = __builtin_nanl ("");
+ volatile long double ld;
+
+ volatile int i;
+ ld = INT_MIN;
+ CHECK_BOUNDARY (i, ld);
+ ld = 0.0l;
+ CHECK_BOUNDARY (i, ld);
+ ld = INT_MAX;
+ CHECK_BOUNDARY (i, ld);
+ CHECK_NONNUMBERS (i);
+
+ volatile unsigned int u;
+ ld = UINT_MAX;
+ CHECK_BOUNDARY (u, ld);
+ ld = 0.0l;
+ CHECK_BOUNDARY (u, ld);
+ CHECK_NONNUMBERS (u);
+
+ return 0;
+}
+
+/* { dg-output "value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-5.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-5.c
new file mode 100644
index 00000000000..4c2fbb4d9ea
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-5.c
@@ -0,0 +1,40 @@
+/* { dg-do run { target i?86-*-* x86_64-*-* ia64-*-* } } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+#include <limits.h>
+#include "float-cast.h"
+
+int
+main (void)
+{
+ volatile __float128 f;
+
+ volatile signed char s;
+ f = SCHAR_MIN;
+ CHECK_BOUNDARY (s, f);
+ f = 0.0q;
+ CHECK_BOUNDARY (s, f);
+ f = SCHAR_MAX;
+ CHECK_BOUNDARY (s, f);
+
+ volatile unsigned char u;
+ f = UCHAR_MAX;
+ CHECK_BOUNDARY (u, f);
+ f = 0.0q;
+ CHECK_BOUNDARY (u, f);
+
+ return 0;
+}
+
+/* { dg-output "value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-6.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-6.c
new file mode 100644
index 00000000000..73b8b95b554
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-6.c
@@ -0,0 +1,40 @@
+/* { dg-do run { target { { x86_64-*-* ia64-*-* } && { ! { ia32 } } } } } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+#include <limits.h>
+#include "float-cast.h"
+
+int
+main (void)
+{
+ volatile __float80 f;
+
+ volatile signed char s;
+ f = SCHAR_MIN;
+ CHECK_BOUNDARY (s, f);
+ f = 0.0w;
+ CHECK_BOUNDARY (s, f);
+ f = SCHAR_MAX;
+ CHECK_BOUNDARY (s, f);
+
+ volatile unsigned char u;
+ f = UCHAR_MAX;
+ CHECK_BOUNDARY (u, f);
+ f = 0.0w;
+ CHECK_BOUNDARY (u, f);
+
+ return 0;
+}
+
+/* { dg-output "value -133 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -129.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 128 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 128.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 132 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 256 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 256.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 260 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.c
new file mode 100644
index 00000000000..69d46284b5e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.c
@@ -0,0 +1,196 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=float-cast-overflow -fno-sanitize-recover=float-cast-overflow" } */
+/* FIXME: When _DecimalXX <-> {signed, unsigned} __int128 conversions are
+ supported, -DBROKEN_DECIMAL_INT128 can be removed. */
+/* { dg-additional-options "-DUSE_DFP -DBROKEN_DECIMAL_INT128" { target dfp } } */
+
+#define USE_FLT_DBL_LDBL
+#ifdef __SIZEOF_INT128__
+#define USE_INT128
+#endif
+#ifdef __SIZEOF_FLOAT80__
+#define USE_FLOAT80
+#endif
+#ifdef __SIZEOF_FLOAT128__
+#define USE_FLOAT128
+#endif
+
+#include "float-cast-overflow-7.h"
+
+#define TEST(type1, type2) \
+ if (cvt_##type1##_##type2 (-0.5f) != 0) abort (); \
+ if (cvt_##type1##_##type2 (0.5f) != 0) abort (); \
+ if (cvt_##type1##_##type2 (-0.75f) != 0) abort (); \
+ if (cvt_##type1##_##type2 (0.75f) != 0) abort (); \
+ if (type1##_MIN) \
+ { \
+ /* For RADIX 2 type1##_MIN should be always */ \
+ /* exactly representable in type2. */ \
+ if (type2##_RADIX == 2 \
+ || type1##_MAX <= type2##_MAX) \
+ { \
+ if (cvt_##type1##_##type2 (type1##_MIN) \
+ != type1##_MIN) abort (); \
+ volatile type2 tem = ((type2) -0.75f) + type1##_MIN; \
+ volatile type2 tem2 = ((type2) -1.0f) + type1##_MIN; \
+ if (tem != tem2 \
+ && cvt_##type1##_##type2 ((type2) -0.75f \
+ + type1##_MIN) \
+ != type1##_MIN) abort (); \
+ } \
+ else \
+ { \
+ type2 min = type1##_MIN; \
+ /* tem could be below minimum here due to */ \
+ /* rounding. */ \
+ MAXT add = 1; \
+ while (add) \
+ { \
+ volatile type2 tem = type1##_MIN + (type1) add; \
+ if (tem != min) \
+ break; \
+ MAXT newadd = add * type2##_RADIX; \
+ if (newadd < add || newadd > type1##_MAX) \
+ add = 0; \
+ else \
+ add = newadd; \
+ } \
+ if (add) \
+ { \
+ MAXT newadd \
+ = (-(type1##_MIN + (type1) add)) % add; \
+ volatile type2 tem = type1##_MIN + (type1) newadd;\
+ volatile type2 tem2 = type1##_MIN + (type1) add; \
+ if (tem == tem2) \
+ add = newadd; \
+ else \
+ { \
+ newadd += add; \
+ if (newadd < add || newadd > type1##_MAX) \
+ add = 0; \
+ else \
+ { \
+ tem = type1##_MIN + (type1) newadd; \
+ if (tem == tem2) \
+ add = newadd; \
+ else \
+ add = 0; \
+ } \
+ } \
+ } \
+ if (add \
+ && cvt_##type1##_##type2 (type1##_MIN \
+ + (type1) add) \
+ != type1##_MIN + (type1) add) abort (); \
+ } \
+ } \
+ if (type1##_MAX <= type2##_MAX) \
+ { \
+ if (cvt_##type1##_##type2 (type1##_MAX) != type1##_MAX) \
+ abort (); \
+ volatile type2 tem = ((type2) 0.75f) + type1##_MAX; \
+ volatile type2 tem2 = ((type2) 1.0f) + type1##_MAX; \
+ if (tem < tem2 \
+ && cvt_##type1##_##type2 ((type2) 0.75f + type1##_MAX)\
+ != type1##_MAX) abort (); \
+ } \
+ else \
+ { \
+ type2 max = type1##_MAX; \
+ /* tem could be above maximum here due to rounding. */ \
+ MAXT sub = 1; \
+ while (sub) \
+ { \
+ volatile type2 tem = type1##_MAX - sub; \
+ if (tem != max) \
+ break; \
+ MAXT newsub = sub * type2##_RADIX; \
+ if (newsub < sub || newsub > type1##_MAX) \
+ sub = 0; \
+ else \
+ sub = newsub; \
+ } \
+ if (sub) \
+ { \
+ MAXT newsub = ((type1##_MAX - sub) % sub); \
+ volatile type2 tem = type1##_MAX - newsub; \
+ volatile type2 tem2 = type1##_MAX - sub; \
+ if (tem == tem2) \
+ sub = newsub; \
+ else \
+ { \
+ newsub += sub; \
+ if (newsub < sub || newsub > type1##_MAX) \
+ sub = 0; \
+ else \
+ { \
+ tem = type1##_MAX - newsub; \
+ if (tem == tem2) \
+ sub = newsub; \
+ else \
+ sub = 0; \
+ } \
+ } \
+ } \
+ if (sub \
+ && cvt_##type1##_##type2 (type1##_MAX - sub) \
+ != type1##_MAX - sub) abort (); \
+ }
+
+
+#ifdef si128_MAX
+# define TESTS128(type2) TEST (si128, type2) TEST (ui128, type2)
+#else
+# define TESTS128(type2)
+#endif
+
+#define TESTS(type2) \
+ TEST (sc, type2) TEST (c, type2) TEST (uc, type2) \
+ TEST (ss, type2) TEST (us, type2) \
+ TEST (si, type2) TEST (ui, type2) \
+ TEST (sl, type2) TEST (ul, type2) \
+ TEST (sll, type2) TEST (ull, type2) \
+ TESTS128 (type2)
+
+int
+main ()
+{
+#ifdef f_MAX
+ TESTS (f)
+#endif
+#ifdef d_MAX
+ TESTS (d)
+#endif
+#ifdef ld_MAX
+ TESTS (ld)
+#endif
+#ifdef f80_MAX
+ TESTS (f80)
+#endif
+#ifdef f128_MAX
+ TESTS (f128)
+#endif
+#ifdef BROKEN_DECIMAL_INT128
+# undef TESTS128
+# define TESTS128(type2)
+# undef TWO
+# undef M1U
+# undef MAXS
+# undef MAXT
+# define TWO 2ULL
+# define M1U -1ULL
+# define MAXS (__CHAR_BIT__ * __SIZEOF_LONG_LONG__)
+# define MAXT unsigned long long
+#endif
+#ifdef d32_MAX
+ TESTS (d32)
+#endif
+#ifdef d64_MAX
+ TESTS (d64)
+#endif
+#ifdef d128_MAX
+ TESTS (d128)
+#endif
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.h b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.h
new file mode 100644
index 00000000000..b839a6cb52f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-7.h
@@ -0,0 +1,156 @@
+#define CVTFN(type1, type2) \
+__attribute__((noinline)) type1 \
+cvt_##type1##_##type2 (type2 x) \
+{ \
+ return x; \
+}
+
+typedef signed char sc;
+#define sc_MIN (-__SCHAR_MAX__ - 1)
+#define sc_MAX __SCHAR_MAX__
+typedef unsigned char uc;
+#define uc_MIN 0
+#define uc_MAX (2U * __SCHAR_MAX__ + 1U)
+typedef char c;
+#define c_MIN ((((char) -1) > (char) 0) ? uc_MIN : sc_MIN)
+#define c_MAX ((((char) -1) > (char) 0) ? uc_MAX : sc_MAX)
+typedef signed short ss;
+#define ss_MIN (-__SHRT_MAX__ - 1)
+#define ss_MAX __SHRT_MAX__
+typedef unsigned short us;
+#define us_MIN 0
+#define us_MAX (2U * __SHRT_MAX__ + 1U)
+typedef signed int si;
+#define si_MIN (-__INT_MAX__ - 1)
+#define si_MAX __INT_MAX__
+typedef unsigned int ui;
+#define ui_MIN 0
+#define ui_MAX (2U * __INT_MAX__ + 1U)
+typedef signed long sl;
+#define sl_MIN (-__LONG_MAX__ - 1L)
+#define sl_MAX __LONG_MAX__
+typedef unsigned long ul;
+#define ul_MIN 0L
+#define ul_MAX (2UL * __LONG_MAX__ + 1UL)
+typedef signed long long sll;
+#define sll_MIN (-__LONG_LONG_MAX__ - 1LL)
+#define sll_MAX __LONG_LONG_MAX__
+typedef unsigned long long ull;
+#define ull_MIN 0LL
+#define ull_MAX (2ULL * __LONG_LONG_MAX__ + 1ULL)
+#ifdef USE_INT128
+typedef signed __int128 si128;
+# define si128_MAX \
+ ((signed __int128) ((((unsigned __int128) 1) \
+ << (__CHAR_BIT__ * __SIZEOF_INT128__ - 1)) - 1))
+# define si128_MIN (-si128_MAX - 1)
+typedef unsigned __int128 ui128;
+#define ui128_MIN ((unsigned __int128) 0)
+#define ui128_MAX (((unsigned __int128) 2) * si128_MAX + 1)
+#endif
+
+#ifdef si128_MAX
+# define CVTS128(type2) CVTFN (si128, type2) CVTFN (ui128, type2)
+#else
+# define CVTS128(type2)
+#endif
+
+#define CVTS(type2) \
+ CVTFN (sc, type2) CVTFN (c, type2) CVTFN (uc, type2) \
+ CVTFN (ss, type2) CVTFN (us, type2) \
+ CVTFN (si, type2) CVTFN (ui, type2) \
+ CVTFN (sl, type2) CVTFN (ul, type2) \
+ CVTFN (sll, type2) CVTFN (ull, type2) \
+ CVTS128 (type2)
+
+#ifdef __SIZEOF_INT128__
+# define TWO ((unsigned __int128) 2)
+# define M1U ((unsigned __int128) -1)
+# define MAXS (__CHAR_BIT__ * __SIZEOF_INT128__)
+# define MAXT unsigned __int128
+#else
+# define TWO 2ULL
+# define M1U -1ULL
+# define MAXS (__CHAR_BIT__ * __SIZEOF_LONG_LONG__)
+# define MAXT unsigned long long
+#endif
+
+#ifdef USE_FLT_DBL_LDBL
+typedef float f;
+#define f_RADIX 2
+#define f_MANT_DIG __FLT_MANT_DIG__
+#define f_MAX ((TWO << (f_MANT_DIG - 1)) - 1)
+typedef double d;
+#define d_RADIX 2
+#define d_MANT_DIG __DBL_MANT_DIG__
+#define d_MAX ((TWO << (d_MANT_DIG - 1)) - 1)
+typedef long double ld;
+#define ld_RADIX 2
+#define ld_MANT_DIG __LDBL_MANT_DIG__
+#define ld_MAX \
+ (ld_MANT_DIG > MAXS ? M1U : (TWO << (ld_MANT_DIG > MAXS \
+ ? 0 : ld_MANT_DIG - 1)) - 1)
+CVTS (f)
+CVTS (d)
+CVTS (ld)
+#endif
+#ifdef USE_FLOAT80
+typedef __float80 f80;
+# define f80_RADIX 2
+# define f80_MANT_DIG 64
+# define f80_MAX ((TWO << (f80_MANT_DIG - 1)) - 1)
+CVTS (f80)
+#endif
+#ifdef USE_FLOAT128
+typedef __float128 f128;
+# define f128_RADIX 2
+# define f128_MANT_DIG 113
+# define f128_MAX \
+ (f128_MANT_DIG > MAXS ? M1U : (TWO << (f128_MANT_DIG > MAXS \
+ ? 0 : f128_MANT_DIG - 1)) - 1)
+CVTS (f128)
+#endif
+#ifdef USE_DFP
+# ifdef __cplusplus
+typedef float _Decimal32 __attribute__((mode(SD)));
+typedef float _Decimal64 __attribute__((mode(DD)));
+typedef float _Decimal128 __attribute__((mode(TD)));
+# endif
+typedef _Decimal32 d32;
+# define d32_RADIX 10
+# define d32_MANT_DIG __DEC32_MANT_DIG__
+# if d32_MANT_DIG == 7
+# define d32_MAX 9999999ULL
+# endif
+typedef _Decimal64 d64;
+# define d64_RADIX 10
+# define d64_MANT_DIG __DEC64_MANT_DIG__
+# if d64_MANT_DIG == 16
+# define d64_MAX 9999999999999999ULL
+# endif
+typedef _Decimal128 d128;
+# define d128_RADIX 10
+# define d128_MANT_DIG __DEC128_MANT_DIG__
+# if d128_MANT_DIG == 34
+# ifdef __SIZEOF_INT128__
+/* #define d128_MAX 0x1ed09bead87c0378d8e63ffffffff */
+# define d128_MAX \
+ ((((unsigned __int128) 0x1ed09bead87c0) << 64) + 0x378d8e63ffffffffULL)
+# else
+# define d128_MAX M1U
+# endif
+# endif
+# ifdef BROKEN_DECIMAL_INT128
+# undef CVTS128
+# define CVTS128(type2)
+# endif
+CVTS (d32)
+CVTS (d64)
+CVTS (d128)
+#endif
+
+extern
+#ifdef __cplusplus
+"C"
+#endif
+void abort ();
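
For orientation: CVTFN generates one noinline conversion helper per (integer type, floating type) pair, and these helpers are what the TEST macros in float-cast-overflow-7.c and -8.c call so the conversions happen at run time. A hand expansion of CVTFN (si, d), with the relevant typedefs repeated, is roughly:

typedef signed int si;   /* as in the header above */
typedef double d;

/* Rough expansion of CVTFN (si, d): a noinline wrapper so the
   double -> signed int conversion is performed at run time and is
   therefore instrumented by -fsanitize=float-cast-overflow.  */
__attribute__((noinline)) si
cvt_si_d (d x)
{
  return x;
}
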
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-8.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-8.c
new file mode 100644
index 00000000000..4adb22ae3b4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-8.c
@@ -0,0 +1,143 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=float-cast-overflow -DUSE_FLT_DBL_LDBL" } */
+/* { dg-additional-options "-DUSE_INT128" { target int128 } } */
+
+#include "float-cast-overflow-7.h"
+
+#define TEST(type1, type2) \
+ if (type1##_MIN) \
+ { \
+ type2 min = type1##_MIN; \
+ type2 add = -1.0; \
+ while (1) \
+ { \
+ volatile type2 tem = min + add; \
+ if (tem != min) \
+ { \
+ volatile type1 tem3 = cvt_##type1##_##type2 (tem);\
+ break; \
+ } \
+ add = add * type2##_RADIX; \
+ if (min == add) \
+ break; \
+ } \
+ } \
+ else \
+ { \
+ volatile type1 tem3 = cvt_##type1##_##type2 (-1.0f); \
+ } \
+ { \
+ type2 max = type1##_MAX; \
+ type2 add = 1.0; \
+ while (1) \
+ { \
+ volatile type2 tem = max + add; \
+ if (tem != max) \
+ { \
+ volatile type1 tem3 = cvt_##type1##_##type2 (tem); \
+ break; \
+ } \
+ add = add * type2##_RADIX; \
+ if (max == add) \
+ break; \
+ } \
+ }
+
+#ifdef si128_MAX
+# define TESTS128(type2) TEST (si128, type2) TEST (ui128, type2)
+#else
+# define TESTS128(type2)
+#endif
+
+#define TESTS(type2) \
+ TEST (sc, type2) TEST (c, type2) TEST (uc, type2) \
+ TEST (ss, type2) TEST (us, type2) \
+ TEST (si, type2) TEST (ui, type2) \
+ TEST (sl, type2) TEST (ul, type2) \
+ TEST (sll, type2) TEST (ull, type2) \
+ TESTS128 (type2)
+
+int
+main ()
+{
+#ifdef f_MAX
+ TESTS (f)
+#endif
+#ifdef d_MAX
+ TESTS (d)
+#endif
+#ifdef ld_MAX
+ TESTS (ld)
+#endif
+#ifdef f80_MAX
+ TESTS (f80)
+#endif
+#ifdef f128_MAX
+ TESTS (f128)
+#endif
+#ifdef BROKEN_DECIMAL_INT128
+# undef TESTS128
+# define TESTS128(type2)
+# undef TWO
+# undef M1U
+# undef MAXS
+# define TWO 2ULL
+# define M1U -1ULL
+# define MAXS (__CHAR_BIT__ * __SIZEOF_LONG_LONG__)
+#endif
+#ifdef d32_MAX
+ TESTS (d32)
+#endif
+#ifdef d64_MAX
+ TESTS (d64)
+#endif
+#ifdef d128_MAX
+ TESTS (d128)
+#endif
+ return 0;
+}
+
+/* float */
+/* { dg-output "value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* No error for float and __int128 unsigned max value, as ui128_MAX is +Inf in float. */
+/* double */
+/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* long double */
+/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-9.c b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-9.c
new file mode 100644
index 00000000000..f2d71f6a533
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast-overflow-9.c
@@ -0,0 +1,35 @@
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=float-cast-overflow -fsanitize-recover=float-cast-overflow -DUSE_FLOAT80 -DUSE_FLOAT128" } */
+/* { dg-additional-options "-DUSE_INT128" { target int128 } } */
+
+#include "float-cast-overflow-8.c"
+
+/* __float80 */
+/* { dg-output "value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* __float128 */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-cast.h b/gcc/testsuite/c-c++-common/ubsan/float-cast.h
new file mode 100644
index 00000000000..e76171a8f9d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-cast.h
@@ -0,0 +1,39 @@
+/* Various macros for -fsanitize=float-cast-overflow testing. */
+
+/* E.g. on CentOS 5 these aren't defined in limits.h. */
+#ifndef LLONG_MAX
+# define LLONG_MAX __LONG_LONG_MAX__
+#endif
+#ifndef LLONG_MIN
+# define LLONG_MIN (-__LONG_LONG_MAX__ - 1LL)
+#endif
+#ifndef ULLONG_MAX
+# define ULLONG_MAX (__LONG_LONG_MAX__ * 2ULL + 1ULL)
+#endif
+
+#define INT128_MAX (__int128) (((unsigned __int128) 1 << ((__SIZEOF_INT128__ * __CHAR_BIT__) - 1)) - 1)
+#define INT128_MIN (-INT128_MAX - 1)
+#define UINT128_MAX ((2 * (unsigned __int128) INT128_MAX) + 1)
+
+#define CHECK_BOUNDARY(VAR, VAL) \
+ (VAR) = (VAL) - 5.0; \
+ (VAR) = (VAL) - 1.5; \
+ (VAR) = (VAL) - 1.0; \
+ (VAR) = (VAL) - 0.75; \
+ (VAR) = (VAL) - 0.5; \
+ (VAR) = (VAL) - 0.0000001; \
+ (VAR) = (VAL) - 0.0; \
+ (VAR) = (VAL); \
+ (VAR) = (VAL) + 0.0; \
+ (VAR) = (VAL) + 0.0000001; \
+ (VAR) = (VAL) + 0.5; \
+ (VAR) = (VAL) + 0.75; \
+ (VAR) = (VAL) + 1.0; \
+ (VAR) = (VAL) + 1.5; \
+ (VAR) = (VAL) + 5.0;
+
+#define CHECK_NONNUMBERS(VAR) \
+ (VAR) = nan; \
+ (VAR) = -nan; \
+ (VAR) = inf; \
+ (VAR) = -inf;
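
Note: CHECK_BOUNDARY performs fifteen conversions clustered around a boundary value; only those whose value, after truncation toward zero, falls outside the destination type's range trigger a report. A hand-expanded subset for a signed char at SCHAR_MAX (illustrative, not from the patch):

/* boundary.c -- illustrative only.
   Build and run: gcc -fsanitize=float-cast-overflow boundary.c && ./a.out  */
#include <limits.h>

int
main (void)
{
  volatile float f = SCHAR_MAX;   /* 127 */
  volatile signed char s;

  s = f - 5.0;    /* 122   -> representable, no report                 */
  s = f + 0.5;    /* 127.5 -> truncates to 127, still representable    */
  s = f + 1.0;    /* 128   -> reported: "value 128 is outside ..."     */
  s = f + 5.0;    /* 132   -> reported: "value 132 is outside ..."     */
  (void) s;
  return 0;
}
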
diff --git a/gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c b/gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c
new file mode 100644
index 00000000000..2271ea9b776
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/float-div-by-zero-1.c
@@ -0,0 +1,26 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=float-divide-by-zero" } */
+
+int
+main (void)
+{
+ volatile float a = 1.3f;
+ volatile double b = 0.0;
+ volatile int c = 4;
+ volatile float res;
+
+ res = a / b;
+ res = a / 0.0;
+ res = 2.7f / b;
+ res = 3.6 / (b = 0.0, b);
+ res = c / b;
+ res = b / c;
+
+ return 0;
+}
+
+/* { dg-output "division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*division by zero\[^\n\r]*" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/load-bool-enum.c b/gcc/testsuite/c-c++-common/ubsan/load-bool-enum.c
index 96f7984f86a..c7f0683c505 100644
--- a/gcc/testsuite/c-c++-common/ubsan/load-bool-enum.c
+++ b/gcc/testsuite/c-c++-common/ubsan/load-bool-enum.c
@@ -10,8 +10,8 @@ bool b;
__attribute__((noinline, noclone)) enum A
foo (bool *p)
{
- *p = b; /* { dg-output "load-bool-enum.c:13:\[^\n\r]*runtime error: load of value 4, which is not a valid value for type '(_B|b)ool'(\n|\r\n|\r)*" } */
- return a; /* { dg-output "\[^\n\r]*load-bool-enum.c:14:\[^\n\r]*runtime error: load of value 9, which is not a valid value for type 'A'(\n|\r\n|\r)*" { target c++ } } */
+ *p = b; /* { dg-output "load-bool-enum.c:13:\[^\n\r]*runtime error: \[^\n\r]*load of value 4, which is not a valid value for type '(_B|b)ool'\[^\n\r]*(\n|\r\n|\r)*" } */
+ return a; /* { dg-output "\[^\n\r]*load-bool-enum.c:14:\[^\n\r]*runtime error: \[^\n\r]*load of value 9, which is not a valid value for type 'A'" { target c++ } } */
}
int
diff --git a/gcc/testsuite/c-c++-common/ubsan/nonnull-1.c b/gcc/testsuite/c-c++-common/ubsan/nonnull-1.c
new file mode 100644
index 00000000000..d3063ca4a6f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/nonnull-1.c
@@ -0,0 +1,38 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=nonnull-attribute,returns-nonnull-attribute" } */
+
+int q, r;
+void *a, *b, *c = (void *) &q, *d, *e, *f = (void *) &q, *g, *h;
+
+__attribute__((returns_nonnull, nonnull (1, 3)))
+void *
+foo (void *p, void *q, void *r)
+{
+ a = p;
+ b = r;
+ return q;
+}
+
+int
+bar (const void *a, const void *b)
+{
+ int c = *(const int *) a;
+ int d = *(const int *) b;
+ return c - d;
+}
+
+int
+main ()
+{
+ asm volatile ("" : : : "memory");
+ d = foo (c, b, c);
+ e = foo (e, c, f);
+ g = foo (c, f, g);
+ __builtin_memset (d, '\0', q);
+ return 0;
+}
+
+/* { dg-output "\.c:13:\[0-9]*:\[^\n\r]*null pointer returned from function declared to never return null\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\.c:29:\[0-9]*:\[^\n\r]*null pointer passed as argument 1, which is declared to never be null\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\.c:30:\[0-9]*:\[^\n\r]*null pointer passed as argument 3, which is declared to never be null\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\.c:31:\[0-9]*:\[^\n\r]*null pointer passed as argument 1, which is declared to never be null" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/nonnull-2.c b/gcc/testsuite/c-c++-common/ubsan/nonnull-2.c
new file mode 100644
index 00000000000..3eb6ae71bec
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/nonnull-2.c
@@ -0,0 +1,36 @@
+/* { dg-do run } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
+
+int q, r;
+void *a, *b, *c = (void *) &q, *d, *e, *f = (void *) &q, *g, *h;
+
+__attribute__((returns_nonnull, nonnull (1, 3)))
+void *
+foo (void *p, void *q, void *r)
+{
+ a = p;
+ b = r;
+ return q;
+}
+
+int
+bar (const void *a, const void *b)
+{
+ int c = *(const int *) a;
+ int d = *(const int *) b;
+ return c - d;
+}
+
+int
+main ()
+{
+ asm volatile ("" : : : "memory");
+ d = foo (c, b, c);
+ e = foo (e, c, f);
+ g = foo (c, f, g);
+ __builtin_memset (d, '\0', q);
+ return 0;
+}
+
+/* { dg-output "\.c:14:\[0-9]*:\[^\n\r]*null pointer returned from function declared to never return null" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/nonnull-3.c b/gcc/testsuite/c-c++-common/ubsan/nonnull-3.c
new file mode 100644
index 00000000000..67fd6dde2e4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/nonnull-3.c
@@ -0,0 +1,36 @@
+/* { dg-do run } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
+
+int q, r;
+void *a, *b, *c = (void *) &q, *d, *e, *f = (void *) &q, *g, *h;
+
+__attribute__((returns_nonnull, nonnull (1, 3)))
+void *
+foo (void *p, void *q, void *r)
+{
+ a = p;
+ b = r;
+ return q;
+}
+
+int
+bar (const void *a, const void *b)
+{
+ int c = *(const int *) a;
+ int d = *(const int *) b;
+ return c - d;
+}
+
+int
+main ()
+{
+ asm volatile ("" : : : "memory");
+ d = foo (c, (void *) &r, c);
+ e = foo (e, c, f);
+ g = foo (c, f, g);
+ __builtin_memset (d, '\0', q);
+ return 0;
+}
+
+/* { dg-output "\.c:30:\[0-9]*:\[^\n\r]*null pointer passed as argument 1, which is declared to never be null" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/nonnull-4.c b/gcc/testsuite/c-c++-common/ubsan/nonnull-4.c
new file mode 100644
index 00000000000..b49c72e345c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/nonnull-4.c
@@ -0,0 +1,34 @@
+/* { dg-do run } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-options "-fsanitize=undefined -fsanitize-undefined-trap-on-error" } */
+
+int q, r;
+void *a, *b, *c = (void *) &q, *d, *e, *f = (void *) &q, *g, *h;
+
+__attribute__((returns_nonnull, nonnull (1, 3)))
+void *
+foo (void *p, void *q, void *r)
+{
+ a = p;
+ b = r;
+ return q;
+}
+
+int
+bar (const void *a, const void *b)
+{
+ int c = *(const int *) a;
+ int d = *(const int *) b;
+ return c - d;
+}
+
+int
+main ()
+{
+ asm volatile ("" : : : "memory");
+ d = foo (c, b, c);
+ e = foo (e, c, f);
+ g = foo (c, f, g);
+ __builtin_memset (d, '\0', q);
+ return 0;
+}
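
Note: nonnull-4.c and nonnull-5.c add -fsanitize-undefined-trap-on-error, so a failed check executes a trap instruction instead of calling into libubsan; nothing is printed and the process typically dies with SIGILL, which is why these two tests use dg-shouldfail with no dg-output lines. An illustrative sketch (not from the patch) of the same behaviour:

/* trap.c -- illustrative only.  Build and run:
   gcc -fsanitize=undefined -fsanitize-undefined-trap-on-error trap.c; ./a.out  */
__attribute__((returns_nonnull, noinline))
void *
ret (void *p)
{
  return p;   /* with trap-on-error this check traps instead of printing */
}

int
main (void)
{
  void *volatile n = (void *) 0;
  ret (n);
  return 0;
}
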
diff --git a/gcc/testsuite/c-c++-common/ubsan/nonnull-5.c b/gcc/testsuite/c-c++-common/ubsan/nonnull-5.c
new file mode 100644
index 00000000000..fefbdc3b4ca
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/nonnull-5.c
@@ -0,0 +1,34 @@
+/* { dg-do run } */
+/* { dg-shouldfail "ubsan" } */
+/* { dg-options "-fsanitize=undefined -fsanitize-undefined-trap-on-error" } */
+
+int q, r;
+void *a, *b, *c = (void *) &q, *d, *e, *f = (void *) &q, *g, *h;
+
+__attribute__((returns_nonnull, nonnull (1, 3)))
+void *
+foo (void *p, void *q, void *r)
+{
+ a = p;
+ b = r;
+ return q;
+}
+
+int
+bar (const void *a, const void *b)
+{
+ int c = *(const int *) a;
+ int d = *(const int *) b;
+ return c - d;
+}
+
+int
+main ()
+{
+ asm volatile ("" : : : "memory");
+ d = foo (c, (void *) &r, c);
+ e = foo (e, c, f);
+ g = foo (c, f, g);
+ __builtin_memset (d, '\0', q);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-1.c b/gcc/testsuite/c-c++-common/ubsan/null-1.c
index 08d547f3aae..26dc22c6dd9 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-1.c
@@ -9,4 +9,4 @@ main (void)
return *p;
}
-/* { dg-output "load of null pointer of type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-10.c b/gcc/testsuite/c-c++-common/ubsan/null-10.c
index c3d61a01d5b..2ff948565d5 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-10.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-10.c
@@ -10,4 +10,4 @@ main (void)
return 0;
}
-/* { dg-output "load of null pointer of type 'short int'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'short int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-11.c b/gcc/testsuite/c-c++-common/ubsan/null-11.c
index 6645f2acfcb..659a5f2a07e 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-11.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-11.c
@@ -13,4 +13,4 @@ main (void)
return (*s)->i;
}
-/* { dg-output "load of null pointer of type 'struct S \\*'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'struct S \\*'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-2.c b/gcc/testsuite/c-c++-common/ubsan/null-2.c
index cb3907fa5cc..de9d5190869 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-2.c
@@ -9,4 +9,4 @@ main (void)
return ***ppp;
}
-/* { dg-output "load of null pointer of type 'int \\*\\*'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'int \\*\\*'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-3.c b/gcc/testsuite/c-c++-common/ubsan/null-3.c
index f58562c63b8..e9e8b7bd213 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-3.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-3.c
@@ -15,4 +15,4 @@ main (void)
return foo (*p);
}
-/* { dg-output "load of null pointer of type 'int \\*'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'int \\*'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-4.c b/gcc/testsuite/c-c++-common/ubsan/null-4.c
index 18506afb3c3..09018164fc5 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-4.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-4.c
@@ -11,4 +11,4 @@ main (void)
return 0;
}
-/* { dg-output "load of null pointer of type 'complex double'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'complex double'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-5.c b/gcc/testsuite/c-c++-common/ubsan/null-5.c
index c3c45b7c47e..fb2d49381cd 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-5.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-5.c
@@ -13,4 +13,4 @@ main (void)
return 0;
}
-/* { dg-output "load of null pointer of type 'volatile const complex float'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'volatile const complex float'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-6.c b/gcc/testsuite/c-c++-common/ubsan/null-6.c
index 63fade5a6e1..12397936354 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-6.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-6.c
@@ -10,4 +10,4 @@ main (void)
return 0;
}
-/* { dg-output "store to null pointer of type 'long unsigned int'(\n|\r\n|\r)" } */
+/* { dg-output "store to null pointer of type 'long unsigned int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-7.c b/gcc/testsuite/c-c++-common/ubsan/null-7.c
index bf30a0b9b39..19c21da4562 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-7.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-7.c
@@ -14,4 +14,4 @@ main (void)
return *gao ();
}
-/* { dg-output "load of null pointer of type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "load of null pointer of type 'int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-8.c b/gcc/testsuite/c-c++-common/ubsan/null-8.c
index 170377ee4d4..73bef75c377 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-8.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-8.c
@@ -13,4 +13,4 @@ main (void)
return s->i;
}
-/* { dg-output "member access within null pointer of type 'struct S'(\n|\r\n|\r)" } */
+/* { dg-output "member access within null pointer of type 'struct S'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/null-9.c b/gcc/testsuite/c-c++-common/ubsan/null-9.c
index ab43d22a325..0b993cfaa76 100644
--- a/gcc/testsuite/c-c++-common/ubsan/null-9.c
+++ b/gcc/testsuite/c-c++-common/ubsan/null-9.c
@@ -13,4 +13,4 @@ main (void)
return u->i;
}
-/* { dg-output "member access within null pointer of type 'union U'(\n|\r\n|\r)" } */
+/* { dg-output "member access within null pointer of type 'union U'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-1.c b/gcc/testsuite/c-c++-common/ubsan/object-size-1.c
new file mode 100644
index 00000000000..f4ea8bb46e7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-1.c
@@ -0,0 +1,125 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=undefined" } */
+
+/* Sanity-test -fsanitize=object-size. We use the -fsanitize=undefined option
+ to check that this feature doesn't clash with -fsanitize=bounds et al. */
+
+#define N 20
+
+__attribute__((noinline, noclone)) void
+f1 (int i)
+{
+ volatile int j;
+ char *p, *orig;
+ orig = p = (char *) __builtin_calloc (N, 1);
+ j = *(p + i);
+ j = p[i];
+ p++;
+ j = p[i - 1];
+ j = *(p + i - 1);
+ __builtin_free (orig);
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+__attribute__((noinline, noclone)) void
+f2 (int i)
+{
+ volatile int j;
+ char a[N];
+ __builtin_memset (a, 0, N);
+ j = *(a + i);
+ char *p = a;
+ j = *(p + i);
+ j = p[i];
+ p += 10;
+ j = *(p + i - 10);
+ j = p[i - 10];
+}
+
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+__attribute__((noinline, noclone)) void
+f3 (int i)
+{
+ volatile int j;
+ int *p = (int *) __builtin_calloc (N, sizeof (*p));
+ int *o = &p[i];
+ j = *o;
+ j = o[0];
+ __builtin_free (p);
+}
+
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+__attribute__((noinline, noclone)) void
+f4 (void)
+{
+ /* The second argument to __builtin_calloc is intentional. */
+ int *p = (int *) __builtin_calloc (3, 1);
+ *p = 42;
+ __builtin_free (p);
+}
+
+/* { dg-output "\[^\n\r]*store to address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
+
+__attribute__((noinline, noclone)) void
+f5 (int *p)
+{
+ /* This is not instrumented, but make sure we don't ICE, etc. */
+ volatile int i = p[N];
+}
+
+int
+main ()
+{
+ f1 (N);
+ f2 (N);
+ f3 (N);
+ f4 ();
+ int *p = (int *) __builtin_calloc (N, sizeof (*p));
+ f5 (p);
+ __builtin_free (p);
+ return 0;
+}
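
Note: the object-size diagnostics above are driven by what __builtin_object_size can prove about the pointer at the point of access, which in turn needs optimization; that is presumably why this test (and the other object-size tests below) is restricted to the -O2 variant via dg-skip-if. A small illustration of the underlying query (illustrative, not from the patch):

/* objsize.c -- illustrative only.
   Build and run: gcc -O2 objsize.c && ./a.out  */
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  char *p = (char *) malloc (20);
  /* Mode 0: maximum number of bytes reachable through p.  At -O2 GCC can
     typically fold this to 20; when nothing is known (e.g. at -O0) the
     builtin folds to (size_t) -1 and -fsanitize=object-size has nothing
     to check against.  */
  printf ("%zu\n", __builtin_object_size (p, 0));
  free (p);
  return 0;
}
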
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-10.c b/gcc/testsuite/c-c++-common/ubsan/object-size-10.c
new file mode 100644
index 00000000000..53e719de0cd
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-10.c
@@ -0,0 +1,79 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=undefined" } */
+
+static char a[128] __attribute__ ((aligned(4096)));
+static int b[128] __attribute__ ((aligned(4096)));
+
+__attribute__ ((noinline, noclone)) int
+fn1 (int i)
+{
+ asm ("");
+ return a[i & 127];
+}
+
+__attribute__ ((noinline, noclone)) int
+fn2 (int i)
+{
+ asm ("");
+ return a[i & 128];
+}
+
+/* { dg-output "index 128 out of bounds for type 'char \\\[128\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+__attribute__ ((noinline, noclone)) int
+fn3 (int i)
+{
+ asm ("");
+ return b[i & 127];
+}
+
+__attribute__ ((noinline, noclone)) int
+fn4 (int i)
+{
+ asm ("");
+ return b[i & 128];
+}
+
+/* { dg-output "\[^\n\r]*index 128 out of bounds for type 'int \\\[128\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+__attribute__ ((noinline, noclone)) int
+fn5 (int i, int j)
+{
+ asm ("");
+ return b[i & j];
+}
+
+/* { dg-output "\[^\n\r]*index 128 out of bounds for type 'int \\\[128\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
+
+__attribute__ ((noinline, noclone)) int
+fn6 (int i)
+{
+ asm ("");
+ return b[i & 0];
+}
+
+int
+main (void)
+{
+ fn1 (128);
+ fn2 (128);
+ fn3 (128);
+ fn4 (128);
+ fn5 (128, 127);
+ fn5 (128, 128);
+ fn6 (128);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-2.c b/gcc/testsuite/c-c++-common/ubsan/object-size-2.c
new file mode 100644
index 00000000000..dba12433c66
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=undefined" } */
+
+void
+foo (unsigned long ul)
+{
+ unsigned int u;
+ u = *(unsigned long *) ul;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-3.c b/gcc/testsuite/c-c++-common/ubsan/object-size-3.c
new file mode 100644
index 00000000000..a88081c02a9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-3.c
@@ -0,0 +1,56 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=object-size -fno-sanitize-recover=object-size" } */
+
+/* Test valid uses. */
+
+#define N 20
+
+__attribute__((noinline, noclone)) void
+f1 (int i)
+{
+ volatile int j;
+ char *p, *orig;
+ orig = p = (char *) __builtin_calloc (N, 1);
+ j = *(p + i);
+ j = p[i];
+ p++;
+ j = p[i - 1];
+ j = *(p + i - 1);
+ __builtin_free (orig);
+}
+
+__attribute__((noinline, noclone)) void
+f2 (int i)
+{
+ volatile int j;
+ char a[N];
+ __builtin_memset (a, 0, N);
+ j = *(a + i);
+ char *p = a;
+ j = *(p + i);
+ j = p[i];
+ p += 10;
+ j = *(p + i - 10);
+ j = p[i - 10];
+}
+
+__attribute__((noinline, noclone)) void
+f3 (int i)
+{
+ volatile int j;
+ int *p = (int *) __builtin_calloc (N, sizeof (*p));
+ int *o = &p[i];
+ j = *o;
+ j = o[0];
+ __builtin_free (p);
+}
+
+int
+main ()
+{
+ f1 (N - 1);
+ f2 (N - 1);
+ f3 (N - 1);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-4.c b/gcc/testsuite/c-c++-common/ubsan/object-size-4.c
new file mode 100644
index 00000000000..7e55a0fe35d
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-4.c
@@ -0,0 +1,31 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=object-size" } */
+
+/* Test that we instrument flexible array members. */
+
+struct T { int l; int a[]; };
+struct U { int l; int a[0]; };
+
+int
+main (void)
+{
+ volatile int i;
+ struct T *t = (struct T *) __builtin_calloc (sizeof (struct T)
+ + sizeof (int), 1);
+ i = t->a[1];
+
+ struct U *u = (struct U *) __builtin_calloc (sizeof (struct U)
+ + sizeof (int), 1);
+ i = u->a[1];
+ return 0;
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-5.c b/gcc/testsuite/c-c++-common/ubsan/object-size-5.c
new file mode 100644
index 00000000000..6d10fdf73b0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-5.c
@@ -0,0 +1,38 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=object-size" } */
+
+/* Test structures with -fsanitize=object-size. */
+
+#define N 20
+
+struct S { char *p; int i; };
+struct T { struct S *s; };
+
+__attribute__((noinline, noclone)) void
+f1 (int i)
+{
+ volatile int j;
+ struct S s;
+ s.p = (char *) __builtin_calloc (N, 1);
+ j = s.p[i];
+ j = *(s.p + i);
+ __builtin_free (s.p);
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
+
+int
+main ()
+{
+ f1 (N);
+ f1 (N - 1);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-6.c b/gcc/testsuite/c-c++-common/ubsan/object-size-6.c
new file mode 100644
index 00000000000..0e6035d0384
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-6.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=object-size" } */
+
+char
+foo (void *v)
+{
+ return *(char *) v;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-7.c b/gcc/testsuite/c-c++-common/ubsan/object-size-7.c
new file mode 100644
index 00000000000..268de64df6e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-7.c
@@ -0,0 +1,29 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=object-size" } */
+
+#define N 20
+
+struct S { int a; };
+
+__attribute__((noinline, noclone)) struct S
+f1 (int i)
+{
+ struct S a[N];
+ struct S *p = a;
+ struct S s;
+ s = p[i];
+ return s;
+}
+
+int
+main ()
+{
+ f1 (N);
+ return 0;
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-8.c b/gcc/testsuite/c-c++-common/ubsan/object-size-8.c
new file mode 100644
index 00000000000..f09a445f378
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-8.c
@@ -0,0 +1,32 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=undefined" } */
+
+struct S { int a; int b; };
+
+static inline __attribute__((always_inline)) int
+foo (struct S *p)
+{
+ volatile int a;
+ a = p->a; /* OK */
+ return p->b;
+}
+
+int
+bar (void)
+{
+ struct S *p = (struct S *) __builtin_calloc (sizeof (int) + sizeof (int) / 2, 1);
+ return foo (p);
+}
+
+int
+main (void)
+{
+ bar ();
+ return 0;
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/object-size-9.c b/gcc/testsuite/c-c++-common/ubsan/object-size-9.c
new file mode 100644
index 00000000000..46f1fb9f9b4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/object-size-9.c
@@ -0,0 +1,97 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=undefined" } */
+
+/* Test PARM_DECLs and RESULT_DECLs. */
+
+struct T { char d[8]; int e; };
+struct T t = { "abcdefg", 1 };
+#ifdef __cplusplus
+struct C { C () : d("abcdefg"), e(1) {} C (const C &x) { __builtin_memcpy (d, x.d, 8); e = x.e; } ~C () {} char d[8]; int e; };
+#endif
+struct U { int a : 5; int b : 19; int c : 8; };
+struct S { struct U d[10]; };
+struct S s __attribute__ ((aligned(4096)));
+
+int
+f1 (struct T x, int i)
+{
+ char *p = x.d;
+ p += i;
+ return *p;
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+#ifdef __cplusplus
+static struct C
+f2 (int i)
+{
+ struct C x;
+ x.d[i] = 'z';
+ return x;
+}
+
+/* { dg-output "\[^\n\r]*index 12 out of bounds for type 'char \\\[8\\\]'\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*store to address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+
+static struct C
+f3 (int i)
+{
+ struct C x;
+ char *p = x.d;
+ p += i;
+ *p = 'z';
+ return x;
+}
+
+/* { dg-output "\[^\n\r]*store to address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" { target { c++ } } } */
+
+#endif
+
+int
+f4 (int i)
+{
+ return s.d[i].b;
+}
+
+/* { dg-output "\[^\n\r]*index 12 out of bounds for type 'U \\\[10\\\]'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^\[^\n\r]*(\n|\r\n|\r)" } */
+
+int
+f5 (int i)
+{
+ struct U *u = s.d;
+ u += i;
+ return u->b;
+}
+
+/* { dg-output "\[^\n\r]*load of address \[^\n\r]* with insufficient space for an object of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
+
+int
+main (void)
+{
+ f1 (t, 12);
+#ifdef __cplusplus
+ f2 (12);
+ f3 (12);
+#endif
+ f4 (12);
+ f5 (12);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-1.c
index 68199551b49..22bacb3cf2f 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-1.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -fno-sanitize-recover=signed-integer-overflow" } */
#ifndef ASM1
# define ASM1(a) /* Nothing */
@@ -53,8 +51,6 @@
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
CHECK (FN1 (char, char, +), 23);
CHECK (FN1 (char, char, -), 5);
CHECK (FN1 (char, char, *), 126);
@@ -261,9 +257,5 @@ main (void)
CHECK (FN5 (unsigned long int), -77);
CHECK (FN5 (long long int), -77);
CHECK (FN5 (unsigned long long int), -77);
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c
index fd6c6d325e8..960f1b0afaf 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-add-1.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable -fno-sanitize-recover=signed-integer-overflow" } */
#define SCHAR_MAX __SCHAR_MAX__
#define SHRT_MAX __SHRT_MAX__
@@ -18,8 +16,6 @@ check (int i, int j)
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
#if __INT_MAX__ == 2147483647
/* Here, nothing should fail. */
volatile int j = INT_MAX;
@@ -59,9 +55,5 @@ main (void)
d++;
check (d, -32768);
#endif
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c
index 85499d86c26..b104d6158fe 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-add-2.c
@@ -44,17 +44,17 @@ main (void)
return 0;
}
-/* { dg-output "signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: 1 \\+ 2147483647 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: 2147483547 \\+ 1024 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ -2147483648 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -2147483548 \\+ -1024 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: 1 \\+ \[^\n\r]* cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1024 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ -\[^\n\r]* cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 1 \\+ 2147483647 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483547 \\+ 1024 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ -2147483648 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483548 \\+ -1024 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 1 \\+ \[^\n\r]* cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\+ 1024 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ -\[^\n\r]* cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1024 cannot be represented in type 'long int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-add-3.c b/gcc/testsuite/c-c++-common/ubsan/overflow-add-3.c
new file mode 100644
index 00000000000..f4062768369
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-add-3.c
@@ -0,0 +1,17 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable -fno-sanitize-recover=signed-integer-overflow" } */
+/* { dg-shouldfail "ubsan" } */
+
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+int
+main (void)
+{
+ volatile int j = INT_MAX;
+ volatile int i = 1;
+ volatile int k = j + i;
+ return 0;
+}
+
+/* { dg-output "signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-add-4.c b/gcc/testsuite/c-c++-common/ubsan/overflow-add-4.c
new file mode 100644
index 00000000000..3f6f6bd0895
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-add-4.c
@@ -0,0 +1,15 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable -fsanitize-undefined-trap-on-error" } */
+/* { dg-shouldfail "ubsan" } */
+
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+
+int
+main (void)
+{
+ volatile int j = INT_MAX;
+ volatile int i = 1;
+ volatile int k = j + i;
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-int128.c b/gcc/testsuite/c-c++-common/ubsan/overflow-int128.c
index 9a850243d3b..400f25b01e1 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-int128.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-int128.c
@@ -41,7 +41,7 @@ main (void)
/* { dg-output "\[^\n\r]*signed integer overflow: 0x7fffffffffffffffffffffffffffff9b \\+ 1024 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: -1 \\+ 0x80000000000000000000000000000000 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: 0x80000000000000000000000000000000 \\+ -1 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: 0x80000000000000000000000000000000 \\+ -1 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 0x80000000000000000000000000000000 - 1 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: 0x80000000000000000000000000000064 \\+ -1024 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: 0x7fffffffffffffffffffffffffffffff \\* 2 cannot be represented in type '__int128'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of 0x80000000000000000000000000000000 cannot be represented in type '__int128'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of 0x80000000000000000000000000000000 cannot be represented in type '__int128'; cast to an unsigned type to negate this value to itself" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c
index afb1a259676..04a9ec7c27d 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-1.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable -fno-sanitize-recover=signed-integer-overflow" } */
#define SCHAR_MAX __SCHAR_MAX__
#define SHRT_MAX __SHRT_MAX__
@@ -18,8 +16,6 @@ check (int i, int j)
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
/* Test integer promotion. */
#if __SCHAR_MAX__ == 127
volatile signed char a = -2;
@@ -45,9 +41,5 @@ main (void)
o = m * n;
check (o, INT_MIN);
#endif
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c
index ece25a354b8..1ccc3f736b6 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-2.c
@@ -20,7 +20,7 @@ main (void)
return 0;
}
-/* { dg-output "signed integer overflow: 2147483647 \\* 2 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: 2 \\* 2147483647 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\* 2 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
+/* { dg-output "signed integer overflow: 2147483647 \\* 2 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2 \\* 2147483647 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: \[^\n\r]* \\* 2 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: 2 \\* \[^\n\r]* cannot be represented in type 'long int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-3.c b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-3.c
index 037609be0ac..9bca1f8f5b9 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-3.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-3.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -fno-sanitize-recover=signed-integer-overflow" } */
__attribute__((noinline, noclone)) long long
mul (long long x, long long y)
@@ -31,16 +29,10 @@ long long tab[] = {
int
main ()
{
- fputs ("UBSAN TEST START\n", stderr);
-
unsigned int i;
for (i = 0; i < sizeof (tab) / sizeof (long long); i += 3)
if (mul (tab[i], tab[i + 1]) != tab[i + 2]
|| mul (tab[i + 1], tab[i]) != tab[i + 2])
__builtin_abort ();
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-4.c b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-4.c
index 82e114001b4..1878451192e 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-mul-4.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-mul-4.c
@@ -52,35 +52,35 @@ main ()
return 0;
}
-/* { dg-output "overflow-mul-4.c:20:\[^\n\r]*signed integer overflow: 1537228672809129302 \\* 6 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:21:\[^\n\r]*signed integer overflow: -1537228672809129302 \\* -6 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:22:\[^\n\r]*signed integer overflow: 1537228672809129302 \\* -6 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:23:\[^\n\r]*signed integer overflow: -1537228672809129302 \\* 6 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:24:\[^\n\r]*signed integer overflow: 2166572392 \\* 4257126175 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:25:\[^\n\r]*signed integer overflow: -2166572392 \\* -4257126175 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:26:\[^\n\r]*signed integer overflow: 2166572392 \\* -4257126175 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:27:\[^\n\r]*signed integer overflow: -2166572392 \\* 4257126175 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:28:\[^\n\r]*signed integer overflow: 1537228672809129301 \\* 7 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:29:\[^\n\r]*signed integer overflow: -1537228672809129301 \\* -7 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:30:\[^\n\r]*signed integer overflow: 1537228672809129301 \\* -7 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:31:\[^\n\r]*signed integer overflow: -1537228672809129301 \\* 7 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:32:\[^\n\r]*signed integer overflow: 2166572391 \\* 4257126176 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:33:\[^\n\r]*signed integer overflow: -2166572391 \\* -4257126176 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:34:\[^\n\r]*signed integer overflow: 2166572391 \\* -4257126176 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:35:\[^\n\r]*signed integer overflow: -2166572391 \\* 4257126176 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:36:\[^\n\r]*signed integer overflow: 6 \\* 1537228672809129302 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:37:\[^\n\r]*signed integer overflow: -6 \\* -1537228672809129302 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:38:\[^\n\r]*signed integer overflow: -6 \\* 1537228672809129302 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:39:\[^\n\r]*signed integer overflow: 6 \\* -1537228672809129302 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:40:\[^\n\r]*signed integer overflow: 4257126175 \\* 2166572392 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:41:\[^\n\r]*signed integer overflow: -4257126175 \\* -2166572392 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:42:\[^\n\r]*signed integer overflow: -4257126175 \\* 2166572392 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:43:\[^\n\r]*signed integer overflow: 4257126175 \\* -2166572392 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:44:\[^\n\r]*signed integer overflow: 7 \\* 1537228672809129301 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:45:\[^\n\r]*signed integer overflow: -7 \\* -1537228672809129301 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:46:\[^\n\r]*signed integer overflow: -7 \\* 1537228672809129301 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:47:\[^\n\r]*signed integer overflow: 7 \\* -1537228672809129301 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:48:\[^\n\r]*signed integer overflow: 4257126176 \\* 2166572391 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:49:\[^\n\r]*signed integer overflow: -4257126176 \\* -2166572391 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*overflow-mul-4.c:50:\[^\n\r]*signed integer overflow: -4257126176 \\* 2166572391 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
+/* { dg-output "overflow-mul-4.c:20:\[^\n\r]*signed integer overflow: 1537228672809129302 \\* 6 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:21:\[^\n\r]*signed integer overflow: -1537228672809129302 \\* -6 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:22:\[^\n\r]*signed integer overflow: 1537228672809129302 \\* -6 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:23:\[^\n\r]*signed integer overflow: -1537228672809129302 \\* 6 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:24:\[^\n\r]*signed integer overflow: 2166572392 \\* 4257126175 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:25:\[^\n\r]*signed integer overflow: -2166572392 \\* -4257126175 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:26:\[^\n\r]*signed integer overflow: 2166572392 \\* -4257126175 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:27:\[^\n\r]*signed integer overflow: -2166572392 \\* 4257126175 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:28:\[^\n\r]*signed integer overflow: 1537228672809129301 \\* 7 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:29:\[^\n\r]*signed integer overflow: -1537228672809129301 \\* -7 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:30:\[^\n\r]*signed integer overflow: 1537228672809129301 \\* -7 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:31:\[^\n\r]*signed integer overflow: -1537228672809129301 \\* 7 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:32:\[^\n\r]*signed integer overflow: 2166572391 \\* 4257126176 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:33:\[^\n\r]*signed integer overflow: -2166572391 \\* -4257126176 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:34:\[^\n\r]*signed integer overflow: 2166572391 \\* -4257126176 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:35:\[^\n\r]*signed integer overflow: -2166572391 \\* 4257126176 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:36:\[^\n\r]*signed integer overflow: 6 \\* 1537228672809129302 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:37:\[^\n\r]*signed integer overflow: -6 \\* -1537228672809129302 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:38:\[^\n\r]*signed integer overflow: -6 \\* 1537228672809129302 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:39:\[^\n\r]*signed integer overflow: 6 \\* -1537228672809129302 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:40:\[^\n\r]*signed integer overflow: 4257126175 \\* 2166572392 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:41:\[^\n\r]*signed integer overflow: -4257126175 \\* -2166572392 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:42:\[^\n\r]*signed integer overflow: -4257126175 \\* 2166572392 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:43:\[^\n\r]*signed integer overflow: 4257126175 \\* -2166572392 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:44:\[^\n\r]*signed integer overflow: 7 \\* 1537228672809129301 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:45:\[^\n\r]*signed integer overflow: -7 \\* -1537228672809129301 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:46:\[^\n\r]*signed integer overflow: -7 \\* 1537228672809129301 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:47:\[^\n\r]*signed integer overflow: 7 \\* -1537228672809129301 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:48:\[^\n\r]*signed integer overflow: 4257126176 \\* 2166572391 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:49:\[^\n\r]*signed integer overflow: -4257126176 \\* -2166572391 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*overflow-mul-4.c:50:\[^\n\r]*signed integer overflow: -4257126176 \\* 2166572391 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*overflow-mul-4.c:51:\[^\n\r]*signed integer overflow: 4257126176 \\* -2166572391 cannot be represented in type 'long long int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c
index 85f81d8b547..87b47974dee 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-1.c
@@ -27,12 +27,12 @@ main (void)
return 0;
}
-/* { dg-output "negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -\[^\n\r]* cannot be represented in type 'long int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -\[^\n\r]* cannot be represented in type 'long int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -\[^\n\r]* cannot be represented in type 'long int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -9223372036854775808 cannot be represented in type 'long long int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -9223372036854775808 cannot be represented in type 'long long int'; cast to an unsigned type to negate this value to itself(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*negation of -9223372036854775808 cannot be represented in type 'long long int'; cast to an unsigned type to negate this value to itself" } */
+/* { dg-output "negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -\[^\n\r]* cannot be represented in type 'long int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -\[^\n\r]* cannot be represented in type 'long int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -\[^\n\r]* cannot be represented in type 'long int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -9223372036854775808 cannot be represented in type 'long long int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -9223372036854775808 cannot be represented in type 'long long int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -9223372036854775808 cannot be represented in type 'long long int'\[^\n\r]*; cast to an unsigned type to negate this value to itself" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-negate-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-2.c
index 6a4f288bbe0..db54b27aa3f 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-negate-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-2.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable -fno-sanitize-recover=signed-integer-overflow" } */
#define SCHAR_MIN (-__SCHAR_MAX__ - 1)
#define SHRT_MIN (-__SHRT_MAX__ - 1)
@@ -14,8 +12,6 @@
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
volatile signed char c = -SCHAR_MIN;
CHECK (c, -128);
@@ -37,9 +33,5 @@ main (void)
volatile long long lli = LLONG_MIN;
lli = -(unsigned long long) lli;
CHECK (lli, -0x8000000000000000L);
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-negate-3.c b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-3.c
new file mode 100644
index 00000000000..924df6f0ca2
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-negate-3.c
@@ -0,0 +1,22 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow" } */
+
+#define INT_MIN (-__INT_MAX__ - 1)
+
+int
+main ()
+{
+ int x = INT_MIN;
+ int y;
+ asm ("" : "+g" (x));
+ y = -(-x);
+ asm ("" : "+g" (y));
+ y = -(-INT_MIN);
+ asm ("" : "+g" (y));
+ return 0;
+}
+
+/* { dg-output "negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c
index 15f04455e6e..e92aaf4ce33 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-1.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -Wno-unused-variable -fno-sanitize-recover=signed-integer-overflow" } */
#define SCHAR_MAX __SCHAR_MAX__
#define SCHAR_MIN (-__SCHAR_MAX__ - 1)
@@ -20,8 +18,6 @@ check (int i, int j)
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
#if __INT_MAX__ == 2147483647
/* Here, nothing should fail. */
volatile int i = -1;
@@ -61,9 +57,5 @@ main (void)
d--;
check (d, 32767);
#endif
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c
index 6476b65d2a0..cc94061634e 100644
--- a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-2.c
@@ -42,13 +42,13 @@ main (void)
return 0;
}
-/* { dg-output "signed integer overflow: -2147483648 - 1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -2147483548 \\+ -1024 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 \\+ -1 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -2147482648 \\+ -1048576 cannot be represented in type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1024 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1 cannot be represented in type 'long int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* \\+ -1048576 cannot be represented in type 'long int'" } */
+/* { dg-output "signed integer overflow: -2147483648 - 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 - 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483548 - 1024 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147483648 - 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -2147482648 - 1048576 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1024 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1 cannot be represented in type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: -\[^\n\r]* - 1048576 cannot be represented in type 'long int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/overflow-sub-4.c b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-4.c
new file mode 100644
index 00000000000..cb4244d1d39
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/overflow-sub-4.c
@@ -0,0 +1,20 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=signed-integer-overflow" } */
+
+#define INT_MIN (-__INT_MAX__ - 1)
+
+int
+main ()
+{
+ int x = INT_MIN;
+ int y = 0;
+ int z;
+ asm ("" : "+g" (y));
+ asm ("" : "+g" (x));
+ z = y - (-x);
+ asm ("" : "+g" (z));
+ return 0;
+}
+
+/* { dg-output "negation of -2147483648 cannot be represented in type 'int'\[^\n\r]*; cast to an unsigned type to negate this value to itself\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 0 - -2147483648 cannot be represented in type 'int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr56917.c b/gcc/testsuite/c-c++-common/ubsan/pr56917.c
new file mode 100644
index 00000000000..cfbae97c0c6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr56917.c
@@ -0,0 +1,34 @@
+/* PR middle-end/56917 */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
+
+#define INT_MIN (-__INT_MAX__ - 1)
+#define LONG_MIN (-__LONG_MAX__ - 1L)
+#define LLONG_MIN (-__LONG_LONG_MAX__ - 1LL)
+
+int __attribute__ ((noinline,noclone))
+fn1 (unsigned int u)
+{
+ return (-(int) (u - 1U)) - 1;
+}
+
+long __attribute__ ((noinline,noclone))
+fn2 (unsigned long int ul)
+{
+ return (-(long) (ul - 1UL)) - 1L;
+}
+
+long long __attribute__ ((noinline,noclone))
+fn3 (unsigned long long int ull)
+{
+ return (-(long long) (ull - 1ULL)) - 1LL;
+}
+
+int
+main (void)
+{
+ if (fn1 (__INT_MAX__ + 1U) != INT_MIN
+ || fn2 (__LONG_MAX__ + 1UL) != LONG_MIN
+ || fn3 (__LONG_LONG_MAX__ + 1ULL) != LLONG_MIN)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr56956.c b/gcc/testsuite/c-c++-common/ubsan/pr56956.c
new file mode 100644
index 00000000000..996e1dd8a79
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr56956.c
@@ -0,0 +1,15 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -fsanitize-undefined-trap-on-error" } */
+
+unsigned int __attribute__((noinline,noclone))
+foo (unsigned int x)
+{
+ return x <= __INT_MAX__ ? x : -x;
+}
+
+int
+main ()
+{
+ volatile unsigned int tem = foo (-__INT_MAX__ - 1);
+ return 0;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr59503.c b/gcc/testsuite/c-c++-common/ubsan/pr59503.c
index 1637f01b5a1..36356d532c8 100644
--- a/gcc/testsuite/c-c++-common/ubsan/pr59503.c
+++ b/gcc/testsuite/c-c++-common/ubsan/pr59503.c
@@ -1,21 +1,13 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=signed-integer-overflow" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=signed-integer-overflow -fno-sanitize-recover=signed-integer-overflow" } */
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
long long int a = 14;
long int b = 9;
asm volatile ("" : "+r" (a), "+r" (b));
if ((a - b) != 5)
__builtin_abort ();
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr59667.c b/gcc/testsuite/c-c++-common/ubsan/pr59667.c
index 7fad7029907..d5346611026 100644
--- a/gcc/testsuite/c-c++-common/ubsan/pr59667.c
+++ b/gcc/testsuite/c-c++-common/ubsan/pr59667.c
@@ -11,4 +11,4 @@ main (void)
return 0;
}
-/* { dg-output "store to null pointer of type 'float'(\n|\r\n|\r)" } */
+/* { dg-output "store to null pointer of type 'float'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr60613-1.c b/gcc/testsuite/c-c++-common/ubsan/pr60613-1.c
index 6794532a486..f358e19da9b 100644
--- a/gcc/testsuite/c-c++-common/ubsan/pr60613-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/pr60613-1.c
@@ -1,8 +1,6 @@
/* PR sanitizer/60613 */
/* { dg-do run } */
-/* { dg-options "-fsanitize=undefined" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
long long y;
@@ -26,16 +24,10 @@ bar (long long x)
int
main ()
{
- fputs ("UBSAN TEST START\n", stderr);
-
y = 1;
if (foo (8 - 2040) != 8 - 1)
__builtin_abort ();
if (bar (1) != 8 - 1)
__builtin_abort ();
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr60613-2.c b/gcc/testsuite/c-c++-common/ubsan/pr60613-2.c
index 92c2de81eb8..ed33e9341f3 100644
--- a/gcc/testsuite/c-c++-common/ubsan/pr60613-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/pr60613-2.c
@@ -32,5 +32,5 @@ main ()
return 0;
}
-/* { dg-output "signed integer overflow: 8 \\- -9223372036854775801 cannot be represented in type 'long long int'(\n|\r\n|\r)" } */
+/* { dg-output "signed integer overflow: 8 \\- -9223372036854775801 cannot be represented in type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*signed integer overflow: 8 \\- -9223372036854775802 cannot be represented in type 'long long int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr63520.c b/gcc/testsuite/c-c++-common/ubsan/pr63520.c
new file mode 100644
index 00000000000..66da668dbb8
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr63520.c
@@ -0,0 +1,16 @@
+/* PR sanitizer/63520 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+
+int a;
+
+void
+foo (void)
+{
+ while (1)
+ {
+ if (a == 1)
+ break;
+ a -= 1;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr63802.c b/gcc/testsuite/c-c++-common/ubsan/pr63802.c
new file mode 100644
index 00000000000..0ca7461ce95
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr63802.c
@@ -0,0 +1,23 @@
+/* Limit this to known non-strict alignment targets. */
+/* { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } } */
+/* { dg-options "-fsanitize=alignment" } */
+
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+
+struct test_struct {
+ unsigned long a;
+ int b;
+} __attribute__((__aligned__(64)));
+
+char a[200];
+
+int main ()
+{
+ volatile int x = ((struct test_struct*)(round_up((unsigned long)a, 64) + 16))->b;
+ volatile int y = ((struct test_struct*)(round_up((unsigned long)a, 64) + 15))->b;
+
+ return 0;
+}
+
+/* { dg-output "\.c:18:\[0-9]*: \[^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct test_struct', which requires 64 byte alignment" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr63839.c b/gcc/testsuite/c-c++-common/ubsan/pr63839.c
new file mode 100644
index 00000000000..e3933f7fe66
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr63839.c
@@ -0,0 +1,23 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=unreachable" } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-shouldfail "ubsan" } */
+
+static void __attribute__ ((noreturn))
+bar ()
+{
+} /* { dg-warning "function does return" } */
+
+void
+foo ()
+{
+ bar ();
+}
+
+int
+main (void)
+{
+ foo ();
+}
+
+/* { dg-output "execution reached a __builtin_unreachable\\(\\) call" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr63866.c b/gcc/testsuite/c-c++-common/ubsan/pr63866.c
new file mode 100644
index 00000000000..e70daa72e18
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr63866.c
@@ -0,0 +1,11 @@
+/* PR sanitizer/63866 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined -fdump-ipa-cgraph" } */
+
+int
+foo (int x, int y)
+{
+ return x + y;
+}
+
+/* { dg-final { cleanup-ipa-dump "cgraph" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr63879-1.c b/gcc/testsuite/c-c++-common/ubsan/pr63879-1.c
new file mode 100644
index 00000000000..2035849a8ce
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr63879-1.c
@@ -0,0 +1,23 @@
+/* PR sanitizer/63879 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+
+struct A
+{
+ int inode;
+} * a;
+int b, c;
+void
+fn1 ()
+{
+ int d = 0;
+ while (b)
+ {
+ if (a->inode)
+ d++;
+ a = 0;
+ }
+ c = d - 1;
+ for (; c >= 0; c--)
+ ;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr63879-2.c b/gcc/testsuite/c-c++-common/ubsan/pr63879-2.c
new file mode 100644
index 00000000000..34eb8e79d67
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr63879-2.c
@@ -0,0 +1,13 @@
+/* PR sanitizer/63879 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+
+int a;
+void
+fn1 ()
+{
+ int b = 2;
+ for (; a;)
+ while (b >= 0)
+ b--;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr64121.c b/gcc/testsuite/c-c++-common/ubsan/pr64121.c
new file mode 100644
index 00000000000..614d72a22af
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr64121.c
@@ -0,0 +1,16 @@
+/* PR sanitizer/64121 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined -Wno-pointer-arith" } */
+
+extern int tab[16];
+
+void
+execute (int *ip, int x)
+{
+ int *xp = tab;
+base:
+ if (x)
+ return;
+ *xp++ = *ip;
+ goto *(&&base + *ip);
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr64289.c b/gcc/testsuite/c-c++-common/ubsan/pr64289.c
new file mode 100644
index 00000000000..1e38e6d3c29
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr64289.c
@@ -0,0 +1,9 @@
+/* PR sanitizer/64289 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+int
+foo (int a)
+{
+ return (int) (0 ? 0 : a ? a : 0.5);
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr64344-1.c b/gcc/testsuite/c-c++-common/ubsan/pr64344-1.c
new file mode 100644
index 00000000000..4891beb1dac
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr64344-1.c
@@ -0,0 +1,9 @@
+/* PR sanitizer/64344 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+int
+foo (float x)
+{
+ return __builtin_log ((double ) x);
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr64344-2.c b/gcc/testsuite/c-c++-common/ubsan/pr64344-2.c
new file mode 100644
index 00000000000..9d1eafd7abe
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr64344-2.c
@@ -0,0 +1,11 @@
+/* PR sanitizer/64344 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+int
+foo (void)
+{
+ static const int a = 0.5;
+ static const int b = (int) 13.5 + 1;
+ return a + b;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr64906.c b/gcc/testsuite/c-c++-common/ubsan/pr64906.c
new file mode 100644
index 00000000000..e0ac0eefd5e
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr64906.c
@@ -0,0 +1,12 @@
+/* PR sanitizer/64906 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=integer-divide-by-zero -O -Werror=maybe-uninitialized" } */
+
+int
+fn1 (int f, int s)
+{
+ int ret = 0;
+ if (f)
+ ret = s / (f ? (unsigned long) 8 : 0);
+ return ret;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr65367.c b/gcc/testsuite/c-c++-common/ubsan/pr65367.c
new file mode 100644
index 00000000000..a98d797e9f6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr65367.c
@@ -0,0 +1,9 @@
+/* PR sanitizer/65367 */
+/* { dg-do compile } */
+/* { dg-options "-fno-tree-ccp -fno-tree-copy-prop -fno-tree-dominator-opts -fno-tree-fre -fsanitize=object-size" } */
+
+int
+foo (char *p)
+{
+ return *((const char *) "") - *p;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/pr66908.c b/gcc/testsuite/c-c++-common/ubsan/pr66908.c
new file mode 100644
index 00000000000..5f731f0cbae
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/pr66908.c
@@ -0,0 +1,15 @@
+/* PR sanitizer/66908 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=shift,bounds -O2 -Werror=maybe-uninitialized" } */
+/* { dg-additional-options "-std=gnu90" { target c } } */
+
+struct S { int a[22]; };
+static int const e[22] = { };
+
+void
+foo (struct S const *s, unsigned int m, unsigned int *res)
+{
+ unsigned int i;
+ for (i = 0; i < 22; ++i)
+ res[i] = ((s->a[i] + e[i]) << m);
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/recovery-1.c b/gcc/testsuite/c-c++-common/ubsan/recovery-1.c
new file mode 100644
index 00000000000..62dd6f769f0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/recovery-1.c
@@ -0,0 +1,9 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -fsanitize-recover=signed-integer-overflow -w" } */
+
+#include "recovery-common.inc"
+
+/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 2 cannot be represented in type 'int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/recovery-2.c b/gcc/testsuite/c-c++-common/ubsan/recovery-2.c
new file mode 100644
index 00000000000..ad5e410d9f1
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/recovery-2.c
@@ -0,0 +1,10 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=signed-integer-overflow -w" } */
+/* { dg-shouldfail "ubsan" } */
+
+#include "recovery-common.inc"
+
+/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*(?!.*signed integer overflow)" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/recovery-3.c b/gcc/testsuite/c-c++-common/ubsan/recovery-3.c
new file mode 100644
index 00000000000..d1dcd3e8296
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/recovery-3.c
@@ -0,0 +1,9 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined -w" } */
+
+#include "recovery-common.inc"
+
+/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 1 cannot be represented in type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*signed integer overflow: 2147483647 \\+ 2 cannot be represented in type 'int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/recovery-common.inc b/gcc/testsuite/c-c++-common/ubsan/recovery-common.inc
new file mode 100644
index 00000000000..1e0667adba4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/recovery-common.inc
@@ -0,0 +1,19 @@
+typedef const unsigned long long int CULLI;
+typedef volatile int VI;
+struct s { signed long int a; };
+
+int
+main (void)
+{
+ volatile int shiftcount = 153;
+ volatile int a = __INT_MAX__;
+ volatile int b = __INT_MAX__;
+
+ a << 152;
+ b << shiftcount;
+ a += 1;
+ b += 2;
+
+ return 0;
+}
+
diff --git a/gcc/testsuite/c-c++-common/ubsan/sanitize-all-1.c b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-1.c
new file mode 100644
index 00000000000..9ffba50443a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-1.c
@@ -0,0 +1,8 @@
+/* Test -f*sanitize*=all */
+/* { dg-do compile } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=all" } */
+
+int i;
+
+/* { dg-error "-fsanitize=all option is not valid" "" { target *-*-* } 0 } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/sanitize-all-2.c b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-2.c
new file mode 100644
index 00000000000..6ae6f3c233f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-2.c
@@ -0,0 +1,41 @@
+/* Test -f*sanitize*=all */
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } } */
+/* { dg-options "-fsanitize=undefined,float-cast-overflow,float-divide-by-zero -fno-sanitize=all -fdump-tree-optimized" } */
+
+int a[4];
+
+int
+f1 (int x, int y, int z)
+{
+ return a[x] + (1 << y) + (100 / z);
+}
+
+char *
+f2 (int x)
+{
+ char *p = (char *) __builtin_calloc (64, 1);
+ p[x] = 3;
+ return p;
+}
+
+int
+f3 (int x, int *y, double z, double w)
+{
+ int a[*y];
+ if (x)
+ __builtin_unreachable ();
+ asm volatile ("" : : "r" (&a[0]));
+ return z / w;
+}
+
+int
+main ()
+{
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-not "__ubsan_" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "UBSAN_CHECK_" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/sanitize-all-3.c b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-3.c
new file mode 100644
index 00000000000..9be62acdbe9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-3.c
@@ -0,0 +1,42 @@
+/* Test -f*sanitize*=all */
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } } */
+/* { dg-options "-fsanitize=undefined -fsanitize-recover=all -fdump-tree-optimized" } */
+
+int a[4];
+
+int
+f1 (int x, int y, int z)
+{
+ return a[x] + (1 << y) + (100 / z);
+}
+
+char *
+f2 (int x)
+{
+ char *p = (char *) __builtin_calloc (64, 1);
+ p[x] = 3;
+ return p;
+}
+
+int
+f3 (int x, int *y, double z, double w)
+{
+ int a[*y];
+ if (x)
+ __builtin_unreachable ();
+ asm volatile ("" : : "r" (&a[0]));
+ return z / w;
+}
+
+int
+main ()
+{
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump "__ubsan_" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "__ubsan_\[a-z_\]*_abort" "optimized" } } */
+/* { dg-final { scan-tree-dump "UBSAN_CHECK_" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/sanitize-all-4.c b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-4.c
new file mode 100644
index 00000000000..1f7ec2b53f3
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/sanitize-all-4.c
@@ -0,0 +1,42 @@
+/* Test -f*sanitize*=all */
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=all -fdump-tree-optimized" } */
+
+int a[4];
+
+int
+f1 (int x, int y, int z)
+{
+ return a[x] + (1 << y) + (100 / z);
+}
+
+char *
+f2 (int x)
+{
+ char *p = (char *) __builtin_calloc (64, 1);
+ p[x] = 3;
+ return p;
+}
+
+int
+f3 (int x, int *y, double z, double w)
+{
+ int a[*y];
+ if (x)
+ __builtin_unreachable ();
+ asm volatile ("" : : "r" (&a[0]));
+ return z / w;
+}
+
+int
+main ()
+{
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump "__ubsan_\[a-z_\]*_abort" "optimized" } } */
+/* { dg-final { scan-tree-dump-not "__ubsan_\[a-z_\]*\[^et\] " "optimized" } } */
+/* { dg-final { scan-tree-dump "UBSAN_CHECK_" "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/save-expr-1.c b/gcc/testsuite/c-c++-common/ubsan/save-expr-1.c
index 19b1eb08bd6..ff6c5cff0b1 100644
--- a/gcc/testsuite/c-c++-common/ubsan/save-expr-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/save-expr-1.c
@@ -1,19 +1,11 @@
/* { dg-do compile } */
-/* { dg-options "-fsanitize=shift -Wall -Werror -O" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=shift -Wall -Werror -O -fno-sanitize-recover=shift" } */
static int x;
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
int o = 1;
int y = x << o;
-
- fputs ("UBSAN TEST END\n", stderr);
return y;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-1.c b/gcc/testsuite/c-c++-common/ubsan/shift-1.c
index d2538802aab..d770e833df7 100644
--- a/gcc/testsuite/c-c++-common/ubsan/shift-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-1.c
@@ -23,9 +23,9 @@ main (void)
return 0;
}
-/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 154 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 524 is too large for \[^\n\r]*-bit type 'long long unsigned int'(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent 370 is too large for \[^\n\r]*-bit type 'int'(\n|\r\n|\r)" } */
+/* { dg-output "shift exponent 152 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 153 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 154 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 524 is too large for \[^\n\r]*-bit type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 370 is too large for \[^\n\r]*-bit type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*shift exponent 402 is too large for \[^\n\r]*-bit type 'long int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-2.c b/gcc/testsuite/c-c++-common/ubsan/shift-2.c
index aaaeb6fcc09..23067d8f3d4 100644
--- a/gcc/testsuite/c-c++-common/ubsan/shift-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-2.c
@@ -16,8 +16,8 @@ main (void)
return 0;
}
-/* { dg-output "shift exponent -3 is negative(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent -4 is negative(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent -5 is negative(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent -6 is negative(\n|\r\n|\r)" } */
+/* { dg-output "shift exponent -3 is negative\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent -4 is negative\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent -5 is negative\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent -6 is negative\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*shift exponent -11 is negative" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-3.c b/gcc/testsuite/c-c++-common/ubsan/shift-3.c
index 65ee5d8821e..d57d73e62e8 100644
--- a/gcc/testsuite/c-c++-common/ubsan/shift-3.c
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-3.c
@@ -1,19 +1,11 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=shift -w" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=shift -w -fno-sanitize-recover=shift" } */
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
unsigned int a = 1;
a <<= 31;
a <<= 1;
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-4.c b/gcc/testsuite/c-c++-common/ubsan/shift-4.c
index 5f095b61ac8..5b539ac2230 100644
--- a/gcc/testsuite/c-c++-common/ubsan/shift-4.c
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-4.c
@@ -11,4 +11,4 @@ main ()
return 0;
}
-/* { dg-output "shift exponent 120 is too large\[^\n\r]*" } */
+/* { dg-output "shift exponent 120 is too large" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-5.c b/gcc/testsuite/c-c++-common/ubsan/shift-5.c
deleted file mode 100644
index 6f9c52a7288..00000000000
--- a/gcc/testsuite/c-c++-common/ubsan/shift-5.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-fsanitize=shift -w" } */
-/* { dg-shouldfail "ubsan" } */
-
-int x;
-int
-foo (void)
-{
- /* None of the following should pass. */
- switch (x)
- {
- case 1 >> -1:
-/* { dg-error "case label does not reduce to an integer constant" "" {target c } 12 } */
-/* { dg-error "is not a constant expression" "" { target c++ } 12 } */
- case -1 >> -1:
-/* { dg-error "case label does not reduce to an integer constant" "" {target c } 15 } */
-/* { dg-error "is not a constant expression" "" { target c++ } 15 } */
- case 1 << -1:
-/* { dg-error "case label does not reduce to an integer constant" "" {target c } 18 } */
-/* { dg-error "is not a constant expression" "" { target c++ } 18 } */
- case -1 << -1:
-/* { dg-error "case label does not reduce to an integer constant" "" {target c } 21 } */
-/* { dg-error "is not a constant expression" "" { target c++ } 21 } */
- case -1 >> 200:
-/* { dg-error "case label does not reduce to an integer constant" "" {target c } 24 } */
-/* { dg-error "is not a constant expression" "" { target c++ } 24 } */
- case 1 << 200:
-/* { dg-error "case label does not reduce to an integer constant" "" {target c } 27 } */
-/* { dg-error "is not a constant expression" "" { target c++ } 27 } */
- return 1;
- }
- return 0;
-}
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-6.c b/gcc/testsuite/c-c++-common/ubsan/shift-6.c
index d88a42910a6..d596459b3ea 100644
--- a/gcc/testsuite/c-c++-common/ubsan/shift-6.c
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-6.c
@@ -1,15 +1,11 @@
/* PR sanitizer/58413 */
/* { dg-do run { target int32plus } } */
-/* { dg-options "-fsanitize=shift -w" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=shift -w -fno-sanitize-recover=shift" } */
int x = 7;
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
/* All of the following should pass. */
int A[128 >> 5] = {};
int B[128 << 5] = {};
@@ -30,9 +26,5 @@ main (void)
case 128 >> (4 + 1):
return 1;
}
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-7.c b/gcc/testsuite/c-c++-common/ubsan/shift-7.c
new file mode 100644
index 00000000000..9aba82c0168
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-7.c
@@ -0,0 +1,28 @@
+/* PR c/63862 */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=undefined" } */
+
+unsigned long long int __attribute__ ((noinline, noclone))
+foo (unsigned long long int i, unsigned long long int j)
+{
+ asm ("");
+ return i >> j;
+}
+
+unsigned long long int __attribute__ ((noinline, noclone))
+bar (unsigned long long int i, unsigned long long int j)
+{
+ asm ("");
+ return i << j;
+}
+
+int
+main ()
+{
+ foo (1ULL, 0x100000000ULL);
+ bar (1ULL, 0x100000000ULL);
+ return 0;
+}
+
+/* { dg-output "shift exponent 4294967296 is too large for \[^\n\r]*-bit type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*shift exponent 4294967296 is too large for \[^\n\r]*-bit type 'long long unsigned int'" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/shift-8.c b/gcc/testsuite/c-c++-common/ubsan/shift-8.c
new file mode 100644
index 00000000000..8717f3f3427
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/shift-8.c
@@ -0,0 +1,64 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+/* { dg-additional-options "-std=gnu11" { target c } } */
+/* { dg-additional-options "-std=c++11" { target c++ } } */
+
+signed char
+fn1 (signed char x, unsigned long y)
+{
+ return x << y;
+}
+
+short int
+fn2 (short int x, unsigned long y)
+{
+ return x << y;
+}
+
+int
+fn3 (int x, unsigned long y)
+{
+ return x << y;
+}
+
+long int
+fn4 (long int x, unsigned long y)
+{
+ return x << y;
+}
+
+long long int
+fn5 (long long int x, unsigned long y)
+{
+ return x << y;
+}
+
+signed char
+fn6 (signed char x, unsigned long long y)
+{
+ return x << y;
+}
+
+short int
+fn7 (short int x, unsigned long long y)
+{
+ return x << y;
+}
+
+int
+fn8 (int x, unsigned long long y)
+{
+ return x << y;
+}
+
+long int
+fn9 (long int x, unsigned long long y)
+{
+ return x << y;
+}
+
+long long int
+fn10 (long long int x, unsigned long long y)
+{
+ return x << y;
+}
diff --git a/gcc/testsuite/c-c++-common/ubsan/undefined-1.c b/gcc/testsuite/c-c++-common/ubsan/undefined-1.c
index d1b9ce78771..b67b0132822 100644
--- a/gcc/testsuite/c-c++-common/ubsan/undefined-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/undefined-1.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=undefined" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=undefined" } */
int
foo (int x, int y)
@@ -21,13 +19,7 @@ bar (int x, int y)
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
foo (3, 2);
bar (12, 42);
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/undefined-2.c b/gcc/testsuite/c-c++-common/ubsan/undefined-2.c
index fd5b4d3e9e0..9f764f80091 100644
--- a/gcc/testsuite/c-c++-common/ubsan/undefined-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/undefined-2.c
@@ -1,10 +1,8 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=undefined" } */
+/* { dg-options "-fsanitize=undefined -fno-sanitize-recover=signed-integer-overflow" } */
/* { dg-additional-options "-std=gnu11" { target c } } */
/* { dg-additional-options "-std=c++11" { target c++ } } */
-#include <stdio.h>
-
volatile int w, z;
__attribute__ ((noinline, noclone)) int
@@ -17,10 +15,8 @@ foo (int x, int y)
int
main ()
{
- fputs ("1st\n", stderr);
w = foo (0, -__INT_MAX__);
return 0;
}
-/* { dg-output "1st(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*shift exponent -\[^\n\r]* is negative\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "shift exponent -\[^\n\r]* is negative" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/unreachable-2.c b/gcc/testsuite/c-c++-common/ubsan/unreachable-2.c
new file mode 100644
index 00000000000..783ebc24913
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/ubsan/unreachable-2.c
@@ -0,0 +1,14 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=unreachable" } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-shouldfail "ubsan" } */
+
+int e;
+
+int
+main (void)
+{
+ return e ? 0 : (__builtin_unreachable (), 1);
+}
+
+/* { dg-output "execution reached a __builtin_unreachable\\(\\) call" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/vla-1.c b/gcc/testsuite/c-c++-common/ubsan/vla-1.c
index 0fecfa2a3d5..52ade3aab75 100644
--- a/gcc/testsuite/c-c++-common/ubsan/vla-1.c
+++ b/gcc/testsuite/c-c++-common/ubsan/vla-1.c
@@ -102,18 +102,18 @@ main (void)
return 0;
}
-/* { dg-output "variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -5(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -3(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value 0(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value 0(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -6(\n|\r\n|\r)" } */
+/* { dg-output "variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -5\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -3\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value 0\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value 0\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -1\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -6\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*variable length array bound evaluates to non-positive value -4" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/vla-2.c b/gcc/testsuite/c-c++-common/ubsan/vla-2.c
index a11e4e7227f..9cd4ddff219 100644
--- a/gcc/testsuite/c-c++-common/ubsan/vla-2.c
+++ b/gcc/testsuite/c-c++-common/ubsan/vla-2.c
@@ -1,22 +1,14 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=vla-bound -Wall -Wno-unused-variable" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=vla-bound -Wall -Wno-unused-variable -fno-sanitize-recover=vla-bound" } */
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
const int t = 0;
struct s {
int x;
/* Don't instrument this one. */
int g[t];
};
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/vla-3.c b/gcc/testsuite/c-c++-common/ubsan/vla-3.c
index 7772857fdea..6003fdddf3a 100644
--- a/gcc/testsuite/c-c++-common/ubsan/vla-3.c
+++ b/gcc/testsuite/c-c++-common/ubsan/vla-3.c
@@ -1,7 +1,5 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=vla-bound" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=vla-bound -fno-sanitize-recover=vla-bound" } */
/* Don't instrument the arrays here. */
int
@@ -13,13 +11,7 @@ foo (int n, int a[])
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
int a[6] = { };
int ret = foo (3, a);
-
- fputs ("UBSAN TEST END\n", stderr);
return ret;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/c-c++-common/ubsan/vla-4.c b/gcc/testsuite/c-c++-common/ubsan/vla-4.c
index 8a7bbac9a71..c9060177606 100644
--- a/gcc/testsuite/c-c++-common/ubsan/vla-4.c
+++ b/gcc/testsuite/c-c++-common/ubsan/vla-4.c
@@ -1,21 +1,13 @@
/* { dg-do run } */
-/* { dg-options "-fsanitize=vla-bound" } */
-
-#include <stdio.h>
+/* { dg-options "-fsanitize=vla-bound -fno-sanitize-recover=vla-bound" } */
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
int x = 1;
/* Check that the size of an array is evaluated only once. */
int a[++x];
if (x != 2)
__builtin_abort ();
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/g++.dg/asan/asan_mem_test.cc b/gcc/testsuite/g++.dg/asan/asan_mem_test.cc
index 5656aab0bac..5fc4af7e9e3 100644
--- a/gcc/testsuite/g++.dg/asan/asan_mem_test.cc
+++ b/gcc/testsuite/g++.dg/asan/asan_mem_test.cc
@@ -74,17 +74,17 @@ TEST(AddressSanitizer, MemSetOOBTest) {
// Strictly speaking we are not guaranteed to find such two pointers,
// but given the structure of asan's allocator we will.
static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) {
- vector<char *> v;
+ vector<uintptr_t> v;
bool res = false;
for (size_t i = 0; i < 1000U && !res; i++) {
- v.push_back(new char[size]);
+ v.push_back(reinterpret_cast<uintptr_t>(new char[size]));
if (i == 0) continue;
sort(v.begin(), v.end());
for (size_t j = 1; j < v.size(); j++) {
assert(v[j] > v[j-1]);
if ((size_t)(v[j] - v[j-1]) < size * 2) {
- *x2 = v[j];
- *x1 = v[j-1];
+ *x2 = reinterpret_cast<char*>(v[j]);
+ *x1 = reinterpret_cast<char*>(v[j-1]);
res = true;
break;
}
@@ -92,9 +92,10 @@ static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) {
}
for (size_t i = 0; i < v.size(); i++) {
- if (res && v[i] == *x1) continue;
- if (res && v[i] == *x2) continue;
- delete [] v[i];
+ char *p = reinterpret_cast<char *>(v[i]);
+ if (res && p == *x1) continue;
+ if (res && p == *x2) continue;
+ delete [] p;
}
return res;
}
@@ -223,6 +224,13 @@ TEST(AddressSanitizer, MemCmpOOBTest) {
s1[size - 1] = '\0';
s2[size - 1] = '\0';
EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0));
+
+ // Even if the buffers differ in the first byte, we still assume that
+ // memcmp may access the whole buffer and thus report the overflow here:
+ s1[0] = 1;
+ s2[0] = 123;
+ EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0));
+
free(s1);
free(s2);
}
diff --git a/gcc/testsuite/g++.dg/asan/asan_oob_test.cc b/gcc/testsuite/g++.dg/asan/asan_oob_test.cc
index 61d50553703..2361dc286d7 100644
--- a/gcc/testsuite/g++.dg/asan/asan_oob_test.cc
+++ b/gcc/testsuite/g++.dg/asan/asan_oob_test.cc
@@ -97,7 +97,6 @@ TEST(AddressSanitizer, OOBRightTest) {
}
}
-#if ASAN_ALLOCATOR_VERSION == 2 // Broken with the asan_allocator1
TEST(AddressSanitizer, LargeOOBRightTest) {
size_t large_power_of_two = 1 << 19;
for (size_t i = 16; i <= 256; i *= 2) {
@@ -107,7 +106,6 @@ TEST(AddressSanitizer, LargeOOBRightTest) {
delete [] p;
}
}
-#endif // ASAN_ALLOCATOR_VERSION == 2
TEST(AddressSanitizer, DISABLED_DemoOOBLeftLow) {
oob_test<U1>(10, -1);
diff --git a/gcc/testsuite/g++.dg/asan/asan_str_test.cc b/gcc/testsuite/g++.dg/asan/asan_str_test.cc
index 1b9805e8eb1..207de422316 100644
--- a/gcc/testsuite/g++.dg/asan/asan_str_test.cc
+++ b/gcc/testsuite/g++.dg/asan/asan_str_test.cc
@@ -10,6 +10,10 @@
//===----------------------------------------------------------------------===//
#include "asan_test_utils.h"
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h> // For MAC_OS_X_VERSION_*
+#endif
+
// Used for string functions tests
static char global_string[] = "global";
static size_t global_string_length = 6;
@@ -59,7 +63,19 @@ TEST(AddressSanitizer, StrLenOOBTest) {
free(heap_string);
}
-#ifndef __APPLE__
+TEST(AddressSanitizer, WcsLenTest) {
+ EXPECT_EQ(0U, wcslen(Ident(L"")));
+ size_t hello_len = 13;
+ size_t hello_size = (hello_len + 1) * sizeof(wchar_t);
+ EXPECT_EQ(hello_len, wcslen(Ident(L"Hello, World!")));
+ wchar_t *heap_string = Ident((wchar_t*)malloc(hello_size));
+ memcpy(heap_string, L"Hello, World!", hello_size);
+ EXPECT_EQ(hello_len, Ident(wcslen(heap_string)));
+ EXPECT_DEATH(Ident(wcslen(heap_string + 14)), RightOOBReadMessage(0));
+ free(heap_string);
+}
+
+#if SANITIZER_TEST_HAS_STRNLEN
TEST(AddressSanitizer, StrNLenOOBTest) {
size_t size = Ident(123);
char *str = MallocAndMemsetString(size);
@@ -77,7 +93,7 @@ TEST(AddressSanitizer, StrNLenOOBTest) {
EXPECT_DEATH(Ident(strnlen(str, size + 1)), RightOOBReadMessage(0));
free(str);
}
-#endif
+#endif // SANITIZER_TEST_HAS_STRNLEN
TEST(AddressSanitizer, StrDupOOBTest) {
size_t size = Ident(42);
@@ -168,7 +184,7 @@ TEST(AddressSanitizer, StrNCpyOOBTest) {
typedef char*(*PointerToStrChr1)(const char*, int);
typedef char*(*PointerToStrChr2)(char*, int);
-USED static void RunStrChrTest(PointerToStrChr1 StrChr) {
+UNUSED static void RunStrChrTest(PointerToStrChr1 StrChr) {
size_t size = Ident(100);
char *str = MallocAndMemsetString(size);
str[10] = 'q';
@@ -184,7 +200,7 @@ USED static void RunStrChrTest(PointerToStrChr1 StrChr) {
EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0));
free(str);
}
-USED static void RunStrChrTest(PointerToStrChr2 StrChr) {
+UNUSED static void RunStrChrTest(PointerToStrChr2 StrChr) {
size_t size = Ident(100);
char *str = MallocAndMemsetString(size);
str[10] = 'q';
@@ -203,7 +219,9 @@ USED static void RunStrChrTest(PointerToStrChr2 StrChr) {
TEST(AddressSanitizer, StrChrAndIndexOOBTest) {
RunStrChrTest(&strchr);
+#if !defined(_WIN32) // no index() on Windows.
RunStrChrTest(&index);
+#endif
}
TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) {
@@ -226,6 +244,7 @@ TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) {
EXPECT_LT(0, strncmp("baa", "aaa", 1));
EXPECT_LT(0, strncmp("zyx", "", 2));
+#if !defined(_WIN32) // no str[n]casecmp on Windows.
// strcasecmp
EXPECT_EQ(0, strcasecmp("", ""));
EXPECT_EQ(0, strcasecmp("zzz", "zzz"));
@@ -245,6 +264,7 @@ TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) {
EXPECT_LT(0, strncasecmp("xyz", "xyy", 10));
EXPECT_LT(0, strncasecmp("Baa", "aaa", 1));
EXPECT_LT(0, strncasecmp("zyx", "", 2));
+#endif
// memcmp
EXPECT_EQ(0, memcmp("a", "b", 0));
@@ -287,9 +307,11 @@ TEST(AddressSanitizer, StrCmpOOBTest) {
RunStrCmpTest(&strcmp);
}
+#if !defined(_WIN32) // no str[n]casecmp on Windows.
TEST(AddressSanitizer, StrCaseCmpOOBTest) {
RunStrCmpTest(&strcasecmp);
}
+#endif
typedef int(*PointerToStrNCmp)(const char*, const char*, size_t);
void RunStrNCmpTest(PointerToStrNCmp StrNCmp) {
@@ -322,9 +344,12 @@ TEST(AddressSanitizer, StrNCmpOOBTest) {
RunStrNCmpTest(&strncmp);
}
+#if !defined(_WIN32) // no str[n]casecmp on Windows.
TEST(AddressSanitizer, StrNCaseCmpOOBTest) {
RunStrNCmpTest(&strncasecmp);
}
+#endif
+
TEST(AddressSanitizer, StrCatOOBTest) {
// strcat() reads strlen(to) bytes from |to| before concatenating.
size_t to_size = Ident(100);
@@ -506,11 +531,13 @@ void RunAtoiOOBTest(PointerToCallAtoi Atoi) {
free(array);
}
+#if !defined(_WIN32) // FIXME: Fix and enable on Windows.
TEST(AddressSanitizer, AtoiAndFriendsOOBTest) {
RunAtoiOOBTest(&CallAtoi);
RunAtoiOOBTest(&CallAtol);
RunAtoiOOBTest(&CallAtoll);
}
+#endif
void CallStrtol(const char *nptr, char **endptr, int base) {
Ident(strtol(nptr, endptr, base));
@@ -560,11 +587,13 @@ void RunStrtolOOBTest(PointerToCallStrtol Strtol) {
free(array);
}
+#if !defined(_WIN32) // FIXME: Fix and enable on Windows.
TEST(AddressSanitizer, StrtollOOBTest) {
RunStrtolOOBTest(&CallStrtoll);
}
TEST(AddressSanitizer, StrtolOOBTest) {
RunStrtolOOBTest(&CallStrtol);
}
+#endif
diff --git a/gcc/testsuite/g++.dg/asan/asan_test.C b/gcc/testsuite/g++.dg/asan/asan_test.C
index e11531e07de..3cee7150f73 100644
--- a/gcc/testsuite/g++.dg/asan/asan_test.C
+++ b/gcc/testsuite/g++.dg/asan/asan_test.C
@@ -2,7 +2,7 @@
// { dg-skip-if "" { *-*-* } { "*" } { "-O2" } }
// { dg-skip-if "" { *-*-* } { "-flto" } { "" } }
// { dg-additional-sources "asan_globals_test-wrapper.cc" }
-// { dg-options "-fsanitize=address -fno-builtin -Wall -Wno-format -Werror -g -DASAN_UAR=0 -DASAN_HAS_EXCEPTIONS=1 -DASAN_HAS_BLACKLIST=0 -DASAN_USE_DEJAGNU_GTEST=1 -lasan -lpthread -ldl" }
+// { dg-options "-std=c++11 -fsanitize=address -fno-builtin -Wall -Wno-format -Werror -g -DASAN_UAR=0 -DASAN_HAS_EXCEPTIONS=1 -DASAN_HAS_BLACKLIST=0 -DSANITIZER_USE_DEJAGNU_GTEST=1 -lasan -lpthread -ldl" }
// { dg-additional-options "-DASAN_NEEDS_SEGV=1" { target { ! arm*-*-* } } }
// { dg-additional-options "-DASAN_LOW_MEMORY=1 -DASAN_NEEDS_SEGV=0" { target arm*-*-* } }
// { dg-additional-options "-DASAN_AVOID_EXPENSIVE_TESTS=1" { target { ! run_expensive_tests } } }
diff --git a/gcc/testsuite/g++.dg/asan/asan_test.cc b/gcc/testsuite/g++.dg/asan/asan_test.cc
index 2df8c62cbb1..5f2e2c244dd 100644
--- a/gcc/testsuite/g++.dg/asan/asan_test.cc
+++ b/gcc/testsuite/g++.dg/asan/asan_test.cc
@@ -23,27 +23,10 @@ NOINLINE void *malloc_bbb(size_t size) {
NOINLINE void *malloc_aaa(size_t size) {
void *res = malloc_bbb(size); break_optimization(0); return res;}
-#ifndef __APPLE__
-NOINLINE void *memalign_fff(size_t alignment, size_t size) {
- void *res = memalign/**/(alignment, size); break_optimization(0); return res;}
-NOINLINE void *memalign_eee(size_t alignment, size_t size) {
- void *res = memalign_fff(alignment, size); break_optimization(0); return res;}
-NOINLINE void *memalign_ddd(size_t alignment, size_t size) {
- void *res = memalign_eee(alignment, size); break_optimization(0); return res;}
-NOINLINE void *memalign_ccc(size_t alignment, size_t size) {
- void *res = memalign_ddd(alignment, size); break_optimization(0); return res;}
-NOINLINE void *memalign_bbb(size_t alignment, size_t size) {
- void *res = memalign_ccc(alignment, size); break_optimization(0); return res;}
-NOINLINE void *memalign_aaa(size_t alignment, size_t size) {
- void *res = memalign_bbb(alignment, size); break_optimization(0); return res;}
-#endif // __APPLE__
-
-
NOINLINE void free_ccc(void *p) { free(p); break_optimization(0);}
NOINLINE void free_bbb(void *p) { free_ccc(p); break_optimization(0);}
NOINLINE void free_aaa(void *p) { free_bbb(p); break_optimization(0);}
-
template<typename T>
NOINLINE void uaf_test(int size, int off) {
char *p = (char *)malloc_aaa(size);
@@ -88,19 +71,19 @@ TEST(AddressSanitizer, VariousMallocsTest) {
*c = 0;
delete c;
-#if !defined(__APPLE__) && !defined(ANDROID) && !defined(__ANDROID__)
+#if SANITIZER_TEST_HAS_POSIX_MEMALIGN
int *pm;
int pm_res = posix_memalign((void**)&pm, kPageSize, kPageSize);
EXPECT_EQ(0, pm_res);
free(pm);
-#endif
+#endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN
-#if !defined(__APPLE__)
+#if SANITIZER_TEST_HAS_MEMALIGN
int *ma = (int*)memalign(kPageSize, kPageSize);
EXPECT_EQ(0U, (uintptr_t)ma % kPageSize);
ma[123] = 0;
free(ma);
-#endif // __APPLE__
+#endif // SANITIZER_TEST_HAS_MEMALIGN
}
TEST(AddressSanitizer, CallocTest) {
@@ -109,13 +92,37 @@ TEST(AddressSanitizer, CallocTest) {
free(a);
}
+TEST(AddressSanitizer, CallocReturnsZeroMem) {
+ size_t sizes[] = {16, 1000, 10000, 100000, 2100000};
+ for (size_t s = 0; s < sizeof(sizes)/sizeof(sizes[0]); s++) {
+ size_t size = sizes[s];
+ for (size_t iter = 0; iter < 5; iter++) {
+ char *x = Ident((char*)calloc(1, size));
+ EXPECT_EQ(x[0], 0);
+ EXPECT_EQ(x[size - 1], 0);
+ EXPECT_EQ(x[size / 2], 0);
+ EXPECT_EQ(x[size / 3], 0);
+ EXPECT_EQ(x[size / 4], 0);
+ memset(x, 0x42, size);
+ free(Ident(x));
+#if !defined(_WIN32)
+ // FIXME: OOM on Windows. We should just make this a lit test
+ // with quarantine size set to 1.
+ free(Ident(malloc(Ident(1 << 27)))); // Try to drain the quarantine.
+#endif
+ }
+ }
+}
+
+#if !defined(_WIN32) // No valloc on Windows.
TEST(AddressSanitizer, VallocTest) {
void *a = valloc(100);
EXPECT_EQ(0U, (uintptr_t)a % kPageSize);
free(a);
}
+#endif
-#ifndef __APPLE__
+#if SANITIZER_TEST_HAS_PVALLOC
TEST(AddressSanitizer, PvallocTest) {
char *a = (char*)pvalloc(kPageSize + 100);
EXPECT_EQ(0U, (uintptr_t)a % kPageSize);
@@ -127,8 +134,10 @@ TEST(AddressSanitizer, PvallocTest) {
a[101] = 1; // we should not report an error here.
free(a);
}
-#endif // __APPLE__
+#endif // SANITIZER_TEST_HAS_PVALLOC
+#if !defined(_WIN32)
+// FIXME: Use an equivalent of pthread_setspecific on Windows.
void *TSDWorker(void *test_key) {
if (test_key) {
pthread_setspecific(*(pthread_key_t*)test_key, (void*)0xfeedface);
@@ -158,6 +167,7 @@ TEST(AddressSanitizer, DISABLED_TSDTest) {
PTHREAD_JOIN(th, NULL);
pthread_key_delete(test_key);
}
+#endif
TEST(AddressSanitizer, UAF_char) {
const char *uaf_string = "AddressSanitizer:.*heap-use-after-free";
@@ -168,6 +178,36 @@ TEST(AddressSanitizer, UAF_char) {
EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, kLargeMalloc / 2), uaf_string);
}
+TEST(AddressSanitizer, UAF_long_double) {
+ if (sizeof(long double) == sizeof(double)) return;
+ long double *p = Ident(new long double[10]);
+ EXPECT_DEATH(Ident(p)[12] = 0, "WRITE of size 1[026]");
+ EXPECT_DEATH(Ident(p)[0] = Ident(p)[12], "READ of size 1[026]");
+ delete [] Ident(p);
+}
+
+#if !defined(_WIN32)
+struct Packed5 {
+ int x;
+ char c;
+} __attribute__((packed));
+#else
+# pragma pack(push, 1)
+struct Packed5 {
+ int x;
+ char c;
+};
+# pragma pack(pop)
+#endif
+
+TEST(AddressSanitizer, UAF_Packed5) {
+ static_assert(sizeof(Packed5) == 5, "Please check the keywords used");
+ Packed5 *p = Ident(new Packed5[2]);
+ EXPECT_DEATH(p[0] = p[3], "READ of size 5");
+ EXPECT_DEATH(p[3] = p[0], "WRITE of size 5");
+ delete [] Ident(p);
+}
+
#if ASAN_HAS_BLACKLIST
TEST(AddressSanitizer, IgnoreTest) {
int *x = Ident(new int);
@@ -244,41 +284,6 @@ TEST(AddressSanitizer, SignalTest) {
} // namespace
#endif
-static void MallocStress(size_t n) {
- uint32_t seed = my_rand();
- for (size_t iter = 0; iter < 10; iter++) {
- vector<void *> vec;
- for (size_t i = 0; i < n; i++) {
- if ((i % 3) == 0) {
- if (vec.empty()) continue;
- size_t idx = my_rand_r(&seed) % vec.size();
- void *ptr = vec[idx];
- vec[idx] = vec.back();
- vec.pop_back();
- free_aaa(ptr);
- } else {
- size_t size = my_rand_r(&seed) % 1000 + 1;
-#ifndef __APPLE__
- size_t alignment = 1 << (my_rand_r(&seed) % 7 + 3);
- char *ptr = (char*)memalign_aaa(alignment, size);
-#else
- char *ptr = (char*) malloc_aaa(size);
-#endif
- vec.push_back(ptr);
- ptr[0] = 0;
- ptr[size-1] = 0;
- ptr[size/2] = 0;
- }
- }
- for (size_t i = 0; i < vec.size(); i++)
- free_aaa(vec[i]);
- }
-}
-
-TEST(AddressSanitizer, MallocStressTest) {
- MallocStress((ASAN_LOW_MEMORY) ? 20000 : 200000);
-}
-
static void TestLargeMalloc(size_t size) {
char buff[1024];
sprintf(buff, "is located 1 bytes to the left of %lu-byte", (long)size);
@@ -286,28 +291,21 @@ static void TestLargeMalloc(size_t size) {
}
TEST(AddressSanitizer, LargeMallocTest) {
- for (int i = 113; i < (1 << 28); i = i * 2 + 13) {
+ const int max_size = (SANITIZER_WORDSIZE == 32) ? 1 << 26 : 1 << 28;
+ for (int i = 113; i < max_size; i = i * 2 + 13) {
TestLargeMalloc(i);
}
}
-#if ASAN_LOW_MEMORY != 1
TEST(AddressSanitizer, HugeMallocTest) {
-#ifdef __APPLE__
- // It was empirically found out that 1215 megabytes is the maximum amount of
- // memory available to the process under AddressSanitizer on 32-bit Mac 10.6.
- // 32-bit Mac 10.7 gives even less (< 1G).
- // (the libSystem malloc() allows allocating up to 2300 megabytes without
- // ASan).
- size_t n_megs = SANITIZER_WORDSIZE == 32 ? 500 : 4100;
-#else
- size_t n_megs = SANITIZER_WORDSIZE == 32 ? 2600 : 4100;
-#endif
- TestLargeMalloc(n_megs << 20);
+ if (SANITIZER_WORDSIZE != 64 || ASAN_AVOID_EXPENSIVE_TESTS) return;
+ size_t n_megs = 4100;
+ EXPECT_DEATH(Ident((char*)malloc(n_megs << 20))[-1] = 0,
+ "is located 1 bytes to the left|"
+ "AddressSanitizer failed to allocate");
}
-#endif
-#ifndef __APPLE__
+#if SANITIZER_TEST_HAS_MEMALIGN
void MemalignRun(size_t align, size_t size, int idx) {
char *p = (char *)memalign(align, size);
Ident(p)[idx] = 0;
@@ -323,20 +321,7 @@ TEST(AddressSanitizer, memalign) {
"is located 1 bytes to the right");
}
}
-#endif
-
-TEST(AddressSanitizer, ThreadedMallocStressTest) {
- const int kNumThreads = 4;
- const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
- pthread_t t[kNumThreads];
- for (int i = 0; i < kNumThreads; i++) {
- PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
- (void*)kNumIterations);
- }
- for (int i = 0; i < kNumThreads; i++) {
- PTHREAD_JOIN(t[i], 0);
- }
-}
+#endif // SANITIZER_TEST_HAS_MEMALIGN
void *ManyThreadsWorker(void *a) {
for (int iter = 0; iter < 100; iter++) {
@@ -377,17 +362,30 @@ TEST(AddressSanitizer, ReallocTest) {
free(ptr2);
}
+TEST(AddressSanitizer, ReallocFreedPointerTest) {
+ void *ptr = Ident(malloc(42));
+ ASSERT_TRUE(NULL != ptr);
+ free(ptr);
+ EXPECT_DEATH(ptr = realloc(ptr, 77), "attempting double-free");
+}
+
+TEST(AddressSanitizer, ReallocInvalidPointerTest) {
+ void *ptr = Ident(malloc(42));
+ EXPECT_DEATH(ptr = realloc((int*)ptr + 1, 77), "attempting free.*not malloc");
+ free(ptr);
+}
+
TEST(AddressSanitizer, ZeroSizeMallocTest) {
// Test that malloc(0) and similar functions don't return NULL.
void *ptr = Ident(malloc(0));
EXPECT_TRUE(NULL != ptr);
free(ptr);
-#if !defined(__APPLE__) && !defined(ANDROID) && !defined(__ANDROID__)
+#if SANITIZER_TEST_HAS_POSIX_MEMALIGN
int pm_res = posix_memalign(&ptr, 1<<20, 0);
EXPECT_EQ(0, pm_res);
EXPECT_TRUE(NULL != ptr);
free(ptr);
-#endif
+#endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN
int *int_ptr = new int[0];
int *int_ptr2 = new int[0];
EXPECT_TRUE(NULL != int_ptr);
@@ -397,7 +395,7 @@ TEST(AddressSanitizer, ZeroSizeMallocTest) {
delete[] int_ptr2;
}
-#ifndef __APPLE__
+#if SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE
static const char *kMallocUsableSizeErrorMsg =
"AddressSanitizer: attempting to call malloc_usable_size()";
@@ -413,8 +411,9 @@ TEST(AddressSanitizer, MallocUsableSizeTest) {
kMallocUsableSizeErrorMsg);
free(array);
EXPECT_DEATH(malloc_usable_size(array), kMallocUsableSizeErrorMsg);
+ delete int_ptr;
}
-#endif
+#endif // SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE
void WrongFree() {
int *x = (int*)malloc(100 * sizeof(int));
@@ -423,10 +422,14 @@ void WrongFree() {
free(x + 1);
}
+#if !defined(_WIN32) // FIXME: This should be a lit test.
TEST(AddressSanitizer, WrongFreeTest) {
- EXPECT_DEATH(WrongFree(),
- "ERROR: AddressSanitizer: attempting free.*not malloc");
+ EXPECT_DEATH(WrongFree(), ASAN_PCRE_DOTALL
+ "ERROR: AddressSanitizer: attempting free.*not malloc"
+ ".*is located 4 bytes inside of 400-byte region"
+ ".*allocated by thread");
}
+#endif
void DoubleFree() {
int *x = (int*)malloc(100 * sizeof(int));
@@ -437,6 +440,7 @@ void DoubleFree() {
abort();
}
+#if !defined(_WIN32) // FIXME: This should be a lit test.
TEST(AddressSanitizer, DoubleFreeTest) {
EXPECT_DEATH(DoubleFree(), ASAN_PCRE_DOTALL
"ERROR: AddressSanitizer: attempting double-free"
@@ -444,20 +448,22 @@ TEST(AddressSanitizer, DoubleFreeTest) {
".*freed by thread T0 here"
".*previously allocated by thread T0 here");
}
+#endif
template<int kSize>
NOINLINE void SizedStackTest() {
char a[kSize];
char *A = Ident((char*)&a);
+ const char *expected_death = "AddressSanitizer: stack-buffer-";
for (size_t i = 0; i < kSize; i++)
A[i] = i;
- EXPECT_DEATH(A[-1] = 0, "");
- EXPECT_DEATH(A[-20] = 0, "");
- EXPECT_DEATH(A[-31] = 0, "");
- EXPECT_DEATH(A[kSize] = 0, "");
- EXPECT_DEATH(A[kSize + 1] = 0, "");
- EXPECT_DEATH(A[kSize + 10] = 0, "");
- EXPECT_DEATH(A[kSize + 31] = 0, "");
+ EXPECT_DEATH(A[-1] = 0, expected_death);
+ EXPECT_DEATH(A[-5] = 0, expected_death);
+ EXPECT_DEATH(A[kSize] = 0, expected_death);
+ EXPECT_DEATH(A[kSize + 1] = 0, expected_death);
+ EXPECT_DEATH(A[kSize + 5] = 0, expected_death);
+ if (kSize > 16)
+ EXPECT_DEATH(A[kSize + 31] = 0, expected_death);
}
TEST(AddressSanitizer, SimpleStackTest) {
@@ -478,6 +484,9 @@ TEST(AddressSanitizer, SimpleStackTest) {
SizedStackTest<128>();
}
+#if !defined(_WIN32)
+// FIXME: It's a bit hard to write multi-line death test expectations
+// in a portable way. Anyways, this should just be turned into a lit test.
TEST(AddressSanitizer, ManyStackObjectsTest) {
char XXX[10];
char YYY[20];
@@ -486,6 +495,47 @@ TEST(AddressSanitizer, ManyStackObjectsTest) {
Ident(YYY);
EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ");
}
+#endif
+
+#if 0 // This test requires online symbolizer.
+// Moved to lit_tests/stack-oob-frames.cc.
+// Reenable here once we have online symbolizer by default.
+NOINLINE static void Frame0(int frame, char *a, char *b, char *c) {
+ char d[4] = {0};
+ char *D = Ident(d);
+ switch (frame) {
+ case 3: a[5]++; break;
+ case 2: b[5]++; break;
+ case 1: c[5]++; break;
+ case 0: D[5]++; break;
+ }
+}
+NOINLINE static void Frame1(int frame, char *a, char *b) {
+ char c[4] = {0}; Frame0(frame, a, b, c);
+ break_optimization(0);
+}
+NOINLINE static void Frame2(int frame, char *a) {
+ char b[4] = {0}; Frame1(frame, a, b);
+ break_optimization(0);
+}
+NOINLINE static void Frame3(int frame) {
+ char a[4] = {0}; Frame2(frame, a);
+ break_optimization(0);
+}
+
+TEST(AddressSanitizer, GuiltyStackFrame0Test) {
+ EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0");
+}
+TEST(AddressSanitizer, GuiltyStackFrame1Test) {
+ EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1");
+}
+TEST(AddressSanitizer, GuiltyStackFrame2Test) {
+ EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2");
+}
+TEST(AddressSanitizer, GuiltyStackFrame3Test) {
+ EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3");
+}
+#endif
NOINLINE void LongJmpFunc1(jmp_buf buf) {
// create three red zones for these two stack objects.
@@ -498,6 +548,24 @@ NOINLINE void LongJmpFunc1(jmp_buf buf) {
longjmp(buf, 1);
}
+NOINLINE void TouchStackFunc() {
+ int a[100]; // long array will intersect with redzones from LongJmpFunc1.
+ int *A = Ident(a);
+ for (int i = 0; i < 100; i++)
+ A[i] = i*i;
+}
+
+// Test that we handle longjmp and do not report false positives on stack.
+TEST(AddressSanitizer, LongJmpTest) {
+ static jmp_buf buf;
+ if (!setjmp(buf)) {
+ LongJmpFunc1(buf);
+ } else {
+ TouchStackFunc();
+ }
+}
+
+#if !defined(_WIN32) // Only basic longjmp is available on Windows.
NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) {
// create three red zones for these two stack objects.
int a;
@@ -531,25 +599,10 @@ NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) {
siglongjmp(buf, 1);
}
-
-NOINLINE void TouchStackFunc() {
- int a[100]; // long array will intersect with redzones from LongJmpFunc1.
- int *A = Ident(a);
- for (int i = 0; i < 100; i++)
- A[i] = i*i;
-}
-
-// Test that we handle longjmp and do not report fals positives on stack.
-TEST(AddressSanitizer, LongJmpTest) {
- static jmp_buf buf;
- if (!setjmp(buf)) {
- LongJmpFunc1(buf);
- } else {
- TouchStackFunc();
- }
-}
-
-#if not defined(__ANDROID__)
+#if !defined(__ANDROID__) && \
+ !defined(__powerpc64__) && !defined(__powerpc__)
+// Does not work on Power:
+// https://code.google.com/p/address-sanitizer/issues/detail?id=185
TEST(AddressSanitizer, BuiltinLongJmpTest) {
static jmp_buf buf;
if (!__builtin_setjmp((void**)buf)) {
@@ -558,7 +611,8 @@ TEST(AddressSanitizer, BuiltinLongJmpTest) {
TouchStackFunc();
}
}
-#endif // not defined(__ANDROID__)
+#endif // !defined(__ANDROID__) && !defined(__powerpc64__) &&
+ // !defined(__powerpc__)
TEST(AddressSanitizer, UnderscopeLongJmpTest) {
static jmp_buf buf;
@@ -577,8 +631,10 @@ TEST(AddressSanitizer, SigLongJmpTest) {
TouchStackFunc();
}
}
+#endif
-#ifdef __EXCEPTIONS
+// FIXME: Why does clang-cl define __EXCEPTIONS?
+#if defined(__EXCEPTIONS) && !defined(_WIN32)
NOINLINE void ThrowFunc() {
// create three red zones for these two stack objects.
int a;
@@ -626,7 +682,8 @@ TEST(AddressSanitizer, ThreadStackReuseTest) {
PTHREAD_JOIN(t, 0);
}
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i686__) || defined(__x86_64__)
+#include <emmintrin.h>
TEST(AddressSanitizer, Store128Test) {
char *a = Ident((char*)malloc(Ident(12)));
char *p = a;
@@ -644,11 +701,19 @@ TEST(AddressSanitizer, Store128Test) {
}
#endif
+// FIXME: All tests that use this function should be turned into lit tests.
string RightOOBErrorMessage(int oob_distance, bool is_write) {
assert(oob_distance >= 0);
char expected_str[100];
- sprintf(expected_str, ASAN_PCRE_DOTALL "%s.*located %d bytes to the right",
- is_write ? "WRITE" : "READ", oob_distance);
+ sprintf(expected_str, ASAN_PCRE_DOTALL
+#if !GTEST_USES_SIMPLE_RE
+ "buffer-overflow.*%s.*"
+#endif
+ "located %d bytes to the right",
+#if !GTEST_USES_SIMPLE_RE
+ is_write ? "WRITE" : "READ",
+#endif
+ oob_distance);
return string(expected_str);
}
@@ -660,11 +725,19 @@ string RightOOBReadMessage(int oob_distance) {
return RightOOBErrorMessage(oob_distance, /*is_write*/false);
}
+// FIXME: All tests that use this function should be turned into lit tests.
string LeftOOBErrorMessage(int oob_distance, bool is_write) {
assert(oob_distance > 0);
char expected_str[100];
- sprintf(expected_str, ASAN_PCRE_DOTALL "%s.*located %d bytes to the left",
- is_write ? "WRITE" : "READ", oob_distance);
+ sprintf(expected_str,
+#if !GTEST_USES_SIMPLE_RE
+ ASAN_PCRE_DOTALL "%s.*"
+#endif
+ "located %d bytes to the left",
+#if !GTEST_USES_SIMPLE_RE
+ is_write ? "WRITE" : "READ",
+#endif
+ oob_distance);
return string(expected_str);
}
@@ -818,6 +891,7 @@ void ThreadedTestSpawn() {
PTHREAD_JOIN(t, 0);
}
+#if !defined(_WIN32) // FIXME: This should be a lit test.
TEST(AddressSanitizer, ThreadedTest) {
EXPECT_DEATH(ThreadedTestSpawn(),
ASAN_PCRE_DOTALL
@@ -825,6 +899,7 @@ TEST(AddressSanitizer, ThreadedTest) {
".*Thread T.*created"
".*Thread T.*created");
}
+#endif
void *ThreadedTestFunc(void *unused) {
// Check if prctl(PR_SET_NAME) is supported. Return if not.
@@ -855,7 +930,11 @@ TEST(AddressSanitizer, ShadowGapTest) {
#if SANITIZER_WORDSIZE == 32
char *addr = (char*)0x22000000;
#else
+# if defined(__powerpc64__)
+ char *addr = (char*)0x024000800000;
+# else
char *addr = (char*)0x0000100000080000;
+# endif
#endif
EXPECT_DEATH(*addr = 1, "AddressSanitizer: SEGV on unknown");
}
@@ -1015,7 +1094,8 @@ TEST(AddressSanitizer, PthreadExitTest) {
}
}
-#ifdef __EXCEPTIONS
+// FIXME: Why does clang-cl define __EXCEPTIONS?
+#if defined(__EXCEPTIONS) && !defined(_WIN32)
NOINLINE static void StackReuseAndException() {
int large_stack[1000];
Ident(large_stack);
@@ -1033,12 +1113,14 @@ TEST(AddressSanitizer, DISABLED_StressStackReuseAndExceptionsTest) {
}
#endif
+#if !defined(_WIN32)
TEST(AddressSanitizer, MlockTest) {
EXPECT_EQ(0, mlockall(MCL_CURRENT));
EXPECT_EQ(0, mlock((void*)0x12345, 0x5678));
EXPECT_EQ(0, munlockall());
EXPECT_EQ(0, munlock((void*)0x987, 0x654));
}
+#endif
struct LargeStruct {
int foo[100];
@@ -1051,19 +1133,26 @@ TEST(AddressSanitizer, LargeStructCopyTest) {
*Ident(&a) = *Ident(&a);
}
-ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
-static void NoAddressSafety() {
+ATTRIBUTE_NO_SANITIZE_ADDRESS
+static void NoSanitizeAddress() {
char *foo = new char[10];
Ident(foo)[10] = 0;
delete [] foo;
}
-TEST(AddressSanitizer, AttributeNoAddressSafetyTest) {
- Ident(NoAddressSafety)();
+TEST(AddressSanitizer, AttributeNoSanitizeAddressTest) {
+ Ident(NoSanitizeAddress)();
}
-// It doesn't work on Android, as calls to new/delete go through malloc/free.
-#if !defined(ANDROID) && !defined(__ANDROID__)
+// The new/delete/etc mismatch checks don't work on Android,
+// as calls to new/delete go through malloc/free.
+// OS X support is tracked here:
+// https://code.google.com/p/address-sanitizer/issues/detail?id=131
+// Windows support is tracked here:
+// https://code.google.com/p/address-sanitizer/issues/detail?id=309
+#if !defined(ANDROID) && !defined(__ANDROID__) && \
+ !defined(__APPLE__) && \
+ !defined(_WIN32)
static string MismatchStr(const string &str) {
return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str;
}
@@ -1177,3 +1266,18 @@ TEST(AddressSanitizer, LongDoubleNegativeTest) {
memcpy(Ident(&a), Ident(&b), sizeof(long double));
memcpy(Ident(&c), Ident(&b), sizeof(long double));
}
+
+#if !defined(_WIN32)
+TEST(AddressSanitizer, pthread_getschedparam) {
+ int policy;
+ struct sched_param param;
+ EXPECT_DEATH(
+ pthread_getschedparam(pthread_self(), &policy, Ident(&param) + 2),
+ "AddressSanitizer: stack-buffer-.*flow");
+ EXPECT_DEATH(
+ pthread_getschedparam(pthread_self(), Ident(&policy) - 1, &param),
+ "AddressSanitizer: stack-buffer-.*flow");
+ int res = pthread_getschedparam(pthread_self(), &policy, &param);
+ ASSERT_EQ(0, res);
+}
+#endif
diff --git a/gcc/testsuite/g++.dg/asan/asan_test_config.h b/gcc/testsuite/g++.dg/asan/asan_test_config.h
index b039dc8beb2..97b7dd0873f 100644
--- a/gcc/testsuite/g++.dg/asan/asan_test_config.h
+++ b/gcc/testsuite/g++.dg/asan/asan_test_config.h
@@ -19,12 +19,6 @@
#include <string>
#include <map>
-#if ASAN_USE_DEJAGNU_GTEST
-# include "dejagnu-gtest.h"
-#else
-# include "gtest/gtest.h"
-#endif
-
using std::string;
using std::vector;
using std::map;
@@ -42,11 +36,11 @@ using std::map;
#endif
#ifndef ASAN_NEEDS_SEGV
-# error "please define ASAN_NEEDS_SEGV"
-#endif
-
-#ifndef ASAN_LOW_MEMORY
-# define ASAN_LOW_MEMORY 0
+# if defined(_WIN32)
+# define ASAN_NEEDS_SEGV 0
+# else
+# define ASAN_NEEDS_SEGV 1
+# endif
#endif
#ifndef ASAN_AVOID_EXPENSIVE_TESTS
diff --git a/gcc/testsuite/g++.dg/asan/asan_test_utils.h b/gcc/testsuite/g++.dg/asan/asan_test_utils.h
index 5ea7d4449f4..f5281a6a9e3 100644
--- a/gcc/testsuite/g++.dg/asan/asan_test_utils.h
+++ b/gcc/testsuite/g++.dg/asan/asan_test_utils.h
@@ -12,24 +12,28 @@
#ifndef ASAN_TEST_UTILS_H
#define ASAN_TEST_UTILS_H
-#if !defined(ASAN_EXTERNAL_TEST_CONFIG)
+#if !defined(SANITIZER_EXTERNAL_TEST_CONFIG)
# define INCLUDED_FROM_ASAN_TEST_UTILS_H
# include "asan_test_config.h"
# undef INCLUDED_FROM_ASAN_TEST_UTILS_H
#endif
#include "sanitizer_test_utils.h"
+#include "sanitizer_pthread_wrappers.h"
+
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
-#include <strings.h>
-#include <pthread.h>
#include <stdint.h>
-#include <setjmp.h>
#include <assert.h>
#include <algorithm>
-#include <sys/mman.h>
+
+#if !defined(_WIN32)
+# include <strings.h>
+# include <sys/mman.h>
+# include <setjmp.h>
+#endif
#ifdef __linux__
# include <sys/prctl.h>
@@ -39,18 +43,10 @@
#include <unistd.h>
#endif
-#if defined(__i386__) || defined(__x86_64__)
-#include <emmintrin.h>
-#endif
-
-#ifndef __APPLE__
+#if !defined(__APPLE__) && !defined(__FreeBSD__)
#include <malloc.h>
#endif
-// Check that pthread_create/pthread_join return success.
-#define PTHREAD_CREATE(a, b, c, d) ASSERT_EQ(0, pthread_create(a, b, c, d))
-#define PTHREAD_JOIN(a, b) ASSERT_EQ(0, pthread_join(a, b))
-
#if ASAN_HAS_EXCEPTIONS
# define ASAN_THROW(x) throw (x)
#else
diff --git a/gcc/testsuite/g++.dg/asan/deep-stack-uaf-1.C b/gcc/testsuite/g++.dg/asan/deep-stack-uaf-1.C
index 6ffec4c4c6d..4ae91f7ea52 100644
--- a/gcc/testsuite/g++.dg/asan/deep-stack-uaf-1.C
+++ b/gcc/testsuite/g++.dg/asan/deep-stack-uaf-1.C
@@ -34,6 +34,6 @@ int main() {
}
// { dg-output "ERROR: AddressSanitizer:? heap-use-after-free on address.*(\n|\r\n|\r)" }
-// { dg-output " #37 0x\[0-9a-f\]+ (in \[^\n\r]*DeepFree\[^\n\r]*36|\[(\]).*(\n|\r\n|\r)" }
-// { dg-output " #99 0x\[0-9a-f\]+ (in \[^\n\r]*DeepFree\[^\n\r]*98|\[(\]).*(\n|\r\n|\r)" }
-// { dg-output " #116 0x\[0-9a-f\]+ (in \[^\n\r]*DeepFree\[^\n\r]*115|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #37 0x\[0-9a-f\]+ +(in \[^\n\r]*DeepFree\[^\n\r]*36|\[(\]).*(\n|\r\n|\r)" }
+// { dg-output " #99 0x\[0-9a-f\]+ +(in \[^\n\r]*DeepFree\[^\n\r]*98|\[(\]).*(\n|\r\n|\r)" }
+// { dg-output " #116 0x\[0-9a-f\]+ +(in \[^\n\r]*DeepFree\[^\n\r]*115|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
diff --git a/gcc/testsuite/g++.dg/asan/deep-tail-call-1.C b/gcc/testsuite/g++.dg/asan/deep-tail-call-1.C
index 5ac63f1d54b..a7cdb9fd613 100644
--- a/gcc/testsuite/g++.dg/asan/deep-tail-call-1.C
+++ b/gcc/testsuite/g++.dg/asan/deep-tail-call-1.C
@@ -15,8 +15,8 @@ int main() {
}
// { dg-output "AddressSanitizer:? global-buffer-overflow.*(\n|\r\n|\r)" }
-// { dg-output " #0 0x\[0-9a-f\]+ (in \[^\n\r]*call4\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #1 0x\[0-9a-f\]+ (in \[^\n\r]*call3\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #2 0x\[0-9a-f\]+ (in \[^\n\r]*call2\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #3 0x\[0-9a-f\]+ (in \[^\n\r]*call1\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #4 0x\[0-9a-f\]+ (in \[^\n\r]*main\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #0 0x\[0-9a-f\]+ +(in \[^\n\r]*call4\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #1 0x\[0-9a-f\]+ +(in \[^\n\r]*call3\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #2 0x\[0-9a-f\]+ +(in \[^\n\r]*call2\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #3 0x\[0-9a-f\]+ +(in \[^\n\r]*call1\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #4 0x\[0-9a-f\]+ +(in \[^\n\r]*main\[^\n\r]*|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
diff --git a/gcc/testsuite/g++.dg/asan/default-options-1.C b/gcc/testsuite/g++.dg/asan/default-options-1.C
index cdcb370d141..dc818917ddc 100644
--- a/gcc/testsuite/g++.dg/asan/default-options-1.C
+++ b/gcc/testsuite/g++.dg/asan/default-options-1.C
@@ -12,4 +12,4 @@ int main() {
return 0;
}
-// { dg-output "Using the defaults from __asan_default_options:.* foo=bar.*(\n|\r\n|\r)" }
+// { dg-output "WARNING: found 1 unrecognized flag\\(s\\):(\n|\r\n|\r).*foo(\n|\r\n|\r)" }
diff --git a/gcc/testsuite/g++.dg/asan/large-func-test-1.C b/gcc/testsuite/g++.dg/asan/large-func-test-1.C
index 9344c3bcff6..0c1b6ce0c61 100644
--- a/gcc/testsuite/g++.dg/asan/large-func-test-1.C
+++ b/gcc/testsuite/g++.dg/asan/large-func-test-1.C
@@ -38,8 +38,8 @@ int main() {
// { dg-output "ERROR: AddressSanitizer:? heap-buffer-overflow on address\[^\n\r]*" }
// { dg-output "0x\[0-9a-f\]+ at pc 0x\[0-9a-f\]+ bp 0x\[0-9a-f\]+ sp 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" }
// { dg-output "\[^\n\r]*READ of size 4 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #0 0x\[0-9a-f\]+ (in \[^\n\r]*LargeFunction\[^\n\r]*(large-func-test-1.C:18|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" }
+// { dg-output " #0 0x\[0-9a-f\]+ +(in \[^\n\r]*LargeFunction\[^\n\r]*(large-func-test-1.C:18|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" }
// { dg-output "\[^\n\r]*0x\[0-9a-f\]+ is located 44 bytes to the right of 400-byte region.*(\n|\r\n|\r)" }
// { dg-output "\[^\n\r]*allocated by thread T0 here:\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #0( 0x\[0-9a-f\]+ (in _*(interceptor_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
-// { dg-output " #1|) 0x\[0-9a-f\]+ (in (operator new|_*_Zn\[aw\]\[mj\])|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #0( 0x\[0-9a-f\]+ +(in _*(interceptor_|)malloc|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
+// { dg-output " #1|) 0x\[0-9a-f\]+ +(in (operator new|(wrap|)_*_Zn\[aw\]\[mj\])|\[(\])\[^\n\r]*(\n|\r\n|\r)" }
diff --git a/gcc/testsuite/g++.dg/asan/pr69276.C b/gcc/testsuite/g++.dg/asan/pr69276.C
new file mode 100644
index 00000000000..ff43650d29e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/asan/pr69276.C
@@ -0,0 +1,38 @@
+/* { dg-do run } */
+/* { dg-shouldfail "asan" } */
+/* { dg-additional-options "-O0 -fno-lto" } */
+
+#include <stdlib.h>
+
+typedef __SIZE_TYPE__ size_t;
+inline void * operator new (size_t, void *p) { return p; }
+
+
+struct vec
+{
+ int size;
+};
+
+struct vnull
+{
+ operator vec() { return vec(); }
+};
+vnull vNULL;
+
+struct A
+{
+ A(): value2 (vNULL), value3 (vNULL) {}
+ int value;
+ vec value2;
+ vec value3;
+};
+
+int main()
+{
+ int *array = (int *)malloc (sizeof (int) * 1);
+ A *a = new (array) A ();
+ free (array);
+}
+
+/* { dg-output "ERROR: AddressSanitizer: heap-buffer-overflow.*(\n|\r\n|\r)" } */
+/* { dg-output " #0 0x\[0-9a-f\]+ +in A::A()" } */
diff --git a/gcc/testsuite/g++.dg/asan/sanitizer_pthread_wrappers.h b/gcc/testsuite/g++.dg/asan/sanitizer_pthread_wrappers.h
new file mode 100644
index 00000000000..2e71ecbb333
--- /dev/null
+++ b/gcc/testsuite/g++.dg/asan/sanitizer_pthread_wrappers.h
@@ -0,0 +1,64 @@
+//===-- sanitizer_pthread_wrappers.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of *Sanitizer runtime.
+// It provides handy wrappers for thread manipulation, that:
+// a) assert on any failure rather than returning an error code
+// b) defines pthread-like interface on platforms where <pthread.h>
+// is not supplied by default.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PTHREAD_WRAPPERS_H
+#define SANITIZER_PTHREAD_WRAPPERS_H
+
+#include "sanitizer_test_utils.h"
+
+#if !defined(_WIN32)
+# include <pthread.h>
+// Simply forward the arguments and check that the pthread functions succeed.
+# define PTHREAD_CREATE(a, b, c, d) ASSERT_EQ(0, pthread_create(a, b, c, d))
+# define PTHREAD_JOIN(a, b) ASSERT_EQ(0, pthread_join(a, b))
+#else
+typedef HANDLE pthread_t;
+
+struct PthreadHelperCreateThreadInfo {
+ void *(*start_routine)(void *);
+ void *arg;
+};
+
+inline DWORD WINAPI PthreadHelperThreadProc(void *arg) {
+ PthreadHelperCreateThreadInfo *start_data =
+ reinterpret_cast<PthreadHelperCreateThreadInfo*>(arg);
+ void *ret = (start_data->start_routine)(start_data->arg);
+ delete start_data;
+ return (DWORD)ret;
+}
+
+inline void PTHREAD_CREATE(pthread_t *thread, void *attr,
+ void *(*start_routine)(void *), void *arg) {
+ ASSERT_EQ(0, attr) << "Thread attributes are not supported yet.";
+ PthreadHelperCreateThreadInfo *data = new PthreadHelperCreateThreadInfo;
+ data->start_routine = start_routine;
+ data->arg = arg;
+ *thread = CreateThread(0, 0, PthreadHelperThreadProc, data, 0, 0);
+ ASSERT_NE(nullptr, *thread) << "Failed to create a thread.";
+}
+
+inline void PTHREAD_JOIN(pthread_t thread, void **value_ptr) {
+ ASSERT_EQ(0, value_ptr) << "Nonzero value_ptr is not supported yet.";
+ ASSERT_EQ(WAIT_OBJECT_0, WaitForSingleObject(thread, INFINITE));
+ ASSERT_NE(0, CloseHandle(thread));
+}
+
+inline void pthread_exit(void *retval) {
+ ASSERT_EQ(0, retval) << "Nonzero retval is not supported yet.";
+ ExitThread((DWORD)retval);
+}
+#endif // _WIN32
+
+#endif // SANITIZER_PTHREAD_WRAPPERS_H
diff --git a/gcc/testsuite/g++.dg/asan/sanitizer_test_config.h b/gcc/testsuite/g++.dg/asan/sanitizer_test_config.h
new file mode 100644
index 00000000000..a819a4097a9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/asan/sanitizer_test_config.h
@@ -0,0 +1,28 @@
+//===-- sanitizer_test_config.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of *Sanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+#if !defined(INCLUDED_FROM_SANITIZER_TEST_UTILS_H)
+# error "This file should be included into sanitizer_test_utils.h only"
+#endif
+
+#ifndef SANITIZER_TEST_CONFIG_H
+#define SANITIZER_TEST_CONFIG_H
+
+#include <vector>
+#include <string>
+#include <map>
+
+#if SANITIZER_USE_DEJAGNU_GTEST
+# include "dejagnu-gtest.h"
+#else
+# include "gtest/gtest.h"
+#endif
+
+#endif // SANITIZER_TEST_CONFIG_H
diff --git a/gcc/testsuite/g++.dg/asan/sanitizer_test_utils.h b/gcc/testsuite/g++.dg/asan/sanitizer_test_utils.h
index 99720c354fa..b118de1d346 100644
--- a/gcc/testsuite/g++.dg/asan/sanitizer_test_utils.h
+++ b/gcc/testsuite/g++.dg/asan/sanitizer_test_utils.h
@@ -14,32 +14,47 @@
#define SANITIZER_TEST_UTILS_H
#if defined(_WIN32)
-typedef unsigned __int8 uint8_t;
-typedef unsigned __int16 uint16_t;
-typedef unsigned __int32 uint32_t;
-typedef unsigned __int64 uint64_t;
-typedef __int8 int8_t;
-typedef __int16 int16_t;
-typedef __int32 int32_t;
-typedef __int64 int64_t;
+// <windows.h> should always be the first include on Windows.
+# include <windows.h>
+// MSVS headers define max/min as macros, so std::max/min misbehave.
+# undef max
+# undef min
+#endif
+
+#if !defined(SANITIZER_EXTERNAL_TEST_CONFIG)
+# define INCLUDED_FROM_SANITIZER_TEST_UTILS_H
+# include "sanitizer_test_config.h"
+# undef INCLUDED_FROM_SANITIZER_TEST_UTILS_H
+#endif
+
+#include <stdint.h>
+
+#if defined(_MSC_VER)
# define NOINLINE __declspec(noinline)
-# define USED
-#else // defined(_WIN32)
+#else // defined(_MSC_VER)
# define NOINLINE __attribute__((noinline))
+#endif // defined(_MSC_VER)
+
+#if !defined(_MSC_VER) || defined(__clang__)
+# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
-#include <stdint.h>
-#endif // defined(_WIN32)
+#else
+# define UNUSED
+# define USED
+#endif
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
-#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
-# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
+#ifndef ATTRIBUTE_NO_SANITIZE_ADDRESS
+# if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS \
__attribute__((no_sanitize_address))
-#else
-# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
-#endif
+# else
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS
+# endif
+#endif // ATTRIBUTE_NO_SANITIZE_ADDRESS
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@@ -49,7 +64,9 @@ typedef __int64 int64_t;
 // Make the compiler think that something is going on there.
inline void break_optimization(void *arg) {
+#if !defined(_WIN32) || defined(__clang__)
__asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
}
// This function returns its parameter but in such a way that compiler
@@ -74,5 +91,28 @@ static inline uint32_t my_rand() {
return my_rand_r(&global_seed);
}
+// Set availability of platform-specific functions.
+
+#if !defined(__APPLE__) && !defined(ANDROID) && !defined(__ANDROID__) && !defined(_WIN32)
+# define SANITIZER_TEST_HAS_POSIX_MEMALIGN 1
+#else
+# define SANITIZER_TEST_HAS_POSIX_MEMALIGN 0
+#endif
+
+#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(_WIN32)
+# define SANITIZER_TEST_HAS_MEMALIGN 1
+# define SANITIZER_TEST_HAS_PVALLOC 1
+# define SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE 1
+#else
+# define SANITIZER_TEST_HAS_MEMALIGN 0
+# define SANITIZER_TEST_HAS_PVALLOC 0
+# define SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE 0
+#endif
+
+#if !defined(__APPLE__)
+# define SANITIZER_TEST_HAS_STRNLEN 1
+#else
+# define SANITIZER_TEST_HAS_STRNLEN 0
+#endif
#endif // SANITIZER_TEST_UTILS_H
diff --git a/gcc/testsuite/g++.dg/asan/symbolize-callback-1.C b/gcc/testsuite/g++.dg/asan/symbolize-callback-1.C
deleted file mode 100644
index 9978958f9e2..00000000000
--- a/gcc/testsuite/g++.dg/asan/symbolize-callback-1.C
+++ /dev/null
@@ -1,21 +0,0 @@
-// { dg-do run }
-// { dg-skip-if "" { *-*-* } { "*" } { "-O2" } }
-// { dg-options "-fno-builtin-malloc -fno-builtin-free" }
-// { dg-shouldfail "asan" }
-
-#include <stdio.h>
-#include <stdlib.h>
-
-extern "C"
-bool __asan_symbolize(const void *, char *out_buffer, int out_size) {
- snprintf(out_buffer, out_size, "MySymbolizer");
- return true;
-}
-
-int main() {
- char *x = (char*)malloc(10);
- free(x);
- return x[5];
-}
-
-// { dg-output "MySymbolizer" }
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-shift1.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-shift1.C
new file mode 100644
index 00000000000..1f4ee73d1b7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-shift1.C
@@ -0,0 +1,73 @@
+// { dg-do compile { target c++11 } }
+
+constexpr int
+fn1 (int i, int j)
+{
+ return i << j; // { dg-error "is negative" }
+}
+
+constexpr int i1 = fn1 (1, -1);
+
+constexpr int
+fn2 (int i, int j)
+{
+ return i << j; // { dg-error "is >= than the precision of the left operand" }
+}
+
+constexpr int i2 = fn2 (1, 200);
+
+constexpr int
+fn3 (int i, int j)
+{
+ return i << j; // { dg-error "is negative" }
+}
+
+constexpr int i3 = fn3 (-1, 2);
+
+constexpr int
+fn4 (int i, int j)
+{
+ return i << j; // { dg-error "overflows" }
+}
+
+constexpr int i4 = fn4 (__INT_MAX__, 2);
+
+constexpr int
+fn5 (int i, int j)
+{
+ return i << j;
+}
+
+constexpr int i5 = fn5 (__INT_MAX__, 1);
+
+constexpr int
+fn6 (unsigned int i, unsigned int j)
+{
+ return i << j; // { dg-error "is >= than the precision of the left operand" }
+}
+
+constexpr int i6 = fn6 (1, -1);
+
+constexpr int
+fn7 (int i, int j)
+{
+ return i >> j; // { dg-error "is negative" }
+}
+
+constexpr int i7 = fn7 (1, -1);
+
+constexpr int
+fn8 (int i, int j)
+{
+ return i >> j;
+}
+
+constexpr int i8 = fn8 (-1, 1);
+
+constexpr int
+fn9 (int i, int j)
+{
+ return i >> j; // { dg-error "is >= than the precision of the left operand" }
+}
+
+constexpr int i9 = fn9 (1, 200);
diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-shift1.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-shift1.C
new file mode 100644
index 00000000000..a739fd2dfae
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-shift1.C
@@ -0,0 +1,9 @@
+// { dg-do compile { target c++14 } }
+
+constexpr int p = 1;
+constexpr __PTRDIFF_TYPE__ bar (int a)
+{
+ return ((__PTRDIFF_TYPE__) &p) << a; // { dg-error "is not a constant expression" }
+}
+constexpr __PTRDIFF_TYPE__ r = bar (2);
+constexpr __PTRDIFF_TYPE__ s = bar (0); // { dg-error "conversion from pointer" }
diff --git a/gcc/testsuite/g++.dg/ubsan/align-1.C b/gcc/testsuite/g++.dg/ubsan/align-1.C
new file mode 100644
index 00000000000..65b1222a5c0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/align-1.C
@@ -0,0 +1,27 @@
+// { dg-do run }
+// { dg-options "-fsanitize=alignment -Wall -Wno-unused-variable -std=c++11" }
+
+typedef const long int L;
+int a = 1;
+L b = 2;
+
+int
+main (void)
+{
+ int *p = &a;
+ L *l = &b;
+
+ int &r = *p;
+ auto &r2 = *p;
+ L &lr = *l;
+
+ // Try an rvalue reference.
+ auto &&r3 = *p;
+
+ // Don't evaluate the reference initializer twice.
+ int i = 1;
+ int *q = &i;
+ int &qr = ++*q;
+ if (i != 2)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/align-2.C b/gcc/testsuite/g++.dg/ubsan/align-2.C
new file mode 100644
index 00000000000..3e4f5485d02
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/align-2.C
@@ -0,0 +1,45 @@
+// Limit this to known non-strict alignment targets.
+// { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } }
+// { dg-options "-fsanitize=alignment -Wall -Wno-unused-variable -std=c++11" }
+
+typedef const long int L;
+struct S { long int l; char buf[1 + sizeof (int) + sizeof (L)]; } s;
+struct T { char a; int b; long int c; } __attribute__((packed));
+struct U { long int a; struct T b; } u;
+
+int
+main (void)
+{
+ int *p = (int *) &s.buf[1];
+ L *l = (L *) &s.buf[1 + sizeof(int)];
+
+ int &r = *p;
+ auto &r2 = *p;
+ L &lr = *l;
+
+ // Try an rvalue reference.
+ auto &&r3 = *p;
+
+ // Don't evaluate the reference initializer twice.
+ int i = 1;
+ int *q = &i;
+ int &qr = ++*q;
+ if (i != 2)
+ __builtin_abort ();
+
+ int *s = &u.b.b;
+ L *t = &u.b.c;
+ int &r4 = *s;
+ auto &r5 = *s;
+ L &lr2 = *t;
+ auto &&r6 = *s;
+}
+
+// { dg-output "\.C:16:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" }
+// { dg-output "\.C:17:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" }
+// { dg-output "\.C:18:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'const L', which requires \[48] byte alignment.*" }
+// { dg-output "\.C:21:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" }
+// { dg-output "\.C:32:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" }
+// { dg-output "\.C:33:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment.*" }
+// { dg-output "\.C:34:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'const L', which requires \[48] byte alignment.*" }
+// { dg-output "\.C:35:\[0-9]*:\[\^\n\r]*reference binding to misaligned address 0x\[0-9a-fA-F]* for type 'int', which requires 4 byte alignment" }
diff --git a/gcc/testsuite/g++.dg/ubsan/align-3.C b/gcc/testsuite/g++.dg/ubsan/align-3.C
new file mode 100644
index 00000000000..1cc40fc4c9b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/align-3.C
@@ -0,0 +1,45 @@
+// Limit this to known non-strict alignment targets.
+// { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } }
+// { dg-options "-fsanitize=alignment -Wall -Wno-unused-variable -std=c++11" }
+
+#include <new>
+
+struct U
+{
+ int a;
+ void foo () {}
+};
+struct V
+{
+ V () : a (0) {};
+ ~V () { a = 0; };
+ int a;
+ void foo () {}
+ static void bar () {}
+};
+struct S { long int l; char buf[1 + sizeof (U) + 2 * sizeof (V)]; } s;
+
+int
+main (void)
+{
+ U *p = (U *) &s.buf[1];
+ p->foo ();
+ char *q = &s.buf[1 + sizeof (U)];
+ V *u = new (q) V;
+ u->a = 1;
+ u->~V ();
+ V *v = new (&s.buf[1 + sizeof (U) + sizeof (V)]) V;
+ v->foo ();
+ v->bar (); // We don't instrument this right now.
+ v->~V ();
+}
+
+// { dg-output "\.C:26:\[0-9]*:\[\^\n\r]*member call on misaligned address 0x\[0-9a-fA-F]* for type 'struct U', which requires 4 byte alignment.*" }
+// { dg-output "\.C:28:\[0-9]*:\[\^\n\r]*constructor call on misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:14:\[0-9]*:\[\^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:29:\[0-9]*:\[\^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:30:\[0-9]*:\[\^\n\r]*member call on misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:15:\[0-9]*:\[\^\n\r]*member access within misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:31:\[0-9]*:\[\^\n\r]*constructor call on misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:32:\[0-9]*:\[\^\n\r]*member call on misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment.*" }
+// { dg-output "\.C:34:\[0-9]*:\[\^\n\r]*member call on misaligned address 0x\[0-9a-fA-F]* for type 'struct V', which requires 4 byte alignment" }
diff --git a/gcc/testsuite/g++.dg/ubsan/attrib-1.C b/gcc/testsuite/g++.dg/ubsan/attrib-1.C
new file mode 100644
index 00000000000..f701d02dad3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/attrib-1.C
@@ -0,0 +1,27 @@
+// { dg-do compile }
+// { dg-options "-fsanitize=undefined -Wall -Wno-unused-variable -std=c++11" }
+
+typedef const long int L;
+
+__attribute__((no_sanitize_undefined)) void
+foo (int *p, L *l)
+{
+ int &r = *p;
+ auto &r2 = *p;
+ L &lr = *l;
+ auto &&r3 = *p;
+}
+
+struct U
+{
+ int a;
+ void foo () {}
+};
+
+__attribute__((no_sanitize_undefined)) void
+bar (U *p)
+{
+ p->foo ();
+}
+
+// { dg-final { scan-assembler-not "__ubsan_handle" } }
diff --git a/gcc/testsuite/g++.dg/ubsan/cxx11-shift-1.C b/gcc/testsuite/g++.dg/ubsan/cxx11-shift-1.C
index f2b2cbd1c2f..431eab12d3e 100644
--- a/gcc/testsuite/g++.dg/ubsan/cxx11-shift-1.C
+++ b/gcc/testsuite/g++.dg/ubsan/cxx11-shift-1.C
@@ -1,18 +1,10 @@
-/* { dg-do run { target c++11 } } */
-/* { dg-options "-fsanitize=shift -w" } */
-
-#include <stdio.h>
+/* { dg-do run } */
+/* { dg-options "-fsanitize=shift -w -fno-sanitize-recover=shift -std=c++11" } */
int
main (void)
{
- fputs ("UBSAN TEST START\n", stderr);
-
int a = 1;
a <<= 31;
-
- fputs ("UBSAN TEST END\n", stderr);
return 0;
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/g++.dg/ubsan/cxx11-shift-2.C b/gcc/testsuite/g++.dg/ubsan/cxx11-shift-2.C
index 867908aef79..fbc16dfd3d8 100644
--- a/gcc/testsuite/g++.dg/ubsan/cxx11-shift-2.C
+++ b/gcc/testsuite/g++.dg/ubsan/cxx11-shift-2.C
@@ -1,5 +1,5 @@
-/* { dg-do run { target c++11 } } */
-/* { dg-options "-fsanitize=shift -w" } */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=shift -w -std=c++11" } */
int
main (void)
diff --git a/gcc/testsuite/g++.dg/ubsan/cxx1y-vla.C b/gcc/testsuite/g++.dg/ubsan/cxx1y-vla.C
deleted file mode 100644
index ca1e922d69a..00000000000
--- a/gcc/testsuite/g++.dg/ubsan/cxx1y-vla.C
+++ /dev/null
@@ -1,13 +0,0 @@
-/* { dg-do run { target c++1y } } */
-/* { dg-options "-fsanitize=vla-bound -Wall -Wno-unused-variable" } */
-/* { dg-shouldfail "ubsan" } */
-
-int
-main (void)
-{
- int y = -18;
- int a[y];
- return 0;
-}
-
-/* { dg-output "terminate called after throwing an instance" } */
diff --git a/gcc/testsuite/g++.dg/ubsan/div-by-zero-1.C b/gcc/testsuite/g++.dg/ubsan/div-by-zero-1.C
index 88acfa1517e..946f2e615f5 100644
--- a/gcc/testsuite/g++.dg/ubsan/div-by-zero-1.C
+++ b/gcc/testsuite/g++.dg/ubsan/div-by-zero-1.C
@@ -1,10 +1,14 @@
/* { dg-do compile } */
-/* { dg-options "-fsanitize=integer-divide-by-zero -w" } */
+/* { dg-options "-fsanitize=integer-divide-by-zero" } */
+
+/* TODO: We expect an error on the invalid case here, because that
+ must be a constant-expression. This will be fixed when we have
+ proper delayed folding. */
void
foo (int i)
{
switch (i)
- case 0 * (1 / 0): /* { dg-error "is not a constant expression" } */
- ;
+ case 0 * (1 / 0): /* { dg-warning "division by zero" } */
+ ; /* { dg-error "division by zero" "" { xfail *-*-* } 10 } */
}
diff --git a/gcc/testsuite/g++.dg/ubsan/float-cast-overflow-bf.C b/gcc/testsuite/g++.dg/ubsan/float-cast-overflow-bf.C
new file mode 100644
index 00000000000..f01c576c3db
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/float-cast-overflow-bf.C
@@ -0,0 +1,62 @@
+// { dg-do run { target { int32 } } }
+// { dg-options "-fsanitize=float-cast-overflow" }
+
+#define INT_MAX __INT_MAX__
+#define INT_MIN (-__INT_MAX__ - 1)
+#define UINT_MAX 2 * (unsigned) __INT_MAX__ + 1
+
+struct S
+{
+ int i:1;
+} s;
+
+struct T
+{
+ unsigned int i:1;
+} t;
+
+int
+main (void)
+{
+ volatile double d;
+
+#define CHECK_BOUNDARY(VAR, VAL) \
+ (VAR) = (VAL) - 1.5; \
+ (VAR) = (VAL) - 1.0; \
+ (VAR) = (VAL) - 0.75; \
+ (VAR) = (VAL) - 0.5; \
+ (VAR) = (VAL) - 0.0000001; \
+ (VAR) = (VAL) - 0.0; \
+ (VAR) = (VAL); \
+ (VAR) = (VAL) + 0.0; \
+ (VAR) = (VAL) + 0.0000001; \
+ (VAR) = (VAL) + 0.5; \
+ (VAR) = (VAL) + 0.75; \
+ (VAR) = (VAL) + 1.0; \
+ (VAR) = (VAL) + 1.5;
+
+ /* Signed bit-field. (INT_MIN, INT_MAX) is valid. */
+ d = INT_MIN;
+ CHECK_BOUNDARY (s.i, d);
+ d = 0.0;
+ CHECK_BOUNDARY (s.i, d);
+ d = INT_MAX;
+ CHECK_BOUNDARY (s.i, d);
+
+ /* Unsigned bit-field. (0, UINT_MAX) is valid. */
+ d = UINT_MAX;
+ CHECK_BOUNDARY (t.i, d);
+ d = 0.0;
+ CHECK_BOUNDARY (t.i, d);
+
+ return 0;
+}
+
+/* { dg-output "value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type" } */
diff --git a/gcc/testsuite/g++.dg/ubsan/null-1.C b/gcc/testsuite/g++.dg/ubsan/null-1.C
new file mode 100644
index 00000000000..04e8490fc08
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/null-1.C
@@ -0,0 +1,30 @@
+// { dg-do run }
+// { dg-options "-fsanitize=null -Wall -Wno-unused-variable -std=c++11" }
+
+typedef const long int L;
+
+int
+main (void)
+{
+ int *p = 0;
+ L *l = 0;
+
+ int &r = *p;
+ auto &r2 = *p;
+ L &lr = *l;
+
+ // Try an rvalue reference.
+ auto &&r3 = *p;
+
+ // Don't evaluate the reference initializer twice.
+ int i = 1;
+ int *q = &i;
+ int &qr = ++*q;
+ if (i != 2)
+ __builtin_abort ();
+}
+
+// { dg-output "reference binding to null pointer of type 'int'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'int'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'const L'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'int'" }
diff --git a/gcc/testsuite/g++.dg/ubsan/null-2.C b/gcc/testsuite/g++.dg/ubsan/null-2.C
new file mode 100644
index 00000000000..88f387e17c9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/null-2.C
@@ -0,0 +1,39 @@
+// Limit this to known non-strict alignment targets.
+// { dg-do run { target { i?86-*-linux* x86_64-*-linux* } } }
+// { dg-options "-fsanitize=null -Wall -Wno-unused-variable -std=c++11" }
+
+#include <new>
+
+struct U
+{
+ int a;
+ void foo () {}
+};
+struct V
+{
+ V () {};
+ ~V () {};
+ int a;
+ void foo () {}
+ static void bar () {}
+};
+struct S { long int l; char buf[1 + sizeof (U) + 2 * sizeof (V)]; } s;
+
+int
+main (void)
+{
+ U *p = 0;
+ p->foo ();
+ char *q = 0;
+ V *u = new (q) V;
+ u->~V ();
+ V *v = new (q) V;
+ v->foo ();
+ v->bar (); // We don't instrument this right now.
+ v->~V ();
+}
+
+// { dg-output "\.C:26:\[0-9]*:\[\^\n\r]*member call on null pointer of type 'struct U'.*" }
+// { dg-output "\.C:29:\[0-9]*:\[\^\n\r]*member call on null pointer of type 'struct V'.*" }
+// { dg-output "\.C:31:\[0-9]*:\[\^\n\r]*member call on null pointer of type 'struct V'.*" }
+// { dg-output "\.C:33:\[0-9]*:\[\^\n\r]*member call on null pointer of type 'struct V'" }
diff --git a/gcc/testsuite/g++.dg/ubsan/null-3.C b/gcc/testsuite/g++.dg/ubsan/null-3.C
new file mode 100644
index 00000000000..0dfb879990b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/null-3.C
@@ -0,0 +1,20 @@
+// { dg-do run }
+// { dg-options "-fsanitize=null" }
+
+int
+main (void)
+{
+ int *p = 0;
+
+ int &r1 = *p;
+ int &r2 = *p;
+ int &r3 = *p;
+ int &r4 = *p;
+ int &r5 = *p;
+}
+
+// { dg-output "reference binding to null pointer of type 'int'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'int'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'int'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'int'(\n|\r\n|\r)" }
+// { dg-output "\[^\n\r]*reference binding to null pointer of type 'int'" }
diff --git a/gcc/testsuite/g++.dg/ubsan/null-4.C b/gcc/testsuite/g++.dg/ubsan/null-4.C
new file mode 100644
index 00000000000..397f0b41851
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/null-4.C
@@ -0,0 +1,20 @@
+// { dg-do run }
+// { dg-options "-O -fsanitize=null -fno-sanitize-recover=null -fdump-tree-sanopt-details" }
+// { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } }
+// { dg-shouldfail "ubsan" }
+
+int
+main (void)
+{
+ int *p = 0;
+
+ int &r1 = *p;
+ int &r2 = *p;
+ int &r3 = *p;
+ int &r4 = *p;
+ int &r5 = *p;
+}
+
+// { dg-output "reference binding to null pointer of type 'int'" }
+// { dg-final { scan-tree-dump-times "Optimizing" 4 "sanopt"} }
+// { dg-final { cleanup-tree-dump "sanopt" } }
diff --git a/gcc/testsuite/g++.dg/ubsan/null-5.C b/gcc/testsuite/g++.dg/ubsan/null-5.C
new file mode 100644
index 00000000000..fbda96ff471
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/null-5.C
@@ -0,0 +1,19 @@
+// { dg-do run }
+// { dg-options "-O -fsanitize=null -fsanitize-undefined-trap-on-error -fdump-tree-sanopt-details" }
+// { dg-skip-if "" { *-*-* } { "-flto -fno-fat-lto-objects" } }
+// { dg-shouldfail "ubsan" }
+
+int
+main (void)
+{
+ int *p = 0;
+
+ int &r1 = *p;
+ int &r2 = *p;
+ int &r3 = *p;
+ int &r4 = *p;
+ int &r5 = *p;
+}
+
+// { dg-final { scan-tree-dump-times "Optimizing" 4 "sanopt"} }
+// { dg-final { cleanup-tree-dump "sanopt" } }
diff --git a/gcc/testsuite/g++.dg/ubsan/null-6.C b/gcc/testsuite/g++.dg/ubsan/null-6.C
new file mode 100644
index 00000000000..2efe5c2b8cc
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/null-6.C
@@ -0,0 +1,5 @@
+// PR c++/67941
+// { dg-do run { target c++11 } }
+// { dg-options -fsanitize=null }
+
+int main(){ (+[](){})(); }
diff --git a/gcc/testsuite/g++.dg/ubsan/object-size-1.C b/gcc/testsuite/g++.dg/ubsan/object-size-1.C
new file mode 100644
index 00000000000..e2aad4670bb
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/object-size-1.C
@@ -0,0 +1,18 @@
+// { dg-do compile }
+// { dg-options "-fsanitize=undefined -fpermissive" }
+
+struct T { int c; char d[]; };
+
+struct T t = { 1, "a" }; // { dg-warning "initializer-string for array of chars is too long" }
+
+int
+baz (int i)
+{
+ return t.d[i];
+}
+
+int
+main (void)
+{
+ baz (3);
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr63813.C b/gcc/testsuite/g++.dg/ubsan/pr63813.C
new file mode 100644
index 00000000000..6ca5b2d18c9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr63813.C
@@ -0,0 +1,12 @@
+// PR sanitizer/63813
+// { dg-do compile }
+// { dg-options "-fsanitize=undefined -O1" }
+
+struct A {};
+struct B { long foo () const; A &bar () const; };
+
+A &
+B::bar () const
+{
+ return *reinterpret_cast <A *> (foo ());
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr63956.C b/gcc/testsuite/g++.dg/ubsan/pr63956.C
new file mode 100644
index 00000000000..185a719c8c4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr63956.C
@@ -0,0 +1,172 @@
+// PR sanitizer/63956
+// { dg-do compile }
+// { dg-options "-std=c++14 -fsanitize=undefined,float-divide-by-zero,float-cast-overflow" }
+
+#define SA(X) static_assert((X),#X)
+#define INT_MIN (-__INT_MAX__ - 1)
+
+constexpr int
+fn1 (int a, int b)
+{
+ if (b != 2)
+ a <<= b;
+ return a;
+}
+
+constexpr int i1 = fn1 (5, 3);
+constexpr int i2 = fn1 (5, -2); // { dg-error "is negative" }
+constexpr int i3 = fn1 (5, sizeof (int) * __CHAR_BIT__); // { dg-error "is >= than the precision of the left operand" }
+constexpr int i4 = fn1 (5, 256); // { dg-error "is >= than the precision of the left operand" }
+constexpr int i5 = fn1 (5, 2);
+constexpr int i6 = fn1 (-2, 4); // { dg-error "is negative" }
+constexpr int i7 = fn1 (0, 2);
+
+SA (i1 == 40);
+SA (i5 == 5);
+SA (i7 == 0);
+
+constexpr int
+fn2 (int a, int b)
+{
+ if (b != 2)
+ a >>= b;
+ return a;
+}
+
+constexpr int j1 = fn2 (4, 1);
+constexpr int j2 = fn2 (4, -1); // { dg-error "is negative" }
+constexpr int j3 = fn2 (10, sizeof (int) * __CHAR_BIT__); // { dg-error "is >= than the precision of the left operand" }
+constexpr int j4 = fn2 (1, 256); // { dg-error "is >= than the precision of the left operand" }
+constexpr int j5 = fn2 (5, 2);
+constexpr int j6 = fn2 (-2, 4);
+constexpr int j7 = fn2 (0, 4);
+
+SA (j1 == 2);
+SA (j5 == 5);
+SA (j7 == 0);
+
+constexpr int
+fn3 (int a, int b)
+{
+ if (b != 2)
+ a = a / b;
+ return a;
+}
+
+constexpr int k1 = fn3 (8, 4);
+constexpr int k2 = fn3 (7, 0); // { dg-error "is not a constant expression" }
+constexpr int k3 = fn3 (INT_MIN, -1); // { dg-error "overflow in constant expression" }
+
+SA (k1 == 2);
+
+constexpr float
+fn4 (float a, float b)
+{
+ if (b != 2.0)
+ a = a / b;
+ return a;
+}
+
+constexpr float l1 = fn4 (5.0, 3.0);
+constexpr float l2 = fn4 (7.0, 0.0); // { dg-error "is not a constant expression" }
+
+constexpr int
+fn5 (const int *a, int b)
+{
+ if (b != 2)
+ b = a[b];
+ return b;
+}
+
+constexpr int m1[4] = { 1, 2, 3, 4 };
+constexpr int m2 = fn5 (m1, 3);
+constexpr int m3 = fn5 (m1, 4); // { dg-error "array subscript out of bound" }
+
+constexpr int
+fn6 (const int &a, int b)
+{
+ if (b != 2)
+ b = a;
+ return b;
+}
+
+constexpr int
+fn7 (const int *a, int b)
+{
+ if (b != 3)
+ return fn6 (*a, b);
+ return 7;
+}
+
+constexpr int n1 = 7;
+constexpr int n2 = fn7 (&n1, 5);
+constexpr int n3 = fn7 ((const int *) 0, 8); // { dg-error "is not a constant expression" }
+
+constexpr int
+fn8 (int i)
+{
+ constexpr int g[10] = { };
+ return g[i];
+}
+
+constexpr int o1 = fn8 (9);
+constexpr int o2 = fn8 (10); // { dg-error "array subscript out of bound" }
+
+constexpr int
+fn9 (int a, int b)
+{
+ if (b != 0)
+ return a + b;
+ return a;
+}
+
+constexpr int p1 = fn9 (42, 7);
+constexpr int p2 = fn9 (__INT_MAX__, 1); // { dg-error "overflow in constant expression" }
+constexpr int p3 = fn9 (__INT_MAX__, -1);
+constexpr int p4 = fn9 (INT_MIN, 1);
+constexpr int p5 = fn9 (INT_MIN, -1); // { dg-error "overflow in constant expression" }
+
+SA (p1 == 49);
+SA (p3 == __INT_MAX__ - 1);
+SA (p4 == INT_MIN + 1);
+
+constexpr int
+fn10 (int a, int b)
+{
+ if (b != 0)
+ return a * b;
+ return a;
+}
+
+constexpr int q1 = fn10 (10, 10);
+constexpr int q2 = fn10 (__INT_MAX__, 2); // { dg-error "overflow in constant expression" }
+constexpr int q3 = fn10 (INT_MIN, 2); // { dg-error "overflow in constant expression" }
+constexpr int q4 = fn10 (-1, -1);
+
+SA (q1 == 100);
+SA (q4 == 1);
+
+constexpr int
+fn11 (double d)
+{
+ int i = d;
+ if (i != 0)
+ return i;
+ return i * 2;
+}
+
+constexpr int r1 = fn11 (3.4);
+constexpr int r2 = fn11 (__builtin_inf ()); // { dg-error "overflow in constant expression" }
+
+constexpr int
+fn12 (int i)
+{
+ if (i == 42)
+ __builtin_unreachable (); // { dg-error "is not a constant expression" }
+ return i + 10;
+}
+
+constexpr int s1 = fn12 (1);
+constexpr int s2 = fn12 (42);
+
+SA (s1 == 11);
diff --git a/gcc/testsuite/g++.dg/ubsan/pr64632.C b/gcc/testsuite/g++.dg/ubsan/pr64632.C
new file mode 100644
index 00000000000..0d2a7aa9aef
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr64632.C
@@ -0,0 +1,23 @@
+// PR sanitizer/64632
+// { dg-do run }
+// { dg-options "-fsanitize=vptr -fno-sanitize-recover=vptr" }
+
+struct S
+{
+ S () : a(0) {}
+ int a;
+ int f () { return a; }
+ virtual int v () { return 0; }
+};
+
+struct X : virtual S
+{
+ int v () { return 2; }
+};
+
+int
+main ()
+{
+ X x;
+ return x.f ();
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr64984.C b/gcc/testsuite/g++.dg/ubsan/pr64984.C
new file mode 100644
index 00000000000..34f8926cc5d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr64984.C
@@ -0,0 +1,76 @@
+// PR sanitizer/64984
+// { dg-do compile }
+// { dg-options "-fsanitize=vptr -std=gnu++11" }
+
+template <typename X, X> struct K
+{
+ static constexpr X v = 0;
+ typedef K t;
+};
+template <typename...> struct A;
+template <typename X, typename Y>
+struct A<X, Y> : Y
+{
+};
+template <typename X> X M ();
+template <typename...> struct B;
+template <typename X, typename Y>
+struct B<X, Y> : K<int, noexcept (static_cast<X>(M<Y>()))>
+{
+};
+template <typename X, typename... Y>
+struct G : A<int, B<X, Y...>>::t
+{
+};
+template <typename X> struct J : G<X, X&&>
+{
+};
+template <typename X> X&& foo (X&);
+template <typename X> X&& bar (X&&);
+template <typename X> struct P
+{
+ P (X& x) : q (x) {}
+ X q;
+};
+template <typename...> struct Q;
+template <typename X>
+struct Q<X> : P<X>
+{
+ typedef P<X> r;
+ X& s (Q&);
+ Q (X& x) : r (x) {}
+ Q (Q&& x) noexcept (J<X>::v) : r (foo<X>(s (x)))
+ {
+ }
+};
+template <typename... X> struct I : Q<X...>
+{
+ I ();
+ I (X&... x) : Q<X...>(x...)
+ {
+ }
+};
+template <typename... X>
+I<X&&...> baz (X&&... x)
+{
+ return I <X&&...> (foo<X>(x)...);
+}
+template <typename X> struct F
+{
+ int p;
+ void operator[] (X&& x)
+ {
+ baz (bar (x));
+ }
+};
+struct U
+{
+ virtual ~U ();
+};
+
+int
+main ()
+{
+ F<U> m;
+ m[U ()];
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr65000.C b/gcc/testsuite/g++.dg/ubsan/pr65000.C
new file mode 100644
index 00000000000..298547e0dc6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr65000.C
@@ -0,0 +1,14 @@
+// PR sanitizer/65000
+// { dg-do compile }
+// { dg-options "-O1 -fsanitize=undefined -fno-sanitize-recover" }
+
+struct B { virtual ~B () {} void foo (); };
+struct C { virtual ~C (); };
+struct A : public virtual C {};
+struct D : A { ~D () { d.foo (); } B d; };
+
+void
+bar ()
+{
+ D a;
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr65019.C b/gcc/testsuite/g++.dg/ubsan/pr65019.C
new file mode 100644
index 00000000000..a7f21a7d241
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr65019.C
@@ -0,0 +1,24 @@
+// PR sanitizer/65019
+// { dg-do compile }
+// { dg-options "-fsanitize=alignment,object-size,vptr -std=c++11 -O2 -fcompare-debug" }
+
+struct A { };
+struct B { };
+struct C final {
+ C (const A &, int);
+ static B *foo (const A &, int = 1);
+ virtual ~C ();
+ void *c;
+};
+
+B *
+C::foo (const A &x, int y)
+{
+ C *d = new C (x, y);
+ if (d->c == nullptr)
+ delete d;
+}
+
+C::~C ()
+{
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr65583.C b/gcc/testsuite/g++.dg/ubsan/pr65583.C
new file mode 100644
index 00000000000..4e1149e9cb6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr65583.C
@@ -0,0 +1,140 @@
+// PR sanitizer/65583
+// { dg-do compile }
+// { dg-options "-std=c++11 -fsanitize=undefined" }
+
+namespace std
+{
+ inline namespace __cxx11
+ {
+ }
+ template < typename > class allocator;
+ template < class _CharT > struct char_traits;
+ namespace __cxx11
+ {
+ template < typename _CharT, typename _Traits =
+ char_traits < _CharT >, typename _Alloc =
+ allocator < _CharT > >class basic_string;
+ typedef basic_string < char >string;
+ }
+}
+namespace std
+{
+ template < typename _Tp, _Tp __v > struct integral_constant
+ {
+ static constexpr _Tp value = __v;
+ };
+ typedef integral_constant < bool, true > true_type;
+}
+namespace __gnu_cxx
+{
+ template < typename _Tp > class new_allocator
+ {
+ public:
+ typedef long unsigned size_type;
+ typedef _Tp value_type;
+ template < typename _Tp1 > struct rebind
+ {
+ typedef new_allocator < _Tp1 > other;
+ };
+ };
+}
+namespace std
+{
+ template < typename _Tp > using __allocator_base =
+ __gnu_cxx::new_allocator < _Tp >;
+ template < typename _Tp > class allocator:public __allocator_base < _Tp >
+ {
+ };
+ template < typename _Alloc, typename _Tp > class __alloctr_rebind_helper
+ {
+ template < typename _Alloc2, typename _Tp2 >
+ static constexpr true_type _S_chk (typename _Alloc2::template rebind <
+ _Tp2 >::other *);
+ public:
+ using __type = decltype (_S_chk < _Alloc, _Tp > (nullptr));
+ };
+ template < typename _Alloc, typename _Tp, bool =
+ __alloctr_rebind_helper < _Alloc,
+ _Tp >::__type::value > struct __alloctr_rebind;
+ template < typename _Alloc, typename _Tp > struct __alloctr_rebind <_Alloc,
+ _Tp, true >
+ {
+ typedef typename _Alloc::template rebind < _Tp >::other __type;
+ };
+ template < typename _Alloc > struct allocator_traits
+ {
+ typedef typename _Alloc::value_type value_type;
+ static value_type *_S_pointer_helper (...);
+ typedef decltype (_S_pointer_helper ((_Alloc *) 0)) __pointer;
+ typedef __pointer pointer;
+ template < typename _Tp >
+ static typename _Tp::size_type _S_size_type_helper (_Tp *);
+ typedef decltype (_S_size_type_helper ((_Alloc *) 0)) __size_type;
+ typedef __size_type size_type;
+ template < typename _Tp > using rebind_alloc =
+ typename __alloctr_rebind < _Alloc, _Tp >::__type;
+ };
+}
+namespace __gnu_cxx
+{
+ template < typename _Alloc > struct __alloc_traits:std::allocator_traits <
+ _Alloc >
+ {
+ typedef std::allocator_traits < _Alloc > _Base_type;
+ template < typename _Tp > struct rebind
+ {
+ typedef typename _Base_type::template rebind_alloc < _Tp > other;
+ };
+ };
+}
+namespace std
+{
+ namespace __cxx11
+ {
+ template < typename _CharT, typename _Traits,
+ typename _Alloc > class basic_string
+ {
+ typedef typename __gnu_cxx::__alloc_traits < _Alloc >::template rebind <
+ _CharT >::other _Char_alloc_type;
+ typedef __gnu_cxx::__alloc_traits < _Char_alloc_type > _Alloc_traits;
+ typedef _Char_alloc_type allocator_type;
+ typedef typename _Alloc_traits::size_type size_type;
+ typedef typename _Alloc_traits::pointer pointer;
+ struct _Alloc_hider:allocator_type
+ {
+ _Alloc_hider (pointer __dat, const _Alloc & __a)
+ {
+ }
+ };
+ _Alloc_hider _M_dataplus;
+ union
+ {
+ size_type _M_allocated_capacity;
+ };
+ pointer _M_local_data ()
+ {
+ }
+ void _M_dispose ()
+ {
+ _M_destroy (_M_allocated_capacity);
+ }
+ void _M_destroy (size_type __size) throw ()
+ {
+ }
+ public:
+ basic_string (const _CharT * __s, const _Alloc & __a = _Alloc ()):_M_dataplus (_M_local_data (),
+ __a)
+ {
+ _M_dispose ();
+ }
+ };
+ }
+ class FileHandle
+ {
+ std::string fname;
+ FileHandle (const char *fname);
+ };
+ FileHandle::FileHandle (const char *fname):fname (fname)
+ {
+ }
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr66452.C b/gcc/testsuite/g++.dg/ubsan/pr66452.C
new file mode 100644
index 00000000000..473543ca899
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr66452.C
@@ -0,0 +1,16 @@
+// PR sanitizer/66452
+// { dg-do compile }
+// { dg-options "-Wall -fsanitize=undefined" }
+
+class A {
+public:
+ A(int);
+};
+class B {
+ A m_fn1() const;
+};
+A B::m_fn1() const {
+ for (int i = 0;i;)
+ ;
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/pr66977.C b/gcc/testsuite/g++.dg/ubsan/pr66977.C
new file mode 100644
index 00000000000..3ab8d902f10
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/pr66977.C
@@ -0,0 +1,27 @@
+// PR sanitizer/66977
+// { dg-do compile }
+// { dg-options "-fsanitize=shift -Wmaybe-uninitialized -O" }
+
+class Foo {
+
+private:
+
+ int a_;
+
+public:
+
+ Foo (int a) : a_(a) {};
+
+ inline int get_a () { return a_; };
+};
+
+int bar (int (Foo::*get)()) {
+ Foo *A = new Foo(1);
+ int result = (A->*get)();
+ delete (A);
+ return result;
+}
+
+int main () {
+ return bar (&Foo::get_a);
+}
diff --git a/gcc/testsuite/g++.dg/ubsan/return-2.C b/gcc/testsuite/g++.dg/ubsan/return-2.C
index 546f1781057..90c48b5b1d5 100644
--- a/gcc/testsuite/g++.dg/ubsan/return-2.C
+++ b/gcc/testsuite/g++.dg/ubsan/return-2.C
@@ -1,7 +1,5 @@
// { dg-do run }
-// { dg-options "-fsanitize=return" }
-
-#include <stdio.h>
+// { dg-options "-fsanitize=return -fno-sanitize-recover=return" }
struct S { S (); ~S (); };
@@ -22,12 +20,6 @@ foo (int x)
int
main ()
{
- fputs ("UBSAN TEST START\n", stderr);
-
foo (1);
foo (14);
-
- fputs ("UBSAN TEST END\n", stderr);
}
-
-/* { dg-output "UBSAN TEST START(\n|\r\n|\r)UBSAN TEST END" } */
diff --git a/gcc/testsuite/g++.dg/ubsan/return-3.C b/gcc/testsuite/g++.dg/ubsan/return-3.C
new file mode 100644
index 00000000000..95f345deb32
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/return-3.C
@@ -0,0 +1,27 @@
+// { dg-do compile }
+// { dg-options "-fsanitize=return" }
+
+struct S { S (); ~S (); };
+
+S::S () {}
+S::~S () {}
+
+__attribute__((no_sanitize_undefined))
+int
+foo (int x)
+{
+ S a;
+ {
+ S b;
+ if (x)
+ return 1;
+ }
+}
+
+int
+main ()
+{
+ foo (0);
+}
+
+// { dg-final { scan-assembler-not "__ubsan_handle" } }
diff --git a/gcc/testsuite/g++.dg/ubsan/shift-1.C b/gcc/testsuite/g++.dg/ubsan/shift-1.C
new file mode 100644
index 00000000000..05e049e82ea
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ubsan/shift-1.C
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=shift -w" } */
+/* { dg-shouldfail "ubsan" } */
+
+int
+foo (int x)
+{
+ /* None of the following should pass. */
+ switch (x)
+ {
+ case 1 >> -1: /* { dg-error "is not a constant expression" "" { xfail { *-*-* } } } */
+ case -1 >> -1: /* { dg-error "is not a constant expression" "" { xfail { *-*-* } } } */
+ case 1 << -1: /* { dg-error "is not a constant expression" "" { xfail { *-*-* } } } */
+ case -1 << -1: /* { dg-error "is not a constant expression" "" { xfail { *-*-* } } } */
+ return 1;
+ }
+ return 0;
+}
+
+int
+bar (int x)
+{
+ /* None of the following should pass. */
+ switch (x)
+ {
+ case -1 >> 200: /* { dg-error "is not a constant expression" "" { xfail { *-*-* } } } */
+ case 1 << 200: /* { dg-error "is not a constant expression" "" { xfail { *-*-* } } } */
+ return 1;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/bounds-1.c b/gcc/testsuite/gcc.dg/ubsan/bounds-1.c
new file mode 100644
index 00000000000..6f3cd2d53af
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/bounds-1.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=bounds" } */
+
+struct T { int c; char d[]; } t = { 1, "abcdefg" };
+
+int
+baz (int i)
+{
+ return t.d[i];
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/bounds-2.c b/gcc/testsuite/gcc.dg/ubsan/bounds-2.c
new file mode 100644
index 00000000000..0f4263da722
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/bounds-2.c
@@ -0,0 +1,19 @@
+/* PR sanitizer/65280 */
+/* { dg-do run } */
+/* { dg-options "-fsanitize=bounds" } */
+
+void
+foo (int n, int (*b)[n])
+{
+ (*b)[n] = 1;
+}
+
+int
+main ()
+{
+ int a[20];
+ foo (3, (int (*)[3]) &a);
+ return 0;
+}
+
+/* { dg-output "index 3 out of bounds for type 'int \\\[\\\*\\\]'" } */
diff --git a/gcc/testsuite/gcc.dg/ubsan/c-shift-2.c b/gcc/testsuite/gcc.dg/ubsan/c-shift-2.c
new file mode 100644
index 00000000000..beb0dbec7fd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/c-shift-2.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=shift -w" } */
+/* { dg-shouldfail "ubsan" } */
+
+int
+foo (int x)
+{
+ /* None of the following should pass. */
+ switch (x)
+ {
+ case 1 >> -1: /* { dg-error "case label does not reduce to an integer constant" } */
+ case -1 >> -1: /* { dg-error "case label does not reduce to an integer constant" } */
+ case 1 << -1: /* { dg-error "case label does not reduce to an integer constant" } */
+ case -1 << -1: /* { dg-error "case label does not reduce to an integer constant" } */
+ return 1;
+ }
+ return 0;
+}
+
+int
+bar (int x)
+{
+ /* None of the following should pass. */
+ switch (x)
+ {
+ case -1 >> 200: /* { dg-error "case label does not reduce to an integer constant" } */
+ case 1 << 200: /* { dg-error "case label does not reduce to an integer constant" } */
+ return 1;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-1.c b/gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-1.c
new file mode 100644
index 00000000000..51910da2c80
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-1.c
@@ -0,0 +1,9 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=shift -fwrapv -w -std=c99" } */
+
+int
+main (void)
+{
+ int a = -42;
+ a << 1;
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-2.c b/gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-2.c
new file mode 100644
index 00000000000..93087d1e817
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/c99-wrapv-shift-2.c
@@ -0,0 +1,9 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=shift -fwrapv -w -std=c99" } */
+
+int
+main (void)
+{
+ int a = 1;
+ a <<= 31;
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-atomic.c b/gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-atomic.c
new file mode 100644
index 00000000000..0a4fa0165b0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-atomic.c
@@ -0,0 +1,171 @@
+/* PR sanitizer/69099 */
+/* { dg-do compile } */
+/* { dg-options "-std=c11 -pedantic-errors -fsanitize=float-cast-overflow" } */
+/* This is gcc.dg/atomic/c11-atomic-exec-2.c. */
+
+extern void abort (void);
+extern void exit (int);
+
+#define CMPLX(X, Y) __builtin_complex ((X), (Y))
+
+#define TEST_COMPOUND(TYPE, LHSVAL, RHSVAL, OP) \
+ do \
+ { \
+ static volatile _Atomic (TYPE) a = (TYPE) (LHSVAL); \
+ if ((a OP##= (RHSVAL)) != (TYPE) ((TYPE) (LHSVAL) OP (RHSVAL))) \
+ abort (); \
+ if (a != (TYPE) ((TYPE) (LHSVAL) OP (RHSVAL))) \
+ abort (); \
+ } \
+ while (0)
+
+#define TEST_COMPOUND_ARITH(LHSVAL, RHSVAL, OP) \
+ do \
+ { \
+ TEST_COMPOUND (_Bool, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (char, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed char, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned char, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed short, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned short, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed int, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned int, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed long long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned long long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (float, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (double, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (long double, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (_Complex float, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (_Complex double, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (_Complex long double, (LHSVAL), (RHSVAL), OP); \
+ } \
+ while (0)
+
+#define TEST_COMPOUND_INT(LHSVAL, RHSVAL, OP) \
+ do \
+ { \
+ TEST_COMPOUND (_Bool, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (char, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed char, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned char, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed short, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned short, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed int, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned int, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (signed long long, (LHSVAL), (RHSVAL), OP); \
+ TEST_COMPOUND (unsigned long long, (LHSVAL), (RHSVAL), OP); \
+ } \
+ while (0)
+
+static void
+test_mult (void)
+{
+ TEST_COMPOUND_ARITH (1, 2, *);
+ TEST_COMPOUND_ARITH (-3, 5, *);
+ TEST_COMPOUND_ARITH (-7, -20, *);
+ TEST_COMPOUND_ARITH (1.25, 3.5, *);
+ TEST_COMPOUND_ARITH (CMPLX (1.5, 2.5), CMPLX (3.5, 4.5), *);
+ TEST_COMPOUND_ARITH (CMPLX (1.5, 2.5), 2, *);
+}
+
+static void
+test_div (void)
+{
+ TEST_COMPOUND_ARITH (1, 2, /);
+ TEST_COMPOUND_ARITH (-6, 3, /);
+ TEST_COMPOUND_ARITH (-70, -10, /);
+ TEST_COMPOUND_ARITH (1.25, 2.5, /);
+ TEST_COMPOUND_ARITH (CMPLX (1.0, 1.0), CMPLX (0.5, 0.5), /);
+ TEST_COMPOUND_ARITH (CMPLX (1.5, 2.5), 2, /);
+}
+
+static void
+test_mod (void)
+{
+ TEST_COMPOUND_INT (1, 2, %);
+ TEST_COMPOUND_INT (-3, 5, %);
+ TEST_COMPOUND_INT (-7, -2, %);
+}
+
+static void
+test_plus (void)
+{
+ TEST_COMPOUND_ARITH (1, 2, +);
+ TEST_COMPOUND_ARITH (-3, 5, +);
+ TEST_COMPOUND_ARITH (-7, -20, +);
+ TEST_COMPOUND_ARITH (1.25, 3.5, +);
+ TEST_COMPOUND_ARITH (CMPLX (1.5, 2.5), CMPLX (3.5, 4.5), +);
+ TEST_COMPOUND_ARITH (CMPLX (1.5, 2.5), 2, +);
+ static int ia[2];
+ TEST_COMPOUND (int *, &ia[1], 1, +);
+ TEST_COMPOUND (int *, &ia[1], -1, +);
+}
+
+static void
+test_minus (void)
+{
+ TEST_COMPOUND_ARITH (1, 2, -);
+ TEST_COMPOUND_ARITH (-3, 5, -);
+ TEST_COMPOUND_ARITH (-7, -20, -);
+ TEST_COMPOUND_ARITH (3.5, 1.25, -);
+ TEST_COMPOUND_ARITH (CMPLX (3.5, 4.5), CMPLX (1.5, 2.5), -);
+ TEST_COMPOUND_ARITH (CMPLX (3.5, 2.5), 2, -);
+ static int ia[2];
+ TEST_COMPOUND (int *, &ia[1], 1, -);
+ TEST_COMPOUND (int *, &ia[1], -1, -);
+}
+
+static void
+test_lshift (void)
+{
+ TEST_COMPOUND_INT (1, 7, <<);
+ TEST_COMPOUND_INT (15, 3, <<);
+}
+
+static void
+test_rshift (void)
+{
+ TEST_COMPOUND_INT (1, 1, >>);
+ TEST_COMPOUND_INT (127, 4, >>);
+}
+
+static void
+test_and (void)
+{
+ TEST_COMPOUND_INT (0x1234, 0x7856, &);
+ TEST_COMPOUND_INT (-1, 0x12345678, &);
+}
+
+static void
+test_xor (void)
+{
+ TEST_COMPOUND_INT (0x1234, 0x7856, ^);
+ TEST_COMPOUND_INT (-1, 0x12345678, ^);
+}
+
+static void
+test_or (void)
+{
+ TEST_COMPOUND_INT (0x1234, 0x7856, |);
+ TEST_COMPOUND_INT (-12345, 0x12345678, |);
+}
+
+int
+main (void)
+{
+ test_mult ();
+ test_div ();
+ test_mod ();
+ test_plus ();
+ test_minus ();
+ test_lshift ();
+ test_rshift ();
+ test_and ();
+ test_xor ();
+ test_or ();
+ exit (0);
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-bf.c b/gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-bf.c
new file mode 100644
index 00000000000..16268603375
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/float-cast-overflow-bf.c
@@ -0,0 +1,72 @@
+/* { dg-do run } */
+/* { dg-options "-fsanitize=float-cast-overflow" } */
+
+struct
+{
+ int i:1;
+} s;
+
+struct
+{
+ unsigned int i:1;
+} t;
+
+int
+main (void)
+{
+ volatile double d;
+
+#define CHECK_BOUNDARY(VAR, VAL) \
+ (VAR) = (VAL) - 1.5; \
+ (VAR) = (VAL) - 1.0; \
+ (VAR) = (VAL) - 0.5; \
+ (VAR) = (VAL) - 0.0000001; \
+ (VAR) = (VAL) - 0.0; \
+ (VAR) = (VAL); \
+ (VAR) = (VAL) + 0.0; \
+ (VAR) = (VAL) + 0.0000001; \
+ (VAR) = (VAL) + 0.5; \
+ (VAR) = (VAL) + 1.0; \
+ (VAR) = (VAL) + 1.5;
+
+ /* Signed bit-field. (-1, 0) is valid. */
+ d = -1.0;
+ CHECK_BOUNDARY (s.i, d);
+ d = 0.0;
+ CHECK_BOUNDARY (s.i, d);
+ d = 1.0;
+ CHECK_BOUNDARY (s.i, d);
+
+ /* Unsigned bit-field. (0, 1) is valid. */
+ d = -1.0;
+ CHECK_BOUNDARY (t.i, d);
+ d = 0.0;
+ CHECK_BOUNDARY (t.i, d);
+ d = 1.0;
+ CHECK_BOUNDARY (t.i, d);
+
+ return 0;
+}
+
+/* { dg-output "value -2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*value 2.5 is outside the range of representable values of type" } */
diff --git a/gcc/testsuite/gcc.dg/ubsan/object-size-9.c b/gcc/testsuite/gcc.dg/ubsan/object-size-9.c
new file mode 100644
index 00000000000..e0a2980b426
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/object-size-9.c
@@ -0,0 +1,24 @@
+/* { dg-do run } */
+/* { dg-skip-if "" { *-*-* } { "*" } { "-O2" } } */
+/* { dg-options "-fsanitize=undefined" } */
+
+struct T { int c; char d[]; };
+struct T t = { 1, "a" };
+
+int
+baz (int i)
+{
+ return t.d[i];
+}
+
+int
+main (void)
+{
+ baz (2);
+ return 0;
+}
+
+/* { dg-output "load of address \[^\n\r]* with insufficient space for an object of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*note: pointer points here\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*\\^" } */
diff --git a/gcc/testsuite/gcc.dg/ubsan/pr63690.c b/gcc/testsuite/gcc.dg/ubsan/pr63690.c
new file mode 100644
index 00000000000..bcf520a25ed
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/pr63690.c
@@ -0,0 +1,9 @@
+/* PR sanitizer/63690 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined" } */
+
+void
+foo (void)
+{
+ (*"c")++;
+}
diff --git a/gcc/testsuite/gcc.dg/ubsan/pr67279.c b/gcc/testsuite/gcc.dg/ubsan/pr67279.c
new file mode 100644
index 00000000000..5b5db42f96a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/ubsan/pr67279.c
@@ -0,0 +1,14 @@
+/* PR sanitizer/67279 */
+/* { dg-do compile } */
+/* { dg-options "-fsanitize=undefined -w" } */
+
+#define INT_MIN (-__INT_MAX__ - 1)
+
+void
+foo (void)
+{
+ static int a1 = 1 << 31;
+ static int a2 = 10 << 30;
+ static int a3 = 100 << 28;
+ static int a4 = INT_MIN / -1;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/pr65624.c b/gcc/testsuite/gcc.target/aarch64/pr65624.c
new file mode 100644
index 00000000000..8eb3786a4b8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pr65624.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+
+#include <arm_neon.h>
+
+int32x4_t
+foo (void)
+{
+ int32x4_t vector_int32x4;
+ int16x4_t vector3_int16x4;
+ int16x4_t vector4_int16x4;
+ static int32_t buffer_int32x4[32];
+
+ vector_int32x4 = vld1q_s32(buffer_int32x4);
+ return vqdmlsl_lane_s16(vector_int32x4, vector3_int16x4, vector4_int16x4, 0);
+}
diff --git a/gcc/testsuite/gfortran.dg/pr69055.f90 b/gcc/testsuite/gfortran.dg/pr69055.f90
new file mode 100644
index 00000000000..48b34424608
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr69055.f90
@@ -0,0 +1,10 @@
+! { dg-do compile }
+! { dg-options "-fsanitize=float-cast-overflow" }
+
+subroutine pr69055
+ implicit none
+ integer :: n
+ real(8) :: b
+ b = huge(1.0D0)
+ n = b
+end subroutine pr69055
diff --git a/gcc/testsuite/lib/prune.exp b/gcc/testsuite/lib/prune.exp
index 679d894e267..859d25ef801 100644
--- a/gcc/testsuite/lib/prune.exp
+++ b/gcc/testsuite/lib/prune.exp
@@ -28,6 +28,7 @@ proc prune_gcc_output { text } {
regsub -all "(^|\n)\[^\n\]*(: )?At (top level|global scope):\[^\n\]*" $text "" text
regsub -all "(^|\n)\[^\n\]*: (recursively )?required \[^\n\]*" $text "" text
regsub -all "(^|\n)\[^\n\]*: . skipping \[0-9\]* instantiation contexts \[^\n\]*" $text "" text
+ regsub -all "(^|\n)\[^\n\]*: in constexpr expansion \[^\n\]*" $text "" text
regsub -all "(^|\n) inlined from \[^\n\]*" $text "" text
regsub -all "(^|\n)collect2: error: ld returned \[^\n\]*" $text "" text
regsub -all "(^|\n)collect: re(compiling|linking)\[^\n\]*" $text "" text
diff --git a/gcc/testsuite/lib/tsan-dg.exp b/gcc/testsuite/lib/tsan-dg.exp
index f313123aaaa..86bcbc03cc8 100644
--- a/gcc/testsuite/lib/tsan-dg.exp
+++ b/gcc/testsuite/lib/tsan-dg.exp
@@ -18,9 +18,9 @@
# code, 0 otherwise.
proc check_effective_target_fthread_sanitizer {} {
- return [check_no_compiler_messages faddress_sanitizer object {
- void foo (void) { }
- } "-fPIE -pie -fsanitize=thread"]
+ return [check_no_compiler_messages fsanitize_thread executable {
+ int main (void) { return 0; }
+ } "-fsanitize=thread"]
}
#
@@ -89,12 +89,12 @@ proc tsan_init { args } {
}
if [info exists ALWAYS_CXXFLAGS] {
set ALWAYS_CXXFLAGS [concat "{ldflags=$link_flags}" $ALWAYS_CXXFLAGS]
- set ALWAYS_CXXFLAGS [concat "{additional_flags=-fPIE -pie -fsanitize=thread -g}" $ALWAYS_CXXFLAGS]
+ set ALWAYS_CXXFLAGS [concat "{additional_flags=-fsanitize=thread -g}" $ALWAYS_CXXFLAGS]
} else {
if [info exists TEST_ALWAYS_FLAGS] {
- set TEST_ALWAYS_FLAGS "$link_flags -fPIE -pie -fsanitize=thread -g $TEST_ALWAYS_FLAGS"
+ set TEST_ALWAYS_FLAGS "$link_flags -fsanitize=thread -g $TEST_ALWAYS_FLAGS"
} else {
- set TEST_ALWAYS_FLAGS "$link_flags -fPIE -pie -fsanitize=thread -g"
+ set TEST_ALWAYS_FLAGS "$link_flags -fsanitize=thread -g"
}
}
if { $link_flags != "" } {
@@ -104,7 +104,7 @@ proc tsan_init { args } {
set individual_timeout 20
if [check_runtime_nocache tsan_works {
int main () { return 0; }
- } "-fPIE -pie -fsanitize=thread -g"] {
+ } "-fsanitize=thread -g"] {
set dg-do-what-default run
} else {
set dg-do-what-default compile
diff --git a/gcc/testsuite/lib/ubsan-dg.exp b/gcc/testsuite/lib/ubsan-dg.exp
index fecce7bf00f..23be38c5a7e 100644
--- a/gcc/testsuite/lib/ubsan-dg.exp
+++ b/gcc/testsuite/lib/ubsan-dg.exp
@@ -14,6 +14,15 @@
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
+# Return 1 if compilation with -fsanitize=undefined is error-free for trivial
+# code, 0 otherwise.
+
+proc check_effective_target_fsanitize_undefined {} {
+ return [check_runtime fsanitize_undefined {
+ int main (void) { return 0; }
+ } "-fsanitize=undefined"]
+}
+
#
# ubsan_link_flags -- compute library path and flags to find libubsan.
# (originally from g++.exp)
@@ -36,6 +45,7 @@ proc ubsan_link_flags { paths } {
append flags " -B${gccpath}/libsanitizer/ubsan/ "
append flags " -L${gccpath}/libsanitizer/ubsan/.libs"
append ld_library_path ":${gccpath}/libsanitizer/ubsan/.libs"
+ append ld_library_path ":${gccpath}/libstdc++-v3/src/.libs"
}
} else {
global tool_root_dir
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 588fa2bd4fa..c2263b20ddf 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -492,6 +492,7 @@ check_global_declaration_1 (tree decl)
to handle multiple external decls in different scopes. */
&& ! (DECL_NAME (decl) && TREE_USED (DECL_NAME (decl)))
&& ! DECL_EXTERNAL (decl)
+ && ! DECL_ARTIFICIAL (decl)
&& ! TREE_PUBLIC (decl)
/* A volatile variable might be used in some non-obvious way. */
&& ! TREE_THIS_VOLATILE (decl)
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 40443d4ef4e..533b7d8b177 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -660,6 +660,13 @@ enum annot_expr_kind {
annot_expr_ivdep_kind
};
+/* Internal functions. */
+enum internal_fn {
+#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) IFN_##CODE,
+#include "internal-fn.def"
+#undef DEF_INTERNAL_FN
+ IFN_LAST
+};
/*---------------------------------------------------------------------------
Type definitions
@@ -762,6 +769,9 @@ struct GTY(()) tree_base {
int length;
/* SSA version number. This field is only used with SSA_NAME. */
unsigned int version;
+
+ /* Internal function code. */
+ enum internal_fn ifn;
} GTY((skip(""))) u;
};
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 5a9f7781932..d771bf3bcb7 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "dumpfile.h"
#include "value-prof.h"
#include "predict.h"
+#include "internal-fn.h"
#include <new> // For placement-new.
@@ -1739,7 +1740,10 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
break;
case CALL_EXPR:
- print_call_name (buffer, CALL_EXPR_FN (node), flags);
+ if (CALL_EXPR_FN (node) != NULL_TREE)
+ print_call_name (buffer, CALL_EXPR_FN (node), flags);
+ else
+ pp_string (buffer, internal_fn_name (CALL_EXPR_IFN (node)));
/* Print parameters. */
pp_space (buffer);
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index eeefeaf0724..8a48965373d 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -2511,6 +2511,9 @@ optimize_unreachable (gimple_stmt_iterator i)
edge e;
bool ret;
+ if (flag_sanitize & SANITIZE_UNREACHABLE)
+ return false;
+
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
stmt = gsi_stmt (gsi);
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 60b4dae38a7..44f5bf957b2 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -2248,8 +2248,13 @@ optimize_range_tests_diff (enum tree_code opcode, tree type,
if (!integer_pow2p (tem1))
return false;
+ type = unsigned_type_for (type);
+ tem1 = fold_convert (type, tem1);
+ tem2 = fold_convert (type, tem2);
+ lowi = fold_convert (type, lowi);
mask = fold_build1 (BIT_NOT_EXPR, type, tem1);
- tem1 = fold_binary (MINUS_EXPR, type, rangei->exp, lowi);
+ tem1 = fold_binary (MINUS_EXPR, type,
+ fold_convert (type, rangei->exp), lowi);
tem1 = fold_build2 (BIT_AND_EXPR, type, tem1, mask);
lowj = build_int_cst (type, 0);
if (update_range_test (rangei, rangej, 1, opcode, ops, tem1,
diff --git a/gcc/tree.c b/gcc/tree.c
index 293b29f8e6f..bec9d7b7aa9 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -8948,6 +8948,10 @@ get_callee_fndecl (const_tree call)
called. */
addr = CALL_EXPR_FN (call);
+ /* If there is no function, return early. */
+ if (addr == NULL_TREE)
+ return NULL_TREE;
+
STRIP_NOPS (addr);
/* If this is a readonly function pointer, extract its initial value. */
@@ -9879,7 +9883,7 @@ local_define_builtin (const char *name, tree type, enum built_in_function code,
/* Call this function after instantiating all builtins that the language
front end cares about. This will build the rest of the builtins
- and internal functions that are relied upon by the tree optimizers and
+ and internal function that are relied upon by the tree optimizers and
the middle-end. */
void
diff --git a/gcc/tree.def b/gcc/tree.def
index 48163f705d1..56240d8cef1 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -561,7 +561,7 @@ DEFTREECODE (BIND_EXPR, "bind_expr", tcc_expression, 3)
/* Function call. CALL_EXPRs are represented by variably-sized expression
nodes. There are at least three fixed operands. Operand 0 is an
INTEGER_CST node containing the total operand count, the number of
- arguments plus 3. Operand 1 is the function, while operand 2 is
+ arguments plus 3. Operand 1 is the function or NULL, while operand 2 is
is static chain argument, or NULL. The remaining operands are the
arguments to the call. */
DEFTREECODE (CALL_EXPR, "call_expr", tcc_vl_exp, 3)
diff --git a/gcc/tree.h b/gcc/tree.h
index d73bc521c3d..2aafb89334c 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -759,6 +759,12 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
#define TYPE_OVERFLOW_TRAPS(TYPE) \
(!TYPE_UNSIGNED (TYPE) && flag_trapv)
+ /* True if an overflow is to be preserved for sanitization. */
+#define TYPE_OVERFLOW_SANITIZED(TYPE) \
+ (INTEGRAL_TYPE_P (TYPE) \
+ && !TYPE_OVERFLOW_WRAPS (TYPE) \
+ && (flag_sanitize & SANITIZE_SI_OVERFLOW))
+
/* True if pointer types have undefined overflow. */
#define POINTER_TYPE_OVERFLOW_UNDEFINED (flag_strict_overflow)
@@ -1131,12 +1137,12 @@ extern void protected_set_expr_location (tree, location_t);
#define ASSERT_EXPR_VAR(NODE) TREE_OPERAND (ASSERT_EXPR_CHECK (NODE), 0)
#define ASSERT_EXPR_COND(NODE) TREE_OPERAND (ASSERT_EXPR_CHECK (NODE), 1)
-/* CALL_EXPR accessors.
- */
+/* CALL_EXPR accessors. */
#define CALL_EXPR_FN(NODE) TREE_OPERAND (CALL_EXPR_CHECK (NODE), 1)
#define CALL_EXPR_STATIC_CHAIN(NODE) TREE_OPERAND (CALL_EXPR_CHECK (NODE), 2)
#define CALL_EXPR_ARG(NODE, I) TREE_OPERAND (CALL_EXPR_CHECK (NODE), (I) + 3)
#define call_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH (NODE) - 3)
+#define CALL_EXPR_IFN(NODE) (CALL_EXPR_CHECK (NODE)->base.u.ifn)
/* CALL_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &CALL_EXPR_ARG (NODE, 0) because that will complain if
@@ -3598,6 +3604,13 @@ extern tree build_call_valist (tree, tree, int, va_list);
build_call_array_loc (UNKNOWN_LOCATION, T1, T2, N, T3)
extern tree build_call_array_loc (location_t, tree, tree, int, const tree *);
extern tree build_call_vec (tree, tree, vec<tree, va_gc> *);
+extern tree build_call_expr_loc_array (location_t, tree, int, tree *);
+extern tree build_call_expr_loc_vec (location_t, tree, vec<tree, va_gc> *);
+extern tree build_call_expr_loc (location_t, tree, int, ...);
+extern tree build_call_expr (tree, int, ...);
+extern tree build_call_expr_internal_loc (location_t, enum internal_fn,
+ tree, int, ...);
+extern tree build_string_literal (int, const char *);
/* Construct various nodes representing data types. */
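TYPE_OVERFLOW_SANITIZED above marks signed integral types whose overflow must stay observable, so folders can test it before applying a simplification that would hide the overflow. An illustrative, made-up user-level case of the kind of fold this is meant to guard:

  /* With -fsanitize=signed-integer-overflow, simplifying x + 1 > x to 1 would
     hide the overflow at INT_MAX, so folds of this shape consult the macro.  */
  int still_checked (int x)
  {
    return x + 1 > x;
  }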
diff --git a/gcc/ubsan.c b/gcc/ubsan.c
index 25c2cde6040..7c2fe66f176 100644
--- a/gcc/ubsan.c
+++ b/gcc/ubsan.c
@@ -48,6 +48,11 @@ along with GCC; see the file COPYING3. If not see
#include "gimplify-me.h"
#include "intl.h"
#include "tree-eh.h"
+#include "realmpfr.h"
+#include "dfp.h"
+#include "builtins.h"
+#include "tree-object-size.h"
+#include "tree-eh.h"
/* Map from a tree to a VAR_DECL tree. */
@@ -160,6 +165,9 @@ ubsan_encode_value (tree t, bool in_expand_p)
}
}
+/* Cached ubsan_get_type_descriptor_type () return value. */
+static GTY(()) tree ubsan_type_descriptor_type;
+
/* Build
struct __ubsan_type_descriptor
{
@@ -170,11 +178,15 @@ ubsan_encode_value (tree t, bool in_expand_p)
type. */
static tree
-ubsan_type_descriptor_type (void)
+ubsan_get_type_descriptor_type (void)
{
static const char *field_names[3]
= { "__typekind", "__typeinfo", "__typename" };
tree fields[3], ret;
+
+ if (ubsan_type_descriptor_type)
+ return ubsan_type_descriptor_type;
+
tree itype = build_range_type (sizetype, size_zero_node, NULL_TREE);
tree flex_arr_type = build_array_type (char_type_node, itype);
@@ -189,12 +201,22 @@ ubsan_type_descriptor_type (void)
if (i)
DECL_CHAIN (fields[i - 1]) = fields[i];
}
+ tree type_decl = build_decl (input_location, TYPE_DECL,
+ get_identifier ("__ubsan_type_descriptor"),
+ ret);
+ DECL_IGNORED_P (type_decl) = 1;
+ DECL_ARTIFICIAL (type_decl) = 1;
TYPE_FIELDS (ret) = fields[0];
- TYPE_NAME (ret) = get_identifier ("__ubsan_type_descriptor");
+ TYPE_NAME (ret) = type_decl;
+ TYPE_STUB_DECL (ret) = type_decl;
layout_type (ret);
+ ubsan_type_descriptor_type = ret;
return ret;
}
+/* Cached ubsan_get_source_location_type () return value. */
+static GTY(()) tree ubsan_source_location_type;
+
/* Build
struct __ubsan_source_location
{
@@ -204,12 +226,15 @@ ubsan_type_descriptor_type (void)
}
type. */
-static tree
-ubsan_source_location_type (void)
+tree
+ubsan_get_source_location_type (void)
{
static const char *field_names[3]
= { "__filename", "__line", "__column" };
tree fields[3], ret;
+ if (ubsan_source_location_type)
+ return ubsan_source_location_type;
+
tree const_char_type = build_qualified_type (char_type_node,
TYPE_QUAL_CONST);
@@ -224,9 +249,16 @@ ubsan_source_location_type (void)
if (i)
DECL_CHAIN (fields[i - 1]) = fields[i];
}
+ tree type_decl = build_decl (input_location, TYPE_DECL,
+ get_identifier ("__ubsan_source_location"),
+ ret);
+ DECL_IGNORED_P (type_decl) = 1;
+ DECL_ARTIFICIAL (type_decl) = 1;
TYPE_FIELDS (ret) = fields[0];
- TYPE_NAME (ret) = get_identifier ("__ubsan_source_location");
+ TYPE_NAME (ret) = type_decl;
+ TYPE_STUB_DECL (ret) = type_decl;
layout_type (ret);
+ ubsan_source_location_type = ret;
return ret;
}
@@ -237,20 +269,26 @@ static tree
ubsan_source_location (location_t loc)
{
expanded_location xloc;
- tree type = ubsan_source_location_type ();
+ tree type = ubsan_get_source_location_type ();
xloc = expand_location (loc);
+ tree str;
if (xloc.file == NULL)
- xloc.file = "<unknown>";
-
- /* Fill in the values from LOC. */
- size_t len = strlen (xloc.file);
- tree str = build_string (len + 1, xloc.file);
- TREE_TYPE (str) = build_array_type (char_type_node,
- build_index_type (size_int (len)));
- TREE_READONLY (str) = 1;
- TREE_STATIC (str) = 1;
- str = build_fold_addr_expr (str);
+ {
+ str = build_int_cst (ptr_type_node, 0);
+ xloc.line = 0;
+ xloc.column = 0;
+ }
+ else
+ {
+ /* Fill in the values from LOC. */
+ size_t len = strlen (xloc.file) + 1;
+ str = build_string (len, xloc.file);
+ TREE_TYPE (str) = build_array_type_nelts (char_type_node, len);
+ TREE_READONLY (str) = 1;
+ TREE_STATIC (str) = 1;
+ str = build_fold_addr_expr (str);
+ }
tree ctor = build_constructor_va (type, 3, NULL_TREE, str, NULL_TREE,
build_int_cst (unsigned_type_node,
xloc.line), NULL_TREE,
@@ -268,20 +306,26 @@ static unsigned short
get_ubsan_type_info_for_type (tree type)
{
gcc_assert (TYPE_SIZE (type) && tree_fits_uhwi_p (TYPE_SIZE (type)));
- int prec = exact_log2 (tree_to_uhwi (TYPE_SIZE (type)));
- gcc_assert (prec != -1);
- return (prec << 1) | !TYPE_UNSIGNED (type);
+ if (TREE_CODE (type) == REAL_TYPE)
+ return tree_to_uhwi (TYPE_SIZE (type));
+ else if (INTEGRAL_TYPE_P (type))
+ {
+ int prec = exact_log2 (tree_to_uhwi (TYPE_SIZE (type)));
+ gcc_assert (prec != -1);
+ return (prec << 1) | !TYPE_UNSIGNED (type);
+ }
+ else
+ return 0;
}
/* Helper routine that returns ADDR_EXPR of a VAR_DECL of a type
descriptor. It first looks into the hash table; if not found,
create the VAR_DECL, put it into the hash table and return the
- ADDR_EXPR of it. TYPE describes a particular type. WANT_POINTER_TYPE_P
- means whether we are interested in the pointer type and not the pointer
- itself. */
+ ADDR_EXPR of it. TYPE describes a particular type. PSTYLE is
+ an enum controlling how we want to print the type. */
tree
-ubsan_type_descriptor (tree type, bool want_pointer_type_p)
+ubsan_type_descriptor (tree type, enum ubsan_print_style pstyle)
{
/* See through any typedefs. */
type = TYPE_MAIN_VARIANT (type);
@@ -294,7 +338,7 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
if (decl != NULL_TREE && varpool_get_node (decl))
return build_fold_addr_expr (decl);
- tree dtype = ubsan_type_descriptor_type ();
+ tree dtype = ubsan_get_type_descriptor_type ();
tree type2 = type;
const char *tname = NULL;
char *pretty_name;
@@ -302,7 +346,7 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
unsigned short tkind, tinfo;
/* Get the name of the type, or the name of the pointer type. */
- if (want_pointer_type_p)
+ if (pstyle == UBSAN_PRINT_POINTER)
{
gcc_assert (POINTER_TYPE_P (type));
type2 = TREE_TYPE (type);
@@ -318,6 +362,12 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
/* If an array, get its type. */
type2 = strip_array_types (type2);
+ if (pstyle == UBSAN_PRINT_ARRAY)
+ {
+ while (POINTER_TYPE_P (type2))
+ deref_depth++, type2 = TREE_TYPE (type2);
+ }
+
if (TYPE_NAME (type2) != NULL)
{
if (TREE_CODE (TYPE_NAME (type2)) == IDENTIFIER_NODE)
@@ -331,8 +381,8 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
tname = "<unknown>";
/* Decorate the type name with '', '*', "struct", or "union". */
- pretty_name = (char *) alloca (strlen (tname) + 16 + deref_depth);
- if (want_pointer_type_p)
+ pretty_name = (char *) alloca (strlen (tname) + 1024 + deref_depth);
+ if (pstyle == UBSAN_PRINT_POINTER)
{
int pos = sprintf (pretty_name, "'%s%s%s%s%s%s%s",
TYPE_VOLATILE (type2) ? "volatile " : "",
@@ -349,6 +399,33 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
pretty_name[pos++] = '\'';
pretty_name[pos] = '\0';
}
+ else if (pstyle == UBSAN_PRINT_ARRAY)
+ {
+ /* Pretty print the array dimensions. */
+ gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
+ tree t = type;
+ int pos = sprintf (pretty_name, "'%s ", tname);
+ while (deref_depth-- > 0)
+ pretty_name[pos++] = '*';
+ while (TREE_CODE (t) == ARRAY_TYPE)
+ {
+ pretty_name[pos++] = '[';
+ tree dom = TYPE_DOMAIN (t);
+ if (dom && TREE_CODE (TYPE_MAX_VALUE (dom)) == INTEGER_CST)
+ pos += sprintf (&pretty_name[pos], HOST_WIDE_INT_PRINT_DEC,
+ tree_to_uhwi (TYPE_MAX_VALUE (dom)) + 1);
+ else
+ /* ??? We can't determine the variable name; print VLA unspec. */
+ pretty_name[pos++] = '*';
+ pretty_name[pos++] = ']';
+ t = TREE_TYPE (t);
+ }
+ pretty_name[pos++] = '\'';
+ pretty_name[pos] = '\0';
+
+ /* Save the tree with stripped types. */
+ type = t;
+ }
else
sprintf (pretty_name, "'%s'", tname);
@@ -360,7 +437,14 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
tkind = 0x0000;
break;
case REAL_TYPE:
- tkind = 0x0001;
+ /* FIXME: libubsan right now only supports float, double and
+ long double type formats. */
+ if (TYPE_MODE (type) == TYPE_MODE (float_type_node)
+ || TYPE_MODE (type) == TYPE_MODE (double_type_node)
+ || TYPE_MODE (type) == TYPE_MODE (long_double_type_node))
+ tkind = 0x0001;
+ else
+ tkind = 0xffff;
break;
default:
tkind = 0xffff;
@@ -369,6 +453,12 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
tinfo = get_ubsan_type_info_for_type (type);
/* Create a new VAR_DECL of type descriptor. */
+ size_t len = strlen (pretty_name) + 1;
+ tree str = build_string (len, pretty_name);
+ TREE_TYPE (str) = build_array_type_nelts (char_type_node, len);
+ TREE_READONLY (str) = 1;
+ TREE_STATIC (str) = 1;
+
char tmp_name[32];
static unsigned int type_var_id_num;
ASM_GENERATE_INTERNAL_LABEL (tmp_name, "Lubsan_type", type_var_id_num++);
@@ -379,13 +469,12 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
DECL_EXTERNAL (decl) = 0;
+ DECL_SIZE (decl)
+ = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (TREE_TYPE (str)));
+ DECL_SIZE_UNIT (decl)
+ = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl),
+ TYPE_SIZE_UNIT (TREE_TYPE (str)));
- size_t len = strlen (pretty_name);
- tree str = build_string (len + 1, pretty_name);
- TREE_TYPE (str) = build_array_type (char_type_node,
- build_index_type (size_int (len)));
- TREE_READONLY (str) = 1;
- TREE_STATIC (str) = 1;
tree ctor = build_constructor_va (dtype, 3, NULL_TREE,
build_int_cst (short_unsigned_type_node,
tkind), NULL_TREE,
@@ -403,42 +492,43 @@ ubsan_type_descriptor (tree type, bool want_pointer_type_p)
}
/* Create a structure for the ubsan library. NAME is a name of the new
- structure. The arguments in ... are of __ubsan_type_descriptor type
- and there are at most two of them. MISMATCH are data used by ubsan
- pointer checking. */
+ structure. LOCCNT is number of locations, PLOC points to array of
+ locations. The arguments in ... are of __ubsan_type_descriptor type
+ and there are at most two of them, followed by NULL_TREE, followed
+ by optional extra arguments and another NULL_TREE. */
tree
-ubsan_create_data (const char *name, const location_t *ploc,
- const struct ubsan_mismatch_data *mismatch, ...)
+ubsan_create_data (const char *name, int loccnt, const location_t *ploc, ...)
{
va_list args;
tree ret, t;
- tree fields[5];
+ tree fields[6];
vec<tree, va_gc> *saved_args = NULL;
size_t i = 0;
- location_t loc = UNKNOWN_LOCATION;
+ int j;
/* Firstly, create a pointer to type descriptor type. */
- tree td_type = ubsan_type_descriptor_type ();
- TYPE_READONLY (td_type) = 1;
+ tree td_type = ubsan_get_type_descriptor_type ();
td_type = build_pointer_type (td_type);
/* Create the structure type. */
ret = make_node (RECORD_TYPE);
- if (ploc != NULL)
+ for (j = 0; j < loccnt; j++)
{
- loc = LOCATION_LOCUS (*ploc);
+ gcc_checking_assert (i < 2);
fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
- ubsan_source_location_type ());
+ ubsan_get_source_location_type ());
DECL_CONTEXT (fields[i]) = ret;
+ if (i)
+ DECL_CHAIN (fields[i - 1]) = fields[i];
i++;
}
- va_start (args, mismatch);
+ va_start (args, ploc);
for (t = va_arg (args, tree); t != NULL_TREE;
i++, t = va_arg (args, tree))
{
- gcc_checking_assert (i < 3);
+ gcc_checking_assert (i < 4);
/* Save the tree arguments for later use. */
vec_safe_push (saved_args, t);
fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
@@ -447,26 +537,28 @@ ubsan_create_data (const char *name, const location_t *ploc,
if (i)
DECL_CHAIN (fields[i - 1]) = fields[i];
}
- va_end (args);
- if (mismatch != NULL)
+ for (t = va_arg (args, tree); t != NULL_TREE;
+ i++, t = va_arg (args, tree))
{
- /* We have to add two more decls. */
- fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
- pointer_sized_int_node);
- DECL_CONTEXT (fields[i]) = ret;
- DECL_CHAIN (fields[i - 1]) = fields[i];
- i++;
-
+ gcc_checking_assert (i < 6);
+ /* Save the tree arguments for later use. */
+ vec_safe_push (saved_args, t);
fields[i] = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
- unsigned_char_type_node);
+ TREE_TYPE (t));
DECL_CONTEXT (fields[i]) = ret;
- DECL_CHAIN (fields[i - 1]) = fields[i];
- i++;
+ if (i)
+ DECL_CHAIN (fields[i - 1]) = fields[i];
}
+ va_end (args);
+ tree type_decl = build_decl (input_location, TYPE_DECL,
+ get_identifier (name), ret);
+ DECL_IGNORED_P (type_decl) = 1;
+ DECL_ARTIFICIAL (type_decl) = 1;
TYPE_FIELDS (ret) = fields[0];
- TYPE_NAME (ret) = get_identifier (name);
+ TYPE_NAME (ret) = type_decl;
+ TYPE_STUB_DECL (ret) = type_decl;
layout_type (ret);
/* Now, fill in the type. */
@@ -486,8 +578,11 @@ ubsan_create_data (const char *name, const location_t *ploc,
tree ctor = build_constructor (ret, v);
/* If desirable, set the __ubsan_source_location element. */
- if (ploc != NULL)
- CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, ubsan_source_location (loc));
+ for (j = 0; j < loccnt; j++)
+ {
+ location_t loc = LOCATION_LOCUS (ploc[j]);
+ CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, ubsan_source_location (loc));
+ }
size_t nelts = vec_safe_length (saved_args);
for (i = 0; i < nelts; i++)
@@ -496,13 +591,6 @@ ubsan_create_data (const char *name, const location_t *ploc,
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, t);
}
- if (mismatch != NULL)
- {
- /* Append the pointer data. */
- CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, mismatch->align);
- CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, mismatch->ckind);
- }
-
TREE_CONSTANT (ctor) = 1;
TREE_STATIC (ctor) = 1;
DECL_INITIAL (var) = ctor;
@@ -514,14 +602,26 @@ ubsan_create_data (const char *name, const location_t *ploc,
/* Instrument the __builtin_unreachable call. We just call the libubsan
routine instead. */
-tree
-ubsan_instrument_unreachable (location_t loc)
+bool
+ubsan_instrument_unreachable (gimple_stmt_iterator *gsi)
{
- initialize_sanitizer_builtins ();
- tree data = ubsan_create_data ("__ubsan_unreachable_data", &loc, NULL,
- NULL_TREE);
- tree t = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE);
- return build_call_expr_loc (loc, t, 1, build_fold_addr_expr_loc (loc, data));
+ gimple g;
+ location_t loc = gimple_location (gsi_stmt (*gsi));
+
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_unreachable_data", 1, &loc,
+ NULL_TREE, NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ tree fn
+ = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_BUILTIN_UNREACHABLE);
+ g = gimple_build_call (fn, 1, data);
+ }
+ gimple_set_location (g, loc);
+ gsi_replace (gsi, g, false);
+ return false;
}
/* Return true if T is a call to a libubsan routine. */
@@ -530,23 +630,133 @@ bool
is_ubsan_builtin_p (tree t)
{
return TREE_CODE (t) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (t) == BUILT_IN_NORMAL
&& strncmp (IDENTIFIER_POINTER (DECL_NAME (t)),
"__builtin___ubsan_", 18) == 0;
}
-/* Expand UBSAN_NULL internal call. */
+/* Create a callgraph edge for statement STMT. */
+
+static void
+ubsan_create_edge (gimple stmt)
+{
+ gcall *call_stmt = dyn_cast <gcall *> (stmt);
+ basic_block bb = gimple_bb (stmt);
+ int freq = compute_call_stmt_bb_frequency (current_function_decl, bb);
+ cgraph_node *node = cgraph_node::get (current_function_decl);
+ tree decl = gimple_call_fndecl (call_stmt);
+ if (decl)
+ node->create_edge (cgraph_node::get_create (decl), call_stmt, bb->count,
+ freq);
+}
+
+/* Expand the UBSAN_BOUNDS special builtin function. */
+
+bool
+ubsan_expand_bounds_ifn (gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ location_t loc = gimple_location (stmt);
+ gcc_assert (gimple_call_num_args (stmt) == 3);
+
+ /* Pick up the arguments of the UBSAN_BOUNDS call. */
+ tree type = TREE_TYPE (TREE_TYPE (gimple_call_arg (stmt, 0)));
+ tree index = gimple_call_arg (stmt, 1);
+ tree orig_index_type = TREE_TYPE (index);
+ tree bound = gimple_call_arg (stmt, 2);
+
+ gimple_stmt_iterator gsi_orig = *gsi;
+
+ /* Create condition "if (index > bound)". */
+ basic_block then_bb, fallthru_bb;
+ gimple_stmt_iterator cond_insert_point
+ = create_cond_insert_point (gsi, false, false, true,
+ &then_bb, &fallthru_bb);
+ index = fold_convert (TREE_TYPE (bound), index);
+ index = force_gimple_operand_gsi (&cond_insert_point, index,
+ true, NULL_TREE,
+ false, GSI_NEW_STMT);
+ gimple g = gimple_build_cond (GT_EXPR, index, bound, NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc);
+ gsi_insert_after (&cond_insert_point, g, GSI_NEW_STMT);
+
+ /* Generate __ubsan_handle_out_of_bounds call. */
+ *gsi = gsi_after_labels (then_bb);
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data
+ = ubsan_create_data ("__ubsan_out_of_bounds_data", 1, &loc,
+ ubsan_type_descriptor (type, UBSAN_PRINT_ARRAY),
+ ubsan_type_descriptor (orig_index_type),
+ NULL_TREE, NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_BOUNDS)
+ ? BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS
+ : BUILT_IN_UBSAN_HANDLE_OUT_OF_BOUNDS_ABORT;
+ tree fn = builtin_decl_explicit (bcode);
+ tree val = force_gimple_operand_gsi (gsi, ubsan_encode_value (index),
+ true, NULL_TREE, true,
+ GSI_SAME_STMT);
+ g = gimple_build_call (fn, 2, data, val);
+ }
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+
+ /* Get rid of the UBSAN_BOUNDS call from the IR. */
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi_orig, true);
+
+ /* Point GSI to next logical statement. */
+ *gsi = gsi_start_bb (fallthru_bb);
+ return true;
+}
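ubsan_expand_bounds_ifn lowers the UBSAN_BOUNDS calls emitted for array indexing under -fsanitize=bounds into an explicit "index > bound" branch to the handler. An illustrative, compilable example of code that receives such a check (names made up):

  /* With -fsanitize=bounds, the access a[i] is guarded before it executes.  */
  int a[10];

  int get (int i)
  {
    return a[i];
  }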
+
+/* Expand UBSAN_NULL internal call. The type is kept on the ckind
+ argument which is a constant, because the middle-end treats pointer
+ conversions as useless and therefore the type of the first argument
+ could be changed to any other pointer type. */
-void
-ubsan_expand_null_ifn (gimple_stmt_iterator gsi)
+bool
+ubsan_expand_null_ifn (gimple_stmt_iterator *gsip)
{
+ gimple_stmt_iterator gsi = *gsip;
gimple stmt = gsi_stmt (gsi);
location_t loc = gimple_location (stmt);
- gcc_assert (gimple_call_num_args (stmt) == 2);
+ gcc_assert (gimple_call_num_args (stmt) == 3);
tree ptr = gimple_call_arg (stmt, 0);
tree ckind = gimple_call_arg (stmt, 1);
+ tree align = gimple_call_arg (stmt, 2);
+ tree check_align = NULL_TREE;
+ bool check_null;
basic_block cur_bb = gsi_bb (gsi);
+ gimple g;
+ if (!integer_zerop (align))
+ {
+ unsigned int ptralign = get_pointer_alignment (ptr) / BITS_PER_UNIT;
+ if (compare_tree_int (align, ptralign) == 1)
+ {
+ check_align = make_ssa_name (pointer_sized_int_node, NULL);
+ g = gimple_build_assign_with_ops (NOP_EXPR, check_align,
+ ptr, NULL_TREE);
+ gimple_set_location (g, loc);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ }
+ }
+ check_null = (flag_sanitize & SANITIZE_NULL) != 0;
+
+ if (check_align == NULL_TREE && !check_null)
+ {
+ gsi_remove (gsip, true);
+ /* Unlink the UBSAN_NULLs vops before replacing it. */
+ unlink_stmt_vdef (stmt);
+ return true;
+ }
+
/* Split the original block holding the pointer dereference. */
edge e = split_block (cur_bb, stmt);
@@ -579,59 +789,207 @@ ubsan_expand_null_ifn (gimple_stmt_iterator gsi)
/* Update dominance info for the newly created then_bb; note that
fallthru_bb's dominance info has already been updated by
- split_bock. */
+ split_block. */
if (dom_info_available_p (CDI_DOMINATORS))
set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
/* Put the ubsan builtin call into the newly created BB. */
- tree fn = builtin_decl_implicit (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH);
- const struct ubsan_mismatch_data m
- = { build_zero_cst (pointer_sized_int_node), ckind };
- tree data = ubsan_create_data ("__ubsan_null_data",
- &loc, &m,
- ubsan_type_descriptor (TREE_TYPE (ptr), true),
- NULL_TREE);
- data = build_fold_addr_expr_loc (loc, data);
- gimple g = gimple_build_call (fn, 2, data,
- build_zero_cst (pointer_sized_int_node));
- gimple_set_location (g, loc);
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_implicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ enum built_in_function bcode
+ = (flag_sanitize_recover & ((check_align ? SANITIZE_ALIGNMENT : 0)
+ | (check_null ? SANITIZE_NULL : 0)))
+ ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH
+ : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT;
+ tree fn = builtin_decl_implicit (bcode);
+ tree data
+ = ubsan_create_data ("__ubsan_null_data", 1, &loc,
+ ubsan_type_descriptor (TREE_TYPE (ckind),
+ UBSAN_PRINT_POINTER),
+ NULL_TREE,
+ align,
+ fold_convert (unsigned_char_type_node, ckind),
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ g = gimple_build_call (fn, 2, data,
+ check_align ? check_align
+ : build_zero_cst (pointer_sized_int_node));
+ }
gimple_stmt_iterator gsi2 = gsi_start_bb (then_bb);
+ gimple_set_location (g, loc);
gsi_insert_after (&gsi2, g, GSI_NEW_STMT);
/* Unlink the UBSAN_NULLs vops before replacing it. */
unlink_stmt_vdef (stmt);
- g = gimple_build_cond (EQ_EXPR, ptr, build_int_cst (TREE_TYPE (ptr), 0),
- NULL_TREE, NULL_TREE);
- gimple_set_location (g, loc);
+ if (check_null)
+ {
+ g = gimple_build_cond (EQ_EXPR, ptr, build_int_cst (TREE_TYPE (ptr), 0),
+ NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc);
- /* Replace the UBSAN_NULL with a GIMPLE_COND stmt. */
- gsi_replace (&gsi, g, false);
+ /* Replace the UBSAN_NULL with a GIMPLE_COND stmt. */
+ gsi_replace (&gsi, g, false);
+ }
+
+ if (check_align)
+ {
+ if (check_null)
+ {
+ /* Split the block with the condition again. */
+ e = split_block (cond_bb, stmt);
+ basic_block cond1_bb = e->src;
+ basic_block cond2_bb = e->dest;
+
+ /* Make an edge coming from the 'cond1 block' into the 'then block';
+ this edge is unlikely taken, so set up the probability
+ accordingly. */
+ e = make_edge (cond1_bb, then_bb, EDGE_TRUE_VALUE);
+ e->probability = PROB_VERY_UNLIKELY;
+
+ /* Set up the fallthrough basic block. */
+ e = find_edge (cond1_bb, cond2_bb);
+ e->flags = EDGE_FALSE_VALUE;
+ e->count = cond1_bb->count;
+ e->probability = REG_BR_PROB_BASE - PROB_VERY_UNLIKELY;
+
+ /* Update dominance info. */
+ if (dom_info_available_p (CDI_DOMINATORS))
+ {
+ set_immediate_dominator (CDI_DOMINATORS, fallthru_bb, cond1_bb);
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond1_bb);
+ }
+
+ gsi2 = gsi_start_bb (cond2_bb);
+ }
+
+ tree mask = build_int_cst (pointer_sized_int_node,
+ tree_to_uhwi (align) - 1);
+ g = gimple_build_assign_with_ops (BIT_AND_EXPR,
+ make_ssa_name (pointer_sized_int_node,
+ NULL),
+ check_align, mask);
+ gimple_set_location (g, loc);
+ if (check_null)
+ gsi_insert_after (&gsi2, g, GSI_NEW_STMT);
+ else
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+
+ g = gimple_build_cond (NE_EXPR, gimple_assign_lhs (g),
+ build_int_cst (pointer_sized_int_node, 0),
+ NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc);
+ if (check_null)
+ gsi_insert_after (&gsi2, g, GSI_NEW_STMT);
+ else
+ /* Replace the UBSAN_NULL with a GIMPLE_COND stmt. */
+ gsi_replace (&gsi, g, false);
+ }
+ return false;
}
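With the extra alignment operand, a single UBSAN_NULL call now serves both -fsanitize=null and -fsanitize=alignment. An illustrative piece of user code that gets instrumented (names made up):

  /* With -fsanitize=null,alignment, the load is preceded by a check that p is
     non-null and suitably aligned for int.  */
  int load (const int *p)
  {
    return *p;
  }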
-/* Instrument a member call. We check whether 'this' is NULL. */
+/* Expand UBSAN_OBJECT_SIZE internal call. */
-static void
-instrument_member_call (gimple_stmt_iterator *iter)
+bool
+ubsan_expand_objsize_ifn (gimple_stmt_iterator *gsi)
{
- tree this_parm = gimple_call_arg (gsi_stmt (*iter), 0);
- tree kind = build_int_cst (unsigned_char_type_node, UBSAN_MEMBER_CALL);
- gimple g = gimple_build_call_internal (IFN_UBSAN_NULL, 2, this_parm, kind);
- gimple_set_location (g, gimple_location (gsi_stmt (*iter)));
- gsi_insert_before (iter, g, GSI_SAME_STMT);
+ gimple stmt = gsi_stmt (*gsi);
+ location_t loc = gimple_location (stmt);
+ gcc_assert (gimple_call_num_args (stmt) == 4);
+
+ tree ptr = gimple_call_arg (stmt, 0);
+ tree offset = gimple_call_arg (stmt, 1);
+ tree size = gimple_call_arg (stmt, 2);
+ tree ckind = gimple_call_arg (stmt, 3);
+ gimple_stmt_iterator gsi_orig = *gsi;
+ gimple g;
+
+ /* See if we can discard the check. */
+ if (TREE_CODE (size) != INTEGER_CST
+ || integer_all_onesp (size))
+ /* Yes, __builtin_object_size couldn't determine the
+ object size. */;
+ else
+ {
+ /* if (offset > objsize) */
+ basic_block then_bb, fallthru_bb;
+ gimple_stmt_iterator cond_insert_point
+ = create_cond_insert_point (gsi, false, false, true,
+ &then_bb, &fallthru_bb);
+ g = gimple_build_cond (GT_EXPR, offset, size, NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc);
+ gsi_insert_after (&cond_insert_point, g, GSI_NEW_STMT);
+
+ /* Generate __ubsan_handle_type_mismatch call. */
+ *gsi = gsi_after_labels (then_bb);
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data
+ = ubsan_create_data ("__ubsan_objsz_data", 1, &loc,
+ ubsan_type_descriptor (TREE_TYPE (ptr),
+ UBSAN_PRINT_POINTER),
+ NULL_TREE,
+ build_zero_cst (pointer_sized_int_node),
+ ckind,
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_OBJECT_SIZE)
+ ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH
+ : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT;
+ tree p = make_ssa_name (pointer_sized_int_node, NULL);
+ g = gimple_build_assign_with_ops (NOP_EXPR, p, ptr, NULL_TREE);
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ g = gimple_build_call (builtin_decl_explicit (bcode), 2, data, p);
+ }
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+
+ /* Point GSI to next logical statement. */
+ *gsi = gsi_start_bb (fallthru_bb);
+
+ /* Get rid of the UBSAN_OBJECT_SIZE call from the IR. */
+ unlink_stmt_vdef (stmt);
+ gsi_remove (&gsi_orig, true);
+ return true;
+ }
+
+ /* Get rid of the UBSAN_OBJECT_SIZE call from the IR. */
+ unlink_stmt_vdef (stmt);
+ gsi_remove (gsi, true);
+ return true;
}
-/* Instrument a memory reference. T is the pointer, IS_LHS says
+/* Instrument a memory reference. BASE is the base of MEM, IS_LHS says
whether the pointer is on the left hand side of the assignment. */
static void
-instrument_mem_ref (tree t, gimple_stmt_iterator *iter, bool is_lhs)
+instrument_mem_ref (tree mem, tree base, gimple_stmt_iterator *iter,
+ bool is_lhs)
{
enum ubsan_null_ckind ikind = is_lhs ? UBSAN_STORE_OF : UBSAN_LOAD_OF;
- if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (TREE_TYPE (t))))
+ unsigned int align = 0;
+ if (flag_sanitize & SANITIZE_ALIGNMENT)
+ {
+ align = min_align_of_type (TREE_TYPE (base));
+ if (align <= 1)
+ align = 0;
+ }
+ if (align == 0 && (flag_sanitize & SANITIZE_NULL) == 0)
+ return;
+ tree t = TREE_OPERAND (base, 0);
+ if (!POINTER_TYPE_P (TREE_TYPE (t)))
+ return;
+ if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (base)) && mem != base)
ikind = UBSAN_MEMBER_ACCESS;
- tree kind = build_int_cst (unsigned_char_type_node, ikind);
- gimple g = gimple_build_call_internal (IFN_UBSAN_NULL, 2, t, kind);
+ tree kind = build_int_cst (build_pointer_type (TREE_TYPE (base)), ikind);
+ tree alignt = build_int_cst (pointer_sized_int_node, align);
+ gimple g = gimple_build_call_internal (IFN_UBSAN_NULL, 3, t, kind, alignt);
gimple_set_location (g, gimple_location (gsi_stmt (*iter)));
gsi_insert_before (iter, g, GSI_SAME_STMT);
}
@@ -643,15 +1001,11 @@ instrument_null (gimple_stmt_iterator gsi, bool is_lhs)
{
gimple stmt = gsi_stmt (gsi);
tree t = is_lhs ? gimple_get_lhs (stmt) : gimple_assign_rhs1 (stmt);
- t = get_base_address (t);
- const enum tree_code code = TREE_CODE (t);
+ tree base = get_base_address (t);
+ const enum tree_code code = TREE_CODE (base);
if (code == MEM_REF
- && TREE_CODE (TREE_OPERAND (t, 0)) == SSA_NAME)
- instrument_mem_ref (TREE_OPERAND (t, 0), &gsi, is_lhs);
- else if (code == ADDR_EXPR
- && POINTER_TYPE_P (TREE_TYPE (t))
- && TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == METHOD_TYPE)
- instrument_member_call (&gsi);
+ && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+ instrument_mem_ref (t, base, &gsi, is_lhs);
}
/* Build an ubsan builtin call for the signed-integer-overflow
@@ -663,24 +1017,35 @@ tree
ubsan_build_overflow_builtin (tree_code code, location_t loc, tree lhstype,
tree op0, tree op1)
{
- tree data = ubsan_create_data ("__ubsan_overflow_data", &loc, NULL,
- ubsan_type_descriptor (lhstype, false),
+ if (flag_sanitize_undefined_trap_on_error)
+ return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
+
+ tree data = ubsan_create_data ("__ubsan_overflow_data", 1, &loc,
+ ubsan_type_descriptor (lhstype), NULL_TREE,
NULL_TREE);
enum built_in_function fn_code;
switch (code)
{
case PLUS_EXPR:
- fn_code = BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW;
+ fn_code = (flag_sanitize_recover & SANITIZE_SI_OVERFLOW)
+ ? BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW
+ : BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW_ABORT;
break;
case MINUS_EXPR:
- fn_code = BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW;
+ fn_code = (flag_sanitize_recover & SANITIZE_SI_OVERFLOW)
+ ? BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW
+ : BUILT_IN_UBSAN_HANDLE_SUB_OVERFLOW_ABORT;
break;
case MULT_EXPR:
- fn_code = BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW;
+ fn_code = (flag_sanitize_recover & SANITIZE_SI_OVERFLOW)
+ ? BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW
+ : BUILT_IN_UBSAN_HANDLE_MUL_OVERFLOW_ABORT;
break;
case NEGATE_EXPR:
- fn_code = BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW;
+ fn_code = (flag_sanitize_recover & SANITIZE_SI_OVERFLOW)
+ ? BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW
+ : BUILT_IN_UBSAN_HANDLE_NEGATE_OVERFLOW_ABORT;
break;
default:
gcc_unreachable ();
@@ -865,69 +1230,482 @@ instrument_bool_enum_load (gimple_stmt_iterator *gsi)
update_stmt (stmt);
}
- tree data = ubsan_create_data ("__ubsan_invalid_value_data",
- &loc, NULL,
- ubsan_type_descriptor (type, false),
- NULL_TREE);
- data = build_fold_addr_expr_loc (loc, data);
- tree fn = builtin_decl_explicit (BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE);
-
gsi2 = gsi_after_labels (then_bb);
- tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
- true, NULL_TREE, true, GSI_SAME_STMT);
- g = gimple_build_call (fn, 2, data, val);
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_invalid_value_data", 1, &loc,
+ ubsan_type_descriptor (type), NULL_TREE,
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc, data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & (TREE_CODE (type) == BOOLEAN_TYPE
+ ? SANITIZE_BOOL : SANITIZE_ENUM))
+ ? BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE
+ : BUILT_IN_UBSAN_HANDLE_LOAD_INVALID_VALUE_ABORT;
+ tree fn = builtin_decl_explicit (bcode);
+
+ tree val = force_gimple_operand_gsi (&gsi2, ubsan_encode_value (urhs),
+ true, NULL_TREE, true,
+ GSI_SAME_STMT);
+ g = gimple_build_call (fn, 2, data, val);
+ }
gimple_set_location (g, loc);
gsi_insert_before (&gsi2, g, GSI_SAME_STMT);
+ ubsan_create_edge (g);
+ *gsi = gsi_for_stmt (stmt);
}
-/* Gate and execute functions for ubsan pass. */
+/* Determine if we can propagate given LOCATION to ubsan_data descriptor to use
+ new style handlers. Libubsan uses heuristics to distinguish between old and
+ new styles and relies on these properties for filename:
+
+ a) Location's filename must not be NULL.
+ b) Location's filename must not be equal to "".
+ c) Location's filename must not be equal to "\1".
+ d) First two bytes of filename must not contain '\xff' symbol. */
-static unsigned int
-ubsan_pass (void)
+static bool
+ubsan_use_new_style_p (location_t loc)
{
- basic_block bb;
- gimple_stmt_iterator gsi;
+ if (loc == UNKNOWN_LOCATION)
+ return false;
- initialize_sanitizer_builtins ();
+ expanded_location xloc = expand_location (loc);
+ if (xloc.file == NULL || strncmp (xloc.file, "\1", 2) == 0
+ || xloc.file[0] == '\0' || xloc.file[0] == '\xff'
+ || xloc.file[1] == '\xff')
+ return false;
- FOR_EACH_BB_FN (bb, cfun)
+ return true;
+}
+
+/* Instrument a floating-point to integer conversion. TYPE is the integer type
+ of the destination, EXPR is the floating-point expression. */
+
+tree
+ubsan_instrument_float_cast (location_t loc, tree type, tree expr)
+{
+ tree expr_type = TREE_TYPE (expr);
+ tree t, tt, fn, min, max;
+ enum machine_mode mode = TYPE_MODE (expr_type);
+ int prec = TYPE_PRECISION (type);
+ bool uns_p = TYPE_UNSIGNED (type);
+ if (loc == UNKNOWN_LOCATION)
+ loc = input_location;
+
+ /* Float to integer conversion first truncates toward zero, so
+ even signed char c = 127.875f; is not problematic.
+ Therefore, we should complain only if EXPR is unordered or smaller
+ or equal than TYPE_MIN_VALUE - 1.0 or greater or equal than
+ TYPE_MAX_VALUE + 1.0. */
+ if (REAL_MODE_FORMAT (mode)->b == 2)
{
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
+ /* For maximum, TYPE_MAX_VALUE might not be representable
+ in EXPR_TYPE, e.g. if TYPE is 64-bit long long and
+ EXPR_TYPE is IEEE single float, but TYPE_MAX_VALUE + 1.0 is
+ either representable or infinity. */
+ REAL_VALUE_TYPE maxval = dconst1;
+ SET_REAL_EXP (&maxval, REAL_EXP (&maxval) + prec - !uns_p);
+ real_convert (&maxval, mode, &maxval);
+ max = build_real (expr_type, maxval);
+
+ /* For unsigned, assume -1.0 is always representable. */
+ if (uns_p)
+ min = build_minus_one_cst (expr_type);
+ else
{
- gimple stmt = gsi_stmt (gsi);
- if (is_gimple_debug (stmt) || gimple_clobber_p (stmt))
+ /* TYPE_MIN_VALUE is generally representable (or -inf),
+ but TYPE_MIN_VALUE - 1.0 might not be. */
+ REAL_VALUE_TYPE minval = dconstm1, minval2;
+ SET_REAL_EXP (&minval, REAL_EXP (&minval) + prec - 1);
+ real_convert (&minval, mode, &minval);
+ real_arithmetic (&minval2, MINUS_EXPR, &minval, &dconst1);
+ real_convert (&minval2, mode, &minval2);
+ if (real_compare (EQ_EXPR, &minval, &minval2)
+ && !real_isinf (&minval))
{
- gsi_next (&gsi);
- continue;
+ /* If TYPE_MIN_VALUE - 1.0 is not representable and
+ rounds to TYPE_MIN_VALUE, we need to subtract
+ more. As REAL_MODE_FORMAT (mode)->p is the number
+ of base digits, we want to subtract a number that
+ will be 1 << (REAL_MODE_FORMAT (mode)->p - 1)
+ times smaller than minval. */
+ minval2 = dconst1;
+ gcc_assert (prec > REAL_MODE_FORMAT (mode)->p);
+ SET_REAL_EXP (&minval2,
+ REAL_EXP (&minval2) + prec - 1
+ - REAL_MODE_FORMAT (mode)->p + 1);
+ real_arithmetic (&minval2, MINUS_EXPR, &minval, &minval2);
+ real_convert (&minval2, mode, &minval2);
}
+ min = build_real (expr_type, minval2);
+ }
+ }
+ else if (REAL_MODE_FORMAT (mode)->b == 10)
+ {
+ /* For _Decimal128 up to 34 decimal digits, - sign,
+ dot, e, exponent. */
+ char buf[64];
+ mpfr_t m;
+ int p = REAL_MODE_FORMAT (mode)->p;
+ REAL_VALUE_TYPE maxval, minval;
+
+ /* Use mpfr_snprintf rounding to compute the smallest
+ representable decimal number greater or equal than
+ 1 << (prec - !uns_p). */
+ mpfr_init2 (m, prec + 2);
+ mpfr_set_ui_2exp (m, 1, prec - !uns_p, GMP_RNDN);
+ mpfr_snprintf (buf, sizeof buf, "%.*RUe", p - 1, m);
+ decimal_real_from_string (&maxval, buf);
+ max = build_real (expr_type, maxval);
+
+ /* For unsigned, assume -1.0 is always representable. */
+ if (uns_p)
+ min = build_minus_one_cst (expr_type);
+ else
+ {
+ /* Use mpfr_snprintf rounding to compute the largest
+ representable decimal number less or equal than
+ (-1 << (prec - 1)) - 1. */
+ mpfr_set_si_2exp (m, -1, prec - 1, GMP_RNDN);
+ mpfr_sub_ui (m, m, 1, GMP_RNDN);
+ mpfr_snprintf (buf, sizeof buf, "%.*RDe", p - 1, m);
+ decimal_real_from_string (&minval, buf);
+ min = build_real (expr_type, minval);
+ }
+ mpfr_clear (m);
+ }
+ else
+ return NULL_TREE;
- if ((flag_sanitize & SANITIZE_SI_OVERFLOW)
- && is_gimple_assign (stmt))
- instrument_si_overflow (gsi);
+ t = fold_build2 (UNLE_EXPR, boolean_type_node, expr, min);
+ tt = fold_build2 (UNGE_EXPR, boolean_type_node, expr, max);
+ t = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, t, tt);
+ if (integer_zerop (t))
+ return NULL_TREE;
+
+ if (flag_sanitize_undefined_trap_on_error)
+ fn = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ location_t *loc_ptr = NULL;
+ unsigned num_locations = 0;
+ initialize_sanitizer_builtins ();
+ /* Figure out if we can propagate location to ubsan_data and use new
+ style handlers in libubsan. */
+ if (ubsan_use_new_style_p (loc))
+ {
+ loc_ptr = &loc;
+ num_locations = 1;
+ }
+ /* Create the __ubsan_handle_float_cast_overflow fn call. */
+ tree data = ubsan_create_data ("__ubsan_float_cast_overflow_data",
+ num_locations, loc_ptr,
+ ubsan_type_descriptor (expr_type),
+ ubsan_type_descriptor (type), NULL_TREE,
+ NULL_TREE);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_FLOAT_CAST)
+ ? BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW
+ : BUILT_IN_UBSAN_HANDLE_FLOAT_CAST_OVERFLOW_ABORT;
+ fn = builtin_decl_explicit (bcode);
+ fn = build_call_expr_loc (loc, fn, 2,
+ build_fold_addr_expr_loc (loc, data),
+ ubsan_encode_value (expr, false));
+ }
+
+ return fold_build3 (COND_EXPR, void_type_node, t, fn, integer_zero_node);
+}
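As a concrete, illustrative instance of the bound computation above: converting IEEE single-precision float to signed char has prec = 8, giving max = 128.0f and min = -129.0f, so the emitted guard is equivalent to:

  #include <math.h>

  /* Values strictly inside (-129.0f, 128.0f) truncate to signed char safely;
     anything else (including NaN) is reported.  */
  int schar_cast_overflows (float f)
  {
    return isnan (f) || f <= -129.0f || f >= 128.0f;
  }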
+
+/* Instrument values passed to function arguments with nonnull attribute. */
- if (flag_sanitize & SANITIZE_NULL)
+static void
+instrument_nonnull_arg (gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ location_t loc[2];
+ /* infer_nonnull_range needs flag_delete_null_pointer_checks set,
+ while for nonnull sanitization it is clear. */
+ int save_flag_delete_null_pointer_checks = flag_delete_null_pointer_checks;
+ flag_delete_null_pointer_checks = 1;
+ loc[0] = gimple_location (stmt);
+ loc[1] = UNKNOWN_LOCATION;
+ for (unsigned int i = 0; i < gimple_call_num_args (stmt); i++)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ if (POINTER_TYPE_P (TREE_TYPE (arg))
+ && infer_nonnull_range (stmt, arg, false, true))
+ {
+ gimple g;
+ if (!is_gimple_val (arg))
{
- if (gimple_store_p (stmt))
- instrument_null (gsi, true);
- if (gimple_assign_load_p (stmt))
- instrument_null (gsi, false);
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg), NULL),
+ arg);
+ gimple_set_location (g, loc[0]);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ arg = gimple_assign_lhs (g);
}
- if (flag_sanitize & (SANITIZE_BOOL | SANITIZE_ENUM)
- && gimple_assign_load_p (stmt))
- instrument_bool_enum_load (&gsi);
+ basic_block then_bb, fallthru_bb;
+ *gsi = create_cond_insert_point (gsi, true, false, true,
+ &then_bb, &fallthru_bb);
+ g = gimple_build_cond (EQ_EXPR, arg,
+ build_zero_cst (TREE_TYPE (arg)),
+ NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc[0]);
+ gsi_insert_after (gsi, g, GSI_NEW_STMT);
+
+ *gsi = gsi_after_labels (then_bb);
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_nonnull_arg_data",
+ 2, loc, NULL_TREE,
+ build_int_cst (integer_type_node,
+ i + 1),
+ NULL_TREE);
+ data = build_fold_addr_expr_loc (loc[0], data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_NONNULL_ATTRIBUTE)
+ ? BUILT_IN_UBSAN_HANDLE_NONNULL_ARG
+ : BUILT_IN_UBSAN_HANDLE_NONNULL_ARG_ABORT;
+ tree fn = builtin_decl_explicit (bcode);
+
+ g = gimple_build_call (fn, 1, data);
+ }
+ gimple_set_location (g, loc[0]);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ ubsan_create_edge (g);
+ }
+ *gsi = gsi_for_stmt (stmt);
+ }
+ flag_delete_null_pointer_checks = save_flag_delete_null_pointer_checks;
+}
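instrument_nonnull_arg keys off the standard GCC nonnull attribute; an illustrative, compilable example (function names made up):

  /* With -fsanitize=nonnull-attribute, the call gets a null check on p inserted
     before it, reporting via the __ubsan_handle_nonnull_arg handler.  */
  extern int parse (const char *p) __attribute__ ((nonnull (1)));

  int wrapper (const char *p)
  {
    return parse (p);
  }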
- gsi_next (&gsi);
+/* Instrument returns in functions with returns_nonnull attribute. */
+
+static void
+instrument_nonnull_return (gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ location_t loc[2];
+ tree arg = gimple_return_retval (stmt);
+ /* infer_nonnull_range needs flag_delete_null_pointer_checks set,
+ while for nonnull return sanitization it is clear. */
+ int save_flag_delete_null_pointer_checks = flag_delete_null_pointer_checks;
+ flag_delete_null_pointer_checks = 1;
+ loc[0] = gimple_location (stmt);
+ loc[1] = UNKNOWN_LOCATION;
+ if (arg
+ && POINTER_TYPE_P (TREE_TYPE (arg))
+ && is_gimple_val (arg)
+ && infer_nonnull_range (stmt, arg, false, true))
+ {
+ basic_block then_bb, fallthru_bb;
+ *gsi = create_cond_insert_point (gsi, true, false, true,
+ &then_bb, &fallthru_bb);
+ gimple g = gimple_build_cond (EQ_EXPR, arg,
+ build_zero_cst (TREE_TYPE (arg)),
+ NULL_TREE, NULL_TREE);
+ gimple_set_location (g, loc[0]);
+ gsi_insert_after (gsi, g, GSI_NEW_STMT);
+
+ *gsi = gsi_after_labels (then_bb);
+ if (flag_sanitize_undefined_trap_on_error)
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
+ else
+ {
+ tree data = ubsan_create_data ("__ubsan_nonnull_return_data",
+ 2, loc, NULL_TREE, NULL_TREE);
+ data = build_fold_addr_expr_loc (loc[0], data);
+ enum built_in_function bcode
+ = (flag_sanitize_recover & SANITIZE_RETURNS_NONNULL_ATTRIBUTE)
+ ? BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN
+ : BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT;
+ tree fn = builtin_decl_explicit (bcode);
+
+ g = gimple_build_call (fn, 1, data);
}
+ gimple_set_location (g, loc[0]);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ ubsan_create_edge (g);
+ *gsi = gsi_for_stmt (stmt);
}
- return 0;
+ flag_delete_null_pointer_checks = save_flag_delete_null_pointer_checks;
}
-static bool
-gate_ubsan (void)
+/* Instrument memory references. Here we check whether the pointer
+ points to an out-of-bounds location. */
+
+static void
+instrument_object_size (gimple_stmt_iterator *gsi, bool is_lhs)
{
- return flag_sanitize & (SANITIZE_NULL | SANITIZE_SI_OVERFLOW
- | SANITIZE_BOOL | SANITIZE_ENUM);
+ gimple stmt = gsi_stmt (*gsi);
+ location_t loc = gimple_location (stmt);
+ tree t = is_lhs ? gimple_get_lhs (stmt) : gimple_assign_rhs1 (stmt);
+ tree type;
+ tree index = NULL_TREE;
+ HOST_WIDE_INT size_in_bytes;
+
+ type = TREE_TYPE (t);
+ if (VOID_TYPE_P (type))
+ return;
+
+ switch (TREE_CODE (t))
+ {
+ case COMPONENT_REF:
+ if (TREE_CODE (t) == COMPONENT_REF
+ && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (t, 1)) != NULL_TREE)
+ {
+ tree repr = DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (t, 1));
+ t = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (t, 0),
+ repr, NULL_TREE);
+ }
+ break;
+ case ARRAY_REF:
+ index = TREE_OPERAND (t, 1);
+ break;
+ case INDIRECT_REF:
+ case MEM_REF:
+ case VAR_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+ break;
+ default:
+ return;
+ }
+
+ size_in_bytes = int_size_in_bytes (type);
+ if (size_in_bytes <= 0)
+ return;
+
+ HOST_WIDE_INT bitsize, bitpos;
+ tree offset;
+ enum machine_mode mode;
+ int volatilep = 0, unsignedp = 0;
+ tree inner = get_inner_reference (t, &bitsize, &bitpos, &offset, &mode,
+ &unsignedp, &volatilep, false);
+
+ if (bitpos % BITS_PER_UNIT != 0
+ || bitsize != size_in_bytes * BITS_PER_UNIT)
+ return;
+
+ bool decl_p = DECL_P (inner);
+ tree base;
+ if (decl_p)
+ base = inner;
+ else if (TREE_CODE (inner) == MEM_REF)
+ base = TREE_OPERAND (inner, 0);
+ else
+ return;
+ tree ptr = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (t)), t);
+
+ while (TREE_CODE (base) == SSA_NAME)
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (base);
+ if (gimple_assign_ssa_name_copy_p (def_stmt)
+ || (gimple_assign_cast_p (def_stmt)
+ && POINTER_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
+ || (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == POINTER_PLUS_EXPR))
+ {
+ tree rhs1 = gimple_assign_rhs1 (def_stmt);
+ if (TREE_CODE (rhs1) == SSA_NAME
+ && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
+ break;
+ else
+ base = rhs1;
+ }
+ else
+ break;
+ }
+
+ if (!POINTER_TYPE_P (TREE_TYPE (base)) && !DECL_P (base))
+ return;
+
+ tree sizet;
+ tree base_addr = base;
+ gimple bos_stmt = NULL;
+ if (decl_p)
+ base_addr = build1 (ADDR_EXPR,
+ build_pointer_type (TREE_TYPE (base)), base);
+ unsigned HOST_WIDE_INT size = compute_builtin_object_size (base_addr, 0);
+ if (size != (unsigned HOST_WIDE_INT) -1)
+ sizet = build_int_cst (sizetype, size);
+ else if (optimize)
+ {
+ if (LOCATION_LOCUS (loc) == UNKNOWN_LOCATION)
+ loc = input_location;
+ /* Generate __builtin_object_size call. */
+ sizet = builtin_decl_explicit (BUILT_IN_OBJECT_SIZE);
+ sizet = build_call_expr_loc (loc, sizet, 2, base_addr,
+ integer_zero_node);
+ sizet = force_gimple_operand_gsi (gsi, sizet, false, NULL_TREE, true,
+ GSI_SAME_STMT);
+ /* If the call above didn't end up being an integer constant, go one
+ statement back and get the __builtin_object_size stmt. Save it,
+ we might need it later. */
+ if (SSA_VAR_P (sizet))
+ {
+ gsi_prev (gsi);
+ bos_stmt = gsi_stmt (*gsi);
+
+ /* Move on to where we were. */
+ gsi_next (gsi);
+ }
+ }
+ else
+ return;
+
+ /* Generate UBSAN_OBJECT_SIZE (ptr, ptr+sizeof(*ptr)-base, objsize, ckind)
+ call. */
+ /* ptr + sizeof (*ptr) - base */
+ t = fold_build2 (MINUS_EXPR, sizetype,
+ fold_convert (pointer_sized_int_node, ptr),
+ fold_convert (pointer_sized_int_node, base_addr));
+ t = fold_build2 (PLUS_EXPR, sizetype, t, TYPE_SIZE_UNIT (type));
+
+ /* Perhaps we can omit the check. */
+ if (TREE_CODE (t) == INTEGER_CST
+ && TREE_CODE (sizet) == INTEGER_CST
+ && tree_int_cst_le (t, sizet))
+ return;
+
+ if (index != NULL_TREE
+ && TREE_CODE (index) == SSA_NAME
+ && TREE_CODE (sizet) == INTEGER_CST)
+ {
+ gimple def = SSA_NAME_DEF_STMT (index);
+ if (is_gimple_assign (def)
+ && gimple_assign_rhs_code (def) == BIT_AND_EXPR
+ && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
+ {
+ tree cst = gimple_assign_rhs2 (def);
+ tree sz = fold_build2 (EXACT_DIV_EXPR, sizetype, sizet,
+ TYPE_SIZE_UNIT (type));
+ if (tree_int_cst_sgn (cst) >= 0
+ && tree_int_cst_lt (cst, sz))
+ return;
+ }
+ }
+
+ if (bos_stmt && gimple_call_builtin_p (bos_stmt, BUILT_IN_OBJECT_SIZE))
+ ubsan_create_edge (bos_stmt);
+
+ /* We have to emit the check. */
+ t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE, true,
+ GSI_SAME_STMT);
+ ptr = force_gimple_operand_gsi (gsi, ptr, true, NULL_TREE, true,
+ GSI_SAME_STMT);
+ tree ckind = build_int_cst (unsigned_char_type_node,
+ is_lhs ? UBSAN_STORE_OF : UBSAN_LOAD_OF);
+ gimple g = gimple_build_call_internal (IFN_UBSAN_OBJECT_SIZE, 4,
+ ptr, t, sizet, ckind);
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
}
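instrument_object_size pairs each suitable memory access with the size __builtin_object_size can prove for its base object. An illustrative example of an access that receives a UBSAN_OBJECT_SIZE check under -fsanitize=object-size (names made up; pointer-based cases may also need optimization so the size becomes known):

  /* The load t[i] is checked against sizeof (t) at run time.  */
  static int t[4];

  int peek (int i)
  {
    return t[i];
  }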
namespace {
@@ -955,11 +1733,92 @@ public:
{}
/* opt_pass methods: */
- bool gate () { return gate_ubsan (); }
- unsigned int execute () { return ubsan_pass (); }
+ bool gate ()
+ {
+ return flag_sanitize & (SANITIZE_NULL | SANITIZE_SI_OVERFLOW
+ | SANITIZE_BOOL | SANITIZE_ENUM
+ | SANITIZE_ALIGNMENT
+ | SANITIZE_NONNULL_ATTRIBUTE
+ | SANITIZE_RETURNS_NONNULL_ATTRIBUTE
+ | SANITIZE_OBJECT_SIZE)
+ && current_function_decl != NULL_TREE
+ && !lookup_attribute ("no_sanitize_undefined",
+ DECL_ATTRIBUTES (current_function_decl));
+ }
+
+ virtual unsigned int execute ();
}; // class pass_ubsan
+/* Gate and execute functions for ubsan pass. */
+
+unsigned int
+pass_ubsan::execute ()
+{
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+
+ initialize_sanitizer_builtins ();
+
+ FOR_EACH_BB_FN (bb, cfun)
+ {
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_debug (stmt) || gimple_clobber_p (stmt))
+ {
+ gsi_next (&gsi);
+ continue;
+ }
+
+ if ((flag_sanitize & SANITIZE_SI_OVERFLOW)
+ && is_gimple_assign (stmt))
+ instrument_si_overflow (gsi);
+
+ if (flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT))
+ {
+ if (gimple_store_p (stmt))
+ instrument_null (gsi, true);
+ if (gimple_assign_load_p (stmt))
+ instrument_null (gsi, false);
+ }
+
+ if (flag_sanitize & (SANITIZE_BOOL | SANITIZE_ENUM)
+ && gimple_assign_load_p (stmt))
+ {
+ instrument_bool_enum_load (&gsi);
+ bb = gimple_bb (stmt);
+ }
+
+ if ((flag_sanitize & SANITIZE_NONNULL_ATTRIBUTE)
+ && is_gimple_call (stmt)
+ && !gimple_call_internal_p (stmt))
+ {
+ instrument_nonnull_arg (&gsi);
+ bb = gimple_bb (stmt);
+ }
+
+ if ((flag_sanitize & SANITIZE_RETURNS_NONNULL_ATTRIBUTE)
+ && gimple_code (stmt) == GIMPLE_RETURN)
+ {
+ instrument_nonnull_return (&gsi);
+ bb = gimple_bb (stmt);
+ }
+
+ if (flag_sanitize & SANITIZE_OBJECT_SIZE)
+ {
+ if (gimple_store_p (stmt))
+ instrument_object_size (&gsi, true);
+ if (gimple_assign_load_p (stmt))
+ instrument_object_size (&gsi, false);
+ }
+
+ gsi_next (&gsi);
+ }
+ }
+ return 0;
+}
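The gate above also honors the no_sanitize_undefined function attribute; illustrative usage (function name made up):

  /* Functions carrying this attribute are skipped entirely by pass_ubsan.  */
  __attribute__ ((no_sanitize_undefined))
  int raw_load (const int *p)
  {
    return *p;
  }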
+
} // anon namespace
gimple_opt_pass *
diff --git a/gcc/ubsan.h b/gcc/ubsan.h
index 67cc6e915e3..dcdbb4fa4c9 100644
--- a/gcc/ubsan.h
+++ b/gcc/ubsan.h
@@ -27,23 +27,27 @@ enum ubsan_null_ckind {
UBSAN_STORE_OF,
UBSAN_REF_BINDING,
UBSAN_MEMBER_ACCESS,
- UBSAN_MEMBER_CALL
+ UBSAN_MEMBER_CALL,
+ UBSAN_CTOR_CALL
};
-/* An extra data used by ubsan pointer checking. */
-struct ubsan_mismatch_data {
- tree align;
- tree ckind;
+/* This controls how ubsan prints types. Used in ubsan_type_descriptor. */
+enum ubsan_print_style {
+ UBSAN_PRINT_NORMAL,
+ UBSAN_PRINT_POINTER,
+ UBSAN_PRINT_ARRAY
};
-extern void ubsan_expand_null_ifn (gimple_stmt_iterator);
-extern tree ubsan_instrument_unreachable (location_t);
-extern tree ubsan_create_data (const char *, const location_t *,
- const struct ubsan_mismatch_data *, ...);
-extern tree ubsan_type_descriptor (tree, bool);
+extern bool ubsan_expand_bounds_ifn (gimple_stmt_iterator *);
+extern bool ubsan_expand_null_ifn (gimple_stmt_iterator *);
+extern bool ubsan_expand_objsize_ifn (gimple_stmt_iterator *);
+extern bool ubsan_instrument_unreachable (gimple_stmt_iterator *);
+extern tree ubsan_create_data (const char *, int, const location_t *, ...);
+extern tree ubsan_type_descriptor (tree, enum ubsan_print_style = UBSAN_PRINT_NORMAL);
extern tree ubsan_encode_value (tree, bool = false);
extern bool is_ubsan_builtin_p (tree);
extern tree ubsan_build_overflow_builtin (tree_code, location_t, tree, tree, tree);
+extern tree ubsan_instrument_float_cast (location_t, tree, tree);
+extern tree ubsan_get_source_location_type (void);
#endif /* GCC_UBSAN_H */
-
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
new file mode 100644
index 00000000000..cbb7f273c2f
--- /dev/null
+++ b/gcc/wide-int.h
@@ -0,0 +1,3169 @@
+/* Operations with very long integers. -*- C++ -*-
+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef WIDE_INT_H
+#define WIDE_INT_H
+
+/* wide-int.[cc|h] implements a class that efficiently performs
+ mathematical operations on finite precision integers. wide_ints
+ are designed to be transient - they are not for long term storage
+ of values. There is tight integration between wide_ints and the
+ other longer storage GCC representations (rtl and tree).
+
+ The actual precision of a wide_int depends on the flavor. There
+ are three predefined flavors:
+
+ 1) wide_int (the default). This flavor does the math in the
+ precision of its input arguments. It is assumed (and checked)
+ that the precisions of the operands and results are consistent.
+ This is the most efficient flavor. It is not possible to examine
+ bits above the precision that has been specified. Because of
+ this, the default flavor has semantics that are simple to
+ understand and in general model the underlying hardware that the
+ compiler is targeted for.
+
+ This flavor must be used at the RTL level of gcc because there
+ is, in general, not enough information in the RTL representation
+ to extend a value beyond the precision specified in the mode.
+
+ This flavor should also be used at the TREE and GIMPLE levels of
+ the compiler except for the circumstances described in the
+ descriptions of the other two flavors.
+
+ The default wide_int representation does not contain any
+ information inherent about signedness of the represented value,
+ so it can be used to represent both signed and unsigned numbers.
+ For operations where the results depend on signedness (full width
+ multiply, division, shifts, comparisons, and operations that need
+ overflow detected), the signedness must be specified separately.
+
+ 2) offset_int. This is a fixed size representation that is
+ guaranteed to be large enough to compute any bit or byte sized
+ address calculation on the target. Currently the value is 64 + 4
+ bits rounded up to the next number even multiple of
+ HOST_BITS_PER_WIDE_INT (but this can be changed when the first
+ port needs more than 64 bits for the size of a pointer).
+
+ This flavor can be used for all address math on the target. In
+ this representation, the values are sign or zero extended based
+ on their input types to the internal precision. All math is done
+ in this precision and then the values are truncated to fit in the
+ result type. Unlike most gimple or rtl intermediate code, it is
+ not useful to perform the address arithmetic at the same
+ precision in which the operands are represented because there has
+ been no effort by the front ends to convert most addressing
+ arithmetic to canonical types.
+
+ 3) widest_int. This representation is an approximation of
+ infinite precision math. However, it is not really infinite
+ precision math as in the GMP library. It is really finite
+ precision math where the precision is 4 times the size of the
+ largest integer that the target port can represent.
+
+ widest_int is supposed to be wider than any number that it needs to
+ store, meaning that there is always at least one leading sign bit.
+ All widest_int values are therefore signed.
+
+ There are several places in the GCC where this should/must be used:
+
+ * Code that does induction variable optimizations. This code
+ works with induction variables of many different types at the
+ same time. Because of this, it ends up doing many different
+ calculations where the operands are not compatible types. The
+ widest_int makes this easy, because it provides a field where
+ nothing is lost when converting from any variable,
+
+ * There are a small number of passes that currently use the
+ widest_int that should use the default. These should be
+ changed.
+
+ There are surprising features of offset_int and widest_int
+ that the users should be careful about:
+
+ 1) Shifts and rotations are just weird. You have to specify the
+ precision in which the shift or rotate is to happen. The bits
+ above this precision remain unchanged. While this is what you
+ want, it is clearly non-obvious.
+
+ 2) Larger precision math sometimes does not produce the same
+ answer as would be expected for doing the math at the proper
+ precision. In particular, a multiply followed by a divide will
+ produce a different answer if the first product is larger than
+ what can be represented in the input precision.
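+
+ For example (an illustrative calculation): with 4-bit unsigned
+ operands, (6 * 3) / 3 evaluated in 4 bits wraps the product 18 to
+ 2 and therefore yields 0, whereas evaluating the same expression
+ in a wider precision keeps the product 18 and yields 6.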
+
+ The offset_int and the widest_int flavors are more expensive
+ than the default wide_int, so in addition to the caveats that apply
+ to these two, the default is the preferred representation.
+
+ All three flavors of wide_int are represented as a vector of
+ HOST_WIDE_INTs. The default and widest_int vectors contain enough elements
+ to hold a value of MAX_BITSIZE_MODE_ANY_INT bits. offset_int contains only
+ enough elements to hold ADDR_MAX_PRECISION bits. The values are stored
+ in the vector with the least significant HOST_BITS_PER_WIDE_INT bits
+ in element 0.
+
+ The default wide_int contains three fields: the vector (VAL),
+ the precision and a length (LEN). The length is the number of HWIs
+ needed to represent the value. widest_int and offset_int have a
+ constant precision that cannot be changed, so they only store the
+ VAL and LEN fields.
+
+ Since most integers used in a compiler are small values, it is
+ generally profitable to use a representation of the value that is
+ as small as possible. LEN is used to indicate the number of
+ elements of the vector that are in use. The numbers are stored as
+ sign extended numbers as a means of compression. Leading
+ HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
+ as long as they can be reconstructed from the top bit that is being
+ represented.
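+
+ For example (assuming 64-bit HWIs), a 128-bit value of -1 is
+ stored with LEN = 1 and VAL = { -1 }, whereas the 128-bit value
+ 2^64 - 1 needs LEN = 2 and VAL = { -1, 0 }, because the zero
+ upper block cannot be inferred from the sign of the lower block.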
+
+ The precision and length of a wide_int are always greater than 0.
+ Any bits in a wide_int above the precision are sign-extended from the
+ most significant bit. For example, a 4-bit value 0x8 is represented as
+ VAL = { 0xf...fff8 }. However, as an optimization, we allow other integer
+ constants to be represented with undefined bits above the precision.
+ This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
+ so that the INTEGER_CST representation can be used both in TYPE_PRECISION
+ and in wider precisions.
+
+ There are constructors to create the various forms of wide_int from
+ trees, rtl and constants. For trees you can simply say:
+
+ tree t = ...;
+ wide_int x = t;
+
+ However, a little more syntax is required for rtl constants since
+ they do not have an explicit precision. To make an rtl into a
+ wide_int, you have to pair it with a mode. The canonical way to do
+ this is with std::make_pair as in:
+
+ rtx r = ...
+ wide_int x = std::make_pair (r, mode);
+
+ Similarly, a wide_int can only be constructed from a host value if
+ the target precision is given explicitly, such as in:
+
+ wide_int x = wi::shwi (c, prec); // sign-extend C if necessary
+ wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary
+
+ However, offset_int and widest_int have an inherent precision and so
+ can be initialized directly from a host value:
+
+ offset_int x = (int) c; // sign-extend C
+ widest_int x = (unsigned int) c; // zero-extend C
+
+ It is also possible to do arithmetic directly on trees, rtxes and
+ constants. For example:
+
+ wi::add (t1, t2); // add equal-sized INTEGER_CSTs t1 and t2
+ wi::add (t1, 1); // add 1 to INTEGER_CST t1
+ wi::add (r1, r2); // add equal-sized rtx constants r1 and r2
+ wi::lshift (1, 100); // 1 << 100 as a widest_int
+
+ Many binary operations place restrictions on the combinations of inputs,
+ using the following rules:
+
+ - {tree, rtx, wide_int} op {tree, rtx, wide_int} -> wide_int
+ The inputs must be the same precision. The result is a wide_int
+ of the same precision.
+
+ - {tree, rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
+ (un)signed HOST_WIDE_INT op {tree, rtx, wide_int} -> wide_int
+ The HOST_WIDE_INT is extended or truncated to the precision of
+ the other input. The result is a wide_int of the same precision
+ as that input.
+
+ - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int
+ The inputs are extended to widest_int precision and produce a
+ widest_int result.
+
+ - offset_int op offset_int -> offset_int
+ offset_int op (un)signed HOST_WIDE_INT -> offset_int
+ (un)signed HOST_WIDE_INT op offset_int -> offset_int
+
+ - widest_int op widest_int -> widest_int
+ widest_int op (un)signed HOST_WIDE_INT -> widest_int
+ (un)signed HOST_WIDE_INT op widest_int -> widest_int
+
+ Other combinations like:
+
+ - widest_int op offset_int and
+ - wide_int op offset_int
+
+ are not allowed. The inputs should instead be extended or truncated
+ so that they match.
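+
+ For example (an illustrative sketch), an offset_int OFF can be
+ combined with a widest_int W by widening it explicitly first:
+
+ widest_int sum = w + widest_int::from (off, SIGNED);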
+
+ The inputs to comparison functions like wi::eq_p and wi::lts_p
+ follow the same compatibility rules, although their return types
+ are different. Unary functions on X produce a result of the same
+ type as the binary operation X + X. Shift functions X op Y likewise
+ produce a result of the same type as X + X; the precision of the
+ shift amount Y can be arbitrarily different from X. */
+
+
+#include <utility>
+#include "system.h"
+#include "hwint.h"
+#include "signop.h"
+#include "insn-modes.h"
+
+#if 0
+#define DEBUG_WIDE_INT
+#endif
+
+/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
+ early examination of the target's mode file. The WIDE_INT_MAX_ELTS
+ can accommodate at least 1 more bit so that unsigned numbers of that
+ mode can be represented. Note that it is still possible to create
+ fixed_wide_ints that have precisions greater than
+ MAX_BITSIZE_MODE_ANY_INT. This can be useful when representing a
+ double-width multiplication result, for example. */
+#define WIDE_INT_MAX_ELTS \
+ ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)
+
+#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
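+
+/* For example (illustrative figures only): on a host with 64-bit HWIs
+ and a target whose widest integer mode is 128 bits, WIDE_INT_MAX_ELTS
+ is (128 + 64) / 64 = 3 and WIDE_INT_MAX_PRECISION is 192 bits. */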
+
+/* This is the max size of any pointer on any machine. It does not
+ seem to be as easy to sniff this out of the machine description as
+ it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
+ multiple address sizes and may have different address sizes for
+ different address spaces. However, currently the largest pointer
+ on any platform is 64 bits. When that changes, then it is likely
+ that a target hook should be defined so that targets can make this
+ value larger for those targets. */
+#define ADDR_MAX_BITSIZE 64
+
+/* This is the internal precision used when doing any address
+ arithmetic. The '4' is really 3 + 1. Three of the bits are for
+ the number of extra bits needed to do bit addresses and the single
+ extra bit allows everything to be signed without losing any
+ precision. Then
+ everything is rounded up to the next HWI for efficiency. */
+#define ADDR_MAX_PRECISION \
+ ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) & ~(HOST_BITS_PER_WIDE_INT - 1))
+
+/* The number of HWIs needed to store an offset_int. */
+#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)
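+
+/* For example, with 64-bit HWIs, ADDR_MAX_PRECISION is
+ (64 + 4 + 63) & ~63 = 128 bits and OFFSET_INT_ELTS is therefore 2. */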
+
+/* The type of result produced by a binary operation on types T1 and T2.
+ Defined purely for brevity. */
+#define WI_BINARY_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::result_type
+
+/* The type of result produced by a unary operation on type T. */
+#define WI_UNARY_RESULT(T) \
+ typename wi::unary_traits <T>::result_type
+
+/* Define a variable RESULT to hold the result of a binary operation on
+ X and Y, which have types T1 and T2 respectively. Define VAR to
+ point to the blocks of RESULT. Once the user of the macro has
+ filled in VAR, it should call RESULT.set_len to set the number
+ of initialized blocks. */
+#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
+ WI_BINARY_RESULT (T1, T2) RESULT = \
+ wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
+
+/* Similar for the result of a unary operation on X, which has type T. */
+#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
+ WI_UNARY_RESULT (T) RESULT = \
+ wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
+
+template <typename T> struct generic_wide_int;
+template <int N> struct fixed_wide_int_storage;
+struct wide_int_storage;
+
+/* An N-bit integer. Until we can use typedef templates, use this instead. */
+#define FIXED_WIDE_INT(N) \
+ generic_wide_int < fixed_wide_int_storage <N> >
+
+typedef generic_wide_int <wide_int_storage> wide_int;
+typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
+typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
+
+template <bool SE>
+struct wide_int_ref_storage;
+
+typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
+
+/* This can be used instead of wide_int_ref if the referenced value is
+ known to have type T. It carries across properties of T's representation,
+ such as whether excess upper bits in a HWI are defined, and can therefore
+ help avoid redundant work.
+
+ The macro could be replaced with a template typedef, once we're able
+ to use those. */
+#define WIDE_INT_REF_FOR(T) \
+ generic_wide_int \
+ <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended> >
+
+namespace wi
+{
+ /* Classifies an integer based on its precision. */
+ enum precision_type {
+ /* The integer has both a precision and defined signedness. This allows
+ the integer to be converted to any width, since we know whether to fill
+ any extra bits with zeros or signs. */
+ FLEXIBLE_PRECISION,
+
+ /* The integer has a variable precision but no defined signedness. */
+ VAR_PRECISION,
+
+ /* The integer has a constant precision (known at GCC compile time)
+ but no defined signedness. */
+ CONST_PRECISION
+ };
+
+ /* This class, which has no default implementation, is expected to
+ provide the following members:
+
+ static const enum precision_type precision_type;
+ Classifies the type of T.
+
+ static const unsigned int precision;
+ Only defined if precision_type == CONST_PRECISION. Specifies the
+ precision of all integers of type T.
+
+ static const bool host_dependent_precision;
+ True if the precision of T depends (or can depend) on the host.
+
+ static unsigned int get_precision (const T &x)
+ Return the number of bits in X.
+
+ static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, const T &x)
+ Decompose X as a PRECISION-bit integer, returning the associated
+ wi::storage_ref. SCRATCH is available as scratch space if needed.
+ The routine should assert that PRECISION is acceptable. */
+ template <typename T> struct int_traits;
+
+ /* This class provides a single type, result_type, which specifies the
+ type of integer produced by a binary operation whose inputs have
+ types T1 and T2. The definition should be symmetric. */
+ template <typename T1, typename T2,
+ enum precision_type P1 = int_traits <T1>::precision_type,
+ enum precision_type P2 = int_traits <T2>::precision_type>
+ struct binary_traits;
+
+ /* The result of a unary operation on T is the same as the result of
+ a binary operation on two values of type T. */
+ template <typename T>
+ struct unary_traits : public binary_traits <T, T> {};
+
+ /* Specify the result type for each supported combination of binary
+ inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
+ mixed, in order to give stronger type checking. When both inputs
+ are CONST_PRECISION, they must have the same precision. */
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef widest_int result_type;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T2>::precision> > result_type;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ };
+
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+}
+
+/* Public functions for querying and operating on integers. */
+namespace wi
+{
+ template <typename T>
+ unsigned int get_precision (const T &);
+
+ template <typename T1, typename T2>
+ unsigned int get_binary_precision (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ void copy (T1 &, const T2 &);
+
+#define UNARY_PREDICATE \
+ template <typename T> bool
+#define UNARY_FUNCTION \
+ template <typename T> WI_UNARY_RESULT (T)
+#define BINARY_PREDICATE \
+ template <typename T1, typename T2> bool
+#define BINARY_FUNCTION \
+ template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
+#define SHIFT_FUNCTION \
+ template <typename T1, typename T2> WI_UNARY_RESULT (T1)
+
+ UNARY_PREDICATE fits_shwi_p (const T &);
+ UNARY_PREDICATE fits_uhwi_p (const T &);
+ UNARY_PREDICATE neg_p (const T &, signop = SIGNED);
+
+ template <typename T>
+ HOST_WIDE_INT sign_mask (const T &);
+
+ BINARY_PREDICATE eq_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ne_p (const T1 &, const T2 &);
+ BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE lts_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE les_p (const T1 &, const T2 &);
+ BINARY_PREDICATE leu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE gts_p (const T1 &, const T2 &);
+ BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE ges_p (const T1 &, const T2 &);
+ BINARY_PREDICATE geu_p (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ int cmp (const T1 &, const T2 &, signop);
+
+ template <typename T1, typename T2>
+ int cmps (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ int cmpu (const T1 &, const T2 &);
+
+ UNARY_FUNCTION bit_not (const T &);
+ UNARY_FUNCTION neg (const T &);
+ UNARY_FUNCTION neg (const T &, bool *);
+ UNARY_FUNCTION abs (const T &);
+ UNARY_FUNCTION ext (const T &, unsigned int, signop);
+ UNARY_FUNCTION sext (const T &, unsigned int);
+ UNARY_FUNCTION zext (const T &, unsigned int);
+ UNARY_FUNCTION set_bit (const T &, unsigned int);
+
+ BINARY_FUNCTION min (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smin (const T1 &, const T2 &);
+ BINARY_FUNCTION umin (const T1 &, const T2 &);
+ BINARY_FUNCTION max (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smax (const T1 &, const T2 &);
+ BINARY_FUNCTION umax (const T1 &, const T2 &);
+
+ BINARY_FUNCTION bit_and (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION sub (const T1 &, const T2 &);
+ BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION mul (const T1 &, const T2 &);
+ BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
+ BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+ BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);
+
+ template <typename T1, typename T2>
+ bool multiple_of_p (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+
+ SHIFT_FUNCTION lshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION arshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
+ SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
+ SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);
+
+#undef SHIFT_FUNCTION
+#undef BINARY_PREDICATE
+#undef BINARY_FUNCTION
+#undef UNARY_PREDICATE
+#undef UNARY_FUNCTION
+
+ bool only_sign_bit_p (const wide_int_ref &, unsigned int);
+ bool only_sign_bit_p (const wide_int_ref &);
+ int clz (const wide_int_ref &);
+ int clrsb (const wide_int_ref &);
+ int ctz (const wide_int_ref &);
+ int exact_log2 (const wide_int_ref &);
+ int floor_log2 (const wide_int_ref &);
+ int ffs (const wide_int_ref &);
+ int popcount (const wide_int_ref &);
+ int parity (const wide_int_ref &);
+
+ template <typename T>
+ unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);
+
+ template <typename T>
+ unsigned int min_precision (const T &, signop);
+}
+
+namespace wi
+{
+ /* Contains the components of a decomposed integer for easy, direct
+ access. */
+ struct storage_ref
+ {
+ storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);
+
+ const HOST_WIDE_INT *val;
+ unsigned int len;
+ unsigned int precision;
+
+ /* Provide enough trappings for this class to act as storage for
+ generic_wide_int. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ };
+}
+
+inline::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
+ unsigned int len_in,
+ unsigned int precision_in)
+ : val (val_in), len (len_in), precision (precision_in)
+{
+}
+
+inline unsigned int
+wi::storage_ref::get_len () const
+{
+ return len;
+}
+
+inline unsigned int
+wi::storage_ref::get_precision () const
+{
+ return precision;
+}
+
+inline const HOST_WIDE_INT *
+wi::storage_ref::get_val () const
+{
+ return val;
+}
+
+/* This class defines an integer type using the storage provided by the
+ template argument. The storage class must provide the following
+ functions:
+
+ unsigned int get_precision () const
+ Return the number of bits in the integer.
+
+ HOST_WIDE_INT *get_val () const
+ Return a pointer to the array of blocks that encodes the integer.
+
+ unsigned int get_len () const
+ Return the number of blocks in get_val (). If this is smaller
+ than the number of blocks implied by get_precision (), the
+ remaining blocks are sign extensions of block get_len () - 1.
+
+ Although not required by generic_wide_int itself, writable storage
+ classes can also provide the following functions:
+
+ HOST_WIDE_INT *write_val ()
+ Get a modifiable version of get_val ()
+
+ unsigned int set_len (unsigned int len)
+ Set the value returned by get_len () to LEN. */
+template <typename storage>
+class GTY(()) generic_wide_int : public storage
+{
+public:
+ generic_wide_int ();
+
+ template <typename T>
+ generic_wide_int (const T &);
+
+ template <typename T>
+ generic_wide_int (const T &, unsigned int);
+
+ /* Conversions. */
+ HOST_WIDE_INT to_shwi (unsigned int) const;
+ HOST_WIDE_INT to_shwi () const;
+ unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
+ unsigned HOST_WIDE_INT to_uhwi () const;
+ HOST_WIDE_INT to_short_addr () const;
+
+ /* Public accessors for the interior of a wide int. */
+ HOST_WIDE_INT sign_mask () const;
+ HOST_WIDE_INT elt (unsigned int) const;
+ unsigned HOST_WIDE_INT ulow () const;
+ unsigned HOST_WIDE_INT uhigh () const;
+ HOST_WIDE_INT slow () const;
+ HOST_WIDE_INT shigh () const;
+
+ template <typename T>
+ generic_wide_int &operator = (const T &);
+
+#define BINARY_PREDICATE(OP, F) \
+ template <typename T> \
+ bool OP (const T &c) const { return wi::F (*this, c); }
+
+#define UNARY_OPERATOR(OP, F) \
+ WI_UNARY_RESULT (generic_wide_int) OP () const { return wi::F (*this); }
+
+#define BINARY_OPERATOR(OP, F) \
+ template <typename T> \
+ WI_BINARY_RESULT (generic_wide_int, T) \
+ OP (const T &c) const { return wi::F (*this, c); }
+
+#define ASSIGNMENT_OPERATOR(OP, F) \
+ template <typename T> \
+ generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
+
+#define INCDEC_OPERATOR(OP, DELTA) \
+ generic_wide_int &OP () { *this += DELTA; return *this; }
+
+ UNARY_OPERATOR (operator ~, bit_not)
+ UNARY_OPERATOR (operator -, neg)
+ BINARY_PREDICATE (operator ==, eq_p)
+ BINARY_PREDICATE (operator !=, ne_p)
+ BINARY_OPERATOR (operator &, bit_and)
+ BINARY_OPERATOR (and_not, bit_and_not)
+ BINARY_OPERATOR (operator |, bit_or)
+ BINARY_OPERATOR (or_not, bit_or_not)
+ BINARY_OPERATOR (operator ^, bit_xor)
+ BINARY_OPERATOR (operator +, add)
+ BINARY_OPERATOR (operator -, sub)
+ BINARY_OPERATOR (operator *, mul)
+ ASSIGNMENT_OPERATOR (operator &=, bit_and)
+ ASSIGNMENT_OPERATOR (operator |=, bit_or)
+ ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
+ ASSIGNMENT_OPERATOR (operator +=, add)
+ ASSIGNMENT_OPERATOR (operator -=, sub)
+ ASSIGNMENT_OPERATOR (operator *=, mul)
+ INCDEC_OPERATOR (operator ++, 1)
+ INCDEC_OPERATOR (operator --, -1)
+
+#undef BINARY_PREDICATE
+#undef UNARY_OPERATOR
+#undef BINARY_OPERATOR
+#undef ASSIGNMENT_OPERATOR
+#undef INCDEC_OPERATOR
+
+ char *dump (char *) const;
+
+ static const bool is_sign_extended
+ = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
+};
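+
+/* For example (illustrative only), for two equal-precision wide_ints A
+ and B, the operators above make A + B, A * B and A == B forward to
+ wi::add, wi::mul and wi::eq_p respectively, and A += 1 is equivalent
+ to A = wi::add (A, 1). */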
+
+template <typename storage>
+inline generic_wide_int <storage>::generic_wide_int () {}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x)
+ : storage (x)
+{
+}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x,
+ unsigned int precision)
+ : storage (x, precision)
+{
+}
+
+/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
+ If THIS does not fit in PRECISION, the information is lost. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi (unsigned int precision) const
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return sext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
+
+/* Return THIS as a signed HOST_WIDE_INT, in its natural precision. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi () const
+{
+ if (is_sign_extended)
+ return this->get_val ()[0];
+ else
+ return to_shwi (this->get_precision ());
+}
+
+/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
+ PRECISION. If THIS does not fit in PRECISION, the information
+ is lost. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi (unsigned int precision) const
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return zext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
+
+/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi () const
+{
+ return to_uhwi (this->get_precision ());
+}
+
+/* TODO: The compiler is half converted from using HOST_WIDE_INT to
+ represent addresses to using offset_int to represent addresses.
+ We use to_short_addr at the interface from new code to old,
+ unconverted code. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_short_addr () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the implicit value of blocks above get_len (). */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::sign_mask () const
+{
+ unsigned int len = this->get_len ();
+ unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
+ if (!is_sign_extended)
+ {
+ unsigned int precision = this->get_precision ();
+ int excess = len * HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ high <<= excess;
+ }
+ return (HOST_WIDE_INT) (high) < 0 ? -1 : 0;
+}
+
+/* Return the signed value of the least-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::slow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the signed value of the most-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::shigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return the unsigned value of the least-significant
+ explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::ulow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the unsigned value of the most-significant
+ explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::uhigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return block I, which might be implicitly or explicitly encoded. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::elt (unsigned int i) const
+{
+ if (i >= this->get_len ())
+ return sign_mask ();
+ else
+ return this->get_val ()[i];
+}
+
+template <typename storage>
+template <typename T>
+generic_wide_int <storage> &
+generic_wide_int <storage>::operator = (const T &x)
+{
+ storage::operator = (x);
+ return *this;
+}
+
+namespace wi
+{
+ template <typename storage>
+ struct int_traits < generic_wide_int <storage> >
+ : public wi::int_traits <storage>
+ {
+ static unsigned int get_precision (const generic_wide_int <storage> &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const generic_wide_int <storage> &);
+ };
+}
+
+template <typename storage>
+inline unsigned int
+wi::int_traits < generic_wide_int <storage> >::
+get_precision (const generic_wide_int <storage> &x)
+{
+ return x.get_precision ();
+}
+
+template <typename storage>
+inline wi::storage_ref
+wi::int_traits < generic_wide_int <storage> >::
+decompose (HOST_WIDE_INT *, unsigned int precision,
+ const generic_wide_int <storage> &x)
+{
+ gcc_checking_assert (precision == x.get_precision ());
+ return wi::storage_ref (x.get_val (), x.get_len (), precision);
+}
+
+/* Provide the storage for a wide_int_ref. This acts like a read-only
+ wide_int, with the optimization that VAL is normally a pointer to
+ another integer's storage, so that no array copy is needed. */
+template <bool SE>
+struct wide_int_ref_storage : public wi::storage_ref
+{
+private:
+ /* Scratch space that can be used when decomposing the original integer.
+ It must live as long as this object. */
+ HOST_WIDE_INT scratch[2];
+
+public:
+ wide_int_ref_storage (const wi::storage_ref &);
+
+ template <typename T>
+ wide_int_ref_storage (const T &);
+
+ template <typename T>
+ wide_int_ref_storage (const T &, unsigned int);
+};
+
+/* Create a reference from an existing reference. */
+template <bool SE>
+inline wide_int_ref_storage <SE>::
+wide_int_ref_storage (const wi::storage_ref &x)
+ : storage_ref (x)
+{}
+
+/* Create a reference to integer X in its natural precision. Note
+ that the natural precision is host-dependent for primitive
+ types. */
+template <bool SE>
+template <typename T>
+inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x)
+ : storage_ref (wi::int_traits <T>::decompose (scratch,
+ wi::get_precision (x), x))
+{
+}
+
+/* Create a reference to integer X in precision PRECISION. */
+template <bool SE>
+template <typename T>
+inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x,
+ unsigned int precision)
+ : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
+{
+}
+
+namespace wi
+{
+ template <bool SE>
+ struct int_traits <wide_int_ref_storage <SE> >
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+ static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = SE;
+ };
+}
+
+namespace wi
+{
+ unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ signop sgn);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool = true);
+}
+
+/* The storage used by wide_int. */
+class GTY(()) wide_int_storage
+{
+private:
+ HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
+ unsigned int len;
+ unsigned int precision;
+
+public:
+ wide_int_storage ();
+ template <typename T>
+ wide_int_storage (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ static wide_int from (const wide_int_ref &, unsigned int, signop);
+ static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
+ unsigned int, bool = true);
+ static wide_int create (unsigned int);
+
+ /* FIXME: target-dependent, so should disappear. */
+ wide_int bswap () const;
+};
+
+namespace wi
+{
+ template <>
+ struct int_traits <wide_int_storage>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* Guaranteed by a static assert in the wide_int_storage constructor. */
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ template <typename T1, typename T2>
+ static wide_int get_binary_result (const T1 &, const T2 &);
+ };
+}
+
+inline wide_int_storage::wide_int_storage () {}
+
+/* Initialize the storage from integer X, in its natural precision.
+ Note that we do not allow integers with host-dependent precision
+ to become wide_ints; wide_ints must always be logically independent
+ of the host. */
+template <typename T>
+inline wide_int_storage::wide_int_storage (const T &x)
+{
+ STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
+ WIDE_INT_REF_FOR (T) xi (x);
+ precision = xi.precision;
+ wi::copy (*this, xi);
+}
+
+inline unsigned int
+wide_int_storage::get_precision () const
+{
+ return precision;
+}
+
+inline const HOST_WIDE_INT *
+wide_int_storage::get_val () const
+{
+ return val;
+}
+
+inline unsigned int
+wide_int_storage::get_len () const
+{
+ return len;
+}
+
+inline HOST_WIDE_INT *
+wide_int_storage::write_val ()
+{
+ return val;
+}
+
+inline void
+wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
+{
+ len = l;
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
+ val[len - 1] = sext_hwi (val[len - 1],
+ precision % HOST_BITS_PER_WIDE_INT);
+}
+
+/* Treat X as having signedness SGN and convert it to a PRECISION-bit
+ number. */
+inline wide_int
+wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
+ signop sgn)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, precision, sgn));
+ return result;
+}
+
+/* Create a wide_int from the explicit block encoding given by VAL and
+ LEN. PRECISION is the precision of the integer. NEED_CANON_P is
+ true if the encoding may have redundant trailing blocks. */
+inline wide_int
+wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
+ unsigned int precision, bool need_canon_p)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::from_array (result.write_val (), val, len, precision,
+ need_canon_p));
+ return result;
+}
+
+/* Return an uninitialized wide_int with precision PRECISION. */
+inline wide_int
+wide_int_storage::create (unsigned int precision)
+{
+ wide_int x;
+ x.precision = precision;
+ return x;
+}
+
+template <typename T1, typename T2>
+inline wide_int
+wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
+{
+ /* This shouldn't be used for two flexible-precision inputs. */
+ STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
+ || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
+ if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
+ return wide_int::create (wi::get_precision (y));
+ else
+ return wide_int::create (wi::get_precision (x));
+}
+
+/* The storage used by FIXED_WIDE_INT (N). */
+template <int N>
+class GTY(()) fixed_wide_int_storage
+{
+private:
+ HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
+ unsigned int len;
+
+public:
+ fixed_wide_int_storage ();
+ template <typename T>
+ fixed_wide_int_storage (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
+ static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
+ bool = true);
+};
+
+namespace wi
+{
+ template <int N>
+ struct int_traits < fixed_wide_int_storage <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const unsigned int precision = N;
+ template <typename T1, typename T2>
+ static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
+ };
+}
+
+template <int N>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
+
+/* Initialize the storage from integer X, in precision N. */
+template <int N>
+template <typename T>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
+{
+ /* Check for type compatibility. We don't want to initialize a
+ fixed-width integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+}
+
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+fixed_wide_int_storage <N>::get_val () const
+{
+ return val;
+}
+
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_len () const
+{
+ return len;
+}
+
+template <int N>
+inline HOST_WIDE_INT *
+fixed_wide_int_storage <N>::write_val ()
+{
+ return val;
+}
+
+template <int N>
+inline void
+fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
+{
+ len = l;
+ /* There are no excess bits in val[len - 1]. */
+ STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
+}
+
+/* Treat X as having signedness SGN and convert it to an N-bit number. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
+{
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, N, sgn));
+ return result;
+}
+
+/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
+ VAL and LEN. NEED_CANON_P is true if the encoding may have redundant
+ trailing blocks. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
+ unsigned int len,
+ bool need_canon_p)
+{
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::from_array (result.write_val (), val, len,
+ N, need_canon_p));
+ return result;
+}
+
+template <int N>
+template <typename T1, typename T2>
+inline FIXED_WIDE_INT (N)
+wi::int_traits < fixed_wide_int_storage <N> >::
+get_binary_result (const T1 &, const T2 &)
+{
+ return FIXED_WIDE_INT (N) ();
+}
+
+/* A reference to one element of a trailing_wide_ints structure. */
+class trailing_wide_int_storage
+{
+private:
+ /* The precision of the integer, which is a fixed property of the
+ parent trailing_wide_ints. */
+ unsigned int m_precision;
+
+ /* A pointer to the length field. */
+ unsigned char *m_len;
+
+ /* A pointer to the HWI array. There are enough elements to hold all
+ values of precision M_PRECISION. */
+ HOST_WIDE_INT *m_val;
+
+public:
+ trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ template <typename T>
+ trailing_wide_int_storage &operator = (const T &);
+};
+
+typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;
+
+/* trailing_wide_int behaves like a wide_int. */
+namespace wi
+{
+ template <>
+ struct int_traits <trailing_wide_int_storage>
+ : public int_traits <wide_int_storage> {};
+}
+
+/* An array of N wide_int-like objects that can be put at the end of
+ a variable-sized structure. Use extra_size to calculate how many
+ bytes beyond the sizeof need to be allocated. Use set_precision
+ to initialize the structure. */
+template <int N>
+class GTY(()) trailing_wide_ints
+{
+private:
+ /* The shared precision of each number. */
+ unsigned short m_precision;
+
+ /* The shared maximum length of each number. */
+ unsigned char m_max_len;
+
+ /* The current length of each number. */
+ unsigned char m_len[N];
+
+ /* The variable-length part of the structure, which always contains
+ at least one HWI. Element I starts at index I * M_MAX_LEN. */
+ HOST_WIDE_INT m_val[1];
+
+public:
+ void set_precision (unsigned int);
+ trailing_wide_int operator [] (unsigned int);
+ static size_t extra_size (unsigned int);
+};
+
+inline trailing_wide_int_storage::
+trailing_wide_int_storage (unsigned int precision, unsigned char *len,
+ HOST_WIDE_INT *val)
+ : m_precision (precision), m_len (len), m_val (val)
+{
+}
+
+inline unsigned int
+trailing_wide_int_storage::get_len () const
+{
+ return *m_len;
+}
+
+inline unsigned int
+trailing_wide_int_storage::get_precision () const
+{
+ return m_precision;
+}
+
+inline const HOST_WIDE_INT *
+trailing_wide_int_storage::get_val () const
+{
+ return m_val;
+}
+
+inline HOST_WIDE_INT *
+trailing_wide_int_storage::write_val ()
+{
+ return m_val;
+}
+
+inline void
+trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
+{
+ *m_len = len;
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
+ m_val[len - 1] = sext_hwi (m_val[len - 1],
+ m_precision % HOST_BITS_PER_WIDE_INT);
+}
+
+template <typename T>
+inline trailing_wide_int_storage &
+trailing_wide_int_storage::operator = (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x, m_precision);
+ wi::copy (*this, xi);
+ return *this;
+}
+
+/* Initialize the structure and record that all elements have precision
+ PRECISION. */
+template <int N>
+inline void
+trailing_wide_ints <N>::set_precision (unsigned int precision)
+{
+ m_precision = precision;
+ m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+}
+
+/* Return a reference to element INDEX. */
+template <int N>
+inline trailing_wide_int
+trailing_wide_ints <N>::operator [] (unsigned int index)
+{
+ return trailing_wide_int_storage (m_precision, &m_len[index],
+ &m_val[index * m_max_len]);
+}
+
+/* Return how many extra bytes need to be added to the end of the structure
+ in order to handle N wide_ints of precision PRECISION. */
+template <int N>
+inline size_t
+trailing_wide_ints <N>::extra_size (unsigned int precision)
+{
+ unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+ return (N * max_len - 1) * sizeof (HOST_WIDE_INT);
+}
+
+/* This macro is used in structures that end with a trailing_wide_ints field
+ called FIELD. It declares get_NAME() and set_NAME() methods to access
+ element I of FIELD. */
+#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
+ trailing_wide_int get_##NAME () { return FIELD[I]; } \
+ template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
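+
+/* A minimal usage sketch (the structure below is hypothetical and only
+ illustrates the intended pattern):
+
+ struct range_info
+ {
+ TRAILING_WIDE_INT_ACCESSOR (min, ints, 0)
+ TRAILING_WIDE_INT_ACCESSOR (max, ints, 1)
+ trailing_wide_ints <2> ints;
+ };
+
+ Such an object would be allocated with
+ trailing_wide_ints <2>::extra_size (prec) bytes beyond
+ sizeof (range_info), after which ints.set_precision (prec)
+ initializes the trailing array and get_min ()/set_min () access
+ element 0. */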
+
+namespace wi
+{
+ /* Implementation of int_traits for primitive integer types like "int". */
+ template <typename T, bool signed_p>
+ struct primitive_int_traits
+ {
+ static const enum precision_type precision_type = FLEXIBLE_PRECISION;
+ static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = true;
+ static unsigned int get_precision (T);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
+ };
+}
+
+template <typename T, bool signed_p>
+inline unsigned int
+wi::primitive_int_traits <T, signed_p>::get_precision (T)
+{
+ return sizeof (T) * CHAR_BIT;
+}
+
+template <typename T, bool signed_p>
+inline wi::storage_ref
+wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, T x)
+{
+ scratch[0] = x;
+ if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+/* Allow primitive C types to be used in wi:: routines. */
+namespace wi
+{
+ template <>
+ struct int_traits <int>
+ : public primitive_int_traits <int, true> {};
+
+ template <>
+ struct int_traits <unsigned int>
+ : public primitive_int_traits <unsigned int, false> {};
+
+#if HOST_BITS_PER_INT != HOST_BITS_PER_WIDE_INT
+ template <>
+ struct int_traits <HOST_WIDE_INT>
+ : public primitive_int_traits <HOST_WIDE_INT, true> {};
+
+ template <>
+ struct int_traits <unsigned HOST_WIDE_INT>
+ : public primitive_int_traits <unsigned HOST_WIDE_INT, false> {};
+#endif
+}
+
+namespace wi
+{
+ /* Stores HWI-sized integer VAL, treating it as having signedness SGN
+ and precision PRECISION. */
+ struct hwi_with_prec
+ {
+ hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
+ HOST_WIDE_INT val;
+ unsigned int precision;
+ signop sgn;
+ };
+
+ hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
+ hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);
+
+ hwi_with_prec minus_one (unsigned int);
+ hwi_with_prec zero (unsigned int);
+ hwi_with_prec one (unsigned int);
+ hwi_with_prec two (unsigned int);
+}
+
+inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p,
+ signop s)
+ : val (v), precision (p), sgn (s)
+{
+}
+
+/* Return a signed integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, SIGNED);
+}
+
+/* Return an unsigned integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, UNSIGNED);
+}
+
+/* Return a wide int of -1 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::minus_one (unsigned int precision)
+{
+ return wi::shwi (-1, precision);
+}
+
+/* Return a wide int of 0 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::zero (unsigned int precision)
+{
+ return wi::shwi (0, precision);
+}
+
+/* Return a wide int of 1 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::one (unsigned int precision)
+{
+ return wi::shwi (1, precision);
+}
+
+/* Return a wide int of 2 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::two (unsigned int precision)
+{
+ return wi::shwi (2, precision);
+}
+
+namespace wi
+{
+ template <>
+ struct int_traits <wi::hwi_with_prec>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* hwi_with_prec has an explicitly-given precision, rather than the
+ precision of HOST_WIDE_INT. */
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static unsigned int get_precision (const wi::hwi_with_prec &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const wi::hwi_with_prec &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
+{
+ return x.precision;
+}
+
+inline wi::storage_ref
+wi::int_traits <wi::hwi_with_prec>::
+decompose (HOST_WIDE_INT *scratch, unsigned int precision,
+ const wi::hwi_with_prec &x)
+{
+ gcc_checking_assert (precision == x.precision);
+ scratch[0] = x.val;
+ if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+/* Private functions for handling large cases out of line. They take
+ individual length and array parameters because that is cheaper for
+ the inline caller than constructing an object on the stack and
+ passing a reference to it. (Although many callers use wide_int_refs,
+ we generally want those to be removed by SRA.) */
+namespace wi
+{
+ bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
+ unsigned int, unsigned int);
+ unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
+ unsigned int, unsigned int);
+ unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, bool *);
+ unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, bool *);
+ unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, signop, bool *,
+ bool);
+ unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
+ HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ signop, bool *);
+}
+
+/* Return the number of bits that integer X can hold. */
+template <typename T>
+inline unsigned int
+wi::get_precision (const T &x)
+{
+ return wi::int_traits <T>::get_precision (x);
+}
+
+/* Return the number of bits that the result of a binary operation can
+ hold when the input operands are X and Y. */
+template <typename T1, typename T2>
+inline unsigned int
+wi::get_binary_precision (const T1 &x, const T2 &y)
+{
+ return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
+ get_binary_result (x, y));
+}
+
+/* Copy the contents of Y to X, but keeping X's current precision. */
+template <typename T1, typename T2>
+inline void
+wi::copy (T1 &x, const T2 &y)
+{
+ HOST_WIDE_INT *xval = x.write_val ();
+ const HOST_WIDE_INT *yval = y.get_val ();
+ unsigned int len = y.get_len ();
+ unsigned int i = 0;
+ do
+ xval[i] = yval[i];
+ while (++i < len);
+ x.set_len (len, y.is_sign_extended);
+}
+
+/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
+template <typename T>
+inline bool
+wi::fits_shwi_p (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ return xi.len == 1;
+}
+
+/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
+ precision. */
+template <typename T>
+inline bool
+wi::fits_uhwi_p (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ if (xi.precision <= HOST_BITS_PER_WIDE_INT)
+ return true;
+ if (xi.len == 1)
+ return xi.slow () >= 0;
+ return xi.len == 2 && xi.uhigh () == 0;
+}
+
+/* Return true if X is negative based on the interpretation of SGN.
+ For UNSIGNED, this is always false. */
+template <typename T>
+inline bool
+wi::neg_p (const T &x, signop sgn)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ if (sgn == UNSIGNED)
+ return false;
+ return xi.sign_mask () < 0;
+}
+
+/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
+template <typename T>
+inline HOST_WIDE_INT
+wi::sign_mask (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ return xi.sign_mask ();
+}
+
+/* Return true if X == Y. X and Y must be binary-compatible. */
+template <typename T1, typename T2>
+inline bool
+wi::eq_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (xi.is_sign_extended && yi.is_sign_extended)
+ {
+ /* This case reduces to array equality. */
+ if (xi.len != yi.len)
+ return false;
+ unsigned int i = 0;
+ do
+ if (xi.val[i] != yi.val[i])
+ return false;
+ while (++i != xi.len);
+ return true;
+ }
+ if (__builtin_expect (yi.len == 1, true))
+ {
+ /* XI is only equal to YI if it too has a single HWI. */
+ if (xi.len != 1)
+ return false;
+ /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
+ with 0 are simple. */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return xi.val[0] == 0;
+ /* Otherwise flush out any excess bits first. */
+ unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
+ int excess = HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ diff <<= excess;
+ return diff == 0;
+ }
+ return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
+}
+
+/* Return true if X != Y. X and Y must be binary-compatible. */
+template <typename T1, typename T2>
+inline bool
+wi::ne_p (const T1 &x, const T2 &y)
+{
+ return !eq_p (x, y);
+}
+
+/* Return true if X < Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::lts_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* We optimize x < y, where y is 64 or fewer bits. */
+ if (wi::fits_shwi_p (yi))
+ {
+ /* Make lts_p (x, 0) as efficient as wi::neg_p (x). */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return neg_p (xi);
+ /* If x fits directly into a shwi, we can compare directly. */
+ if (wi::fits_shwi_p (xi))
+ return xi.to_shwi () < yi.to_shwi ();
+ /* If x doesn't fit and is negative, then it must be more
+ negative than any value in y, and hence smaller than y. */
+ if (neg_p (xi))
+ return true;
+ /* If x is positive, then it must be larger than any value in y,
+ and hence greater than y. */
+ return false;
+ }
+ /* Optimize the opposite case, if it can be detected at compile time. */
+ if (STATIC_CONSTANT_P (xi.len == 1))
+ /* If YI is negative it is lower than the least HWI.
+ If YI is positive it is greater than the greatest HWI. */
+ return !neg_p (yi);
+ return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return true if X < Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::ltu_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* Optimize comparisons with constants. */
+ if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
+ return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
+ if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
+ return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
+ /* Optimize the case of two HWIs. The HWIs are implicitly sign-extended
+ for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending both
+ values does not change the result. */
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl;
+ }
+ return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::lt_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return lts_p (x, y);
+ else
+ return ltu_p (x, y);
+}
+
+/* Return true if X <= Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::les_p (const T1 &x, const T2 &y)
+{
+ return !lts_p (y, x);
+}
+
+/* Return true if X <= Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::leu_p (const T1 &x, const T2 &y)
+{
+ return !ltu_p (y, x);
+}
+
+/* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::le_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return les_p (x, y);
+ else
+ return leu_p (x, y);
+}
+
+/* Return true if X > Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::gts_p (const T1 &x, const T2 &y)
+{
+ return lts_p (y, x);
+}
+
+/* Return true if X > Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::gtu_p (const T1 &x, const T2 &y)
+{
+ return ltu_p (y, x);
+}
+
+/* Return true if X > Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::gt_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return gts_p (x, y);
+ else
+ return gtu_p (x, y);
+}
+
+/* Return true if X >= Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::ges_p (const T1 &x, const T2 &y)
+{
+ return !lts_p (x, y);
+}
+
+/* Return true if X >= Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::geu_p (const T1 &x, const T2 &y)
+{
+ return !ltu_p (x, y);
+}
+
+/* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::ge_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return ges_p (x, y);
+ else
+ return geu_p (x, y);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
+ as signed values. */
+template <typename T1, typename T2>
+inline int
+wi::cmps (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (wi::fits_shwi_p (yi))
+ {
+ /* Special case for comparisons with 0. */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
+ /* If x fits into a signed HWI, we can compare directly. */
+ if (wi::fits_shwi_p (xi))
+ {
+ HOST_WIDE_INT xl = xi.to_shwi ();
+ HOST_WIDE_INT yl = yi.to_shwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ /* If x doesn't fit and is negative, then it must be more
+ negative than any signed HWI, and hence smaller than y. */
+ if (neg_p (xi))
+ return -1;
+ /* If x is positive, then it must be larger than any signed HWI,
+ and hence greater than y. */
+ return 1;
+ }
+ /* Optimize the opposite case, if it can be detected at compile time. */
+ if (STATIC_CONSTANT_P (xi.len == 1))
+ /* If YI is negative it is lower than the least HWI.
+ If YI is positive it is greater than the greatest HWI. */
+ return neg_p (yi) ? 1 : -1;
+ return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y
+ as unsigned values. */
+template <typename T1, typename T2>
+inline int
+wi::cmpu (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* Optimize comparisons with constants. */
+ if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
+ {
+ /* If XI doesn't fit in a HWI then it must be larger than YI. */
+ if (xi.len != 1)
+ return 1;
+ /* Otherwise compare directly. */
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.val[0];
+ return xl < yl ? -1 : xl > yl;
+ }
+ if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
+ {
+ /* If YI doesn't fit in a HWI then it must be larger than XI. */
+ if (yi.len != 1)
+ return -1;
+ /* Otherwise compare directly. */
+ unsigned HOST_WIDE_INT xl = xi.val[0];
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ /* Optimize the case of two HWIs. The HWIs are implicitly sign-extended
+ for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending both
+ values does not change the result. */
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of
+ X and Y indicated by SGN. */
+template <typename T1, typename T2>
+inline int
+wi::cmp (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return cmps (x, y);
+ else
+ return cmpu (x, y);
+}
+
+/* Return ~x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::bit_not (const T &x)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = ~xi.val[i];
+ result.set_len (xi.len);
+ return result;
+}
+
+/* Return -x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x)
+{
+ return sub (0, x);
+}
+
+/* Return -x. Indicate in *OVERFLOW if X is the minimum signed value. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x, bool *overflow)
+{
+ *overflow = only_sign_bit_p (x);
+ return sub (0, x);
+}
+
+/* Return the absolute value of x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::abs (const T &x)
+{
+ return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x);
+}
+
+/* Return the result of sign-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::sext (const T &x, unsigned int offset)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ if (offset <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = sext_hwi (xi.ulow (), offset);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
+ return result;
+}
+
+/* Return the result of zero-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::zext (const T &x, unsigned int offset)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ /* This is not just an optimization, it is actually required to
+ maintain canonicalization. */
+ if (offset >= precision)
+ {
+ wi::copy (result, xi);
+ return result;
+ }
+
+ /* In these cases we know that at least the top bit will be clear,
+ so no sign extension is necessary. */
+ if (offset < HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = zext_hwi (xi.ulow (), offset);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
+ return result;
+}
+
+/* Return the result of extending the low OFFSET bits of X according to
+ signedness SGN. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::ext (const T &x, unsigned int offset, signop sgn)
+{
+ return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
+}
+
+/* Return an integer that represents X | (1 << bit). */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::set_bit (const T &x, unsigned int bit)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () | ((unsigned HOST_WIDE_INT) 1 << bit);
+ result.set_len (1);
+ }
+ else
+ result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
+ return result;
+}
+
+/* Return the minimum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::min (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::le_p (x, y, sgn))
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
+ else
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
+ return result;
+}
+
+/* Return the minimum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smin (const T1 &x, const T2 &y)
+{
+ return min (x, y, SIGNED);
+}
+
+/* Return the minimum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umin (const T1 &x, const T2 &y)
+{
+ return min (x, y, UNSIGNED);
+}
+
+/* Return the maximum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::max (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::ge_p (x, y, sgn))
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
+ else
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
+ return result;
+}
+
+/* Return the maximum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smax (const T1 &x, const T2 &y)
+{
+ return max (x, y, SIGNED);
+}
+
+/* Return the maximum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umax (const T1 &x, const T2 &y)
+{
+ return max (x, y, UNSIGNED);
+}
+
+/* Return X & Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () & yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X & ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () & ~yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X | Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () | yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (or_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision), is_sign_extended);
+ return result;
+}
+
+/* Return X | ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () | ~yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X ^ Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_xor (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () ^ yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (xor_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision), is_sign_extended);
+ return result;
+}
+
+/* Return X + Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () + yi.ulow ();
+ result.set_len (1);
+ }
+ /* If the precision is known at compile time to be greater than
+ HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
+ knowing that (a) all bits in those HWIs are significant and
+ (b) the result has room for at least two HWIs. This provides
+ a fast path for things like offset_int and widest_int.
+
+ The STATIC_CONSTANT_P test prevents this path from being
+ used for wide_ints. wide_ints with precisions greater than
+ HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
+ point handling them inline. */
+ else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
+ && __builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
+ val[0] = resultl;
+ val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
+ result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
+ >> (HOST_BITS_PER_WIDE_INT - 1)));
+ }
+ else
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ UNSIGNED, 0));
+ return result;
+}
+
+/* Return X + Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
+ if (sgn == SIGNED)
+ *overflow = (((resultl ^ xl) & (resultl ^ yl))
+ >> (precision - 1)) & 1;
+ else
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ val[0] = resultl;
+ result.set_len (1);
+ }
+ else
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow));
+ return result;
+}
+
+/* Return X - Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () - yi.ulow ();
+ result.set_len (1);
+ }
+ /* If the precision is known at compile time to be greater than
+ HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
+ knowing that (a) all bits in those HWIs are significant and
+ (b) the result has room for at least two HWIs. This provides
+ a fast path for things like offset_int and widest_int.
+
+ The STATIC_CONSTANT_P test prevents this path from being
+ used for wide_ints. wide_ints with precisions greater than
+ HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
+ point handling them inline. */
+ else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
+ && __builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
+ val[0] = resultl;
+ val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
+ result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
+ >> (HOST_BITS_PER_WIDE_INT - 1)));
+ }
+ else
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ UNSIGNED, 0));
+ return result;
+}
+
+/* Return X - Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
+ if (sgn == SIGNED)
+ *overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
+ else
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ val[0] = resultl;
+ result.set_len (1);
+ }
+ else
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow));
+ return result;
+}
+
+/* Return X * Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () * yi.ulow ();
+ result.set_len (1);
+ }
+ else
+ result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
+ precision, UNSIGNED, 0, false));
+ return result;
+}
+
+/* Return X * Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow, false));
+ return result;
+}
+
+/* Return X * Y, treating both X and Y as signed values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smul (const T1 &x, const T2 &y, bool *overflow)
+{
+ return mul (x, y, SIGNED, overflow);
+}
+
+/* Return X * Y, treating both X and Y as unsigned values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umul (const T1 &x, const T2 &y, bool *overflow)
+{
+ return mul (x, y, UNSIGNED, overflow);
+}
+
+/* Perform a widening multiplication of X and Y, extending the values
+ according to SGN, and return the high part of the result. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul_high (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, 0, true));
+ return result;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
+ precision,
+ yi.val, yi.len, yi.precision,
+ sgn, overflow));
+ return quotient;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_trunc (const T1 &x, const T2 &y)
+{
+ return div_trunc (x, y, SIGNED);
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_trunc (const T1 &x, const T2 &y)
+{
+ return div_trunc (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return quotient - 1;
+ return quotient;
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_floor (const T1 &x, const T2 &y)
+{
+ return div_floor (x, y, SIGNED);
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and udiv_trunc? Aren't they the same? */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_floor (const T1 &x, const T2 &y)
+{
+ return div_floor (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards +inf. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+ if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return quotient + 1;
+ return quotient;
+}
+
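+/* Return X / Y, rounding towards +inf. Treat X and Y as unsigned values. */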
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_ceil (const T1 &x, const T2 &y)
+{
+ return div_ceil (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding to nearest with ties away from zero.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the result overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
+ {
+ if (sgn == SIGNED)
+ {
+ if (wi::ges_p (wi::abs (remainder),
+ wi::lrshift (wi::abs (y), 1)))
+ {
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return quotient - 1;
+ else
+ return quotient + 1;
+ }
+ }
+ else
+ {
+ if (wi::geu_p (remainder, wi::lrshift (y, 1)))
+ return quotient + 1;
+ }
+ }
+ return quotient;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as having the
+ signedness given by SGN. Store the remainder in *REMAINDER_PTR. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *remainder_ptr)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, 0));
+ remainder.set_len (remainder_len);
+
+ *remainder_ptr = remainder;
+ return quotient;
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (remainder);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ divmod_internal (0, &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, overflow);
+ remainder.set_len (remainder_len);
+
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smod_trunc (const T1 &x, const T2 &y)
+{
+ return mod_trunc (x, y, SIGNED);
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_trunc (const T1 &x, const T2 &y)
+{
+ return mod_trunc (x, y, UNSIGNED);
+}
+
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return remainder + y;
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and umod_trunc? Aren't they the same? */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_floor (const T1 &x, const T2 &y)
+{
+ return mod_floor (x, y, UNSIGNED);
+}
+
+/* Compute X / Y, rounding towards +inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return remainder - y;
+ return remainder;
+}
+
+/* Compute X / Y, rounding to nearest with ties away from zero,
+ and return the remainder. Treat X and Y as having the signedness
+ given by SGN. Indicate in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
+ {
+ if (sgn == SIGNED)
+ {
+ if (wi::ges_p (wi::abs (remainder),
+ wi::lrshift (wi::abs (y), 1)))
+ {
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return remainder + y;
+ else
+ return remainder - y;
+ }
+ }
+ else
+ {
+ if (wi::geu_p (remainder, wi::lrshift (y, 1)))
+ return remainder - y;
+ }
+ }
+ return remainder;
+}
+
+/* Return true if X is a multiple of Y, storing X / Y in *RES if so.
+ Treat X and Y as having the signedness given by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *res)
+{
+ WI_BINARY_RESULT (T1, T2) remainder;
+ WI_BINARY_RESULT (T1, T2) quotient
+ = divmod_trunc (x, y, sgn, &remainder);
+ if (remainder == 0)
+ {
+ *res = quotient;
+ return true;
+ }
+ return false;
+}
+
+/* Return X << Y. Return 0 if Y is greater than or equal to
+ the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::lshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, precision))
+ {
+ val[0] = 0;
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ /* For fixed-precision integers like offset_int and widest_int,
+ handle the case where the shift value is constant and the
+ result is a single nonnegative HWI (meaning that we don't
+ need to worry about val[1]). This is particularly common
+ for converting a byte count to a bit count.
+
+ For variable-precision integers like wide_int, handle HWI
+ and sub-HWI integers inline. */
+ if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
+ ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
+ && xi.len == 1
+ && xi.val[0] <= (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT)
+ HOST_WIDE_INT_MAX >> shift))
+ : precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () << shift;
+ result.set_len (1);
+ }
+ else
+ result.set_len (lshift_large (val, xi.val, xi.len,
+ precision, shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using a logical shift. Return 0 if Y is greater than
+ or equal to the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::lrshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, xi.precision))
+ {
+ val[0] = 0;
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ /* For fixed-precision integers like offset_int and widest_int,
+ handle the case where the shift value is constant and the
+ shifted value is a single nonnegative HWI (meaning that all
+ bits above the HWI are zero). This is particularly common
+ for converting a bit count to a byte count.
+
+ For variable-precision integers like wide_int, handle HWI
+ and sub-HWI integers inline. */
+ if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
+ ? xi.len == 1 && xi.val[0] >= 0
+ : xi.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.to_uhwi () >> shift;
+ result.set_len (1);
+ }
+ else
+ result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using an arithmetic shift. Return a sign mask if
+ Y is greater than or equal to the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::arshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, xi.precision))
+ {
+ val[0] = sign_mask (x);
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ if (xi.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
+ logical shift otherwise. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::rshift (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == UNSIGNED)
+ return lrshift (x, y);
+ else
+ return arshift (x, y);
+}
+
+/* Return the result of rotating the low WIDTH bits of X left by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
+template <typename T1, typename T2>
+WI_UNARY_RESULT (T1)
+wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
+{
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
+ WI_UNARY_RESULT (T1) right = wi::lrshift (x, wi::sub (width, ymod));
+ if (width != precision)
+ return wi::zext (left, width) | wi::zext (right, width);
+ return left | right;
+}
+
+/* Return the result of rotating the low WIDTH bits of X right by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
+template <typename T1, typename T2>
+WI_UNARY_RESULT (T1)
+wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
+{
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) right = wi::lrshift (x, ymod);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
+ if (width != precision)
+ return wi::zext (left, width) | wi::zext (right, width);
+ return left | right;
+}
+
+/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
+ is odd. */
+inline int
+wi::parity (const wide_int_ref &x)
+{
+ return popcount (x) & 1;
+}
+
+/* Extract WIDTH bits from X, starting at BITPOS. */
+template <typename T>
+inline unsigned HOST_WIDE_INT
+wi::extract_uhwi (const T &x, unsigned int bitpos,
+ unsigned int width)
+{
+ unsigned precision = get_precision (x);
+ if (precision < bitpos + width)
+ precision = bitpos + width;
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ /* Handle this rare case after the above, so that we assert about
+ bogus BITPOS values. */
+ if (width == 0)
+ return 0;
+
+ unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
+ unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
+ unsigned HOST_WIDE_INT res = xi.elt (start);
+ res >>= shift;
+ if (shift + width > HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
+ res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
+ }
+ return zext_hwi (res, width);
+}
+
+/* Return the minimum precision needed to store X with sign SGN. */
+template <typename T>
+inline unsigned int
+wi::min_precision (const T &x, signop sgn)
+{
+ if (sgn == SIGNED)
+ return get_precision (x) - clrsb (x);
+ else
+ return get_precision (x) - clz (x);
+}
+
+template<typename T>
+void
+gt_ggc_mx (generic_wide_int <T> *)
+{
+}
+
+template<typename T>
+void
+gt_pch_nx (generic_wide_int <T> *)
+{
+}
+
+template<typename T>
+void
+gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
+{
+}
+
+template<int N>
+void
+gt_ggc_mx (trailing_wide_ints <N> *)
+{
+}
+
+template<int N>
+void
+gt_pch_nx (trailing_wide_ints <N> *)
+{
+}
+
+template<int N>
+void
+gt_pch_nx (trailing_wide_ints <N> *, void (*) (void *, void *), void *)
+{
+}
+
+namespace wi
+{
+ /* Used for overloaded functions in which the only other acceptable
+ scalar type is a pointer. It stops a plain 0 from being treated
+ as a null pointer. */
+ struct never_used1 {};
+ struct never_used2 {};
+
+ wide_int min_value (unsigned int, signop);
+ wide_int min_value (never_used1 *);
+ wide_int min_value (never_used2 *);
+ wide_int max_value (unsigned int, signop);
+ wide_int max_value (never_used1 *);
+ wide_int max_value (never_used2 *);
+
+ /* FIXME: this is target dependent, so should be elsewhere.
+ It also seems to assume that CHAR_BIT == BITS_PER_UNIT. */
+ wide_int from_buffer (const unsigned char *, unsigned int);
+
+#ifndef GENERATOR_FILE
+ void to_mpz (wide_int, mpz_t, signop);
+#endif
+
+ wide_int mask (unsigned int, bool, unsigned int);
+ wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
+ wide_int set_bit_in_zero (unsigned int, unsigned int);
+ wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
+ unsigned int);
+
+ template <typename T>
+ T mask (unsigned int, bool);
+
+ template <typename T>
+ T shifted_mask (unsigned int, unsigned int, bool);
+
+ template <typename T>
+ T set_bit_in_zero (unsigned int);
+
+ unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
+ unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
+ bool, unsigned int);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool);
+}
+
+/* Return a PRECISION-bit integer in which the low WIDTH bits are set
+ and the other bits are clear, or the inverse if NEGATE_P. */
+inline wide_int
+wi::mask (unsigned int width, bool negate_p, unsigned int precision)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (mask (result.write_val (), width, negate_p, precision));
+ return result;
+}
+
+/* Return a PRECISION-bit integer in which the low START bits are clear,
+ the next WIDTH bits are set, and the other bits are clear,
+ or the inverse if NEGATE_P. */
+inline wide_int
+wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
+ unsigned int precision)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ precision));
+ return result;
+}
+
+/* Return a PRECISION-bit integer in which bit BIT is set and all the
+ others are clear. */
+inline wide_int
+wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
+{
+ return shifted_mask (bit, 1, false, precision);
+}
+
+/* Return an integer of type T in which the low WIDTH bits are set
+ and the other bits are clear, or the inverse if NEGATE_P. */
+template <typename T>
+inline T
+wi::mask (unsigned int width, bool negate_p)
+{
+ STATIC_ASSERT (wi::int_traits<T>::precision);
+ T result;
+ result.set_len (mask (result.write_val (), width, negate_p,
+ wi::int_traits <T>::precision));
+ return result;
+}
+
+/* Return an integer of type T in which the low START bits are clear,
+ the next WIDTH bits are set, and the other bits are clear, or the
+ inverse if NEGATE_P. */
+template <typename T>
+inline T
+wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
+{
+ STATIC_ASSERT (wi::int_traits<T>::precision);
+ T result;
+ result.set_len (shifted_mask (result.write_val (), start, width,
+ negate_p,
+ wi::int_traits <T>::precision));
+ return result;
+}
+
+/* Return an integer of type T in which bit BIT is set and all the
+ others are clear. */
+template <typename T>
+inline T
+wi::set_bit_in_zero (unsigned int bit)
+{
+ return shifted_mask <T> (bit, 1, false);
+}
+
+#endif /* WIDE_INT_H */
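
As a minimal usage sketch of the wi:: routines declared above (not part of the patch): it assumes the usual GCC internal includes (config.h, system.h, coretypes.h) are available and the helper names are invented for illustration.

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "wide-int.h"

/* Return true if LO <= X <= HI under signed comparison, using the
   wi::les_p family defined above.  X, LO and HI are assumed to have
   the same precision.  */
static bool
in_signed_range (const wide_int &x, const wide_int &lo, const wide_int &hi)
{
  return wi::les_p (lo, x) && wi::les_p (x, hi);
}

/* Add two values of the same precision, reporting signed overflow
   through *OVF via the overflow-checking form of wi::add.  */
static wide_int
checked_add (const wide_int &a, const wide_int &b, bool *ovf)
{
  return wi::add (a, b, SIGNED, ovf);
}

/* Build a 32-bit value with the low 8 bits set and shift it left,
   using the wi::mask and wi::lshift helpers declared above.  */
static wide_int
low_byte_shifted (unsigned int amount)
{
  wide_int m = wi::mask (8, false, 32);
  return wi::lshift (m, amount);
}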
diff --git a/include/ChangeLog b/include/ChangeLog
index 9ff64a0ed8a..61ed93df0dc 100644
--- a/include/ChangeLog
+++ b/include/ChangeLog
@@ -1,3 +1,12 @@
+2014-10-28 Yury Gribov <y.gribov@samsung.com>
+
+ * libiberty.h (strtol, strtoul, strtoll, strtoull): New prototypes.
+
+2014-09-26 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * libiberty.h (PEX_STDOUT_APPEND): New flag.
+ (PEX_STDERR_APPEND): Likewise.
+
2014-10-30 Release Manager
* GCC 4.9.2 released.
diff --git a/include/libiberty.h b/include/libiberty.h
index 78c42eb88f1..b0295e12870 100644
--- a/include/libiberty.h
+++ b/include/libiberty.h
@@ -442,6 +442,11 @@ extern struct pex_obj *pex_init (int flags, const char *pname,
on Unix. */
#define PEX_BINARY_ERROR 0x80
+/* Append stdout to existing file instead of truncating it. */
+#define PEX_STDOUT_APPEND 0x100
+
+/* The same as PEX_STDOUT_APPEND, but for STDERR. */
+#define PEX_STDERR_APPEND 0x200
/* Execute one program. Returns NULL on success. On error returns an
error string (typically just the name of a system call); the error
@@ -638,6 +643,33 @@ extern int vsnprintf (char *, size_t, const char *, va_list) ATTRIBUTE_PRINTF(3,
extern int strverscmp (const char *, const char *);
#endif
+#if defined(HAVE_DECL_STRTOL) && !HAVE_DECL_STRTOL
+extern long int strtol (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_DECL_STRTOUL) && !HAVE_DECL_STRTOUL
+extern unsigned long int strtoul (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_LONG_LONG) && defined(HAVE_DECL_STRTOLL) && !HAVE_DECL_STRTOLL
+__extension__
+extern long long int strtoll (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_LONG_LONG) && defined(HAVE_DECL_STRTOULL) && !HAVE_DECL_STRTOULL
+__extension__
+extern unsigned long long int strtoull (const char *nptr,
+ char **endptr, int base);
+#endif
+
+#if defined(HAVE_DECL_STRVERSCMP) && !HAVE_DECL_STRVERSCMP
+/* Compare version strings. */
+extern int strverscmp (const char *, const char *);
+#endif
+
/* Set the title of a process */
extern void setproctitle (const char *name, ...);
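
A hedged usage sketch (not part of the patch) of the new append flag through the existing pex interface; PEX_LAST and PEX_SEARCH are assumed to be the usual pex_run flags from libiberty.h, and the program name and log file below are invented.

#include "libiberty.h"

/* Run ARGV, appending its stdout to an existing "build.log" instead of
   truncating it, by passing the new PEX_STDOUT_APPEND flag.  Returns
   the pex error message, or NULL on success.  */
static const char *
run_and_append_log (char * const *argv, int *err)
{
  struct pex_obj *pex = pex_init (0, "demo", NULL);
  const char *msg = pex_run (pex, PEX_LAST | PEX_SEARCH | PEX_STDOUT_APPEND,
                             argv[0], argv, "build.log", NULL, err);
  pex_free (pex);
  return msg;
}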
diff --git a/libbacktrace/ChangeLog b/libbacktrace/ChangeLog
index 90826b5b4d6..4c14bfea178 100644
--- a/libbacktrace/ChangeLog
+++ b/libbacktrace/ChangeLog
@@ -1,3 +1,10 @@
+2016-03-02 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * elf.c (backtrace_initialize): Properly initialize elf_fileline_fn to
+ avoid possible crash.
+ (elf_add): Don't set *fileline_fn to elf_nodebug when debug info is
+ missing.
+
2015-01-26 Matthias Klose <doko@ubuntu.com>
* configure.ac: Move AM_ENABLE_MULTILIB before AC_PROG_CC.
diff --git a/libbacktrace/elf.c b/libbacktrace/elf.c
index e63aaf5dbdf..6fcf8b9be3d 100644
--- a/libbacktrace/elf.c
+++ b/libbacktrace/elf.c
@@ -791,7 +791,6 @@ elf_add (struct backtrace_state *state, int descriptor, uintptr_t base_address,
{
if (!backtrace_close (descriptor, error_callback, data))
goto fail;
- *fileline_fn = elf_nodebug;
return 1;
}
@@ -925,7 +924,7 @@ backtrace_initialize (struct backtrace_state *state, int descriptor,
int ret;
int found_sym;
int found_dwarf;
- fileline elf_fileline_fn;
+ fileline elf_fileline_fn = elf_nodebug;
struct phdr_data pd;
ret = elf_add (state, descriptor, 0, error_callback, data, &elf_fileline_fn,
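
For context, a hedged sketch of the client call path that exercises backtrace_initialize and benefits from the fix above when a binary has no debug info; it assumes libbacktrace's public backtrace.h API (backtrace_create_state, backtrace_full) and uses an invented callback.

#include <stdint.h>
#include <stdio.h>
#include "backtrace.h"

/* Print one frame; returning 0 continues the walk.  FILENAME and
   FUNCTION may be NULL when no debug info is available.  */
static int
print_frame (void *data, uintptr_t pc, const char *filename,
             int lineno, const char *function)
{
  printf ("0x%lx %s:%d (%s)\n", (unsigned long) pc,
          filename ? filename : "??", lineno,
          function ? function : "??");
  return 0;
}

static void
print_error (void *data, const char *msg, int errnum)
{
  fprintf (stderr, "libbacktrace: %s (%d)\n", msg, errnum);
}

void
dump_backtrace (void)
{
  struct backtrace_state *state
    = backtrace_create_state (NULL, 0, print_error, NULL);
  backtrace_full (state, 0, print_frame, print_error, NULL);
}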
diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog
index 47dc9a962ac..df6e19199c5 100644
--- a/libcpp/ChangeLog
+++ b/libcpp/ChangeLog
@@ -1,3 +1,9 @@
+2014-09-24 Marek Polacek <polacek@redhat.com>
+
+ PR c/61405
+ PR c/53874
+ * include/cpplib.h (enum cpp_ttype): Define CPP_KEYWORD.
+
2014-11-28 Jakub Jelinek <jakub@redhat.com>
Backported from mainline
diff --git a/libcpp/include/cpplib.h b/libcpp/include/cpplib.h
index a9db7abbd26..bb12da0f71d 100644
--- a/libcpp/include/cpplib.h
+++ b/libcpp/include/cpplib.h
@@ -153,6 +153,9 @@ enum cpp_ttype
TTYPE_TABLE
N_TTYPES,
+ /* A token type for keywords, as opposed to ordinary identifiers. */
+ CPP_KEYWORD,
+
/* Positions in the table. */
CPP_LAST_EQ = CPP_LSHIFT,
CPP_FIRST_DIGRAPH = CPP_HASH,
diff --git a/libiberty/ChangeLog b/libiberty/ChangeLog
index 2c84de15ded..bcbb895ac59 100644
--- a/libiberty/ChangeLog
+++ b/libiberty/ChangeLog
@@ -1,3 +1,26 @@
+2014-10-28 Yury Gribov <y.gribov@samsung.com>
+
+ * strtoll.c: New file.
+ * strtoull.c: New file.
+ * configure.ac: Add long long checks. Add harness for strtoll and
+ strtoull. Check decls for strtol, strtoul, strtoll, strtoull.
+ * Makefile.in (CFILES, CONFIGURED_OFILES): Add strtoll and strtoull.
+ * config.in: Regenerate.
+ * configure: Regenerate.
+ * functions.texi: Regenerate.
+ * testsuite/Makefile.in (check-strtol): New rule.
+ (test-strtol): Likewise.
+ (mostlyclean): Clean up strtol test.
+ * testsuite/test-strtol.c: New test.
+
+2014-09-26 Max Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * pex-common.h (struct pex_funcs): Add new parameter for open_write field.
+ * pex-unix.c (pex_unix_open_write): Add support for new parameter.
+ * pex-djgpp.c (pex_djgpp_open_write): Likewise.
+ * pex-win32.c (pex_win32_open_write): Likewise.
+ * pex-common.c (pex_run_in_environment): Likewise.
+
2014-10-30 Release Manager
* GCC 4.9.2 released.
diff --git a/libiberty/Makefile.in b/libiberty/Makefile.in
index 75ff82d984a..8ce9de75021 100644
--- a/libiberty/Makefile.in
+++ b/libiberty/Makefile.in
@@ -151,8 +151,8 @@ CFILES = alloca.c argv.c asprintf.c atexit.c \
spaces.c splay-tree.c stack-limit.c stpcpy.c stpncpy.c \
strcasecmp.c strchr.c strdup.c strerror.c strncasecmp.c \
strncmp.c strrchr.c strsignal.c strstr.c strtod.c strtol.c \
- strtoul.c strndup.c strnlen.c strverscmp.c \
- timeval-utils.c tmpnam.c \
+ strtoll.c strtoul.c strtoull.c strndup.c strnlen.c \
+ strverscmp.c timeval-utils.c tmpnam.c \
unlink-if-ordinary.c \
vasprintf.c vfork.c vfprintf.c vprintf.c vsnprintf.c vsprintf.c \
waitpid.c \
@@ -218,8 +218,8 @@ CONFIGURED_OFILES = ./asprintf.$(objext) ./atexit.$(objext) \
./strchr.$(objext) ./strdup.$(objext) ./strncasecmp.$(objext) \
./strncmp.$(objext) ./strndup.$(objext) ./strnlen.$(objext) \
./strrchr.$(objext) ./strstr.$(objext) ./strtod.$(objext) \
- ./strtol.$(objext) ./strtoul.$(objext) ./strverscmp.$(objext) \
- ./tmpnam.$(objext) \
+ ./strtol.$(objext) ./strtoul.$(objext) ./strtoll.$(objext) \
+ ./strtoull.$(objext) ./tmpnam.$(objext) ./strverscmp.$(objext) \
./vasprintf.$(objext) ./vfork.$(objext) ./vfprintf.$(objext) \
./vprintf.$(objext) ./vsnprintf.$(objext) ./vsprintf.$(objext) \
./waitpid.$(objext)
@@ -1152,6 +1152,15 @@ $(CONFIGURED_OFILES): stamp-picdir
else true; fi
$(COMPILE.c) $(srcdir)/strtol.c $(OUTPUT_OPTION)
+./strtoll.$(objext): $(srcdir)/strtoll.c config.h $(INCDIR)/safe-ctype.h
+ if [ x"$(PICFLAG)" != x ]; then \
+ $(COMPILE.c) $(PICFLAG) $(srcdir)/strtoll.c -o pic/$@; \
+ else true; fi
+ if [ x"$(NOASANFLAG)" != x ]; then \
+ $(COMPILE.c) $(PICFLAG) $(NOASANFLAG) $(srcdir)/strtoll.c -o noasan/$@; \
+ else true; fi
+ $(COMPILE.c) $(srcdir)/strtoll.c $(OUTPUT_OPTION)
+
./strtoul.$(objext): $(srcdir)/strtoul.c config.h $(INCDIR)/ansidecl.h \
$(INCDIR)/safe-ctype.h
if [ x"$(PICFLAG)" != x ]; then \
@@ -1159,6 +1168,16 @@ $(CONFIGURED_OFILES): stamp-picdir
else true; fi
$(COMPILE.c) $(srcdir)/strtoul.c $(OUTPUT_OPTION)
+./strtoull.$(objext): $(srcdir)/strtoull.c config.h $(INCDIR)/ansidecl.h \
+ $(INCDIR)/safe-ctype.h
+ if [ x"$(PICFLAG)" != x ]; then \
+ $(COMPILE.c) $(PICFLAG) $(srcdir)/strtoull.c -o pic/$@; \
+ else true; fi
+ if [ x"$(NOASANFLAG)" != x ]; then \
+ $(COMPILE.c) $(PICFLAG) $(NOASANFLAG) $(srcdir)/strtoull.c -o noasan/$@; \
+ else true; fi
+ $(COMPILE.c) $(srcdir)/strtoull.c $(OUTPUT_OPTION)
+
./strverscmp.$(objext): $(srcdir)/strverscmp.c $(INCDIR)/ansidecl.h \
$(INCDIR)/libiberty.h $(INCDIR)/safe-ctype.h
if [ x"$(PICFLAG)" != x ]; then \
diff --git a/libiberty/config.in b/libiberty/config.in
index 1cf9c11b6ee..7c05b9d846e 100644
--- a/libiberty/config.in
+++ b/libiberty/config.in
@@ -79,6 +79,22 @@
don't. */
#undef HAVE_DECL_SNPRINTF
+/* Define to 1 if you have the declaration of `strtol', and to 0 if you don't.
+ */
+#undef HAVE_DECL_STRTOL
+
+/* Define to 1 if you have the declaration of `strtoll', and to 0 if you
+ don't. */
+#undef HAVE_DECL_STRTOLL
+
+/* Define to 1 if you have the declaration of `strtoul', and to 0 if you
+ don't. */
+#undef HAVE_DECL_STRTOUL
+
+/* Define to 1 if you have the declaration of `strtoull', and to 0 if you
+ don't. */
+#undef HAVE_DECL_STRTOULL
+
/* Define to 1 if you have the declaration of `strverscmp', and to 0 if you
don't. */
#undef HAVE_DECL_STRVERSCMP
@@ -136,6 +152,9 @@
/* Define to 1 if you have the <limits.h> header file. */
#undef HAVE_LIMITS_H
+/* Define if you have the `long long' type. */
+#undef HAVE_LONG_LONG
+
/* Define to 1 if you have the <machine/hal_sysinfo.h> header file. */
#undef HAVE_MACHINE_HAL_SYSINFO_H
@@ -280,9 +299,15 @@
/* Define to 1 if you have the `strtol' function. */
#undef HAVE_STRTOL
+/* Define to 1 if you have the `strtoll' function. */
+#undef HAVE_STRTOLL
+
/* Define to 1 if you have the `strtoul' function. */
#undef HAVE_STRTOUL
+/* Define to 1 if you have the `strtoull' function. */
+#undef HAVE_STRTOULL
+
/* Define to 1 if you have the `strverscmp' function. */
#undef HAVE_STRVERSCMP
@@ -439,6 +464,12 @@
/* The size of `int', as computed by sizeof. */
#undef SIZEOF_INT
+/* The size of `long', as computed by sizeof. */
+#undef SIZEOF_LONG
+
+/* The size of `long long', as computed by sizeof. */
+#undef SIZEOF_LONG_LONG
+
/* Define if you know the direction of stack growth for your system; otherwise
it will be automatically deduced at run-time. STACK_DIRECTION > 0 => grows
toward higher addresses STACK_DIRECTION < 0 => grows toward lower addresses
diff --git a/libiberty/configure b/libiberty/configure
index 7bde9b38f19..e9572870df5 100755
--- a/libiberty/configure
+++ b/libiberty/configure
@@ -5117,7 +5117,7 @@ $as_echo "#define NEED_DECLARATION_ERRNO 1" >>confdefs.h
fi
-# Determine the size of an int for struct fibnode.
+# Determine sizes of some types.
# The cast to long int works around a bug in the HP C Compiler
# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
@@ -5152,6 +5152,82 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5
+$as_echo_n "checking size of long... " >&6; }
+if test "${ac_cv_sizeof_long+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_long" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ as_fn_set_status 77
+as_fn_error "cannot compute sizeof (long)
+See \`config.log' for more details." "$LINENO" 5; }; }
+ else
+ ac_cv_sizeof_long=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5
+$as_echo "$ac_cv_sizeof_long" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG $ac_cv_sizeof_long
+_ACEOF
+
+
+
+# Check for presence of long long
+ac_fn_c_check_type "$LINENO" "long long" "ac_cv_type_long_long" "$ac_includes_default"
+if test "x$ac_cv_type_long_long" = x""yes; then :
+
+$as_echo "#define HAVE_LONG_LONG 1" >>confdefs.h
+ # The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5
+$as_echo_n "checking size of long long... " >&6; }
+if test "${ac_cv_sizeof_long_long+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_long_long" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ as_fn_set_status 77
+as_fn_error "cannot compute sizeof (long long)
+See \`config.log' for more details." "$LINENO" 5; }; }
+ else
+ ac_cv_sizeof_long_long=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long" >&5
+$as_echo "$ac_cv_sizeof_long_long" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG_LONG $ac_cv_sizeof_long_long
+_ACEOF
+
+
+fi
+
# Look for a 64-bit type.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a 64-bit type" >&5
@@ -5380,6 +5456,8 @@ funcs="$funcs strstr"
funcs="$funcs strtod"
funcs="$funcs strtol"
funcs="$funcs strtoul"
+funcs="$funcs strtoll"
+funcs="$funcs strtoull"
funcs="$funcs strverscmp"
funcs="$funcs tmpnam"
funcs="$funcs vasprintf"
@@ -5416,7 +5494,7 @@ if test "x" = "y"; then
sbrk setenv setproctitle setrlimit sigsetmask snprintf spawnve spawnvpe \
stpcpy stpncpy strcasecmp strchr strdup \
strerror strncasecmp strndup strnlen strrchr strsignal strstr strtod \
- strtol strtoul strverscmp sysconf sysctl sysmp \
+ strtol strtoul strtoll strtoull strverscmp sysconf sysctl sysmp \
table times tmpnam \
vasprintf vfprintf vprintf vsprintf \
wait3 wait4 waitpid
@@ -5492,6 +5570,46 @@ fi
cat >>confdefs.h <<_ACEOF
#define HAVE_DECL_VSNPRINTF $ac_have_decl
_ACEOF
+ac_fn_c_check_decl "$LINENO" "strtol" "ac_cv_have_decl_strtol" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strtol" = x""yes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRTOL $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "strtoul" "ac_cv_have_decl_strtoul" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strtoul" = x""yes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRTOUL $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "strtoll" "ac_cv_have_decl_strtoll" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strtoll" = x""yes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRTOLL $ac_have_decl
+_ACEOF
+ac_fn_c_check_decl "$LINENO" "strtoull" "ac_cv_have_decl_strtoull" "$ac_includes_default"
+if test "x$ac_cv_have_decl_strtoull" = x""yes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_STRTOULL $ac_have_decl
+_ACEOF
$as_echo "#define HAVE_SYS_ERRLIST 1" >>confdefs.h
diff --git a/libiberty/configure.ac b/libiberty/configure.ac
index d6180bcddd9..7c3f1148a43 100644
--- a/libiberty/configure.ac
+++ b/libiberty/configure.ac
@@ -266,8 +266,14 @@ AC_HEADER_TIME
libiberty_AC_DECLARE_ERRNO
-# Determine the size of an int for struct fibnode.
+# Determine sizes of some types.
AC_CHECK_SIZEOF([int])
+AC_CHECK_SIZEOF([long])
+
+# Check for presence of long long
+AC_CHECK_TYPE([long long],
+ [AC_DEFINE(HAVE_LONG_LONG, 1, [Define if you have the `long long' type.]) AC_CHECK_SIZEOF([long long])],
+ [])
# Look for a 64-bit type.
AC_MSG_CHECKING([for a 64-bit type])
@@ -359,6 +365,8 @@ funcs="$funcs strstr"
funcs="$funcs strtod"
funcs="$funcs strtol"
funcs="$funcs strtoul"
+funcs="$funcs strtoll"
+funcs="$funcs strtoull"
funcs="$funcs strverscmp"
funcs="$funcs tmpnam"
funcs="$funcs vasprintf"
@@ -395,11 +403,11 @@ if test "x" = "y"; then
sbrk setenv setproctitle setrlimit sigsetmask snprintf spawnve spawnvpe \
stpcpy stpncpy strcasecmp strchr strdup \
strerror strncasecmp strndup strnlen strrchr strsignal strstr strtod \
- strtol strtoul strverscmp sysconf sysctl sysmp \
+ strtol strtoul strtoll strtoull strverscmp sysconf sysctl sysmp \
table times tmpnam \
vasprintf vfprintf vprintf vsprintf \
wait3 wait4 waitpid)
- AC_CHECK_DECLS([basename(char *), ffs, asprintf, vasprintf, snprintf, vsnprintf])
+ AC_CHECK_DECLS([basename(char *), ffs, asprintf, vasprintf, snprintf, vsnprintf, strtol, strtoul, strtoll, strtoull])
AC_DEFINE(HAVE_SYS_ERRLIST, 1, [Define if you have the sys_errlist variable.])
AC_DEFINE(HAVE_SYS_NERR, 1, [Define if you have the sys_nerr variable.])
AC_DEFINE(HAVE_SYS_SIGLIST, 1, [Define if you have the sys_siglist variable.])
diff --git a/libiberty/functions.texi b/libiberty/functions.texi
index 9323ff9f2e4..5d368f8335c 100644
--- a/libiberty/functions.texi
+++ b/libiberty/functions.texi
@@ -1715,6 +1715,24 @@ that the converted value is unsigned.
@end deftypefn
+@c strtoll.c:33
+@deftypefn Supplemental {long long int} strtoll (const char *@var{string}, @
+ char **@var{endptr}, int @var{base})
+@deftypefnx Supplemental {unsigned long long int} strtoull (@
+ const char *@var{string}, char **@var{endptr}, int @var{base})
+
+The @code{strtoll} function converts the string in @var{string} to a
+long long integer value according to the given @var{base}, which must be
+between 2 and 36 inclusive, or be the special value 0. If @var{base}
+is 0, @code{strtoll} will look for the prefixes @code{0} and @code{0x}
+to indicate bases 8 and 16, respectively, else default to base 10.
+When the base is 16 (either explicitly or implicitly), a prefix of
+@code{0x} is allowed. The handling of @var{endptr} is as that of
+@code{strtod} above. The @code{strtoull} function is the same, except
+that the converted value is unsigned.
+
+@end deftypefn
+
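The behaviour documented above can be exercised with a short caller; this is an illustrative test, not part of the patch:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
      char *end;
      long long v;

      v = strtoll ("0x1f", &end, 0);        /* base 0 + "0x" prefix => hex, v == 31 */
      printf ("%lld, tail \"%s\"\n", v, end);

      v = strtoll ("123abc", &end, 10);     /* stops at 'a'; end points to "abc" */
      printf ("%lld, tail \"%s\"\n", v, end);

      errno = 0;
      v = strtoll ("99999999999999999999", &end, 10);
      if (errno == ERANGE)                  /* out of range => LLONG_MAX and ERANGE */
        printf ("clamped to %lld\n", v);
      return 0;
    }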
@c strsignal.c:502
@deftypefn Extension int strtosigno (const char *@var{name})
diff --git a/libiberty/pex-common.c b/libiberty/pex-common.c
index 6fd3fdecd80..146010a6fca 100644
--- a/libiberty/pex-common.c
+++ b/libiberty/pex-common.c
@@ -267,7 +267,8 @@ pex_run_in_environment (struct pex_obj *obj, int flags, const char *executable,
if (out < 0)
{
out = obj->funcs->open_write (obj, outname,
- (flags & PEX_BINARY_OUTPUT) != 0);
+ (flags & PEX_BINARY_OUTPUT) != 0,
+ (flags & PEX_STDOUT_APPEND) != 0);
if (out < 0)
{
*err = errno;
@@ -319,8 +320,9 @@ pex_run_in_environment (struct pex_obj *obj, int flags, const char *executable,
}
else
{
- errdes = obj->funcs->open_write (obj, errname,
- (flags & PEX_BINARY_ERROR) != 0);
+ errdes = obj->funcs->open_write (obj, errname,
+ (flags & PEX_BINARY_ERROR) != 0,
+ (flags & PEX_STDERR_APPEND) != 0);
if (errdes < 0)
{
*err = errno;
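The new append parameter is driven by the PEX_STDOUT_APPEND and PEX_STDERR_APPEND flags used in the hunk above. A minimal sketch of a caller that appends a child's stdout to a log file, assuming the standard libiberty pex API (pex_init/pex_run/pex_free); the command and file name are illustrative:

    #include "libiberty.h"

    /* Run "echo hello" and append its stdout to build.log.  Returns NULL on
       success or a libiberty error message; status collection is omitted.  */
    static const char *
    append_echo_to_log (void)
    {
      struct pex_obj *pex = pex_init (0, "demo", NULL);
      char *argv[] = { (char *) "echo", (char *) "hello", NULL };
      int err;
      const char *msg = pex_run (pex, PEX_LAST | PEX_SEARCH | PEX_STDOUT_APPEND,
                                 "echo", argv, "build.log", NULL, &err);
      pex_free (pex);
      return msg;
    }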
diff --git a/libiberty/pex-common.h b/libiberty/pex-common.h
index af338e6a34b..b6db248f692 100644
--- a/libiberty/pex-common.h
+++ b/libiberty/pex-common.h
@@ -104,7 +104,7 @@ struct pex_funcs
/* Open file NAME for writing. If BINARY is non-zero, open in
binary mode. Return >= 0 on success, -1 on error. */
int (*open_write) (struct pex_obj *, const char */* name */,
- int /* binary */);
+ int /* binary */, int /* append */);
/* Execute a child process. FLAGS, EXECUTABLE, ARGV, ERR are from
pex_run. IN, OUT, ERRDES, TOCLOSE are all descriptors, from
open_read, open_write, or pipe, or they are one of STDIN_FILE_NO,
diff --git a/libiberty/pex-djgpp.c b/libiberty/pex-djgpp.c
index 0721139954f..b014ffa3317 100644
--- a/libiberty/pex-djgpp.c
+++ b/libiberty/pex-djgpp.c
@@ -43,7 +43,7 @@ extern int errno;
#endif
static int pex_djgpp_open_read (struct pex_obj *, const char *, int);
-static int pex_djgpp_open_write (struct pex_obj *, const char *, int);
+static int pex_djgpp_open_write (struct pex_obj *, const char *, int, int);
static pid_t pex_djgpp_exec_child (struct pex_obj *, int, const char *,
char * const *, char * const *,
int, int, int, int,
@@ -90,10 +90,12 @@ pex_djgpp_open_read (struct pex_obj *obj ATTRIBUTE_UNUSED,
static int
pex_djgpp_open_write (struct pex_obj *obj ATTRIBUTE_UNUSED,
- const char *name, int binary)
+ const char *name, int binary, int append)
{
/* Note that we can't use O_EXCL here because gcc may have already
created the temporary file via make_temp_file. */
+ if (append)
+ return -1;
return open (name,
(O_WRONLY | O_CREAT | O_TRUNC
| (binary ? O_BINARY : O_TEXT)),
diff --git a/libiberty/pex-unix.c b/libiberty/pex-unix.c
index addf8ee23df..07151157478 100644
--- a/libiberty/pex-unix.c
+++ b/libiberty/pex-unix.c
@@ -301,7 +301,7 @@ pex_wait (struct pex_obj *obj, pid_t pid, int *status, struct pex_time *time)
static void pex_child_error (struct pex_obj *, const char *, const char *, int)
ATTRIBUTE_NORETURN;
static int pex_unix_open_read (struct pex_obj *, const char *, int);
-static int pex_unix_open_write (struct pex_obj *, const char *, int);
+static int pex_unix_open_write (struct pex_obj *, const char *, int, int);
static pid_t pex_unix_exec_child (struct pex_obj *, int, const char *,
char * const *, char * const *,
int, int, int, int,
@@ -350,11 +350,12 @@ pex_unix_open_read (struct pex_obj *obj ATTRIBUTE_UNUSED, const char *name,
static int
pex_unix_open_write (struct pex_obj *obj ATTRIBUTE_UNUSED, const char *name,
- int binary ATTRIBUTE_UNUSED)
+ int binary ATTRIBUTE_UNUSED, int append)
{
/* Note that we can't use O_EXCL here because gcc may have already
created the temporary file via make_temp_file. */
- return open (name, O_WRONLY | O_CREAT | O_TRUNC, PUBLIC_MODE);
+ return open (name, O_WRONLY | O_CREAT
+ | (append ? O_APPEND : O_TRUNC), PUBLIC_MODE);
}
/* Close a file. */
diff --git a/libiberty/pex-win32.c b/libiberty/pex-win32.c
index eae72c51dc0..3d88ae3cccf 100644
--- a/libiberty/pex-win32.c
+++ b/libiberty/pex-win32.c
@@ -78,7 +78,7 @@ backslashify (char *s)
}
static int pex_win32_open_read (struct pex_obj *, const char *, int);
-static int pex_win32_open_write (struct pex_obj *, const char *, int);
+static int pex_win32_open_write (struct pex_obj *, const char *, int, int);
static pid_t pex_win32_exec_child (struct pex_obj *, int, const char *,
char * const *, char * const *,
int, int, int, int,
@@ -126,10 +126,12 @@ pex_win32_open_read (struct pex_obj *obj ATTRIBUTE_UNUSED, const char *name,
static int
pex_win32_open_write (struct pex_obj *obj ATTRIBUTE_UNUSED, const char *name,
- int binary)
+ int binary, int append)
{
/* Note that we can't use O_EXCL here because gcc may have already
created the temporary file via make_temp_file. */
+ if (append)
+ return -1;
return _open (name,
(_O_WRONLY | _O_CREAT | _O_TRUNC
| (binary ? _O_BINARY : _O_TEXT)),
diff --git a/libiberty/strtoll.c b/libiberty/strtoll.c
new file mode 100644
index 00000000000..37ff8cd3d4c
--- /dev/null
+++ b/libiberty/strtoll.c
@@ -0,0 +1,175 @@
+/*-
+ * Copyright (c) 2014 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. [rescinded 22 July 1999]
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+
+@deftypefn Supplemental {long long int} strtoll (const char *@var{string}, @
+ char **@var{endptr}, int @var{base})
+@deftypefnx Supplemental {unsigned long long int} strtoull (@
+ const char *@var{string}, char **@var{endptr}, int @var{base})
+
+The @code{strtoll} function converts the string in @var{string} to a
+long long integer value according to the given @var{base}, which must be
+between 2 and 36 inclusive, or be the special value 0. If @var{base}
+is 0, @code{strtoll} will look for the prefixes @code{0} and @code{0x}
+to indicate bases 8 and 16, respectively, else default to base 10.
+When the base is 16 (either explicitly or implicitly), a prefix of
+@code{0x} is allowed. The handling of @var{endptr} is as that of
+@code{strtod} above. The @code{strtoull} function is the same, except
+that the converted value is unsigned.
+
+@end deftypefn
+
+*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#ifdef HAVE_LIMITS_H
+#include <limits.h>
+#endif
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#include <errno.h>
+#ifdef NEED_DECLARATION_ERRNO
+extern int errno;
+#endif
+#include "safe-ctype.h"
+
+#ifdef HAVE_LONG_LONG
+
+__extension__
+typedef unsigned long long ullong_type;
+
+__extension__
+typedef long long llong_type;
+
+/* FIXME: It'd be nice to configure around these, but the include files are too
+ painful. These macros should at least be more portable than hardwired hex
+ constants. */
+
+#ifndef ULLONG_MAX
+#define ULLONG_MAX (~(ullong_type)0) /* 0xFFFFFFFFFFFFFFFF */
+#endif
+
+#ifndef LLONG_MAX
+#define LLONG_MAX ((llong_type)(ULLONG_MAX >> 1)) /* 0x7FFFFFFFFFFFFFFF */
+#endif
+
+#ifndef LLONG_MIN
+#define LLONG_MIN (~LLONG_MAX) /* 0x8000000000000000 */
+#endif
+
+/*
+ * Convert a string to a long long integer.
+ *
+ * Ignores `locale' stuff. Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+llong_type
+strtoll(const char *nptr, char **endptr, register int base)
+{
+ register const char *s = nptr;
+ register ullong_type acc;
+ register int c;
+ register ullong_type cutoff;
+ register int neg = 0, any, cutlim;
+
+ /*
+ * Skip white space and pick up leading +/- sign if any.
+ * If base is 0, allow 0x for hex and 0 for octal, else
+ * assume decimal; if base is already 16, allow 0x.
+ */
+ do {
+ c = *s++;
+ } while (ISSPACE(c));
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else if (c == '+')
+ c = *s++;
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X')) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+
+ /*
+ * Compute the cutoff value between legal numbers and illegal
+ * numbers. That is the largest legal value, divided by the
+ * base. An input number that is greater than this value, if
+ * followed by a legal input character, is too big. One that
+ * is equal to this value may be valid or not; the limit
+ * between valid and invalid numbers is then based on the last
+ * digit. For instance, if the range for longs is
+ * [-2147483648..2147483647] and the input base is 10,
+ * cutoff will be set to 214748364 and cutlim to either
+ * 7 (neg==0) or 8 (neg==1), meaning that if we have accumulated
+ * a value > 214748364, or equal but the next digit is > 7 (or 8),
+ * the number is too big, and we will return a range error.
+ *
+ * Set any if any `digits' consumed; make it negative to indicate
+ * overflow.
+ */
+ cutoff = neg ? -(ullong_type)LLONG_MIN : LLONG_MAX;
+ cutlim = cutoff % (ullong_type)base;
+ cutoff /= (ullong_type)base;
+ for (acc = 0, any = 0;; c = *s++) {
+ if (ISDIGIT(c))
+ c -= '0';
+ else if (ISALPHA(c))
+ c -= ISUPPER(c) ? 'A' - 10 : 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
+ any = -1;
+ else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ if (any < 0) {
+ acc = neg ? LLONG_MIN : LLONG_MAX;
+ errno = ERANGE;
+ } else if (neg)
+ acc = -acc;
+ if (endptr != 0)
+ *endptr = (char *) (any ? s - 1 : nptr);
+ return (acc);
+}
+
+#endif /* ifdef HAVE_LONG_LONG */
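The cutoff/cutlim overflow test in the loop above can be checked in isolation: for base 10 and a non-negative result, cutoff is LLONG_MAX / 10 and cutlim is LLONG_MAX % 10. A small standalone illustration (the exact values assume a 64-bit long long):

    #include <assert.h>
    #include <limits.h>

    int
    main (void)
    {
      /* Same computation as strtoll above for base 10, non-negative input.  */
      unsigned long long cutoff = (unsigned long long) LLONG_MAX / 10;
      int cutlim = (unsigned long long) LLONG_MAX % 10;

      assert (cutoff == 922337203685477580ULL && cutlim == 7);

      /* acc == cutoff with next digit 7 is still representable ...  */
      assert (cutoff * 10 + 7 == (unsigned long long) LLONG_MAX);
      /* ... while a digit greater than cutlim would overflow, which the
         parser reports by returning LLONG_MAX and setting errno to ERANGE.  */
      return 0;
    }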
diff --git a/libiberty/strtoull.c b/libiberty/strtoull.c
new file mode 100644
index 00000000000..2f580fb3454
--- /dev/null
+++ b/libiberty/strtoull.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2014 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. [rescinded 22 July 1999]
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#ifdef HAVE_LIMITS_H
+#include <limits.h>
+#endif
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#include <errno.h>
+#ifdef NEED_DECLARATION_ERRNO
+extern int errno;
+#endif
+#if 0
+#include <stdlib.h>
+#endif
+#include "ansidecl.h"
+#include "safe-ctype.h"
+
+#ifdef HAVE_LONG_LONG
+
+__extension__
+typedef unsigned long long ullong_type;
+
+#ifndef ULLONG_MAX
+#define ULLONG_MAX (~(ullong_type)0) /* 0xFFFFFFFFFFFFFFFF */
+#endif
+
+/*
+ * Convert a string to an unsigned long long integer.
+ *
+ * Ignores `locale' stuff. Assumes that the upper and lower case
+ * alphabets and digits are each contiguous.
+ */
+ullong_type
+strtoull(const char *nptr, char **endptr, register int base)
+{
+ register const char *s = nptr;
+ register ullong_type acc;
+ register int c;
+ register ullong_type cutoff;
+ register int neg = 0, any, cutlim;
+
+ /*
+ * See strtol for comments as to the logic used.
+ */
+ do {
+ c = *s++;
+ } while (ISSPACE(c));
+ if (c == '-') {
+ neg = 1;
+ c = *s++;
+ } else if (c == '+')
+ c = *s++;
+ if ((base == 0 || base == 16) &&
+ c == '0' && (*s == 'x' || *s == 'X')) {
+ c = s[1];
+ s += 2;
+ base = 16;
+ }
+ if (base == 0)
+ base = c == '0' ? 8 : 10;
+ cutoff = (ullong_type)ULLONG_MAX / (ullong_type)base;
+ cutlim = (ullong_type)ULLONG_MAX % (ullong_type)base;
+ for (acc = 0, any = 0;; c = *s++) {
+ if (ISDIGIT(c))
+ c -= '0';
+ else if (ISALPHA(c))
+ c -= ISUPPER(c) ? 'A' - 10 : 'a' - 10;
+ else
+ break;
+ if (c >= base)
+ break;
+ if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
+ any = -1;
+ else {
+ any = 1;
+ acc *= base;
+ acc += c;
+ }
+ }
+ if (any < 0) {
+ acc = ULLONG_MAX;
+ errno = ERANGE;
+ } else if (neg)
+ acc = -acc;
+ if (endptr != 0)
+ *endptr = (char *) (any ? s - 1 : nptr);
+ return (acc);
+}
+
+#endif /* ifdef HAVE_LONG_LONG */
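As with the C library function, the implementation above accepts a leading minus sign and negates the accumulated value in unsigned arithmetic, so "-1" converts to ULLONG_MAX. A short illustration of that behaviour (calls whichever strtoull the platform provides):

    #include <assert.h>
    #include <limits.h>
    #include <stdlib.h>

    int
    main (void)
    {
      assert (strtoull ("-1", NULL, 10) == ULLONG_MAX);  /* wraps, no error */
      assert (strtoull ("0xff", NULL, 0) == 255ULL);     /* base 0 => hex */
      return 0;
    }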
diff --git a/libiberty/testsuite/Makefile.in b/libiberty/testsuite/Makefile.in
index 69ac1f5105e..e5606990eec 100644
--- a/libiberty/testsuite/Makefile.in
+++ b/libiberty/testsuite/Makefile.in
@@ -45,7 +45,7 @@ all:
# CHECK is set to "really_check" or the empty string by configure.
check: @CHECK@
-really-check: check-cplus-dem check-pexecute check-expandargv
+really-check: check-cplus-dem check-pexecute check-expandargv check-strtol
# Run some tests of the demangler.
check-cplus-dem: test-demangle $(srcdir)/demangle-expected
@@ -59,6 +59,10 @@ check-pexecute: test-pexecute
check-expandargv: test-expandargv
./test-expandargv
+# Check the strtol functionality
+check-strtol: test-strtol
+ ./test-strtol
+
TEST_COMPILE = $(CC) @DEFS@ $(LIBCFLAGS) -I.. -I$(INCDIR) $(HDEFINES)
test-demangle: $(srcdir)/test-demangle.c ../libiberty.a
$(TEST_COMPILE) -o test-demangle \
@@ -72,6 +76,10 @@ test-expandargv: $(srcdir)/test-expandargv.c ../libiberty.a
$(TEST_COMPILE) -DHAVE_CONFIG_H -I.. -o test-expandargv \
$(srcdir)/test-expandargv.c ../libiberty.a
+test-strtol: $(srcdir)/test-strtol.c ../libiberty.a
+ $(TEST_COMPILE) -DHAVE_CONFIG_H -I.. -o test-strtol \
+ $(srcdir)/test-strtol.c ../libiberty.a
+
# Standard (either GNU or Cygnus) rules we don't use.
html install-html info install-info clean-info dvi pdf install-pdf \
install etags tags installcheck:
@@ -81,6 +89,7 @@ mostlyclean:
rm -f test-demangle
rm -f test-pexecute
rm -f test-expandargv
+ rm -f test-strtol
rm -f core
clean: mostlyclean
distclean: clean
diff --git a/libiberty/testsuite/test-strtol.c b/libiberty/testsuite/test-strtol.c
new file mode 100644
index 00000000000..96d6871c777
--- /dev/null
+++ b/libiberty/testsuite/test-strtol.c
@@ -0,0 +1,184 @@
+/* Test program for strtol family of functions,
+ Copyright (C) 2014 Free Software Foundation, Inc.
+ Written by Yury Gribov <y.gribov@samsung.com>
+
+ This file is part of the libiberty library, which is part of GCC.
+
+ This file is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ In addition to the permissions in the GNU General Public License, the
+ Free Software Foundation gives you unlimited permission to link the
+ compiled version of this file into combinations with other programs,
+ and to distribute those combinations without any restriction coming
+ from the use of this file. (The General Public License restrictions
+ do apply in other respects; for example, they cover modification of
+ the file, and distribution when not linked into a combined
+ executable.)
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
+*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+#include "libiberty.h"
+#include <stdio.h>
+#include <errno.h>
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifndef EXIT_SUCCESS
+#define EXIT_SUCCESS 0
+#endif
+
+#ifndef EXIT_FAILURE
+#define EXIT_FAILURE 1
+#endif
+
+
+/* Test input data. */
+
+enum conversion_fun
+{
+ STRTOL,
+ STRTOLL,
+ STRTOUL,
+ STRTOULL,
+};
+
+#ifdef HAVE_LONG_LONG
+typedef unsigned long long integer_type;
+#else
+typedef unsigned long integer_type;
+#endif
+
+struct test_data_t
+{
+ enum conversion_fun fun;
+ const char *nptr;
+ int base;
+ integer_type res;
+ int errnum;
+};
+
+const struct test_data_t test_data[] = {
+ { STRTOL, "0x123", 0, 0x123L, 0 },
+ { STRTOL, "123", 0, 123L, 0 },
+ { STRTOL, "0123", 0, 0123L, 0 },
+ { STRTOL, "0x7FFFFFFF", 0, 0x7fffffffL, 0 },
+ { STRTOL, "-0x80000000", 0, -0x80000000L, 0 },
+ { STRTOUL, "0x123", 0, 0x123UL, 0 },
+ { STRTOUL, "123", 0, 123UL, 0 },
+ { STRTOUL, "0123", 0, 0123UL, 0 },
+ { STRTOUL, "0xFFFFFFFF", 0, 0xffffffffUL, 0 },
+#if SIZEOF_LONG == 4
+ { STRTOL, "0x80000000", 0, 0x7fffffffL, ERANGE },
+ { STRTOL, "-0x80000001", 0, -0x80000000L, ERANGE },
+ { STRTOUL, "0x100000000", 0, 0xffffffffUL, ERANGE },
+#endif
+#ifdef HAVE_LONG_LONG
+ { STRTOLL, "0x123", 0, 0x123LL, 0 },
+ { STRTOLL, "123", 0, 123LL, 0 },
+ { STRTOLL, "0123", 0, 0123LL, 0 },
+ { STRTOLL, "0x7FFFFFFFFFFFFFFF", 0, 0x7fffffffffffffffLL, 0 },
+ { STRTOLL, "-0x8000000000000000", 0, -0x8000000000000000LL, 0 },
+ { STRTOULL, "0x123", 0, 0x123ULL, 0 },
+ { STRTOULL, "123", 0, 123ULL, 0 },
+ { STRTOULL, "0123", 0, 0123ULL, 0 },
+ { STRTOULL, "0xFFFFFFFFFFFFFFFF", 0, 0xffffffffffffffffULL, 0 },
+#if SIZEOF_LONG_LONG == 8
+ { STRTOLL, "0x8000000000000000", 0, 0x7fffffffffffffffLL, ERANGE },
+ { STRTOLL, "-0x8000000000000001", 0, -0x8000000000000000LL, ERANGE },
+ { STRTOULL, "0x10000000000000000", 0, 0xffffffffffffffffULL, ERANGE },
+#endif
+#endif
+};
+
+/* run_tests:
+ Run conversion function
+ Compare results
+ Return number of fails */
+
+int
+run_tests (const struct test_data_t *test_data, size_t ntests)
+{
+ int fails = 0, failed;
+ size_t i;
+
+ for (i = 0; i < ntests; ++i)
+ {
+ integer_type res;
+ int saved_errno;
+
+ errno = 0;
+
+ switch (test_data[i].fun)
+ {
+ case STRTOL:
+ res = strtol (test_data[i].nptr, 0, test_data[i].base);
+ break;
+ case STRTOUL:
+ res = strtoul (test_data[i].nptr, 0, test_data[i].base);
+ break;
+#ifdef HAVE_LONG_LONG
+ case STRTOLL:
+ res = strtoll (test_data[i].nptr, 0, test_data[i].base);
+ break;
+ case STRTOULL:
+ res = strtoull (test_data[i].nptr, 0, test_data[i].base);
+ break;
+#endif
+ }
+
+ saved_errno = errno;
+
+ failed = 0;
+
+ /* Compare result */
+ if (res != test_data[i].res)
+ {
+ printf ("FAIL: test-strtol-%zd. Results don't match.\n", i);
+ failed++;
+ }
+
+ /* Compare errno */
+ if (saved_errno != test_data[i].errnum)
+ {
+ printf ("FAIL: test-strtol-%zd. Errnos don't match.\n", i);
+ failed++;
+ }
+
+ if (!failed)
+ printf ("PASS: test-strtol-%zd.\n", i);
+ else
+ fails++;
+ }
+
+ return fails;
+}
+
+int
+main(int argc, char **argv)
+{
+ int fails;
+ fails = run_tests (test_data, sizeof (test_data) / sizeof (test_data[0]));
+ exit (fails ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index ee4afbfa086..2b602ac68b9 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,284 @@
+2016-01-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/68824
+ * tsan/tsan_interceptors.cc (NEED_TLS_GET_ADDR, __tls_get_addr,
+ InitializeInterceptors): Cherry pick upstream r258119.
+
+2015-11-24 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * include/system/linux/asm/ptrace.h: Move to ...
+ * include/system/asm/ptrace.h: ... this.
+
+2015-11-24 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * include/system/linux/asm/ptrace.h: New header.
+
+2015-11-23 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * All source files: Merge from upstream r253555.
+ * configure.tgt: Enable LSan on aarch64-*-linux* targets. Add new
+ dependences for TSan for aarch64-*-linux* targets.
+ * tsan/Makefile.am: Add new source files.
+ * configure: Regenerate.
+ * tsan/Makefile.in: Likewise.
+
+2015-11-09 Alan Modra <amodra@gmail.com>
+
+ * sanitizer_common/sanitizer_common_interceptors.inc: Update size
+ params for _obstack_begin_1, _obstack_begin, _obstack_newchunk
+ interceptors.
+ * configure.ac: Substitute OBSTACK_DEFS.
+ * asan/Makefile.am: Add OBSTACK_DEFS to DEFS.
+ * tsan/Makefile.am: Likewise.
+ * configure: Regenerate.
+ * Makefile.in: Regenerate.
+ * asan/Makefile.in: Regenerate.
+ * interception/Makefile.in: Regenerate.
+ * libbacktrace/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
+2015-10-22 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR bootstrap/68041
+ * configure.ac (link_sanitizer_common): Link against librt only if it
+ contains shm_open, required by sanitizers.
+ (CXX_ABI_NEEDED): Remove variable.
+ * configure: Regenerate.
+ * ubsan/Makefile.am (libubsan_la_LIBADD): Do not add -lc++abi anymore.
+ * ubsan/Makefile.in: Regenerate.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * HOWTO_MERGE: New file.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * sanitizer_common/sanitizer_stacktrace.cc (GetCanonicFrame): Assume we
+ compiled code with GCC when extracting the caller PC for ARM if no
+ valid frame pointer is available.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR bootstrap/63888
+ Reapply:
+ 2015-02-20 Jakub Jelinek <jakub@redhat.com>
+
+ * asan/asan_globals.cc (RegisterGlobal): Disable detect_odr_violation
+ support until it is rewritten upstream.
+
+ * c-c++-common/asan/pr63888.c: New test.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ PR sanitizer/63958
+ Reapply:
+ 2014-10-14 David S. Miller <davem@davemloft.net>
+
+ * sanitizer_common/sanitizer_platform_limits_linux.cc (time_t):
+ Define at __kernel_time_t, as needed for sparc.
+ (struct __old_kernel_stat): Don't check if __sparc__ is defined.
+ * libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+ (__sanitizer): Define struct___old_kernel_stat_sz,
+ struct_kernel_stat_sz, and struct_kernel_stat64_sz for sparc.
+ (__sanitizer_ipc_perm): Adjust for sparc targets.
+ (__sanitizer_shmid_ds): Likewise.
+ (__sanitizer_sigaction): Likewise.
+ (IOC_SIZE): Likewise.
+
+2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
+
+ * All source files: Merge from upstream r250806.
+ * configure.ac (link_sanitizer_common): Add -lrt flag.
+ * configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
+ Set USE_CXX_ABI_FLAG=true for darwin.
+ * asan/Makefile.am (asan_files): Add new files.
+ (DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
+ DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
+ * asan/Makefile.in: Regenerate.
+ * ubsan/Makefile.am (ubsan_files): Add new files.
+ (DEFS): Add DCAN_SANITIZE_UB=1.
+ (libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
+ * ubsan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Add new files.
+ (DEFS): Add DCAN_SANITIZE_UB=0.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * asan/libtool-version: Bump the libasan SONAME.
+
+2015-01-25 Venkataramanan Kumar <venkataramanan.kumar@linaro.org>
+
+ * configure.ac (TSAN_TARGET_DEPENDENT_OBJECTS): Define.
+ * configure: Regenerate.
+ * tsan/Makefile.am (EXTRA_libtsan_la_SOURCES): Define.
+ (libtsan_la_DEPENDENCIES): Likewise.
+ * Makefile.in: Regenerate.
+ * asan/Makefile.in: Regenerate.
+ * interception/Makefile.in: Regenerate.
+ * libbacktrace/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
+2015-02-20 Jakub Jelinek <jakub@redhat.com>
+
+ PR bootstrap/63888
+ * asan/asan_globals.cc (RegisterGlobal): Disable detect_odr_violation
+ support until it is rewritten upstream.
+
+2015-01-20 Jakub Jelinek <jakub@redhat.com>
+
+ PR sanitizer/64632
+ * ubsan/ubsan_type_hash.cc: Cherry pick upstream r224972.
+
+2015-04-13 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/64839
+ * sanitizer_common/sanitizer_platform.h: Cherry pick
+ upstream r234470.
+ * sanitizer_common/sanitizer_platform_limits_posix.cc: Ditto.
+ * configure.ac (RPC_DEFS): Check for presence of RPC headers.
+ * sanitizer_common/Makefile.am (DEFS): Pass info to compiler.
+ * Makefile.in: Regenerate.
+ * asan/Makefile.in: Regenerate.
+ * config.h.in: Regenerate.
+ * configure: Regenerate.
+ * interception/Makefile.in: Regenerate.
+ * libbacktrace/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
+2014-11-21 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/61137
+ * config/ia64/ia64.c (ia64_attribute_takes_identifier_p): New function.
+ (TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P): Redefine to it.
+
+2014-11-14 Uros Bizjak <ubizjak@gmail.com>
+
+ * sanitizer_common/Makefile.am (AM_CXXFLAGS): Use -std=gnu++11.
+ * asan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * lsan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * interception/Makefile.am (AM_CXXFLAGS): Ditto.
+ * tsan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * libbacktrace/Makefile.am (AM_CXXFLAGS): Ditto.
+ * ubsan/Makefile.am (AM_CXXFLAGS): Ditto.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * asan/Makefile.in: Ditto.
+ * lsan/Makefile.in: Ditto.
+ * interception/Makefile.in: Ditto.
+ * tsan/Makefile.in: Ditto.
+ * libbacktrace/Makefile.in: Ditto.
+ * ubsan/Makefile.in: Ditto.
+
+2014-11-13 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r221802.
+ * sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
+ (LibbacktraceSymbolizer::SymbolizeData): Replace 'address'
+ with 'start' to follow the new interface.
+ * asan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * interception/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * libbacktrace/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * lsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ (AM_CXXFLAGS): Added -std=c++11.
+ * tsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * ubsan/Makefile.am (AM_CXXFLAGS): Added -std=c++11.
+ * asan/Makefile.in: Regenerate.
+ * interception/Makefile.in: Regenerate.
+ * libbacktrace/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
+2014-10-14 David S. Miller <davem@davemloft.net>
+
+ * sanitizer_common/sanitizer_platform_limits_linux.cc (time_t):
+ Define at __kernel_time_t, as needed for sparc.
+ (struct __old_kernel_stat): Don't check if __sparc__ is defined.
+ * libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+ (__sanitizer): Define struct___old_kernel_stat_sz,
+ struct_kernel_stat_sz, and struct_kernel_stat64_sz for sparc.
+ (__sanitizer_ipc_perm): Adjust for sparc targets.
+ (__sanitizer_shmid_ds): Likewise.
+ (__sanitizer_sigaction): Likewise.
+ (IOC_SIZE): Likewise.
+
+2014-10-14 Jakub Jelinek <jakub@redhat.com>
+
+ * ubsan/Makefile.am (DEFS): Add -DPIC.
+ * ubsan/Makefile.in: Regenerated.
+
+2014-09-19 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r218156.
+ * asan/Makefile.am (asan_files): Added new files.
+ * asan/Makefile.in: Regenerate.
+ * ubsan/Makefile.am (ubsan_files): Added new files.
+ * ubsan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Added new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * asan/libtool-version: Bump the libasan SONAME.
+
+2014-09-10 Jakub Jelinek <jakub@redhat.com>
+
+ * ubsan/ubsan_handlers.cc, ubsan/ubsan_handlers.h: Cherry pick
+ upstream r215485, r217389, r217391 and r217400.
+
+2014-06-23 Paolo Carlini <paolo.carlini@oracle.com>
+
+ * sanitizer_common/sanitizer_common_interceptors.inc:
+ Cherry pick upstream r211008.
+
+2014-06-11 Richard Biener <rguenther@suse.de>
+
+ * asan/asan_linux.cc: Cherry pick upstream r210012.
+
+2014-05-30 Jakub Jelinek <jakub@redhat.com>
+
+ * sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream
+ r209879.
+ * sanitizer_common/sanitizer_common.h: Likewise.
+ * asan/asan_mapping.h: Likewise.
+ * asan/asan_linux.cc: Likewise.
+ * tsan/tsan_mman.cc: Cherry pick upstream r209744.
+ * sanitizer_common/sanitizer_allocator.h: Likewise.
+
+2014-05-23 Marek Polacek <polacek@redhat.com>
+
+ * ubsan/ubsan_value.cc (getFloatValue): Handle 96-bit
+ floating-point types.
+
+2014-05-22 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r209283.
+ * asan/Makefile.am (asan_files): Added new files.
+ * asan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Added new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ * sanitizer_common/Makefile.in: Regenerate.
+
+2014-05-14 Yury Gribov <y.gribov@samsung.com>
+
+ PR sanitizer/61100
+ * Makefile.am (nodist_saninclude_HEADERS): Install
+ public headers.
+ * Makefile.in: Regenerate.
+
2015-01-27 Matthias Klose <doko@ubuntu.com>
* configure.ac: Move AM_ENABLE_MULTILIB before AC_PROG_CC.
diff --git a/libsanitizer/HOWTO_MERGE b/libsanitizer/HOWTO_MERGE
new file mode 100644
index 00000000000..d0eca40ec06
--- /dev/null
+++ b/libsanitizer/HOWTO_MERGE
@@ -0,0 +1,39 @@
+In general, the merging process should not be very difficult, but we need to
+track various ABI changes and GCC-specific patches carefully. Here is a
+general list of actions required to perform the merge:
+
+* Check out a recent GCC tree.
+* Run the merge.sh script from the libsanitizer directory.
+* Modify the Makefile.am files in the asan/tsan/lsan/ubsan/sanitizer_common/interception
+  directories if needed. In particular, you may need to add new source files
+  to (and remove old ones from) the source file lists, add new flags to
+  {C, CXX}FLAGS if needed, and update DEFS with newly defined variables. You
+  can find these changes in the corresponding CMakeLists.txt and
+  config-ix.cmake files in the compiler-rt source directory.
+* Apply all needed GCC-specific patches to libsanitizer (note that some of
+  them might already be included upstream).
+* Apply all necessary compiler changes. Be especially careful here: you must
+  not break the ABI between compiler and library. You can find these changes
+  by inspecting the history of the AddressSanitizer.cpp and ThreadSanitizer.cpp
+  files in the LLVM source tree.
+* Update the ASan testsuite with the corresponding tests from the
+  lib/asan/tests directory. Not all tests can be migrated easily, so not all
+  of them need to be adapted.
+* Modify the configure.ac file if needed (e.g. if the sanitizer libraries need
+  to be linked against a new library).
+* Add new target platforms to the configure.tgt script if needed.
+* Bump the SONAME for the sanitizer libraries in the asan/tsan/ubsan
+  libtool-version files if the ABI has changed.
+* Regenerate the configure script and all Makefiles with autoreconf. You
+  should use exactly the same autoconf and automake versions as the other GCC
+  directories (the current versions are recorded in the Makefile.in and
+  configure files).
+* Run regression testing on at least three platforms (e.g. x86-linux-gnu, x86_64-linux-gnu,
+ aarch64-linux-gnu, arm-linux-gnueabi).
+* Run {A, UB}San bootstrap on at least three platforms.
+* Compare the ABI of the corresponding libclang_rt-asan library and the newly
+  built libasan. The libabigail tool (https://sourceware.org/libabigail/index.html)
+  works well for such a comparison. Note that the list of exported symbols may
+  differ, e.g. because libasan currently does not include the UBSan runtime.
+* Split your changes into logical parts (e.g. raw merge, compiler changes,
+  GCC-specific changes in libasan, configure/Makefile changes). The review
+  process has O(N^2) complexity, so splitting the patch this way simplifies
+  and usually speeds up review.
+* Send your patches for review to GCC Patches Mailing List (gcc-patches@gcc.gnu.org).
diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 4688f0c8693..dfd606aa0c6 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-196489
+253555
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
diff --git a/libsanitizer/Makefile.in b/libsanitizer/Makefile.in
index 0b8924554dc..27b8758d64c 100644
--- a/libsanitizer/Makefile.in
+++ b/libsanitizer/Makefile.in
@@ -170,6 +170,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -181,10 +182,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am
index 3f07a834c69..02db7752089 100644
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -3,21 +3,24 @@ AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 -DASAN_NEEDS_SEGV=1
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 -DCAN_SANITIZE_UB=0 @OBSTACK_DEFS@
if USING_MAC_INTERPOSE
DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT
endif
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
- asan_allocator2.cc \
- asan_dll_thunk.cc \
+ asan_activation.cc \
+ asan_allocator.cc \
+ asan_debugging.cc \
asan_fake_stack.cc \
+ asan_flags.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
@@ -32,8 +35,11 @@ asan_files = \
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
+ asan_suppressions.cc \
asan_thread.cc \
- asan_win.cc
+ asan_win.cc \
+ asan_win_dll_thunk.cc \
+ asan_win_dynamic_runtime_thunk.cc
libasan_la_SOURCES = $(asan_files)
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la
diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in
index 273eb4b2645..de7e78d85cf 100644
--- a/libsanitizer/asan/Makefile.in
+++ b/libsanitizer/asan/Makefile.in
@@ -88,12 +88,14 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
-am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
- asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
- asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
- asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
- asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
- asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
+am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
+ asan_fake_stack.lo asan_flags.lo asan_globals.lo \
+ asan_interceptors.lo asan_linux.lo asan_mac.lo \
+ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
+ asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
+ asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
+ asan_suppressions.lo asan_thread.lo asan_win.lo \
+ asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -142,8 +144,8 @@ CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \
-D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \
- -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 \
- -DASAN_NEEDS_SEGV=1 $(am__append_1)
+ -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 \
+ -DCAN_SANITIZE_UB=0 @OBSTACK_DEFS@ $(am__append_1)
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
@@ -177,6 +179,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -188,10 +191,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -267,14 +272,17 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros \
+ $(LIBSTDCXX_RAW_CXX_CXXFLAGS) -std=gnu++11
ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
- asan_allocator2.cc \
- asan_dll_thunk.cc \
+ asan_activation.cc \
+ asan_allocator.cc \
+ asan_debugging.cc \
asan_fake_stack.cc \
+ asan_flags.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
@@ -289,8 +297,11 @@ asan_files = \
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
+ asan_suppressions.cc \
asan_thread.cc \
- asan_win.cc
+ asan_win.cc \
+ asan_win_dll_thunk.cc \
+ asan_win_dynamic_runtime_thunk.cc
libasan_la_SOURCES = $(asan_files)
libasan_la_LIBADD = \
@@ -412,9 +423,11 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@
@@ -429,8 +442,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dynamic_runtime_thunk.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/libsanitizer/asan/asan_activation.cc b/libsanitizer/asan/asan_activation.cc
new file mode 100644
index 00000000000..58867956086
--- /dev/null
+++ b/libsanitizer/asan/asan_activation.cc
@@ -0,0 +1,142 @@
+//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __asan {
+
+static struct AsanDeactivatedFlags {
+ AllocatorOptions allocator_options;
+ int malloc_context_size;
+ bool poison_heap;
+ bool coverage;
+ const char *coverage_dir;
+
+ void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
+#define ASAN_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &f->Name);
+#define COMMON_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &cf->Name);
+#include "asan_activation_flags.inc"
+#undef ASAN_ACTIVATION_FLAG
+#undef COMMON_ACTIVATION_FLAG
+
+ RegisterIncludeFlags(parser, cf);
+ }
+
+ void OverrideFromActivationFlags() {
+ Flags f;
+ CommonFlags cf;
+ FlagParser parser;
+ RegisterActivationFlags(&parser, &f, &cf);
+
+ // Copy the current activation flags.
+ allocator_options.CopyTo(&f, &cf);
+ cf.malloc_context_size = malloc_context_size;
+ f.poison_heap = poison_heap;
+ cf.coverage = coverage;
+ cf.coverage_dir = coverage_dir;
+ cf.verbosity = Verbosity();
+ cf.help = false; // this is activation-specific help
+
+ // Check if activation flags need to be overridden.
+ if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
+ parser.ParseString(env);
+ }
+
+ // Override from getprop asan.options.
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ parser.ParseString(buf);
+
+ SetVerbosity(cf.verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (cf.help) parser.PrintFlagDescriptions();
+
+ allocator_options.SetFrom(&f, &cf);
+ malloc_context_size = cf.malloc_context_size;
+ poison_heap = f.poison_heap;
+ coverage = cf.coverage;
+ coverage_dir = cf.coverage_dir;
+ }
+
+ void Print() {
+ Report(
+ "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
+ "malloc_context_size %d, alloc_dealloc_mismatch %d, "
+ "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+ allocator_options.quarantine_size_mb, allocator_options.max_redzone,
+ poison_heap, malloc_context_size,
+ allocator_options.alloc_dealloc_mismatch,
+ allocator_options.may_return_null, coverage, coverage_dir);
+ }
+} asan_deactivated_flags;
+
+static bool asan_is_deactivated;
+
+void AsanDeactivate() {
+ CHECK(!asan_is_deactivated);
+ VReport(1, "Deactivating ASan\n");
+
+ // Stash runtime state.
+ GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+ asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+ asan_deactivated_flags.poison_heap = CanPoisonMemory();
+ asan_deactivated_flags.coverage = common_flags()->coverage;
+ asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
+
+ // Deactivate the runtime.
+ SetCanPoisonMemory(false);
+ SetMallocContextSize(1);
+ ReInitializeCoverage(false, nullptr);
+
+ AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+ disabled.quarantine_size_mb = 0;
+ disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
+ disabled.max_redzone = 16;
+ disabled.alloc_dealloc_mismatch = false;
+ disabled.may_return_null = true;
+ ReInitializeAllocator(disabled);
+
+ asan_is_deactivated = true;
+}
+
+void AsanActivate() {
+ if (!asan_is_deactivated) return;
+ VReport(1, "Activating ASan\n");
+
+ UpdateProcessName();
+
+ asan_deactivated_flags.OverrideFromActivationFlags();
+
+ SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
+ SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
+ ReInitializeCoverage(asan_deactivated_flags.coverage,
+ asan_deactivated_flags.coverage_dir);
+ ReInitializeAllocator(asan_deactivated_flags.allocator_options);
+
+ asan_is_deactivated = false;
+ if (Verbosity()) {
+ Report("Activated with flags:\n");
+ asan_deactivated_flags.Print();
+ }
+}
+
+} // namespace __asan
diff --git a/libsanitizer/asan/asan_activation.h b/libsanitizer/asan/asan_activation.h
new file mode 100644
index 00000000000..162a5ebcea9
--- /dev/null
+++ b/libsanitizer/asan/asan_activation.h
@@ -0,0 +1,21 @@
+//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ACTIVATION_H
+#define ASAN_ACTIVATION_H
+
+namespace __asan {
+void AsanDeactivate();
+void AsanActivate();
+} // namespace __asan
+
+#endif // ASAN_ACTIVATION_H
diff --git a/libsanitizer/asan/asan_activation_flags.inc b/libsanitizer/asan/asan_activation_flags.inc
new file mode 100644
index 00000000000..4bab38213c1
--- /dev/null
+++ b/libsanitizer/asan/asan_activation_flags.inc
@@ -0,0 +1,33 @@
+//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A subset of ASan (and common) runtime flags supported at activation time.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ACTIVATION_FLAG
+# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+#ifndef COMMON_ACTIVATION_FLAG
+# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+// ASAN_ACTIVATION_FLAG(Type, Name)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_ACTIVATION_FLAG(int, redzone)
+ASAN_ACTIVATION_FLAG(int, max_redzone)
+ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
+ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
+ASAN_ACTIVATION_FLAG(bool, poison_heap)
+
+COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
+COMMON_ACTIVATION_FLAG(int, malloc_context_size)
+COMMON_ACTIVATION_FLAG(bool, coverage)
+COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
+COMMON_ACTIVATION_FLAG(int, verbosity)
+COMMON_ACTIVATION_FLAG(bool, help)
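This .inc file follows the X-macro convention: each includer defines ASAN_ACTIVATION_FLAG and COMMON_ACTIVATION_FLAG to whatever expansion it needs (a struct field, a RegisterFlag call, and so on) before including the list, as asan_activation.cc does above. A generic, self-contained sketch of the same pattern with made-up flag names (the list is inlined as a macro here instead of a separate .inc file):

    #include <stdio.h>

    /* The flag list is written once and expanded differently by each user.  */
    #define FLAG_LIST(FLAG) \
      FLAG (int, verbosity) \
      FLAG (int, redzone)

    /* Expansion 1: one struct member per flag.  */
    #define DECLARE_FLAG(Type, Name) Type Name;
    struct flags { FLAG_LIST (DECLARE_FLAG) };
    #undef DECLARE_FLAG

    /* Expansion 2: print every flag by name.  */
    #define PRINT_FLAG(Type, Name) printf (#Name " = %d\n", f.Name);

    int
    main (void)
    {
      struct flags f = { 1, 16 };
      FLAG_LIST (PRINT_FLAG)
      return 0;
    }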
diff --git a/libsanitizer/asan/asan_allocator.cc b/libsanitizer/asan/asan_allocator.cc
new file mode 100644
index 00000000000..187c405e118
--- /dev/null
+++ b/libsanitizer/asan/asan_allocator.cc
@@ -0,0 +1,906 @@
+//===-- asan_allocator.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Implementation of ASan's memory allocator, 2-nd version.
+// This variant uses the allocator from sanitizer_common, i.e. the one shared
+// with ThreadSanitizer and MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+#include "lsan/lsan_common.h"
+
+namespace __asan {
+
+// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
+// We use adaptive redzones: for larger allocation larger redzones are used.
+static u32 RZLog2Size(u32 rz_log) {
+ CHECK_LT(rz_log, 8);
+ return 16 << rz_log;
+}
+
+static u32 RZSize2Log(u32 rz_size) {
+ CHECK_GE(rz_size, 16);
+ CHECK_LE(rz_size, 2048);
+ CHECK(IsPowerOfTwo(rz_size));
+ u32 res = Log2(rz_size) - 4;
+ CHECK_EQ(rz_size, RZLog2Size(res));
+ return res;
+}
+
+static AsanAllocator &get_allocator();
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// L L L L L L H H U U U U U U R R
+// L -- left redzone words (0 or more bytes)
+// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+// U -- user memory.
+// R -- right redzone (0 or more bytes)
+// ChunkBase consists of ChunkHeader and other bytes that overlap with user
+// memory.
+
+// If the left redzone is greater than the ChunkHeader size we store a magic
+// value in the first uptr word of the memory block and store the address of
+// ChunkBase in the next uptr.
+// M B L L L L L L L L L H H U U U U U U
+// | ^
+// ---------------------|
+// M -- magic value kAllocBegMagic
+// B -- address of ChunkHeader pointing to the first 'H'
+static const uptr kAllocBegMagic = 0xCC6E96B9;
+
+struct ChunkHeader {
+ // 1-st 8 bytes.
+ u32 chunk_state : 8; // Must be first.
+ u32 alloc_tid : 24;
+
+ u32 free_tid : 24;
+ u32 from_memalign : 1;
+ u32 alloc_type : 2;
+ u32 rz_log : 3;
+ u32 lsan_tag : 2;
+ // 2-nd 8 bytes
+ // This field is used for small sizes. For large sizes it is equal to
+ // SizeClassMap::kMaxSize and the actual size is stored in the
+ // SecondaryAllocator's metadata.
+ u32 user_requested_size;
+ u32 alloc_context_id;
+};
+
+struct ChunkBase : ChunkHeader {
+ // Header2, intersects with user memory.
+ u32 free_context_id;
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 16);
+COMPILER_CHECK(kChunkHeader2Size <= 16);
+
+// Every chunk of memory allocated by this allocator can be in one of 3 states:
+// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
+// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
+enum {
+ CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
+ CHUNK_ALLOCATED = 2,
+ CHUNK_QUARANTINE = 3
+};
+
+struct AsanChunk: ChunkBase {
+ uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+ uptr UsedSize(bool locked_version = false) {
+ if (user_requested_size != SizeClassMap::kMaxSize)
+ return user_requested_size;
+ return *reinterpret_cast<uptr *>(
+ get_allocator().GetMetaData(AllocBeg(locked_version)));
+ }
+ void *AllocBeg(bool locked_version = false) {
+ if (from_memalign) {
+ if (locked_version)
+ return get_allocator().GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(this));
+ return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+ }
+ return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+ }
+ bool AddrIsInside(uptr addr, bool locked_version = false) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+ }
+};
+
+struct QuarantineCallback {
+ explicit QuarantineCallback(AllocatorCache *cache)
+ : cache_(cache) {
+ }
+
+ void Recycle(AsanChunk *m) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+ atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
+ CHECK_NE(m->alloc_tid, kInvalidTid);
+ CHECK_NE(m->free_tid, kInvalidTid);
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ void *p = reinterpret_cast<void *>(m->AllocBeg());
+ if (p != m) {
+ uptr *alloc_magic = reinterpret_cast<uptr *>(p);
+ CHECK_EQ(alloc_magic[0], kAllocBegMagic);
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of deallocated chunk, confusing GetAsanChunk lookup.
+ alloc_magic[0] = 0;
+ CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ }
+
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.real_frees++;
+ thread_stats.really_freed += m->UsedSize();
+
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ void *Allocate(uptr size) {
+ return get_allocator().Allocate(cache_, size, 1, false);
+ }
+
+ void Deallocate(void *p) {
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ AllocatorCache *cache_;
+};
+
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededASanShadowMemory(p, size);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
+
+// We can not use THREADLOCAL because it is not supported on some of the
+// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache;
+AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ return &ms->allocator_cache;
+}
+
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+ return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
+
+void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
+ quarantine_size_mb = f->quarantine_size_mb;
+ min_redzone = f->redzone;
+ max_redzone = f->max_redzone;
+ may_return_null = cf->allocator_may_return_null;
+ alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+}
+
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+ f->quarantine_size_mb = quarantine_size_mb;
+ f->redzone = min_redzone;
+ f->max_redzone = max_redzone;
+ cf->allocator_may_return_null = may_return_null;
+ f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+}
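+
+// Sketch of the intended use of AllocatorOptions (the actual call sites live
+// outside this file, in the runtime initialization code):
+//   AllocatorOptions opts;
+//   opts.SetFrom(flags(), common_flags());
+//   InitializeAllocator(opts);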
+
+struct Allocator {
+ static const uptr kMaxAllowedMallocSize =
+ FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
+ static const uptr kMaxThreadLocalQuarantine =
+ FIRST_32_SECOND_64(1 << 18, 1 << 20);
+
+ AsanAllocator allocator;
+ AsanQuarantine quarantine;
+ StaticSpinMutex fallback_mutex;
+ AllocatorCache fallback_allocator_cache;
+ QuarantineCache fallback_quarantine_cache;
+
+ // ------------------- Options --------------------------
+ atomic_uint16_t min_redzone;
+ atomic_uint16_t max_redzone;
+ atomic_uint8_t alloc_dealloc_mismatch;
+
+ // ------------------- Initialization ------------------------
+ explicit Allocator(LinkerInitialized)
+ : quarantine(LINKER_INITIALIZED),
+ fallback_quarantine_cache(LINKER_INITIALIZED) {}
+
+ void CheckOptions(const AllocatorOptions &options) const {
+ CHECK_GE(options.min_redzone, 16);
+ CHECK_GE(options.max_redzone, options.min_redzone);
+ CHECK_LE(options.max_redzone, 2048);
+ CHECK(IsPowerOfTwo(options.min_redzone));
+ CHECK(IsPowerOfTwo(options.max_redzone));
+ }
+
+ void SharedInitCode(const AllocatorOptions &options) {
+ CheckOptions(options);
+ quarantine.Init((uptr)options.quarantine_size_mb << 20,
+ kMaxThreadLocalQuarantine);
+ atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
+ memory_order_release);
+ atomic_store(&min_redzone, options.min_redzone, memory_order_release);
+ atomic_store(&max_redzone, options.max_redzone, memory_order_release);
+ }
+
+ void Initialize(const AllocatorOptions &options) {
+ allocator.Init(options.may_return_null);
+ SharedInitCode(options);
+ }
+
+ void ReInitialize(const AllocatorOptions &options) {
+ allocator.SetMayReturnNull(options.may_return_null);
+ SharedInitCode(options);
+ }
+
+ void GetOptions(AllocatorOptions *options) const {
+ options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
+ options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
+ options->may_return_null = allocator.MayReturnNull();
+ options->alloc_dealloc_mismatch =
+ atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ }
+
+ // -------------------- Helper methods. -------------------------
+ uptr ComputeRZLog(uptr user_requested_size) {
+ u32 rz_log =
+ user_requested_size <= 64 - 16 ? 0 :
+ user_requested_size <= 128 - 32 ? 1 :
+ user_requested_size <= 512 - 64 ? 2 :
+ user_requested_size <= 4096 - 128 ? 3 :
+ user_requested_size <= (1 << 14) - 256 ? 4 :
+ user_requested_size <= (1 << 15) - 512 ? 5 :
+ user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
+ u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
+ u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
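+    // Clamp the adaptive redzone into [min_redzone, max_redzone]. With the
+    // default 16/2048 limits, a 1000-byte request falls into the
+    // "<= 4096 - 128" bucket, so rz_log == 3, i.e. a 128-byte redzone.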
+ return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ }
+
+ // We have an address between two chunks, and we want to report just one.
+ AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
+ AsanChunk *right_chunk) {
+ // Prefer an allocated chunk over freed chunk and freed chunk
+ // over available chunk.
+ if (left_chunk->chunk_state != right_chunk->chunk_state) {
+ if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ return right_chunk;
+ if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ return right_chunk;
+ }
+ // Same chunk_state: choose based on offset.
+ sptr l_offset = 0, r_offset = 0;
+ CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+ CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+ if (l_offset < r_offset)
+ return left_chunk;
+ return right_chunk;
+ }
+
+ // -------------------- Allocation/Deallocation routines ---------------
+ void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
+ AllocType alloc_type, bool can_fill) {
+ if (UNLIKELY(!asan_inited))
+ AsanInitFromRtl();
+ Flags &fl = *flags();
+ CHECK(stack);
+ const uptr min_alignment = SHADOW_GRANULARITY;
+ if (alignment < min_alignment)
+ alignment = min_alignment;
+ if (size == 0) {
+ // We'd be happy to avoid allocating memory for zero-size requests, but
+ // some programs/tests depend on this behavior and assume that malloc
+ // would not return NULL even for zero-size allocations. Moreover, it
+ // looks like operator new should never return NULL, and results of
+ // consecutive "new" calls must be different even if the allocated size
+ // is zero.
+ size = 1;
+ }
+ CHECK(IsPowerOfTwo(alignment));
+ uptr rz_log = ComputeRZLog(size);
+ uptr rz_size = RZLog2Size(rz_log);
+ uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
+ uptr needed_size = rounded_size + rz_size;
+ if (alignment > min_alignment)
+ needed_size += alignment;
+ bool using_primary_allocator = true;
+ // If we are allocating from the secondary allocator, there will be no
+ // automatic right redzone, so add the right redzone manually.
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ needed_size += rz_size;
+ using_primary_allocator = false;
+ }
+ CHECK(IsAligned(needed_size, min_alignment));
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+ Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
+ (void*)size);
+ return allocator.ReturnNullOrDie();
+ }
+
+ AsanThread *t = GetCurrentThread();
+ void *allocated;
+ bool check_rss_limit = true;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated =
+ allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated =
+ allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ }
+
+ if (!allocated)
+ return allocator.ReturnNullOrDie();
+
+ if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
+ // Heap poisoning is enabled, but the allocator provides an unpoisoned
+ // chunk. This is possible if CanPoisonMemory() was false for some
+ // time, for example, due to flags()->start_disabled.
+ // Anyway, poison the block before using it for anything else.
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+
+ uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ uptr alloc_end = alloc_beg + needed_size;
+ uptr beg_plus_redzone = alloc_beg + rz_size;
+ uptr user_beg = beg_plus_redzone;
+ if (!IsAligned(user_beg, alignment))
+ user_beg = RoundUpTo(user_beg, alignment);
+ uptr user_end = user_beg + size;
+ CHECK_LE(user_end, alloc_end);
+ uptr chunk_beg = user_beg - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ m->alloc_type = alloc_type;
+ m->rz_log = rz_log;
+ u32 alloc_tid = t ? t->tid() : 0;
+ m->alloc_tid = alloc_tid;
+ CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
+ m->free_tid = kInvalidTid;
+ m->from_memalign = user_beg != beg_plus_redzone;
+ if (alloc_beg != chunk_beg) {
+      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
+ reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
+ reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+ }
+ if (using_primary_allocator) {
+ CHECK(size);
+ m->user_requested_size = size;
+ CHECK(allocator.FromPrimary(allocated));
+ } else {
+ CHECK(!allocator.FromPrimary(allocated));
+ m->user_requested_size = SizeClassMap::kMaxSize;
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+ meta[0] = size;
+ meta[1] = chunk_beg;
+ }
+
+ m->alloc_context_id = StackDepotPut(*stack);
+
+ uptr size_rounded_down_to_granularity =
+ RoundDownTo(size, SHADOW_GRANULARITY);
+ // Unpoison the bulk of the memory region.
+ if (size_rounded_down_to_granularity)
+ PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
+ // Deal with the end of the region if size is not aligned to granularity.
+ if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
+ u8 *shadow =
+ (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
+ *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+ }
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+ thread_stats.malloced_redzones += needed_size - size;
+ if (needed_size > SizeClassMap::kMaxSize)
+ thread_stats.malloc_large++;
+ else
+ thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
+
+ void *res = reinterpret_cast<void *>(user_beg);
+ if (can_fill && fl.max_malloc_fill_size) {
+ uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
+ REAL(memset)(res, fl.malloc_fill_byte, fill_size);
+ }
+#if CAN_SANITIZE_LEAKS
+ m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked;
+#endif
+ // Must be the last mutation of metadata in this function.
+ atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ ASAN_MALLOC_HOOK(res, size);
+ return res;
+ }
+
+ void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+ BufferedStackTrace *stack) {
+ u8 old_chunk_state = CHUNK_ALLOCATED;
+ // Flip the chunk_state atomically to avoid race on double-free.
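+    // If two threads race to free the same pointer, only one CAS succeeds;
+    // the loser observes CHUNK_QUARANTINE and reports a double-free via
+    // ReportInvalidFree.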
+ if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
+ CHUNK_QUARANTINE, memory_order_acquire))
+ ReportInvalidFree(ptr, old_chunk_state, stack);
+ CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ }
+
+ // Expects the chunk to already be marked as quarantined by using
+ // AtomicallySetQuarantineFlag.
+ void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+
+ if (m->alloc_type != alloc_type) {
+ if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+ ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+ (AllocType)alloc_type);
+ }
+ }
+
+ CHECK_GE(m->alloc_tid, 0);
+ if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
+ CHECK_EQ(m->free_tid, kInvalidTid);
+ AsanThread *t = GetCurrentThread();
+ m->free_tid = t ? t->tid() : 0;
+ m->free_context_id = StackDepotPut(*stack);
+ // Poison the region.
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
+ // Push into quarantine.
+ if (t) {
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
+ m->UsedSize());
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *ac = &fallback_allocator_cache;
+ quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
+ m->UsedSize());
+ }
+ }
+
+ void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0) return;
+
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ if (delete_size && flags()->new_delete_type_mismatch &&
+ delete_size != m->UsedSize()) {
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
+ }
+ ASAN_FREE_HOOK(ptr);
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ AtomicallySetQuarantineFlag(m, ptr, stack);
+ QuarantineChunk(m, ptr, stack, alloc_type);
+ }
+
+ void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
+ CHECK(old_ptr && new_size);
+ uptr p = reinterpret_cast<uptr>(old_ptr);
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.reallocs++;
+ thread_stats.realloced += new_size;
+
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
+ if (new_ptr) {
+ u8 chunk_state = m->chunk_state;
+ if (chunk_state != CHUNK_ALLOCATED)
+ ReportInvalidFree(old_ptr, chunk_state, stack);
+ CHECK_NE(REAL(memcpy), nullptr);
+ uptr memcpy_size = Min(new_size, m->UsedSize());
+ // If realloc() races with free(), we may start copying freed memory.
+ // However, we will report racy double-free later anyway.
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+ Deallocate(old_ptr, 0, stack, FROM_MALLOC);
+ }
+ return new_ptr;
+ }
+
+ void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+ return allocator.ReturnNullOrDie();
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
+    // If the memory comes from the secondary allocator, there is no need to
+    // clear it, as it comes directly from mmap.
+ if (ptr && allocator.FromPrimary(ptr))
+ REAL(memset)(ptr, 0, nmemb * size);
+ return ptr;
+ }
+
+ void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
+ if (chunk_state == CHUNK_QUARANTINE)
+ ReportDoubleFree((uptr)ptr, stack);
+ else
+ ReportFreeNotMalloced((uptr)ptr, stack);
+ }
+
+ void CommitBack(AsanThreadLocalMallocStorage *ms) {
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
+ allocator.SwallowCache(ac);
+ }
+
+ // -------------------------- Chunk lookup ----------------------
+
+ // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ AsanChunk *GetAsanChunk(void *alloc_beg) {
+ if (!alloc_beg) return nullptr;
+ if (!allocator.FromPrimary(alloc_beg)) {
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
+ return m;
+ }
+ uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
+ if (alloc_magic[0] == kAllocBegMagic)
+ return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
+ return reinterpret_cast<AsanChunk *>(alloc_beg);
+ }
+
+ AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ // Allocator must be locked when this function is called.
+ AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+ void *alloc_beg =
+ allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ uptr AllocationSize(uptr p) {
+ AsanChunk *m = GetAsanChunkByAddr(p);
+ if (!m) return 0;
+ if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (m->Beg() != p) return 0;
+ return m->UsedSize();
+ }
+
+ AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ AsanChunk *m1 = GetAsanChunkByAddr(addr);
+ if (!m1) return AsanChunkView(m1);
+ sptr offset = 0;
+ if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ // The address is in the chunk's left redzone, so maybe it is actually
+ // a right buffer overflow from the other chunk to the left.
+ // Search a bit to the left to see if there is another chunk.
+ AsanChunk *m2 = nullptr;
+ for (uptr l = 1; l < GetPageSizeCached(); l++) {
+ m2 = GetAsanChunkByAddr(addr - l);
+ if (m2 == m1) continue; // Still the same chunk.
+ break;
+ }
+ if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+ m1 = ChooseChunk(addr, m2, m1);
+ }
+ return AsanChunkView(m1);
+ }
+
+ void PrintStats() {
+ allocator.PrintStats();
+ }
+
+ void ForceLock() {
+ allocator.ForceLock();
+ fallback_mutex.Lock();
+ }
+
+ void ForceUnlock() {
+ fallback_mutex.Unlock();
+ allocator.ForceUnlock();
+ }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static AsanAllocator &get_allocator() {
+ return instance.allocator;
+}
+
+bool AsanChunkView::IsValid() {
+ return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
+}
+uptr AsanChunkView::Beg() { return chunk_->Beg(); }
+uptr AsanChunkView::End() { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+StackTrace AsanChunkView::GetAllocStack() {
+ return GetStackTraceFromId(chunk_->alloc_context_id);
+}
+
+StackTrace AsanChunkView::GetFreeStack() {
+ return GetStackTraceFromId(chunk_->free_context_id);
+}
+
+void InitializeAllocator(const AllocatorOptions &options) {
+ instance.Initialize(options);
+}
+
+void ReInitializeAllocator(const AllocatorOptions &options) {
+ instance.ReInitialize(options);
+}
+
+void GetAllocatorOptions(AllocatorOptions *options) {
+ instance.GetOptions(options);
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ return instance.FindHeapChunkByAddress(addr);
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+ instance.CommitBack(this);
+}
+
+void PrintInternalAllocatorStats() {
+ instance.PrintStats();
+}
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ return instance.Allocate(size, alignment, stack, alloc_type, true);
+}
+
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, 0, stack, alloc_type);
+}
+
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ instance.Deallocate(ptr, size, stack, alloc_type);
+}
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
+ return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+}
+
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ return instance.Calloc(nmemb, size, stack);
+}
+
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
+ if (!p)
+ return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+ if (size == 0) {
+ instance.Deallocate(p, 0, stack, FROM_MALLOC);
+ return nullptr;
+ }
+ return instance.Reallocate(p, size, stack);
+}
+
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
+ return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
+}
+
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ size = RoundUpTo(size, PageSize);
+ if (size == 0) {
+ // pvalloc(0) should allocate one page.
+ size = PageSize;
+ }
+ return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack) {
+ void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
+ if (!ptr) return 0;
+ uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+ if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+ GET_STACK_TRACE_FATAL(pc, bp);
+ ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
+ }
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+}
+
+void asan_mz_force_lock() {
+ instance.ForceLock();
+}
+
+void asan_mz_force_unlock() {
+ instance.ForceUnlock();
+}
+
+void AsanSoftRssLimitExceededCallback(bool exceeded) {
+ instance.allocator.SetRssLimitIsExceeded(exceeded);
+}
+
+} // namespace __asan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+void LockAllocator() {
+ __asan::get_allocator().ForceLock();
+}
+
+void UnlockAllocator() {
+ __asan::get_allocator().ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__asan::get_allocator();
+ *end = *begin + sizeof(__asan::get_allocator());
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
+ if (!m) return 0;
+ uptr chunk = m->Beg();
+ if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ return 0;
+ if (m->AddrIsInside(addr, /*locked_version=*/true))
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+ addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
+ CHECK(m);
+ return m->Beg();
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+}
+
+bool LsanMetadata::allocated() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->chunk_state == __asan::CHUNK_ALLOCATED;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return static_cast<ChunkTag>(m->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ m->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->UsedSize(/*locked_version=*/true);
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->alloc_context_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __asan::get_allocator().ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
+ if (!m) return kIgnoreObjectInvalid;
+ if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// ASan allocator doesn't reserve extra bytes, so normally we would
+// just return "size". We don't want to expose our redzone sizes, etc here.
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return instance.AllocationSize(ptr) > 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ if (!p) return 0;
+ uptr ptr = reinterpret_cast<uptr>(p);
+ uptr allocated_size = instance.AllocationSize(ptr);
+ // Die if p is not malloced or if it is already freed.
+ if (allocated_size == 0) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
+ }
+ return allocated_size;
+}
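+
+// For illustration: for a live heap pointer p = malloc(10),
+// __sanitizer_get_ownership(p) returns non-zero and
+// __sanitizer_get_allocated_size(p) returns 10.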
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+} // extern "C"
+#endif
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 763f4a58ef9..921131ab4e9 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -7,14 +7,16 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// ASan-private header for asan_allocator2.cc.
+// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H
+#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
namespace __asan {
@@ -25,10 +27,22 @@ enum AllocType {
FROM_NEW_BR = 3 // Memory block came from operator new [ ]
};
-static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
-void InitializeAllocator();
+struct AllocatorOptions {
+ u32 quarantine_size_mb;
+ u16 min_redzone;
+ u16 max_redzone;
+ u8 may_return_null;
+ u8 alloc_dealloc_mismatch;
+
+ void SetFrom(const Flags *f, const CommonFlags *cf);
+ void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
@@ -40,8 +54,9 @@ class AsanChunkView {
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
- void GetAllocStack(StackTrace *stack);
- void GetFreeStack(StackTrace *stack);
+ bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
+ StackTrace GetAllocStack();
+ StackTrace GetFreeStack();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
@@ -88,27 +103,72 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
uptr size_;
};
+struct AsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const;
+ void OnUnmap(uptr p, uptr size) const;
+};
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+# elif defined(__aarch64__)
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only used for 42-bit VMA,
+// so there is no need for different values for different VMA sizes.
+const uptr kAllocatorSpace = 0x10000000000ULL;
+const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
+ SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+#else // Fallback to SizeClassAllocator32.
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<kNumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+# endif
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
+ SizeClassMap, kRegionSizeLog,
+ ByteMap,
+ AsanMapUnmapCallback> PrimaryAllocator;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> AsanAllocator;
+
+
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
- uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
+ AllocatorCache allocator_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
AsanThreadLocalMallocStorage() {}
};
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type);
-void *asan_malloc(uptr size, StackTrace *stack);
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
-void *asan_realloc(void *p, uptr size, StackTrace *stack);
-void *asan_valloc(uptr size, StackTrace *stack);
-void *asan_pvalloc(uptr size, StackTrace *stack);
+void *asan_malloc(uptr size, BufferedStackTrace *stack);
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
+void *asan_valloc(uptr size, BufferedStackTrace *stack);
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack);
+ BufferedStackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
@@ -116,6 +176,7 @@ void asan_mz_force_lock();
void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
+void AsanSoftRssLimitExceededCallback(bool exceeded);
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc
deleted file mode 100644
index b9d66dc7a4a..00000000000
--- a/libsanitizer/asan/asan_allocator2.cc
+++ /dev/null
@@ -1,811 +0,0 @@
-//===-- asan_allocator2.cc ------------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Implementation of ASan's memory allocator, 2-nd version.
-// This variant uses the allocator from sanitizer_common, i.e. the one shared
-// with ThreadSanitizer and MemorySanitizer.
-//
-//===----------------------------------------------------------------------===//
-#include "asan_allocator.h"
-
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
-
-namespace __asan {
-
-struct AsanMapUnmapCallback {
- void OnMap(uptr p, uptr size) const {
- PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mmaps++;
- thread_stats.mmaped += size;
- }
- void OnUnmap(uptr p, uptr size) const {
- PoisonShadow(p, size, 0);
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.munmaps++;
- thread_stats.munmaped += size;
- }
-};
-
-#if SANITIZER_WORDSIZE == 64
-#if defined(__powerpc64__)
-const uptr kAllocatorSpace = 0xa0000000000ULL;
-const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
-#else
-const uptr kAllocatorSpace = 0x600000000000ULL;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-#endif
-typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
-#elif SANITIZER_WORDSIZE == 32
-static const u64 kAddressSpaceSize = 1ULL << 32;
-typedef CompactSizeClassMap SizeClassMap;
-static const uptr kRegionSizeLog = 20;
-static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
-typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
- SizeClassMap, kRegionSizeLog,
- FlatByteMap<kFlatByteMapSize>,
- AsanMapUnmapCallback> PrimaryAllocator;
-#endif
-
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
-
-// We can not use THREADLOCAL because it is not supported on some of the
-// platforms we care about (OSX 10.6, Android).
-// static THREADLOCAL AllocatorCache cache;
-AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
- return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
-}
-
-static Allocator allocator;
-
-static const uptr kMaxAllowedMallocSize =
- FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
-
-static const uptr kMaxThreadLocalQuarantine =
- FIRST_32_SECOND_64(1 << 18, 1 << 20);
-
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
-enum {
- CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
- CHUNK_ALLOCATED = 2,
- CHUNK_QUARANTINE = 3
-};
-
-// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
-// We use adaptive redzones: for larger allocation larger redzones are used.
-static u32 RZLog2Size(u32 rz_log) {
- CHECK_LT(rz_log, 8);
- return 16 << rz_log;
-}
-
-static u32 RZSize2Log(u32 rz_size) {
- CHECK_GE(rz_size, 16);
- CHECK_LE(rz_size, 2048);
- CHECK(IsPowerOfTwo(rz_size));
- u32 res = Log2(rz_size) - 4;
- CHECK_EQ(rz_size, RZLog2Size(res));
- return res;
-}
-
-static uptr ComputeRZLog(uptr user_requested_size) {
- u32 rz_log =
- user_requested_size <= 64 - 16 ? 0 :
- user_requested_size <= 128 - 32 ? 1 :
- user_requested_size <= 512 - 64 ? 2 :
- user_requested_size <= 4096 - 128 ? 3 :
- user_requested_size <= (1 << 14) - 256 ? 4 :
- user_requested_size <= (1 << 15) - 512 ? 5 :
- user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- return Max(rz_log, RZSize2Log(flags()->redzone));
-}
-
-// The memory chunk allocated from the underlying allocator looks like this:
-// L L L L L L H H U U U U U U R R
-// L -- left redzone words (0 or more bytes)
-// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
-// U -- user memory.
-// R -- right redzone (0 or more bytes)
-// ChunkBase consists of ChunkHeader and other bytes that overlap with user
-// memory.
-
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
-// M B L L L L L L L L L H H U U U U U U
-// | ^
-// ---------------------|
-// M -- magic value kAllocBegMagic
-// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
- // 1-st 8 bytes.
- u32 chunk_state : 8; // Must be first.
- u32 alloc_tid : 24;
-
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
- // 2-nd 8 bytes
- // This field is used for small sizes. For large sizes it is equal to
- // SizeClassMap::kMaxSize and the actual size is stored in the
- // SecondaryAllocator's metadata.
- u32 user_requested_size;
- u32 alloc_context_id;
-};
-
-struct ChunkBase : ChunkHeader {
- // Header2, intersects with user memory.
- u32 free_context_id;
-};
-
-static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
-static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
-COMPILER_CHECK(kChunkHeaderSize == 16);
-COMPILER_CHECK(kChunkHeader2Size <= 16);
-
-struct AsanChunk: ChunkBase {
- uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize(bool locked_version = false) {
- if (user_requested_size != SizeClassMap::kMaxSize)
- return user_requested_size;
- return *reinterpret_cast<uptr *>(
- allocator.GetMetaData(AllocBeg(locked_version)));
- }
- void *AllocBeg(bool locked_version = false) {
- if (from_memalign) {
- if (locked_version)
- return allocator.GetBlockBeginFastLocked(
- reinterpret_cast<void *>(this));
- return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
- }
- return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
- }
- // If we don't use stack depot, we store the alloc/free stack traces
- // in the chunk itself.
- u32 *AllocStackBeg() {
- return (u32*)(Beg() - RZLog2Size(rz_log));
- }
- uptr AllocStackSize() {
- CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
- return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
- }
- u32 *FreeStackBeg() {
- return (u32*)(Beg() + kChunkHeader2Size);
- }
- uptr FreeStackSize() {
- if (user_requested_size < kChunkHeader2Size) return 0;
- uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
- return (available - kChunkHeader2Size) / sizeof(u32);
- }
- bool AddrIsInside(uptr addr, bool locked_version = false) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
- }
-};
-
-bool AsanChunkView::IsValid() {
- return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
-}
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-
-static void GetStackTraceFromId(u32 id, StackTrace *stack) {
- CHECK(id);
- uptr size = 0;
- const uptr *trace = StackDepotGet(id, &size);
- CHECK(trace);
- stack->CopyFrom(trace, size);
-}
-
-void AsanChunkView::GetAllocStack(StackTrace *stack) {
- GetStackTraceFromId(chunk_->alloc_context_id, stack);
-}
-
-void AsanChunkView::GetFreeStack(StackTrace *stack) {
- GetStackTraceFromId(chunk_->free_context_id, stack);
-}
-
-struct QuarantineCallback;
-typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
-typedef AsanQuarantine::Cache QuarantineCache;
-static AsanQuarantine quarantine(LINKER_INITIALIZED);
-static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
-static AllocatorCache fallback_allocator_cache;
-static SpinMutex fallback_mutex;
-
-QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
- return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
-}
-
-struct QuarantineCallback {
- explicit QuarantineCallback(AllocatorCache *cache)
- : cache_(cache) {
- }
-
- void Recycle(AsanChunk *m) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
- CHECK_NE(m->alloc_tid, kInvalidTid);
- CHECK_NE(m->free_tid, kInvalidTid);
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
- void *p = reinterpret_cast<void *>(m->AllocBeg());
- if (p != m) {
- uptr *alloc_magic = reinterpret_cast<uptr *>(p);
- CHECK_EQ(alloc_magic[0], kAllocBegMagic);
- // Clear the magic value, as allocator internals may overwrite the
- // contents of deallocated chunk, confusing GetAsanChunk lookup.
- alloc_magic[0] = 0;
- CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
- }
-
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.real_frees++;
- thread_stats.really_freed += m->UsedSize();
-
- allocator.Deallocate(cache_, p);
- }
-
- void *Allocate(uptr size) {
- return allocator.Allocate(cache_, size, 1, false);
- }
-
- void Deallocate(void *p) {
- allocator.Deallocate(cache_, p);
- }
-
- AllocatorCache *cache_;
-};
-
-void InitializeAllocator() {
- allocator.Init();
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
- AllocType alloc_type, bool can_fill) {
- if (!asan_inited)
- __asan_init();
- Flags &fl = *flags();
- CHECK(stack);
- const uptr min_alignment = SHADOW_GRANULARITY;
- if (alignment < min_alignment)
- alignment = min_alignment;
- if (size == 0) {
- // We'd be happy to avoid allocating memory for zero-size requests, but
- // some programs/tests depend on this behavior and assume that malloc would
- // not return NULL even for zero-size allocations. Moreover, it looks like
- // operator new should never return NULL, and results of consecutive "new"
- // calls must be different even if the allocated size is zero.
- size = 1;
- }
- CHECK(IsPowerOfTwo(alignment));
- uptr rz_log = ComputeRZLog(size);
- uptr rz_size = RZLog2Size(rz_log);
- uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
- uptr needed_size = rounded_size + rz_size;
- if (alignment > min_alignment)
- needed_size += alignment;
- bool using_primary_allocator = true;
- // If we are allocating from the secondary allocator, there will be no
- // automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
- needed_size += rz_size;
- using_primary_allocator = false;
- }
- CHECK(IsAligned(needed_size, min_alignment));
- if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
- Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
- (void*)size);
- return AllocatorReturnNull();
- }
-
- AsanThread *t = GetCurrentThread();
- void *allocated;
- if (t) {
- AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
- allocated = allocator.Allocate(cache, needed_size, 8, false);
- } else {
- SpinMutexLock l(&fallback_mutex);
- AllocatorCache *cache = &fallback_allocator_cache;
- allocated = allocator.Allocate(cache, needed_size, 8, false);
- }
- uptr alloc_beg = reinterpret_cast<uptr>(allocated);
- uptr alloc_end = alloc_beg + needed_size;
- uptr beg_plus_redzone = alloc_beg + rz_size;
- uptr user_beg = beg_plus_redzone;
- if (!IsAligned(user_beg, alignment))
- user_beg = RoundUpTo(user_beg, alignment);
- uptr user_end = user_beg + size;
- CHECK_LE(user_end, alloc_end);
- uptr chunk_beg = user_beg - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- m->alloc_type = alloc_type;
- m->rz_log = rz_log;
- u32 alloc_tid = t ? t->tid() : 0;
- m->alloc_tid = alloc_tid;
- CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
- m->free_tid = kInvalidTid;
- m->from_memalign = user_beg != beg_plus_redzone;
- if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
- reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
- reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
- }
- if (using_primary_allocator) {
- CHECK(size);
- m->user_requested_size = size;
- CHECK(allocator.FromPrimary(allocated));
- } else {
- CHECK(!allocator.FromPrimary(allocated));
- m->user_requested_size = SizeClassMap::kMaxSize;
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
- meta[0] = size;
- meta[1] = chunk_beg;
- }
-
- m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
-
- uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
- // Unpoison the bulk of the memory region.
- if (size_rounded_down_to_granularity)
- PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
- // Deal with the end of the region if size is not aligned to granularity.
- if (size != size_rounded_down_to_granularity && fl.poison_heap) {
- u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
- *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
- }
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mallocs++;
- thread_stats.malloced += size;
- thread_stats.malloced_redzones += needed_size - size;
- uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
- thread_stats.malloced_by_size[class_id]++;
- if (needed_size > SizeClassMap::kMaxSize)
- thread_stats.malloc_large++;
-
- void *res = reinterpret_cast<void *>(user_beg);
- if (can_fill && fl.max_malloc_fill_size) {
- uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
- REAL(memset)(res, fl.malloc_fill_byte, fill_size);
- }
-#if CAN_SANITIZE_LEAKS
- m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
- : __lsan::kDirectlyLeaked;
-#endif
- // Must be the last mutation of metadata in this function.
- atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
- ASAN_MALLOC_HOOK(res, size);
- return res;
-}
-
-static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
- if (chunk_state == CHUNK_QUARANTINE)
- ReportDoubleFree((uptr)ptr, stack);
- else
- ReportFreeNotMalloced((uptr)ptr, stack);
-}
-
-static void AtomicallySetQuarantineFlag(AsanChunk *m,
- void *ptr, StackTrace *stack) {
- u8 old_chunk_state = CHUNK_ALLOCATED;
- // Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
- CHUNK_QUARANTINE, memory_order_acquire))
- ReportInvalidFree(ptr, old_chunk_state, stack);
- CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
-}
-
-// Expects the chunk to already be marked as quarantined by using
-// AtomicallySetQuarantineFlag.
-static void QuarantineChunk(AsanChunk *m, void *ptr,
- StackTrace *stack, AllocType alloc_type) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-
- if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
- ReportAllocTypeMismatch((uptr)ptr, stack,
- (AllocType)m->alloc_type, (AllocType)alloc_type);
-
- CHECK_GE(m->alloc_tid, 0);
- if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
- CHECK_EQ(m->free_tid, kInvalidTid);
- AsanThread *t = GetCurrentThread();
- m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(stack->trace, stack->size);
- // Poison the region.
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapFreeMagic);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.frees++;
- thread_stats.freed += m->UsedSize();
-
- // Push into quarantine.
- if (t) {
- AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
- AllocatorCache *ac = GetAllocatorCache(ms);
- quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
- m, m->UsedSize());
- } else {
- SpinMutexLock l(&fallback_mutex);
- AllocatorCache *ac = &fallback_allocator_cache;
- quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
- m, m->UsedSize());
- }
-}
-
-static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
- uptr p = reinterpret_cast<uptr>(ptr);
- if (p == 0) return;
-
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- ASAN_FREE_HOOK(ptr);
- // Must mark the chunk as quarantined before any changes to its metadata.
- AtomicallySetQuarantineFlag(m, ptr, stack);
- QuarantineChunk(m, ptr, stack, alloc_type);
-}
-
-static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
- CHECK(old_ptr && new_size);
- uptr p = reinterpret_cast<uptr>(old_ptr);
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.reallocs++;
- thread_stats.realloced += new_size;
-
- void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
- if (new_ptr) {
- u8 chunk_state = m->chunk_state;
- if (chunk_state != CHUNK_ALLOCATED)
- ReportInvalidFree(old_ptr, chunk_state, stack);
- CHECK_NE(REAL(memcpy), (void*)0);
- uptr memcpy_size = Min(new_size, m->UsedSize());
- // If realloc() races with free(), we may start copying freed memory.
- // However, we will report racy double-free later anyway.
- REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, stack, FROM_MALLOC);
- }
- return new_ptr;
-}
-
-// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
-static AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return 0;
- if (!allocator.FromPrimary(alloc_beg)) {
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
- AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- return m;
- }
- uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
- if (alloc_magic[0] == kAllocBegMagic)
- return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
- return reinterpret_cast<AsanChunk *>(alloc_beg);
-}
-
-static AsanChunk *GetAsanChunkByAddr(uptr p) {
- void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
- return GetAsanChunk(alloc_beg);
-}
-
-// Allocator must be locked when this function is called.
-static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
- void *alloc_beg =
- allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
- return GetAsanChunk(alloc_beg);
-}
-
-static uptr AllocationSize(uptr p) {
- AsanChunk *m = GetAsanChunkByAddr(p);
- if (!m) return 0;
- if (m->chunk_state != CHUNK_ALLOCATED) return 0;
- if (m->Beg() != p) return 0;
- return m->UsedSize();
-}
-
-// We have an address between two chunks, and we want to report just one.
-AsanChunk *ChooseChunk(uptr addr,
- AsanChunk *left_chunk, AsanChunk *right_chunk) {
- // Prefer an allocated chunk over freed chunk and freed chunk
- // over available chunk.
- if (left_chunk->chunk_state != right_chunk->chunk_state) {
- if (left_chunk->chunk_state == CHUNK_ALLOCATED)
- return left_chunk;
- if (right_chunk->chunk_state == CHUNK_ALLOCATED)
- return right_chunk;
- if (left_chunk->chunk_state == CHUNK_QUARANTINE)
- return left_chunk;
- if (right_chunk->chunk_state == CHUNK_QUARANTINE)
- return right_chunk;
- }
- // Same chunk_state: choose based on offset.
- sptr l_offset = 0, r_offset = 0;
- CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
- CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
- if (l_offset < r_offset)
- return left_chunk;
- return right_chunk;
-}
-
-AsanChunkView FindHeapChunkByAddress(uptr addr) {
- AsanChunk *m1 = GetAsanChunkByAddr(addr);
- if (!m1) return AsanChunkView(m1);
- sptr offset = 0;
- if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
- // The address is in the chunk's left redzone, so maybe it is actually
- // a right buffer overflow from the other chunk to the left.
- // Search a bit to the left to see if there is another chunk.
- AsanChunk *m2 = 0;
- for (uptr l = 1; l < GetPageSizeCached(); l++) {
- m2 = GetAsanChunkByAddr(addr - l);
- if (m2 == m1) continue; // Still the same chunk.
- break;
- }
- if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
- m1 = ChooseChunk(addr, m2, m1);
- }
- return AsanChunkView(m1);
-}
-
-void AsanThreadLocalMallocStorage::CommitBack() {
- AllocatorCache *ac = GetAllocatorCache(this);
- quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
- allocator.SwallowCache(GetAllocatorCache(this));
-}
-
-void PrintInternalAllocatorStats() {
- allocator.PrintStats();
-}
-
-void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
- AllocType alloc_type) {
- return Allocate(size, alignment, stack, alloc_type, true);
-}
-
-void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
- Deallocate(ptr, stack, alloc_type);
-}
-
-void *asan_malloc(uptr size, StackTrace *stack) {
- return Allocate(size, 8, stack, FROM_MALLOC, true);
-}
-
-void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
- if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return AllocatorReturnNull();
- void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
- // If the memory comes from the secondary allocator no need to clear it
- // as it comes directly from mmap.
- if (ptr && allocator.FromPrimary(ptr))
- REAL(memset)(ptr, 0, nmemb * size);
- return ptr;
-}
-
-void *asan_realloc(void *p, uptr size, StackTrace *stack) {
- if (p == 0)
- return Allocate(size, 8, stack, FROM_MALLOC, true);
- if (size == 0) {
- Deallocate(p, stack, FROM_MALLOC);
- return 0;
- }
- return Reallocate(p, size, stack);
-}
-
-void *asan_valloc(uptr size, StackTrace *stack) {
- return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
-}
-
-void *asan_pvalloc(uptr size, StackTrace *stack) {
- uptr PageSize = GetPageSizeCached();
- size = RoundUpTo(size, PageSize);
- if (size == 0) {
- // pvalloc(0) should allocate one page.
- size = PageSize;
- }
- return Allocate(size, PageSize, stack, FROM_MALLOC, true);
-}
-
-int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack) {
- void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
- CHECK(IsAligned((uptr)ptr, alignment));
- *memptr = ptr;
- return 0;
-}
-
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
- if (ptr == 0) return 0;
- uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
- if (flags()->check_malloc_usable_size && (usable_size == 0)) {
- GET_STACK_TRACE_FATAL(pc, bp);
- ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
- }
- return usable_size;
-}
-
-uptr asan_mz_size(const void *ptr) {
- return AllocationSize(reinterpret_cast<uptr>(ptr));
-}
-
-void asan_mz_force_lock() {
- allocator.ForceLock();
- fallback_mutex.Lock();
-}
-
-void asan_mz_force_unlock() {
- fallback_mutex.Unlock();
- allocator.ForceUnlock();
-}
-
-} // namespace __asan
-
-// --- Implementation of LSan-specific functions --- {{{1
-namespace __lsan {
-void LockAllocator() {
- __asan::allocator.ForceLock();
-}
-
-void UnlockAllocator() {
- __asan::allocator.ForceUnlock();
-}
-
-void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
- *begin = (uptr)&__asan::allocator;
- *end = *begin + sizeof(__asan::allocator);
-}
-
-uptr PointsIntoChunk(void* p) {
- uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
- if (!m) return 0;
- uptr chunk = m->Beg();
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
- m->AddrIsInside(addr, /*locked_version=*/true))
- return chunk;
- return 0;
-}
-
-uptr GetUserBegin(uptr chunk) {
- __asan::AsanChunk *m =
- __asan::GetAsanChunkByAddrFastLocked(chunk);
- CHECK(m);
- return m->Beg();
-}
-
-LsanMetadata::LsanMetadata(uptr chunk) {
- metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
-}
-
-bool LsanMetadata::allocated() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->chunk_state == __asan::CHUNK_ALLOCATED;
-}
-
-ChunkTag LsanMetadata::tag() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return static_cast<ChunkTag>(m->lsan_tag);
-}
-
-void LsanMetadata::set_tag(ChunkTag value) {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- m->lsan_tag = value;
-}
-
-uptr LsanMetadata::requested_size() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize(/*locked_version=*/true);
-}
-
-u32 LsanMetadata::stack_trace_id() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->alloc_context_id;
-}
-
-void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- __asan::allocator.ForEachChunk(callback, arg);
-}
-
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
- uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
- if (!m) return kIgnoreObjectInvalid;
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
- if (m->lsan_tag == kIgnored)
- return kIgnoreObjectAlreadyIgnored;
- m->lsan_tag = __lsan::kIgnored;
- return kIgnoreObjectSuccess;
- } else {
- return kIgnoreObjectInvalid;
- }
-}
-} // namespace __lsan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-// ASan allocator doesn't reserve extra bytes, so normally we would
-// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __asan_get_estimated_allocated_size(uptr size) {
- return size;
-}
-
-bool __asan_get_ownership(const void *p) {
- uptr ptr = reinterpret_cast<uptr>(p);
- return (AllocationSize(ptr) > 0);
-}
-
-uptr __asan_get_allocated_size(const void *p) {
- if (p == 0) return 0;
- uptr ptr = reinterpret_cast<uptr>(p);
- uptr allocated_size = AllocationSize(ptr);
- // Die if p is not malloced or if it is already freed.
- if (allocated_size == 0) {
- GET_STACK_TRACE_FATAL_HERE;
- ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
- }
- return allocated_size;
-}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_malloc_hook(void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_free_hook(void *ptr) {
- (void)ptr;
-}
-} // extern "C"
-#endif
diff --git a/libsanitizer/asan/asan_debugging.cc b/libsanitizer/asan/asan_debugging.cc
new file mode 100644
index 00000000000..6e33a9d4c51
--- /dev/null
+++ b/libsanitizer/asan/asan_debugging.cc
@@ -0,0 +1,139 @@
+//===-- asan_debugging.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file contains various functions that are generally useful to call when
+// using a debugger (LLDB, GDB).
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_thread.h"
+
+namespace __asan {
+
+void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
+ descr->name[0] = 0;
+ descr->region_address = 0;
+ descr->region_size = 0;
+ descr->region_kind = "stack";
+
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access))
+ return;
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ return;
+ }
+
+ for (uptr i = 0; i < vars.size(); i++) {
+ if (access.offset <= vars[i].beg + vars[i].size) {
+ internal_strncat(descr->name, vars[i].name_pos,
+ Min(descr->name_size, vars[i].name_len));
+ descr->region_address = addr - (access.offset - vars[i].beg);
+ descr->region_size = vars[i].size;
+ return;
+ }
+ }
+}
+
+void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+
+ descr->name[0] = 0;
+ descr->region_address = 0;
+ descr->region_size = 0;
+
+ if (!chunk.IsValid()) {
+ descr->region_kind = "heap-invalid";
+ return;
+ }
+
+ descr->region_address = chunk.Beg();
+ descr->region_size = chunk.UsedSize();
+ descr->region_kind = "heap";
+}
+
+void AsanLocateAddress(uptr addr, AddressDescription *descr) {
+ if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
+ return;
+ }
+ if (GetInfoForAddressIfGlobal(addr, descr)) {
+ return;
+ }
+ asanThreadRegistry().Lock();
+ AsanThread *thread = FindThreadByStackAddress(addr);
+ asanThreadRegistry().Unlock();
+ if (thread) {
+ GetInfoForStackVar(addr, descr, thread);
+ return;
+ }
+ GetInfoForHeapAddress(addr, descr);
+}
+
+static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+ bool alloc_stack) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) return 0;
+
+ StackTrace stack(nullptr, 0);
+ if (alloc_stack) {
+ if (chunk.AllocTid() == kInvalidTid) return 0;
+ stack = chunk.GetAllocStack();
+ if (thread_id) *thread_id = chunk.AllocTid();
+ } else {
+ if (chunk.FreeTid() == kInvalidTid) return 0;
+ stack = chunk.GetFreeStack();
+ if (thread_id) *thread_id = chunk.FreeTid();
+ }
+
+ if (trace && size) {
+ size = Min(size, Min(stack.size, kStackTraceMax));
+ for (uptr i = 0; i < size; i++)
+ trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);
+
+ return size;
+ }
+
+ return 0;
+}
+
+} // namespace __asan
+
+using namespace __asan;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size) {
+ AddressDescription descr = { name, name_size, 0, 0, nullptr };
+ AsanLocateAddress(addr, &descr);
+ if (region_address) *region_address = descr.region_address;
+ if (region_size) *region_size = descr.region_size;
+ return descr.region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
+ if (shadow_scale)
+ *shadow_scale = SHADOW_SCALE;
+ if (shadow_offset)
+ *shadow_offset = SHADOW_OFFSET;
+}
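
The new debugging entry points are plain C symbols, so an instrumented program (or a debugger expression) can call them directly. A small sketch with the declarations mirrored from the definitions above (uptr/u32 written as uintptr_t/uint32_t), assuming the binary is built with -fsanitize=address; the matching sanitizer headers, where installed, declare the same symbols:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    extern "C" const char *__asan_locate_address(uintptr_t addr, char *name,
                                                 uintptr_t name_size,
                                                 uintptr_t *region_address,
                                                 uintptr_t *region_size);
    extern "C" uintptr_t __asan_get_alloc_stack(uintptr_t addr, uintptr_t *trace,
                                                uintptr_t size, uint32_t *thread_id);
    extern "C" void __asan_get_shadow_mapping(uintptr_t *shadow_scale,
                                              uintptr_t *shadow_offset);

    int main() {
      char *p = static_cast<char *>(malloc(32));

      char name[128];
      uintptr_t region_addr = 0, region_size = 0;
      const char *kind = __asan_locate_address(reinterpret_cast<uintptr_t>(p) + 8,
                                               name, sizeof(name),
                                               &region_addr, &region_size);
      // Expected to report kind "heap" with the chunk's begin address and size.
      printf("kind=%s addr=0x%zx size=%zu\n", kind,
             (size_t)region_addr, (size_t)region_size);

      uintptr_t trace[16];
      uint32_t tid = 0;
      uintptr_t frames = __asan_get_alloc_stack(reinterpret_cast<uintptr_t>(p),
                                                trace, 16, &tid);
      printf("allocated by T%u, %zu frame(s) captured\n", tid, (size_t)frames);

      uintptr_t scale = 0, offset = 0;
      __asan_get_shadow_mapping(&scale, &offset);
      // The usual ASan shadow formula: shadow = (addr >> scale) + offset.
      printf("shadow = (addr >> %zu) + 0x%zx\n", (size_t)scale, (size_t)offset);

      free(p);
      return 0;
    }
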
diff --git a/libsanitizer/asan/asan_dll_thunk.cc b/libsanitizer/asan/asan_dll_thunk.cc
deleted file mode 100644
index 19c31f0def8..00000000000
--- a/libsanitizer/asan/asan_dll_thunk.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-//===-- asan_dll_thunk.cc -------------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// This file defines a family of thunks that should be statically linked into
-// the DLLs that have ASan instrumentation in order to delegate the calls to the
-// shared runtime that lives in the main binary.
-// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
-// details.
-//===----------------------------------------------------------------------===//
-
-// Only compile this code when buidling asan_dll_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DLL_THUNK
-
-// ----------------- Helper functions and macros --------------------- {{{1
-extern "C" {
-void *__stdcall GetModuleHandleA(const char *module_name);
-void *__stdcall GetProcAddress(void *module, const char *proc_name);
-void abort();
-}
-
-static void *getRealProcAddressOrDie(const char *name) {
- void *ret = GetProcAddress(GetModuleHandleA(0), name);
- if (!ret)
- abort();
- return ret;
-}
-
-#define WRAP_V_V(name) \
- extern "C" void name() { \
- typedef void (*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(); \
- }
-
-#define WRAP_V_W(name) \
- extern "C" void name(void *arg) { \
- typedef void (*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg); \
- }
-
-#define WRAP_V_WW(name) \
- extern "C" void name(void *arg1, void *arg2) { \
- typedef void (*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2); \
- }
-
-#define WRAP_V_WWW(name) \
- extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2, arg3); \
- }
-
-#define WRAP_W_V(name) \
- extern "C" void *name() { \
- typedef void *(*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(); \
- }
-
-#define WRAP_W_W(name) \
- extern "C" void *name(void *arg) { \
- typedef void *(*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg); \
- }
-
-#define WRAP_W_WW(name) \
- extern "C" void *name(void *arg1, void *arg2) { \
- typedef void *(*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2); \
- }
-
-#define WRAP_W_WWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3); \
- }
-
-#define WRAP_W_WWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef void *(*fntype)(void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4); \
- }
-
-#define WRAP_W_WWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5); \
- }
-
-#define WRAP_W_WWWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5, void *arg6) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
- }
-// }}}
-
-// ----------------- ASan own interface functions --------------------
-WRAP_W_V(__asan_should_detect_stack_use_after_return)
-
-extern "C" {
- int __asan_option_detect_stack_use_after_return;
-
- // Manually wrap __asan_init as we need to initialize
- // __asan_option_detect_stack_use_after_return afterwards.
- void __asan_init_v3() {
- typedef void (*fntype)();
- static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
- fn();
- __asan_option_detect_stack_use_after_return =
- (__asan_should_detect_stack_use_after_return() != 0);
- }
-}
-
-WRAP_V_V(__asan_handle_no_return)
-
-WRAP_V_W(__asan_report_store1)
-WRAP_V_W(__asan_report_store2)
-WRAP_V_W(__asan_report_store4)
-WRAP_V_W(__asan_report_store8)
-WRAP_V_W(__asan_report_store16)
-WRAP_V_WW(__asan_report_store_n)
-
-WRAP_V_W(__asan_report_load1)
-WRAP_V_W(__asan_report_load2)
-WRAP_V_W(__asan_report_load4)
-WRAP_V_W(__asan_report_load8)
-WRAP_V_W(__asan_report_load16)
-WRAP_V_WW(__asan_report_load_n)
-
-WRAP_V_WW(__asan_register_globals)
-WRAP_V_WW(__asan_unregister_globals)
-
-WRAP_W_WW(__asan_stack_malloc_0)
-WRAP_W_WW(__asan_stack_malloc_1)
-WRAP_W_WW(__asan_stack_malloc_2)
-WRAP_W_WW(__asan_stack_malloc_3)
-WRAP_W_WW(__asan_stack_malloc_4)
-WRAP_W_WW(__asan_stack_malloc_5)
-WRAP_W_WW(__asan_stack_malloc_6)
-WRAP_W_WW(__asan_stack_malloc_7)
-WRAP_W_WW(__asan_stack_malloc_8)
-WRAP_W_WW(__asan_stack_malloc_9)
-WRAP_W_WW(__asan_stack_malloc_10)
-
-WRAP_V_WWW(__asan_stack_free_0)
-WRAP_V_WWW(__asan_stack_free_1)
-WRAP_V_WWW(__asan_stack_free_2)
-WRAP_V_WWW(__asan_stack_free_4)
-WRAP_V_WWW(__asan_stack_free_5)
-WRAP_V_WWW(__asan_stack_free_6)
-WRAP_V_WWW(__asan_stack_free_7)
-WRAP_V_WWW(__asan_stack_free_8)
-WRAP_V_WWW(__asan_stack_free_9)
-WRAP_V_WWW(__asan_stack_free_10)
-
-// TODO(timurrrr): Add more interface functions on the as-needed basis.
-
-// ----------------- Memory allocation functions ---------------------
-WRAP_V_W(free)
-WRAP_V_WW(_free_dbg)
-
-WRAP_W_W(malloc)
-WRAP_W_WWWW(_malloc_dbg)
-
-WRAP_W_WW(calloc)
-WRAP_W_WWWWW(_calloc_dbg)
-WRAP_W_WWW(_calloc_impl)
-
-WRAP_W_WW(realloc)
-WRAP_W_WWW(_realloc_dbg)
-WRAP_W_WWW(_recalloc)
-
-WRAP_W_W(_msize)
-
-// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
-
-#endif // ASAN_DLL_THUNK
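
Every removed WRAP_* thunk above follows one shape: resolve the real symbol once in the main executable (which hosts the shared ASan runtime), then forward the call with type-erased, pointer-sized arguments. Hand-expanding WRAP_W_W(malloc) from the macros above gives roughly the following Windows-only sketch, reconstructed purely for illustration:

    extern "C" {
    void *__stdcall GetModuleHandleA(const char *module_name);
    void *__stdcall GetProcAddress(void *module, const char *proc_name);
    void abort();
    }

    static void *getRealProcAddressOrDie(const char *name) {
      // Look the symbol up in the main executable and die if it is missing.
      void *ret = GetProcAddress(GetModuleHandleA(0), name);
      if (!ret)
        abort();
      return ret;
    }

    // Hand-expanded WRAP_W_W(malloc); note that the argument is deliberately
    // type-erased to a pointer-sized word.
    extern "C" void *malloc(void *arg) {
      typedef void *(*fntype)(void *arg);
      // Resolved once, on the first call into this DLL.
      static fntype fn = (fntype)getRealProcAddressOrDie("malloc");
      return fn(arg);
    }
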
diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc
index cf4122472ef..de190d11a16 100644
--- a/libsanitizer/asan/asan_fake_stack.cc
+++ b/libsanitizer/asan/asan_fake_stack.cc
@@ -9,6 +9,7 @@
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
+
#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"
@@ -20,13 +21,19 @@ static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
+static const u64 kAllocaRedzoneSize = 32UL;
+static const u64 kAllocaRedzoneMask = 31UL;
+
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
- for (uptr i = 0; i < (1U << class_id); i++)
+ for (uptr i = 0; i < (1U << class_id); i++) {
shadow[i] = magic;
+ // Make sure this does not become memset.
+ SanitizerBreakOptimization(nullptr);
+ }
} else {
// The size class is too big, it's cheaper to poison only size bytes.
PoisonShadow(ptr, size, static_cast<u8>(magic));
@@ -40,21 +47,32 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
+ uptr size = RequiredSize(stack_size_log);
FakeStack *res = reinterpret_cast<FakeStack *>(
- MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
+ flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
+ : MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
- if (common_flags()->verbosity) {
- u8 *p = reinterpret_cast<u8 *>(res);
- Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
- GetCurrentTidOrInvalid(), p,
- p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
- }
+ u8 *p = reinterpret_cast<u8 *>(res);
+ VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
+ "mmapped %zdK, noreserve=%d \n",
+ GetCurrentTidOrInvalid(), p,
+ p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
+ size >> 10, flags()->uar_noreserve);
return res;
}
-void FakeStack::Destroy() {
+void FakeStack::Destroy(int tid) {
PoisonAll(0);
- UnmapOrDie(this, RequiredSize(stack_size_log_));
+ if (Verbosity() >= 2) {
+ InternalScopedString str(kNumberOfSizeClasses * 50);
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
+ str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
+ NumberOfFrames(stack_size_log(), class_id));
+ Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
+ }
+ uptr size = RequiredSize(stack_size_log_);
+ FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
+ UnmapOrDie(this, size);
}
void FakeStack::PoisonAll(u8 magic) {
@@ -62,7 +80,9 @@ void FakeStack::PoisonAll(u8 magic) {
magic);
}
+#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
+#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses);
@@ -88,10 +108,10 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res;
}
- return 0; // We are out of fake stack.
+ return nullptr; // We are out of fake stack.
}
-uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
@@ -101,7 +121,10 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
- return base + pos * BytesInSizeClass(class_id);
+ uptr res = base + pos * BytesInSizeClass(class_id);
+ *frame_end = res + BytesInSizeClass(class_id);
+ *frame_beg = res + sizeof(FakeFrame);
+ return res;
}
void FakeStack::HandleNoReturn() {
@@ -162,7 +185,7 @@ void SetTLSFakeStack(FakeStack *fs) { }
static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread();
- if (!t) return 0;
+ if (!t) return nullptr;
return t->fake_stack();
}
@@ -170,39 +193,39 @@ static FakeStack *GetFakeStackFast() {
if (FakeStack *fs = GetTLSFakeStack())
return fs;
if (!__asan_option_detect_stack_use_after_return)
- return 0;
+ return nullptr;
return GetFakeStack();
}
-ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
- if (!fs) return real_stack;
+ if (!fs) return 0;
+ uptr local_stack;
+ uptr real_stack = reinterpret_cast<uptr>(&local_stack);
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
- if (!ff)
- return real_stack; // Out of fake stack, return the real one.
+ if (!ff) return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
}
-ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
- if (ptr == real_stack)
- return;
+ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
-} // namespace __asan
+} // namespace __asan
// ---------------------- Interface ---------------- {{{1
+using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
- return __asan::OnMalloc(class_id, size, real_stack); \
+ __asan_stack_malloc_##class_id(uptr size) { \
+ return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
- uptr ptr, uptr size, uptr real_stack) { \
- __asan::OnFree(ptr, class_id, size, real_stack); \
+ uptr ptr, uptr size) { \
+ OnFree(ptr, class_id, size); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
@@ -216,3 +239,43 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs) return nullptr;
+ uptr frame_beg, frame_end;
+ FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
+ if (!frame) return nullptr;
+ if (frame->magic != kCurrentStackFrameMagic)
+ return nullptr;
+ if (beg) *beg = reinterpret_cast<void*>(frame_beg);
+ if (end) *end = reinterpret_cast<void*>(frame_end);
+ return reinterpret_cast<void*>(frame->real_stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_alloca_poison(uptr addr, uptr size) {
+ uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
+ uptr PartialRzAddr = addr + size;
+ uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
+ uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
+ FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
+ FastPoisonShadowPartialRightRedzone(
+ PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
+ RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
+ FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_allocas_unpoison(uptr top, uptr bottom) {
+ if ((!top) || (top > bottom)) return;
+ REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
+ (bottom - top) / SHADOW_GRANULARITY);
+}
+} // extern "C"
diff --git a/libsanitizer/asan/asan_fake_stack.h b/libsanitizer/asan/asan_fake_stack.h
index 5196025681c..550a86e2972 100644
--- a/libsanitizer/asan/asan_fake_stack.h
+++ b/libsanitizer/asan/asan_fake_stack.h
@@ -63,7 +63,7 @@ class FakeStack {
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
- void Destroy();
+ void Destroy(int tid);
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
@@ -127,7 +127,11 @@ class FakeStack {
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
- uptr AddrIsInFakeStack(uptr addr);
+ uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
+ USED uptr AddrIsInFakeStack(uptr addr) {
+ uptr t1, t2;
+ return AddrIsInFakeStack(addr, &t1, &t2);
+ }
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
diff --git a/libsanitizer/asan/asan_flags.cc b/libsanitizer/asan/asan_flags.cc
new file mode 100644
index 00000000000..9a68ad4eaa8
--- /dev/null
+++ b/libsanitizer/asan/asan_flags.cc
@@ -0,0 +1,177 @@
+//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_flags.h"
+#include "asan_interface_internal.h"
+#include "asan_stack.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "ubsan/ubsan_flags.h"
+#include "ubsan/ubsan_platform.h"
+
+namespace __asan {
+
+Flags asan_flags_dont_use_directly; // use via flags().
+
+static const char *MaybeCallAsanDefaultOptions() {
+ return (&__asan_default_options) ? __asan_default_options() : "";
+}
+
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
+#ifdef ASAN_DEFAULT_OPTIONS
+// Stringize the macro value.
+# define ASAN_STRINGIZE(x) #x
+# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
+ return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void Flags::SetDefaults() {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+void InitializeFlags() {
+ // Set the default values and prepare for parsing ASan and common flags.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.detect_leaks = CAN_SANITIZE_LEAKS;
+ cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = kDefaultMallocContextSize;
+ cf.intercept_tls_get_addr = true;
+ cf.exitcode = 1;
+ OverrideCommonFlags(cf);
+ }
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser asan_parser;
+ RegisterAsanFlags(&asan_parser, f);
+ RegisterCommonFlags(&asan_parser);
+
+ // Set the default values and prepare for parsing LSan and UBSan flags
+ // (which can also overwrite common flags).
+#if CAN_SANITIZE_LEAKS
+ __lsan::Flags *lf = __lsan::flags();
+ lf->SetDefaults();
+
+ FlagParser lsan_parser;
+ __lsan::RegisterLsanFlags(&lsan_parser, lf);
+ RegisterCommonFlags(&lsan_parser);
+#endif
+
+#if CAN_SANITIZE_UB
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Override from ASan compile definition.
+ const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
+ asan_parser.ParseString(asan_compile_def);
+
+ // Override from user-specified string.
+ const char *asan_default_options = MaybeCallAsanDefaultOptions();
+ asan_parser.ParseString(asan_default_options);
+#if CAN_SANITIZE_UB
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+
+ // Override from command line.
+ asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
+#endif
+#if CAN_SANITIZE_UB
+ ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
+#endif
+
+ // Let activation flags override current settings. On Android they come
+ // from a system property. On other platforms this is no-op.
+ if (!flags()->start_deactivated) {
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ asan_parser.ParseString(buf);
+ }
+
+ SetVerbosity(common_flags()->verbosity);
+
+ // TODO(eugenis): dump all flags at verbosity>=2?
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ // TODO(samsonov): print all of the flags (ASan, LSan, common).
+ asan_parser.PrintFlagDescriptions();
+ }
+
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
+ // Make "strict_init_order" imply "check_initialization_order".
+ // TODO(samsonov): Use a single runtime flag for an init-order checker.
+ if (f->strict_init_order) {
+ f->check_initialization_order = true;
+ }
+ CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
+ CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
+ CHECK_GE(f->redzone, 16);
+ CHECK_GE(f->max_redzone, f->redzone);
+ CHECK_LE(f->max_redzone, 2048);
+ CHECK(IsPowerOfTwo(f->redzone));
+ CHECK(IsPowerOfTwo(f->max_redzone));
+
+ // quarantine_size is deprecated but we still honor it.
+ // quarantine_size can not be used together with quarantine_size_mb.
+ if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
+ Report("%s: please use either 'quarantine_size' (deprecated) or "
+ "quarantine_size_mb, but not both\n", SanitizerToolName);
+ Die();
+ }
+ if (f->quarantine_size >= 0)
+ f->quarantine_size_mb = f->quarantine_size >> 20;
+ if (f->quarantine_size_mb < 0) {
+ const int kDefaultQuarantineSizeMb =
+ (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+ f->quarantine_size_mb = kDefaultQuarantineSizeMb;
+ }
+}
+
+} // namespace __asan
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+} // extern "C"
+#endif
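
InitializeFlags applies the option sources in a fixed order, later sources overriding earlier ones: built-in defaults, the ASAN_DEFAULT_OPTIONS compile definition, the __asan_default_options() callback, then the ASAN_OPTIONS/LSAN_OPTIONS/UBSAN_OPTIONS environment variables and, unless start_deactivated is set, the activation flags. A hedged usage sketch (flag names are taken from asan_flags.inc below; the values are arbitrary but pass the validation checks above):

    // Baked into the binary; any ASAN_OPTIONS setting still wins at run time.
    extern "C" const char *__asan_default_options() {
      return "detect_stack_use_after_return=1:redzone=32:max_redzone=1024";
    }

    int main() {
      // ... instrumented code ...
      return 0;
    }

Running the same binary as ASAN_OPTIONS=redzone=64:quarantine_size_mb=128 ./a.out then overrides redzone from the callback while the other two settings remain in effect.
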
diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h
index 62b5d3215d3..6b33789b84c 100644
--- a/libsanitizer/asan/asan_flags.h
+++ b/libsanitizer/asan/asan_flags.h
@@ -14,6 +14,7 @@
#define ASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
// ASan flag values can be defined in four ways:
// 1) initialized with default values at startup.
@@ -22,99 +23,24 @@
// 3) overridden from string returned by user-specified function
// __asan_default_options().
// 4) overridden from env variable ASAN_OPTIONS.
+// 5) overridden during ASan activation (for now used on Android only).
namespace __asan {
struct Flags {
- // Size (in bytes) of quarantine used to detect use-after-free errors.
- // Lower value may reduce memory usage but increase the chance of
- // false negatives.
- int quarantine_size;
- // Size (in bytes) of redzones around heap objects.
- // Requirement: redzone >= 32, is a power of two.
- int redzone;
- // If set, prints some debugging information and does additional checks.
- bool debug;
- // Controls the way to handle globals (0 - don't detect buffer overflow
- // on globals, 1 - detect buffer overflow, 2 - print data about registered
- // globals).
- int report_globals;
- // If set, attempts to catch initialization order issues.
- bool check_initialization_order;
- // If set, uses custom wrappers and replacements for libc string functions
- // to find more errors.
- bool replace_str;
- // If set, uses custom wrappers for memset/memcpy/memmove intinsics.
- bool replace_intrin;
- // Used on Mac only.
- bool mac_ignore_invalid_free;
- // Enables stack-use-after-return checking at run-time.
- bool detect_stack_use_after_return;
- // The minimal fake stack size log.
- int uar_stack_size_log;
- // ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
- // that will be filled with malloc_fill_byte on malloc.
- int max_malloc_fill_size, malloc_fill_byte;
- // Override exit status if something was reported.
- int exitcode;
- // If set, user may manually mark memory regions as poisoned or unpoisoned.
- bool allow_user_poisoning;
- // Number of seconds to sleep between printing an error report and
- // terminating application. Useful for debug purposes (when one needs
- // to attach gdb, for example).
- int sleep_before_dying;
- // If set, registers ASan custom segv handler.
- bool handle_segv;
- // If set, allows user register segv handler even if ASan registers one.
- bool allow_user_segv_handler;
- // If set, uses alternate stack for signal handling.
- bool use_sigaltstack;
- // Allow the users to work around the bug in Nvidia drivers prior to 295.*.
- bool check_malloc_usable_size;
- // If set, explicitly unmaps (huge) shadow at exit.
- bool unmap_shadow_on_exit;
- // If set, calls abort() instead of _exit() after printing an error report.
- bool abort_on_error;
- // Print various statistics after printing an error message or if atexit=1.
- bool print_stats;
- // Print the legend for the shadow bytes.
- bool print_legend;
- // If set, prints ASan exit stats even after program terminates successfully.
- bool atexit;
- // If set, coverage information will be dumped at shutdown time if the
- // appropriate instrumentation was enabled.
- bool coverage;
- // By default, disable core dumper on 64-bit - it makes little sense
- // to dump 16T+ core.
- bool disable_core;
- // Allow the tool to re-exec the program. This may interfere badly with the
- // debugger.
- bool allow_reexec;
- // If set, prints not only thread creation stacks for threads in error report,
- // but also thread creation stacks for threads that created those threads,
- // etc. up to main thread.
- bool print_full_thread_history;
- // Poison (or not) the heap memory on [de]allocation. Zero value is useful
- // for benchmarking the allocator or instrumentator.
- bool poison_heap;
- // If true, poison partially addressable 8-byte aligned words (default=true).
- // This flag affects heap and global buffers, but not stack buffers.
- bool poison_partial;
- // Report errors on malloc/delete, new/free, new/delete[], etc.
- bool alloc_dealloc_mismatch;
- // If true, assume that memcmp(p1, p2, n) always reads n bytes before
- // comparing p1 and p2.
- bool strict_memcmp;
- // If true, assume that dynamic initializers can never access globals from
- // other modules, even if the latter are already initialized.
- bool strict_init_order;
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+
+ void SetDefaults();
};
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
-void InitializeFlags(Flags *f, const char *env);
+
+void InitializeFlags();
} // namespace __asan
diff --git a/libsanitizer/asan/asan_flags.inc b/libsanitizer/asan/asan_flags.inc
new file mode 100644
index 00000000000..563b464bff1
--- /dev/null
+++ b/libsanitizer/asan/asan_flags.inc
@@ -0,0 +1,138 @@
+//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_FLAG
+# error "Define ASAN_FLAG prior to including this file!"
+#endif
+
+// ASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_FLAG(int, quarantine_size, -1,
+ "Deprecated, please use quarantine_size_mb.")
+ASAN_FLAG(int, quarantine_size_mb, -1,
+ "Size (in Mb) of quarantine used to detect use-after-free "
+ "errors. Lower value may reduce memory usage but increase the "
+ "chance of false negatives.")
+ASAN_FLAG(int, redzone, 16,
+ "Minimal size (in bytes) of redzones around heap objects. "
+ "Requirement: redzone >= 16, is a power of two.")
+ASAN_FLAG(int, max_redzone, 2048,
+ "Maximal size (in bytes) of redzones around heap objects.")
+ASAN_FLAG(
+ bool, debug, false,
+ "If set, prints some debugging information and does additional checks.")
+ASAN_FLAG(
+ int, report_globals, 1,
+ "Controls the way to handle globals (0 - don't detect buffer overflow on "
+ "globals, 1 - detect buffer overflow, 2 - print data about registered "
+ "globals).")
+ASAN_FLAG(bool, check_initialization_order, false,
+ "If set, attempts to catch initialization order issues.")
+ASAN_FLAG(
+ bool, replace_str, true,
+ "If set, uses custom wrappers and replacements for libc string functions "
+ "to find more errors.")
+ASAN_FLAG(bool, replace_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
+ASAN_FLAG(bool, mac_ignore_invalid_free, false,
+ "Ignore invalid free() calls to work around some bugs. Used on OS X "
+ "only.")
+ASAN_FLAG(bool, detect_stack_use_after_return, false,
+ "Enables stack-use-after-return checking at run-time.")
+ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+ "Minimum fake stack size log.")
+ASAN_FLAG(int, max_uar_stack_size_log,
+ 20, // 1Mb per size class, i.e. ~11Mb per thread
+ "Maximum fake stack size log.")
+ASAN_FLAG(bool, uar_noreserve, false,
+ "Use mmap with 'noreserve' flag to allocate fake stack.")
+ASAN_FLAG(
+ int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
+ "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+ASAN_FLAG(bool, allow_user_poisoning, true,
+ "If set, user may manually mark memory regions as poisoned or "
+ "unpoisoned.")
+ASAN_FLAG(
+ int, sleep_before_dying, 0,
+ "Number of seconds to sleep between printing an error report and "
+ "terminating the program. Useful for debugging purposes (e.g. when one "
+ "needs to attach gdb).")
+ASAN_FLAG(bool, check_malloc_usable_size, true,
+ "Allows the users to work around the bug in Nvidia drivers prior to "
+ "295.*.")
+ASAN_FLAG(bool, unmap_shadow_on_exit, false,
+ "If set, explicitly unmaps the (huge) shadow at exit.")
+ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
+ASAN_FLAG(bool, print_stats, false,
+ "Print various statistics after printing an error message or if "
+ "atexit=1.")
+ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+ASAN_FLAG(bool, atexit, false,
+ "If set, prints ASan exit stats even after program terminates "
+ "successfully.")
+ASAN_FLAG(
+ bool, print_full_thread_history, true,
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.")
+ASAN_FLAG(
+ bool, poison_heap, true,
+ "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+ "for benchmarking the allocator or instrumentator.")
+ASAN_FLAG(bool, poison_partial, true,
+ "If true, poison partially addressable 8-byte aligned words "
+ "(default=true). This flag affects heap and global buffers, but not "
+ "stack buffers.")
+ASAN_FLAG(bool, poison_array_cookie, true,
+ "Poison (or not) the array cookie after operator new[].")
+
+// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=131
+// https://code.google.com/p/address-sanitizer/issues/detail?id=309
+// TODO(glider,timurrrr): Fix known issues and enable this back.
+ASAN_FLAG(bool, alloc_dealloc_mismatch,
+ (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+ "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+ASAN_FLAG(bool, new_delete_type_mismatch, true,
+ "Report errors on mismatch betwen size of new and delete.")
+ASAN_FLAG(
+ bool, strict_init_order, false,
+ "If true, assume that dynamic initializers can never access globals from "
+ "other modules, even if the latter are already initialized.")
+ASAN_FLAG(
+ bool, start_deactivated, false,
+ "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+ "poisoning) to reduce memory consumption as much as possible, and "
+ "restores them to original values when the first instrumented module is "
+ "loaded into the process. This is mainly intended to be used on "
+ "Android. ")
+ASAN_FLAG(
+ int, detect_invalid_pointer_pairs, 0,
+ "If non-zero, try to detect operations like <, <=, >, >= and - on "
+ "invalid pointer pairs (e.g. when pointers belong to different objects). "
+ "The bigger the value the harder we try.")
+ASAN_FLAG(
+ bool, detect_container_overflow, true,
+ "If true, honor the container overflow annotations. "
+ "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
+ASAN_FLAG(int, detect_odr_violation, 2,
+ "If >=2, detect violation of One-Definition-Rule (ODR); "
+ "If ==1, detect ODR-violation only if the two variables "
+ "have different sizes")
+ASAN_FLAG(bool, dump_instruction_bytes, false,
+ "If true, dump 16 bytes starting at the instruction that caused SEGV")
+ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+ASAN_FLAG(bool, halt_on_error, true,
+ "Crash the program after printing the first error report "
+ "(WARNING: USE AT YOUR OWN RISK!)")
diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc
index e97850a854a..9c3588b8e08 100644
--- a/libsanitizer/asan/asan_globals.cc
+++ b/libsanitizer/asan/asan_globals.cc
@@ -9,6 +9,7 @@
//
// Handle globals.
//===----------------------------------------------------------------------===//
+
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
@@ -16,10 +17,12 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "asan_suppressions.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -43,6 +46,14 @@ typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
// Lazy-initialized and never deleted.
static VectorOfGlobals *dynamic_init_globals;
+// We want to remember where a certain range of globals was registered.
+struct GlobalRegistrationSite {
+ u32 stack_id;
+ Global *g_first, *g_last;
+};
+typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
+static GlobalRegistrationSiteVector *global_registration_site_vector;
+
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
FastPoisonShadow(g->beg, g->size_with_redzone, value);
}
@@ -60,25 +71,68 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) {
}
}
+const uptr kMinimalDistanceFromAnotherGlobal = 64;
+
+static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
+ if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
+ if (addr >= g.beg + g.size_with_redzone) return false;
+ return true;
+}
+
static void ReportGlobal(const Global &g, const char *prefix) {
- Report("%s Global: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
- prefix, (void*)g.beg, g.size, g.size_with_redzone, g.name,
+ Report("%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
+ prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
g.module_name, g.has_dynamic_init);
+ if (g.location) {
+ Report(" location (%p): name=%s[%p], %d %d\n", g.location,
+ g.location->filename, g.location->filename, g.location->line_no,
+ g.location->column_no);
+ }
+}
+
+static u32 FindRegistrationSite(const Global *g) {
+ mu_for_globals.CheckLocked();
+ CHECK(global_registration_site_vector);
+ for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
+ GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
+ if (g >= grs.g_first && g <= grs.g_last)
+ return grs.stack_id;
+ }
+ return 0;
}
-bool DescribeAddressIfGlobal(uptr addr, uptr size) {
- if (!flags()->report_globals) return false;
+int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
+ int max_globals) {
+ if (!flags()->report_globals) return 0;
BlockingMutexLock lock(&mu_for_globals);
- bool res = false;
+ int res = 0;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
if (flags()->report_globals >= 2)
ReportGlobal(g, "Search");
- res |= DescribeAddressRelativeToGlobal(addr, size, g);
+ if (IsAddressNearGlobal(addr, g)) {
+ globals[res] = g;
+ if (reg_sites)
+ reg_sites[res] = FindRegistrationSite(&g);
+ res++;
+ if (res == max_globals) break;
+ }
}
return res;
}
+bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
+ Global g = {};
+ if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
+ internal_strncpy(descr->name, g.name, descr->name_size);
+ descr->region_address = g.beg;
+ descr->region_size = g.size;
+ descr->region_kind = "global";
+ return true;
+ }
+ return false;
+}
+
// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.
@@ -90,14 +144,31 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
- if (flags()->poison_heap)
+ // This "ODR violation" detection is fundamentally incompatible with
+ // how GCC registers globals. Disable as useless until rewritten upstream.
+ if (0 && flags()->detect_odr_violation) {
+ // Try detecting ODR (One Definition Rule) violation, i.e. the situation
+ // where two globals with the same name are defined in different modules.
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+ // This check may not be enough: if the first global is much larger
+ // the entire redzone of the second global may be within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+ }
+ }
+ if (CanPoisonMemory())
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
if (g->has_dynamic_init) {
- if (dynamic_init_globals == 0) {
+ if (!dynamic_init_globals) {
dynamic_init_globals = new(allocator_for_globals)
VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
}
@@ -108,11 +179,13 @@ static void RegisterGlobal(const Global *g) {
static void UnregisterGlobal(const Global *g) {
CHECK(asan_inited);
+ if (flags()->report_globals >= 2)
+ ReportGlobal(*g, "Removed");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
- if (flags()->poison_heap)
+ if (CanPoisonMemory())
PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
@@ -134,7 +207,7 @@ void StopInitOrderChecking() {
}
}
-} // namespace __asan
+} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
@@ -142,7 +215,18 @@ using namespace __asan; // NOLINT
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
+ GET_STACK_TRACE_MALLOC;
+ u32 stack_id = StackDepotPut(stack);
BlockingMutexLock lock(&mu_for_globals);
+ if (!global_registration_site_vector)
+ global_registration_site_vector =
+ new(allocator_for_globals) GlobalRegistrationSiteVector(128);
+ GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
+ global_registration_site_vector->push_back(site);
+ if (flags()->report_globals >= 2) {
+ PRINT_CURRENT_STACK();
+ Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
+ }
for (uptr i = 0; i < n; i++) {
RegisterGlobal(&globals[i]);
}
@@ -164,7 +248,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
- !flags()->poison_heap)
+ !CanPoisonMemory())
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
@@ -190,7 +274,7 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
- !flags()->poison_heap)
+ !CanPoisonMemory())
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
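
GetGlobalsForAddress treats an address as belonging to a global if it lies inside the global's redzoned range or less than kMinimalDistanceFromAnotherGlobal (64) bytes before it. A standalone restatement of that predicate with a worked check (addresses are hypothetical):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t kMinimalDistanceFromAnotherGlobal = 64;

    // Restates IsAddressNearGlobal from the hunk above.
    static bool IsAddressNearGlobal(uint64_t addr, uint64_t beg,
                                    uint64_t size_with_redzone) {
      if (addr <= beg - kMinimalDistanceFromAnotherGlobal) return false;
      if (addr >= beg + size_with_redzone) return false;
      return true;
    }

    int main() {
      // Hypothetical global at 0x601040 occupying 96 bytes including redzones.
      uint64_t beg = 0x601040, size_with_redzone = 96;
      printf("%d %d %d\n",
             IsAddressNearGlobal(beg - 80, beg, size_with_redzone),    // 0: too far left
             IsAddressNearGlobal(beg - 16, beg, size_with_redzone),    // 1: likely underflow
             IsAddressNearGlobal(beg + 100, beg, size_with_redzone));  // 0: past right redzone
      return 0;
    }
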
diff --git a/libsanitizer/asan/asan_init_version.h b/libsanitizer/asan/asan_init_version.h
new file mode 100644
index 00000000000..2cda18849dd
--- /dev/null
+++ b/libsanitizer/asan/asan_init_version.h
@@ -0,0 +1,32 @@
+//===-- asan_init_version.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header defines a versioned __asan_init function to be called at the
+// startup of the instrumented program.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INIT_VERSION_H
+#define ASAN_INIT_VERSION_H
+
+extern "C" {
+ // Every time the ASan ABI changes we also change the version number in the
+ // __asan_init function name. Objects built with incompatible ASan ABI
+ // versions will not link with run-time.
+ // Changes between ABI versions:
+ // v1=>v2: added 'module_name' to __asan_global
+ // v2=>v3: stack frame description (created by the compiler)
+ // contains the function PC as the 3-rd field (see
+ // DescribeAddressIfStack).
+ // v3=>v4: added '__asan_global_source_location' to __asan_global.
+ // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
+ // __asan_stack_free_ functions.
+ // v5=>v6: changed the name of the version check symbol
+ #define __asan_version_mismatch_check __asan_version_mismatch_check_v6
+}
+
+#endif // ASAN_INIT_VERSION_H
diff --git a/libsanitizer/asan/asan_intercepted_functions.h b/libsanitizer/asan/asan_intercepted_functions.h
deleted file mode 100644
index 19b53363a5b..00000000000
--- a/libsanitizer/asan/asan_intercepted_functions.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===-- asan_intercepted_functions.h ----------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// ASan-private header containing prototypes for wrapper functions and wrappers
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_INTERCEPTED_FUNCTIONS_H
-#define ASAN_INTERCEPTED_FUNCTIONS_H
-
-#include "sanitizer_common/sanitizer_platform_interceptors.h"
-
-// Use macro to describe if specific function should be
-// intercepted on a given platform.
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
-# define ASAN_INTERCEPT__LONGJMP 1
-# define ASAN_INTERCEPT_STRDUP 1
-# define ASAN_INTERCEPT_INDEX 1
-# define ASAN_INTERCEPT_PTHREAD_CREATE 1
-# define ASAN_INTERCEPT_MLOCKX 1
-#else
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
-# define ASAN_INTERCEPT__LONGJMP 0
-# define ASAN_INTERCEPT_STRDUP 0
-# define ASAN_INTERCEPT_INDEX 0
-# define ASAN_INTERCEPT_PTHREAD_CREATE 0
-# define ASAN_INTERCEPT_MLOCKX 0
-#endif
-
-#if SANITIZER_LINUX
-# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
-#else
-# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
-#endif
-
-#if !SANITIZER_MAC
-# define ASAN_INTERCEPT_STRNLEN 1
-#else
-# define ASAN_INTERCEPT_STRNLEN 0
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# define ASAN_INTERCEPT_SWAPCONTEXT 1
-#else
-# define ASAN_INTERCEPT_SWAPCONTEXT 0
-#endif
-
-#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
-#else
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
-#endif
-
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGLONGJMP 1
-#else
-# define ASAN_INTERCEPT_SIGLONGJMP 0
-#endif
-
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT___CXA_THROW 1
-#else
-# define ASAN_INTERCEPT___CXA_THROW 0
-#endif
-
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT___CXA_ATEXIT 1
-#else
-# define ASAN_INTERCEPT___CXA_ATEXIT 0
-#endif
-
-#endif // ASAN_INTERCEPTED_FUNCTIONS_H
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
index decbfea5f75..356f2c02897 100644
--- a/libsanitizer/asan/asan_interceptors.cc
+++ b/libsanitizer/asan/asan_interceptors.cc
@@ -9,19 +9,28 @@
//
// Intercept various libc functions.
//===----------------------------------------------------------------------===//
-#include "asan_interceptors.h"
+#include "asan_interceptors.h"
#include "asan_allocator.h"
-#include "asan_intercepted_functions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
-#include "interception/interception.h"
+#include "asan_suppressions.h"
#include "sanitizer_common/sanitizer_libc.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+
+#if defined(__i386) && SANITIZER_LINUX
+#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
+#elif defined(__mips__) && SANITIZER_LINUX
+#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
+#endif
+
namespace __asan {
// Return true if we can quickly decide that the region is unpoisoned.
@@ -34,24 +43,52 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
return false;
}
+struct AsanInterceptorContext {
+ const char *interceptor_name;
+};
+
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
+ if (__offset > __offset + __size) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
+ } \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
- GET_CURRENT_PC_BP_SP; \
- __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \
+ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
+ bool suppressed = false; \
+ if (_ctx) { \
+ suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
+ if (!suppressed && HaveStackTraceBasedSuppressions()) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ suppressed = IsStackTraceSuppressed(&stack); \
+ } \
+ } \
+ if (!suppressed) { \
+ GET_CURRENT_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
+ } \
} \
} while (0)
-#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
-#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true)
+#define ASAN_READ_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, false)
+#define ASAN_WRITE_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, true)
+
+#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \
+ ASAN_READ_RANGE((ctx), (s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n))
+
+#define ASAN_READ_STRING(ctx, s, n) \
+ ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case.
@@ -70,16 +107,9 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} \
} while (0)
-#define ENSURE_ASAN_INITED() do { \
- CHECK(!asan_init_is_running); \
- if (!asan_inited) { \
- __asan_init(); \
- } \
-} while (0)
-
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
- if (REAL(strnlen) != 0) {
+ if (REAL(strnlen)) {
return REAL(strnlen)(s, maxlen);
}
#endif
@@ -97,7 +127,7 @@ int OnExit() {
return 0;
}
-} // namespace __asan
+} // namespace __asan
// ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT
@@ -105,32 +135,27 @@ using namespace __asan; // NOLINT
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
-#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) \
- do { \
- if ((!INTERCEPT_FUNCTION(name) || !REAL(name)) && \
- common_flags()->verbosity > 0) \
- Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
- } while (0)
-#else
-// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
-#define ASAN_INTERCEPT_FUNC(name)
-#endif // SANITIZER_MAC
+#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
+ AsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx; \
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- ASAN_WRITE_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- do { \
- if (asan_init_is_running) return REAL(func)(__VA_ARGS__); \
- ctx = 0; \
- (void) ctx; \
- if (SANITIZER_MAC && !asan_inited) return REAL(func)(__VA_ARGS__); \
- ENSURE_ASAN_INITED(); \
+ ASAN_WRITE_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ ASAN_READ_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
+ do { \
+ if (asan_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_ASAN_INITED(); \
+ } while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
@@ -150,11 +175,30 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
do { \
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+// Strict init-order checking is dlopen-hostile:
+// https://code.google.com/p/address-sanitizer/issues/detail?id=178
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ if (flags()->strict_init_order) { \
+ StopInitOrderChecking(); \
+ }
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (AsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
#include "sanitizer_common/sanitizer_common_interceptors.inc"
-#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
-#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
+// Syscall interceptors don't have contexts, we don't support suppressions
+// for them.
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \
(void)(p); \
@@ -167,47 +211,93 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
+struct ThreadStartParam {
+ atomic_uintptr_t t;
+ atomic_uintptr_t is_registered;
+};
+
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
- AsanThread *t = (AsanThread*)arg;
+ ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
+ AsanThread *t = nullptr;
+ while ((t = reinterpret_cast<AsanThread *>(
+ atomic_load(&param->t, memory_order_acquire))) == nullptr)
+ internal_sched_yield();
SetCurrentThread(t);
- return t->ThreadStart(GetTid());
+ return t->ThreadStart(GetTid(), &param->is_registered);
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
- // Strict init-order checking in thread-hostile.
+ // Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
int detached = 0;
- if (attr != 0)
+ if (attr)
REAL(pthread_attr_getdetachstate)(attr, &detached);
+ ThreadStartParam param;
+ atomic_store(&param.t, 0, memory_order_relaxed);
+ atomic_store(&param.is_registered, 0, memory_order_relaxed);
+ int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+ if (result == 0) {
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
+ // Wait until the AsanThread object is initialized and the ThreadRegistry
+ // entry is in "started" state. One reason for this is that after this
+ // interceptor exits, the child thread's stack may be the only thing holding
+ // the |arg| pointer. This may cause LSan to report a leak if leak checking
+ // happens at a point when the interceptor has already exited, but the stack
+ // range for the child thread is not yet known.
+ while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
+ internal_sched_yield();
+ }
+ return result;
+}
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t = AsanThread::Create(start_routine, arg);
- CreateThreadContextArgs args = { t, &stack };
- asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
- return REAL(pthread_create)(thread, attr, asan_thread_start, t);
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return real_pthread_join(t, arg);
}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
+
+#if SANITIZER_ANDROID
+INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
+ return REAL(bsd_signal)(signum, handler);
+ }
+ return 0;
+}
+#endif
+
INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
- return 0;
+ return nullptr;
}
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
- if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
}
+
+namespace __sanitizer {
+int real_sigaction(int signum, const void *act, void *oldact) {
+ return REAL(sigaction)(signum, (const struct sigaction *)act,
+ (struct sigaction *)oldact);
+}
+} // namespace __sanitizer
+
#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
@@ -222,7 +312,7 @@ static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
ssize += stack - bottom;
ssize = RoundUpTo(ssize, PageSize);
static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
- if (ssize && ssize <= kMaxSaneContextStackSize) {
+ if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
PoisonShadow(bottom, ssize, 0);
}
}
@@ -277,108 +367,73 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
-// intercept mlock and friends.
-// Since asan maps 16T of RAM, mlock is completely unfriendly to asan.
-// All functions return 0 (success).
-static void MlockIsUnsupported() {
- static bool printed = false;
- if (printed) return;
- printed = true;
- if (common_flags()->verbosity > 0) {
- Printf("INFO: AddressSanitizer ignores "
- "mlock/mlockall/munlock/munlockall\n");
- }
-}
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
+ if (asan_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ if (to != from) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
-INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
+void *__asan_memcpy(void *to, const void *from, uptr size) {
+ ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
-INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
-}
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
+ if (asan_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
-INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
+void *__asan_memset(void *block, int c, uptr size) {
+ ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
-static inline int CharCmp(unsigned char c1, unsigned char c2) {
- return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
-}
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
+ if (UNLIKELY(!asan_inited)) \
+ return internal_memmove(to, from, size); \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
-INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
- if (!asan_inited) return internal_memcmp(a1, a2, size);
- ENSURE_ASAN_INITED();
- if (flags()->replace_intrin) {
- if (flags()->strict_memcmp) {
- // Check the entire regions even if the first bytes of the buffers are
- // different.
- ASAN_READ_RANGE(a1, size);
- ASAN_READ_RANGE(a2, size);
- // Fallthrough to REAL(memcmp) below.
- } else {
- unsigned char c1 = 0, c2 = 0;
- const unsigned char *s1 = (const unsigned char*)a1;
- const unsigned char *s2 = (const unsigned char*)a2;
- uptr i;
- for (i = 0; i < size; i++) {
- c1 = s1[i];
- c2 = s2[i];
- if (c1 != c2) break;
- }
- ASAN_READ_RANGE(s1, Min(i + 1, size));
- ASAN_READ_RANGE(s2, Min(i + 1, size));
- return CharCmp(c1, c2);
- }
- }
- return REAL(memcmp(a1, a2, size));
+void *__asan_memmove(void *to, const void *from, uptr size) {
+ ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
-#define MEMMOVE_BODY { \
- if (!asan_inited) return internal_memmove(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memmove)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(from, size); \
- ASAN_WRITE_RANGE(to, size); \
- } \
- return internal_memmove(to, from, size); \
+INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove);
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size);
}
-INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) MEMMOVE_BODY
-
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC
- if (!asan_inited) return internal_memcpy(to, from, size);
- // memcpy is called during __asan_init() from the internals
- // of printf(...).
- if (asan_init_is_running) {
- return REAL(memcpy)(to, from, size);
- }
- ENSURE_ASAN_INITED();
- if (flags()->replace_intrin) {
- if (to != from) {
- // We do not treat memcpy with to==from as a bug.
- // See http://llvm.org/bugs/show_bug.cgi?id=11763.
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
- }
- ASAN_READ_RANGE(from, size);
- ASAN_WRITE_RANGE(to, size);
- }
- // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8, so
- // calling REAL(memcpy) here leads to infinite recursion.
- // See also http://code.google.com/p/address-sanitizer/issues/detail?id=116.
- return internal_memcpy(to, from, size);
+ ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
@@ -386,25 +441,20 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
- MEMMOVE_BODY
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif // !SANITIZER_MAC
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- if (!asan_inited) return internal_memset(block, c, size);
- // memset is called inside Printf.
- if (asan_init_is_running) {
- return REAL(memset)(block, c, size);
- }
- ENSURE_ASAN_INITED();
- if (flags()->replace_intrin) {
- ASAN_WRITE_RANGE(block, size);
- }
- return REAL(memset)(block, c, size);
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memset);
+ ASAN_MEMSET_IMPL(ctx, block, c, size);
}
INTERCEPTOR(char*, strchr, const char *str, int c) {
- if (!asan_inited) return internal_strchr(str, c);
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strchr);
+ if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
if (asan_init_is_running) {
@@ -413,8 +463,9 @@ INTERCEPTOR(char*, strchr, const char *str, int c) {
ENSURE_ASAN_INITED();
char *result = REAL(strchr)(str, c);
if (flags()->replace_str) {
- uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1;
- ASAN_READ_RANGE(str, bytes_read);
+ uptr len = REAL(strlen)(str);
+ uptr bytes_read = (result ? result - str : len) + 1;
+ ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
}
return result;
}
@@ -436,13 +487,15 @@ DEFINE_REAL(char*, index, const char *string, int c)
// For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length.
INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_length = REAL(strlen)(from);
- ASAN_READ_RANGE(from, from_length + 1);
+ ASAN_READ_RANGE(ctx, from, from_length + 1);
uptr to_length = REAL(strlen)(to);
- ASAN_READ_RANGE(to, to_length);
- ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+ ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
+ ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
// If the copying actually happens, the |from| string should not overlap
// with the resulting string starting at |to|, which has a length of
// to_length + from_length + 1.
@@ -455,14 +508,16 @@ INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
}
INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strncat);
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1);
- ASAN_READ_RANGE(from, copy_length);
+ ASAN_READ_RANGE(ctx, from, copy_length);
uptr to_length = REAL(strlen)(to);
- ASAN_READ_RANGE(to, to_length);
- ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+ ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
+ ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
if (from_length > 0) {
CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
from, copy_length);
@@ -472,8 +527,10 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
}
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT
#if SANITIZER_MAC
- if (!asan_inited) return REAL(strcpy)(to, from); // NOLINT
+ if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
@@ -484,19 +541,21 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
if (flags()->replace_str) {
uptr from_size = REAL(strlen)(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
- ASAN_READ_RANGE(from, from_size);
- ASAN_WRITE_RANGE(to, from_size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, from_size);
}
return REAL(strcpy)(to, from); // NOLINT
}
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
- if (!asan_inited) return internal_strdup(s);
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+ if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
- ASAN_READ_RANGE(s, length + 1);
+ ASAN_READ_RANGE(ctx, s, length + 1);
}
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
@@ -505,90 +564,79 @@ INTERCEPTOR(char*, strdup, const char *s) {
}
#endif
-INTERCEPTOR(uptr, strlen, const char *s) {
- if (!asan_inited) return internal_strlen(s);
+INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strlen);
+ if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
return REAL(strlen)(s);
}
ENSURE_ASAN_INITED();
- uptr length = REAL(strlen)(s);
+ SIZE_T length = REAL(strlen)(s);
if (flags()->replace_str) {
- ASAN_READ_RANGE(s, length + 1);
+ ASAN_READ_RANGE(ctx, s, length + 1);
}
return length;
}
-INTERCEPTOR(uptr, wcslen, const wchar_t *s) {
- uptr length = REAL(wcslen)(s);
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
+ SIZE_T length = REAL(wcslen)(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
- ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
+ ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
}
return length;
}
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
- ASAN_READ_RANGE(from, from_size);
- ASAN_WRITE_RANGE(to, size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, size);
}
return REAL(strncpy)(to, from, size);
}
#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
ENSURE_ASAN_INITED();
uptr length = REAL(strnlen)(s, maxlen);
if (flags()->replace_str) {
- ASAN_READ_RANGE(s, Min(length + 1, maxlen));
+ ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
}
return length;
}
#endif // ASAN_INTERCEPT_STRNLEN
-static inline bool IsValidStrtolBase(int base) {
- return (base == 0) || (2 <= base && base <= 36);
-}
-
-static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
- CHECK(endptr);
- if (nptr == *endptr) {
- // No digits were found at strtol call, we need to find out the last
- // symbol accessed by strtoll on our own.
- // We get this symbol by skipping leading blanks and optional +/- sign.
- while (IsSpace(*nptr)) nptr++;
- if (*nptr == '+' || *nptr == '-') nptr++;
- *endptr = (char*)nptr;
- }
- CHECK(*endptr >= nptr);
-}
-
INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
char **endptr, int base) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strtol);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(strtol)(nptr, endptr, base);
}
char *real_endptr;
long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT
- if (endptr != 0) {
- *endptr = real_endptr;
- }
- if (IsValidStrtolBase(base)) {
- FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
- }
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return result;
}
INTERCEPTOR(int, atoi, const char *nptr) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atoi);
#if SANITIZER_MAC
- if (!asan_inited) return REAL(atoi)(nptr);
+ if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@@ -601,13 +649,15 @@ INTERCEPTOR(int, atoi, const char *nptr) {
// different from int). So, we just imitate this behavior.
int result = REAL(strtol)(nptr, &real_endptr, 10);
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atol);
#if SANITIZER_MAC
- if (!asan_inited) return REAL(atol)(nptr);
+ if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@@ -616,33 +666,28 @@ INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
char *real_endptr;
long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT
char **endptr, int base) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(strtoll)(nptr, endptr, base);
}
char *real_endptr;
long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT
- if (endptr != 0) {
- *endptr = real_endptr;
- }
- // If base has unsupported value, strtoll can exit with EINVAL
- // without reading any characters. So do additional checks only
- // if base is valid.
- if (IsValidStrtolBase(base)) {
- FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
- }
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return result;
}
INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atoll);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(atoll)(nptr);
@@ -650,7 +695,7 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
char *real_endptr;
long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
@@ -664,40 +709,24 @@ static void AtCxaAtexit(void *unused) {
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
- if (!asan_inited) return REAL(__cxa_atexit)(func, arg, dso_handle);
+ if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
#endif
ENSURE_ASAN_INITED();
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
- REAL(__cxa_atexit)(AtCxaAtexit, 0, 0);
+ REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
return res;
}
#endif // ASAN_INTERCEPT___CXA_ATEXIT
-#if SANITIZER_WINDOWS
-INTERCEPTOR_WINAPI(DWORD, CreateThread,
- void* security, uptr stack_size,
- DWORD (__stdcall *start_routine)(void*), void* arg,
- DWORD thr_flags, void* tid) {
- // Strict init-order checking in thread-hostile.
- if (flags()->strict_init_order)
- StopInitOrderChecking();
- GET_STACK_TRACE_THREAD;
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t = AsanThread::Create(start_routine, arg);
- CreateThreadContextArgs args = { t, &stack };
- bool detached = false; // FIXME: how can we determine it on Windows?
- asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
- return REAL(CreateThread)(security, stack_size,
- asan_thread_start, t, thr_flags, tid);
-}
-
-namespace __asan {
-void InitializeWindowsInterceptors() {
- ASAN_INTERCEPT_FUNC(CreateThread);
+#if ASAN_INTERCEPT_FORK
+INTERCEPTOR(int, fork, void) {
+ ENSURE_ASAN_INITED();
+ if (common_flags()->coverage) CovBeforeFork();
+ int pid = REAL(fork)();
+ if (common_flags()->coverage) CovAfterFork(pid);
+ return pid;
}
-
-} // namespace __asan
-#endif
+#endif // ASAN_INTERCEPT_FORK
// ---------------------- InitializeAsanInterceptors ---------------- {{{1
namespace __asan {
@@ -705,10 +734,9 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
was_called_once = true;
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
// Intercept mem* functions.
- ASAN_INTERCEPT_FUNC(memcmp);
ASAN_INTERCEPT_FUNC(memmove);
ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
@@ -741,18 +769,13 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(strtoll);
#endif
-#if ASAN_INTERCEPT_MLOCKX
- // Intercept mlock/munlock.
- ASAN_INTERCEPT_FUNC(mlock);
- ASAN_INTERCEPT_FUNC(munlock);
- ASAN_INTERCEPT_FUNC(mlockall);
- ASAN_INTERCEPT_FUNC(munlockall);
-#endif
-
// Intercept signal- and jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
ASAN_INTERCEPT_FUNC(sigaction);
+#if SANITIZER_ANDROID
+ ASAN_INTERCEPT_FUNC(bsd_signal);
+#endif
ASAN_INTERCEPT_FUNC(signal);
#endif
#if ASAN_INTERCEPT_SWAPCONTEXT
@@ -767,27 +790,31 @@ void InitializeAsanInterceptors() {
// Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW
- INTERCEPT_FUNCTION(__cxa_throw);
+ ASAN_INTERCEPT_FUNC(__cxa_throw);
#endif
// Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE
+#if defined(ASAN_PTHREAD_CREATE_VERSION)
+ ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION);
+#else
ASAN_INTERCEPT_FUNC(pthread_create);
#endif
+ ASAN_INTERCEPT_FUNC(pthread_join);
+#endif
// Intercept atexit function.
#if ASAN_INTERCEPT___CXA_ATEXIT
ASAN_INTERCEPT_FUNC(__cxa_atexit);
#endif
- // Some Windows-specific interceptors.
-#if SANITIZER_WINDOWS
- InitializeWindowsInterceptors();
+#if ASAN_INTERCEPT_FORK
+ ASAN_INTERCEPT_FUNC(fork);
#endif
- if (common_flags()->verbosity > 0) {
- Report("AddressSanitizer: libc interceptors initialized\n");
- }
+ InitializePlatformInterceptors();
+
+ VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
-} // namespace __asan
+} // namespace __asan
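
For reference, the pthread_create hunk above sets up a two-way handshake: the parent publishes the AsanThread object only after REAL(pthread_create) succeeds, and then blocks until the child has registered itself, so the ThreadStartParam on the parent's stack stays valid for the child. A minimal stand-alone sketch of the same pattern, with invented names (StartParam, thread_start) and std::atomic in place of the sanitizer's atomic helpers:

    // Illustrative handshake between a creating thread and its child.
    // Compile with: g++ -std=c++11 -pthread handshake.cc
    #include <atomic>
    #include <pthread.h>
    #include <sched.h>
    #include <cstdio>

    struct StartParam {
      std::atomic<void *> thread_obj{nullptr};  // published by the parent
      std::atomic<bool> is_registered{false};   // acknowledged by the child
    };

    static void *thread_start(void *arg) {
      StartParam *param = static_cast<StartParam *>(arg);
      void *obj;
      // Spin until the parent has published the fully constructed object.
      while ((obj = param->thread_obj.load(std::memory_order_acquire)) == nullptr)
        sched_yield();
      // ... register |obj| in a thread registry here ...
      param->is_registered.store(true, std::memory_order_release);
      std::printf("child sees object %p\n", obj);
      return nullptr;
    }

    int main() {
      static int fake_thread_object;  // stand-in for AsanThread::Create(...)
      StartParam param;               // lives on the parent's stack
      pthread_t tid;
      if (pthread_create(&tid, nullptr, thread_start, &param) == 0) {
        param.thread_obj.store(&fake_thread_object, std::memory_order_release);
        // Wait for the acknowledgement so |param| stays alive long enough and
        // the child is known to be registered before this function returns.
        while (!param.is_registered.load(std::memory_order_acquire))
          sched_yield();
        pthread_join(tid, nullptr);
      }
      return 0;
    }
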
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index cae4c7f0125..46c74176ed3 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -14,12 +14,75 @@
#include "asan_internal.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// Use macros to describe whether a specific function should be
+// intercepted on a given platform.
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
+# define ASAN_INTERCEPT__LONGJMP 1
+# define ASAN_INTERCEPT_STRDUP 1
+# define ASAN_INTERCEPT_INDEX 1
+# define ASAN_INTERCEPT_PTHREAD_CREATE 1
+# define ASAN_INTERCEPT_FORK 1
+#else
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
+# define ASAN_INTERCEPT__LONGJMP 0
+# define ASAN_INTERCEPT_STRDUP 0
+# define ASAN_INTERCEPT_INDEX 0
+# define ASAN_INTERCEPT_PTHREAD_CREATE 0
+# define ASAN_INTERCEPT_FORK 0
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+#else
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+#endif
+
+#if !SANITIZER_MAC
+# define ASAN_INTERCEPT_STRNLEN 1
+#else
+# define ASAN_INTERCEPT_STRNLEN 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT_SWAPCONTEXT 1
+#else
+# define ASAN_INTERCEPT_SWAPCONTEXT 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
+#else
+# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGLONGJMP 1
+#else
+# define ASAN_INTERCEPT_SIGLONGJMP 0
+#endif
+
+// Android bug: https://code.google.com/p/android/issues/detail?id=61799
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
+ !(SANITIZER_ANDROID && defined(__i386))
+# define ASAN_INTERCEPT___CXA_THROW 1
+#else
+# define ASAN_INTERCEPT___CXA_THROW 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT___CXA_ATEXIT 1
+#else
+# define ASAN_INTERCEPT___CXA_ATEXIT 0
+#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
DECLARE_REAL(char*, strchr, const char *str, int c)
-DECLARE_REAL(uptr, strlen, const char *s)
+DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
@@ -27,9 +90,34 @@ struct sigaction;
DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact)
+#if !SANITIZER_MAC
+#define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
+ VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
+ } while (0)
+#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \
+ VReport( \
+ 1, "AddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \
+ } while (0)
+#else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+#define ASAN_INTERCEPT_FUNC(name)
+#endif // SANITIZER_MAC
+
namespace __asan {
void InitializeAsanInterceptors();
+void InitializePlatformInterceptors();
+
+#define ENSURE_ASAN_INITED() do { \
+ CHECK(!asan_init_is_running); \
+ if (UNLIKELY(!asan_inited)) { \
+ AsanInitFromRtl(); \
+ } \
+} while (0)
} // namespace __asan
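
The ENSURE_ASAN_INITED() macro added above encodes a simple rule: an interceptor that fires before the normal initialization path may trigger initialization itself, but must never run while initialization is already in progress. A minimal sketch of that guard, with EnsureInited and InitFromRtl as invented stand-ins for the macro and AsanInitFromRtl():

    // Compile with: g++ -std=c++11 lazy_init.cc
    #include <cassert>

    static bool init_is_running = false;
    static bool inited = false;

    static void InitFromRtl() {
      init_is_running = true;
      // ... set up shadow memory, interceptors, thread registry ...
      init_is_running = false;
      inited = true;
    }

    static void EnsureInited() {
      assert(!init_is_running);  // an interceptor must never re-enter init
      if (!inited)
        InitFromRtl();
    }

    int main() {
      EnsureInited();  // the first interceptor call triggers initialization
      EnsureInited();  // later calls are cheap no-ops
      return 0;
    }
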
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index 7deed9f4607..079da9ca522 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -7,29 +7,39 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// This header can be included by the instrumented program to fetch
-// data (mostly allocator statistics) from ASan runtime library.
+// This header declares the AddressSanitizer runtime interface functions.
+// The runtime library has to define these functions so the instrumented program
+// can call them.
+//
+// See also include/sanitizer/asan_interface.h
//===----------------------------------------------------------------------===//
#ifndef ASAN_INTERFACE_INTERNAL_H
#define ASAN_INTERFACE_INTERNAL_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "asan_init_version.h"
+
using __sanitizer::uptr;
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
- // Everytime the asan ABI changes we also change the version number in this
- // name. Objects build with incompatible asan ABI version
- // will not link with run-time.
- // Changes between ABI versions:
- // v1=>v2: added 'module_name' to __asan_global
- // v2=>v3: stack frame description (created by the compiler)
- // contains the function PC as the 3-rd field (see
- // DescribeAddressIfStack).
- SANITIZER_INTERFACE_ATTRIBUTE void __asan_init_v3();
- #define __asan_init __asan_init_v3
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();
+
+ // This function exists purely to get a linker/loader error when using
+ // incompatible versions of instrumentation and runtime library. Please note
+ // that __asan_version_mismatch_check is a macro that is replaced with
+ // __asan_version_mismatch_check_vXXX at compile-time.
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check();
+
+ // This structure is used to describe the source location of a place where
+ // a global was defined.
+ struct __asan_global_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+ };
// This structure describes an instrumented global variable.
struct __asan_global {
@@ -40,6 +50,8 @@ extern "C" {
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
+ __asan_global_source_location *location; // Source location of a global,
+ // or NULL if it is unknown.
};
// These two functions should be called by the instrumented code.
@@ -75,7 +87,7 @@ extern "C" {
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
- bool __asan_address_is_poisoned(void const volatile *addr);
+ int __asan_address_is_poisoned(void const volatile *addr);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
@@ -84,11 +96,42 @@ extern "C" {
void __asan_describe_address(uptr addr);
SANITIZER_INTERFACE_ATTRIBUTE
- void __asan_report_error(uptr pc, uptr bp, uptr sp,
- uptr addr, bool is_write, uptr access_size);
+ int __asan_report_present();
SANITIZER_INTERFACE_ATTRIBUTE
- int __asan_set_error_exit_code(int exit_code);
+ uptr __asan_get_report_pc();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_bp();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_sp();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_address();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __asan_get_report_access_type();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_access_size();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char * __asan_get_report_description();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char * __asan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_report_error(uptr pc, uptr bp, uptr sp,
+ uptr addr, int is_write, uptr access_size, u32 exp);
+
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_death_callback(void (*callback)(void));
SANITIZER_INTERFACE_ATTRIBUTE
@@ -97,32 +140,78 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
- int out_size);
-
- SANITIZER_INTERFACE_ATTRIBUTE
- uptr __asan_get_estimated_allocated_size(uptr size);
-
- SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_free_bytes();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_unmapped_bytes();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_free_hook(void *ptr);
-
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr *__asan_test_only_reported_buggy_pointer;
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN_noabort(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN_noabort(uptr p, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size,
+ u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size,
+ u32 exp);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memcpy(void *dst, const void *src, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memset(void *s, int c, uptr n);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memmove(void* dest, const void* src, uptr n);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_cxx_array_cookie(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_load_cxx_array_cookie(uptr *p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_intra_object_redzone(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unpoison_intra_object_redzone(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_alloca_poison(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_allocas_unpoison(uptr top, uptr bottom);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
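
The key interface addition here is the |location| field on __asan_global, pointing at the new __asan_global_source_location record (or null when unknown). A small consumer of that record, where GlobalSourceLocation mirrors the struct declared above and PrintGlobalLocation is a hypothetical helper rather than part of the runtime interface:

    // Compile with: g++ -std=c++11 global_loc.cc
    #include <cstdio>

    struct GlobalSourceLocation {  // mirrors __asan_global_source_location
      const char *filename;
      int line_no;
      int column_no;
    };

    static void PrintGlobalLocation(const GlobalSourceLocation *loc) {
      if (!loc) {  // the field may be null when the location is unknown
        std::printf("global defined at <unknown location>\n");
        return;
      }
      std::printf("global defined at %s:%d:%d\n", loc->filename, loc->line_no,
                  loc->column_no);
    }

    int main() {
      GlobalSourceLocation loc = {"foo.cc", 42, 7};
      PrintGlobalLocation(&loc);
      PrintGlobalLocation(nullptr);
      return 0;
    }
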
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index ede273a7170..e31f2648b1a 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -19,8 +19,6 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_libc.h"
-#define ASAN_DEFAULT_FAILURE_EXITCODE 1
-
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# error "The AddressSanitizer run-time should not be"
" instrumented by AddressSanitizer"
@@ -28,26 +26,11 @@
// Build-time configuration options.
-// If set, asan will install its own SEGV signal handler.
-#ifndef ASAN_NEEDS_SEGV
-# if SANITIZER_ANDROID == 1
-# define ASAN_NEEDS_SEGV 0
-# else
-# define ASAN_NEEDS_SEGV 1
-# endif
-#endif
-
// If set, asan will intercept C++ exception api call(s).
#ifndef ASAN_HAS_EXCEPTIONS
# define ASAN_HAS_EXCEPTIONS 1
#endif
-// If set, asan uses the values of SHADOW_SCALE and SHADOW_OFFSET
-// provided by the instrumented objects. Otherwise constants are used.
-#ifndef ASAN_FLEXIBLE_MAPPING_AND_OFFSET
-# define ASAN_FLEXIBLE_MAPPING_AND_OFFSET 0
-#endif
-
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
@@ -58,38 +41,41 @@
# endif
#endif
-#ifndef ASAN_USE_PREINIT_ARRAY
-# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#ifndef ASAN_DYNAMIC
+# ifdef PIC
+# define ASAN_DYNAMIC 1
+# else
+# define ASAN_DYNAMIC 0
+# endif
#endif
// All internal functions in asan reside inside the __asan namespace
// to avoid namespace collisions with the user programs.
-// Seperate namespace also makes it simpler to distinguish the asan run-time
+// Separate namespace also makes it simpler to distinguish the asan run-time
// functions from the instrumented user code in a profile.
namespace __asan {
class AsanThread;
using __sanitizer::StackTrace;
+void AsanInitFromRtl();
+
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
-void ReplaceOperatorsNewAndDelete();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
// asan_linux.cc / asan_mac.cc / asan_win.cc
void *AsanDoesNotSupportStaticLinkage();
+void AsanCheckDynamicRTPrereqs();
+void AsanCheckIncompatibleRT();
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
+void AsanOnDeadlySignal(int, void *siginfo, void *context);
+void DisableReexec();
void MaybeReexec();
-bool AsanInterceptsSignal(int signum);
-void SetAlternateSignalStack();
-void UnsetAlternateSignalStack();
-void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
-void AsanPlatformThreadInit();
void StopInitOrderChecking();
// Wrapper for TLS/TSD.
@@ -100,7 +86,11 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
-// Platfrom-specific options.
+void *AsanDlSymNext(const char *sym);
+
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
+
+// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
@@ -112,9 +102,9 @@ bool PlatformHasDifferentMemcpyAndMemmove();
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
- if (&__asan_malloc_hook) __asan_malloc_hook(ptr, size)
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
#define ASAN_FREE_HOOK(ptr) \
- if (&__asan_free_hook) __asan_free_hook(ptr)
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
#define ASAN_ON_ERROR() \
if (&__asan_on_error) __asan_on_error()
@@ -138,6 +128,10 @@ const int kAsanContiguousContainerOOBMagic = 0xfc;
const int kAsanStackUseAfterScopeMagic = 0xf8;
const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe;
+const int kAsanArrayCookieMagic = 0xac;
+const int kAsanIntraObjectRedzone = 0xbb;
+const int kAsanAllocaLeftMagic = 0xca;
+const int kAsanAllocaRightMagic = 0xcb;
static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index 0692eb1f455..4e47d5a0496 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -11,11 +11,13 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -24,21 +26,52 @@
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
+#include <dlfcn.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <unwind.h>
-#if !SANITIZER_ANDROID
-// FIXME: where to get ucontext on Android?
-#include <sys/ucontext.h>
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
#endif
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <ucontext.h>
extern "C" void* _DYNAMIC;
+#else
+#include <sys/ucontext.h>
+#include <link.h>
+#endif
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+ __FreeBSD_version <= 902001 // v9.2
+#define ucontext_t xucontext_t
+#endif
+
+typedef enum {
+ ASAN_RT_VERSION_UNDEFINED = 0,
+ ASAN_RT_VERSION_DYNAMIC,
+ ASAN_RT_VERSION_STATIC,
+} asan_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+asan_rt_version_t __asan_rt_version;
+}
namespace __asan {
+void InitializePlatformInterceptors() {}
+
+void DisableReexec() {
+ // No need to re-exec on Linux.
+}
+
void MaybeReexec() {
// No need to re-exec on Linux.
}
@@ -48,68 +81,80 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if SANITIZER_ANDROID
- *pc = *sp = *bp = 0;
-#elif defined(__arm__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.arm_pc;
- *bp = ucontext->uc_mcontext.arm_fp;
- *sp = ucontext->uc_mcontext.arm_sp;
-# elif defined(__hppa__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.sc_iaoq[0];
- /* GCC uses %r3 whenever a frame pointer is needed. */
- *bp = ucontext->uc_mcontext.sc_gr[3];
- *sp = ucontext->uc_mcontext.sc_gr[30];
-# elif defined(__x86_64__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.gregs[REG_RIP];
- *bp = ucontext->uc_mcontext.gregs[REG_RBP];
- *sp = ucontext->uc_mcontext.gregs[REG_RSP];
-# elif defined(__i386__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.gregs[REG_EIP];
- *bp = ucontext->uc_mcontext.gregs[REG_EBP];
- *sp = ucontext->uc_mcontext.gregs[REG_ESP];
-# elif defined(__powerpc__) || defined(__powerpc64__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.regs->nip;
- *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
- // The powerpc{,64}-linux ABIs do not specify r31 as the frame
- // pointer, but GCC always uses r31 when we need a frame pointer.
- *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
-# elif defined(__sparc__)
- ucontext_t *ucontext = (ucontext_t*)context;
- uptr *stk_ptr;
-# if defined (__arch64__)
- *pc = ucontext->uc_mcontext.mc_gregs[MC_PC];
- *sp = ucontext->uc_mcontext.mc_gregs[MC_O6];
- stk_ptr = (uptr *) (*sp + 2047);
- *bp = stk_ptr[15];
-# else
- *pc = ucontext->uc_mcontext.gregs[REG_PC];
- *sp = ucontext->uc_mcontext.gregs[REG_O6];
- stk_ptr = (uptr *) *sp;
- *bp = stk_ptr[15];
-# endif
-# elif defined(__mips__)
- ucontext_t *ucontext = (ucontext_t*)context;
- *pc = ucontext->uc_mcontext.gregs[31];
- *bp = ucontext->uc_mcontext.gregs[30];
- *sp = ucontext->uc_mcontext.gregs[29];
+// FIXME: should we do anything for Android?
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
#else
-# error "Unsupported arch"
-#endif
+static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ // Continue until the first dynamic library is found
+ if (!info->dlpi_name || info->dlpi_name[0] == 0)
+ return 0;
+
+ // Ignore vDSO
+ if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
+ return 0;
+
+ *(const char **)data = info->dlpi_name;
+ return 1;
+}
+
+static bool IsDynamicRTName(const char *libname) {
+ return internal_strstr(libname, "libclang_rt.asan") ||
+ internal_strstr(libname, "libasan.so");
+}
+
+static void ReportIncompatibleRT() {
+ Report("Your application is linked against incompatible ASan runtimes.\n");
+ Die();
}
-bool AsanInterceptsSignal(int signum) {
- return signum == SIGSEGV && flags()->handle_segv;
+void AsanCheckDynamicRTPrereqs() {
+ if (!ASAN_DYNAMIC)
+ return;
+
+ // Ensure that dynamic RT is the first DSO in the list
+ const char *first_dso_name = nullptr;
+ dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
+ if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+ Report("ASan runtime does not come first in initial library list; "
+ "you should either link runtime to your application or "
+ "manually preload it with LD_PRELOAD.\n");
+ Die();
+ }
}
-void AsanPlatformThreadInit() {
- // Nothing here for now.
+void AsanCheckIncompatibleRT() {
+ if (ASAN_DYNAMIC) {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ __asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
+ ReportIncompatibleRT();
+ }
+ } else {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ // Ensure that dynamic runtime is not present. We should detect it
+ // as early as possible, otherwise ASan interceptors could bind to
+ // the functions in dynamic ASan runtime instead of the functions in
+ // system libraries, causing crashes later in ASan initialization.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ char filename[128];
+ while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
+ sizeof(filename), nullptr)) {
+ if (IsDynamicRTName(filename)) {
+ Report("Your application is linked against "
+ "incompatible ASan runtimes.\n");
+ Die();
+ }
+ }
+ __asan_rt_version = ASAN_RT_VERSION_STATIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
+ ReportIncompatibleRT();
+ }
+ }
}
+#endif // SANITIZER_ANDROID
#if !SANITIZER_ANDROID
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
@@ -123,6 +168,10 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
}
#endif
-} // namespace __asan
+void *AsanDlSymNext(const char *sym) {
+ return dlsym(RTLD_NEXT, sym);
+}
+
+} // namespace __asan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
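
The new AsanCheckDynamicRTPrereqs() relies on dl_iterate_phdr() walking loaded objects in link order, so inspecting the first real DSO (skipping the main executable and the vDSO) is enough to tell whether the dynamic runtime was preloaded. A stand-alone sketch of that check for Linux; the names are illustrative:

    // Compile with: g++ -std=c++11 first_dso.cc
    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <link.h>
    #include <cstring>
    #include <cstdio>

    static int FirstDsoCallback(struct dl_phdr_info *info, size_t, void *data) {
      // Skip the main executable, which has an empty name.
      if (!info->dlpi_name || info->dlpi_name[0] == '\0')
        return 0;
      // Skip the vDSO, whose name starts with "linux-".
      if (std::strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
        return 0;
      *static_cast<const char **>(data) = info->dlpi_name;
      return 1;  // a non-zero return stops the iteration
    }

    int main() {
      const char *first_dso = nullptr;
      dl_iterate_phdr(FirstDsoCallback, &first_dso);
      if (!first_dso) {
        std::printf("no dynamic libraries found\n");
        return 0;
      }
      bool looks_like_asan_rt = std::strstr(first_dso, "libclang_rt.asan") ||
                                std::strstr(first_dso, "libasan.so");
      std::printf("first DSO: %s (%s)\n", first_dso,
                  looks_like_asan_rt ? "ASan runtime" : "not the ASan runtime");
      return 0;
    }
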
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
index 8d01843afaf..20e37ffe7ef 100644
--- a/libsanitizer/asan/asan_mac.cc
+++ b/libsanitizer/asan/asan_mac.cc
@@ -15,79 +15,37 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
-#include "asan_mac.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mac.h"
+
+#if !SANITIZER_IOS
+#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+#else
+extern "C" {
+ extern char ***_NSGetArgv(void);
+}
+#endif
-#include <crt_externs.h> // for _NSGetArgv
#include <dlfcn.h> // for dladdr()
+#include <fcntl.h>
+#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
+#include <pthread.h>
+#include <stdlib.h> // for free()
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <stdlib.h> // for free()
#include <unistd.h>
-#include <libkern/OSAtomic.h>
namespace __asan {
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
- ucontext_t *ucontext = (ucontext_t*)context;
-# if SANITIZER_WORDSIZE == 64
- *pc = ucontext->uc_mcontext->__ss.__rip;
- *bp = ucontext->uc_mcontext->__ss.__rbp;
- *sp = ucontext->uc_mcontext->__ss.__rsp;
-# else
- *pc = ucontext->uc_mcontext->__ss.__eip;
- *bp = ucontext->uc_mcontext->__ss.__ebp;
- *sp = ucontext->uc_mcontext->__ss.__esp;
-# endif // SANITIZER_WORDSIZE
-}
-
-MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
-
-MacosVersion GetMacosVersionInternal() {
- int mib[2] = { CTL_KERN, KERN_OSRELEASE };
- char version[100];
- uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
- for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
- // Get the version length.
- CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
- CHECK_LT(len, maxlen);
- CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
- switch (version[0]) {
- case '9': return MACOS_VERSION_LEOPARD;
- case '1': {
- switch (version[1]) {
- case '0': return MACOS_VERSION_SNOW_LEOPARD;
- case '1': return MACOS_VERSION_LION;
- case '2': return MACOS_VERSION_MOUNTAIN_LION;
- case '3': return MACOS_VERSION_MAVERICKS;
- default: return MACOS_VERSION_UNKNOWN;
- }
- }
- default: return MACOS_VERSION_UNKNOWN;
- }
-}
-
-MacosVersion GetMacosVersion() {
- atomic_uint32_t *cache =
- reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
- MacosVersion result =
- static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
- if (result == MACOS_VERSION_UNINITIALIZED) {
- result = GetMacosVersionInternal();
- atomic_store(cache, result, memory_order_release);
- }
- return result;
-}
+void InitializePlatformInterceptors() {}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
@@ -109,35 +67,51 @@ LowLevelAllocator allocator_for_env;
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
void LeakyResetEnv(const char *name, const char *name_value) {
- char ***env_ptr = _NSGetEnviron();
- CHECK(env_ptr);
- char **environ = *env_ptr;
- CHECK(environ);
+ char **env = GetEnviron();
uptr name_len = internal_strlen(name);
- while (*environ != 0) {
- uptr len = internal_strlen(*environ);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
if (len > name_len) {
- const char *p = *environ;
+ const char *p = *env;
if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
// Match.
if (name_value) {
// Replace the old value with the new one.
- *environ = const_cast<char*>(name_value);
+ *env = const_cast<char*>(name_value);
} else {
// Shift the subsequent pointers back.
- char **del = environ;
+ char **del = env;
do {
del[0] = del[1];
} while (*del++);
}
}
}
- environ++;
+ env++;
}
}
+static bool reexec_disabled = false;
+
+void DisableReexec() {
+ reexec_disabled = true;
+}
+
+extern "C" double dyldVersionNumber;
+static const double kMinDyldVersionWithAutoInterposition = 360.0;
+
+bool DyldNeedsEnvVariable() {
+ // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
+ // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
+ // GetMacosVersion() doesn't work for the simulator. Let's instead check
+ // `dyldVersionNumber`, which is exported by dyld, against a known version
+ // number from the first OS release where this appeared.
+ return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
+}
+
void MaybeReexec() {
- if (!flags()->allow_reexec) return;
+ if (reexec_disabled) return;
+
// Make sure the dynamic ASan runtime library is preloaded so that the
// wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
// ourselves.
@@ -148,8 +122,12 @@ void MaybeReexec() {
uptr old_env_len = dyld_insert_libraries ?
internal_strlen(dyld_insert_libraries) : 0;
uptr fname_len = internal_strlen(info.dli_fname);
- if (!dyld_insert_libraries ||
- !REAL(strstr)(dyld_insert_libraries, info.dli_fname)) {
+ const char *dylib_name = StripModuleName(info.dli_fname);
+ uptr dylib_name_len = internal_strlen(dylib_name);
+
+ bool lib_is_in_env =
+ dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name);
+ if (DyldNeedsEnvVariable() && !lib_is_in_env) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
char program_name[1024];
@@ -172,63 +150,80 @@ void MaybeReexec() {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
- if (common_flags()->verbosity >= 1) {
- Report("exec()-ing the program with\n");
- Report("%s=%s\n", kDyldInsertLibraries, new_env);
- Report("to enable ASan wrappers.\n");
- Report("Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
- }
+ VReport(1, "exec()-ing the program with\n");
+ VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
+ VReport(1, "to enable ASan wrappers.\n");
execv(program_name, *_NSGetArgv());
- } else {
- // DYLD_INSERT_LIBRARIES is set and contains the runtime library.
- if (old_env_len == fname_len) {
- // It's just the runtime library name - fine to unset the variable.
- LeakyResetEnv(kDyldInsertLibraries, NULL);
+
+ // We get here only if execv() failed.
+ Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+ "which is required for ASan to work. ASan tried to set the "
+ "environment variable and re-execute itself, but execv() failed, "
+ "possibly because of sandbox restrictions. Make sure to launch the "
+ "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+ CHECK("execv failed" && 0);
+ }
+
+ if (!lib_is_in_env)
+ return;
+
+ // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+ // the dylib from the environment variable, because interceptors are installed
+ // and we don't want our children to inherit the variable.
+
+ uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+ // Allocate memory to hold the previous env var name, its value, the '='
+ // sign and the '\0' char.
+ char *new_env = (char*)allocator_for_env.Allocate(
+ old_env_len + 2 + env_name_len);
+ CHECK(new_env);
+ internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+ internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+ new_env[env_name_len] = '=';
+ char *new_env_pos = new_env + env_name_len + 1;
+
+ // Iterate over colon-separated pieces of |dyld_insert_libraries|.
+ char *piece_start = dyld_insert_libraries;
+ char *piece_end = NULL;
+ char *old_env_end = dyld_insert_libraries + old_env_len;
+ do {
+ if (piece_start[0] == ':') piece_start++;
+ piece_end = REAL(strchr)(piece_start, ':');
+ if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+ if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+ uptr piece_len = piece_end - piece_start;
+
+ char *filename_start =
+ (char *)internal_memrchr(piece_start, '/', piece_len);
+ uptr filename_len = piece_len;
+ if (filename_start) {
+ filename_start += 1;
+ filename_len = piece_len - (filename_start - piece_start);
} else {
- uptr env_name_len = internal_strlen(kDyldInsertLibraries);
- // Allocate memory to hold the previous env var name, its value, the '='
- // sign and the '\0' char.
- char *new_env = (char*)allocator_for_env.Allocate(
- old_env_len + 2 + env_name_len);
- CHECK(new_env);
- internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
- internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
- new_env[env_name_len] = '=';
- char *new_env_pos = new_env + env_name_len + 1;
-
- // Iterate over colon-separated pieces of |dyld_insert_libraries|.
- char *piece_start = dyld_insert_libraries;
- char *piece_end = NULL;
- char *old_env_end = dyld_insert_libraries + old_env_len;
- do {
- if (piece_start[0] == ':') piece_start++;
- piece_end = REAL(strchr)(piece_start, ':');
- if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
- if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
- uptr piece_len = piece_end - piece_start;
-
- // If the current piece isn't the runtime library name,
- // append it to new_env.
- if ((piece_len != fname_len) ||
- (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
- if (new_env_pos != new_env + env_name_len + 1) {
- new_env_pos[0] = ':';
- new_env_pos++;
- }
- internal_strncpy(new_env_pos, piece_start, piece_len);
- }
- // Move on to the next piece.
- new_env_pos += piece_len;
- piece_start = piece_end;
- } while (piece_start < old_env_end);
-
- // Can't use setenv() here, because it requires the allocator to be
- // initialized.
- // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
- // a separate function called after InitializeAllocator().
- LeakyResetEnv(kDyldInsertLibraries, new_env);
+ filename_start = piece_start;
}
- }
+
+ // If the current piece isn't the runtime library name,
+ // append it to new_env.
+ if ((dylib_name_len != filename_len) ||
+ (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+ if (new_env_pos != new_env + env_name_len + 1) {
+ new_env_pos[0] = ':';
+ new_env_pos++;
+ }
+ internal_strncpy(new_env_pos, piece_start, piece_len);
+ new_env_pos += piece_len;
+ }
+ // Move on to the next piece.
+ piece_start = piece_end;
+ } while (piece_start < old_env_end);
+
+ // Can't use setenv() here, because it requires the allocator to be
+ // initialized.
+ // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+ // a separate function called after InitializeAllocator().
+ if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+ LeakyResetEnv(kDyldInsertLibraries, new_env);
}
// No-op. Mac does not support static linkage anyway.
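
The second half of MaybeReexec() above strips the ASan dylib out of DYLD_INSERT_LIBRARIES so that child processes do not inherit it, comparing only the basename of each colon-separated entry against the runtime's dylib name. The same filtering, sketched with std::string for readability (the runtime itself cannot use it, since its allocator is not yet initialized at that point); the function name and sample paths are illustrative:

    // Compile with: g++ -std=c++11 filter_dyld.cc
    #include <string>
    #include <iostream>

    static std::string FilterInsertLibraries(const std::string &env,
                                             const std::string &dylib_name) {
      std::string out;
      size_t start = 0;
      while (start <= env.size()) {
        size_t end = env.find(':', start);
        if (end == std::string::npos) end = env.size();
        std::string piece = env.substr(start, end - start);
        // Compare only the file name portion of the piece to the dylib name.
        size_t slash = piece.rfind('/');
        std::string base =
            (slash == std::string::npos) ? piece : piece.substr(slash + 1);
        if (!piece.empty() && base != dylib_name) {
          if (!out.empty()) out += ':';
          out += piece;
        }
        start = end + 1;
      }
      return out;
    }

    int main() {
      // Prints only /usr/lib/libfoo.dylib; the runtime entry is dropped.
      std::cout << FilterInsertLibraries(
                       "/usr/lib/libfoo.dylib:"
                       "/path/libclang_rt.asan_osx_dynamic.dylib",
                       "libclang_rt.asan_osx_dynamic.dylib")
                << "\n";
      return 0;
    }
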
@@ -236,12 +231,11 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
-bool AsanInterceptsSignal(int signum) {
- return (signum == SIGSEGV || signum == SIGBUS) && flags()->handle_segv;
-}
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckDynamicRTPrereqs() {}
-void AsanPlatformThreadInit() {
-}
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckIncompatibleRT() {}
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
@@ -294,9 +288,8 @@ ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = GetCurrentThread();
if (!t) {
- t = AsanThread::Create(0, 0);
- CreateThreadContextArgs args = { t, stack };
- asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
+ t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
+ parent_tid, stack, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
@@ -309,11 +302,10 @@ extern "C"
void asan_dispatch_call_block_and_release(void *block) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
- if (common_flags()->verbosity >= 2) {
- Report("asan_dispatch_call_block_and_release(): "
- "context: %p, pthread_self: %p\n",
- block, pthread_self());
- }
+ VReport(2,
+ "asan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -328,7 +320,7 @@ using namespace __asan; // NOLINT
// The caller retains control of the allocated context.
extern "C"
asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
- StackTrace *stack) {
+ BufferedStackTrace *stack) {
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
asan_ctxt->block = ctxt;
@@ -344,13 +336,13 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
dispatch_function_t func) { \
GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
- if (common_flags()->verbosity >= 2) { \
+ if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
- PRINT_CURRENT_STACK(); \
- } \
- return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
- asan_dispatch_call_block_and_release); \
+ PRINT_CURRENT_STACK(); \
+ } \
+ return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
+ asan_dispatch_call_block_and_release); \
}
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
@@ -362,7 +354,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
@@ -375,7 +367,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
@@ -386,7 +378,6 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
-// FIXME: consolidate these declarations with asan_intercepted_functions.h.
void dispatch_async(dispatch_queue_t dq, void(^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void(^work)(void));
@@ -408,30 +399,39 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_async)(dq, asan_block);
}
INTERCEPTOR(void, dispatch_group_async,
dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_group_async)(dg, dq, asan_block);
}
INTERCEPTOR(void, dispatch_after,
dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_after)(when, queue, asan_block);
}
INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
}
INTERCEPTOR(void, dispatch_source_set_event_handler,
dispatch_source_t ds, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, asan_block);
}
diff --git a/libsanitizer/asan/asan_mac.h b/libsanitizer/asan/asan_mac.h
deleted file mode 100644
index 2d1d4b0bfb3..00000000000
--- a/libsanitizer/asan/asan_mac.h
+++ /dev/null
@@ -1,57 +0,0 @@
-//===-- asan_mac.h ----------------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Mac-specific ASan definitions.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_MAC_H
-#define ASAN_MAC_H
-
-// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
-// and subject to change in further CoreFoundation versions. Apple does not
-// guarantee any binary compatibility from release to release.
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
-#if defined(__BIG_ENDIAN__)
-#define CF_RC_BITS 0
-#endif
-
-#if defined(__LITTLE_ENDIAN__)
-#define CF_RC_BITS 3
-#endif
-
-// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
-typedef struct __CFRuntimeBase {
- uptr _cfisa;
- u8 _cfinfo[4];
-#if __LP64__
- u32 _rc;
-#endif
-} CFRuntimeBase;
-
-enum MacosVersion {
- MACOS_VERSION_UNINITIALIZED = 0,
- MACOS_VERSION_UNKNOWN,
- MACOS_VERSION_LEOPARD,
- MACOS_VERSION_SNOW_LEOPARD,
- MACOS_VERSION_LION,
- MACOS_VERSION_MOUNTAIN_LION,
- MACOS_VERSION_MAVERICKS
-};
-
-// Used by asan_malloc_mac.cc and asan_mac.cc
-extern "C" void __CFInitialize();
-
-namespace __asan {
-
-MacosVersion GetMacosVersion();
-void MaybeReplaceCFAllocator();
-
-} // namespace __asan
-
-#endif // ASAN_MAC_H
diff --git a/libsanitizer/asan/asan_malloc_linux.cc b/libsanitizer/asan/asan_malloc_linux.cc
index e3495cb0900..d03f1bb89c8 100644
--- a/libsanitizer/asan/asan_malloc_linux.cc
+++ b/libsanitizer/asan/asan_malloc_linux.cc
@@ -13,48 +13,14 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
-#if SANITIZER_ANDROID
-DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-DECLARE_REAL_AND_INTERCEPTOR(void*, calloc, uptr nmemb, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void*, realloc, void *ptr, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void*, memalign, uptr boundary, uptr size)
-
-struct MallocDebug {
- void* (*malloc)(uptr bytes);
- void (*free)(void* mem);
- void* (*calloc)(uptr n_elements, uptr elem_size);
- void* (*realloc)(void* oldMem, uptr bytes);
- void* (*memalign)(uptr alignment, uptr bytes);
-};
-
-const MallocDebug asan_malloc_dispatch ALIGNED(32) = {
- WRAP(malloc), WRAP(free), WRAP(calloc), WRAP(realloc), WRAP(memalign)
-};
-
-extern "C" const MallocDebug* __libc_malloc_dispatch;
-
-namespace __asan {
-void ReplaceSystemMalloc() {
- __libc_malloc_dispatch = &asan_malloc_dispatch;
-}
-} // namespace __asan
-
-#else // ANDROID
-
-namespace __asan {
-void ReplaceSystemMalloc() {
-}
-} // namespace __asan
-#endif // ANDROID
-
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
@@ -74,7 +40,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- if (!asan_inited) {
+ if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -99,8 +65,17 @@ INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
-INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
- ALIAS("memalign");
+INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
+ DTLS_on_libc_memalign(res, size * boundary);
+ return res;
+}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_CURRENT_PC_BP_SP;
@@ -146,4 +121,64 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
-#endif // SANITIZER_LINUX
+#if SANITIZER_ANDROID
+// Format of __libc_malloc_dispatch has changed in Android L.
+// While we are moving towards a solution that does not depend on bionic
+// internals, here is something to support both K* and L releases.
+struct MallocDebugK {
+ void *(*malloc)(uptr bytes);
+ void (*free)(void *mem);
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+};
+
+struct MallocDebugL {
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void (*free)(void *mem);
+ fake_mallinfo (*mallinfo)(void);
+ void *(*malloc)(uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ int (*posix_memalign)(void **memptr, uptr alignment, uptr size);
+ void* (*pvalloc)(uptr size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void* (*valloc)(uptr size);
+};
+
+ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = {
+ WRAP(malloc), WRAP(free), WRAP(calloc),
+ WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)};
+
+ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = {
+ WRAP(calloc), WRAP(free), WRAP(mallinfo),
+ WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign),
+ WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc),
+ WRAP(valloc)};
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+ void **__libc_malloc_dispatch_p =
+ (void **)AsanDlSymNext("__libc_malloc_dispatch");
+ if (__libc_malloc_dispatch_p) {
+ // Decide on K vs L dispatch format by the presence of
+ // __libc_malloc_default_dispatch export in libc.
+ void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch");
+ if (default_dispatch_p)
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k;
+ else
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l;
+ }
+}
+} // namespace __asan
+
+#else // SANITIZER_ANDROID
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+}
+} // namespace __asan
+#endif // SANITIZER_ANDROID
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
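The Android portion above selects between the KitKat- and Lollipop-era __libc_malloc_dispatch layouts by probing libc for an export that only the older release provides. A rough sketch of that probe-and-select pattern with plain dlsym(); the struct and function names below are illustrative stand-ins, and the real code resolves symbols through AsanDlSymNext() rather than dlopen()/dlsym():

#include <dlfcn.h>

// Toy stand-ins for the two bionic dispatch-table layouts; the real structs
// list the malloc entry points in the order each libc release expects.
struct MallocDispatchK {};
struct MallocDispatchL {};

static const MallocDispatchK dispatch_k = {};
static const MallocDispatchL dispatch_l = {};

// Point libc's debug dispatch table at whichever layout the running libc uses.
void ReplaceSystemMallocSketch() {
  void *scope = dlopen(nullptr, RTLD_NOW);  // handle to the global symbol scope
  void **table = (void **)dlsym(scope, "__libc_malloc_dispatch");
  if (!table) return;  // no debug dispatch table exported: nothing to do
  // Lollipop dropped the __libc_malloc_default_dispatch export, so its
  // presence identifies the older KitKat layout.
  bool is_kitkat = dlsym(scope, "__libc_malloc_default_dispatch") != nullptr;
  *table = is_kitkat ? (void *)&dispatch_k : (void *)&dispatch_l;
}

(Link with -ldl on glibc-based systems; bionic keeps dlsym in libc itself.)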
diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc
index 342e806e3b6..33ccbf09470 100644
--- a/libsanitizer/asan/asan_malloc_mac.cc
+++ b/libsanitizer/asan/asan_malloc_mac.cc
@@ -13,345 +13,51 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
-#include <AvailabilityMacros.h>
-#include <CoreFoundation/CFBase.h>
-#include <dlfcn.h>
-#include <malloc/malloc.h>
-#include <sys/mman.h>
-
-#include "asan_allocator.h"
#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
-// Similar code is used in Google Perftools,
-// http://code.google.com/p/google-perftools.
-
-// ---------------------- Replacement functions ---------------- {{{1
-using namespace __asan; // NOLINT
-
-// TODO(glider): do we need both zones?
-static malloc_zone_t *system_malloc_zone = 0;
-static malloc_zone_t asan_zone;
-
-INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
- vm_size_t start_size, unsigned zone_flags) {
- if (!asan_inited) __asan_init();
- GET_STACK_TRACE_MALLOC;
- uptr page_size = GetPageSizeCached();
- uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
- malloc_zone_t *new_zone =
- (malloc_zone_t*)asan_memalign(page_size, allocated_size,
- &stack, FROM_MALLOC);
- internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
- new_zone->zone_name = NULL; // The name will be changed anyway.
- if (GetMacosVersion() >= MACOS_VERSION_LION) {
- // Prevent the client app from overwriting the zone contents.
- // Library functions that need to modify the zone will set PROT_WRITE on it.
- // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
- mprotect(new_zone, allocated_size, PROT_READ);
- }
- return new_zone;
-}
-
-INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
- if (!asan_inited) __asan_init();
- return &asan_zone;
-}
-
-INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
- // FIXME: ASan should support purgeable allocations.
- // https://code.google.com/p/address-sanitizer/issues/detail?id=139
- if (!asan_inited) __asan_init();
- return &asan_zone;
-}
-
-INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
- // FIXME: ASan should support purgeable allocations. Ignoring them is fine
- // for now.
- if (!asan_inited) __asan_init();
-}
-
-INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
- // FIXME: ASan should support purgeable allocations. Ignoring them is fine
- // for now.
- if (!asan_inited) __asan_init();
- // Must return 0 if the contents were not purged since the last call to
- // malloc_make_purgeable().
- return 0;
-}
-
-INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
- if (!asan_inited) __asan_init();
- // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
- size_t buflen = 6 + (name ? internal_strlen(name) : 0);
- InternalScopedBuffer<char> new_name(buflen);
- if (name && zone->introspect == asan_zone.introspect) {
- internal_snprintf(new_name.data(), buflen, "asan-%s", name);
- name = new_name.data();
- }
-
- // Call the system malloc's implementation for both external and our zones,
- // since that appropriately changes VM region protections on the zone.
- REAL(malloc_set_zone_name)(zone, name);
-}
-
-INTERCEPTOR(void *, malloc, size_t size) {
- if (!asan_inited) __asan_init();
- GET_STACK_TRACE_MALLOC;
- void *res = asan_malloc(size, &stack);
- return res;
-}
-
-INTERCEPTOR(void, free, void *ptr) {
- if (!asan_inited) __asan_init();
- if (!ptr) return;
- GET_STACK_TRACE_FREE;
+using namespace __asan;
+#define COMMON_MALLOC_ZONE_NAME "asan"
+#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED()
+#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited
+#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
+#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_malloc(size, &stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_realloc(ptr, size, &stack);
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_calloc(count, size, &stack);
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+#define COMMON_MALLOC_FREE(ptr) \
+ GET_STACK_TRACE_FREE; \
asan_free(ptr, &stack, FROM_MALLOC);
-}
-
-INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
- if (!asan_inited) __asan_init();
- GET_STACK_TRACE_MALLOC;
- return asan_realloc(ptr, size, &stack);
-}
-
-INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
- if (!asan_inited) __asan_init();
- GET_STACK_TRACE_MALLOC;
- return asan_calloc(nmemb, size, &stack);
-}
-
-INTERCEPTOR(void *, valloc, size_t size) {
- if (!asan_inited) __asan_init();
- GET_STACK_TRACE_MALLOC;
- return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
-}
-
-INTERCEPTOR(size_t, malloc_good_size, size_t size) {
- if (!asan_inited) __asan_init();
- return asan_zone.introspect->good_size(&asan_zone, size);
-}
-
-INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
- if (!asan_inited) __asan_init();
- CHECK(memptr);
- GET_STACK_TRACE_MALLOC;
- void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
- if (result) {
- *memptr = result;
- return 0;
- }
- return -1;
-}
-
-namespace {
-
-// TODO(glider): the mz_* functions should be united with the Linux wrappers,
-// as they are basically copied from there.
-size_t mz_size(malloc_zone_t* zone, const void* ptr) {
- return asan_mz_size(ptr);
-}
-
-void *mz_malloc(malloc_zone_t *zone, size_t size) {
- if (!asan_inited) {
- CHECK(system_malloc_zone);
- return malloc_zone_malloc(system_malloc_zone, size);
- }
- GET_STACK_TRACE_MALLOC;
- return asan_malloc(size, &stack);
-}
-
-void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
- if (!asan_inited) {
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- const size_t kCallocPoolSize = 1024;
- static uptr calloc_memory_for_dlsym[kCallocPoolSize];
- static size_t allocated;
- size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
- void *mem = (void*)&calloc_memory_for_dlsym[allocated];
- allocated += size_in_words;
- CHECK(allocated < kCallocPoolSize);
- return mem;
- }
- GET_STACK_TRACE_MALLOC;
- return asan_calloc(nmemb, size, &stack);
-}
-
-void *mz_valloc(malloc_zone_t *zone, size_t size) {
- if (!asan_inited) {
- CHECK(system_malloc_zone);
- return malloc_zone_valloc(system_malloc_zone, size);
- }
- GET_STACK_TRACE_MALLOC;
- return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
-}
-
-#define GET_ZONE_FOR_PTR(ptr) \
- malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
- const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name
-
-void ALWAYS_INLINE free_common(void *context, void *ptr) {
- if (!ptr) return;
- GET_STACK_TRACE_FREE;
- // FIXME: need to retire this flag.
- if (!flags()->mac_ignore_invalid_free) {
- asan_free(ptr, &stack, FROM_MALLOC);
- } else {
- GET_ZONE_FOR_PTR(ptr);
- WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
- return;
- }
-}
-
-// TODO(glider): the allocation callbacks need to be refactored.
-void mz_free(malloc_zone_t *zone, void *ptr) {
- free_common(zone, ptr);
-}
-
-void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
- if (!ptr) {
- GET_STACK_TRACE_MALLOC;
- return asan_malloc(size, &stack);
- } else {
- if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_MALLOC;
- return asan_realloc(ptr, size, &stack);
- } else {
- // We can't recover from reallocating an unknown address, because
- // this would require reading at most |size| bytes from
- // potentially unaccessible memory.
- GET_STACK_TRACE_FREE;
- GET_ZONE_FOR_PTR(ptr);
- ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
- }
- }
-}
-
-void mz_destroy(malloc_zone_t* zone) {
- // A no-op -- we will not be destroyed!
- Report("mz_destroy() called -- ignoring\n");
-}
-
- // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
- if (!asan_inited) {
- CHECK(system_malloc_zone);
- return malloc_zone_memalign(system_malloc_zone, align, size);
- }
- GET_STACK_TRACE_MALLOC;
- return asan_memalign(align, size, &stack, FROM_MALLOC);
-}
-
-// This function is currently unused, and we build with -Werror.
-#if 0
-void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
- // TODO(glider): check that |size| is valid.
- UNIMPLEMENTED();
-}
-#endif
-#endif
-
-kern_return_t mi_enumerator(task_t task, void *,
- unsigned type_mask, vm_address_t zone_address,
- memory_reader_t reader,
- vm_range_recorder_t recorder) {
- // Should enumerate all the pointers we have. Seems like a lot of work.
- return KERN_FAILURE;
-}
-
-size_t mi_good_size(malloc_zone_t *zone, size_t size) {
- // I think it's always safe to return size, but we maybe could do better.
- return size;
-}
-
-boolean_t mi_check(malloc_zone_t *zone) {
- UNIMPLEMENTED();
-}
-
-void mi_print(malloc_zone_t *zone, boolean_t verbose) {
- UNIMPLEMENTED();
-}
-
-void mi_log(malloc_zone_t *zone, void *address) {
- // I don't think we support anything like this
-}
-
-void mi_force_lock(malloc_zone_t *zone) {
- asan_mz_force_lock();
-}
-
-void mi_force_unlock(malloc_zone_t *zone) {
- asan_mz_force_unlock();
-}
-
-void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
- AsanMallocStats malloc_stats;
- FillMallocStatistics(&malloc_stats);
- CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats));
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = asan_mz_size(ptr);
+#define COMMON_MALLOC_FILL_STATS(zone, stats) \
+ AsanMallocStats malloc_stats; \
+ FillMallocStatistics(&malloc_stats); \
+ CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
-}
-
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-boolean_t mi_zone_locked(malloc_zone_t *zone) {
- // UNIMPLEMENTED();
- return false;
-}
-#endif
-
-} // unnamed namespace
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ GET_STACK_TRACE_FREE; \
+ ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+#define COMMON_MALLOC_IGNORE_INVALID_FREE flags()->mac_ignore_invalid_free
+#define COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name) \
+ GET_STACK_TRACE_FREE; \
+ WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+#define COMMON_MALLOC_NAMESPACE __asan
-namespace __asan {
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
-void ReplaceSystemMalloc() {
- static malloc_introspection_t asan_introspection;
- // Ok to use internal_memset, these places are not performance-critical.
- internal_memset(&asan_introspection, 0, sizeof(asan_introspection));
-
- asan_introspection.enumerator = &mi_enumerator;
- asan_introspection.good_size = &mi_good_size;
- asan_introspection.check = &mi_check;
- asan_introspection.print = &mi_print;
- asan_introspection.log = &mi_log;
- asan_introspection.force_lock = &mi_force_lock;
- asan_introspection.force_unlock = &mi_force_unlock;
- asan_introspection.statistics = &mi_statistics;
-
- internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
-
- // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
- asan_zone.version = 4;
- asan_zone.zone_name = "asan";
- asan_zone.size = &mz_size;
- asan_zone.malloc = &mz_malloc;
- asan_zone.calloc = &mz_calloc;
- asan_zone.valloc = &mz_valloc;
- asan_zone.free = &mz_free;
- asan_zone.realloc = &mz_realloc;
- asan_zone.destroy = &mz_destroy;
- asan_zone.batch_malloc = 0;
- asan_zone.batch_free = 0;
- asan_zone.introspect = &asan_introspection;
-
- // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
- // Switch to version 6 on OSX 10.6 to support memalign.
- asan_zone.version = 6;
- asan_zone.free_definite_size = 0;
- asan_zone.memalign = &mz_memalign;
- asan_introspection.zone_locked = &mi_zone_locked;
#endif
-
- // Register the ASan zone.
- malloc_zone_register(&asan_zone);
-}
-} // namespace __asan
-
-#endif // SANITIZER_MAC
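The rewrite above turns asan_malloc_mac.cc into a thin adapter: it defines a family of COMMON_MALLOC_* macros and then includes sanitizer_common/sanitizer_malloc_mac.inc, which expands them into the full malloc-zone implementation shared by the sanitizers. A toy, single-file version of that macro-specialized include pattern (COMMON_ALLOC and toy_zone_malloc are made-up names):

#include <cstddef>
#include <cstdlib>

// In the real runtime the macro lives in asan_malloc_mac.cc and the function
// body lives in the shared .inc file; both are inlined here so the sketch
// compiles on its own.
#define COMMON_ALLOC(size) \
  void *p = std::malloc(size)  // ASan's macro calls asan_malloc(size, &stack)

extern "C" void *toy_zone_malloc(size_t size) {
  COMMON_ALLOC(size);  // tool-specific allocation, injected via the macro
  return p;
}

Each tool supplies its own macro bodies, so the malloc-zone boilerplate can be shared across sanitizers instead of being copied per runtime.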
diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc
index 1f2495ffc50..f2ab188bf5b 100644
--- a/libsanitizer/asan/asan_malloc_win.cc
+++ b/libsanitizer/asan/asan_malloc_win.cc
@@ -21,71 +21,77 @@
#include <stddef.h>
-// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
-// FIXME: Simply defining functions with the same signature in *.obj
-// files overrides the standard functions in *.lib
-// This works well for simple helloworld-like tests but might need to be
-// revisited in the future.
+// MT: Simply defining functions with the same signature in *.obj
+// files overrides the standard functions in the CRT.
+// MD: Memory allocation functions are defined in the CRT .dll,
+// so we have to intercept them before they are called for the first time.
+
+#if ASAN_DYNAMIC
+# define ALLOCATION_FUNCTION_ATTRIBUTE
+#else
+# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#endif
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void free(void *ptr) {
GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack, FROM_MALLOC);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void _free_dbg(void* ptr, int) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void _free_dbg(void *ptr, int) {
free(ptr);
}
+ALLOCATION_FUNCTION_ATTRIBUTE
void cfree(void *ptr) {
- CHECK(!"cfree() should not be used on Windows?");
+ CHECK(!"cfree() should not be used on Windows");
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _malloc_dbg(size_t size, int , const char*, int) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *calloc(size_t nmemb, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
- return calloc(n, size);
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
+ return calloc(nmemb, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
return calloc(nmemb, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *realloc(void *ptr, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
CHECK(!"_realloc_dbg should not exist!");
return 0;
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _recalloc(void* p, size_t n, size_t elem_size) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
const size_t size = n * elem_size;
@@ -94,13 +100,28 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
return realloc(p, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
}
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_expand(void *memblock, size_t size) {
+ // _expand is used in realloc-like functions to resize the buffer if possible.
+ // We don't want memory to stand still while resizing buffers, so return 0.
+ return 0;
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_expand_dbg(void *memblock, size_t size) {
+ return _expand(memblock, size);
+}
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
int _CrtDbgReport(int, const char*, int,
const char*, const char*, ...) {
ShowStatsAndAbort();
@@ -116,37 +137,38 @@ int _CrtSetReportMode(int, int) {
}
} // extern "C"
-using __interception::GetRealFunctionAddress;
-
-// We don't want to include "windows.h" in this file to avoid extra attributes
-// set on malloc/free etc (e.g. dllimport), so declare a few things manually:
-extern "C" int __stdcall VirtualProtect(void* addr, size_t size,
- DWORD prot, DWORD *old_prot);
-const int PAGE_EXECUTE_READWRITE = 0x40;
-
namespace __asan {
void ReplaceSystemMalloc() {
-#if defined(_DLL)
-# ifdef _WIN64
-# error ReplaceSystemMalloc was not tested on x64
-# endif
- char *crt_malloc;
- if (GetRealFunctionAddress("malloc", (void**)&crt_malloc)) {
- // Replace malloc in the CRT dll with a jump to our malloc.
- DWORD old_prot, unused;
- CHECK(VirtualProtect(crt_malloc, 16, PAGE_EXECUTE_READWRITE, &old_prot));
- REAL(memset)(crt_malloc, 0xCC /* int 3 */, 16); // just in case.
-
- ptrdiff_t jmp_offset = (char*)malloc - (char*)crt_malloc - 5;
- crt_malloc[0] = 0xE9; // jmp, should be followed by an offset.
- REAL(memcpy)(crt_malloc + 1, &jmp_offset, sizeof(jmp_offset));
-
- CHECK(VirtualProtect(crt_malloc, 16, old_prot, &unused));
-
- // FYI: FlushInstructionCache is needed on Itanium etc but not on x86/x64.
- }
-
- // FIXME: investigate whether anything else is needed.
+#if defined(ASAN_DYNAMIC)
+ // We don't check the result because CRT might not be used in the process.
+ __interception::OverrideFunction("free", (uptr)free);
+ __interception::OverrideFunction("malloc", (uptr)malloc);
+ __interception::OverrideFunction("_malloc_crt", (uptr)malloc);
+ __interception::OverrideFunction("calloc", (uptr)calloc);
+ __interception::OverrideFunction("_calloc_crt", (uptr)calloc);
+ __interception::OverrideFunction("realloc", (uptr)realloc);
+ __interception::OverrideFunction("_realloc_crt", (uptr)realloc);
+ __interception::OverrideFunction("_recalloc", (uptr)_recalloc);
+ __interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc);
+ __interception::OverrideFunction("_msize", (uptr)_msize);
+ __interception::OverrideFunction("_expand", (uptr)_expand);
+
+ // Override different versions of 'operator new' and 'operator delete'.
+ // No need to override the nothrow versions as they just wrap the throw
+ // versions.
+ // FIXME: Unfortunately, MSVC miscompiles the statements that take the
+ // addresses of the array versions of these operators,
+ // see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992
+ // We might want to try to work around this by [inline] assembly or compiling
+ // parts of the RTL with Clang.
+ void *(*op_new)(size_t sz) = operator new;
+ void (*op_delete)(void *p) = operator delete;
+ void *(*op_array_new)(size_t sz) = operator new[];
+ void (*op_array_delete)(void *p) = operator delete[];
+ __interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new);
+ __interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete);
+ __interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new);
+ __interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete);
#endif
}
} // namespace __asan
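With the dynamic CRT (ASAN_DYNAMIC) the old jump-patching of malloc in the CRT DLL is replaced by __interception::OverrideFunction() calls for every allocation entry point, and operator new/delete are redirected through their 32-bit MSVC-mangled exports. A small sketch of the address-taking trick used for the operators; OverrideAt below is only a stand-in for the interception machinery, which actually rewrites the target so calls land in the replacement:

#include <cstddef>
#include <cstdint>
#include <new>

// Stand-in: record that the export named `mangled_name` should be redirected
// to `replacement`. The real OverrideFunction patches machine code instead.
static bool OverrideAt(const char *mangled_name, uintptr_t replacement) {
  (void)mangled_name;
  (void)replacement;
  return true;  // pretend the override succeeded
}

void RegisterOperatorOverrides() {
  // A typed function pointer picks the wanted overload of the replacement
  // operator on the C++ side...
  void *(*op_new)(size_t) = operator new;
  void (*op_delete)(void *) = operator delete;
  // ...while the CRT export being overridden is named by its i386 mangling.
  OverrideAt("??2@YAPAXI@Z", (uintptr_t)op_new);     // operator new
  OverrideAt("??3@YAXPAX@Z", (uintptr_t)op_delete);  // operator delete
}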
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 1e37bc26e94..b584cfa2f3c 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -41,54 +41,127 @@
// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
//
-// Default Linux/i386 mapping:
+// Default Linux/i386 mapping on x86_64 machine:
// || `[0x40000000, 0xffffffff]` || HighMem ||
// || `[0x28000000, 0x3fffffff]` || HighShadow ||
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
-// Default Linux/MIPS mapping:
-// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
-// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
-// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
-// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
-// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
+// Default Linux/i386 mapping on i386 machine
+// (addresses starting with 0xc0000000 are reserved
+// for kernel and thus not sanitized):
+// || `[0x38000000, 0xbfffffff]` || HighMem ||
+// || `[0x27000000, 0x37ffffff]` || HighShadow ||
+// || `[0x24000000, 0x26ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
+// Default Linux/MIPS32 mapping:
+// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
+// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
+// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
+// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
+// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
+//
+// Default Linux/MIPS64 mapping:
+// || `[0x4000000000, 0xffffffffff]` || HighMem ||
+// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
+// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
+// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
+// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
+//
+// Default Linux/AArch64 (39-bit VMA) mapping:
+// || `[0x2000000000, 0x7fffffffff]` || highmem ||
+// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
+// || `[0x1200000000, 0x13ffffffff]` || shadowgap ||
+// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
+// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
+//
+// Default Linux/AArch64 (42-bit VMA) mapping:
+// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
+// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
+// || `[0x09000000000, 0x09fffffffff]` || shadowgap ||
+// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
+// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
+//
+// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
+// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
+// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
+// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000:
+// || `[0x60000000, 0xffffffff]` || HighMem ||
+// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
+// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
+// || `[0x40000000, 0x47ffffff]` || LowShadow ||
+// || `[0x00000000, 0x3fffffff]` || LowMem ||
+//
+// Default Windows/i386 mapping:
+// (the exact location of HighShadow/HighMem may vary depending
+// on WoW64, /LARGEADDRESSAWARE, etc).
+// || `[0x50000000, 0xffffffff]` || HighMem ||
+// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
+// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
+// || `[0x30000000, 0x35ffffff]` || LowShadow ||
+// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
-static const u64 kDefaultShadowOffset32 = 1ULL << 29;
+static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
+static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
+static const u64 kIosShadowOffset64 = 0x130000000;
+static const u64 kIosSimShadowOffset32 = 1ULL << 30;
+static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
+static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
+static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
-static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
-
-#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
-# define SHADOW_SCALE (__asan_mapping_scale)
-# define SHADOW_OFFSET (__asan_mapping_offset)
-#else
-# define SHADOW_SCALE kDefaultShadowScale
-# if SANITIZER_ANDROID
-# define SHADOW_OFFSET (0)
-# else
-# if SANITIZER_WORDSIZE == 32
-# if defined(__mips__)
-# define SHADOW_OFFSET kMIPS32_ShadowOffset32
-# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
-# endif
+static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
+static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
+static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
+
+#define SHADOW_SCALE kDefaultShadowScale
+
+
+#if SANITIZER_WORDSIZE == 32
+# if SANITIZER_ANDROID
+# define SHADOW_OFFSET (0)
+# elif defined(__mips__)
+# define SHADOW_OFFSET kMIPS32_ShadowOffset32
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# elif SANITIZER_WINDOWS
+# define SHADOW_OFFSET kWindowsShadowOffset32
+# elif SANITIZER_IOSSIM
+# define SHADOW_OFFSET kIosSimShadowOffset32
+# elif SANITIZER_IOS
+# define SHADOW_OFFSET kIosShadowOffset32
# else
-# if defined(__powerpc64__)
+# define SHADOW_OFFSET kDefaultShadowOffset32
+# endif
+#else
+# if defined(__aarch64__)
+# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# elif defined(__powerpc64__)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
-# elif SANITIZER_MAC
-# define SHADOW_OFFSET kDefaultShadowOffset64
-# else
-# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
-# endif
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# elif SANITIZER_MAC
+# define SHADOW_OFFSET kDefaultShadowOffset64
+# elif defined(__mips64)
+# define SHADOW_OFFSET kMIPS64_ShadowOffset64
+# elif SANITIZER_IOSSIM
+# define SHADOW_OFFSET kIosSimShadowOffset64
+# elif SANITIZER_IOS
+# define SHADOW_OFFSET kIosShadowOffset64
+# else
+# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
-# endif
-#endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
+#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
@@ -110,7 +183,8 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
-#define kZeroBaseShadowStart (1 << 18)
+#define kZeroBaseShadowStart 0
+#define kZeroBaseMaxShadowStart (1 << 18)
#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
: kZeroBaseShadowStart)
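All of the address tables above follow from one formula, MEM_TO_SHADOW(mem) = (mem >> SHADOW_SCALE) + SHADOW_OFFSET, with SHADOW_SCALE == 3 (one shadow byte per 8 application bytes). A small self-check of the new AArch64 39-bit numbers, assuming the 1ULL << 36 offset defined above:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kShadowScale  = 3;           // 8 app bytes -> 1 shadow byte
  const uint64_t kShadowOffset = 1ULL << 36;  // kAArch64_ShadowOffset64

  auto mem_to_shadow = [&](uint64_t mem) {
    return (mem >> kShadowScale) + kShadowOffset;
  };

  // lowmem [0, 0x0fffffffff] shadows into lowshadow [0x1000000000, 0x11ffffffff]
  assert(mem_to_shadow(0x0000000000ULL) == 0x1000000000ULL);
  assert(mem_to_shadow(0x0fffffffffULL) == 0x11ffffffffULL);
  // highmem [0x2000000000, 0x7fffffffff] shadows into
  // highshadow [0x1400000000, 0x1fffffffff]
  assert(mem_to_shadow(0x2000000000ULL) == 0x1400000000ULL);
  assert(mem_to_shadow(0x7fffffffffULL) == 0x1fffffffffULL);
  return 0;
}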
diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc
index beac8cdbdd5..719cdfa2bb9 100644
--- a/libsanitizer/asan/asan_new_delete.cc
+++ b/libsanitizer/asan/asan_new_delete.cc
@@ -14,20 +14,21 @@
#include "asan_internal.h"
#include "asan_stack.h"
+#include "interception/interception.h"
+
#include <stddef.h>
-namespace __asan {
-// This function is a no-op. We need it to make sure that object file
-// with our replacements will actually be loaded from static ASan
-// run-time library at link-time.
-void ReplaceOperatorsNewAndDelete() { }
-}
+// C++ operators can't have visibility attributes on Windows.
+#if SANITIZER_WINDOWS
+# define CXX_OPERATOR_ATTRIBUTE
+#else
+# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+#endif
using namespace __asan; // NOLINT
-// On Android new() goes through malloc interceptors.
-// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
-#if !SANITIZER_ANDROID
+// This code has issues on OSX.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
@@ -46,14 +47,23 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
-INTERCEPTOR_ATTRIBUTE
+// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.

+// http://svnweb.freebsd.org/base?view=revision&revision=232261
+#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+#include <sys/param.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define size_t unsigned
+#endif // __FreeBSD_version
+#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+
+CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
@@ -77,16 +87,32 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
asan_free(ptr, &stack, type);
#if !SANITIZER_MAC
-INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
-INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
+}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
@@ -102,5 +128,3 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
-
-#endif
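The two new sized overloads feed the compiler-supplied size into asan_sized_free(), letting the allocator cross-check it against the size recorded at allocation time (the C++14 sized-deallocation feature). A toy global overload showing the hook itself; it only logs and forwards, and is not the ASan implementation:

#include <cstddef>
#include <cstdio>
#include <new>

void operator delete(void *p, std::size_t size) noexcept {
  // A real tool would compare `size` against its own bookkeeping here.
  std::printf("sized delete of %zu bytes at %p\n", size, p);
  ::operator delete(p);  // forward to the unsized form for the actual release
}

int main() {
  int *q = new int(42);
  delete q;  // compilers in C++14 mode may call the sized overload here
  return 0;
}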
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 86d49909b68..39f74879b38 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -11,13 +11,26 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
+static atomic_uint8_t can_poison_memory;
+
+void SetCanPoisonMemory(bool value) {
+ atomic_store(&can_poison_memory, value, memory_order_release);
+}
+
+bool CanPoisonMemory() {
+ return atomic_load(&can_poison_memory, memory_order_acquire);
+}
+
void PoisonShadow(uptr addr, uptr size, u8 value) {
- if (!flags()->poison_heap) return;
+ if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
@@ -30,7 +43,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value) {
- if (!flags()->poison_heap) return;
+ if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
@@ -48,6 +61,36 @@ struct ShadowSegmentEndpoint {
}
};
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ uptr page_size = GetPageSizeCached();
+ uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
+ uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
+ FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+}
+
+void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
+ uptr end = ptr + size;
+ if (Verbosity()) {
+ Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
+ poison ? "" : "un", ptr, end, size);
+ if (Verbosity() >= 2)
+ PRINT_CURRENT_STACK();
+ }
+ CHECK(size);
+ CHECK_LE(size, 4096);
+ CHECK(IsAligned(end, SHADOW_GRANULARITY));
+ if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+ *(u8 *)MemToShadow(ptr) =
+ poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
+ ptr |= SHADOW_GRANULARITY - 1;
+ ptr++;
+ }
+ for (; ptr < end; ptr += SHADOW_GRANULARITY)
+ *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
+}
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -67,10 +110,8 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
- if (common_flags()->verbosity >= 1) {
- Printf("Trying to poison memory region [%p, %p)\n",
- (void*)beg_addr, (void*)end_addr);
- }
+ VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@@ -109,10 +150,8 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
- if (common_flags()->verbosity >= 1) {
- Printf("Trying to unpoison memory region [%p, %p)\n",
- (void*)beg_addr, (void*)end_addr);
- }
+ VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@@ -137,7 +176,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
}
-bool __asan_address_is_poisoned(void const volatile *addr) {
+int __asan_address_is_poisoned(void const volatile *addr) {
return __asan::AddressIsPoisoned((uptr)addr);
}
@@ -146,6 +185,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr end = beg + size;
if (!AddrIsInMem(beg)) return beg;
if (!AddrIsInMem(end)) return end;
+ CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
@@ -176,7 +216,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
__asan::AddressIsPoisoned(__p + __size - 1))) { \
GET_CURRENT_PC_BP_SP; \
uptr __bad = __asan_region_is_poisoned(__p, __size); \
- __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\
+ __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
} \
} while (false); \
@@ -217,6 +257,36 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
*p = x;
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_cxx_array_cookie(uptr p) {
+ if (SANITIZER_WORDSIZE != 64) return;
+ if (!flags()->poison_array_cookie) return;
+ uptr s = MEM_TO_SHADOW(p);
+ *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_load_cxx_array_cookie(uptr *p) {
+ if (SANITIZER_WORDSIZE != 64) return *p;
+ if (!flags()->poison_array_cookie) return *p;
+ uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
+ u8 sval = *reinterpret_cast<u8*>(s);
+ if (sval == kAsanArrayCookieMagic) return *p;
+ // If sval is not kAsanArrayCookieMagic it can only be freed memory,
+ // which means that we are going to get double-free. So, return 0 to avoid
+ // infinite loop of destructors. We don't want to report a double-free here
+ // though, so print a warning just in case.
+ // CHECK_EQ(sval, kAsanHeapFreeMagic);
+ if (sval == kAsanHeapFreeMagic) {
+ Report("AddressSanitizer: loaded array cookie from free-d memory; "
+ "expect a double-free report\n");
+ return 0;
+ }
+ // The cookie may remain unpoisoned if e.g. it comes from a custom
+ // operator new defined inside a class.
+ return *p;
+}
+
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
@@ -243,14 +313,12 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
- if (common_flags()->verbosity > 0)
- Report("poisoning: %p %zx\n", (void*)addr, size);
+ VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
}
void __asan_unpoison_stack_memory(uptr addr, uptr size) {
- if (common_flags()->verbosity > 0)
- Report("unpoisoning: %p %zx\n", (void*)addr, size);
+ VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, false);
}
@@ -258,33 +326,40 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
- if (common_flags()->verbosity >= 2)
- Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
- new_mid_p);
+ if (!flags()->detect_container_overflow) return;
+ VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
+ new_mid_p);
uptr beg = reinterpret_cast<uptr>(beg_p);
- uptr end= reinterpret_cast<uptr>(end_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
uptr granularity = SHADOW_GRANULARITY;
- CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
- IsAligned(beg, granularity));
+ if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
+ IsAligned(beg, granularity))) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
+ &stack);
+ }
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
uptr d1 = RoundDownTo(old_mid, granularity);
- uptr d2 = RoundUpTo(old_mid, granularity);
+ // uptr d2 = RoundUpTo(old_mid, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
// Make a quick sanity check that we are indeed in this state.
- if (d1 != d2)
- CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
+ //
+ // FIXME: Two of these three checks are disabled until we fix
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
+ // if (d1 != d2)
+ // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
CHECK_EQ(*(u8*)MemToShadow(a), 0);
- if (d2 + granularity <= c && c <= end)
- CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
- kAsanContiguousContainerOOBMagic);
+ // if (d2 + granularity <= c && c <= end)
+ // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+ // kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_mid, granularity);
uptr b2 = RoundUpTo(new_mid, granularity);
@@ -297,3 +372,60 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
*(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
}
}
+
+const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg_p, const void *mid_p, const void *end_p) {
+ if (!flags()->detect_container_overflow)
+ return nullptr;
+ uptr beg = reinterpret_cast<uptr>(beg_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
+ uptr mid = reinterpret_cast<uptr>(mid_p);
+ CHECK_LE(beg, mid);
+ CHECK_LE(mid, end);
+ // Check some bytes starting from beg, some bytes around mid, and some bytes
+ // ending with end.
+ uptr kMaxRangeToCheck = 32;
+ uptr r1_beg = beg;
+ uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
+ uptr r2_end = Min(end, mid + kMaxRangeToCheck);
+ uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
+ uptr r3_end = end;
+ for (uptr i = r1_beg; i < r1_end; i++)
+ if (AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ for (uptr i = r2_beg; i < mid; i++)
+ if (AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ for (uptr i = mid; i < r2_end; i++)
+ if (!AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ for (uptr i = r3_beg; i < r3_end; i++)
+ if (!AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ return nullptr;
+}
+
+int __sanitizer_verify_contiguous_container(const void *beg_p,
+ const void *mid_p,
+ const void *end_p) {
+ return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
+ end_p) == nullptr;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
+ AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
+ AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
+}
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+ return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
+}
+}
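__sanitizer_annotate_contiguous_container() and the new verification helpers above belong to the public sanitizer interface (declared in <sanitizer/common_interface_defs.h>); they are the hooks annotated container implementations call to tell ASan which suffix of a buffer is allocated but not yet in use. A hedged usage sketch for a hand-rolled container; TinyBuf and the ANNOTATE wrapper are illustrative, and the guard keeps the file buildable without ASan:

#include <cstddef>
#include <cstdlib>

#if defined(__has_feature)
# if __has_feature(address_sanitizer)
#  include <sanitizer/common_interface_defs.h>
#  define ANNOTATE(beg, end, old_mid, new_mid) \
     __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
# endif
#endif
#ifndef ANNOTATE
# define ANNOTATE(beg, end, old_mid, new_mid) ((void)0)
#endif

// A toy fixed-capacity buffer that keeps its unused tail poisoned, so reads
// past size() are reported as container-overflow. No capacity checks: toy code.
struct TinyBuf {
  static const size_t kCap = 64;  // a multiple of the 8-byte shadow granule
  char *data;
  size_t size;

  TinyBuf() : data((char *)std::malloc(kCap)), size(0) {
    ANNOTATE(data, data + kCap, data + kCap, data + size);  // whole tail unused
  }
  void push_back(char c) {
    ANNOTATE(data, data + kCap, data + size, data + size + 1);  // grow by one
    data[size++] = c;
  }
  ~TinyBuf() {
    ANNOTATE(data, data + kCap, data + size, data + kCap);  // unpoison the tail
    std::free(data);
  }
};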
diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h
index da79a0ff2e4..30e39e9077c 100644
--- a/libsanitizer/asan/asan_poisoning.h
+++ b/libsanitizer/asan/asan_poisoning.h
@@ -13,9 +13,14 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
+// Enable/disable memory poisoning.
+void SetCanPoisonMemory(bool value);
+bool CanPoisonMemory();
+
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
@@ -31,16 +36,40 @@ void PoisonShadowPartialRightRedzone(uptr addr,
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
- DCHECK(flags()->poison_heap);
+ DCHECK(CanPoisonMemory());
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
- REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ // FIXME: Page states are different on Windows, so using the same interface
+ // for mapping shadow and zeroing out pages doesn't "just work", so we should
+ // probably provide higher-level interface for these operations.
+ // For now, just memset on Windows.
+ if (value ||
+ SANITIZER_WINDOWS == 1 ||
+ shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ } else {
+ uptr page_size = GetPageSizeCached();
+ uptr page_beg = RoundUpTo(shadow_beg, page_size);
+ uptr page_end = RoundDownTo(shadow_end, page_size);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
+ }
+ }
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
- DCHECK(flags()->poison_heap);
+ DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
@@ -55,4 +84,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
+// Calls __sanitizer::FlushUnneededShadowMemory() on
+// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
+void FlushUnneededASanShadowMemory(uptr p, uptr size);
+
} // namespace __asan
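FastPoisonShadow() now avoids memset()ing huge zero-fill requests: the page-aligned middle of the shadow range is re-mapped (so the kernel supplies zero pages) and only the unaligned head and tail are written by hand. The split is plain RoundUpTo/RoundDownTo arithmetic; a quick self-check of that arithmetic with a 4096-byte page assumed:

#include <cassert>
#include <cstdint>

static uint64_t RoundUpTo(uint64_t x, uint64_t align) {
  return (x + align - 1) & ~(align - 1);
}
static uint64_t RoundDownTo(uint64_t x, uint64_t align) {
  return x & ~(align - 1);
}

int main() {
  const uint64_t kPage = 4096;
  const uint64_t shadow_beg = 0x10000123, shadow_end = 0x10009456;
  const uint64_t page_beg = RoundUpTo(shadow_beg, kPage);
  const uint64_t page_end = RoundDownTo(shadow_end, kPage);
  assert(page_beg == 0x10001000 && page_end == 0x10009000);
  // memset  [shadow_beg, page_beg)   -- unaligned head
  // re-mmap [page_beg,   page_end)   -- whole pages, zero-filled by the kernel
  // memset  [page_end,   shadow_end) -- unaligned tail
  return 0;
}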
diff --git a/libsanitizer/asan/asan_posix.cc b/libsanitizer/asan/asan_posix.cc
index ac4ec9e0191..5b532e950bb 100644
--- a/libsanitizer/asan/asan_posix.cc
+++ b/libsanitizer/asan/asan_posix.cc
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
#include "asan_internal.h"
#include "asan_interceptors.h"
@@ -19,6 +19,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h>
@@ -28,70 +29,54 @@
#include <sys/resource.h>
#include <unistd.h>
-static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
-
namespace __asan {
-static void MaybeInstallSigaction(int signum,
- void (*handler)(int, siginfo_t *, void *)) {
- if (!AsanInterceptsSignal(signum))
- return;
- struct sigaction sigact;
- REAL(memset)(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = handler;
- sigact.sa_flags = SA_SIGINFO;
- if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
- CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
- if (common_flags()->verbosity >= 1) {
- Report("Installed the sigaction for signal %d\n", signum);
- }
-}
-
-static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
- uptr addr = (uptr)siginfo->si_addr;
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ ScopedDeadlySignal signal_scope(GetCurrentThread());
+ int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
- if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
- uptr pc, sp, bp;
- GetPcSpBp(context, &pc, &sp, &bp);
- ReportSIGSEGV(pc, sp, bp, addr);
-}
-
-void SetAlternateSignalStack() {
- stack_t altstack, oldstack;
- CHECK_EQ(0, sigaltstack(0, &oldstack));
- // If the alternate stack is already in place, do nothing.
- if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
- // TODO(glider): the mapped stack should have the MAP_STACK flag in the
- // future. It is not required by man 2 sigaltstack now (they're using
- // malloc()).
- void* base = MmapOrDie(kAltStackSize, __FUNCTION__);
- altstack.ss_sp = base;
- altstack.ss_flags = 0;
- altstack.ss_size = kAltStackSize;
- CHECK_EQ(0, sigaltstack(&altstack, 0));
- if (common_flags()->verbosity > 0) {
- Report("Alternative stack for T%d set: [%p,%p)\n",
- GetCurrentTidOrInvalid(),
- altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
+ if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
+ SignalContext sig = SignalContext::Create(siginfo, context);
+
+  // An access at a reasonable offset above SP, or slightly below it (to
+  // account for the x86_64 or PowerPC redzone, an ARM push of multiple
+  // registers, etc.) is probably a stack overflow.
+ bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
+
+#if __powerpc__
+ // Large stack frames can be allocated with e.g.
+ // lis r0,-10000
+ // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
+  // If the store faults then sp will not have been updated, so the test
+  // above will not work, because the fault address will be more than just
+  // "slightly" below sp.
+ if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
+ u32 inst = *(unsigned *)sig.pc;
+ u32 ra = (inst >> 16) & 0x1F;
+ u32 opcd = inst >> 26;
+ u32 xo = (inst >> 1) & 0x3FF;
+ // Check for store-with-update to sp. The instructions we accept are:
+ // stbu rs,d(ra) stbux rs,ra,rb
+ // sthu rs,d(ra) sthux rs,ra,rb
+ // stwu rs,d(ra) stwux rs,ra,rb
+ // stdu rs,ds(ra) stdux rs,ra,rb
+ // where ra is r1 (the stack pointer).
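+    // For example, "stdux r1,r1,r0" (as above) has opcd == 31, xo == 181 and
+    // ra == 1.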
+ if (ra == 1 &&
+ (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
+ (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
+ IsStackAccess = true;
}
-}
-
-void UnsetAlternateSignalStack() {
- stack_t altstack, oldstack;
- altstack.ss_sp = 0;
- altstack.ss_flags = SS_DISABLE;
- altstack.ss_size = 0;
- CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
- UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
-}
-
-void InstallSignalHandlers() {
- // Set the alternate signal stack for the main thread.
- // This will cause SetAlternateSignalStack to be called twice, but the stack
- // will be actually set only once.
- if (flags()->use_sigaltstack) SetAlternateSignalStack();
- MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
- MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
+#endif // __powerpc__
+
+  // We also check si_code to filter out a SEGV caused by something other
+  // than hitting the guard page or unmapped memory, for example an
+  // unaligned memory access.
+ if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
+ ReportStackOverflow(sig);
+ else if (signo == SIGFPE)
+ ReportDeadlySignal("FPE", sig);
+ else
+ ReportDeadlySignal("SEGV", sig);
}
// ---------------------- TSD ---------------- {{{1
@@ -125,4 +110,4 @@ void PlatformTSDDtor(void *tsd) {
}
} // namespace __asan
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_POSIX
diff --git a/libsanitizer/asan/asan_preinit.cc b/libsanitizer/asan/asan_preinit.cc
index 31042401536..6cb115bd369 100644
--- a/libsanitizer/asan/asan_preinit.cc
+++ b/libsanitizer/asan/asan_preinit.cc
@@ -8,22 +8,16 @@
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Call __asan_init at the very early stage of process startup.
-// On Linux we use .preinit_array section (unless PIC macro is defined).
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
-#if ASAN_USE_PREINIT_ARRAY && !defined(PIC)
- // On Linux, we force __asan_init to be called before anyone else
- // by placing it into .preinit_array section.
- // FIXME: do we have anything like this on Mac?
+using namespace __asan;
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
+  // This code is linked into the main executable when -fsanitize=address is in
+ // the link flags. It can only use exported interface functions.
__attribute__((section(".preinit_array"), used))
void (*__local_asan_preinit)(void) = __asan_init;
-#elif SANITIZER_WINDOWS && defined(_DLL)
- // On Windows, when using dynamic CRT (/MD), we can put a pointer
- // to __asan_init into the global list of C initializers.
- // See crt0dat.c in the CRT sources for the details.
- #pragma section(".CRT$XIB", long, read) // NOLINT
- __declspec(allocate(".CRT$XIB")) void (*__asan_preinit)() = __asan_init;
#endif
diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc
index 70c4b481a2f..7f7eaf515e7 100644
--- a/libsanitizer/asan/asan_report.cc
+++ b/libsanitizer/asan/asan_report.cc
@@ -9,6 +9,7 @@
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
+
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
@@ -25,10 +26,23 @@ namespace __asan {
// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
-static char *error_message_buffer = 0;
+static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static uptr error_message_buffer_size = 0;
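+// Holds the parameters of the most recent error report; they are exposed to
+// the user through the __asan_get_report_* interface functions further below.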
+struct ReportData {
+ uptr pc;
+ uptr sp;
+ uptr bp;
+ uptr addr;
+ bool is_write;
+ uptr access_size;
+ const char *description;
+};
+
+static bool report_happened = false;
+static ReportData report_data = {};
+
void AppendToErrorMessageBuffer(const char *buffer) {
if (error_message_buffer) {
uptr length = internal_strlen(buffer);
@@ -38,16 +52,14 @@ void AppendToErrorMessageBuffer(const char *buffer) {
buffer, remaining);
error_message_buffer[error_message_buffer_size - 1] = '\0';
// FIXME: reallocate the buffer instead of truncating the message.
- error_message_buffer_pos += remaining > length ? length : remaining;
+ error_message_buffer_pos += Min(remaining, length);
}
}
// ---------------------- Decorator ------------------------------ {{{1
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
@@ -59,6 +71,7 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanHeapRightRedzoneMagic:
+ case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
@@ -73,6 +86,8 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
@@ -80,77 +95,119 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
return Red();
case kAsanInternalHeapMagic:
return Yellow();
+ case kAsanIntraObjectRedzone:
+ return Yellow();
default:
return Default();
}
}
const char *EndShadowByte() { return Default(); }
+ const char *MemoryByte() { return Magenta(); }
+ const char *EndMemoryByte() { return Default(); }
};
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintShadowByte(const char *before, u8 byte,
- const char *after = "\n") {
+static void PrintMemoryByte(InternalScopedString *str, const char *before,
+ u8 byte, bool in_shadow, const char *after = "\n") {
Decorator d;
- Printf("%s%s%x%x%s%s", before,
- d.ShadowByte(byte), byte >> 4, byte & 15, d.EndShadowByte(), after);
+ str->append("%s%s%x%x%s%s", before,
+ in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
+ byte >> 4, byte & 15,
+ in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
+}
+
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
+ PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
}
-static void PrintShadowBytes(const char *before, u8 *bytes,
- u8 *guilty, uptr n) {
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
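+  // The shadow byte that 'guilty' points to is bracketed in the output,
+  // e.g. "[fa]", so the reader can spot the byte for the buggy address.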
Decorator d;
- if (before)
- Printf("%s%p:", before, bytes);
+ if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
- const char *before = p == guilty ? "[" :
- (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
- PrintShadowByte(before, *p, after);
+ PrintShadowByte(str, before, *p, after);
}
- Printf("\n");
-}
-
-static void PrintLegend() {
- Printf("Shadow byte legend (one shadow byte represents %d "
- "application bytes):\n", (int)SHADOW_GRANULARITY);
- PrintShadowByte(" Addressable: ", 0);
- Printf(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
- PrintShadowByte("", i, " ");
- Printf("\n");
- PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
- PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
- PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
- PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
- PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
- PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
- PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
- PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
- PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
- PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
- PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
- PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
- PrintShadowByte(" Contiguous container OOB:",
+ str->append("\n");
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Heap right redzone: ",
+ kAsanHeapRightRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack partial redzone: ",
+ kAsanStackPartialRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
- PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
+ PrintShadowByte(str, " Intra object redzone: ",
+ kAsanIntraObjectRedzone);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
+ PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
}
-static void PrintShadowMemoryForAddress(uptr addr) {
- if (!AddrIsInMem(addr))
+void MaybeDumpInstructionBytes(uptr pc) {
+ if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/false, " ");
+ }
+ str.append("\n");
+ } else {
+ str.append("unaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+static void PrintShadowMemoryForAddress(uptr addr) {
+ if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- Printf("Shadow bytes around the buggy address:\n");
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
- PrintShadowBytes(prefix,
- (u8*)(aligned_shadow + i * n_bytes_per_row),
- (u8*)shadow_addr, n_bytes_per_row);
+ PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
+ (u8 *)shadow_addr, n_bytes_per_row);
}
- if (flags()->print_legend)
- PrintLegend();
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
@@ -182,62 +239,108 @@ static bool IsASCII(unsigned char c) {
static const char *MaybeDemangleGlobalName(const char *name) {
  // We can spoil names of globals with C linkage, so use a heuristic
// approach to check if the name should be demangled.
- return (name[0] == '_' && name[1] == 'Z')
- ? Symbolizer::Get()->Demangle(name)
- : name;
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
-static void PrintGlobalNameIfASCII(const __asan_global &g) {
+static void PrintGlobalNameIfASCII(InternalScopedString *str,
+ const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char*)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char*)(g.beg + g.size - 1) != '\0') return;
- Printf(" '%s' is ascii string '%s'\n",
- MaybeDemangleGlobalName(g.name), (char*)g.beg);
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
}
-bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
- const __asan_global &g) {
- static const uptr kMinimalDistanceFromAnotherGlobal = 64;
- if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
- if (addr >= g.beg + g.size_with_redzone) return false;
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+  // Prefer the filename from the source location, if it is available.
+ if (g.location)
+ res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+static void PrintGlobalLocation(InternalScopedString *str,
+ const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location)
+ return;
+ if (g.location->line_no)
+ str->append(":%d", g.location->line_no);
+ if (g.location->column_no)
+ str->append(":%d", g.location->column_no);
+}
+
+static void DescribeAddressRelativeToGlobal(uptr addr, uptr size,
+ const __asan_global &g) {
+ InternalScopedString str(4096);
Decorator d;
- Printf("%s", d.Location());
+ str.append("%s", d.Location());
if (addr < g.beg) {
- Printf("%p is located %zd bytes to the left", (void*)addr, g.beg - addr);
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
} else if (addr + size > g.beg + g.size) {
if (addr < g.beg + g.size)
addr = g.beg + g.size;
- Printf("%p is located %zd bytes to the right", (void*)addr,
- addr - (g.beg + g.size));
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
} else {
// Can it happen?
- Printf("%p is located %zd bytes inside", (void*)addr, addr - g.beg);
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+ }
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.append("%s", d.EndLocation());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
+}
+
+static bool DescribeAddressIfGlobal(uptr addr, uptr size,
+ const char *bug_type) {
+ // Assume address is close to at most four globals.
+ const int kMaxGlobalsInReport = 4;
+ __asan_global globals[kMaxGlobalsInReport];
+ u32 reg_sites[kMaxGlobalsInReport];
+ int globals_num =
+ GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals));
+ if (globals_num == 0)
+ return false;
+ for (int i = 0; i < globals_num; i++) {
+ DescribeAddressRelativeToGlobal(addr, size, globals[i]);
+ if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
+ reg_sites[i]) {
+ Printf(" registered at:\n");
+ StackDepotGet(reg_sites[i]).Print();
+ }
}
- Printf(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
- MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size);
- Printf("%s", d.EndLocation());
- PrintGlobalNameIfASCII(g);
return true;
}
-bool DescribeAddressIfShadow(uptr addr) {
+bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr, bool print) {
if (AddrIsInMem(addr))
return false;
- static const char kAddrInShadowReport[] =
- "Address %p is located in the %s.\n";
- if (AddrIsInShadowGap(addr)) {
- Printf(kAddrInShadowReport, addr, "shadow gap area");
- return true;
- }
- if (AddrIsInHighShadow(addr)) {
- Printf(kAddrInShadowReport, addr, "high shadow area");
- return true;
- }
- if (AddrIsInLowShadow(addr)) {
- Printf(kAddrInShadowReport, addr, "low shadow area");
+ const char *area_type = nullptr;
+ if (AddrIsInShadowGap(addr)) area_type = "shadow gap";
+ else if (AddrIsInHighShadow(addr)) area_type = "high shadow";
+ else if (AddrIsInLowShadow(addr)) area_type = "low shadow";
+ if (area_type != nullptr) {
+ if (print) {
+ Printf("Address %p is located in the %s area.\n", addr, area_type);
+ } else {
+ CHECK(descr);
+ descr->region_kind = area_type;
+ }
return true;
}
CHECK(0 && "Address is not in memory and not in shadow?");
@@ -264,16 +367,15 @@ const char *ThreadNameWithParenthesis(u32 tid, char buff[],
return ThreadNameWithParenthesis(t, buff, buff_len);
}
-void PrintAccessAndVarIntersection(const char *var_name,
- uptr var_beg, uptr var_size,
- uptr addr, uptr access_size,
- uptr prev_var_end, uptr next_var_beg) {
- uptr var_end = var_beg + var_size;
+static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
+ uptr access_size, uptr prev_var_end,
+ uptr next_var_beg) {
+ uptr var_end = var.beg + var.size;
uptr addr_end = addr + access_size;
- const char *pos_descr = 0;
- // If the variable [var_beg, var_end) is the nearest variable to the
+ const char *pos_descr = nullptr;
+ // If the variable [var.beg, var_end) is the nearest variable to the
// current memory access, indicate it in the log.
- if (addr >= var_beg) {
+ if (addr >= var.beg) {
if (addr_end <= var_end)
pos_descr = "is inside"; // May happen if this is a use-after-return.
else if (addr < var_end)
@@ -282,59 +384,77 @@ void PrintAccessAndVarIntersection(const char *var_name,
next_var_beg - addr_end >= addr - var_end)
pos_descr = "overflows";
} else {
- if (addr_end > var_beg)
+ if (addr_end > var.beg)
pos_descr = "partially underflows";
else if (addr >= prev_var_end &&
- addr - prev_var_end >= var_beg - addr_end)
+ addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows";
}
- Printf(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd)", var.beg, var_end);
+ // Render variable name.
+ str.append(" '");
+ for (uptr i = 0; i < var.name_len; ++i) {
+ str.append("%c", var.name_pos[i]);
+ }
+ str.append("'");
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
- Printf("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.EndLocation());
} else {
- Printf("\n");
+ str.append("\n");
}
+ Printf("%s", str.data());
}
-struct StackVarDescr {
- uptr beg;
- uptr size;
- const char *name_pos;
- uptr name_len;
-};
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars) {
+ CHECK(frame_descr);
+ char *p;
+ // This string is created by the compiler and has the following form:
+ // "n alloc_1 alloc_2 ... alloc_n"
+ // where alloc_i looks like "offset size len ObjectName".
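+  // For example (illustrative), "2 32 8 4 buf1 80 16 4 buf2" describes two
+  // locals: 'buf1' (8 bytes at frame offset 32) and 'buf2' (16 bytes at 80).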
+ uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
+ if (n_objects == 0)
+ return false;
+
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
+ if (beg == 0 || size == 0 || *p != ' ') {
+ return false;
+ }
+ p++;
+ StackVarDescr var = {beg, size, p, len};
+ vars->push_back(var);
+ p += len;
+ }
+
+ return true;
+}
bool DescribeAddressIfStack(uptr addr, uptr access_size) {
AsanThread *t = FindThreadByStackAddress(addr);
if (!t) return false;
- const uptr kBufSize = 4095;
- char buf[kBufSize];
- uptr offset = 0;
- uptr frame_pc = 0;
- char tname[128];
- const char *frame_descr = t->GetFrameNameByAddr(addr, &offset, &frame_pc);
-
-#ifdef __powerpc64__
- // On PowerPC64, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
- frame_pc = *reinterpret_cast<uptr *>(frame_pc);
-#endif
- // This string is created by the compiler and has the following form:
- // "n alloc_1 alloc_2 ... alloc_n"
- // where alloc_i looks like "offset size len ObjectName ".
- CHECK(frame_descr);
Decorator d;
+ char tname[128];
Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%d%s "
- "at offset %zu in frame\n",
- addr, t->tid(),
- ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)),
- offset);
+ Printf("Address %p is located in stack of thread T%d%s", addr, t->tid(),
+ ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)));
+
+ // Try to fetch precise stack frame for this access.
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access)) {
+ Printf("%s\n", d.EndLocation());
+ return true;
+ }
+ Printf(" at offset %zu in frame%s\n", access.offset, d.EndLocation());
+
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
// The symbolizer may print more than one frame if inlining was involved.
@@ -342,50 +462,42 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from entirely different place
// (e.g. use-after-scope, or different thread's stack).
- StackTrace alloca_stack;
- alloca_stack.trace[0] = frame_pc + 16;
- alloca_stack.size = 1;
+#if defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc);
+#endif
+ access.frame_pc += 16;
Printf("%s", d.EndLocation());
- PrintStack(&alloca_stack);
+ StackTrace alloca_stack(&access.frame_pc, 1);
+ alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ Printf("AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n", access.frame_descr);
+    // 'addr' is a stack address, so return true even if we can't parse the
+    // frame descriptor.
+ return true;
+ }
+ uptr n_objects = vars.size();
// Report the number of stack objects.
- char *p;
- uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
- CHECK_GT(n_objects, 0);
Printf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
- InternalScopedBuffer<StackVarDescr> vars(n_objects);
for (uptr i = 0; i < n_objects; i++) {
- uptr beg, size;
- uptr len;
- beg = (uptr)internal_simple_strtoll(p, &p, 10);
- size = (uptr)internal_simple_strtoll(p, &p, 10);
- len = (uptr)internal_simple_strtoll(p, &p, 10);
- if (beg == 0 || size == 0 || *p != ' ') {
- Printf("AddressSanitizer can't parse the stack frame "
- "descriptor: |%s|\n", frame_descr);
- break;
- }
- p++;
- vars[i].beg = beg;
- vars[i].size = size;
- vars[i].name_pos = p;
- vars[i].name_len = len;
- p += len;
- }
- for (uptr i = 0; i < n_objects; i++) {
- buf[0] = 0;
- internal_strncat(buf, vars[i].name_pos,
- static_cast<uptr>(Min(kBufSize, vars[i].name_len)));
uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
- PrintAccessAndVarIntersection(buf, vars[i].beg, vars[i].size,
- offset, access_size,
+ PrintAccessAndVarIntersection(vars[i], access.offset, access_size,
prev_var_end, next_var_beg);
}
Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism or swapcontext\n"
- " (longjmp and C++ exceptions *are* supported)\n");
+ "some custom stack unwind mechanism or swapcontext\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
DescribeThread(t);
return true;
}
@@ -394,24 +506,26 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
uptr access_size) {
sptr offset;
Decorator d;
- Printf("%s", d.Location());
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
- Printf("%p is located %zd bytes to the left of", (void*)addr, offset);
+ str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
} else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
if (offset < 0) {
addr -= offset;
offset = 0;
}
- Printf("%p is located %zd bytes to the right of", (void*)addr, offset);
+ str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
} else if (chunk.AddrIsInside(addr, access_size, &offset)) {
- Printf("%p is located %zd bytes inside of", (void*)addr, offset);
+ str.append("%p is located %zd bytes inside of", (void*)addr, offset);
} else {
- Printf("%p is located somewhere around (this is AddressSanitizer bug!)",
- (void*)addr);
+ str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
+ (void *)addr);
}
- Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
- (void*)(chunk.Beg()), (void*)(chunk.End()));
- Printf("%s", d.EndLocation());
+ str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
+ (void *)(chunk.Beg()), (void *)(chunk.End()));
+ str.append("%s", d.EndLocation());
+ Printf("%s", str.data());
}
void DescribeHeapAddress(uptr addr, uptr access_size) {
@@ -426,20 +540,18 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
asanThreadRegistry().CheckLocked();
AsanThreadContext *alloc_thread =
GetThreadContextByTidLocked(chunk.AllocTid());
- StackTrace alloc_stack;
- chunk.GetAllocStack(&alloc_stack);
+ StackTrace alloc_stack = chunk.GetAllocStack();
char tname[128];
Decorator d;
- AsanThreadContext *free_thread = 0;
+ AsanThreadContext *free_thread = nullptr;
if (chunk.FreeTid() != kInvalidTid) {
free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
- StackTrace free_stack;
- chunk.GetFreeStack(&free_stack);
- PrintStack(&free_stack);
+ StackTrace free_stack = chunk.GetFreeStack();
+ free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n",
d.Allocation(), alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
@@ -450,19 +562,19 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
}
- PrintStack(&alloc_stack);
+ alloc_stack.Print();
DescribeThread(GetCurrentThread());
if (free_thread)
DescribeThread(free_thread);
DescribeThread(alloc_thread);
}
-void DescribeAddress(uptr addr, uptr access_size) {
+static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) {
// Check if this is shadow or shadow gap.
if (DescribeAddressIfShadow(addr))
return;
CHECK(AddrIsInMem(addr));
- if (DescribeAddressIfGlobal(addr, access_size))
+ if (DescribeAddressIfGlobal(addr, access_size, bug_type))
return;
if (DescribeAddressIfStack(addr, access_size))
return;
@@ -481,15 +593,19 @@ void DescribeThread(AsanThreadContext *context) {
}
context->announced = true;
char tname[128];
- Printf("Thread T%d%s", context->tid,
- ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
- Printf(" created by T%d%s here:\n",
- context->parent_tid,
- ThreadNameWithParenthesis(context->parent_tid,
- tname, sizeof(tname)));
- uptr stack_size;
- const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
- PrintStack(stack_trace, stack_size);
+ InternalScopedString str(1024);
+ str.append("Thread T%d%s", context->tid,
+ ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+ if (context->parent_tid == kInvalidTid) {
+ str.append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.append(
+ " created by T%d%s here:\n", context->parent_tid,
+ ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
  // Recursively describe the parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
@@ -504,68 +620,136 @@ void DescribeThread(AsanThreadContext *context) {
// immediately after printing error report.
class ScopedInErrorReport {
public:
- ScopedInErrorReport() {
- static atomic_uint32_t num_calls;
- static u32 reporting_thread_tid;
- if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
+ explicit ScopedInErrorReport(ReportData *report = nullptr,
+ bool fatal = false) {
+ halt_on_error_ = fatal || flags()->halt_on_error;
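+    // If another thread is already reporting and halt_on_error is off, we
+    // just wait on lock_ below instead of terminating the process.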
+
+ if (lock_.TryLock()) {
+ StartReporting(report);
+ return;
+ }
+
+ // ASan found two bugs in different threads simultaneously.
+
+ u32 current_tid = GetCurrentTidOrInvalid();
+ if (reporting_thread_tid_ == current_tid ||
+ reporting_thread_tid_ == kInvalidTid) {
+      // This is either an async signal or a nested error during error
+      // reporting. Fail simply to avoid deadlocks in Report().
+
+ // Can't use Report() here because of potential deadlocks
+ // in nested signal handlers.
+ const char msg[] = "AddressSanitizer: nested bug in the same thread, "
+ "aborting.\n";
+ WriteToFile(kStderrFd, msg, sizeof(msg));
+
+ internal__exit(common_flags()->exitcode);
+ }
+
+ if (halt_on_error_) {
// Do not print more than one report, otherwise they will mix up.
// Error reporting functions shouldn't return at this situation, as
- // they are defined as no-return.
- Report("AddressSanitizer: while reporting a bug found another one."
- "Ignoring.\n");
- u32 current_tid = GetCurrentTidOrInvalid();
- if (current_tid != reporting_thread_tid) {
- // ASan found two bugs in different threads simultaneously. Sleep
- // long enough to make sure that the thread which started to print
- // an error report will finish doing it.
- SleepForSeconds(Max(100, flags()->sleep_before_dying + 1));
- }
+ // they are effectively no-returns.
+
+ Report("AddressSanitizer: while reporting a bug found another one. "
+ "Ignoring.\n");
+
+ // Sleep long enough to make sure that the thread which started
+ // to print an error report will finish doing it.
+ SleepForSeconds(Max(100, flags()->sleep_before_dying + 1));
+
// If we're still not dead for some reason, use raw _exit() instead of
// Die() to bypass any additional checks.
- internal__exit(flags()->exitcode);
+ internal__exit(common_flags()->exitcode);
+ } else {
+      // The other thread will eventually finish reporting,
+      // so it's safe to wait.
+ lock_.Lock();
}
- ASAN_ON_ERROR();
- // Make sure the registry and sanitizer report mutexes are locked while
- // we're printing an error report.
- // We can lock them only here to avoid self-deadlock in case of
- // recursive reports.
- asanThreadRegistry().Lock();
- CommonSanitizerReportMutex.Lock();
- reporting_thread_tid = GetCurrentTidOrInvalid();
- Printf("===================================================="
- "=============\n");
+
+ StartReporting(report);
}
- // Destructor is NORETURN, as functions that report errors are.
- NORETURN ~ScopedInErrorReport() {
+
+ ~ScopedInErrorReport() {
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
+ // We may want to grab this lock again when printing stats.
+ asanThreadRegistry().Unlock();
// Print memory stats.
if (flags()->print_stats)
__asan_print_accumulated_stats();
if (error_report_callback) {
error_report_callback(error_message_buffer);
}
- Report("ABORTING\n");
- Die();
+ CommonSanitizerReportMutex.Unlock();
+ reporting_thread_tid_ = kInvalidTid;
+ lock_.Unlock();
+ if (halt_on_error_) {
+ Report("ABORTING\n");
+ Die();
+ }
+ }
+
+ private:
+ void StartReporting(ReportData *report) {
+ if (report) report_data = *report;
+ report_happened = true;
+ ASAN_ON_ERROR();
+ // Make sure the registry and sanitizer report mutexes are locked while
+ // we're printing an error report.
+ // We can lock them only here to avoid self-deadlock in case of
+ // recursive reports.
+ asanThreadRegistry().Lock();
+ CommonSanitizerReportMutex.Lock();
+ reporting_thread_tid_ = GetCurrentTidOrInvalid();
+ Printf("===================================================="
+ "=============\n");
}
+
+ static StaticSpinMutex lock_;
+ static u32 reporting_thread_tid_;
+ bool halt_on_error_;
};
-void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
+StaticSpinMutex ScopedInErrorReport::lock_;
+u32 ScopedInErrorReport::reporting_thread_tid_;
+
+void ReportStackOverflow(const SignalContext &sig) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
- " (pc %p sp %p bp %p T%d)\n",
- (void*)addr, (void*)pc, (void*)sp, (void*)bp,
- GetCurrentTidOrInvalid());
+ Report(
+ "ERROR: AddressSanitizer: stack-overflow on address %p"
+ " (pc %p bp %p sp %p T%d)\n",
+ (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
+ GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
- GET_STACK_TRACE_FATAL(pc, bp);
- PrintStack(&stack);
+ GET_STACK_TRACE_SIGNAL(sig);
+ stack.Print();
+ ReportErrorSummary("stack-overflow", &stack);
+}
+
+void ReportDeadlySignal(const char *description, const SignalContext &sig) {
+ ScopedInErrorReport in_report(/*report*/nullptr, /*fatal*/true);
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s on unknown address %p"
+ " (pc %p bp %p sp %p T%d)\n",
+ description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
+ (void *)sig.sp, GetCurrentTidOrInvalid());
+ if (sig.pc < GetPageSizeCached()) {
+ Report("Hint: pc points to the zero page.\n");
+ }
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_SIGNAL(sig);
+ stack.Print();
+ MaybeDumpInstructionBytes(sig.pc);
Printf("AddressSanitizer can not provide additional info.\n");
- ReportErrorSummary("SEGV", &stack);
+ ReportErrorSummary(description, &stack);
}
-void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
+void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -578,12 +762,36 @@ void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("double-free", &stack);
}
-void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
+ BufferedStackTrace *free_stack) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ u32 curr_tid = GetCurrentTidOrInvalid();
+ Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in "
+ "thread T%d%s:\n",
+ addr, curr_tid,
+ ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
+ Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf(" size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ asan_mz_size(reinterpret_cast<void*>(addr)), delete_size);
+ CHECK_GT(free_stack->size, 0);
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ DescribeHeapAddress(addr, 1);
+ ReportErrorSummary("new-delete-type-mismatch", &stack);
+ Report("HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
+void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -595,12 +803,12 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-free", &stack);
}
-void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
+void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
static const char *alloc_names[] =
@@ -616,14 +824,14 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- PrintStack(&stack);
+ stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("alloc-dealloc-mismatch", &stack);
- Report("HINT: if you don't care about these warnings you may set "
+ Report("HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
-void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
+void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@@ -631,27 +839,29 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
"malloc_usable_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-malloc_usable_size", stack);
}
-void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting to call "
- "__asan_get_allocated_size() for pointer which is "
+ "__sanitizer_get_allocated_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-__asan_get_allocated_size", stack);
+ ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
}
-void ReportStringFunctionMemoryRangesOverlap(
- const char *function, const char *offset1, uptr length1,
- const char *offset2, uptr length2, StackTrace *stack) {
+void ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
char bug_type[100];
@@ -661,55 +871,148 @@ void ReportStringFunctionMemoryRangesOverlap(
"memory ranges [%p,%p) and [%p, %p) overlap\n", \
bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
Printf("%s", d.EndWarning());
- PrintStack(stack);
- DescribeAddress((uptr)offset1, length1);
- DescribeAddress((uptr)offset2, length2);
+ stack->Print();
+ DescribeAddress((uptr)offset1, length1, bug_type);
+ DescribeAddress((uptr)offset2, length2, bug_type);
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ const char *bug_type = "negative-size-param";
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ DescribeAddress(offset, size, bug_type);
ReportErrorSummary(bug_type, stack);
}
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Report("ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!IsAligned(beg, granularity))
+ Report("ERROR: beg is not aligned by %d\n", granularity);
+ stack->Print();
+ ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+}
+
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
+ Printf("%s", d.EndWarning());
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, *g1);
+ PrintGlobalLocation(&g2_loc, *g2);
+ Printf(" [1] size=%zd '%s' %s\n", g1->size,
+ MaybeDemangleGlobalName(g1->name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", g2->size,
+ MaybeDemangleGlobalName(g2->name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ StackDepotGet(stack_id1).Print();
+ Printf(" [2]:\n");
+ StackDepotGet(stack_id2).Print();
+ }
+ Report("HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ InternalScopedString error_msg(256);
+ error_msg.append("odr-violation: global '%s' at %s",
+ MaybeDemangleGlobalName(g1->name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
+}
+
+// ----------------------- CheckForInvalidPointerPair ----------- {{{1
+static NOINLINE void
+ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
+ ScopedInErrorReport in_report;
+ const char *bug_type = "invalid-pointer-pair";
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ DescribeAddress(a1, 1, bug_type);
+ DescribeAddress(a2, 1, bug_type);
+ ReportErrorSummary(bug_type, &stack);
+}
+
+static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+ if (!flags()->detect_invalid_pointer_pairs) return;
+ uptr a1 = reinterpret_cast<uptr>(p1);
+ uptr a2 = reinterpret_cast<uptr>(p2);
+ AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
+ AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
+ bool valid1 = chunk1.IsValid();
+ bool valid2 = chunk2.IsValid();
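+  // A pair is invalid if exactly one pointer lies in a live heap chunk, or if
+  // both do but the chunks differ.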
+ if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) {
+ GET_CALLER_PC_BP_SP; \
+ return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
+ }
+}
// ----------------------- Mac-specific reports ----------------- {{{1
-void WarnMacFreeUnallocated(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
+void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
// Just print a warning here.
Printf("free_common(%p) -- attempting to free unallocated memory.\n"
"AddressSanitizer is ignoring this error on Mac OS now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
-void ReportMacMzReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
-void ReportMacCfReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) {
+void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n"
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
- PrintStack(stack);
+ stack->Print();
DescribeHeapAddress(addr, 1);
}
-} // namespace __asan
-
-// --------------------------- Interface --------------------- {{{1
-using namespace __asan; // NOLINT
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+ uptr access_size, u32 exp, bool fatal) {
+ ENABLE_FRAME_POINTER;
-void __asan_report_error(uptr pc, uptr bp, uptr sp,
- uptr addr, bool is_write, uptr access_size) {
- ScopedInErrorReport in_report;
+ // Optimization experiments.
+ // The experiments can be used to evaluate potential optimizations that remove
+ // instrumentation (assess false negatives). Instead of completely removing
+  // some instrumentation, the compiler can emit special calls into the
+  // runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
+  // and pass a mask of experiments (exp).
+ // The reaction to a non-zero value of exp is to be defined.
+ (void)exp;
// Determine the error type.
const char *bug_descr = "unknown-crash";
@@ -724,6 +1027,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
switch (*shadow_addr) {
case kAsanHeapLeftRedzoneMagic:
case kAsanHeapRightRedzoneMagic:
+ case kAsanArrayCookieMagic:
bug_descr = "heap-buffer-overflow";
break;
case kAsanHeapFreeMagic:
@@ -755,12 +1059,24 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
case kAsanGlobalRedzoneMagic:
bug_descr = "global-buffer-overflow";
break;
+ case kAsanIntraObjectRedzone:
+ bug_descr = "intra-object-overflow";
+ break;
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ bug_descr = "dynamic-stack-buffer-overflow";
+ break;
}
}
+
+ ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size,
+ bug_descr };
+ ScopedInErrorReport in_report(&report, fatal);
+
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s on address "
- "%p at pc 0x%zx bp 0x%zx sp 0x%zx\n",
+ "%p at pc %p bp %p sp %p\n",
bug_descr, (void*)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
@@ -774,26 +1090,84 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
d.EndAccess());
GET_STACK_TRACE_FATAL(pc, bp);
- PrintStack(&stack);
+ stack.Print();
- DescribeAddress(addr, access_size);
+ DescribeAddress(addr, access_size, bug_descr);
ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr);
}
+} // namespace __asan
+
+// --------------------------- Interface --------------------- {{{1
+using namespace __asan; // NOLINT
+
+void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
+ uptr access_size, u32 exp) {
+ ENABLE_FRAME_POINTER;
+ bool fatal = flags()->halt_on_error;
+ ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
+}
+
void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
error_report_callback = callback;
if (callback) {
error_message_buffer_size = 1 << 16;
error_message_buffer =
- (char*)MmapOrDie(error_message_buffer_size, __FUNCTION__);
+ (char*)MmapOrDie(error_message_buffer_size, __func__);
error_message_buffer_pos = 0;
}
}
void __asan_describe_address(uptr addr) {
- DescribeAddress(addr, 1);
+ // Thread registry must be locked while we're describing an address.
+ asanThreadRegistry().Lock();
+ DescribeAddress(addr, 1, "");
+ asanThreadRegistry().Unlock();
+}
+
+int __asan_report_present() {
+ return report_happened ? 1 : 0;
+}
+
+uptr __asan_get_report_pc() {
+ return report_data.pc;
+}
+
+uptr __asan_get_report_bp() {
+ return report_data.bp;
+}
+
+uptr __asan_get_report_sp() {
+ return report_data.sp;
+}
+
+uptr __asan_get_report_address() {
+ return report_data.addr;
+}
+
+int __asan_get_report_access_type() {
+ return report_data.is_write ? 1 : 0;
+}
+
+uptr __asan_get_report_access_size() {
+ return report_data.access_size;
+}
+
+const char *__asan_get_report_description() {
+ return report_data.description;
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_sub(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_cmp(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
}
+} // extern "C"
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h
index e4c756e557f..f2a18155232 100644
--- a/libsanitizer/asan/asan_report.h
+++ b/libsanitizer/asan/asan_report.h
@@ -16,40 +16,72 @@
namespace __asan {
+struct StackVarDescr {
+ uptr beg;
+ uptr size;
+ const char *name_pos;
+ uptr name_len;
+};
+
+struct AddressDescription {
+ char *name;
+ uptr name_size;
+ uptr region_address;
+ uptr region_size;
+ const char *region_kind;
+};
+
+// Returns the number of globals close to the provided address and copies
+// them to "globals" array.
+int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
+ int max_globals);
+bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
// The following functions prints address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
-bool DescribeAddressIfGlobal(uptr addr, uptr access_size);
-bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
- const __asan_global &g);
-bool DescribeAddressIfShadow(uptr addr);
+bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
+ bool print = true);
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
-// Determines memory type on its own.
-void DescribeAddress(uptr addr, uptr access_size);
-
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
-void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
-void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
-void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
-void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
- AllocType alloc_type,
- AllocType dealloc_type);
-void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
- StackTrace *stack);
-void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
- StackTrace *stack);
-void NORETURN ReportStringFunctionMemoryRangesOverlap(
- const char *function, const char *offset1, uptr length1,
- const char *offset2, uptr length2, StackTrace *stack);
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+ uptr access_size, u32 exp, bool fatal);
+void ReportStackOverflow(const SignalContext &sig);
+void ReportDeadlySignal(const char *description, const SignalContext &sig);
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
+ BufferedStackTrace *free_stack);
+void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
+void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
+void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
+ AllocType alloc_type,
+ AllocType dealloc_type);
+void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack);
+void ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack);
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack);
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack);
+
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2);
// Mac-specific errors and warnings.
-void WarnMacFreeUnallocated(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
-void NORETURN ReportMacMzReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
-void NORETURN ReportMacCfReallocUnknown(
- uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack);
+void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack);
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
+void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
} // namespace __asan
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index 537d40612aa..50b02453a2b 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -9,6 +9,8 @@
//
// Main file of the ASan run-time library.
//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface_internal.h"
@@ -18,14 +20,18 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "asan_suppressions.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "lsan/lsan_common.h"
+#include "ubsan/ubsan_init.h"
+#include "ubsan/ubsan_platform.h"
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
+uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
namespace __asan {
@@ -49,164 +55,20 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
}
}
- if (flags()->coverage)
- __sanitizer_cov_dump();
- if (death_callback)
- death_callback();
- if (flags()->abort_on_error)
- Abort();
- internal__exit(flags()->exitcode);
}
static void AsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n",
- file, line, cond, (uptr)v1, (uptr)v2);
+ Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here.
- PRINT_CURRENT_STACK();
+ PRINT_CURRENT_STACK_CHECK();
Die();
}
-// -------------------------- Flags ------------------------- {{{1
-static const int kDefaultMallocContextSize = 30;
-
-Flags asan_flags_dont_use_directly; // use via flags().
-
-static const char *MaybeCallAsanDefaultOptions() {
- return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
-static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
-#ifdef ASAN_DEFAULT_OPTIONS
-// Stringize the macro value.
-# define ASAN_STRINGIZE(x) #x
-# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
- return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
-#else
- return "";
-#endif
-}
-
-static void ParseFlagsFromString(Flags *f, const char *str) {
- CommonFlags *cf = common_flags();
- ParseCommonFlagsFromString(cf, str);
- CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
-
- ParseFlag(str, &f->quarantine_size, "quarantine_size");
- ParseFlag(str, &f->redzone, "redzone");
- CHECK_GE(f->redzone, 16);
- CHECK(IsPowerOfTwo(f->redzone));
-
- ParseFlag(str, &f->debug, "debug");
- ParseFlag(str, &f->report_globals, "report_globals");
- ParseFlag(str, &f->check_initialization_order, "check_initialization_order");
-
- ParseFlag(str, &f->replace_str, "replace_str");
- ParseFlag(str, &f->replace_intrin, "replace_intrin");
- ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free");
- ParseFlag(str, &f->detect_stack_use_after_return,
- "detect_stack_use_after_return");
- ParseFlag(str, &f->uar_stack_size_log, "uar_stack_size_log");
- ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size");
- ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte");
- ParseFlag(str, &f->exitcode, "exitcode");
- ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning");
- ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying");
- ParseFlag(str, &f->handle_segv, "handle_segv");
- ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler");
- ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
- ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size");
- ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit");
- ParseFlag(str, &f->abort_on_error, "abort_on_error");
- ParseFlag(str, &f->print_stats, "print_stats");
- ParseFlag(str, &f->print_legend, "print_legend");
- ParseFlag(str, &f->atexit, "atexit");
- ParseFlag(str, &f->coverage, "coverage");
- ParseFlag(str, &f->disable_core, "disable_core");
- ParseFlag(str, &f->allow_reexec, "allow_reexec");
- ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
- ParseFlag(str, &f->poison_heap, "poison_heap");
- ParseFlag(str, &f->poison_partial, "poison_partial");
- ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
- ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
- ParseFlag(str, &f->strict_init_order, "strict_init_order");
-}
-
-void InitializeFlags(Flags *f, const char *env) {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = kDefaultMallocContextSize;
-
- internal_memset(f, 0, sizeof(*f));
- f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
- f->redzone = 16;
- f->debug = false;
- f->report_globals = 1;
- f->check_initialization_order = false;
- f->replace_str = true;
- f->replace_intrin = true;
- f->mac_ignore_invalid_free = false;
- f->detect_stack_use_after_return = false; // Also needs the compiler flag.
- f->uar_stack_size_log = 0;
- f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
- f->malloc_fill_byte = 0xbe;
- f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
- f->allow_user_poisoning = true;
- f->sleep_before_dying = 0;
- f->handle_segv = ASAN_NEEDS_SEGV;
- f->allow_user_segv_handler = false;
- f->use_sigaltstack = false;
- f->check_malloc_usable_size = true;
- f->unmap_shadow_on_exit = false;
- f->abort_on_error = false;
- f->print_stats = false;
- f->print_legend = true;
- f->atexit = false;
- f->coverage = false;
- f->disable_core = (SANITIZER_WORDSIZE == 64);
- f->allow_reexec = true;
- f->print_full_thread_history = true;
- f->poison_heap = true;
- f->poison_partial = true;
- // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
- // TODO(glider,timurrrr): Fix known issues and enable this back.
- f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
- f->strict_memcmp = true;
- f->strict_init_order = false;
-
- // Override from compile definition.
- ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefiniton());
-
- // Override from user-specified string.
- ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
- if (cf->verbosity) {
- Report("Using the defaults from __asan_default_options: %s\n",
- MaybeCallAsanDefaultOptions());
- }
-
- // Override from command line.
- ParseFlagsFromString(f, env);
-
-#if !CAN_SANITIZE_LEAKS
- if (cf->detect_leaks) {
- Report("%s: detect_leaks is not supported on this platform.\n",
- SanitizerToolName);
- cf->detect_leaks = false;
- }
-#endif
-
- // Make "strict_init_order" imply "check_initialization_order".
- // TODO(samsonov): Use a single runtime flag for an init-order checker.
- if (f->strict_init_order) {
- f->check_initialization_order = true;
- }
-}
-
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
-void (*death_callback)(void);
#if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@@ -220,16 +82,22 @@ void ShowStatsAndAbort() {
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
-static void ReserveShadowMemoryRange(uptr beg, uptr end) {
+// We need to use an inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
- void *res = MmapFixedNoReserve(beg, size);
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ void *res = MmapFixedNoReserve(beg, size, name);
if (res != (void*)beg) {
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n", size);
Abort();
}
+ if (common_flags()->no_huge_pages_for_shadow)
+ NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
}
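The switch to an inclusive [beg, end] interval matters because end + 1 can wrap to zero when the shadow region ends at the very top of the address space. A tiny standalone C++ sketch of that arithmetic, with made-up addresses and uintptr_t standing in for uptr:

    #include <cstdint>
    #include <cstdio>

    int main() {
      using uptr = uintptr_t;
      const uptr kPageSize = 0x1000;
      // A hypothetical shadow region whose last byte is the top of the address
      // space: "end + 1" wraps to 0, so a half-open [beg, end + 1) range cannot
      // describe it, while the inclusive [beg, end] form still gives the size.
      uptr beg = ~uptr(0) - 16 * kPageSize + 1;  // page-aligned start
      uptr end = ~uptr(0);                       // inclusive end
      uptr size = end - beg + 1;
      std::printf("end + 1 wraps to %llu; size = %llu pages\n",
                  (unsigned long long)(end + 1),
                  (unsigned long long)(size / kPageSize));
      return 0;
    }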
// --------------- LowLevelAllocateCallback ---------- {{{1
@@ -240,12 +108,21 @@ static void OnLowLevelAllocate(uptr ptr, uptr size) {
// -------------------------- Run-time entry ------------------- {{{1
// exported functions
#define ASAN_REPORT_ERROR(type, is_write, size) \
-extern "C" NOINLINE INTERFACE_ATTRIBUTE \
-void __asan_report_ ## type ## size(uptr addr); \
-void __asan_report_ ## type ## size(uptr addr) { \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size(uptr addr) { \
GET_CALLER_PC_BP_SP; \
- __asan_report_error(pc, bp, sp, addr, is_write, size); \
-}
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size ## _noabort(uptr addr) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
+} \
ASAN_REPORT_ERROR(load, false, 1)
ASAN_REPORT_ERROR(load, false, 2)
@@ -258,17 +135,123 @@ ASAN_REPORT_ERROR(store, true, 4)
ASAN_REPORT_ERROR(store, true, 8)
ASAN_REPORT_ERROR(store, true, 16)
-#define ASAN_REPORT_ERROR_N(type, is_write) \
-extern "C" NOINLINE INTERFACE_ATTRIBUTE \
-void __asan_report_ ## type ## _n(uptr addr, uptr size); \
-void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
- GET_CALLER_PC_BP_SP; \
- __asan_report_error(pc, bp, sp, addr, is_write, size); \
-}
+#define ASAN_REPORT_ERROR_N(type, is_write) \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
+} \
ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true)
+#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
+ uptr sp = MEM_TO_SHADOW(addr); \
+ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
+ if (UNLIKELY(s)) { \
+ if (UNLIKELY(size >= SHADOW_GRANULARITY || \
+ ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ (s8)s)) { \
+ if (__asan_test_only_reported_buggy_pointer) { \
+ *__asan_test_only_reported_buggy_pointer = addr; \
+ } else { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \
+ fatal); \
+ } \
+ } \
+ }
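The callback body above is the instrumented fast path: it loads one shadow byte (two for 16-byte accesses) and, for accesses smaller than a granule, compares the last touched in-granule offset against the shadow value. A minimal standalone sketch of that partial-granule check, assuming the usual 8-byte granularity and taking the shadow byte as a plain parameter instead of reading it through MEM_TO_SHADOW; AccessIsBad and its arguments are illustrative:

    #include <cstdint>
    #include <cstdio>

    // Shadow byte semantics: 0 means the whole 8-byte granule is addressable,
    // a value k in 1..7 means only the first k bytes are.
    static const uintptr_t kGranularity = 8;

    bool AccessIsBad(uintptr_t addr, uintptr_t size, int8_t shadow_byte) {
      if (shadow_byte == 0) return false;       // whole granule addressable
      if (size >= kGranularity) return true;    // shadow is non-zero: bad
      // Offset of the last byte touched inside the granule.
      int8_t last = (int8_t)((addr & (kGranularity - 1)) + size - 1);
      return last >= shadow_byte;
    }

    int main() {
      // Granule with only its first 4 bytes addressable (shadow byte == 4).
      std::printf("%d\n", AccessIsBad(0x1000, 4, 4));  // 0: fits
      std::printf("%d\n", AccessIsBad(0x1002, 4, 4));  // 1: runs past byte 3
      return 0;
    }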
+
+#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+ void __asan_##type##size(uptr addr) { \
+ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \
+ } \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+ void __asan_exp_##type##size(uptr addr, u32 exp) { \
+ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \
+ } \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+ void __asan_##type##size ## _noabort(uptr addr) { \
+ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \
+ } \
+
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_loadN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, false, size, 0, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, false, size, exp, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_loadN_noabort(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, false, size, 0, false);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_storeN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, true, size, 0, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, true, size, exp, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_storeN_noabort(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, true, size, 0, false);
+ }
+}
+
// Force the linker to keep the symbols for various ASan interface functions.
// We want to keep those in the executable in order to let the instrumented
// dynamic libraries access the symbol even if it is not used by the executable
@@ -284,33 +267,39 @@ static NOINLINE void force_interface_symbols() {
case 3: __asan_report_load4(0); break;
case 4: __asan_report_load8(0); break;
case 5: __asan_report_load16(0); break;
- case 6: __asan_report_store1(0); break;
- case 7: __asan_report_store2(0); break;
- case 8: __asan_report_store4(0); break;
- case 9: __asan_report_store8(0); break;
- case 10: __asan_report_store16(0); break;
- case 12: __asan_register_globals(0, 0); break;
- case 13: __asan_unregister_globals(0, 0); break;
- case 14: __asan_set_death_callback(0); break;
- case 15: __asan_set_error_report_callback(0); break;
- case 16: __asan_handle_no_return(); break;
- case 17: __asan_address_is_poisoned(0); break;
- case 18: __asan_get_allocated_size(0); break;
- case 19: __asan_get_current_allocated_bytes(); break;
- case 20: __asan_get_estimated_allocated_size(0); break;
- case 21: __asan_get_free_bytes(); break;
- case 22: __asan_get_heap_size(); break;
- case 23: __asan_get_ownership(0); break;
- case 24: __asan_get_unmapped_bytes(); break;
- case 25: __asan_poison_memory_region(0, 0); break;
- case 26: __asan_unpoison_memory_region(0, 0); break;
- case 27: __asan_set_error_exit_code(0); break;
- case 30: __asan_before_dynamic_init(0); break;
- case 31: __asan_after_dynamic_init(); break;
- case 32: __asan_poison_stack_memory(0, 0); break;
- case 33: __asan_unpoison_stack_memory(0, 0); break;
- case 34: __asan_region_is_poisoned(0, 0); break;
- case 35: __asan_describe_address(0); break;
+ case 6: __asan_report_load_n(0, 0); break;
+ case 7: __asan_report_store1(0); break;
+ case 8: __asan_report_store2(0); break;
+ case 9: __asan_report_store4(0); break;
+ case 10: __asan_report_store8(0); break;
+ case 11: __asan_report_store16(0); break;
+ case 12: __asan_report_store_n(0, 0); break;
+ case 13: __asan_report_exp_load1(0, 0); break;
+ case 14: __asan_report_exp_load2(0, 0); break;
+ case 15: __asan_report_exp_load4(0, 0); break;
+ case 16: __asan_report_exp_load8(0, 0); break;
+ case 17: __asan_report_exp_load16(0, 0); break;
+ case 18: __asan_report_exp_load_n(0, 0, 0); break;
+ case 19: __asan_report_exp_store1(0, 0); break;
+ case 20: __asan_report_exp_store2(0, 0); break;
+ case 21: __asan_report_exp_store4(0, 0); break;
+ case 22: __asan_report_exp_store8(0, 0); break;
+ case 23: __asan_report_exp_store16(0, 0); break;
+ case 24: __asan_report_exp_store_n(0, 0, 0); break;
+ case 25: __asan_register_globals(nullptr, 0); break;
+ case 26: __asan_unregister_globals(nullptr, 0); break;
+ case 27: __asan_set_death_callback(nullptr); break;
+ case 28: __asan_set_error_report_callback(nullptr); break;
+ case 29: __asan_handle_no_return(); break;
+ case 30: __asan_address_is_poisoned(nullptr); break;
+ case 31: __asan_poison_memory_region(nullptr, 0); break;
+ case 32: __asan_unpoison_memory_region(nullptr, 0); break;
+ case 34: __asan_before_dynamic_init(nullptr); break;
+ case 35: __asan_after_dynamic_init(); break;
+ case 36: __asan_poison_stack_memory(0, 0); break;
+ case 37: __asan_unpoison_stack_memory(0, 0); break;
+ case 38: __asan_region_is_poisoned(0, 0); break;
+ case 39: __asan_describe_address(0); break;
}
}
@@ -334,8 +323,30 @@ static void InitializeHighMemEnd() {
CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
}
-static void ProtectGap(uptr a, uptr size) {
- CHECK_EQ(a, (uptr)Mprotect(a, size));
+static void ProtectGap(uptr addr, uptr size) {
+ if (!flags()->protect_shadow_gap)
+ return;
+ void *res = MmapNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ // A few pages at the start of the address space can not be protected.
+ // But we really want to protect as much as possible, to prevent this memory
+ // being returned as a result of a non-FIXED mmap().
+ if (addr == kZeroBaseShadowStart) {
+ uptr step = GetPageSizeCached();
+ while (size > step && addr < kZeroBaseMaxShadowStart) {
+ addr += step;
+ size -= step;
+ void *res = MmapNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ }
+ }
+
+ Report("ERROR: Failed to protect the shadow gap. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ DumpProcessMap();
+ Die();
}
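The rewritten ProtectGap retries with a progressively smaller range when the lowest pages near address zero are already occupied and cannot be made inaccessible. A sketch of just that back-off loop; TryProtect() is a stand-in for MmapNoAccess(), max_start plays the role of kZeroBaseMaxShadowStart, and all constants are illustrative:

    #include <cstdint>
    #include <cstdio>

    static const uintptr_t kPage = 0x1000;

    // Stubbed to fail for the two lowest pages so the retreat is visible.
    static bool TryProtect(uintptr_t addr, uintptr_t /*size*/) {
      return addr >= 3 * kPage;
    }

    static bool ProtectGapSketch(uintptr_t addr, uintptr_t size,
                                 uintptr_t max_start) {
      if (TryProtect(addr, size)) return true;
      // Give up one page at a time, but never walk past max_start.
      while (size > kPage && addr < max_start) {
        addr += kPage;
        size -= kPage;
        if (TryProtect(addr, size)) return true;
      }
      return false;  // the real runtime reports and dies here
    }

    int main() {
      std::printf("%s\n",
                  ProtectGapSketch(kPage, 16 * kPage, 8 * kPage) ? "protected"
                                                                 : "failed");
      return 0;
    }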
static void PrintAddressSpaceLayout() {
@@ -372,14 +383,15 @@ static void PrintAddressSpaceLayout() {
(void*)MEM_TO_SHADOW(kMidShadowEnd));
}
Printf("\n");
- Printf("red_zone=%zu\n", (uptr)flags()->redzone);
- Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
+ Printf("redzone=%zu\n", (uptr)flags()->redzone);
+ Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
+ Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
- Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE);
- Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY);
- Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET);
+ Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
+ Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
+ Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
if (kMidMemBeg)
CHECK(kMidShadowBeg > kLowShadowEnd &&
@@ -387,81 +399,40 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
-} // namespace __asan
+static void AsanInitInternal() {
+ if (LIKELY(asan_inited)) return;
+ SanitizerToolName = "AddressSanitizer";
+ CHECK(!asan_init_is_running && "ASan init calls itself!");
+ asan_init_is_running = true;
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
+ CacheBinaryName();
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
+ // Initialize flags. This must be done early, because most of the
+ // initialization steps look at flags().
+ InitializeFlags();
-int NOINLINE __asan_set_error_exit_code(int exit_code) {
- int old = flags()->exitcode;
- flags()->exitcode = exit_code;
- return old;
-}
+ AsanCheckIncompatibleRT();
+ AsanCheckDynamicRTPrereqs();
-void NOINLINE __asan_handle_no_return() {
- int local_stack;
- AsanThread *curr_thread = GetCurrentThread();
- CHECK(curr_thread);
- uptr PageSize = GetPageSizeCached();
- uptr top = curr_thread->stack_top();
- uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
- static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
- if (top - bottom > kMaxExpectedCleanupSize) {
- static bool reported_warning = false;
- if (reported_warning)
- return;
- reported_warning = true;
- Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
- "stack top: %p; bottom %p; size: %p (%zd)\n"
- "False positive error reports may follow\n"
- "For details see "
- "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
- top, bottom, top - bottom, top - bottom);
- return;
- }
- PoisonShadow(bottom, top - bottom, 0);
- if (curr_thread->has_fake_stack())
- curr_thread->fake_stack()->HandleNoReturn();
-}
+ SetCanPoisonMemory(flags()->poison_heap);
+ SetMallocContextSize(common_flags()->malloc_context_size);
-void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
- death_callback = callback;
-}
-
-void __asan_init() {
- if (asan_inited) return;
- SanitizerToolName = "AddressSanitizer";
- CHECK(!asan_init_is_running && "ASan init calls itself!");
- asan_init_is_running = true;
InitializeHighMemEnd();
// Make sure we are not statically linked.
AsanDoesNotSupportStaticLinkage();
// Install tool-specific callbacks in sanitizer_common.
- SetDieCallback(AsanDie);
+ AddDieCallback(AsanDie);
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
- // Initialize flags. This must be done early, because most of the
- // initialization steps look at flags().
- const char *options = GetEnv("ASAN_OPTIONS");
- InitializeFlags(flags(), options);
__sanitizer_set_report_path(common_flags()->log_path);
+
+ // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
- if (common_flags()->verbosity && options) {
- Report("Parsed ASAN_OPTIONS: %s\n", options);
- }
-
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -470,8 +441,12 @@ void __asan_init() {
InitializeAsanInterceptors();
+ // Enable system log ("adb logcat") on Android.
+ // Doing this before interceptors are initialized crashes in:
+ // AsanInitInternal -> android_log_write -> __interceptor_strcmp
+ AndroidLogInit();
+
ReplaceSystemMalloc();
- ReplaceOperatorsNewAndDelete();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
@@ -479,38 +454,37 @@ void __asan_init() {
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
-#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
- if (common_flags()->verbosity)
- PrintAddressSpaceLayout();
+ if (Verbosity()) PrintAddressSpaceLayout();
- if (flags()->disable_core) {
- DisableCoreDumper();
- }
+ DisableCoreDumperIfNecessary();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
- ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd);
+ ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
@@ -518,23 +492,21 @@ void __asan_init() {
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
AsanTSDInit(PlatformTSDDtor);
- InstallSignalHandlers();
+ InstallDeadlySignalHandlers(AsanOnDeadlySignal);
- // Allocator should be initialized before starting external symbolizer, as
- // fork() on Mac locks the allocator.
- InitializeAllocator();
+ AllocatorOptions allocator_options;
+ allocator_options.SetFrom(flags(), common_flags());
+ InitializeAllocator(allocator_options);
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
+ MaybeStartBackgroudThread();
+ SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
@@ -544,21 +516,26 @@ void __asan_init() {
if (flags()->atexit)
Atexit(asan_atexit);
- if (flags()->coverage)
- Atexit(__sanitizer_cov_dump);
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ // Now that ASan runtime is (mostly) initialized, deactivate it if
+ // necessary, so that it can be re-activated when requested.
+ if (flags()->start_deactivated)
+ AsanDeactivate();
// interceptors
InitTlsSize();
// Create main thread.
- AsanThread *main_thread = AsanThread::Create(0, 0);
- CreateThreadContextArgs create_main_args = { main_thread, 0 };
- u32 main_tid = asanThreadRegistry().CreateThread(
- 0, true, 0, &create_main_args);
- CHECK_EQ(0, main_tid);
+ AsanThread *main_thread = AsanThread::Create(
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* stack */ nullptr, /* detached */ true);
+ CHECK_EQ(0, main_thread->tid());
SetCurrentThread(main_thread);
- main_thread->ThreadStart(internal_getpid());
+ main_thread->ThreadStart(internal_getpid(),
+ /* signal_thread_is_registered */ nullptr);
force_interface_symbols(); // no-op.
+ SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan();
@@ -567,7 +544,85 @@ void __asan_init() {
}
#endif // CAN_SANITIZE_LEAKS
- if (common_flags()->verbosity) {
- Report("AddressSanitizer Init done\n");
+#if CAN_SANITIZE_UB
+ __ubsan::InitAsPlugin();
+#endif
+
+ InitializeSuppressions();
+
+ VReport(1, "AddressSanitizer Init done\n");
+}
+
+// Initialize as requested from some part of ASan runtime library (interceptors,
+// allocator, etc).
+void AsanInitFromRtl() {
+ AsanInitInternal();
+}
+
+#if ASAN_DYNAMIC
+// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
+// (and thus normal initializers from .preinit_array or modules haven't run).
+
+class AsanInitializer {
+public: // NOLINT
+ AsanInitializer() {
+ AsanInitFromRtl();
+ }
+};
+
+static AsanInitializer asan_initializer;
+#endif // ASAN_DYNAMIC
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+void NOINLINE __asan_handle_no_return() {
+ int local_stack;
+ AsanThread *curr_thread = GetCurrentThread();
+ uptr PageSize = GetPageSizeCached();
+ uptr top, bottom;
+ if (curr_thread) {
+ top = curr_thread->stack_top();
+ bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
+ } else {
+ // If we haven't seen this thread, try asking the OS for stack bounds.
+ uptr tls_addr, tls_size, stack_size;
+ GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
+ &tls_size);
+ top = bottom + stack_size;
}
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (top - bottom > kMaxExpectedCleanupSize) {
+ static bool reported_warning = false;
+ if (reported_warning)
+ return;
+ reported_warning = true;
+ Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
+ "stack top: %p; bottom %p; size: %p (%zd)\n"
+ "False positive error reports may follow\n"
+ "For details see "
+ "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
+ top, bottom, top - bottom, top - bottom);
+ return;
+ }
+ PoisonShadow(bottom, top - bottom, 0);
+ if (curr_thread && curr_thread->has_fake_stack())
+ curr_thread->fake_stack()->HandleNoReturn();
+}
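The stack-bottom estimate above steps one page below the address of a local variable and rounds down to a page boundary with a mask. Shown in isolation, with a made-up address:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPageSize = 0x1000;
      uintptr_t local_addr = 0xbeef1234;  // pretend this is &local_stack
      uintptr_t bottom = (local_addr - kPageSize) & ~(kPageSize - 1);
      std::printf("bottom = 0x%llx\n", (unsigned long long)bottom);  // 0xbeef0000
      return 0;
    }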
+
+void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
+
+// Initialize as requested from instrumented application code.
+// We use this call as a trigger to wake up ASan from deactivated state.
+void __asan_init() {
+ AsanActivate();
+ AsanInitInternal();
+}
+
+void __asan_version_mismatch_check() {
+ // Do nothing.
}
diff --git a/libsanitizer/asan/asan_stack.cc b/libsanitizer/asan/asan_stack.cc
index 24cccbd196e..973c5ce59ef 100644
--- a/libsanitizer/asan/asan_stack.cc
+++ b/libsanitizer/asan/asan_stack.cc
@@ -10,40 +10,25 @@
// Code for ASan stack trace.
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
-#include "asan_flags.h"
#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_atomic.h"
namespace __asan {
-static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
- int out_size) {
- return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
- : false;
-}
+static atomic_uint32_t malloc_context_size;
-void PrintStack(const uptr *trace, uptr size) {
- StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
+void SetMallocContextSize(u32 size) {
+ atomic_store(&malloc_context_size, size, memory_order_release);
}
-void PrintStack(StackTrace *stack) {
- PrintStack(stack->trace, stack->size);
+u32 GetMallocContextSize() {
+ return atomic_load(&malloc_context_size, memory_order_acquire);
}
} // namespace __asan
// ------------------ Interface -------------- {{{1
-// Provide default implementation of __asan_symbolize that does nothing
-// and may be overriden by user if he wants to use his own symbolization.
-// ASan on Windows has its own implementation of this.
-#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
-bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
- return false;
-}
-#endif
-
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h
index df7a9805f92..30dc5921303 100644
--- a/libsanitizer/asan/asan_stack.h
+++ b/libsanitizer/asan/asan_stack.h
@@ -9,6 +9,7 @@
//
// ASan-private header for asan_stack.cc.
//===----------------------------------------------------------------------===//
+
#ifndef ASAN_STACK_H
#define ASAN_STACK_H
@@ -19,66 +20,98 @@
namespace __asan {
-void PrintStack(StackTrace *stack);
-void PrintStack(const uptr *trace, uptr size);
+static const u32 kDefaultMallocContextSize = 30;
-} // namespace __asan
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
+ALWAYS_INLINE
+void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
+ uptr pc, uptr bp, void *context,
+ bool fast) {
#if SANITIZER_WINDOWS
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
- StackTrace stack; \
- stack.Unwind(max_s, pc, bp, 0, 0, fast)
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
- StackTrace stack; \
- { \
- AsanThread *t; \
- stack.size = 0; \
- if (asan_inited) { \
- if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
- uptr stack_top = t->stack_top(); \
- uptr stack_bottom = t->stack_bottom(); \
- ScopedUnwinding unwind_scope(t); \
- stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
- } else if (t == 0 && !fast) { \
- /* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
- stack.Unwind(max_s, pc, bp, 0, 0, false); \
- } \
- } \
+ AsanThread *t;
+ stack->size = 0;
+ if (LIKELY(asan_inited)) {
+ if ((t = GetCurrentThread()) && !t->isUnwinding()) {
+ // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
+ // yields the call stack of the signal's handler and not of the code
+ // that raised the signal (as it does on Linux).
+ if (SANITIZER_FREEBSD && t->isInDeadlySignal()) fast = true;
+ uptr stack_top = t->stack_top();
+ uptr stack_bottom = t->stack_bottom();
+ ScopedUnwinding unwind_scope(t);
+ stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+ } else if (!t && !fast) {
+ /* If GetCurrentThread() has failed, try to do slow unwind anyways. */
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
+ }
}
-#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WINDOWS
+}
+
+} // namespace __asan
// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE(max_size, fast) \
- GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
- StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
+#define GET_STACK_TRACE(max_size, fast) \
+ BufferedStackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) \
+ stack.trace_buffer[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ GetStackTraceWithPcBpAndContext(&stack, max_size, \
+ StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), 0, fast); \
+ }
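For max_size <= 2 the macro above skips unwinding entirely and records only the current program counter (plus, for two frames, the caller's). A rough standalone approximation of that fast path; TinyTrace and CaptureTiny are illustrative, and the GCC/Clang builtins are only stand-ins for GET_CURRENT_FRAME() and StackTrace::GetCurrentPc():

    #include <cstdint>
    #include <cstdio>

    struct TinyTrace {
      uintptr_t trace[2];
      uintptr_t top_frame_bp;
      unsigned size;
    };

    __attribute__((noinline)) static TinyTrace CaptureTiny(unsigned max_size) {
      TinyTrace t = {};
      t.size = max_size;
      if (max_size > 0) {
        t.top_frame_bp = (uintptr_t)__builtin_frame_address(0);
        // Proxy for "the PC at the capture point"; a second frame would need
        // the caller's return address and is omitted here.
        t.trace[0] = (uintptr_t)__builtin_return_address(0);
      }
      return t;
    }

    int main() {
      TinyTrace t = CaptureTiny(1);
      std::printf("frames: %u, pc[0] = %p\n", t.size, (void *)t.trace[0]);
      return 0;
    }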
-#define GET_STACK_TRACE_FATAL(pc, bp) \
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
- common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ BufferedStackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
+ common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_SIGNAL(sig) \
+ BufferedStackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \
+ (sig).pc, (sig).bp, (sig).context, \
+ common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_CHECK_HERE \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
+
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
-#define GET_STACK_TRACE_MALLOC \
- GET_STACK_TRACE(common_flags()->malloc_context_size, \
- common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE_FATAL_HERE; \
- PrintStack(&stack); \
+ stack.Print(); \
+ }
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_STACK_TRACE_CHECK_HERE; \
+ stack.Print(); \
}
-#endif // ASAN_STACK_H
+#endif // ASAN_STACK_H
diff --git a/libsanitizer/asan/asan_stats.cc b/libsanitizer/asan/asan_stats.cc
index 71c8582e81c..64a788a6a5d 100644
--- a/libsanitizer/asan/asan_stats.cc
+++ b/libsanitizer/asan/asan_stats.cc
@@ -13,6 +13,7 @@
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -48,12 +49,8 @@ void AsanStats::Print() {
(mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
mmaps, munmaps);
- PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
- PrintMallocStatsArray(" frees by size class: ", freed_by_size);
- PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size);
- Printf("Stats: malloc large: %zu small slow: %zu\n",
- malloc_large, malloc_small_slow);
+ Printf("Stats: malloc large: %zu\n", malloc_large);
}
void AsanStats::MergeFrom(const AsanStats *stats) {
@@ -127,8 +124,8 @@ static void PrintAccumulatedStats() {
BlockingMutexLock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
- Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
- stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+ Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
PrintInternalAllocatorStats();
}
@@ -137,7 +134,7 @@ static void PrintAccumulatedStats() {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-uptr __asan_get_current_allocated_bytes() {
+uptr __sanitizer_get_current_allocated_bytes() {
AsanStats stats;
GetAccumulatedStats(&stats);
uptr malloced = stats.malloced;
@@ -147,19 +144,18 @@ uptr __asan_get_current_allocated_bytes() {
return (malloced > freed) ? malloced - freed : 1;
}
-uptr __asan_get_heap_size() {
+uptr __sanitizer_get_heap_size() {
AsanStats stats;
GetAccumulatedStats(&stats);
return stats.mmaped - stats.munmaped;
}
-uptr __asan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
AsanStats stats;
GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped
- stats.munmaped
- + stats.really_freed
- + stats.really_freed_redzones;
+ + stats.really_freed;
uptr total_used = stats.malloced
+ stats.malloced_redzones;
// Return sane value if total_free < total_used due to racy
@@ -167,7 +163,7 @@ uptr __asan_get_free_bytes() {
return (total_free > total_used) ? total_free - total_used : 1;
}
-uptr __asan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 0;
}
diff --git a/libsanitizer/asan/asan_stats.h b/libsanitizer/asan/asan_stats.h
index 5b04b1718eb..a48e3f916a9 100644
--- a/libsanitizer/asan/asan_stats.h
+++ b/libsanitizer/asan/asan_stats.h
@@ -30,20 +30,14 @@ struct AsanStats {
uptr freed;
uptr real_frees;
uptr really_freed;
- uptr really_freed_redzones;
uptr reallocs;
uptr realloced;
uptr mmaps;
uptr mmaped;
uptr munmaps;
uptr munmaped;
- uptr mmaped_by_size[kNumberOfSizeClasses];
- uptr malloced_by_size[kNumberOfSizeClasses];
- uptr freed_by_size[kNumberOfSizeClasses];
- uptr really_freed_by_size[kNumberOfSizeClasses];
-
uptr malloc_large;
- uptr malloc_small_slow;
+ uptr malloced_by_size[kNumberOfSizeClasses];
// Ctor for global AsanStats (accumulated stats for dead threads).
explicit AsanStats(LinkerInitialized) { }
diff --git a/libsanitizer/asan/asan_suppressions.cc b/libsanitizer/asan/asan_suppressions.cc
new file mode 100644
index 00000000000..c94fff0500a
--- /dev/null
+++ b/libsanitizer/asan/asan_suppressions.cc
@@ -0,0 +1,108 @@
+//===-- asan_suppressions.cc ----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Issue suppression and suppression-related functions.
+//===----------------------------------------------------------------------===//
+
+#include "asan_suppressions.h"
+
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __asan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kInterceptorName[] = "interceptor_name";
+static const char kInterceptorViaFunction[] = "interceptor_via_fun";
+static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
+static const char kODRViolation[] = "odr_violation";
+static const char *kSuppressionTypes[] = {
+ kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
+ kODRViolation};
+
+extern "C" {
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__asan_default_suppressions();
+#else
+// No weak hooks; provide an empty implementation.
+const char *__asan_default_suppressions() { return ""; }
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+} // extern "C"
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+ if (&__asan_default_suppressions)
+ suppression_ctx->Parse(__asan_default_suppressions());
+}
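suppression_ctx is constructed with placement new inside a pre-sized, 64-byte-aligned static buffer, so the runtime neither touches the heap nor relies on a static constructor running. A stripped-down sketch of that pattern; Config, placeholder and InitOnce are illustrative stand-ins for SuppressionContext, suppression_placeholder and InitializeSuppressions:

    #include <new>
    #include <cstdio>

    struct Config {
      const char *types[4];
      int count;
      Config(const char **t, int n) : count(n) {
        for (int i = 0; i < n && i < 4; i++) types[i] = t[i];
      }
    };

    alignas(64) static char placeholder[sizeof(Config)];
    static Config *ctx = nullptr;

    void InitOnce() {
      if (ctx) return;
      static const char *kTypes[] = {"interceptor_name", "interceptor_via_fun",
                                     "interceptor_via_lib", "odr_violation"};
      ctx = new (placeholder) Config(kTypes, 4);  // no heap allocation
    }

    int main() {
      InitOnce();
      std::printf("suppression types: %d\n", ctx->count);
      return 0;
    }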
+
+bool IsInterceptorSuppressed(const char *interceptor_name) {
+ CHECK(suppression_ctx);
+ Suppression *s;
+ // Match "interceptor_name" suppressions.
+ return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
+}
+
+bool HaveStackTraceBasedSuppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) ||
+ suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
+}
+
+bool IsODRViolationSuppressed(const char *global_var_name) {
+ CHECK(suppression_ctx);
+ Suppression *s;
+ // Match "odr_violation" suppressions.
+ return suppression_ctx->Match(global_var_name, kODRViolation, &s);
+}
+
+bool IsStackTraceSuppressed(const StackTrace *stack) {
+ if (!HaveStackTraceBasedSuppressions())
+ return false;
+
+ CHECK(suppression_ctx);
+ Symbolizer *symbolizer = Symbolizer::GetOrInit();
+ Suppression *s;
+ for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
+ uptr addr = stack->trace[i];
+
+ if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
+ // Match "interceptor_via_lib" suppressions.
+ if (const char *module_name = symbolizer->GetModuleNameForPc(addr))
+ if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s))
+ return true;
+ }
+
+ if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
+ SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ const char *function_name = cur->info.function;
+ if (!function_name) {
+ continue;
+ }
+ // Match "interceptor_via_fun" suppressions.
+ if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
+ &s)) {
+ frames->ClearAll();
+ return true;
+ }
+ }
+ frames->ClearAll();
+ }
+ }
+ return false;
+}
+
+} // namespace __asan
diff --git a/libsanitizer/asan/asan_suppressions.h b/libsanitizer/asan/asan_suppressions.h
new file mode 100644
index 00000000000..331d7224548
--- /dev/null
+++ b/libsanitizer/asan/asan_suppressions.h
@@ -0,0 +1,28 @@
+//===-- asan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_suppressions.cc.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_SUPPRESSIONS_H
+#define ASAN_SUPPRESSIONS_H
+
+#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __asan {
+
+void InitializeSuppressions();
+bool IsInterceptorSuppressed(const char *interceptor_name);
+bool HaveStackTraceBasedSuppressions();
+bool IsStackTraceSuppressed(const StackTrace *stack);
+bool IsODRViolationSuppressed(const char *global_var_name);
+
+} // namespace __asan
+
+#endif // ASAN_SUPPRESSIONS_H
diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc
index 5a9c2dddffb..92f968bdee4 100644
--- a/libsanitizer/asan/asan_thread.cc
+++ b/libsanitizer/asan/asan_thread.cc
@@ -18,23 +18,29 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {
// AsanThreadContext implementation.
+struct CreateThreadContextArgs {
+ AsanThread *thread;
+ StackTrace *stack;
+};
+
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
- stack_id = StackDepotPut(args->stack->trace, args->stack->size);
+ stack_id = StackDepotPut(*args->stack);
thread = args->thread;
thread->set_context(this);
}
void AsanThreadContext::OnFinished() {
// Drop the link to the AsanThread object.
- thread = 0;
+ thread = nullptr;
}
// MIPS requires aligned address
@@ -72,41 +78,44 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
// AsanThread implementation.
-AsanThread *AsanThread::Create(thread_callback_t start_routine,
- void *arg) {
+AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack,
+ bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
- AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
+ AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
+ CreateThreadContextArgs args = { thread, stack };
+ asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
+ parent_tid, &args);
return thread;
}
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
- if (common_flags()->verbosity >= 1)
- Report("T%d TSDDtor\n", context->tid);
+ VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
- if (common_flags()->verbosity >= 1) {
- Report("T%d exited\n", tid());
- }
+ int tid = this->tid();
+ VReport(1, "T%d exited\n", tid);
malloc_storage().CommitBack();
- if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
- asanThreadRegistry().FinishThread(tid());
+ if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
+ asanThreadRegistry().FinishThread(tid);
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
- DeleteFakeStack();
+ DeleteFakeStack(tid);
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
+ DTLS_Destroy();
}
// We want to create the FakeStack lazily on the first use, but not earlier
@@ -114,46 +123,52 @@ void AsanThread::Destroy() {
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
uptr stack_size = this->stack_size();
if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
- return 0;
+ return nullptr;
uptr old_val = 0;
// fake_stack_ has 3 states:
// 0 -- not initialized
// 1 -- being initialized
// ptr -- initialized
// This CAS checks if the state was 0 and if so changes it to state 1,
- // if that was successfull, it initilizes the pointer.
+ // if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
- if (flags()->uar_stack_size_log)
- stack_size_log = static_cast<uptr>(flags()->uar_stack_size_log);
+ CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
+ stack_size_log =
+ Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
+ stack_size_log =
+ Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
}
- return 0;
+ return nullptr;
}
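The fake_stack_ field is a three-state word: 0 means not initialized, 1 means another thread is initializing it, and anything else is the pointer itself. Only the thread that wins the 0 -> 1 CAS constructs the object, which is what makes this safe to trigger from signal handlers. A condensed sketch of the same pattern with std::atomic; Thing and LazyInit are illustrative, and the real runtime allocates the FakeStack with its own mmap-based allocator rather than operator new:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct Thing { int value = 42; };

    static std::atomic<uintptr_t> g_thing{0};  // 0 / 1 / pointer

    Thing *LazyInit() {
      uintptr_t expected = 0;
      if (g_thing.compare_exchange_strong(expected, 1,
                                          std::memory_order_relaxed)) {
        Thing *t = new Thing();                       // winner constructs
        g_thing.store((uintptr_t)t, std::memory_order_release);
        return t;
      }
      uintptr_t cur = g_thing.load(std::memory_order_acquire);
      return cur > 1 ? (Thing *)cur : nullptr;        // 1: still in progress
    }

    int main() {
      Thing *t = LazyInit();
      std::printf("%d\n", t ? t->value : -1);
      return 0;
    }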
void AsanThread::Init() {
+ fake_stack_ = nullptr; // Will be initialized lazily if needed.
+ CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
+ CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
- if (common_flags()->verbosity >= 1) {
- int local = 0;
- Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
- tid(), (void*)stack_bottom_, (void*)stack_top_,
- stack_top_ - stack_bottom_, &local);
- }
- fake_stack_ = 0; // Will be initialized lazily if needed.
- AsanPlatformThreadInit();
+ int local = 0;
+ VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
+ (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
+ &local);
}
-thread_return_t AsanThread::ThreadStart(uptr os_id) {
+thread_return_t AsanThread::ThreadStart(
+ uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init();
- asanThreadRegistry().StartThread(tid(), os_id, 0);
- if (flags()->use_sigaltstack) SetAlternateSignalStack();
+ asanThreadRegistry().StartThread(tid(), os_id, nullptr);
+ if (signal_thread_is_registered)
+ atomic_store(signal_thread_is_registered, 1, memory_order_release);
+
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
@@ -193,17 +208,18 @@ void AsanThread::ClearShadowForThreadStackAndTLS() {
PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}
-const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
- uptr *frame_pc) {
+bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
+ StackFrameAccess *access) {
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
} else if (has_fake_stack()) {
bottom = fake_stack()->AddrIsInFakeStack(addr);
CHECK(bottom);
- *offset = addr - bottom;
- *frame_pc = ((uptr*)bottom)[2];
- return (const char *)((uptr*)bottom)[1];
+ access->offset = addr - bottom;
+ access->frame_pc = ((uptr*)bottom)[2];
+ access->frame_descr = (const char *)((uptr*)bottom)[1];
+ return true;
}
uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
@@ -220,15 +236,15 @@ const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
}
if (shadow_ptr < shadow_bottom) {
- *offset = 0;
- return "UNKNOWN";
+ return false;
}
uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
CHECK(ptr[0] == kCurrentStackFrameMagic);
- *offset = addr - (uptr)ptr;
- *frame_pc = ptr[2];
- return (const char*)ptr[1];
+ access->offset = addr - (uptr)ptr;
+ access->frame_pc = ptr[2];
+ access->frame_descr = (const char*)ptr[1];
+ return true;
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
@@ -258,17 +274,15 @@ AsanThread *GetCurrentThread() {
return tctx->thread;
}
}
- return 0;
+ return nullptr;
}
return context->thread;
}
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
- if (common_flags()->verbosity >= 2) {
- Report("SetCurrentThread: %p for thread %p\n",
- t->context(), (void*)GetThreadSelf());
- }
+ VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ (void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());
@@ -285,7 +299,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
(void *)addr));
- return tctx ? tctx->thread : 0;
+ return tctx ? tctx->thread : nullptr;
}
void EnsureMainThreadIDIsCorrect() {
@@ -298,10 +312,10 @@ void EnsureMainThreadIDIsCorrect() {
__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
- if (!context) return 0;
+ if (!context) return nullptr;
return context->thread;
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
@@ -338,4 +352,4 @@ void UnlockThreadRegistry() {
void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
-} // namespace __lsan
+} // namespace __lsan
diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h
index 5a917fa9a3d..27a3367d8e7 100644
--- a/libsanitizer/asan/asan_thread.h
+++ b/libsanitizer/asan/asan_thread.h
@@ -9,13 +9,13 @@
//
// ASan-private header for asan_thread.cc.
//===----------------------------------------------------------------------===//
+
#ifndef ASAN_THREAD_H
#define ASAN_THREAD_H
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
-#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -33,19 +33,16 @@ class AsanThread;
class AsanThreadContext : public ThreadContextBase {
public:
explicit AsanThreadContext(int tid)
- : ThreadContextBase(tid),
- announced(false),
- destructor_iterations(kPthreadDestructorIterations),
- stack_id(0),
- thread(0) {
- }
+ : ThreadContextBase(tid), announced(false),
+ destructor_iterations(GetPthreadDestructorIterations()), stack_id(0),
+ thread(nullptr) {}
bool announced;
u8 destructor_iterations;
u32 stack_id;
AsanThread *thread;
- void OnCreated(void *arg);
- void OnFinished();
+ void OnCreated(void *arg) override;
+ void OnFinished() override;
};
// AsanThreadContext objects are never freed, so we need many of them.
@@ -54,12 +51,14 @@ COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
- static AsanThread *Create(thread_callback_t start_routine, void *arg);
+ static AsanThread *Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack, bool detached);
static void TSDDtor(void *tsd);
void Destroy();
void Init(); // Should be called from the thread itself.
- thread_return_t ThreadStart(uptr os_id);
+ thread_return_t ThreadStart(uptr os_id,
+ atomic_uintptr_t *signal_thread_is_registered);
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
@@ -70,18 +69,23 @@ class AsanThread {
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }
- const char *GetFrameNameByAddr(uptr addr, uptr *offset, uptr *frame_pc);
+ struct StackFrameAccess {
+ uptr offset;
+ uptr frame_pc;
+ const char *frame_descr;
+ };
+ bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
- void DeleteFakeStack() {
+ void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
- fake_stack_ = 0;
- SetTLSFakeStack(0);
- t->Destroy();
+ fake_stack_ = nullptr;
+ SetTLSFakeStack(nullptr);
+ t->Destroy(tid);
}
bool has_fake_stack() {
@@ -90,7 +94,7 @@ class AsanThread {
FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
- return 0;
+ return nullptr;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;
@@ -102,6 +106,10 @@ class AsanThread {
bool isUnwinding() const { return unwinding_; }
void setUnwinding(bool b) { unwinding_ = b; }
+ // True if we are in a deadly signal handler.
+ bool isInDeadlySignal() const { return in_deadly_signal_; }
+ void setInDeadlySignal(bool b) { in_deadly_signal_ = b; }
+
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
@@ -127,6 +135,7 @@ class AsanThread {
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding_;
+ bool in_deadly_signal_;
};
// ScopedUnwinding is a scope for stacktracing member of a context
@@ -141,9 +150,18 @@ class ScopedUnwinding {
AsanThread *thread;
};
-struct CreateThreadContextArgs {
+// ScopedDeadlySignal is a scope for handling deadly signals.
+class ScopedDeadlySignal {
+ public:
+ explicit ScopedDeadlySignal(AsanThread *t) : thread(t) {
+ if (thread) thread->setInDeadlySignal(true);
+ }
+ ~ScopedDeadlySignal() {
+ if (thread) thread->setInDeadlySignal(false);
+ }
+
+ private:
AsanThread *thread;
- StackTrace *stack;
};
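ScopedDeadlySignal is a plain RAII flag: it marks the thread on entry and is guaranteed to clear the mark on every exit path, including early returns. An isolated sketch with illustrative names (FakeThread, ScopedFlag):

    #include <cstdio>

    struct FakeThread { bool in_deadly_signal = false; };

    class ScopedFlag {
     public:
      explicit ScopedFlag(FakeThread *t) : thread_(t) {
        if (thread_) thread_->in_deadly_signal = true;
      }
      ~ScopedFlag() {
        if (thread_) thread_->in_deadly_signal = false;
      }
     private:
      FakeThread *thread_;
    };

    int main() {
      FakeThread t;
      {
        ScopedFlag guard(&t);
        std::printf("inside: %d\n", t.in_deadly_signal);   // 1
      }
      std::printf("outside: %d\n", t.in_deadly_signal);    // 0
      return 0;
    }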
// Returns a single instance of registry.
@@ -160,6 +178,6 @@ AsanThread *FindThreadByStackAddress(uptr addr);
// Used to handle fork().
void EnsureMainThreadIDIsCorrect();
-} // namespace __asan
+} // namespace __asan
-#endif // ASAN_THREAD_H
+#endif // ASAN_THREAD_H
diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc
index 8ffa58faa37..6c12523498a 100644
--- a/libsanitizer/asan/asan_win.cc
+++ b/libsanitizer/asan/asan_win.cc
@@ -12,33 +12,142 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
+#define WIN32_LEAN_AND_MEAN
#include <windows.h>
-#include <dbghelp.h>
#include <stdlib.h>
#include "asan_interceptors.h"
#include "asan_internal.h"
+#include "asan_report.h"
+#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
+using namespace __asan; // NOLINT
+
extern "C" {
- SANITIZER_INTERFACE_ATTRIBUTE
- int __asan_should_detect_stack_use_after_return() {
- __asan_init();
- return __asan_option_detect_stack_use_after_return;
- }
+SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_should_detect_stack_use_after_return() {
+ __asan_init();
+ return __asan_option_detect_stack_use_after_return;
+}
+
+// -------------------- A workaround for the absence of weak symbols ----- {{{
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value, which works nicely for allocator hooks and
+// __asan_default_options().
+void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
+void __sanitizer_default_free_hook(void *ptr) { }
+const char* __asan_default_default_options() { return ""; }
+const char* __asan_default_default_suppressions() { return ""; }
+void __asan_default_on_error() {}
+#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
+#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
+#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
+#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
+#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
+// }}}
+} // extern "C"
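The /alternatename directives above emulate weak symbols under MSVC: each hook resolves to its *_default_* fallback unless another object file supplies a real definition. A minimal MSVC-only sketch of the same trick; my_hook is a made-up symbol, and the leading underscores in the directive are the 32-bit C name decoration (an x64 build would drop them):

    #include <cstdio>

    extern "C" void my_hook_default() { std::printf("default hook\n"); }
    extern "C" void my_hook();  // no definition here
    #pragma comment(linker, "/alternatename:_my_hook=_my_hook_default")

    int main() {
      my_hook();  // links against my_hook_default unless overridden elsewhere
      return 0;
    }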
+
+// ---------------------- Windows-specific interceptors ---------------- {{{
+INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(RaiseException));
+ __asan_handle_no_return();
+ REAL(RaiseException)(a, b, c, d);
}
+INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler3));
+ __asan_handle_no_return();
+ return REAL(_except_handler3)(a, b, c, d);
+}
+
+#if ASAN_DYNAMIC
+// This handler is named differently in -MT and -MD CRTs.
+#define _except_handler4 _except_handler4_common
+#endif
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler4));
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+
+static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+ AsanThread *t = (AsanThread*)arg;
+ SetCurrentThread(t);
+ return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+}
+
+INTERCEPTOR_WINAPI(DWORD, CreateThread,
+ void* security, uptr stack_size,
+ DWORD (__stdcall *start_routine)(void*), void* arg,
+ DWORD thr_flags, void* tid) {
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order)
+ StopInitOrderChecking();
+ GET_STACK_TRACE_THREAD;
+ // FIXME: The CreateThread interceptor is not the same as a pthread_create
+ // one. This is a bandaid fix for PR22025.
+ bool detached = false; // FIXME: how can we determine it on Windows?
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ return REAL(CreateThread)(security, stack_size,
+ asan_thread_start, t, thr_flags, tid);
+}
+
+namespace {
+BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
+
+void EnsureWorkerThreadRegistered() {
+ // FIXME: GetCurrentThread relies on TSD, which might not play well with
+ // system thread pools. We might want to use something like reference
+ // counting to zero out GetCurrentThread()'s underlying storage when the last
+ // work item finishes? Or can we disable reclaiming of threads in the pool?
+ BlockingMutexLock l(&mu_for_thread_tracking);
+ if (__asan::GetCurrentThread())
+ return;
+
+ AsanThread *t = AsanThread::Create(
+ /* start_routine */ nullptr, /* arg */ nullptr,
+ /* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
+ t->Init();
+ asanThreadRegistry().StartThread(t->tid(), 0, 0);
+ SetCurrentThread(t);
+}
+} // namespace
+
+INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
+ // NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
+ // query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
+ // System worker pool threads are created at an arbitrary point in time and
+ // without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
+ // instead and don't register a specific parent_tid/stack.
+ EnsureWorkerThreadRegistered();
+ return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
+}
+
+// }}}
+
namespace __asan {
-// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
-static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
-static bool dbghelp_initialized = false;
-#pragma comment(lib, "dbghelp.lib")
+void InitializePlatformInterceptors() {
+ ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(RaiseException);
+ ASAN_INTERCEPT_FUNC(_except_handler3);
+ ASAN_INTERCEPT_FUNC(_except_handler4);
+
+ // NtWaitForWorkViaWorkerFactory is always linked dynamically.
+ CHECK(::__interception::OverrideFunction(
+ "NtWaitForWorkViaWorkerFactory",
+ (uptr)WRAP(NtWaitForWorkViaWorkerFactory),
+ (uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
+}
-// ---------------------- TSD ---------------- {{{1
+// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false;
static __declspec(thread) void *fake_tsd = 0;
@@ -61,7 +170,13 @@ void AsanTSDSet(void *tsd) {
void PlatformTSDDtor(void *tsd) {
AsanThread::TSDDtor(tsd);
}
-// ---------------------- Various stuff ---------------- {{{1
+// }}}
+
+// ---------------------- Various stuff ---------------- {{{
+void DisableReexec() {
+ // No need to re-exec on Windows.
+}
+
void MaybeReexec() {
// No need to re-exec on Windows.
}
@@ -73,74 +188,73 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
-void SetAlternateSignalStack() {
- // FIXME: Decide what to do on Windows.
-}
+void AsanCheckDynamicRTPrereqs() {}
-void UnsetAlternateSignalStack() {
- // FIXME: Decide what to do on Windows.
-}
+void AsanCheckIncompatibleRT() {}
-void InstallSignalHandlers() {
- // FIXME: Decide what to do on Windows.
-}
-
-void AsanPlatformThreadInit() {
- // Nothing here for now.
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ UNIMPLEMENTED();
}
-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+void AsanOnDeadlySignal(int, void *siginfo, void *context) {
UNIMPLEMENTED();
}
-} // namespace __asan
+static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
-bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
- BlockingMutexLock lock(&dbghelp_lock);
- if (!dbghelp_initialized) {
- SymSetOptions(SYMOPT_DEFERRED_LOADS |
- SYMOPT_UNDNAME |
- SYMOPT_LOAD_LINES);
- CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
- // FIXME: We don't call SymCleanup() on exit yet - should we?
- dbghelp_initialized = true;
+ if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
+ exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
+ const char *description =
+ (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ ? "access-violation"
+ : "in-page-error";
+ SignalContext sig = SignalContext::Create(exception_record, context);
+ ReportDeadlySignal(description, sig);
}
- // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
- char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
- PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
- symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
- symbol->MaxNameLen = MAX_SYM_NAME;
- DWORD64 offset = 0;
- BOOL got_objname = SymFromAddr(GetCurrentProcess(),
- (DWORD64)addr, &offset, symbol);
- if (!got_objname)
- return false;
-
- DWORD unused;
- IMAGEHLP_LINE64 info;
- info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
- BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(),
- (DWORD64)addr, &unused, &info);
- int written = 0;
- out_buffer[0] = '\0';
- // FIXME: it might be useful to print out 'obj' or 'obj+offset' info too.
- if (got_fileline) {
- written += internal_snprintf(out_buffer + written, buffer_size - written,
- " %s %s:%d", symbol->Name,
- info.FileName, info.LineNumber);
- } else {
- written += internal_snprintf(out_buffer + written, buffer_size - written,
- " %s+0x%p", symbol->Name, offset);
- }
- return true;
+ // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+
+ return default_seh_handler(info);
+}
+
+// We want to install our own exception handler (EH) to print helpful reports
+// on access violations and whatnot. Unfortunately, the CRT initializers assume
+// they are run before any user code and drop any previously-installed EHs on
+// the floor, so we can't install our handler inside __asan_init.
+// (See crt0dat.c in the CRT sources for the details)
+//
+// Things get even more complicated with the dynamic runtime, as it finishes its
+// initialization before the .exe module CRT begins to initialize.
+//
+// For the static runtime (-MT), it's enough to put a callback to
+// __asan_set_seh_filter in the last section for C initializers.
+//
+// For the dynamic runtime (-MD), we want to link the same
+// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter
+// will be called for each instrumented module. This ensures that at least one
+// __asan_set_seh_filter call happens after the .exe module CRT is initialized.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_set_seh_filter() {
+ // We should only store the previous handler if it's not our own handler in
+ // order to avoid loops in the EH chain.
+ auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler);
+ if (prev_seh_handler != &SEHHandler)
+ default_seh_handler = prev_seh_handler;
+ return 0;
}
-} // extern "C"
+#if !ASAN_DYNAMIC
+// Put a pointer to __asan_set_seh_filter at the end of the global list
+// of C initializers, after the default EH is set by the CRT.
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+__declspec(allocate(".CRT$XIZ"))
+ int (*__intercept_seh)() = __asan_set_seh_filter;
+#endif
+// }}}
+} // namespace __asan
#endif // _WIN32
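
An illustrative, standalone sketch of the /alternatename technique used in asan_win.cc above (hypothetical symbol names my_hook/my_default_hook; the leading underscore in the directive assumes 32-bit MSVC C name decoration and would be dropped for x64):

// weak_default.cc (hypothetical) -- emulating a weak symbol with MSVC.
#include <cstdio>

extern "C" void my_hook(int value);            // may be defined by another TU
extern "C" void my_default_hook(int value) {   // fallback implementation
  std::printf("default hook: %d\n", value);
}

// If "my_hook" is still unresolved at link time, resolve it to
// "my_default_hook"; this is how the hooks above get overridable defaults.
#pragma comment(linker, "/alternatename:_my_hook=_my_default_hook")

int main() {
  my_hook(42);  // calls my_default_hook unless another object defines my_hook
  return 0;
}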
diff --git a/libsanitizer/asan/asan_win_dll_thunk.cc b/libsanitizer/asan/asan_win_dll_thunk.cc
new file mode 100644
index 00000000000..691aaf36fd0
--- /dev/null
+++ b/libsanitizer/asan/asan_win_dll_thunk.cc
@@ -0,0 +1,425 @@
+//===-- asan_win_dll_thunk.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have ASan instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
+// details.
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dll_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DLL_THUNK
+#include "asan_init_version.h"
+#include "interception/interception.h"
+
+// ---------- Function interception helper functions and macros ----------- {{{1
+extern "C" {
+void *__stdcall GetModuleHandleA(const char *module_name);
+void *__stdcall GetProcAddress(void *module, const char *proc_name);
+void abort();
+}
+
+static uptr getRealProcAddressOrDie(const char *name) {
+ uptr ret =
+ __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+// We need to intercept some functions (e.g. ASan interface, memory allocator --
+// let's call them "hooks") exported by the DLL thunk and forward the hooks to
+// the runtime in the main module.
+// However, we don't want to keep two lists of these hooks.
+// To avoid that, the list of hooks should be defined using the
+// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
+// at once by calling INTERCEPT_HOOKS().
+
+// Use macro+template magic to automatically generate the list of hooks.
+// Each hook at line LINE defines a template class with a static
+// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
+// The default implementation of FunctionInterceptor<LINE> is to call
+// the Execute() method corresponding to the previous line.
+template<int LINE>
+struct FunctionInterceptor {
+ static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
+};
+
+// There shouldn't be any hooks with a negative definition line number.
+template<>
+struct FunctionInterceptor<0> {
+ static void Execute() {}
+};
+
+#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
+ template <> struct FunctionInterceptor<__LINE__> { \
+ static void Execute() { \
+ uptr wrapper = getRealProcAddressOrDie(main_function); \
+ if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \
+ abort(); \
+ FunctionInterceptor<__LINE__ - 1>::Execute(); \
+ } \
+ };
+
+// Special case of hooks -- ASan's own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERFACE_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name)
+
+// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
+#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to these
+// functions since they can be called before __asan_init, but that would lead
+// to false negatives only in the startup code before the user's global
+// initializers, which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+static void InterceptHooks();
+// }}}
+
+// ---------- Function wrapping helpers ----------------------------------- {{{1
+#define WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef void (*fntype)(); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef void (*fntype)(void *arg); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef void (*fntype)(void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+ typedef void *(*fntype)(void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef void *(*fntype)(); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef void *(*fntype)(void *arg); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef void *(*fntype)(void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef void *(*fntype)(void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef void *(*fntype)(void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef void *(*fntype)(void *, void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+// }}}
+
+// ----------------- ASan's own interface functions --------------------
+// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
+// want to call it in the __asan_init interceptor.
+WRAP_W_V(__asan_should_detect_stack_use_after_return)
+
+extern "C" {
+ int __asan_option_detect_stack_use_after_return;
+
+ // Manually wrap __asan_init as we need to initialize
+ // __asan_option_detect_stack_use_after_return afterwards.
+ void __asan_init() {
+ typedef void (*fntype)();
+ static fntype fn = 0;
+ // __asan_init is expected to be called by only one thread.
+ if (fn) return;
+
+ fn = (fntype)getRealProcAddressOrDie("__asan_init");
+ fn();
+ __asan_option_detect_stack_use_after_return =
+ (__asan_should_detect_stack_use_after_return() != 0);
+
+ InterceptHooks();
+ }
+}
+
+extern "C" void __asan_version_mismatch_check() {
+ // Do nothing.
+}
+
+INTERFACE_FUNCTION(__asan_handle_no_return)
+
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+
+INTERFACE_FUNCTION(__asan_store1)
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+
+INTERFACE_FUNCTION(__asan_load1)
+INTERFACE_FUNCTION(__asan_load2)
+INTERFACE_FUNCTION(__asan_load4)
+INTERFACE_FUNCTION(__asan_load8)
+INTERFACE_FUNCTION(__asan_load16)
+INTERFACE_FUNCTION(__asan_loadN)
+
+INTERFACE_FUNCTION(__asan_memcpy);
+INTERFACE_FUNCTION(__asan_memset);
+INTERFACE_FUNCTION(__asan_memmove);
+
+INTERFACE_FUNCTION(__asan_alloca_poison);
+INTERFACE_FUNCTION(__asan_allocas_unpoison);
+
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+
+INTERFACE_FUNCTION(__asan_address_is_poisoned)
+INTERFACE_FUNCTION(__asan_region_is_poisoned)
+
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+
+// FIXME: we might want to have a sanitizer_win_dll_thunk?
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(__sanitizer_cov)
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
+INTERFACE_FUNCTION(__sanitizer_cov_init)
+INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
+INTERFACE_FUNCTION(__sanitizer_cov_with_check)
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_total_unique_caller_callee_pairs)
+INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_FUNCTION(__sanitizer_reset_coverage)
+INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
+INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
+INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+
+// TODO(timurrrr): Add more interface functions on an as-needed basis.
+
+// ----------------- Memory allocation functions ---------------------
+WRAP_V_W(free)
+WRAP_V_WW(_free_dbg)
+
+WRAP_W_W(malloc)
+WRAP_W_WWWW(_malloc_dbg)
+
+WRAP_W_WW(calloc)
+WRAP_W_WWWWW(_calloc_dbg)
+WRAP_W_WWW(_calloc_impl)
+
+WRAP_W_WW(realloc)
+WRAP_W_WWW(_realloc_dbg)
+WRAP_W_WWW(_recalloc)
+
+WRAP_W_W(_msize)
+WRAP_W_W(_expand)
+WRAP_W_W(_expand_dbg)
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
+// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
+
+INTERCEPT_LIBRARY_FUNCTION(atoi);
+INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+
+// _except_handler4 checks the -GS cookie, which is different for each module,
+// so we can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+
+INTERCEPT_LIBRARY_FUNCTION(frexp);
+INTERCEPT_LIBRARY_FUNCTION(longjmp);
+INTERCEPT_LIBRARY_FUNCTION(memchr);
+INTERCEPT_LIBRARY_FUNCTION(memcmp);
+INTERCEPT_LIBRARY_FUNCTION(memcpy);
+INTERCEPT_LIBRARY_FUNCTION(memmove);
+INTERCEPT_LIBRARY_FUNCTION(memset);
+INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strchr);
+INTERCEPT_LIBRARY_FUNCTION(strcmp);
+INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strcspn);
+INTERCEPT_LIBRARY_FUNCTION(strlen);
+INTERCEPT_LIBRARY_FUNCTION(strncat);
+INTERCEPT_LIBRARY_FUNCTION(strncmp);
+INTERCEPT_LIBRARY_FUNCTION(strncpy);
+INTERCEPT_LIBRARY_FUNCTION(strnlen);
+INTERCEPT_LIBRARY_FUNCTION(strpbrk);
+INTERCEPT_LIBRARY_FUNCTION(strspn);
+INTERCEPT_LIBRARY_FUNCTION(strstr);
+INTERCEPT_LIBRARY_FUNCTION(strtol);
+INTERCEPT_LIBRARY_FUNCTION(wcslen);
+
+// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS
+// is defined.
+void InterceptHooks() {
+ INTERCEPT_HOOKS();
+ INTERCEPT_FUNCTION(_except_handler4);
+}
+
+// We want to call __asan_init before C/C++ initializers/constructors are
+// executed; otherwise intercepted functions like memset might run first.
+// For some strange reason, merely linking in asan_preinit.cc doesn't work
+// as the callback is never called... Is link.exe doing something too smart?
+
+// In DLLs, the callbacks are expected to return 0,
+// otherwise CRT initialization fails.
+static int call_asan_init() {
+ __asan_init();
+ return 0;
+}
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+
+#endif // ASAN_DLL_THUNK
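
The FunctionInterceptor<__LINE__> machinery above builds a compile-time chain of registration callbacks without keeping a central list. An illustrative standalone sketch of the same trick, with puts() standing in for OverrideFunction and a made-up REGISTER_HOOK macro:

#include <cstdio>

// Default: the registrar for line LINE simply chains to line LINE - 1.
template <int LINE>
struct Registrar {
  static void Execute() { Registrar<LINE - 1>::Execute(); }
};

// Stop the chain at line 0; no hook can be defined on a negative line.
template <>
struct Registrar<0> {
  static void Execute() {}
};

// Each use specializes Registrar for its own source line and links itself
// into the chain, mirroring INTERCEPT_WHEN_POSSIBLE above.
#define REGISTER_HOOK(msg)                        \
  template <> struct Registrar<__LINE__> {        \
    static void Execute() {                       \
      std::puts(msg);                             \
      Registrar<__LINE__ - 1>::Execute();         \
    }                                             \
  };

REGISTER_HOOK("hook A registered")
REGISTER_HOOK("hook B registered")

// Must come after the last REGISTER_HOOK, just like INTERCEPT_HOOKS().
int main() { Registrar<__LINE__>::Execute(); }  // prints B, then A

As in the DLL thunk, the callbacks run in reverse declaration order, and the final Execute() call has to appear after the last registration so that every specialization is already visible.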
diff --git a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
new file mode 100644
index 00000000000..f7358539a1c
--- /dev/null
+++ b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
@@ -0,0 +1,96 @@
+//===-- asan_win_dynamic_runtime_thunk.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines things that need to be present in the application modules
+// to interact with the ASan DLL runtime correctly and can't be implemented
+// using the default "import library" generated when linking the DLL RTL.
+//
+// This includes:
+// - forwarding the detect_stack_use_after_return runtime option
+// - working around deficiencies of the MD runtime
+// - installing a custom SEH handler
+//
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+// First, declare CRT sections we'll be using in this file
+#pragma section(".CRT$XID", long, read) // NOLINT
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+#pragma section(".CRT$XTW", long, read) // NOLINT
+#pragma section(".CRT$XTY", long, read) // NOLINT
+
+////////////////////////////////////////////////////////////////////////////////
+// Define a copy of __asan_option_detect_stack_use_after_return that should be
+// used when linking an MD runtime with a set of object files on Windows.
+//
+// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
+// so normally we would just dllimport it. Unfortunately, the dllimport
+// attribute adds an __imp_ prefix to the symbol name of a variable.
+// Since in general we don't know if a given TU is going to be used
+// with an MT or MD runtime and we don't want to use ugly __imp_ names on
+// Windows just to work around this issue, let's clone a variable that is
+// constant after initialization anyway.
+extern "C" {
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+int __asan_option_detect_stack_use_after_return =
+ __asan_should_detect_stack_use_after_return();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// For some reason, the MD CRT doesn't call the C/C++ terminators during DLL
+// unload or on exit. ASan relies on LLVM global_dtors to call
+// __asan_unregister_globals on these events, which unfortunately doesn't work
+// with the MD runtime, see PR22545 for the details.
+// To work around this, for each DLL we schedule a call to UnregisterGlobals
+// via atexit(); it runs the small subset of C terminators
+// where LLVM places its global_dtors. Fingers crossed, no other C terminators
+// are there.
+extern "C" void __cdecl _initterm(void *a, void *b);
+
+namespace {
+__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0;
+__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0;
+
+void UnregisterGlobals() {
+ _initterm(&before_global_dtors, &after_global_dtors);
+}
+
+int ScheduleUnregisterGlobals() {
+ return atexit(UnregisterGlobals);
+}
+
+// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
+// atexit() is initialized (.CRT$XIC). As this is executed before C++
+// initializers (think ctors for globals), UnregisterGlobals gets executed after
+// dtors for C++ globals.
+__declspec(allocate(".CRT$XID"))
+int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+// ASan SEH handling.
+// We need to set the ASan-specific SEH handler at the end of CRT initialization
+// of each module (see also asan_win.cc).
+extern "C" {
+__declspec(dllimport) int __asan_set_seh_filter();
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+
+// Unfortunately, putting a pointer to __asan_set_seh_filter into
+// __asan_intercept_seh gets optimized out, so we have to use an extra function.
+__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
+}
+
+#endif // ASAN_DYNAMIC_RUNTIME_THUNK
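
Both thunk files rely on the MSVC CRT walking pointer tables placed in ".CRT$X*" sections in section-name order. An illustrative standalone sketch of registering a C-initializer callback that way (MSVC-only; early_init is a made-up name; XI* callbacks return int, and a non-zero result aborts CRT startup in a DLL):

#include <cstdio>

// Runs during CRT C initialization, before C++ constructors of globals.
static int early_init() {
  std::puts("early_init (runs between .CRT$XIA and .CRT$XIZ)");
  return 0;
}

// Reserve a user slot in the C initializer table and drop a pointer there.
#pragma section(".CRT$XIB", long, read)
__declspec(allocate(".CRT$XIB")) int (*p_early_init)() = early_init;

int main() { std::puts("main"); }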
diff --git a/libsanitizer/asan/libtool-version b/libsanitizer/asan/libtool-version
index 9a16cf57844..7e838a5740b 100644
--- a/libsanitizer/asan/libtool-version
+++ b/libsanitizer/asan/libtool-version
@@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
-1:0:0
+3:0:0
diff --git a/libsanitizer/config.h.in b/libsanitizer/config.h.in
index e4b2786324e..7195840f2db 100644
--- a/libsanitizer/config.h.in
+++ b/libsanitizer/config.h.in
@@ -43,6 +43,9 @@
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
+/* Define to 1 if you have the <rpc/xdr.h> header file. */
+#undef HAVE_RPC_XDR_H
+
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
@@ -67,6 +70,9 @@
/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
+/* Define to 1 if you have the <tirpc/rpc/xdr.h> header file. */
+#undef HAVE_TIRPC_RPC_XDR_H
+
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
diff --git a/libsanitizer/configure b/libsanitizer/configure
index c550ad61057..12f2b8d26a9 100755
--- a/libsanitizer/configure
+++ b/libsanitizer/configure
@@ -604,8 +604,11 @@ ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
LIBOBJS
+TSAN_TARGET_DEPENDENT_OBJECTS
LIBBACKTRACE_SUPPORTED_FALSE
LIBBACKTRACE_SUPPORTED_TRUE
+OBSTACK_DEFS
+RPC_DEFS
BACKTRACE_SUPPORTS_THREADS
BACKTRACE_USES_MALLOC
ALLOC_FILE
@@ -12020,7 +12023,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12023 "configure"
+#line 12031 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -12126,7 +12129,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12129 "configure"
+#line 12137 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -15509,6 +15512,50 @@ done
# Common libraries that we need to link against for all sanitizer libs.
link_sanitizer_common='-lpthread -ldl -lm'
+# At least for glibc, shm_open is in librt. But don't pull that
+# in if it still doesn't give us the function we want. This
+# test is copied from libgomp.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for shm_open in -lrt" >&5
+$as_echo_n "checking for shm_open in -lrt... " >&6; }
+if test "${ac_cv_lib_rt_shm_open+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_check_lib_save_LIBS=$LIBS
+LIBS="-lrt $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+/* Override any GCC internal prototype to avoid an error.
+ Use char because int might match the return type of a GCC
+ builtin and then its argument prototype would still apply. */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shm_open ();
+int
+main ()
+{
+return shm_open ();
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+ ac_cv_lib_rt_shm_open=yes
+else
+ ac_cv_lib_rt_shm_open=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_rt_shm_open" >&5
+$as_echo "$ac_cv_lib_rt_shm_open" >&6; }
+if test "x$ac_cv_lib_rt_shm_open" = x""yes; then :
+ link_sanitizer_common="-lrt $link_sanitizer_common"
+fi
+
+
# Set up the set of additional libraries that we need to link against for libasan.
link_libasan=$link_sanitizer_common
@@ -15525,11 +15572,11 @@ link_libubsan=$link_sanitizer_common
link_liblsan=$link_sanitizer_common
+
# At least for glibc, clock_gettime is in librt. But don't pull that
# in if it still doesn't give us the function we want. This
# test is copied from libgomp.
-if test $ac_cv_func_clock_gettime = no; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for clock_gettime in -lrt" >&5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for clock_gettime in -lrt" >&5
$as_echo_n "checking for clock_gettime in -lrt... " >&6; }
if test "${ac_cv_lib_rt_clock_gettime+set}" = set; then :
$as_echo_n "(cached) " >&6
@@ -15572,7 +15619,6 @@ link_libtsan="-lrt $link_libtsan"
fi
-fi
case "$host" in
*-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;;
@@ -16340,6 +16386,80 @@ $as_echo "#define HAVE_GETEXECNAME 1" >>confdefs.h
fi
+# Check for rpc/xdr.h
+for ac_header in rpc/xdr.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "rpc/xdr.h" "ac_cv_header_rpc_xdr_h" "$ac_includes_default"
+if test "x$ac_cv_header_rpc_xdr_h" = x""yes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_RPC_XDR_H 1
+_ACEOF
+
+fi
+
+done
+
+if test x"$ac_cv_header_rpc_xdr_h" = xyes; then
+ rpc_defs="$rpc_defs -DHAVE_RPC_XDR_H=1"
+else
+ rpc_defs="$rpc_defs -DHAVE_RPC_XDR_H=0"
+fi
+
+# Check for tirpc/rpc/xdr.h
+for ac_header in tirpc/rpc/xdr.h
+do :
+ ac_fn_c_check_header_mongrel "$LINENO" "tirpc/rpc/xdr.h" "ac_cv_header_tirpc_rpc_xdr_h" "$ac_includes_default"
+if test "x$ac_cv_header_tirpc_rpc_xdr_h" = x""yes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_TIRPC_RPC_XDR_H 1
+_ACEOF
+
+fi
+
+done
+
+if test x"$ac_cv_header_tirpc_rpc_xdr_h" = xyes; then
+ rpc_defs="$rpc_defs -DHAVE_TIRPC_RPC_XDR_H=1"
+else
+ rpc_defs="$rpc_defs -DHAVE_TIRPC_RPC_XDR_H=0"
+fi
+
+RPC_DEFS=$rpc_defs
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking obstack params" >&5
+$as_echo_n "checking obstack params... " >&6; }
+if test "${libsanitizer_cv_sys_obstack+set}" = set; then :
+ $as_echo_n "(cached) " >&6
+else
+ save_cppflags=$CPPFLAGS
+CPPFLAGS="-I${srcdir}/../include -o conftest.iii $CPPFLAGS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include "obstack.h"
+#ifdef _OBSTACK_SIZE_T
+_OBSTACK_SIZE_T
+#else
+int
+#endif
+
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ libsanitizer_cv_sys_obstack=`sed -e '/^#/d;/^[ ]*$/d' conftest.iii | sed -e '$!d;s/size_t/SIZE_T/'`
+else
+ libsanitizer_cv_sys_obstack=int
+fi
+rm -f conftest.err conftest.$ac_ext
+CPPFLAGS=$save_cppflags
+rm -f conftest.iii
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libsanitizer_cv_sys_obstack" >&5
+$as_echo "$libsanitizer_cv_sys_obstack" >&6; }
+OBSTACK_DEFS=-D_OBSTACK_SIZE_T=\"$libsanitizer_cv_sys_obstack\"
+
+
if test "x${BACKTRACE_SUPPORTED}x${BACKTRACE_USES_MALLOC}" = "x1x0"; then
LIBBACKTRACE_SUPPORTED_TRUE=
LIBBACKTRACE_SUPPORTED_FALSE='#'
@@ -16363,6 +16483,12 @@ if test "x$TSAN_SUPPORTED" = "xyes"; then
fi
+case "${target}" in
+ x86_64-*-linux-*) TSAN_TARGET_DEPENDENT_OBJECTS='tsan_rtl_amd64.lo' ;;
+ *) TSAN_TARGET_DEPENDENT_OBJECTS='' ;;
+esac
+
+
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
diff --git a/libsanitizer/configure.ac b/libsanitizer/configure.ac
index eb8b3ba0d41..8d39b6a9119 100644
--- a/libsanitizer/configure.ac
+++ b/libsanitizer/configure.ac
@@ -98,6 +98,12 @@ AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime)
# Common libraries that we need to link against for all sanitizer libs.
link_sanitizer_common='-lpthread -ldl -lm'
+# At least for glibc, shm_open is in librt. But don't pull that
+# in if it still doesn't give us the function we want. This
+# test is copied from libgomp.
+AC_CHECK_LIB(rt, shm_open,
+ [link_sanitizer_common="-lrt $link_sanitizer_common"])
+
# Set up the set of additional libraries that we need to link against for libasan.
link_libasan=$link_sanitizer_common
AC_SUBST(link_libasan)
@@ -114,16 +120,15 @@ AC_SUBST(link_libubsan)
link_liblsan=$link_sanitizer_common
AC_SUBST(link_liblsan)
+
# At least for glibc, clock_gettime is in librt. But don't pull that
# in if it still doesn't give us the function we want. This
# test is copied from libgomp.
-if test $ac_cv_func_clock_gettime = no; then
- AC_CHECK_LIB(rt, clock_gettime,
- [link_libasan="-lrt $link_libasan"
+AC_CHECK_LIB(rt, clock_gettime,
+ [link_libasan="-lrt $link_libasan"
link_libtsan="-lrt $link_libtsan"
# Other sanitizers do not override clock_* API
])
-fi
case "$host" in
*-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;;
@@ -312,6 +317,48 @@ if test "$have_getexecname" = "yes"; then
AC_DEFINE(HAVE_GETEXECNAME, 1, [Define if getexecname is available.])
fi
+# Check for rpc/xdr.h
+AC_CHECK_HEADERS(rpc/xdr.h)
+if test x"$ac_cv_header_rpc_xdr_h" = xyes; then
+ rpc_defs="$rpc_defs -DHAVE_RPC_XDR_H=1"
+else
+ rpc_defs="$rpc_defs -DHAVE_RPC_XDR_H=0"
+fi
+
+# Check for tirpc/rpc/xdr.h
+AC_CHECK_HEADERS(tirpc/rpc/xdr.h)
+if test x"$ac_cv_header_tirpc_rpc_xdr_h" = xyes; then
+ rpc_defs="$rpc_defs -DHAVE_TIRPC_RPC_XDR_H=1"
+else
+ rpc_defs="$rpc_defs -DHAVE_TIRPC_RPC_XDR_H=0"
+fi
+
+AC_SUBST([RPC_DEFS], [$rpc_defs])
+
+dnl If this file is processed by autoconf-2.67 or later then the CPPFLAGS
+dnl "-o conftest.iii" can disappear, conftest.iii be replaced with
+dnl conftest.i in the sed command line, and the rm deleted.
+dnl Not all cpp's accept -o, and gcc -E does not accept a second file
+dnl argument as the output file.
+AC_CACHE_CHECK([obstack params],
+[libsanitizer_cv_sys_obstack],
+[save_cppflags=$CPPFLAGS
+CPPFLAGS="-I${srcdir}/../include -o conftest.iii $CPPFLAGS"
+AC_PREPROC_IFELSE([AC_LANG_SOURCE([
+#include "obstack.h"
+#ifdef _OBSTACK_SIZE_T
+_OBSTACK_SIZE_T
+#else
+int
+#endif
+])],
+[libsanitizer_cv_sys_obstack=`sed -e '/^#/d;/^[ ]*$/d' conftest.iii | sed -e '$!d;s/size_t/SIZE_T/'`],
+[libsanitizer_cv_sys_obstack=int])
+CPPFLAGS=$save_cppflags
+rm -f conftest.iii
+])
+AC_SUBST([OBSTACK_DEFS], [-D_OBSTACK_SIZE_T=\"$libsanitizer_cv_sys_obstack\"])
+
AM_CONDITIONAL(LIBBACKTRACE_SUPPORTED,
[test "x${BACKTRACE_SUPPORTED}x${BACKTRACE_USES_MALLOC}" = "x1x0"])
@@ -347,4 +394,10 @@ _EOF
])
fi
+case "${target}" in
+ x86_64-*-linux-*) TSAN_TARGET_DEPENDENT_OBJECTS='tsan_rtl_amd64.lo' ;;
+ *) TSAN_TARGET_DEPENDENT_OBJECTS='' ;;
+esac
+AC_SUBST([TSAN_TARGET_DEPENDENT_OBJECTS])
+
AC_OUTPUT
diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt
index 6de4a65715b..db77b000732 100644
--- a/libsanitizer/configure.tgt
+++ b/libsanitizer/configure.tgt
@@ -35,6 +35,13 @@ case "${target}" in
;;
arm*-*-linux*)
;;
+ aarch64*-*-linux*)
+ if test x$ac_cv_sizeof_void_p = x8; then
+ TSAN_SUPPORTED=yes
+ LSAN_SUPPORTED=yes
+ TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_aarch64.lo
+ fi
+ ;;
x86_64-*-darwin[1]* | i?86-*-darwin[1]*)
TSAN_SUPPORTED=no
;;
diff --git a/libsanitizer/include/sanitizer/allocator_interface.h b/libsanitizer/include/sanitizer/allocator_interface.h
new file mode 100644
index 00000000000..97a72a258eb
--- /dev/null
+++ b/libsanitizer/include/sanitizer/allocator_interface.h
@@ -0,0 +1,64 @@
+//===-- allocator_interface.h ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface header for allocator used in sanitizers (ASan/TSan/MSan).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Returns the estimated number of bytes that will be reserved by allocator
+ for request of "size" bytes. If allocator can't allocate that much
+ memory, returns the maximal possible allocation size, otherwise returns
+ "size". */
+ size_t __sanitizer_get_estimated_allocated_size(size_t size);
+
+ /* Returns true if p was returned by the allocator and
+ is not yet freed. */
+ int __sanitizer_get_ownership(const volatile void *p);
+
+ /* Returns the number of bytes reserved for the pointer p.
+ Requires (get_ownership(p) == true) or (p == 0). */
+ size_t __sanitizer_get_allocated_size(const volatile void *p);
+
+ /* Number of bytes, allocated and not yet freed by the application. */
+ size_t __sanitizer_get_current_allocated_bytes();
+
+ /* Number of bytes, mmaped by the allocator to fulfill allocation requests.
+ Generally, for request of X bytes, allocator can reserve and add to free
+ lists a large number of chunks of size X to use them for future requests.
+ All these chunks count toward the heap size. Currently, allocator never
+ releases memory to OS (instead, it just puts freed chunks to free
+ lists). */
+ size_t __sanitizer_get_heap_size();
+
+ /* Number of bytes, mmaped by the allocator, which can be used to fulfill
+ allocation requests. When a user program frees memory chunk, it can first
+ fall into quarantine and will count toward __sanitizer_get_free_bytes()
+ later. */
+ size_t __sanitizer_get_free_bytes();
+
+ /* Number of bytes in unmapped pages, that are released to OS. Currently,
+ always returns 0. */
+ size_t __sanitizer_get_unmapped_bytes();
+
+ /* Malloc hooks that may be optionally provided by user.
+ __sanitizer_malloc_hook(ptr, size) is called immediately after
+ allocation of "size" bytes, which returned "ptr".
+ __sanitizer_free_hook(ptr) is called immediately before
+ deallocation of "ptr". */
+ void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
+ void __sanitizer_free_hook(const volatile void *ptr);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
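
A sketch of how a user program might define the optional hooks declared in this header (assumes a sanitizer-instrumented build; printing from a hook is fine for a demo, but a real hook should avoid re-entering the allocator):

#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

// Strong definitions override the runtime's overridable defaults.
extern "C" void __sanitizer_malloc_hook(const volatile void *ptr, size_t size) {
  std::fprintf(stderr, "allocated %zu bytes at %p\n", size, (void *)ptr);
}
extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
  std::fprintf(stderr, "freeing %p\n", (void *)ptr);
}

int main() {
  void *p = std::malloc(128);  // triggers __sanitizer_malloc_hook
  std::free(p);                // triggers __sanitizer_free_hook
  std::fprintf(stderr, "heap size: %zu\n", __sanitizer_get_heap_size());
}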
diff --git a/libsanitizer/include/sanitizer/asan_interface.h b/libsanitizer/include/sanitizer/asan_interface.h
index 0016339e486..448a0bcba01 100644
--- a/libsanitizer/include/sanitizer/asan_interface.h
+++ b/libsanitizer/include/sanitizer/asan_interface.h
@@ -48,29 +48,67 @@ extern "C" {
((void)(addr), (void)(size))
#endif
- // Returns true iff addr is poisoned (i.e. 1-byte read/write access to this
+ // Returns 1 if addr is poisoned (i.e. 1-byte read/write access to this
// address will result in error report from AddressSanitizer).
- bool __asan_address_is_poisoned(void const volatile *addr);
+ // Otherwise returns 0.
+ int __asan_address_is_poisoned(void const volatile *addr);
- // If at least on byte in [beg, beg+size) is poisoned, return the address
+ // If at least one byte in [beg, beg+size) is poisoned, return the address
// of the first such byte. Otherwise return 0.
void *__asan_region_is_poisoned(void *beg, size_t size);
// Print the description of addr (useful when debugging in gdb).
void __asan_describe_address(void *addr);
+ // Useful for calling from a debugger to get information about an ASan error.
+ // Returns 1 if an error has been (or is being) reported, otherwise returns 0.
+ int __asan_report_present();
+
+ // Useful for calling from a debugger to get information about an ASan error.
+ // If an error has been (or is being) reported, the following functions return
+ // the pc, bp, sp, address, access type (0 = read, 1 = write), access size and
+ // bug description (e.g. "heap-use-after-free"). Otherwise they return 0.
+ void *__asan_get_report_pc();
+ void *__asan_get_report_bp();
+ void *__asan_get_report_sp();
+ void *__asan_get_report_address();
+ int __asan_get_report_access_type();
+ size_t __asan_get_report_access_size();
+ const char *__asan_get_report_description();
+
+ // Useful for calling from the debugger to get information about a pointer.
+ // Returns the category of the given pointer as a constant string.
+ // Possible return values are "global", "stack", "stack-fake", "heap",
+ // "heap-invalid", "shadow-low", "shadow-gap", "shadow-high", "unknown".
+ // If global or stack, tries to also return the variable name, address and
+ // size. If heap, tries to return the chunk address and size. 'name' should
+ // point to an allocated buffer of size 'name_size'.
+ const char *__asan_locate_address(void *addr, char *name, size_t name_size,
+ void **region_address, size_t *region_size);
+
+ // Useful for calling from the debugger to get the allocation stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the free stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the current shadow memory
+ // mapping.
+ void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
+
// This is an internal function that is called to report an error.
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
void __asan_report_error(void *pc, void *bp, void *sp,
- void *addr, bool is_write, size_t access_size);
-
- // Sets the exit code to use when reporting an error.
- // Returns the old value.
- int __asan_set_error_exit_code(int exit_code);
+ void *addr, int is_write, size_t access_size);
- // Sets the callback to be called right before death on error.
- // Passing 0 will unset the callback.
+ // Deprecated. Call __sanitizer_set_death_callback instead.
void __asan_set_death_callback(void (*callback)(void));
void __asan_set_error_report_callback(void (*callback)(const char*));
@@ -80,40 +118,6 @@ extern "C" {
// the program crashes before ASan report is printed.
void __asan_on_error();
- // User may provide its own implementation for symbolization function.
- // It should print the description of instruction at address "pc" to
- // "out_buffer". Description should be at most "out_size" bytes long.
- // User-specified function should return true if symbolization was
- // successful.
- bool __asan_symbolize(const void *pc, char *out_buffer,
- int out_size);
-
- // Returns the estimated number of bytes that will be reserved by allocator
- // for request of "size" bytes. If ASan allocator can't allocate that much
- // memory, returns the maximal possible allocation size, otherwise returns
- // "size".
- size_t __asan_get_estimated_allocated_size(size_t size);
- // Returns true if p was returned by the ASan allocator and
- // is not yet freed.
- bool __asan_get_ownership(const void *p);
- // Returns the number of bytes reserved for the pointer p.
- // Requires (get_ownership(p) == true) or (p == 0).
- size_t __asan_get_allocated_size(const void *p);
- // Number of bytes, allocated and not yet freed by the application.
- size_t __asan_get_current_allocated_bytes();
- // Number of bytes, mmaped by asan allocator to fulfill allocation requests.
- // Generally, for request of X bytes, allocator can reserve and add to free
- // lists a large number of chunks of size X to use them for future requests.
- // All these chunks count toward the heap size. Currently, allocator never
- // releases memory to OS (instead, it just puts freed chunks to free lists).
- size_t __asan_get_heap_size();
- // Number of bytes, mmaped by asan allocator, which can be used to fulfill
- // allocation requests. When a user program frees memory chunk, it can first
- // fall into quarantine and will count toward __asan_get_free_bytes() later.
- size_t __asan_get_free_bytes();
- // Number of bytes in unmapped pages, that are released to OS. Currently,
- // always returns 0.
- size_t __asan_get_unmapped_bytes();
// Prints accumulated stats to stderr. Used for debugging.
void __asan_print_accumulated_stats();
@@ -121,13 +125,23 @@ extern "C" {
// a string containing ASan runtime options. See asan_flags.h for details.
const char* __asan_default_options();
- // Malloc hooks that may be optionally provided by user.
- // __asan_malloc_hook(ptr, size) is called immediately after
- // allocation of "size" bytes, which returned "ptr".
- // __asan_free_hook(ptr) is called immediately before
- // deallocation of "ptr".
- void __asan_malloc_hook(void *ptr, size_t size);
- void __asan_free_hook(void *ptr);
+ // The following 2 functions facilitate garbage collection in presence of
+ // asan's fake stack.
+
+ // Returns an opaque handler to be used later in __asan_addr_is_in_fake_stack.
+ // Returns NULL if the current thread does not have a fake stack.
+ void *__asan_get_current_fake_stack();
+
+ // If fake_stack is non-NULL and addr belongs to a fake frame in
+ // fake_stack, returns the address on real stack that corresponds to
+ // the fake frame and sets beg/end to the boundaries of this fake frame.
+ // Otherwise returns NULL and does not touch beg/end.
+ // If beg/end are NULL, they are not touched.
+ // This function may be called from a thread other than the owner of
+ // fake_stack, but the owner thread needs to be alive.
+ void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+
#ifdef __cplusplus
} // extern "C"
#endif
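
A sketch of how an application might use the __asan_default_options() and __asan_on_error() extension points declared above together with the new report getters (assumes a -fsanitize=address build; the flag string is only an example):

#include <sanitizer/asan_interface.h>
#include <cstdio>

// Parsed by the runtime at startup; ASAN_OPTIONS in the environment still
// takes precedence over these defaults.
extern "C" const char *__asan_default_options() {
  return "verbosity=1:check_initialization_order=1";
}

// Convenient breakpoint location; the report getters are usable here.
extern "C" void __asan_on_error() {
  std::fprintf(stderr, "ASan error: %s at pc=%p addr=%p\n",
               __asan_get_report_description(), __asan_get_report_pc(),
               __asan_get_report_address());
}

int main() {
  char *p = new char[8];
  char c = p[8];   // heap-buffer-overflow: the callback above will fire
  delete[] p;
  return c;
}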
diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h
index db8b3b543e3..35463e08fbb 100644
--- a/libsanitizer/include/sanitizer/common_interface_defs.h
+++ b/libsanitizer/include/sanitizer/common_interface_defs.h
@@ -22,13 +22,28 @@
#ifdef __cplusplus
extern "C" {
#endif
+ // Arguments for __sanitizer_sandbox_on_notify() below.
+ typedef struct {
+ // Enable sandbox support in sanitizer coverage.
+ int coverage_sandboxed;
+ // File descriptor to write coverage data to. If -1 is passed, a file will
+ // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
+ // effect if coverage_sandboxed == 0.
+ intptr_t coverage_fd;
+ // If non-zero, split the coverage data into well-formed blocks. This is
+ // useful when coverage_fd is a socket descriptor. Each block will contain
+ // a header, allowing data from multiple processes to be sent over the same
+ // socket.
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
- void __sanitizer_sandbox_on_notify(void *reserved);
+ void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@@ -45,16 +60,13 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
- // Record and dump coverage info.
- void __sanitizer_cov_dump();
-
// Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar.
// A contiguous container is a container that keeps all of its elements
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
- // end <= mid <= end. For example, in "std::vector<> v"
+ // beg <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
@@ -82,10 +94,38 @@ extern "C" {
const void *end,
const void *old_mid,
const void *new_mid);
+ // Returns true if the contiguous container [beg, end) is properly poisoned
+ // (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
+ // - [beg, mid) is addressable,
+ // - [mid, end) is unaddressable.
+ // Full verification requires O(end-beg) time; this function tries to avoid
+ // such complexity by touching only parts of the container around beg/mid/end.
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+
+ // Similar to __sanitizer_verify_contiguous_container but returns the address
+ // of the first improperly poisoned byte otherwise. Returns null if the area
+ // is poisoned properly.
+ const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg, const void *mid, const void *end);
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
+ // Sets the callback to be called right before death on error.
+ // Passing 0 will unset the callback.
+ void __sanitizer_set_death_callback(void (*callback)(void));
+
+ // Interceptor hooks.
+ // Whenever a libc function interceptor is called it checks if the
+ // corresponding weak hook is defined, and if so -- calls it.
+ // The primary use case is data-flow-guided fuzzing, where the fuzzer needs
+ // to know what is being passed to libc functions, e.g. memcmp.
+ // FIXME: implement more hooks.
+ void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
+ const void *s2, size_t n);
+ void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
+ const char *s2, size_t n);
#ifdef __cplusplus
} // extern "C"
#endif
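
A sketch of how a container implementation might drive the annotation API documented above to keep its spare capacity poisoned (assumes an ASan build; the buffer comes from malloc, so its start is suitably aligned):

#include <sanitizer/common_interface_defs.h>
#include <cstdlib>

// A toy byte vector: [beg, mid) holds live elements, [mid, end) is spare
// capacity that should stay poisoned under ASan.
struct ByteVec {
  char *beg, *mid, *end;
};

void bytevec_init(ByteVec *v, size_t cap) {
  v->beg = static_cast<char *>(std::malloc(cap));
  v->mid = v->beg;
  v->end = v->beg + cap;
  // Fresh storage is fully addressable (old_mid == end); mark all of it as
  // unused (new_mid == beg).
  __sanitizer_annotate_contiguous_container(v->beg, v->end, v->end, v->beg);
}

void bytevec_push(ByteVec *v, char c) {
  // Unpoison exactly one more byte before writing it.
  __sanitizer_annotate_contiguous_container(v->beg, v->end, v->mid, v->mid + 1);
  *v->mid++ = c;
}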
diff --git a/libsanitizer/include/sanitizer/coverage_interface.h b/libsanitizer/include/sanitizer/coverage_interface.h
new file mode 100644
index 00000000000..37c133aae3c
--- /dev/null
+++ b/libsanitizer/include/sanitizer/coverage_interface.h
@@ -0,0 +1,63 @@
+//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface for sanitizer coverage.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COVERAG_INTERFACE_H
+#define SANITIZER_COVERAG_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ // Initialize coverage.
+ void __sanitizer_cov_init();
+ // Record and dump coverage info.
+ void __sanitizer_cov_dump();
+ // Open <name>.sancov.packed in the coverage directory and return the file
+ // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
+ // This is intended for use by sandboxing code.
+ intptr_t __sanitizer_maybe_open_cov_file(const char *name);
+ // Get the number of unique covered blocks (or edges).
+ // This can be useful for coverage-directed in-process fuzzers.
+ uintptr_t __sanitizer_get_total_unique_coverage();
+ // Get the number of unique indirect caller-callee pairs.
+ uintptr_t __sanitizer_get_total_unique_caller_callee_pairs();
+
+ // Reset the basic-block (edge) coverage to the initial state.
+ // Useful for in-process fuzzing to start collecting coverage from scratch.
+ // Experimental; will likely not work for a multi-threaded process.
+ void __sanitizer_reset_coverage();
+ // Set *data to the array of covered PCs and return the size of that array.
+ // Some of the entries in *data will be zero.
+ uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
+
+ // The coverage instrumentation may optionally provide imprecise counters.
+ // Rather than exposing the counter values to the user we instead map
+ // the counters to a bitset.
+ // Every counter is associated with 8 bits in the bitset.
+ // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
+ // The i-th bit is set to 1 if the counter value is in the i-th range.
+ // This counter-based coverage implementation is *not* thread-safe.
+
+ // Returns the number of registered coverage counters.
+ uintptr_t __sanitizer_get_number_of_counters();
+ // Updates the counter 'bitset', clears the counters and returns the number of
+ // new bits in 'bitset'.
+ // If 'bitset' is nullptr, only clears the counters.
+ // Otherwise 'bitset' should be at least
+ // __sanitizer_get_number_of_counters bytes long and 8-aligned.
+ uintptr_t
+ __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COVERAG_INTERFACE_H
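
A sketch of the in-process fuzzing loop these reset/query entry points are meant for (assumes a single-threaded binary built with -fsanitize-coverage; run_one_input is a made-up target):

#include <sanitizer/coverage_interface.h>
#include <cstdint>
#include <cstdio>

// Hypothetical code under test: has a branch so coverage can actually grow.
static void run_one_input(const uint8_t *data, size_t size) {
  if (size > 0 && data[0] == 'A')
    std::puts("took the interesting branch");
}

static void fuzz_one(const uint8_t *data, size_t size) {
  uintptr_t before = __sanitizer_get_total_unique_coverage();
  run_one_input(data, size);
  uintptr_t after = __sanitizer_get_total_unique_coverage();
  if (after > before)
    std::printf("input uncovered %zu new blocks\n", (size_t)(after - before));
}

int main() {
  const uint8_t boring[] = {'B'}, interesting[] = {'A'};
  __sanitizer_reset_coverage();  // start counting from scratch
  fuzz_one(boring, 1);
  fuzz_one(interesting, 1);
}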
diff --git a/libsanitizer/include/sanitizer/dfsan_interface.h b/libsanitizer/include/sanitizer/dfsan_interface.h
index dd938483d18..0cebccf945e 100644
--- a/libsanitizer/include/sanitizer/dfsan_interface.h
+++ b/libsanitizer/include/sanitizer/dfsan_interface.h
@@ -37,6 +37,9 @@ struct dfsan_label_info {
void *userdata;
};
+/// Signature of the callback argument to dfsan_set_write_callback().
+typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
@@ -72,6 +75,32 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
+/// Returns the number of labels allocated.
+size_t dfsan_get_label_count(void);
+
+/// Sets a callback to be invoked on calls to write(). The callback is invoked
+/// before the write is done. The write is not guaranteed to succeed when the
+/// callback executes. Pass in NULL to remove any callback.
+void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
+
+/// Writes the labels currently used by the program to the given file
+/// descriptor. The lines of the output have the following format:
+///
+/// <label> <parent label 1> <parent label 2> <label description if any>
+void dfsan_dump_labels(int fd);
+
+/// Interceptor hooks.
+/// Whenever one of dfsan's custom functions is called, the corresponding
+/// hook is called if it is non-zero. The hooks should be defined by the user.
+/// The primary use case is taint-guided fuzzing, where the fuzzer
+/// needs to see the parameters of the function and the labels.
+/// FIXME: implement more hooks.
+void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label);
+void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label);
#ifdef __cplusplus
} // extern "C"
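A small usage sketch for the callback and label-introspection entry points added above, assuming a program built with DataFlowSanitizer; the logging itself is illustrative only.

    #include <sanitizer/dfsan_interface.h>
    #include <stdio.h>

    // Invoked before every intercepted write(); see dfsan_set_write_callback().
    static void log_write(int fd, const void *buf, size_t count) {
      (void)buf;
      fprintf(stderr, "write(fd=%d, %zu bytes); %zu labels allocated\n",
              fd, count, dfsan_get_label_count());
    }

    void install_write_hook(void) {
      dfsan_set_write_callback(log_write);  // pass NULL to remove it again
    }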
diff --git a/libsanitizer/include/sanitizer/lsan_interface.h b/libsanitizer/include/sanitizer/lsan_interface.h
index ec9c730eee3..bdbe39084f7 100644
--- a/libsanitizer/include/sanitizer/lsan_interface.h
+++ b/libsanitizer/include/sanitizer/lsan_interface.h
@@ -21,20 +21,52 @@ extern "C" {
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
+
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
+
+ // Memory regions registered through this interface will be treated as sources
+ // of live pointers during leak checking. Useful if you store pointers in
+ // mapped memory.
+ // Points of note:
+ // - __lsan_unregister_root_region() must be called with the same pointer and
+ // size that have earlier been passed to __lsan_register_root_region()
+ // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+ // if you map memory within a larger region that you have mprotect'ed, you can
+ // register the entire large region.
+ // - the implementation is not optimized for performance. This interface is
+ // intended to be used for a small number of relatively static regions.
+ void __lsan_register_root_region(const void *p, size_t size);
+ void __lsan_unregister_root_region(const void *p, size_t size);
+
+ // Check for leaks now. This function behaves identically to the default
+ // end-of-process leak check. In particular, it will terminate the process if
+ // leaks are found and the exitcode runtime flag is non-zero.
+ // Subsequent calls to this function will have no effect and end-of-process
+ // leak check will not run. Effectively, end-of-process leak check is moved to
+ // the time of first invocation of this function.
+ // By calling this function early during process shutdown, you can instruct
+ // LSan to ignore shutdown-only leaks which happen later on.
+ void __lsan_do_leak_check();
+
+ // Check for leaks now. Returns zero if no leaks have been found or if leak
+ // detection is disabled, non-zero otherwise.
+ // This function may be called repeatedly, e.g. to periodically check a
+ // long-running process. It prints a leak report if appropriate, but does not
+ // terminate the process. It does not affect the behavior of
+ // __lsan_do_leak_check() or the end-of-process leak check, and is not
+ // affected by them.
+ int __lsan_do_recoverable_leak_check();
+
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
- // Calling this function makes LSan enter the leak checking phase immediately.
- // Use this if normal end-of-process leak checking happens too late (e.g. if
- // you have intentional memory leaks in your shutdown code). Calling this
- // function overrides end-of-process leak checking; it must be called at
- // most once per process. This function will terminate the process if there
- // are memory leaks and the exit_code flag is non-zero.
- void __lsan_do_leak_check();
+
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan suppressions.
+ const char *__lsan_default_suppressions();
#ifdef __cplusplus
} // extern "C"
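A sketch of the new root-region and on-demand leak-check entry points, assuming a program running under LSan; the mmap'ed buffer stands in for any mapped memory that holds live pointers.

    #include <sanitizer/lsan_interface.h>
    #include <stddef.h>
    #include <sys/mman.h>

    void example(void) {
      const size_t kSize = 1 << 20;
      void *region = mmap(0, kSize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (region == MAP_FAILED) return;

      // Make pointers stored in this mapping visible to the leak scanner.
      __lsan_register_root_region(region, kSize);

      // ... run the workload ...

      // Non-fatal, repeatable check: prints a report but keeps running.
      int leaked = __lsan_do_recoverable_leak_check();
      (void)leaked;

      // Must use the same pointer and size as at registration time.
      __lsan_unregister_root_region(region, kSize);
    }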
diff --git a/libsanitizer/include/sanitizer/msan_interface.h b/libsanitizer/include/sanitizer/msan_interface.h
index f531cf347c9..92793a19bde 100644
--- a/libsanitizer/include/sanitizer/msan_interface.h
+++ b/libsanitizer/include/sanitizer/msan_interface.h
@@ -17,19 +17,17 @@
#ifdef __cplusplus
extern "C" {
#endif
-
-#if __has_feature(memory_sanitizer)
- /* Returns a string describing a stack origin.
- Return NULL if the origin is invalid, or is not a stack origin. */
- const char *__msan_get_origin_descr_if_stack(uint32_t id);
-
-
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
/* Get raw origin for an address. */
uint32_t __msan_get_origin(const volatile void *a);
+ /* Test that this_id is a descendant of prev_id (or they are simply equal).
+ * "descendant" here means they are part of the same chain, created with
+ * __msan_chain_origin. */
+ int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
+
/* Returns non-zero if tracking origins. */
int __msan_get_track_origins();
@@ -39,7 +37,13 @@ extern "C" {
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
- /* Make memory region fully uninitialized (without changing its contents). */
+ /* Make a null-terminated string fully initialized (without changing its
+ contents). */
+ void __msan_unpoison_string(const volatile char *a);
+
+ /* Make memory region fully uninitialized (without changing its contents).
+ This is a legacy interface that does not update origin information. Use
+ __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);
/* Make memory region partially uninitialized (without changing its contents).
@@ -51,9 +55,9 @@ extern "C" {
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
- /* Set exit code when error(s) were detected.
- Value of 0 means don't change the program exit code. */
- void __msan_set_exit_code(int exit_code);
+ /* Checks that memory range is fully initialized, and reports an error if it
+ * is not. */
+ void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
/* For testing:
__msan_set_expect_umr(1);
@@ -67,13 +71,13 @@ extern "C" {
modules that were compiled without the corresponding compiler flag. */
void __msan_set_keep_going(int keep_going);
- /* Print shadow and origin for the memory range to stdout in a human-readable
+ /* Print shadow and origin for the memory range to stderr in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
- /* Print current function arguments shadow and origin to stdout in a
+ /* Print shadow for the memory range to stderr in a minimalistic
human-readable format. */
- void __msan_print_param_shadow();
+ void __msan_dump_shadow(const volatile void *x, size_t size);
/* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component();
@@ -82,77 +86,22 @@ extern "C" {
Memory will be marked uninitialized, with origin at the call site. */
void __msan_allocated_memory(const volatile void* data, size_t size);
+ /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
+ void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+
/* This function may be optionally provided by user and should return
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
+ /* Deprecated. Call __sanitizer_set_death_callback instead. */
+ void __msan_set_death_callback(void (*callback)(void));
- /***********************************/
- /* Allocator statistics interface. */
-
- /* Returns the estimated number of bytes that will be reserved by allocator
- for request of "size" bytes. If Msan allocator can't allocate that much
- memory, returns the maximal possible allocation size, otherwise returns
- "size". */
- size_t __msan_get_estimated_allocated_size(size_t size);
-
- /* Returns true if p was returned by the Msan allocator and
- is not yet freed. */
- int __msan_get_ownership(const volatile void *p);
-
- /* Returns the number of bytes reserved for the pointer p.
- Requires (get_ownership(p) == true) or (p == 0). */
- size_t __msan_get_allocated_size(const volatile void *p);
-
- /* Number of bytes, allocated and not yet freed by the application. */
- size_t __msan_get_current_allocated_bytes();
-
- /* Number of bytes, mmaped by msan allocator to fulfill allocation requests.
- Generally, for request of X bytes, allocator can reserve and add to free
- lists a large number of chunks of size X to use them for future requests.
- All these chunks count toward the heap size. Currently, allocator never
- releases memory to OS (instead, it just puts freed chunks to free
- lists). */
- size_t __msan_get_heap_size();
-
- /* Number of bytes, mmaped by msan allocator, which can be used to fulfill
- allocation requests. When a user program frees memory chunk, it can first
- fall into quarantine and will count toward __msan_get_free_bytes()
- later. */
- size_t __msan_get_free_bytes();
-
- /* Number of bytes in unmapped pages, that are released to OS. Currently,
- always returns 0. */
- size_t __msan_get_unmapped_bytes();
-
- /* Malloc hooks that may be optionally provided by user.
- __msan_malloc_hook(ptr, size) is called immediately after
- allocation of "size" bytes, which returned "ptr".
- __msan_free_hook(ptr) is called immediately before
- deallocation of "ptr". */
- void __msan_malloc_hook(const volatile void *ptr, size_t size);
- void __msan_free_hook(const volatile void *ptr);
-
-#else // __has_feature(memory_sanitizer)
-
-#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
-#define __msan_set_origin(a, size, origin)
-#define __msan_get_origin(a) ((uint32_t)-1)
-#define __msan_get_track_origins() (0)
-#define __msan_get_umr_origin() ((uint32_t)-1)
-#define __msan_unpoison(a, size)
-#define __msan_poison(a, size)
-#define __msan_partial_poison(data, shadow, size)
-#define __msan_test_shadow(x, size) ((intptr_t)-1)
-#define __msan_set_exit_code(exit_code)
-#define __msan_set_expect_umr(expect_umr)
-#define __msan_print_shadow(x, size)
-#define __msan_print_param_shadow()
-#define __msan_has_dynamic_component() (0)
-#define __msan_allocated_memory(data, size)
-
-#endif // __has_feature(memory_sanitizer)
-
+ /* Update shadow for the application copy of size bytes from src to dst.
+ Src and dst are application addresses. This function does not copy the
+ actual application memory, it only updates shadow and origin for such
+ copy. Source and destination regions can overlap. */
+ void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
+ size_t size);
#ifdef __cplusplus
} // extern "C"
#endif
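A minimal sketch of the renamed and added MSan entry points above, assuming the program is built with MemorySanitizer; fixup_shadow() is a hypothetical helper name.

    #include <sanitizer/msan_interface.h>
    #include <stddef.h>

    // dst was filled from src by uninstrumented code, so MSan never saw the copy.
    void fixup_shadow(char *dst, const char *src, size_t n) {
      // Mirror the copy in shadow and origin without touching the data itself.
      __msan_copy_shadow(dst, src, n);
      // Report immediately if any byte of dst is still uninitialized.
      __msan_check_mem_is_initialized(dst, n);
    }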
diff --git a/libsanitizer/tsan/tsan_interface_atomic.h b/libsanitizer/include/sanitizer/tsan_interface_atomic.h
index c500614acc4..d19c9109416 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.h
+++ b/libsanitizer/include/sanitizer/tsan_interface_atomic.h
@@ -7,14 +7,11 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
+// Public interface header for TSan atomics.
//===----------------------------------------------------------------------===//
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
-#ifndef INTERFACE_ATTRIBUTE
-# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -23,14 +20,12 @@ typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
-
#if defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
+# define __TSAN_HAS_INT128 1
#else
-typedef char __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
+# define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
@@ -45,159 +40,181 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#endif
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
- __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#endif
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order fail_mo);
+#endif
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#endif
-void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
-#undef INTERFACE_ATTRIBUTE
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+#endif // TSAN_INTERFACE_ATOMIC_H
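With INTERFACE_ATTRIBUTE dropped and the 128-bit declarations guarded, the header can be used like any other public sanitizer header. A small sketch; the __tsan_memory_order_acquire/release enumerator names are assumed from the upstream header, since only the enum tag is visible in this hunk.

    #include <sanitizer/tsan_interface_atomic.h>

    static __tsan_atomic32 flag;

    void publish(void) {
      __tsan_atomic32_store(&flag, 1, __tsan_memory_order_release);
    }

    int consume(void) {
      return __tsan_atomic32_load(&flag, __tsan_memory_order_acquire);
    }

    #if __TSAN_HAS_INT128
    // The 128-bit variants are only declared (and usable) when this is set.
    static __tsan_atomic128 wide;
    #endif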
diff --git a/libsanitizer/include/system/asm/ptrace.h b/libsanitizer/include/system/asm/ptrace.h
new file mode 100644
index 00000000000..5d2fe9becf0
--- /dev/null
+++ b/libsanitizer/include/system/asm/ptrace.h
@@ -0,0 +1,7 @@
+#include_next <asm/ptrace.h>
+/* ARM_VFPREGS_SIZE was added in Linux 3.0 */
+#if defined(__arm__) && !defined(ARM_VFPREGS_SIZE)
+/* The size of the user-visible VFP state as seen by PTRACE_GET/SETVFPREGS
+ and core dumps. */
+#define ARM_VFPREGS_SIZE ( 32 * 8 /*fpregs*/ + 4 /*fpscr*/ )
+#endif
diff --git a/libsanitizer/interception/Makefile.am b/libsanitizer/interception/Makefile.am
index e9fbe6a467d..4fb69a963d3 100644
--- a/libsanitizer/interception/Makefile.am
+++ b/libsanitizer/interception/Makefile.am
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libinterception.la
diff --git a/libsanitizer/interception/Makefile.in b/libsanitizer/interception/Makefile.in
index 5ac217dcc4b..be7a09d279e 100644
--- a/libsanitizer/interception/Makefile.in
+++ b/libsanitizer/interception/Makefile.in
@@ -135,6 +135,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -146,10 +147,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -225,7 +228,8 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libinterception.la
interception_files = \
diff --git a/libsanitizer/interception/interception.h b/libsanitizer/interception/interception.h
index 7393f4c26be..f2d48c9332f 100644
--- a/libsanitizer/interception/interception.h
+++ b/libsanitizer/interception/interception.h
@@ -13,7 +13,8 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
-#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32)
# error "Interception doesn't work on this operating system."
#endif
@@ -119,19 +120,23 @@ const interpose_substitution substitution_##func_name[] \
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif defined(_WIN32)
-# if defined(_DLL) // DLL CRT
-# define WRAP(x) x
-# define WRAPPER_NAME(x) #x
-# define INTERCEPTOR_ATTRIBUTE
-# else // Static CRT
-# define WRAP(x) wrap_##x
-# define WRAPPER_NAME(x) "wrap_"#x
-# define INTERCEPTOR_ATTRIBUTE
-# endif
+# define WRAP(x) __asan_wrap_##x
+# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
+#elif defined(__FreeBSD__)
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+// FreeBSD's dynamic linker (non-compliantly) gives non-weak symbols higher
+// priority than weak ones so weak aliases won't work for indirect calls
+// in position-independent (-fPIC / -fPIE) mode.
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((alias("__interceptor_" #func), visibility("default")));
#else
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
@@ -212,7 +217,6 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
FUNC_TYPE(func) PTR_TO_REAL(func); \
} \
- DECLARE_WRAPPER_WINAPI(ret_type, func, __VA_ARGS__) \
extern "C" \
INTERCEPTOR_ATTRIBUTE \
ret_type __stdcall WRAP(func)(__VA_ARGS__)
@@ -233,11 +237,11 @@ typedef unsigned long uptr; // NOLINT
#define INCLUDED_FROM_INTERCEPTION_LIB
-#if defined(__linux__)
+#if defined(__linux__) || defined(__FreeBSD__)
# include "interception_linux.h"
-# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
- INTERCEPT_FUNCTION_VER_LINUX(func, symver)
+ INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
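To make the new FreeBSD branch concrete, this is roughly what DECLARE_WRAPPER expands to there for a hypothetical strlen interceptor (hand-expanded for illustration; the real expansion comes from the macros above).

    #include <stddef.h>

    // DECLARE_WRAPPER(size_t, strlen, const char *s) on __FreeBSD__ becomes:
    extern "C" size_t strlen(const char *s)
        __attribute__((alias("__interceptor_strlen"), visibility("default")));

    // The exported strlen is thus a strong, default-visibility alias of
    // __interceptor_strlen (i.e. WRAP(strlen)), sidestepping the weak-symbol
    // behaviour of the FreeBSD dynamic linker described in the comment above.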
diff --git a/libsanitizer/interception/interception_linux.cc b/libsanitizer/interception/interception_linux.cc
index 0a8df474ab4..0a8305b0bf8 100644
--- a/libsanitizer/interception/interception_linux.cc
+++ b/libsanitizer/interception/interception_linux.cc
@@ -10,10 +10,10 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
#include "interception.h"
-#include <dlfcn.h> // for dlsym
+#include <dlfcn.h> // for dlsym() and dlvsym()
namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
@@ -31,4 +31,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver) {
} // namespace __interception
-#endif // __linux__
+#endif // __linux__ || __FreeBSD__
diff --git a/libsanitizer/interception/interception_linux.h b/libsanitizer/interception/interception_linux.h
index 5ab24db438e..61bf48a7233 100644
--- a/libsanitizer/interception/interception_linux.h
+++ b/libsanitizer/interception/interception_linux.h
@@ -10,7 +10,7 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__)
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
@@ -26,20 +26,20 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
-#define INTERCEPT_FUNCTION_LINUX(func) \
- ::__interception::GetRealFunctionAddress( \
- #func, (::__interception::uptr*)&REAL(func), \
- (::__interception::uptr)&(func), \
- (::__interception::uptr)&WRAP(func))
+#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
+ ::__interception::GetRealFunctionAddress( \
+ #func, (::__interception::uptr *)&__interception::PTR_TO_REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
-# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
- ::__interception::real_##func = (func##_f)(unsigned long) \
- ::__interception::GetFuncAddrVer(#func, symver)
+#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ (::__interception::real_##func = (func##_f)( \
+ unsigned long)::__interception::GetFuncAddrVer(#func, symver))
#else
-# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
- INTERCEPT_FUNCTION_LINUX(func)
+#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
-#endif // __linux__
+#endif // __linux__ || __FreeBSD__
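For reference, the renamed macro expands the same way the old Linux-only one did; hand-expanded below for a hypothetical read interceptor (PTR_TO_REAL and WRAP expand further, per interception.h, to real_read and __interceptor_read).

    // INTERCEPT_FUNCTION(read) now becomes, via the LINUX_OR_FREEBSD macro:
    ::__interception::GetRealFunctionAddress(
        "read", (::__interception::uptr *)&__interception::PTR_TO_REAL(read),
        (::__interception::uptr) & (read),
        (::__interception::uptr) & WRAP(read));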
diff --git a/libsanitizer/interception/interception_type_test.cc b/libsanitizer/interception/interception_type_test.cc
index f664eeeb04f..6ba45e26dda 100644
--- a/libsanitizer/interception/interception_type_test.cc
+++ b/libsanitizer/interception/interception_type_test.cc
@@ -17,13 +17,13 @@
#include <stddef.h>
#include <stdint.h>
-COMPILER_CHECK(sizeof(SIZE_T) == sizeof(size_t));
-COMPILER_CHECK(sizeof(SSIZE_T) == sizeof(ssize_t));
-COMPILER_CHECK(sizeof(PTRDIFF_T) == sizeof(ptrdiff_t));
-COMPILER_CHECK(sizeof(INTMAX_T) == sizeof(intmax_t));
+COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
+COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
+COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
+COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#ifndef __APPLE__
-COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
+COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
// The following are the cases when pread (and friends) is used instead of
@@ -31,7 +31,7 @@ COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64
-COMPILER_CHECK(sizeof(OFF_T) == sizeof(off_t));
+COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif
diff --git a/libsanitizer/interception/interception_win.cc b/libsanitizer/interception/interception_win.cc
index 443bdce1859..df51fa21ead 100644
--- a/libsanitizer/interception/interception_win.cc
+++ b/libsanitizer/interception/interception_win.cc
@@ -13,24 +13,11 @@
#ifdef _WIN32
#include "interception.h"
+#define WIN32_LEAN_AND_MEAN
#include <windows.h>
namespace __interception {
-bool GetRealFunctionAddress(const char *func_name, uptr *func_addr) {
- const char *DLLS[] = {
- "msvcr80.dll",
- "msvcr90.dll",
- "kernel32.dll",
- NULL
- };
- *func_addr = 0;
- for (size_t i = 0; *func_addr == 0 && DLLS[i]; ++i) {
- *func_addr = (uptr)GetProcAddress(GetModuleHandleA(DLLS[i]), func_name);
- }
- return (*func_addr != 0);
-}
-
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
@@ -54,96 +41,231 @@ static void WriteJumpInstruction(char *jmp_from, char *to) {
*(ptrdiff_t*)(jmp_from + 1) = offset;
}
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
-#ifdef _WIN64
-# error OverrideFunction was not tested on x64
-#endif
- // Basic idea:
- // We write 5 bytes (jmp-to-new_func) at the beginning of the 'old_func'
- // to override it. We want to be able to execute the original 'old_func' from
- // the wrapper, so we need to keep the leading 5+ bytes ('head') of the
- // original instructions somewhere with a "jmp old_func+head".
- // We call these 'head'+5 bytes of instructions a "trampoline".
-
+static char *GetMemoryForTrampoline(size_t size) {
// Trampolines are allocated from a common pool.
const int POOL_SIZE = 1024;
static char *pool = NULL;
static size_t pool_used = 0;
- if (pool == NULL) {
- pool = (char*)VirtualAlloc(NULL, POOL_SIZE,
- MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- // FIXME: set PAGE_EXECUTE_READ access after setting all interceptors?
- if (pool == NULL)
- return false;
+ if (!pool) {
+ pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ // FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
+ // interceptors are in place.
+ if (!pool)
+ return NULL;
_memset(pool, 0xCC /* int 3 */, POOL_SIZE);
}
- char* old_bytes = (char*)old_func;
- char* trampoline = pool + pool_used;
+ if (pool_used + size > POOL_SIZE)
+ return NULL;
+
+ char *ret = pool + pool_used;
+ pool_used += size;
+ return ret;
+}
- // Find out the number of bytes of the instructions we need to copy to the
- // island and store it in 'head'.
- size_t head = 0;
- while (head < 5) {
- switch (old_bytes[head]) {
+// Returns 0 on error.
+static size_t RoundUpToInstrBoundary(size_t size, char *code) {
+ size_t cursor = 0;
+ while (cursor < size) {
+ switch (code[cursor]) {
+ case '\x51': // push ecx
+ case '\x52': // push edx
+ case '\x53': // push ebx
+ case '\x54': // push esp
case '\x55': // push ebp
case '\x56': // push esi
case '\x57': // push edi
- head++;
+ case '\x5D': // pop ebp
+ cursor++;
+ continue;
+ case '\x6A': // 6A XX = push XX
+ cursor += 2;
+ continue;
+ case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
+ case '\xB8': // B8 XX YY ZZ WW = mov eax, WWZZYYXX
+ cursor += 5;
continue;
}
- switch (*(unsigned short*)(old_bytes + head)) { // NOLINT
+ switch (*(unsigned short*)(code + cursor)) { // NOLINT
case 0xFF8B: // 8B FF = mov edi, edi
case 0xEC8B: // 8B EC = mov ebp, esp
case 0xC033: // 33 C0 = xor eax, eax
- head += 2;
+ cursor += 2;
continue;
+ case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
+ case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
case 0xEC83: // 83 EC XX = sub esp, XX
- head += 3;
+ case 0x75FF: // FF 75 XX = push dword ptr [ebp+XXh]
+ cursor += 3;
continue;
case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
- head += 6;
+ case 0x25FF: // FF 25 XX YY ZZ WW = jmp dword ptr ds:[WWZZYYXX]
+ cursor += 6;
+ continue;
+ case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp TT, WWZZYYXX
+ cursor += 7;
continue;
}
- switch (0x00FFFFFF & *(unsigned int*)(old_bytes + head)) {
+ switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
case 0x24448A: // 8A 44 24 XX = mov eal, dword ptr [esp+XXh]
+ case 0x24448B: // 8B 44 24 XX = mov eax, dword ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
+ case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
- head += 4;
+ cursor += 4;
continue;
}
// Unknown instruction!
- return false;
+ // FIXME: Unknown instruction failures might happen when we add a new
+ // interceptor or a new compiler version. In either case, they should result
+ // in visible and readable error messages. However, merely calling abort()
+ // leads to an infinite recursion in CheckFailed.
+ // Do we have a good way to abort with an error message here?
+ __debugbreak();
+ return 0;
}
- if (pool_used + head + 5 > POOL_SIZE)
- return false;
+ return cursor;
+}
+
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
+#ifdef _WIN64
+#error OverrideFunction is not yet supported on x64
+#endif
+ // Function overriding works basically like this:
+ // We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
+ // to override it.
+ // We might want to be able to execute the original 'old_func' from the
+ // wrapper, in this case we need to keep the leading 5+ bytes ('head')
+ // of the original code somewhere with a "jmp <old_func+head>".
+ // We call these 'head'+5 bytes of instructions a "trampoline".
+ char *old_bytes = (char *)old_func;
+
+ // We'll need at least 5 bytes for a 'jmp'.
+ size_t head = 5;
+ if (orig_old_func) {
+ // Find out the number of bytes of the instructions we need to copy
+ // to the trampoline and store it in 'head'.
+ head = RoundUpToInstrBoundary(head, old_bytes);
+ if (!head)
+ return false;
- // Now put the "jump to trampoline" instruction into the original code.
+ // Put the needed instructions into the trampoline bytes.
+ char *trampoline = GetMemoryForTrampoline(head + 5);
+ if (!trampoline)
+ return false;
+ _memcpy(trampoline, old_bytes, head);
+ WriteJumpInstruction(trampoline + head, old_bytes + head);
+ *orig_old_func = (uptr)trampoline;
+ }
+
+ // Now put the "jmp <new_func>" instruction at the original code location.
+ // We should preserve the EXECUTE flag as some of our own code might be
+ // located in the same page (sic!). FIXME: might consider putting the
+ // __interception code into a separate section or something?
DWORD old_prot, unused_prot;
- if (!VirtualProtect((void*)old_func, head, PAGE_EXECUTE_READWRITE,
+ if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
&old_prot))
return false;
- // Put the needed instructions into the trampoline bytes.
- _memcpy(trampoline, old_bytes, head);
- WriteJumpInstruction(trampoline + head, old_bytes + head);
- *orig_old_func = (uptr)trampoline;
- pool_used += head + 5;
-
- // Intercept the 'old_func'.
- WriteJumpInstruction(old_bytes, (char*)new_func);
+ WriteJumpInstruction(old_bytes, (char *)new_func);
_memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
- if (!VirtualProtect((void*)old_func, head, old_prot, &unused_prot))
+ // Restore the original permissions.
+ if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
return false; // not clear if this failure bothers us.
return true;
}
+static void **InterestingDLLsAvailable() {
+ const char *InterestingDLLs[] = {
+ "kernel32.dll",
+ "msvcr110.dll", // VS2012
+ "msvcr120.dll", // VS2013
+ // NTDLL should go last as it exports some functions that we should override
+ // in the CRT [presumably only used internally].
+ "ntdll.dll", NULL
+ };
+ static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
+ if (!result[0]) {
+ for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
+ if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))
+ result[j++] = (void *)h;
+ }
+ }
+ return &result[0];
+}
+
+namespace {
+// Utility for reading loaded PE images.
+template <typename T> class RVAPtr {
+ public:
+ RVAPtr(void *module, uptr rva)
+ : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}
+ operator T *() { return ptr_; }
+ T *operator->() { return ptr_; }
+ T *operator++() { return ++ptr_; }
+
+ private:
+ T *ptr_;
+};
+} // namespace
+
+// Internal implementation of GetProcAddress. At least since Windows 8,
+// GetProcAddress appears to initialize DLLs before returning function pointers
+// into them. This is problematic for the sanitizers, because they typically
+// want to intercept malloc *before* MSVCRT initializes. Our internal
+// implementation walks the export list manually without doing initialization.
+uptr InternalGetProcAddress(void *module, const char *func_name) {
+ // Check that the module header is full and present.
+ RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
+ RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
+ if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
+ headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
+ headers->FileHeader.SizeOfOptionalHeader <
+ sizeof(IMAGE_OPTIONAL_HEADER)) {
+ return 0;
+ }
+
+ IMAGE_DATA_DIRECTORY *export_directory =
+ &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
+ RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
+ export_directory->VirtualAddress);
+ RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
+ RVAPtr<DWORD> names(module, exports->AddressOfNames);
+ RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);
+
+ for (DWORD i = 0; i < exports->NumberOfNames; i++) {
+ RVAPtr<char> name(module, names[i]);
+ if (!strcmp(func_name, name)) {
+ DWORD index = ordinals[i];
+ RVAPtr<char> func(module, functions[index]);
+ return (uptr)(char *)func;
+ }
+ }
+
+ return 0;
+}
+
+static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
+ *func_addr = 0;
+ void **DLLs = InterestingDLLsAvailable();
+ for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
+ *func_addr = InternalGetProcAddress(DLLs[i], func_name);
+ return (*func_addr != 0);
+}
+
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
+ uptr orig_func;
+ if (!GetFunctionAddressInDLLs(name, &orig_func))
+ return false;
+ return OverrideFunction(orig_func, new_func, orig_old_func);
+}
+
} // namespace __interception
#endif // _WIN32
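The patch written over the original entry point is always a 5-byte "jmp rel32": opcode 0xE9 followed by a displacement measured from the end of the instruction. A standalone restatement of what WriteJumpInstruction() in the code above computes (32-bit x86 only, matching the #error guard for _WIN64):

    #include <stddef.h>

    static void write_jump(char *jmp_from, char *to) {
      // rel32 is relative to the first byte *after* the 5-byte instruction.
      ptrdiff_t offset = to - jmp_from - 5;
      jmp_from[0] = '\xE9';                   // jmp rel32
      *(ptrdiff_t *)(jmp_from + 1) = offset;  // 4-byte displacement on x86-32
    }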
diff --git a/libsanitizer/interception/interception_win.h b/libsanitizer/interception/interception_win.h
index b46ad0dc66f..6388209dcc1 100644
--- a/libsanitizer/interception/interception_win.h
+++ b/libsanitizer/interception/interception_win.h
@@ -20,27 +20,33 @@
#define INTERCEPTION_WIN_H
namespace __interception {
-// returns true if a function with the given name was found.
-bool GetRealFunctionAddress(const char *func_name, uptr *func_addr);
+// All the functions in the OverrideFunction() family return true on success,
+// false on failure (including "couldn't find the function").
+
+// Overrides a function by its address.
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);
+
+// Overrides a function in a system DLL or DLL CRT by its exported name.
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
+
+// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
+uptr InternalGetProcAddress(void *module, const char *func_name);
-// returns true if the old function existed, false on failure.
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func);
} // namespace __interception
-#if defined(_DLL)
-# define INTERCEPT_FUNCTION_WIN(func) \
- ::__interception::GetRealFunctionAddress( \
- #func, (::__interception::uptr*)&REAL(func))
+#if defined(INTERCEPTION_DYNAMIC_CRT)
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction(#func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
#else
-# define INTERCEPT_FUNCTION_WIN(func) \
- ::__interception::OverrideFunction( \
- (::__interception::uptr)func, \
- (::__interception::uptr)WRAP(func), \
- (::__interception::uptr*)&REAL(func))
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction((::__interception::uptr)func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
#endif
-#define INTERCEPT_FUNCTION_VER_WIN(func, symver) \
- INTERCEPT_FUNCTION_WIN(func)
+#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
#endif // INTERCEPTION_WIN_H
#endif // _WIN32
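A rough sketch of the new by-name OverrideFunction() overload as runtime-internal code might use it; my_memset, real_memset and HookMemset are hypothetical names, and the include refers to the in-tree (not installed) interception header.

    #include <stddef.h>
    #include "interception/interception.h"  // in-tree header; defines __interception::uptr

    typedef void *(*memset_f)(void *, int, size_t);
    static memset_f real_memset;

    static void *my_memset(void *p, int c, size_t n) {
      // ... bookkeeping would go here ...
      return real_memset(p, c, n);
    }

    bool HookMemset() {
      // Resolves "memset" through InternalGetProcAddress() in the DLLs from
      // InterestingDLLsAvailable(), then patches it; real_memset receives the
      // trampoline so the original implementation can still be called.
      return __interception::OverrideFunction(
          "memset", (__interception::uptr)&my_memset,
          (__interception::uptr *)&real_memset);
    }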
diff --git a/libsanitizer/libbacktrace/Makefile.am b/libsanitizer/libbacktrace/Makefile.am
index fd9eb65f7eb..34bfb1eef27 100644
--- a/libsanitizer/libbacktrace/Makefile.am
+++ b/libsanitizer/libbacktrace/Makefile.am
@@ -40,6 +40,7 @@ C_WARN_FLAGS = $(WARN_FLAGS) -Wstrict-prototypes -Wmissing-prototypes -Wold-styl
CXX_WARN_FLAGS = $(WARN_FLAGS) -Wno-unused-parameter
AM_CFLAGS = $(C_WARN_FLAGS)
AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions
+AM_CXXFLAGS += -std=gnu++11
noinst_LTLIBRARIES = libsanitizer_libbacktrace.la
diff --git a/libsanitizer/libbacktrace/Makefile.in b/libsanitizer/libbacktrace/Makefile.in
index 0dfb53d2c93..78d08a97b30 100644
--- a/libsanitizer/libbacktrace/Makefile.in
+++ b/libsanitizer/libbacktrace/Makefile.in
@@ -177,6 +177,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -188,10 +189,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -270,7 +273,7 @@ WARN_FLAGS = -W -Wall -Wwrite-strings -Wmissing-format-attribute \
C_WARN_FLAGS = $(WARN_FLAGS) -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition
CXX_WARN_FLAGS = $(WARN_FLAGS) -Wno-unused-parameter
AM_CFLAGS = $(C_WARN_FLAGS)
-AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions
+AM_CXXFLAGS = $(CXX_WARN_FLAGS) -fno-rtti -fno-exceptions -std=gnu++11
noinst_LTLIBRARIES = libsanitizer_libbacktrace.la
libsanitizer_libbacktrace_la_SOURCES = \
../../libbacktrace/backtrace.h \
diff --git a/libsanitizer/lsan/Makefile.am b/libsanitizer/lsan/Makefile.am
index 7a508c12686..03ec4fe91fe 100644
--- a/libsanitizer/lsan/Makefile.am
+++ b/libsanitizer/lsan/Makefile.am
@@ -6,6 +6,7 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
diff --git a/libsanitizer/lsan/Makefile.in b/libsanitizer/lsan/Makefile.in
index 47caebc09f8..df95b859f8c 100644
--- a/libsanitizer/lsan/Makefile.in
+++ b/libsanitizer/lsan/Makefile.in
@@ -170,6 +170,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -181,10 +182,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -260,7 +263,8 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
@LSAN_SUPPORTED_TRUE@toolexeclib_LTLIBRARIES = liblsan.la
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 270979a78e7..6e7429c95a5 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -13,6 +13,7 @@
#include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
@@ -23,27 +24,53 @@ bool lsan_init_is_running;
namespace __lsan {
-static void InitializeCommonFlags() {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = 30;
- cf->detect_leaks = true;
-
- ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
}
} // namespace __lsan
using namespace __lsan; // NOLINT
+static void InitializeFlags() {
+ // Set all the default values.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 30;
+ cf.detect_leaks = true;
+ cf.exitcode = 23;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterLsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ parser.ParseString(GetEnv("LSAN_OPTIONS"));
+
+ SetVerbosity(common_flags()->verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
return;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
- InitializeCommonFlags();
+ CacheBinaryName();
+ InitializeFlags();
+ InitCommonLsan();
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
@@ -53,16 +80,17 @@ extern "C" void __lsan_init() {
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
- // Start symbolizer process if necessary.
- if (common_flags()->symbolize) {
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- } else {
- Symbolizer::Disable();
- }
-
- InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
lsan_inited = true;
lsan_init_is_running = false;
}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_STACK_TRACE_FATAL;
+ stack.Print();
+}
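The new exported helper can be called directly from instrumented code; a minimal sketch (debug_checkpoint is a made-up name):

    extern "C" void __sanitizer_print_stack_trace();

    void debug_checkpoint() {
      // Unwinds and prints the current call stack via the GET_STACK_TRACE_FATAL
      // machinery used in the definition above.
      __sanitizer_print_stack_trace();
    }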
diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h
index 8a5030ce878..ee2fc02cf08 100644
--- a/libsanitizer/lsan/lsan.h
+++ b/libsanitizer/lsan/lsan.h
@@ -13,6 +13,26 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#define GET_STACK_TRACE(max_size, fast) \
+ BufferedStackTrace stack; \
+ { \
+ uptr stack_top = 0, stack_bottom = 0; \
+ ThreadContext *t; \
+ if (fast && (t = CurrentThreadContext())) { \
+ stack_top = t->stack_end(); \
+ stack_bottom = t->stack_begin(); \
+ } \
+ stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ /* context */ 0, stack_top, stack_bottom, fast); \
+ }
+
+#define GET_STACK_TRACE_FATAL \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+ common_flags()->fast_unwind_on_malloc)
+
namespace __lsan {
void InitializeInterceptors();
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index ce47dfcd215..22b5f7e1a4a 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -13,6 +13,7 @@
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
@@ -22,19 +23,29 @@ extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-
struct ChunkMetadata {
- bool allocated : 8; // Must be first.
+ u8 allocated : 8; // Must be first.
ChunkTag tag : 2;
uptr requested_size : 54;
u32 stack_trace_id;
};
+#if defined(__mips64) || defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
+ sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
+ PrimaryAllocator;
+#else
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -44,14 +55,14 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.Init();
+ allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}
void AllocatorThreadFinish() {
allocator.SwallowCache(&cache);
}
-static ChunkMetadata *Metadata(void *p) {
+static ChunkMetadata *Metadata(const void *p) {
return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
@@ -60,7 +71,7 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
ChunkMetadata *m = Metadata(p);
CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
- m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
+ m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
@@ -78,17 +89,19 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
- return 0;
+ return nullptr;
}
void *p = allocator.Allocate(&cache, size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
return p;
}
void Deallocate(void *p) {
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
@@ -99,7 +112,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(&cache, p);
- return 0;
+ return nullptr;
}
p = allocator.Reallocate(&cache, p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
@@ -111,7 +124,7 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(cache);
}
-uptr GetMallocUsableSize(void *p) {
+uptr GetMallocUsableSize(const void *p) {
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
@@ -141,7 +154,11 @@ uptr PointsIntoChunk(void* p) {
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
- if (m->allocated && addr < chunk + m->requested_size)
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}
@@ -193,4 +210,39 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
return kIgnoreObjectInvalid;
}
}
-} // namespace __lsan
+} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+} // extern "C"
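
With the extern "C" block above, the standalone LSan runtime now implements the common allocator-statistics interface. A short sketch of querying it from an instrumented program, assuming the public <sanitizer/allocator_interface.h> header installed by this backport:

#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = std::malloc(128);
  // Live-heap statistics served by the LSan allocator.
  std::printf("allocated: %zu bytes, mapped: %zu bytes\n",
              __sanitizer_get_current_allocated_bytes(),
              __sanitizer_get_heap_size());
  // Per-pointer queries backed by Metadata()/GetMallocUsableSize() above.
  std::printf("ownership(p)=%d, size(p)=%zu\n",
              __sanitizer_get_ownership(p), __sanitizer_get_allocated_size(p));
  std::free(p);
  return 0;
}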
diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h
index 61ea86572ef..aae0d28dc4a 100644
--- a/libsanitizer/lsan/lsan_allocator.h
+++ b/libsanitizer/lsan/lsan_allocator.h
@@ -23,7 +23,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
void Deallocate(void *p);
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment);
-uptr GetMallocUsableSize(void *p);
+uptr GetMallocUsableSize(const void *p);
template<typename Callable>
void ForEachChunk(const Callable &callback);
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index bbc5b5f0378..6d674c5e437 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -14,17 +14,19 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#if CAN_SANITIZE_LEAKS
namespace __lsan {
-// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
@@ -32,65 +34,63 @@ bool DisabledInThisThread() { return disable_counter > 0; }
Flags lsan_flags;
-static void InitializeFlags() {
- Flags *f = flags();
- // Default values.
- f->report_objects = false;
- f->resolution = 0;
- f->max_leaks = 0;
- f->exitcode = 23;
- f->suppressions="";
- f->use_registers = true;
- f->use_globals = true;
- f->use_stacks = true;
- f->use_tls = true;
- f->use_unaligned = false;
- f->verbosity = 0;
- f->log_pointers = false;
- f->log_threads = false;
-
- const char *options = GetEnv("LSAN_OPTIONS");
- if (options) {
- ParseFlag(options, &f->use_registers, "use_registers");
- ParseFlag(options, &f->use_globals, "use_globals");
- ParseFlag(options, &f->use_stacks, "use_stacks");
- ParseFlag(options, &f->use_tls, "use_tls");
- ParseFlag(options, &f->use_unaligned, "use_unaligned");
- ParseFlag(options, &f->report_objects, "report_objects");
- ParseFlag(options, &f->resolution, "resolution");
- CHECK_GE(&f->resolution, 0);
- ParseFlag(options, &f->max_leaks, "max_leaks");
- CHECK_GE(&f->max_leaks, 0);
- ParseFlag(options, &f->verbosity, "verbosity");
- ParseFlag(options, &f->log_pointers, "log_pointers");
- ParseFlag(options, &f->log_threads, "log_threads");
- ParseFlag(options, &f->exitcode, "exitcode");
- ParseFlag(options, &f->suppressions, "suppressions");
- }
+void Flags::SetDefaults() {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
}
-SuppressionContext *suppression_ctx;
+#define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) Report(__VA_ARGS__); \
+ } while (0);
+
+#define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) Report(__VA_ARGS__); \
+ } while (0);
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = { kSuppressionLeak };
void InitializeSuppressions() {
- CHECK(!suppression_ctx);
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- suppression_ctx = new(placeholder_) SuppressionContext;
- char *suppressions_from_file;
- uptr buffer_size;
- if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
- &buffer_size, 1 << 26 /* max_len */))
- suppression_ctx->Parse(suppressions_from_file);
- if (flags()->suppressions[0] && !buffer_size) {
- Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
- flags()->suppressions);
- Die();
- }
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
suppression_ctx->Parse(__lsan_default_suppressions());
}
+static SuppressionContext *GetSuppressionContext() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+struct RootRegion {
+ const void *begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> *root_regions;
+
+void InitializeRootRegions() {
+ CHECK(!root_regions);
+ ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+ root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
+}
+
void InitCommonLsan() {
- InitializeFlags();
+ InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
@@ -99,9 +99,9 @@ void InitCommonLsan() {
}
}
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
const char *End() { return Default(); }
@@ -112,9 +112,15 @@ static inline bool CanBeAHeapPointer(uptr p) {
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
if (p < kMinAddress) return false;
-#ifdef __x86_64__
+#if defined(__x86_64__)
// Accept only canonical form user-space addresses.
return ((p >> 47) == 0);
+#elif defined(__mips64)
+ return ((p >> 40) == 0);
+#elif defined(__aarch64__)
+ unsigned runtimeVMA =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+ return ((p >> runtimeVMA) == 0);
#else
return true;
#endif
@@ -122,16 +128,16 @@ static inline bool CanBeAHeapPointer(uptr p) {
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
-// There are two usage modes for this function: finding reachable or ignored
-// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
+ CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment();
- if (flags()->log_pointers)
- Report("Scanning %s range %p-%p.\n", region_type, begin, end);
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
@@ -143,13 +149,20 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
if (chunk == begin) continue;
LsanMetadata m(chunk);
- // Reachable beats ignored beats leaked.
- if (m.tag() == kReachable) continue;
- if (m.tag() == kIgnored && tag != kReachable) continue;
+ if (m.tag() == kReachable || m.tag() == kIgnored) continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+ continue;
+ }
+
m.set_tag(tag);
- if (flags()->log_pointers)
- Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
- chunk, chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+ chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
@@ -168,7 +181,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr registers_end = registers_begin + registers.size();
for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
- if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
+ LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
@@ -176,8 +189,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
- if (flags()->log_threads)
- Report("Thread %d not found in registry.\n", os_id);
+ LOG_THREADS("Thread %d not found in registry.\n", os_id);
continue;
}
uptr sp;
@@ -194,14 +206,12 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
"REGISTERS", kReachable);
if (flags()->use_stacks) {
- if (flags()->log_threads)
- Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
- if (flags()->log_threads)
- Report("WARNING: stack pointer not in stack range.\n");
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@@ -212,7 +222,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
- if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
if (cache_begin == cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
@@ -230,6 +240,37 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
}
+static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
+ uptr root_end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr begin, end, prot;
+ while (proc_maps.Next(&begin, &end,
+ /*offset*/ nullptr, /*filename*/ nullptr,
+ /*filename_size*/ 0, &prot)) {
+ uptr intersection_begin = Max(root_begin, begin);
+ uptr intersection_end = Min(end, root_end);
+ if (intersection_begin >= intersection_end) continue;
+ bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_begin, root_end, begin, end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier,
+ "ROOT", kReachable);
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions) return;
+ CHECK(root_regions);
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ uptr begin_addr = reinterpret_cast<uptr>(region.begin);
+ ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ }
+}
+
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
while (frontier->size()) {
uptr next_chunk = frontier->back();
@@ -247,7 +288,7 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
ScanRangeForPointers(chunk, chunk + m.requested_size(),
- /* frontier */ 0, "HEAP", kIndirectlyLeaked);
+ /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
}
}
@@ -257,48 +298,54 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
CHECK(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
- if (m.allocated() && m.tag() == kIgnored)
+ if (m.allocated() && m.tag() == kIgnored) {
+ LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
+ chunk, chunk + m.requested_size(), m.requested_size());
reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+ }
}
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
- Frontier frontier(GetPageSizeCached());
+ Frontier frontier(1);
- if (flags()->use_globals)
- ProcessGlobalRegions(&frontier);
+ ForEachChunk(CollectIgnoredCb, &frontier);
+ ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
+ ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
+
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
- if (flags()->log_pointers)
- Report("Processing platform-specific allocations.\n");
+ LOG_POINTERS("Processing platform-specific allocations.\n");
+ CHECK_EQ(0, frontier.size());
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
- if (flags()->log_pointers)
- Report("Scanning ignored chunks.\n");
- CHECK_EQ(0, frontier.size());
- ForEachChunk(CollectIgnoredCb, &frontier);
- FloodFillTag(&frontier, kIgnored);
-
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
- if (flags()->log_pointers)
- Report("Scanning leaked chunks.\n");
- ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
+ LOG_POINTERS("Scanning leaked chunks.\n");
+ ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+ (void)arg;
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kIgnored)
+ m.set_tag(kDirectlyLeaked);
}
static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id);
- uptr size = 0;
- const uptr *trace = StackDepotGet(stack_trace_id, &size);
- StackTrace::PrintStack(trace, size);
+ StackDepotGet(stack_trace_id).Print();
}
-// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
@@ -306,33 +353,23 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- uptr resolution = flags()->resolution;
+ u32 resolution = flags()->resolution;
+ u32 stack_trace_id = 0;
if (resolution > 0) {
- uptr size = 0;
- const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
- size = Min(size, resolution);
- leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
+ StackTrace stack = StackDepotGet(m.stack_trace_id());
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
} else {
- leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+ stack_trace_id = m.stack_trace_id();
}
- }
-}
-
-// ForEachChunkCallback. Prints addresses of unreachable chunks.
-static void PrintLeakedCb(uptr chunk, void *arg) {
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (!m.allocated()) return;
- if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- Printf("%s leaked %zu byte object at %p.\n",
- m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
- m.requested_size(), chunk);
+ leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+ m.tag());
}
}
static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
- suppression_ctx->GetMatched(&matched);
+ GetSuppressionContext()->GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
@@ -340,49 +377,38 @@ static void PrintMatchedSuppressions() {
Printf("Suppressions used:\n");
Printf(" count bytes template\n");
for (uptr i = 0; i < matched.size(); i++)
- Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
- matched[i]->weight, matched[i]->templ);
+ Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
+ &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
Printf("%s\n\n", line);
}
-static void PrintLeaked() {
- Printf("\n");
- Printf("Reporting individual objects:\n");
- ForEachChunk(PrintLeakedCb, 0 /* arg */);
-}
-
-struct DoLeakCheckParam {
+struct CheckForLeaksParam {
bool success;
LeakReport leak_report;
};
-static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
- void *arg) {
- DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+ void *arg) {
+ CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
CHECK(param);
CHECK(!param->success);
- CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
- if (!param->leak_report.IsEmpty() && flags()->report_objects)
- PrintLeaked();
+ // Clean up for subsequent leak checks. This assumes we did not overwrite any
+ // kIgnored tags.
+ ForEachChunk(ResetTagsCb, nullptr);
param->success = true;
}
-void DoLeakCheck() {
- EnsureMainThreadIDIsCorrect();
- BlockingMutexLock l(&global_mutex);
- static bool already_done;
- if (already_done) return;
- already_done = true;
+static bool CheckForLeaks() {
if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return;
-
- DoLeakCheckParam param;
+ return false;
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
param.success = false;
LockThreadRegistry();
LockAllocator();
- StopTheWorld(DoLeakCheckCallback, &param);
+ DoStopTheWorld(CheckForLeaksCallback, &param);
UnlockAllocator();
UnlockThreadRegistry();
@@ -390,8 +416,9 @@ void DoLeakCheck() {
Report("LeakSanitizer has encountered a fatal error.\n");
Die();
}
- uptr have_unsuppressed = param.leak_report.ApplySuppressions();
- if (have_unsuppressed) {
+ param.leak_report.ApplySuppressions();
+ uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+ if (unsuppressed_count > 0) {
Decorator d;
Printf("\n"
"================================================================="
@@ -399,66 +426,101 @@ void DoLeakCheck() {
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.End());
- param.leak_report.PrintLargest(flags()->max_leaks);
+ param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
- if (have_unsuppressed || (flags()->verbosity >= 1)) {
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
+ return true;
}
- if (have_unsuppressed && flags()->exitcode)
- internal__exit(flags()->exitcode);
+ return false;
+}
+
+void DoLeakCheck() {
+ BlockingMutexLock l(&global_mutex);
+ static bool already_done;
+ if (already_done) return;
+ already_done = true;
+ bool have_leaks = CheckForLeaks();
+ if (!have_leaks) {
+ return;
+ }
+ if (common_flags()->exitcode) {
+ Die();
+ }
+}
+
+static int DoRecoverableLeakCheck() {
+ BlockingMutexLock l(&global_mutex);
+ bool have_leaks = CheckForLeaks();
+ return have_leaks ? 1 : 0;
}
static Suppression *GetSuppressionForAddr(uptr addr) {
- static const uptr kMaxAddrFrames = 16;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
- addr, addr_frames.data(), kMaxAddrFrames);
- for (uptr i = 0; i < addr_frames_num; i++) {
- Suppression* s;
- if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
+ Suppression *s = nullptr;
+
+ // Suppress by module name.
+ SuppressionContext *suppressions = GetSuppressionContext();
+ if (const char *module_name =
+ Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
+ if (suppressions->Match(module_name, kSuppressionLeak, &s))
return s;
+
+ // Suppress by file or function name.
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
+ suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
}
- return 0;
+ frames->ClearAll();
+ return s;
}
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
- uptr size = 0;
- const uptr *trace = StackDepotGet(stack_trace_id, &size);
- for (uptr i = 0; i < size; i++) {
- Suppression *s =
- GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
if (s) return s;
}
- return 0;
+ return nullptr;
}
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
-// in LeakReport::Add(). We don't expect to ever see this many leaks in
-// real-world applications.
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
-void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
+void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
+ uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
bool is_directly_leaked = (tag == kDirectlyLeaked);
- for (uptr i = 0; i < leaks_.size(); i++)
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
if (leaks_[i].stack_trace_id == stack_trace_id &&
leaks_[i].is_directly_leaked == is_directly_leaked) {
leaks_[i].hit_count++;
leaks_[i].total_size += leaked_size;
- return;
+ break;
}
- if (leaks_.size() == kMaxLeaksConsidered) return;
- Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
- is_directly_leaked, /* is_suppressed */ false };
- leaks_.push_back(leak);
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered) return;
+ Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false };
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
+ }
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
@@ -468,7 +530,7 @@ static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
return leak1.is_directly_leaked;
}
-void LeakReport::PrintLargest(uptr num_leaks_to_print) {
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
@@ -476,31 +538,49 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
"reported.\n",
kMaxLeaksConsidered);
- uptr unsuppressed_count = 0;
- for (uptr i = 0; i < leaks_.size(); i++)
- if (!leaks_[i].is_suppressed) unsuppressed_count++;
- if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
- Printf("The %zu largest leak(s):\n", num_leaks_to_print);
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
InternalSort(&leaks_, leaks_.size(), LeakComparator);
- uptr leaks_printed = 0;
- Decorator d;
+ uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
- Printf("%s", d.Leak());
- Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
- leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
- leaks_[i].total_size, leaks_[i].hit_count);
- Printf("%s", d.End());
- PrintStackTraceById(leaks_[i].stack_trace_id);
- leaks_printed++;
- if (leaks_printed == num_leaks_to_print) break;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report) break;
}
- if (leaks_printed < unsuppressed_count) {
- uptr remaining = unsuppressed_count - leaks_printed;
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
Printf("Omitting %zu more leak(s).\n", remaining);
}
}
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.End());
+
+ PrintStackTraceById(leaks_[index].stack_trace_id);
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
@@ -509,29 +589,33 @@ void LeakReport::PrintSummary() {
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
- InternalScopedBuffer<char> summary(kMaxSummaryLength);
- internal_snprintf(summary.data(), summary.size(),
- "%zu byte(s) leaked in %zu allocation(s).", bytes,
- allocations);
+ InternalScopedString summary(kMaxSummaryLength);
+ summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
ReportErrorSummary(summary.data());
}
-uptr LeakReport::ApplySuppressions() {
- uptr unsuppressed_count = 0;
+void LeakReport::ApplySuppressions() {
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
- s->hit_count += leaks_[i].hit_count;
+ atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
+ leaks_[i].hit_count);
leaks_[i].is_suppressed = true;
- } else {
- unsuppressed_count++;
}
}
- return unsuppressed_count;
}
-} // namespace __lsan
-#endif // CAN_SANITIZE_LEAKS
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed) result++;
+ return result;
+}
+
+} // namespace __lsan
+#endif // CAN_SANITIZE_LEAKS
using namespace __lsan; // NOLINT
@@ -545,14 +629,52 @@ void __lsan_ignore_object(const void *p) {
// locked.
BlockingMutexLock l(&global_mutex);
IgnoreObjectResult res = IgnoreObjectLocked(p);
- if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): no heap object found at %p", p);
- if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
- Report("__lsan_ignore_object(): "
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1, "__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
- if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
- Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
-#endif // CAN_SANITIZE_LEAKS
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ RootRegion region = {begin, size};
+ root_regions->push_back(region);
+ VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ bool removed = false;
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ if (region.begin == begin && region.size == size) {
+ removed = true;
+ uptr last_index = root_regions->size() - 1;
+ (*root_regions)[i] = (*root_regions)[last_index];
+ root_regions->pop_back();
+ VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+ break;
+ }
+ }
+ if (!removed) {
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %llu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+ }
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -578,7 +700,16 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
__lsan::DoLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+ return 0;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
@@ -587,4 +718,4 @@ int __lsan_is_turned_off() {
return 0;
}
#endif
-} // extern "C"
+} // extern "C"
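
The new interfaces above are the user-visible side of root regions and recoverable leak checking. A usage sketch, assuming the declarations come from the installed <sanitizer/lsan_interface.h>:

#include <sanitizer/lsan_interface.h>
#include <cstdlib>

int main() {
  // Memory handed to code LSan cannot scan on its own (e.g. a foreign runtime).
  const std::size_t kArenaSize = 1 << 20;
  void *arena = std::malloc(kArenaSize);
  __lsan_register_root_region(arena, kArenaSize);  // pointers stored here stay reachable

  // ... run the workload ...

  // Unlike __lsan_do_leak_check(), this reports leaks but never calls Die(),
  // so it can be invoked repeatedly; returns nonzero if unsuppressed leaks remain.
  int leaked = __lsan_do_recoverable_leak_check();

  // Must match the registered (begin, size) pair exactly, or the runtime aborts.
  __lsan_unregister_root_region(arena, kArenaSize);
  std::free(arena);
  return leaked;
}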
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index 5d9b4eb62e1..b415567e7f3 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -17,14 +17,20 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
+ && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
+namespace __sanitizer {
+class FlagParser;
+}
+
namespace __lsan {
// Chunk tags.
@@ -36,47 +42,22 @@ enum ChunkTag {
};
struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+ void SetDefaults();
uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr);
}
-
- // Print addresses of leaked objects after main leak report.
- bool report_objects;
- // Aggregate two objects into one leak if this many stack frames match. If
- // zero, the entire stack trace must match.
- int resolution;
- // The number of leaks reported.
- int max_leaks;
- // If nonzero kill the process with this exit code upon finding leaks.
- int exitcode;
- // Suppressions file name.
- const char* suppressions;
-
- // Flags controlling the root set of reachable memory.
- // Global variables (.data and .bss).
- bool use_globals;
- // Thread stacks.
- bool use_stacks;
- // Thread registers.
- bool use_registers;
- // TLS and thread-specific storage.
- bool use_tls;
-
- // Consider unaligned pointers valid.
- bool use_unaligned;
-
- // User-visible verbosity.
- int verbosity;
-
- // Debug logging.
- bool log_pointers;
- bool log_threads;
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
struct Leak {
+ u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
@@ -84,17 +65,31 @@ struct Leak {
bool is_suppressed;
};
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
- LeakReport() : leaks_(1) {}
- void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
- void PrintLargest(uptr max_leaks);
+ LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
+ void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
+ ChunkTag tag);
+ void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
- bool IsEmpty() { return leaks_.size() == 0; }
- uptr ApplySuppressions();
+ void ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+
+
private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_;
InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
@@ -103,6 +98,8 @@ typedef InternalMmapVector<uptr> Frontier;
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
+// Run stoptheworld while holding any platform-specific locks.
+void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
@@ -119,6 +116,15 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
+// Special case for "new T[0]" where T is a type with DTOR.
+// new T[0] will allocate one word for the array size (0) and store a pointer
+// to the end of allocated chunk.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
@@ -127,6 +133,8 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
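
IsSpecialCaseOfOperatorNew0() above recognizes the array-cookie layout produced for a zero-length array of objects with a destructor (assuming the Itanium C++ ABI used on the targets this backport enables). A sketch of the situation it whitelists:

struct S { ~S() {} };

void example() {
  S *p = new S[0];
  // The underlying heap chunk is a single word holding the element count (0),
  // and `p` points one word past chunk_beg, i.e. exactly at the chunk's end.
  // Without the special case, PointsIntoChunk() would treat `p` as pointing
  // outside the chunk and the allocation could be reported as a leak.
  delete[] p;
}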
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index 80d2459a9ad..0456dce890a 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -17,6 +17,7 @@
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -26,7 +27,7 @@ static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
-static LoadedModule *linker = 0;
+static LoadedModule *linker = nullptr;
static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName);
@@ -41,12 +42,12 @@ void InitializePlatformSpecificModules() {
return;
}
if (num_matches == 0)
- Report("LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ VReport(1, "LeakSanitizer: Dynamic linker not found. "
+ "TLS will not be handled correctly.\n");
else if (num_matches > 1)
- Report("LeakSanitizer: Multiple modules match \"%s\". "
- "TLS will not be handled correctly.\n", kLinkerName);
- linker = 0;
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS will not be handled correctly.\n", kLinkerName);
+ linker = nullptr;
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
@@ -81,20 +82,16 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
- // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
- // deadlocking by running this under StopTheWorld. However, the lock is
- // reentrant, so we should be able to fix this by acquiring the lock before
- // suspending threads.
+ if (!flags()->use_globals) return;
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id);
- uptr size = 0;
- const uptr *trace = map->Get(stack_id, &size);
+ StackTrace stack = map->Get(stack_id);
// The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (size >= 2)
- return trace[1];
+ if (stack.size >= 2)
+ return stack.trace[1];
return 0;
}
@@ -111,7 +108,7 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
reinterpret_cast<ProcessPlatformAllocParam *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
- if (m.allocated() && m.tag() != kReachable) {
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
u32 stack_id = m.stack_trace_id();
uptr caller_pc = 0;
if (stack_id > 0)
@@ -127,6 +124,21 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
@@ -135,5 +147,31 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
-} // namespace __lsan
-#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
+struct DoStopTheWorldParam {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+ StopTheWorld(param->callback, param->argument);
+ return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ DoStopTheWorldParam param = {callback, argument};
+ dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
diff --git a/libsanitizer/lsan/lsan_flags.inc b/libsanitizer/lsan/lsan_flags.inc
new file mode 100644
index 00000000000..73a980e1724
--- /dev/null
+++ b/libsanitizer/lsan/lsan_flags.inc
@@ -0,0 +1,41 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+ "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+ int, resolution, 0,
+ "Aggregate two objects into one leak if this many stack frames match. If "
+ "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0, "The number of leaks reported.")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+ "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+ "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+ "Root set: include regions added via __lsan_register_root_region().")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+ "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging")
+LSAN_FLAG(bool, log_threads, false, "Debug logging")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
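
lsan_flags.inc is consumed through the X-macro pattern: the same list expands into the Flags struct fields, Flags::SetDefaults(), and RegisterLsanFlags() shown earlier in this patch. A generic, self-contained illustration of the pattern (the names below are invented for the demo, not the LSan ones):

#include <cstdio>

// Central flag list, playing the role of lsan_flags.inc.
#define DEMO_FLAGS(X)                                              \
  X(bool, report_objects, false, "Print leaked object addresses")  \
  X(int,  max_leaks,      0,     "Number of leaks reported")

struct DemoFlags {
#define DEMO_FLAG(Type, Name, DefaultValue, Description) Type Name;
  DEMO_FLAGS(DEMO_FLAG)   // declares: bool report_objects; int max_leaks;
#undef DEMO_FLAG

  void SetDefaults() {
#define DEMO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
    DEMO_FLAGS(DEMO_FLAG) // assigns: report_objects = false; max_leaks = 0;
#undef DEMO_FLAG
  }
};

int main() {
  DemoFlags f;
  f.SetDefaults();
  std::printf("report_objects=%d max_leaks=%d\n", f.report_objects, f.max_leaks);
  return 0;
}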
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index 1940902ef83..57581e855c4 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -32,21 +32,6 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define GET_STACK_TRACE \
- StackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- bool fast = common_flags()->fast_unwind_on_malloc; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
- StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
- }
-
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
@@ -63,7 +48,7 @@ namespace std {
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
@@ -84,28 +69,34 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
- if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
size *= nmemb;
return Allocate(stack, size, 1, true);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
return Reallocate(stack, q, size, 1);
}
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
+ return Allocate(stack, size, alignment, kAlwaysClearMemory);
+}
+
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
@@ -113,7 +104,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
@@ -140,7 +131,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
@@ -150,11 +141,11 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
-INTERCEPTOR(void, cfree, void *p) ALIAS("free");
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
- GET_STACK_TRACE; \
+ GET_STACK_TRACE_MALLOC; \
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
@@ -171,9 +162,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
@@ -183,7 +174,8 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
-INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
+INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
+ ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
@@ -214,16 +206,16 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
if (pthread_setspecific(g_thread_finalize_key,
- (void*)kPthreadDestructorIterations)) {
+ (void*)GetPthreadDestructorIterations())) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
- atomic_store(&p->tid, 0, memory_order_release);
SetCurrentThread(tid);
ThreadStart(tid, GetTid());
+ atomic_store(&p->tid, 0, memory_order_release);
return callback(param);
}
@@ -232,11 +224,11 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr;
- if (attr == 0) {
+ if (!attr) {
pthread_attr_init(&myattr);
attr = &myattr;
}
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
@@ -290,4 +282,4 @@ void InitializeInterceptors() {
}
}
-} // namespace __lsan
+} // namespace __lsan
diff --git a/libsanitizer/lsan/lsan_preinit.cc b/libsanitizer/lsan/lsan_preinit.cc
index 856f9f78787..d1efd31406b 100644
--- a/libsanitizer/lsan/lsan_preinit.cc
+++ b/libsanitizer/lsan/lsan_preinit.cc
@@ -12,11 +12,7 @@
#include "lsan.h"
-#ifndef LSAN_USE_PREINIT_ARRAY
-#define LSAN_USE_PREINIT_ARRAY 1
-#endif
-
-#if LSAN_USE_PREINIT_ARRAY && !defined(PIC)
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
// We force __lsan_init to be called before anyone else by placing it into
// .preinit_array section.
__attribute__((section(".preinit_array"), used))
diff --git a/libsanitizer/lsan/lsan_thread.cc b/libsanitizer/lsan/lsan_thread.cc
index 07f9d0ab439..1313ea2dce1 100644
--- a/libsanitizer/lsan/lsan_thread.cc
+++ b/libsanitizer/lsan/lsan_thread.cc
@@ -77,7 +77,7 @@ void ThreadContext::OnFinished() {
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
return thread_registry->CreateThread(user_id, detached, parent_tid,
- /* arg */ 0);
+ /* arg */ nullptr);
}
void ThreadStart(u32 tid, uptr os_id) {
@@ -97,9 +97,9 @@ void ThreadFinish() {
}
ThreadContext *CurrentThreadContext() {
- if (!thread_registry) return 0;
+ if (!thread_registry) return nullptr;
if (GetCurrentThread() == kInvalidTid)
- return 0;
+ return nullptr;
// No lock needed when getting current thread.
return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
}
@@ -118,7 +118,7 @@ u32 ThreadTid(uptr uid) {
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
- thread_registry->JoinThread(tid, /* arg */0);
+ thread_registry->JoinThread(tid, /* arg */nullptr);
}
void EnsureMainThreadIDIsCorrect() {
@@ -155,4 +155,4 @@ void UnlockThreadRegistry() {
thread_registry->Unlock();
}
-} // namespace __lsan
+} // namespace __lsan
diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h
index cd13fdb5c52..70c3ff92616 100644
--- a/libsanitizer/lsan/lsan_thread.h
+++ b/libsanitizer/lsan/lsan_thread.h
@@ -20,8 +20,8 @@ namespace __lsan {
class ThreadContext : public ThreadContextBase {
public:
explicit ThreadContext(int tid);
- void OnStarted(void *arg);
- void OnFinished();
+ void OnStarted(void *arg) override;
+ void OnFinished() override;
uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; }
uptr tls_begin() { return tls_begin_; }
diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am
index 8e9038dc8cf..ee7a3f1dd2c 100644
--- a/libsanitizer/sanitizer_common/Makefile.am
+++ b/libsanitizer/sanitizer_common/Makefile.am
@@ -3,9 +3,10 @@ AM_CPPFLAGS = -I $(top_srcdir)/include -isystem $(top_srcdir)/include/system
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS @RPC_DEFS@
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
if LIBBACKTRACE_SUPPORTED
AM_CXXFLAGS += -DSANITIZER_LIBBACKTRACE -DSANITIZER_CP_DEMANGLE \
-I $(top_srcdir)/../libbacktrace \
@@ -21,31 +22,45 @@ sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage.cc \
+ sanitizer_coverage_libcdep.cc \
+ sanitizer_coverage_mapping_libcdep.cc \
+ sanitizer_deadlock_detector1.cc \
+ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
+ sanitizer_flag_parser.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
+ sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
- sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
+ sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_common.cc \
+ sanitizer_procmaps_freebsd.cc \
+ sanitizer_procmaps_linux.cc \
+ sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
+ sanitizer_symbolizer_mac.cc \
+ sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
- sanitizer_symbolizer_posix_libcdep.cc \
- sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
+ sanitizer_symbolizer_posix_libcdep.cc \
+ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
+ sanitizer_tls_get_addr.cc \
+ sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc
+
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in
index e9fd115e976..36294e0e6a3 100644
--- a/libsanitizer/sanitizer_common/Makefile.in
+++ b/libsanitizer/sanitizer_common/Makefile.in
@@ -64,19 +64,29 @@ CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
- sanitizer_common_libcdep.lo sanitizer_coverage.lo \
- sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
- sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
+ sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
+ sanitizer_coverage_mapping_libcdep.lo \
+ sanitizer_deadlock_detector1.lo \
+ sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
+ sanitizer_flag_parser.lo sanitizer_libc.lo \
+ sanitizer_libignore.lo sanitizer_linux.lo \
+ sanitizer_linux_libcdep.lo sanitizer_mac.lo \
+ sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
- sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
- sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
- sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
+ sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
+ sanitizer_posix_libcdep.lo sanitizer_printf.lo \
+ sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \
+ sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
+ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
+ sanitizer_stacktrace_libcdep.lo sanitizer_symbolizer_mac.lo \
+ sanitizer_stacktrace_printer.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
- sanitizer_suppressions.lo \
- sanitizer_symbolizer_posix_libcdep.lo \
- sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
+ sanitizer_suppressions.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
- sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
+ sanitizer_symbolizer_libcdep.lo \
+ sanitizer_symbolizer_posix_libcdep.lo \
+ sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
+ sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
@@ -120,7 +130,7 @@ CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS @RPC_DEFS@
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
@@ -154,6 +164,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -165,10 +176,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -245,36 +258,49 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
- $(am__append_1)
+ -std=gnu++11 $(am__append_1)
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_common.la
sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage.cc \
+ sanitizer_coverage_libcdep.cc \
+ sanitizer_coverage_mapping_libcdep.cc \
+ sanitizer_deadlock_detector1.cc \
+ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
+ sanitizer_flag_parser.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
+ sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
- sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
+ sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_common.cc \
+ sanitizer_procmaps_freebsd.cc \
+ sanitizer_procmaps_linux.cc \
+ sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
+ sanitizer_symbolizer_mac.cc \
+ sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
- sanitizer_symbolizer_posix_libcdep.cc \
- sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
+ sanitizer_symbolizer_posix_libcdep.cc \
+ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
+ sanitizer_tls_get_addr.cc \
+ sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@@ -373,29 +399,42 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flag_parser.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_freebsd.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_printer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:
diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
new file mode 100644
index 00000000000..3bc40ef64f0
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h
@@ -0,0 +1,340 @@
+//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Concurrent uptr->T hashmap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ADDRHASHMAP_H
+#define SANITIZER_ADDRHASHMAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+// Concurrent uptr->T hashmap.
+// T must be a POD type; kSize is preferably a prime but can be any number.
+// Usage example:
+//
+// typedef AddrHashMap<uptr, 11> Map;
+// Map m;
+// {
+// Map::Handle h(&m, addr);
+// use h.operator->() to access the data
+// if h.created() then the element was just created, and the current thread
+// has exclusive access to it
+// otherwise the current thread has only read access to the data
+// }
+// {
+// Map::Handle h(&m, addr, true);
+// this will remove the data from the map in Handle dtor
+// the current thread has exclusive access to the data
+// if !h.exists() then the element never existed
+// }
+template<typename T, uptr kSize>
+class AddrHashMap {
+ private:
+ struct Cell {
+ atomic_uintptr_t addr;
+ T val;
+ };
+
+ struct AddBucket {
+ uptr cap;
+ uptr size;
+ Cell cells[1]; // variable len
+ };
+
+ static const uptr kBucketSize = 3;
+
+ struct Bucket {
+ RWMutex mtx;
+ atomic_uintptr_t add;
+ Cell cells[kBucketSize];
+ };
+
+ public:
+ AddrHashMap();
+
+ class Handle {
+ public:
+ Handle(AddrHashMap<T, kSize> *map, uptr addr);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
+
+ ~Handle();
+ T *operator->();
+ bool created() const;
+ bool exists() const;
+
+ private:
+ friend AddrHashMap<T, kSize>;
+ AddrHashMap<T, kSize> *map_;
+ Bucket *bucket_;
+ Cell *cell_;
+ uptr addr_;
+ uptr addidx_;
+ bool created_;
+ bool remove_;
+ bool create_;
+ };
+
+ private:
+ friend class Handle;
+ Bucket *table_;
+
+ void acquire(Handle *h);
+ void release(Handle *h);
+ uptr calcHash(uptr addr);
+};
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = false;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove, bool create) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = create;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::~Handle() {
+ map_->release(this);
+}
+
+template <typename T, uptr kSize>
+T *AddrHashMap<T, kSize>::Handle::operator->() {
+ return &cell_->val;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::created() const {
+ return created_;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::exists() const {
+ return cell_ != nullptr;
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::AddrHashMap() {
+ table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) {
+ uptr addr = h->addr_;
+ uptr hash = calcHash(addr);
+ Bucket *b = &table_[hash];
+
+ h->created_ = false;
+ h->addidx_ = -1U;
+ h->bucket_ = b;
+ h->cell_ = nullptr;
+
+ // If we want to remove the element, we need exclusive access to the bucket,
+ // so skip the lock-free phase.
+ if (h->remove_)
+ goto locked;
+
+ retry:
+ // First try to find an existing element w/o read mutex.
+ CHECK(!h->remove_);
+ // Check the embed cells.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 == addr) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Check the add cells with read lock.
+ if (atomic_load(&b->add, memory_order_relaxed)) {
+ b->mtx.ReadLock();
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ }
+ b->mtx.ReadUnlock();
+ }
+
+ locked:
+ // Re-check existence under write lock.
+ // Embed cells.
+ b->mtx.Lock();
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+
+ // Add cells.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (add) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+ }
+
+ // The element does not exist, no need to create it if we want to remove.
+ if (h->remove_ || !h->create_) {
+ b->mtx.Unlock();
+ return;
+ }
+
+ // Now try to create it under the mutex.
+ h->created_ = true;
+ // See if we have a free embed cell.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == 0) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Store in the add cells.
+ if (!add) {
+ // Allocate a new add array.
+ const uptr kInitSize = 64;
+ add = (AddBucket*)InternalAlloc(kInitSize);
+ internal_memset(add, 0, kInitSize);
+ add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add->size = 0;
+ atomic_store(&b->add, (uptr)add, memory_order_relaxed);
+ }
+ if (add->size == add->cap) {
+ // Grow existing add array.
+ uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
+ uptr newsize = oldsize * 2;
+ AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
+ internal_memset(add1, 0, newsize);
+ add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add1->size = add->size;
+ internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
+ InternalFree(add);
+ atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
+ add = add1;
+ }
+ // Store.
+ uptr i = add->size++;
+ Cell *c = &add->cells[i];
+ CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
+ h->addidx_ = i;
+ h->cell_ = c;
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::release(Handle *h) {
+ if (!h->cell_)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+}
+
+template<typename T, uptr kSize>
+uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
+ addr += addr << 10;
+ addr ^= addr >> 6;
+ return addr % kSize;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ADDRHASHMAP_H
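
The usage comment at the top of sanitizer_addrhashmap.h above documents the Handle protocol: find-or-create by default, removal via the extra bool argument, and scoped bucket locking in the Handle constructor/destructor. The following is a minimal sketch (not part of this patch) of a client inside the runtime, assuming the libsanitizer internal headers are on the include path; the AllocInfo record, the map size and the function names are purely illustrative:

    // Hypothetical AddrHashMap client; illustrative only.
    #include "sanitizer_addrhashmap.h"

    namespace __sanitizer {

    struct AllocInfo {        // T must be a POD type
      uptr size;
      u32 tid;
    };

    // kSize is preferably prime; 100003 is an arbitrary illustrative choice.
    typedef AddrHashMap<AllocInfo, 100003> AllocMap;
    static AllocMap *alloc_map;  // assumed to be constructed during tool init

    void RecordAlloc(uptr addr, uptr size, u32 tid) {
      AllocMap::Handle h(alloc_map, addr);   // find-or-create
      if (h.created()) {                     // exclusive access to the new cell
        h->size = size;
        h->tid = tid;
      }                                      // otherwise: read access only
    }                                        // bucket lock released in ~Handle

    bool ForgetAlloc(uptr addr) {
      AllocMap::Handle h(alloc_map, addr, /*remove=*/ true);
      return h.exists();                     // cell is erased in ~Handle if it existed
    }

    }  // namespace __sanitizer
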
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index 9d38e9429ac..b5e0bab91f1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -9,15 +9,15 @@
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
+
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO)
+#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
@@ -43,7 +43,7 @@ InternalAllocator *internal_allocator() {
return 0;
}
-#else // SANITIZER_GO
+#else // SANITIZER_GO
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -59,7 +59,7 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init();
+ internal_allocator_instance->Init(/* may_return_null*/ false);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -76,29 +76,29 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
- if (cache == 0) {
+ if (!cache) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
}
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // SANITIZER_GO
+#endif // SANITIZER_GO
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
if (size + sizeof(u64) < size)
- return 0;
+ return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache);
- if (p == 0)
- return 0;
+ if (!p)
+ return nullptr;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
void InternalFree(void *addr, InternalAllocatorCache *cache) {
- if (addr == 0)
+ if (!addr)
return;
addr = (char*)addr - sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
@@ -115,7 +115,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ =
- (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
+ (char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,
@@ -138,14 +138,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-void *AllocatorReturnNull() {
- if (common_flags()->allocator_may_return_null)
- return 0;
+void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
- return 0;
+ Die();
}
-} // namespace __sanitizer
+} // namespace __sanitizer
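
With AllocatorReturnNull() gone, the null-or-abort policy moves out of this helper and into the allocator objects themselves (see the Init(may_return_null), ReturnNullOrDie() and SetMayReturnNull() additions in sanitizer_allocator.h below); ReportAllocatorCannotReturnNull() now covers only the abort path and never returns. A rough sketch of how a tool-side caller is expected to use the new shape, assumed to live inside the runtime's namespace; the function and typedef names here are hypothetical:

    // Sketch only; CombinedAllocatorT and AllocatorCacheT stand in for a
    // tool's concrete CombinedAllocator/SizeClassAllocatorLocalCache typedefs.
    void *ToolCalloc(CombinedAllocatorT *a, AllocatorCacheT *cache,
                     uptr nmemb, uptr size) {
      if (CallocShouldReturnNullDueToOverflow(size, nmemb))
        return a->ReturnNullOrDie();  // nullptr if allocator_may_return_null,
                                      // otherwise ReportAllocatorCannotReturnNull()
      return a->Allocate(cache, nmemb * size, /*alignment=*/ 8, /*cleared=*/ true);
    }
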
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index 8ba825f14ec..d98528c722c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -21,8 +21,8 @@
namespace __sanitizer {
-// Depending on allocator_may_return_null either return 0 or crash.
-void *AllocatorReturnNull();
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull();
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
@@ -196,14 +196,12 @@ template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
// Memory allocator statistics
enum AllocatorStat {
- AllocatorStatMalloced,
- AllocatorStatFreed,
- AllocatorStatMmapped,
- AllocatorStatUnmapped,
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
AllocatorStatCount
};
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
@@ -211,17 +209,23 @@ class AllocatorStats {
void Init() {
internal_memset(this, 0, sizeof(*this));
}
+ void InitLinkerInitialized() {}
- void Add(AllocatorStat i, u64 v) {
+ void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- void Set(AllocatorStat i, u64 v) {
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- u64 Get(AllocatorStat i) const {
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
return atomic_load(&stats_[i], memory_order_relaxed);
}
@@ -229,17 +233,20 @@ class AllocatorStats {
friend class AllocatorGlobalStats;
AllocatorStats *next_;
AllocatorStats *prev_;
- atomic_uint64_t stats_[AllocatorStatCount];
+ atomic_uintptr_t stats_[AllocatorStatCount];
};
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void InitLinkerInitialized() {
next_ = this;
prev_ = this;
}
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
@@ -258,7 +265,7 @@ class AllocatorGlobalStats : public AllocatorStats {
}
void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
for (;;) {
@@ -268,6 +275,9 @@ class AllocatorGlobalStats : public AllocatorStats {
if (stats == this)
break;
}
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
}
private:
@@ -311,7 +321,7 @@ class SizeClassAllocator64 {
void Init() {
CHECK_EQ(kSpaceBeg,
- reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+ reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
MapWithCallback(kSpaceEnd, AdditionalSize());
}
@@ -335,7 +345,7 @@ class SizeClassAllocator64 {
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
Batch *b = region->free_list.Pop();
- if (b == 0)
+ if (!b)
b = PopulateFreeList(stat, c, class_id, region);
region->n_allocated += b->count;
return b;
@@ -359,16 +369,16 @@ class SizeClassAllocator64 {
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
- if (!size) return 0;
+ if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return 0;
+ if (class_id >= kNumClasses) return nullptr;
RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
- return 0;
+ return nullptr;
}
static uptr GetActuallyAllocatedSize(void *p) {
@@ -453,6 +463,11 @@ class SizeClassAllocator64 {
}
}
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@@ -482,11 +497,6 @@ class SizeClassAllocator64 {
};
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
RegionInfo *GetRegionInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
@@ -520,7 +530,7 @@ class SizeClassAllocator64 {
map_size += kUserMapSize;
CHECK_GE(region->mapped_user + map_size, end_idx);
MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
region->mapped_user += map_size;
}
uptr total_count = (region->mapped_user - beg_idx - size)
@@ -597,6 +607,7 @@ class TwoLevelByteMap {
internal_memset(map1_, 0, sizeof(map1_));
mu_.Init();
}
+
void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i);
@@ -810,6 +821,10 @@ class SizeClassAllocator32 {
void PrintStats() {
}
+ static uptr AdditionalSize() {
+ return 0;
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
@@ -839,7 +854,7 @@ class SizeClassAllocator32 {
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMmapped, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
@@ -856,9 +871,9 @@ class SizeClassAllocator32 {
uptr reg = AllocateRegion(stat, class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = 0;
+ Batch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (b == 0) {
+ if (!b) {
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else
@@ -869,7 +884,7 @@ class SizeClassAllocator32 {
if (b->count == max_count) {
CHECK_GT(b->count, 0);
sci->free_list.push_back(b);
- b = 0;
+ b = nullptr;
}
}
if (b) {
@@ -905,7 +920,7 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -920,7 +935,7 @@ struct SizeClassAllocatorLocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
@@ -994,9 +1009,14 @@ struct SizeClassAllocatorLocalCache {
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void InitLinkerInitialized(bool may_return_null) {
page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -1004,15 +1024,20 @@ class LargeMmapAllocator {
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
- if (map_size < size) return AllocatorReturnNull(); // Overflow.
+ // Overflow.
+ if (map_size < size)
+ return ReturnNullOrDie();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
- CHECK_EQ(0, res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
CHECK_LE(res + size, map_end);
Header *h = GetHeader(res);
h->size = size;
@@ -1031,12 +1056,22 @@ class LargeMmapAllocator {
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatMalloced, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
}
return reinterpret_cast<void*>(res);
}
+ void *ReturnNullOrDie() {
+ if (atomic_load(&may_return_null_, memory_order_acquire))
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@@ -1050,8 +1085,8 @@ class LargeMmapAllocator {
chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
- stat->Add(AllocatorStatFreed, h->map_size);
- stat->Add(AllocatorStatUnmapped, h->map_size);
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
@@ -1069,7 +1104,7 @@ class LargeMmapAllocator {
}
bool PointerIsMine(const void *p) {
- return GetBlockBegin(p) != 0;
+ return GetBlockBegin(p) != nullptr;
}
uptr GetActuallyAllocatedSize(void *p) {
@@ -1098,13 +1133,13 @@ class LargeMmapAllocator {
nearest_chunk = ch;
}
if (!nearest_chunk)
- return 0;
+ return nullptr;
Header *h = reinterpret_cast<Header *>(nearest_chunk);
CHECK_GE(nearest_chunk, h->map_beg);
CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
CHECK_LE(nearest_chunk, p);
if (h->map_beg + h->map_size <= p)
- return 0;
+ return nullptr;
return GetUser(h);
}
@@ -1114,7 +1149,7 @@ class LargeMmapAllocator {
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
- if (!n) return 0;
+ if (!n) return nullptr;
if (!chunks_sorted_) {
// Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
SortArray(reinterpret_cast<uptr*>(chunks_), n);
@@ -1126,7 +1161,7 @@ class LargeMmapAllocator {
chunks_[n - 1]->map_size;
}
if (p < min_mmap_ || p >= max_mmap_)
- return 0;
+ return nullptr;
uptr beg = 0, end = n - 1;
// This loop is a log(n) lower_bound. It does not check for the exact match
// to avoid expensive cache-thrashing loads.
@@ -1147,7 +1182,7 @@ class LargeMmapAllocator {
Header *h = chunks_[beg];
if (h->map_beg + h->map_size <= p || p < h->map_beg)
- return 0;
+ return nullptr;
return GetUser(h);
}
@@ -1215,6 +1250,7 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
+ atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
@@ -1228,19 +1264,32 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void Init() {
+ void InitCommon(bool may_return_null) {
primary_.Init();
- secondary_.Init();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(bool may_return_null) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null);
+ }
+
+ void Init(bool may_return_null) {
+ secondary_.Init(may_return_null);
stats_.Init();
+ InitCommon(may_return_null);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false) {
+ bool cleared = false, bool check_rss_limit = false) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size)
- return AllocatorReturnNull();
+ return ReturnNullOrDie();
+ if (check_rss_limit && RssLimitIsExceeded())
+ return ReturnNullOrDie();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
@@ -1256,6 +1305,30 @@ class CombinedAllocator {
return res;
}
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDie() {
+ if (MayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
@@ -1270,7 +1343,7 @@ class CombinedAllocator {
return Allocate(cache, new_size, alignment);
if (!new_size) {
Deallocate(cache, p);
- return 0;
+ return nullptr;
}
CHECK(PointerIsMine(p));
uptr old_size = GetActuallyAllocatedSize(p);
@@ -1368,11 +1441,13 @@ class CombinedAllocator {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
};
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_ALLOCATOR_H
+#endif // SANITIZER_ALLOCATOR_H
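
The stats rework above replaces the four event counters (Malloced/Freed/Mmapped/Unmapped) with two net counters (Allocated/Mapped) that are incremented on allocation/mapping and decremented via the new Sub() on free/unmap, with AllocatorGlobalStats::Get() clamping negative sums to zero. Reading them is unchanged; a minimal illustration (the wrapper functions are hypothetical):

    // Sketch: querying live heap/mapping totals through the new net counters.
    static AllocatorGlobalStats global_stats;  // assumed to have caches Register()ed

    uptr CurrentlyAllocatedBytes() {
      AllocatorStatCounters s;                 // now uptr[AllocatorStatCount]
      global_stats.Get(s);                     // aggregates all registered threads
      return s[AllocatorStatAllocated];
    }

    uptr CurrentlyMappedBytes() {
      AllocatorStatCounters s;
      global_stats.Get(s);
      return s[AllocatorStatMapped];
    }
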
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h b/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
new file mode 100644
index 00000000000..fd5bed30c4b
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
@@ -0,0 +1,36 @@
+//===-- sanitizer_allocator_interface.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Re-declaration of functions from public sanitizer allocator interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include "sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ /* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ /* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+} // extern "C"
+
+#endif // SANITIZER_ALLOCATOR_INTERFACE_H
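
The re-declared interface above includes the optional weak hooks, so a program linked with a sanitizer runtime can observe allocations simply by defining them. A user-side sketch with the prototypes spelled out locally (the installed public header, where available, declares the same functions with size_t in place of uptr); the logging is purely illustrative:

    #include <stddef.h>
    #include <stdio.h>

    // Prototype mirroring the declaration above (uptr corresponds to size_t).
    extern "C" size_t __sanitizer_get_allocated_size(const void *p);

    // Real hooks should avoid allocating; fprintf here is just for illustration.
    extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
      fprintf(stderr, "alloc %p: %zu bytes (usable %zu)\n",
              ptr, size, __sanitizer_get_allocated_size(ptr));
    }

    extern "C" void __sanitizer_free_hook(void *ptr) {
      fprintf(stderr, "free  %p\n", ptr);
    }
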
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
index efdb89e3682..99d8516d8cf 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
@@ -1,4 +1,4 @@
-//===-- sanitizer_allocator_internal.h -------------------------- C++ -----===//
+//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -22,45 +22,40 @@ namespace __sanitizer {
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorSpace = 0;
-#if SANITIZER_WORDSIZE == 32
-static const u64 kInternalAllocatorSize = (1ULL << 32);
+static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kInternalAllocatorRegionSizeLog = 20;
+#if SANITIZER_WORDSIZE == 32
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
-static const u64 kInternalAllocatorSize = (1ULL << 47);
-static const uptr kInternalAllocatorRegionSizeLog = 24;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
typedef SizeClassAllocator32<
- kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap,
+ kInternalAllocatorSpace, kInternalAllocatorSize, 0, InternalSizeClassMap,
kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
-// We don't want our internal allocator to do any map/unmap operations from
-// LargeMmapAllocator.
-struct CrashOnMapUnmap {
- void OnMap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
- }
- void OnUnmap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!");
- }
-};
-
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<CrashOnMapUnmap> >
- InternalAllocator;
+ LargeMmapAllocator<> > InternalAllocator;
-void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0);
-void InternalFree(void *p, InternalAllocatorCache *cache = 0);
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr);
+void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
InternalAllocator *internal_allocator();
-} // namespace __sanitizer
+enum InternalAllocEnum {
+ INTERNAL_ALLOC
+};
+
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_size_type size,
+ InternalAllocEnum) {
+ return InternalAlloc(size);
+}
-#endif // SANITIZER_ALLOCATOR_INTERNAL_H
+#endif // SANITIZER_ALLOCATOR_INTERNAL_H
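
The new INTERNAL_ALLOC placement form lets runtime code construct C++ objects from the internal allocator instead of going through the (intercepted) global operator new. A minimal sketch, assumed to live inside the runtime sources; the record type and helper functions are hypothetical:

    // Sketch: allocate and release an internal bookkeeping object.
    struct IgnoredRange {                 // hypothetical internal record
      uptr beg, end;
      IgnoredRange(uptr b, uptr e) : beg(b), end(e) {}
    };

    IgnoredRange *NewIgnoredRange(uptr b, uptr e) {
      // operator new(size, INTERNAL_ALLOC) forwards to InternalAlloc(size).
      return new(INTERNAL_ALLOC) IgnoredRange(b, e);
    }

    void DeleteIgnoredRange(IgnoredRange *r) {
      r->~IgnoredRange();                 // no matching operator delete is defined,
      InternalFree(r);                    // so destroy and free explicitly
    }
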
diff --git a/libsanitizer/sanitizer_common/sanitizer_asm.h b/libsanitizer/sanitizer_common/sanitizer_asm.h
index c632f360295..985f59c23c2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_asm.h
+++ b/libsanitizer/sanitizer_common/sanitizer_asm.h
@@ -21,8 +21,11 @@
# define CFI_STARTPROC .cfi_startproc
# define CFI_ENDPROC .cfi_endproc
# define CFI_ADJUST_CFA_OFFSET(n) .cfi_adjust_cfa_offset n
+# define CFI_DEF_CFA_OFFSET(n) .cfi_def_cfa_offset n
# define CFI_REL_OFFSET(reg, n) .cfi_rel_offset reg, n
+# define CFI_OFFSET(reg, n) .cfi_offset reg, n
# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
+# define CFI_DEF_CFA(reg, n) .cfi_def_cfa reg, n
# define CFI_RESTORE(reg) .cfi_restore reg
#else // No CFI
@@ -30,7 +33,10 @@
# define CFI_STARTPROC
# define CFI_ENDPROC
# define CFI_ADJUST_CFA_OFFSET(n)
+# define CFI_DEF_CFA_OFFSET(n)
# define CFI_REL_OFFSET(reg, n)
+# define CFI_OFFSET(reg, n)
# define CFI_DEF_CFA_REGISTER(reg)
+# define CFI_DEF_CFA(reg, n)
# define CFI_RESTORE(reg)
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic.h b/libsanitizer/sanitizer_common/sanitizer_atomic.h
index f2bf23588a4..4973b7d4e88 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic.h
@@ -42,7 +42,8 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
- volatile Type val_dont_use;
+ // On 32-bit platforms u64 is not necessarily aligned to 8 bytes.
+ volatile ALIGNED(8) Type val_dont_use;
};
struct atomic_uintptr_t {
@@ -52,7 +53,7 @@ struct atomic_uintptr_t {
} // namespace __sanitizer
-#if defined(__GNUC__)
+#if defined(__clang__) || defined(__GNUC__)
# include "sanitizer_atomic_clang.h"
#elif defined(_MSC_VER)
# include "sanitizer_atomic_msvc.h"
@@ -60,4 +61,20 @@ struct atomic_uintptr_t {
# error "Unsupported compiler"
#endif
+namespace __sanitizer {
+
+// Clutter-reducing helpers.
+
+template<typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+ return atomic_load(a, memory_order_relaxed);
+}
+
+template<typename T>
+INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+ atomic_store(a, v, memory_order_relaxed);
+}
+
+} // namespace __sanitizer
+
#endif // SANITIZER_ATOMIC_H
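
The new atomic_load_relaxed/atomic_store_relaxed helpers are plain shorthands for the explicit memory_order_relaxed calls. For illustration (a fragment inside namespace __sanitizer; the counter is hypothetical):

    static atomic_uintptr_t n_reports;

    void BumpReportCount() {
      // Equivalent to atomic_load(&n_reports, memory_order_relaxed):
      uptr n = atomic_load_relaxed(&n_reports);
      // Equivalent to atomic_store(&n_reports, n + 1, memory_order_relaxed):
      atomic_store_relaxed(&n_reports, n + 1);
      // Note: this load/store pair only demonstrates the helpers; an atomic
      // increment would use atomic_fetch_add instead.
    }
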
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
index 88819e32a73..c600999e67a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
@@ -13,8 +13,26 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
+#if defined(__i386__) || defined(__x86_64__)
+# include "sanitizer_atomic_clang_x86.h"
+#else
+# include "sanitizer_atomic_clang_other.h"
+#endif
+
namespace __sanitizer {
+// We would like to just use compiler builtin atomic operations
+// for loads and stores, but they are mostly broken in clang:
+// - they lead to vastly inefficient code generation
+// (http://llvm.org/bugs/show_bug.cgi?id=17281)
+// - 64-bit atomic operations are not implemented on x86_32
+// (http://llvm.org/bugs/show_bug.cgi?id=15034)
+// - they are not implemented on ARM
+// error: undefined reference to '__atomic_load_4'
+
+// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+// for mappings of the memory model to different processors.
+
INLINE void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
@@ -23,59 +41,6 @@ INLINE void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
-INLINE void proc_yield(int cnt) {
- __asm__ __volatile__("" ::: "memory");
-#if defined(__i386__) || defined(__x86_64__)
- for (int i = 0; i < cnt; i++)
- __asm__ __volatile__("pause");
-#endif
- __asm__ __volatile__("" ::: "memory");
-}
-
-template<typename T>
-INLINE typename T::Type atomic_load(
- const volatile T *a, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_consume
- | memory_order_acquire | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- typename T::Type v;
- // FIXME:
- // 64-bit atomic operations are not atomic on 32-bit platforms.
- // The implementation lacks necessary memory fences on ARM/PPC.
- // We would like to use compiler builtin atomic operations,
- // but they are mostly broken:
- // - they lead to vastly inefficient code generation
- // (http://llvm.org/bugs/show_bug.cgi?id=17281)
- // - 64-bit atomic operations are not implemented on x86_32
- // (http://llvm.org/bugs/show_bug.cgi?id=15034)
- // - they are not implemented on ARM
- // error: undefined reference to '__atomic_load_4'
- if (mo == memory_order_relaxed) {
- v = a->val_dont_use;
- } else {
- atomic_signal_fence(memory_order_seq_cst);
- v = a->val_dont_use;
- atomic_signal_fence(memory_order_seq_cst);
- }
- return v;
-}
-
-template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
- DCHECK(mo & (memory_order_relaxed | memory_order_release
- | memory_order_seq_cst));
- DCHECK(!((uptr)a % sizeof(*a)));
- if (mo == memory_order_relaxed) {
- a->val_dont_use = v;
- } else {
- atomic_signal_fence(memory_order_seq_cst);
- a->val_dont_use = v;
- atomic_signal_fence(memory_order_seq_cst);
- }
- if (mo == memory_order_seq_cst)
- atomic_thread_fence(memory_order_seq_cst);
-}
-
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
new file mode 100644
index 00000000000..c66c0992e1b
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -0,0 +1,95 @@
+//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
+#define SANITIZER_ATOMIC_CLANG_OTHER_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __sync_synchronize();
+ } else { // seq_cst
+ // E.g. on POWER we need a hw fence even before the store.
+ __sync_synchronize();
+ v = a->val_dont_use;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ // Gross, but simple and reliable.
+ // Assume that it is not in read-only memory.
+ v = __sync_fetch_and_add(
+ const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ // Gross, but simple and reliable.
+ typename T::Type cmp = a->val_dont_use;
+ typename T::Type cur;
+ for (;;) {
+ cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+ if (cmp == v)
+ break;
+ cmp = cur;
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
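
On 32-bit targets without native 64-bit atomics, the fallback above emulates a 64-bit load with __sync_fetch_and_add(addr, 0), which returns the current value without changing it at the cost of a write and a full barrier (hence the requirement that the location not be read-only), and emulates a 64-bit store with a compare-and-swap retry loop. The load idiom in isolation, as a standalone sketch outside the sanitizer sources:

    #include <stdint.h>

    // Atomically read a 64-bit value on a target lacking a native 64-bit
    // atomic load: fetch-and-add of 0 returns the previous (current) value.
    static uint64_t emulated_load64(volatile uint64_t *addr) {
      return __sync_fetch_and_add(addr, 0);
    }
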
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
new file mode 100644
index 00000000000..5df210eca79
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang_x86.h
@@ -0,0 +1,114 @@
+//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_X86_H
+#define SANITIZER_ATOMIC_CLANG_X86_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ // On x86 loads are implicitly acquire.
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 plain MOV is enough for seq_cst store.
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;" // (ptr could be read-only)
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (v)
+ : "m" (a->val_dont_use)
+ : // mark the FP stack and mmx registers as clobbered
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;"
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (a->val_dont_use)
+ : "m" (v)
+ : // mark the FP stack and mmx registers as clobbered
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
index dac7c19199b..4ac3b90769f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -19,33 +19,41 @@ extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
+extern "C" char _InterlockedExchange8( // NOLINT
+ char volatile *Addend, char Value); // NOLINT
+#pragma intrinsic(_InterlockedExchange8)
+extern "C" short _InterlockedExchange16( // NOLINT
+ short volatile *Addend, short Value); // NOLINT
+#pragma intrinsic(_InterlockedExchange16)
+extern "C" long _InterlockedExchange( // NOLINT
+ long volatile *Addend, long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchange)
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
-
-#ifdef _WIN64
+extern "C" short _InterlockedCompareExchange16( // NOLINT
+ short volatile *Destination, // NOLINT
+ short Exchange, short Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange16)
+extern "C"
+long long _InterlockedCompareExchange64( // NOLINT
+ long long volatile *Destination, // NOLINT
+ long long Exchange, long long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange64)
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
-#else
-// There's no _InterlockedCompareExchangePointer intrinsic on x86,
-// so call _InterlockedCompareExchange instead.
extern "C"
long __cdecl _InterlockedCompareExchange( // NOLINT
long volatile *Destination, // NOLINT
long Exchange, long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)
-inline static void *_InterlockedCompareExchangePointer(
- void *volatile *Destination,
- void *Exchange, void *Comparand) {
- return reinterpret_cast<void*>(
- _InterlockedCompareExchange(
- reinterpret_cast<long volatile*>(Destination), // NOLINT
- reinterpret_cast<long>(Exchange), // NOLINT
- reinterpret_cast<long>(Comparand))); // NOLINT
-}
+#ifdef _WIN64
+extern "C" long long _InterlockedExchangeAdd64( // NOLINT
+ long long volatile * Addend, long long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd64)
#endif
namespace __sanitizer {
@@ -106,32 +114,63 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
}
+INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, (long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+#endif
+}
+
+INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+}
+
+INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, -(long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+#endif
+}
+
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
- __asm {
- mov eax, a
- mov cl, v
- xchg [eax], cl // NOLINT
- mov v, cl
- }
- return v;
+ return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}
INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
u16 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
- __asm {
- mov eax, a
- mov cx, v
- xchg [eax], cx // NOLINT
- mov v, cx
- }
- return v;
+ return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
+}
+
+INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
+#ifndef _WIN64
+
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp,
u8 xchgv,
@@ -153,6 +192,8 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
return false;
}
+#endif
+
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
@@ -166,6 +207,45 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+ u16 *cmp,
+ u16 xchg,
+ memory_order mo) {
+ u16 cmpv = *cmp;
+ u16 prev = (u16)_InterlockedCompareExchange16(
+ (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+ u32 *cmp,
+ u32 xchg,
+ memory_order mo) {
+ u32 cmpv = *cmp;
+ u32 prev = (u32)_InterlockedCompareExchange(
+ (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+ u64 *cmp,
+ u64 xchg,
+ memory_order mo) {
+ u64 cmpv = *cmp;
+ u64 prev = (u64)_InterlockedCompareExchange64(
+ (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
diff --git a/libsanitizer/sanitizer_common/sanitizer_bitvector.h b/libsanitizer/sanitizer_common/sanitizer_bitvector.h
new file mode 100644
index 00000000000..bb2872facde
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_bitvector.h
@@ -0,0 +1,349 @@
+//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized BitVector implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BITVECTOR_H
+#define SANITIZER_BITVECTOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Fixed size bit vector based on a single basic integer.
+template <class basic_int_t = uptr>
+class BasicBitVector {
+ public:
+ enum SizeEnum { kSize = sizeof(basic_int_t) * 8 };
+
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() { bits_ = 0; }
+ void setAll() { bits_ = ~(basic_int_t)0; }
+ bool empty() const { return bits_ == 0; }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ |= mask(idx);
+ return bits_ != old;
+ }
+
+ // Returns true if the bit has changed from 1 to 0.
+ bool clearBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ &= ~mask(idx);
+ return bits_ != old;
+ }
+
+ bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
+
+ uptr getAndClearFirstOne() {
+ CHECK(!empty());
+ uptr idx = LeastSignificantSetBitIndex(bits_);
+ clearBit(idx);
+ return idx;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ |= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= ~v.bits_;
+ return bits_ != old;
+ }
+
+ void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const BasicBitVector &v) const {
+ return (bits_ & v.bits_) != 0;
+ }
+
+ // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
+ bool hasNext() const { return !bv_.empty(); }
+ uptr next() { return bv_.getAndClearFirstOne(); }
+ void clear() { bv_.clear(); }
+ private:
+ BasicBitVector bv_;
+ };
+
+ private:
+ basic_int_t mask(uptr idx) const {
+ CHECK_LT(idx, size());
+ return (basic_int_t)1UL << idx;
+ }
+ basic_int_t bits_;
+};
+
+// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
+// The implementation is optimized for better performance on
+// sparse bit vectors, i.e. those with few set bits.
+template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
+class TwoLevelBitVector {
+ // This is essentially a 2-level bit vector.
+ // A set bit in the first-level BV indicates that there are set bits
+ // in the corresponding BV of the second level.
+ // This structure allows O(kLevel1Size) time for clear() and empty(),
+ // as well as fast handling of sparse BVs.
+ public:
+ enum SizeEnum { kSize = BV::kSize * BV::kSize * kLevel1Size };
+ // No CTOR.
+
+ uptr size() const { return kSize; }
+
+ void clear() {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ l1_[i].clear();
+ }
+
+ void setAll() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ l1_[i0].setAll();
+ for (uptr i1 = 0; i1 < BV::kSize; i1++)
+ l2_[i0][i1].setAll();
+ }
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ if (!l1_[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ if (!l1_[i0].getBit(i1)) {
+ l1_[i0].setBit(i1);
+ l2_[i0][i1].clear();
+ }
+ bool res = l2_[i0][i1].setBit(i2);
+ // Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
+ // idx, i0, i1, i2, res);
+ return res;
+ }
+
+ bool clearBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ bool res = false;
+ if (l1_[i0].getBit(i1)) {
+ res = l2_[i0][i1].clearBit(i2);
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ return res;
+ }
+
+ bool getBit(uptr idx) const {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ // Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
+ return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
+ }
+
+ uptr getAndClearFirstOne() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].empty()) continue;
+ uptr i1 = l1_[i0].getAndClearFirstOne();
+ uptr i2 = l2_[i0][i1].getAndClearFirstOne();
+ if (!l2_[i0][i1].empty())
+ l1_[i0].setBit(i1);
+ uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
+ // Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
+ return res;
+ }
+ CHECK(0);
+ return 0;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = v.l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l1_[i0].setBit(i1))
+ l2_[i0][i1].clear();
+ if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].setIntersection(v.l1_[i0]))
+ res = true;
+ if (!l1_[i0].empty()) {
+ BV t = l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ return res;
+ }
+
+ void copyFrom(const TwoLevelBitVector &v) {
+ clear();
+ setUnion(v);
+ }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const TwoLevelBitVector &v) const {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (!v.l1_[i0].getBit(i1)) continue;
+ if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
+ it1_.clear();
+ it2_.clear();
+ }
+
+ bool hasNext() const {
+ if (it1_.hasNext()) return true;
+ for (uptr i = i0_; i < kLevel1Size; i++)
+ if (!bv_.l1_[i].empty()) return true;
+ return false;
+ }
+
+ uptr next() {
+ // Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ if (!it1_.hasNext() && !it2_.hasNext()) {
+ for (; i0_ < kLevel1Size; i0_++) {
+ if (bv_.l1_[i0_].empty()) continue;
+ it1_ = typename BV::Iterator(bv_.l1_[i0_]);
+ // Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ break;
+ }
+ }
+ if (!it2_.hasNext()) {
+ CHECK(it1_.hasNext());
+ i1_ = it1_.next();
+ it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
+ // Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ }
+ CHECK(it2_.hasNext());
+ uptr i2 = it2_.next();
+ uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
+ // Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
+ // it1_.hasNext(), it2_.hasNext(), kSize, res);
+ if (!it1_.hasNext() && !it2_.hasNext())
+ i0_++;
+ return res;
+ }
+
+ private:
+ const TwoLevelBitVector &bv_;
+ uptr i0_, i1_;
+ typename BV::Iterator it1_, it2_;
+ };
+
+ private:
+ void check(uptr idx) const { CHECK_LE(idx, size()); }
+
+ uptr idx0(uptr idx) const {
+ uptr res = idx / (BV::kSize * BV::kSize);
+ CHECK_LE(res, kLevel1Size);
+ return res;
+ }
+
+ uptr idx1(uptr idx) const {
+ uptr res = (idx / BV::kSize) % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ uptr idx2(uptr idx) const {
+ uptr res = idx % BV::kSize;
+ CHECK_LE(res, BV::kSize);
+ return res;
+ }
+
+ BV l1_[kLevel1Size];
+ BV l2_[kLevel1Size][BV::kSize];
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BITVECTOR_H
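
A short usage sketch for the new bit vectors (illustrative only; it assumes the sanitizer_common build environment and is not part of the patch). A TwoLevelBitVector<1, BasicBitVector<u64> > holds 1 * 64 * 64 = 4096 bits; setBit() decomposes an index into (i0, i1, i2) exactly as idx0()/idx1()/idx2() above, and the Iterator visits the set bits from lowest to highest index:

  #include "sanitizer_bitvector.h"
  using namespace __sanitizer;

  typedef TwoLevelBitVector<1, BasicBitVector<u64> > BV4096;  // 4096 bits

  void Example() {
    BV4096 bv;      // no constructor by design; clear() initializes it
    bv.clear();
    bv.setBit(5);
    bv.setBit(4000);
    for (BV4096::Iterator it(bv); it.hasNext();) {
      uptr idx = it.next();   // yields 5, then 4000
      (void)idx;
    }
  }
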
diff --git a/libsanitizer/sanitizer_common/sanitizer_bvgraph.h b/libsanitizer/sanitizer_common/sanitizer_bvgraph.h
new file mode 100644
index 00000000000..6ef0e81e044
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_bvgraph.h
@@ -0,0 +1,163 @@
+//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// BVGraph -- a directed graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BVGRAPH_H
+#define SANITIZER_BVGRAPH_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bitvector.h"
+
+namespace __sanitizer {
+
+// Directed graph of fixed size implemented as an array of bit vectors.
+// Not thread-safe, all accesses should be protected by an external lock.
+template<class BV>
+class BVGraph {
+ public:
+ enum SizeEnum { kSize = BV::kSize };
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() {
+ for (uptr i = 0; i < size(); i++)
+ v[i].clear();
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < size(); i++)
+ if (!v[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if a new edge was added.
+ bool addEdge(uptr from, uptr to) {
+ check(from, to);
+ return v[from].setBit(to);
+ }
+
+ // Returns the number of new edges added (at most 'max_added_edges'),
+ // storing the source node of each new edge in 'added_edges'.
+ uptr addEdges(const BV &from, uptr to, uptr added_edges[],
+ uptr max_added_edges) {
+ uptr res = 0;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr node = t1.getAndClearFirstOne();
+ if (v[node].setBit(to))
+ if (res < max_added_edges)
+ added_edges[res++] = node;
+ }
+ return res;
+ }
+
+ // *EXPERIMENTAL*
+ // Returns true if an edge from=>to exists.
+ // This function does not use any global state except for 'this' itself,
+ // and thus can be called from different threads w/o locking.
+ // This would be racy.
+ // FIXME: investigate how much we can prove about this race being "benign".
+ bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }
+
+ // Returns true if the edge from=>to was removed.
+ bool removeEdge(uptr from, uptr to) {
+ return v[from].clearBit(to);
+ }
+
+ // Returns true if at least one edge *=>to was removed.
+ bool removeEdgesTo(const BV &to) {
+ bool res = 0;
+ for (uptr from = 0; from < size(); from++) {
+ if (v[from].setDifference(to))
+ res = true;
+ }
+ return res;
+ }
+
+ // Returns true if at least one edge from=>* was removed.
+ bool removeEdgesFrom(const BV &from) {
+ bool res = false;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr idx = t1.getAndClearFirstOne();
+ if (!v[idx].empty()) {
+ v[idx].clear();
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ void removeEdgesFrom(uptr from) {
+ return v[from].clear();
+ }
+
+ bool hasEdge(uptr from, uptr to) const {
+ check(from, to);
+ return v[from].getBit(to);
+ }
+
+ // Returns true if there is a path from the node 'from'
+ // to any of the nodes in 'targets'.
+ bool isReachable(uptr from, const BV &targets) {
+ BV &to_visit = t1,
+ &visited = t2;
+ to_visit.copyFrom(v[from]);
+ visited.clear();
+ visited.setBit(from);
+ while (!to_visit.empty()) {
+ uptr idx = to_visit.getAndClearFirstOne();
+ if (visited.setBit(idx))
+ to_visit.setUnion(v[idx]);
+ }
+ return targets.intersectsWith(visited);
+ }
+
+ // Finds a path from 'from' to one of the nodes in 'target',
+ // stores up to 'path_size' items of the path into 'path',
+ // returns the path length, or 0 if there is no path of size 'path_size'.
+ uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
+ if (path_size == 0)
+ return 0;
+ path[0] = from;
+ if (targets.getBit(from))
+ return 1;
+ // The function is recursive, so we don't want to create BV on stack.
+ // Instead of a getAndClearFirstOne loop we use the slower iterator.
+ for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
+ uptr idx = it.next();
+ if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
+ return res + 1;
+ }
+ return 0;
+ }
+
+ // Same as findPath, but finds a shortest path.
+ uptr findShortestPath(uptr from, const BV &targets, uptr *path,
+ uptr path_size) {
+ for (uptr p = 1; p <= path_size; p++)
+ if (findPath(from, targets, path, p) == p)
+ return p;
+ return 0;
+ }
+
+ private:
+ void check(uptr idx1, uptr idx2) const {
+ CHECK_LT(idx1, size());
+ CHECK_LT(idx2, size());
+ }
+ BV v[kSize];
+ // Keep temporary vectors here since we can not create large objects on stack.
+ BV t1, t2;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BVGRAPH_H
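
Likewise for BVGraph, a sketch of the reachability and path queries (illustrative only, assuming the sanitizer_common environment): edges are stored as one bit vector of successors per node, so isReachable() is a breadth-first sweep over bit vectors and findPath() a depth-first search:

  #include "sanitizer_bvgraph.h"
  using namespace __sanitizer;

  // Static storage: a BVGraph holds kSize + 2 bit vectors and may be too
  // large for the stack with bigger BV types.
  static BVGraph<BasicBitVector<u64> > g;

  void Example() {
    g.clear();
    g.addEdge(1, 2);
    g.addEdge(2, 3);
    BasicBitVector<u64> targets;
    targets.clear();
    targets.setBit(3);
    CHECK(g.isReachable(1, targets));              // 1 -> 2 -> 3
    uptr path[4];
    CHECK_EQ(g.findPath(1, targets, path, 4), 3);  // path = {1, 2, 3}
  }
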
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc
index 0d93527aa5e..4529e63eba9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -10,15 +10,19 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
-#include "sanitizer_stacktrace.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";
+atomic_uint32_t current_verbosity;
+
uptr GetPageSizeCached() {
static uptr PageSize;
if (!PageSize)
@@ -26,19 +30,71 @@ uptr GetPageSizeCached() {
return PageSize;
}
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
+
+void RawWrite(const char *buffer) {
+ report_file.Write(buffer, internal_strlen(buffer));
+}
-// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
-// isn't equal to the current PID, try to obtain file descriptor by opening
-// file "report_path_prefix.<PID>".
-fd_t report_fd = kStderrFd;
+void ReportFile::ReopenIfNecessary() {
+ mu->CheckLocked();
+ if (fd == kStdoutFd || fd == kStderrFd) return;
+
+ uptr pid = internal_getpid();
+ // If in tracer, use the parent's file.
+ if (pid == stoptheworld_tracer_pid)
+ pid = stoptheworld_tracer_ppid;
+ if (fd != kInvalidFd) {
+ // If the report file is already opened by the current process,
+ // do nothing. Otherwise it was opened by the parent process;
+ // close it now.
+ if (fd_pid == pid)
+ return;
+ else
+ CloseFile(fd);
+ }
-// Set via __sanitizer_set_report_path.
-bool log_to_file = false;
-char report_path_prefix[sizeof(report_path_prefix)];
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
+ exe_name, pid);
+ } else {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+ }
+ fd = OpenFile(full_path, WrOnly);
+ if (fd == kInvalidFd) {
+ const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+ WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
+ Die();
+ }
+ fd_pid = pid;
+}
-// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
-// child thread will be different from |report_fd_pid|.
-uptr report_fd_pid = 0;
+void ReportFile::SetReportPath(const char *path) {
+ if (!path)
+ return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+
+ SpinMutexLock l(mu);
+ if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+ CloseFile(fd);
+ fd = kInvalidFd;
+ if (internal_strcmp(path, "stdout") == 0) {
+ fd = kStdoutFd;
+ } else if (internal_strcmp(path, "stderr") == 0) {
+ fd = kStderrFd;
+ } else {
+ internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ }
+}
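
For reference, the user-visible effect of the ReportFile plumbing above (a hypothetical user-side sketch, not part of the patch): passing a prefix makes reports go to a lazily opened "<prefix>.<pid>" file (or "<prefix>.<exe name>.<pid>" when log_exe_name is set), while the special names "stdout" and "stderr" keep reports on the standard streams:

  #include <sanitizer/common_interface_defs.h>

  int main() {
    // Reports will be written to /tmp/asan_report.<pid> on the first report.
    __sanitizer_set_report_path("/tmp/asan_report");
    // ... rest of the instrumented program ...
    return 0;
  }
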
// PID of the tracer task in StopTheWorld. It shares the address space with the
// main process, but has a different PID and thus requires special handling.
@@ -47,20 +103,47 @@ uptr stoptheworld_tracer_pid = 0;
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;
-static DieCallbackType DieCallback;
-void SetDieCallback(DieCallbackType callback) {
- DieCallback = callback;
+static const int kMaxNumOfInternalDieCallbacks = 5;
+static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
+
+bool AddDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == nullptr) {
+ InternalDieCallbacks[i] = callback;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool RemoveDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == callback) {
+ internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
+ sizeof(InternalDieCallbacks[0]) *
+ (kMaxNumOfInternalDieCallbacks - i - 1));
+ InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
+ return true;
+ }
+ }
+ return false;
}
-DieCallbackType GetDieCallback() {
- return DieCallback;
+static DieCallbackType UserDieCallback;
+void SetUserDieCallback(DieCallbackType callback) {
+ UserDieCallback = callback;
}
void NORETURN Die() {
- if (DieCallback) {
- DieCallback();
+ if (UserDieCallback)
+ UserDieCallback();
+ for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
+ if (InternalDieCallbacks[i])
+ InternalDieCallbacks[i]();
}
- internal__exit(1);
+ if (common_flags()->abort_on_error)
+ Abort();
+ internal__exit(common_flags()->exitcode);
}
static CheckFailedCallbackType CheckFailedCallback;
@@ -78,37 +161,59 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
Die();
}
-uptr ReadFileToBuffer(const char *file_name, char **buff,
- uptr *buff_size, uptr max_len) {
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+ const char *mmap_type, error_t err) {
+ static int recursion_count;
+ if (recursion_count) {
+ // The Report() and CHECK calls below may call mmap recursively and fail.
+ // If we went into recursion, just die.
+ RawWrite("ERROR: Failed to mmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report("ERROR: %s failed to "
+ "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, mmap_type, size, size, mem_type, err);
+#ifndef SANITIZER_GO
+ DumpProcessMap();
+#endif
+ UNREACHABLE("unable to mmap");
+}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
uptr PageSize = GetPageSizeCached();
uptr kMinFileLen = PageSize;
- uptr read_len = 0;
- *buff = 0;
+ *buff = nullptr;
*buff_size = 0;
+ *read_len = 0;
// The files we usually open are not seekable, so try different buffer sizes.
for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
- uptr openrv = OpenFile(file_name, /*write*/ false);
- if (internal_iserror(openrv)) return 0;
- fd_t fd = openrv;
+ fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+ if (fd == kInvalidFd) return false;
UnmapOrDie(*buff, *buff_size);
- *buff = (char*)MmapOrDie(size, __FUNCTION__);
+ *buff = (char*)MmapOrDie(size, __func__);
*buff_size = size;
+ *read_len = 0;
// Read up to one page at a time.
- read_len = 0;
bool reached_eof = false;
- while (read_len + PageSize <= size) {
- uptr just_read = internal_read(fd, *buff + read_len, PageSize);
+ while (*read_len + PageSize <= size) {
+ uptr just_read;
+ if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) {
+ UnmapOrDie(*buff, *buff_size);
+ return false;
+ }
if (just_read == 0) {
reached_eof = true;
break;
}
- read_len += just_read;
+ *read_len += just_read;
}
- internal_close(fd);
+ CloseFile(fd);
if (reached_eof) // We've read the whole file.
break;
}
- return read_len;
+ return true;
}
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
@@ -145,141 +250,222 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) {
- if (filepath == 0) return 0;
- if (strip_path_prefix == 0) return filepath;
- const char *pos = internal_strstr(filepath, strip_path_prefix);
- if (pos == 0) return filepath;
- pos += internal_strlen(strip_path_prefix);
- if (pos[0] == '.' && pos[1] == '/')
- pos += 2;
- return pos;
-}
-
-void PrintSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column) {
- CHECK(file);
- buffer->append("%s",
- StripPathPrefix(file, common_flags()->strip_path_prefix));
- if (line > 0) {
- buffer->append(":%d", line);
- if (column > 0)
- buffer->append(":%d", column);
- }
+ if (!filepath) return nullptr;
+ if (!strip_path_prefix) return filepath;
+ const char *res = filepath;
+ if (const char *pos = internal_strstr(filepath, strip_path_prefix))
+ res = pos + internal_strlen(strip_path_prefix);
+ if (res[0] == '.' && res[1] == '/')
+ res += 2;
+ return res;
}
-void PrintModuleAndOffset(InternalScopedString *buffer, const char *module,
- uptr offset) {
- buffer->append("(%s+0x%zx)",
- StripPathPrefix(module, common_flags()->strip_path_prefix),
- offset);
+const char *StripModuleName(const char *module) {
+ if (!module)
+ return nullptr;
+ if (SANITIZER_WINDOWS) {
+ // On Windows, both slash and backslash are possible.
+ // Pick the one that goes last.
+ if (const char *bslash_pos = internal_strrchr(module, '\\'))
+ return StripModuleName(bslash_pos + 1);
+ }
+ if (const char *slash_pos = internal_strrchr(module, '/')) {
+ return slash_pos + 1;
+ }
+ return module;
}
void ReportErrorSummary(const char *error_message) {
if (!common_flags()->print_summary)
return;
- InternalScopedBuffer<char> buff(kMaxSummaryLength);
- internal_snprintf(buff.data(), buff.size(),
- "SUMMARY: %s: %s", SanitizerToolName, error_message);
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
}
-void ReportErrorSummary(const char *error_type, const char *file,
- int line, const char *function) {
+#ifndef SANITIZER_GO
+void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary)
return;
- InternalScopedBuffer<char> buff(kMaxSummaryLength);
- internal_snprintf(
- buff.data(), buff.size(), "%s %s:%d %s", error_type,
- file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??",
- line, function ? function : "??");
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("%s ", error_type);
+ RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data());
}
-
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
- if (!common_flags()->print_summary)
- return;
- AddressInfo ai;
-#if !SANITIZER_GO
- if (stack->size > 0 && Symbolizer::Get()->IsAvailable()) {
- // Currently, we include the first stack frame into the report summary.
- // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
- uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
- Symbolizer::Get()->SymbolizeCode(pc, &ai, 1);
- }
#endif
- ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
-}
-LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
+void LoadedModule::set(const char *module_name, uptr base_address) {
+ clear();
full_name_ = internal_strdup(module_name);
base_address_ = base_address;
- n_ranges_ = 0;
}
-void LoadedModule::addAddressRange(uptr beg, uptr end) {
- CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
- ranges_[n_ranges_].beg = beg;
- ranges_[n_ranges_].end = end;
- n_ranges_++;
+void LoadedModule::clear() {
+ InternalFree(full_name_);
+ full_name_ = nullptr;
+ while (!ranges_.empty()) {
+ AddressRange *r = ranges_.front();
+ ranges_.pop_front();
+ InternalFree(r);
+ }
+}
+
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
+ void *mem = InternalAlloc(sizeof(AddressRange));
+ AddressRange *r = new(mem) AddressRange(beg, end, executable);
+ ranges_.push_back(r);
}
bool LoadedModule::containsAddress(uptr address) const {
- for (uptr i = 0; i < n_ranges_; i++) {
- if (ranges_[i].beg <= address && address < ranges_[i].end)
+ for (Iterator iter = ranges(); iter.hasNext();) {
+ const AddressRange *r = iter.next();
+ if (r->beg <= address && address < r->end)
return true;
}
return false;
}
-char *StripModuleName(const char *module) {
- if (module == 0)
- return 0;
- const char *short_module_name = internal_strrchr(module, '/');
- if (short_module_name)
- short_module_name += 1;
- else
- short_module_name = module;
- return internal_strdup(short_module_name);
+static atomic_uintptr_t g_total_mmaped;
+
+void IncreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ uptr total_mmaped =
+ atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
+ // Since mmap_limit_mb is not a user-facing flag for now, just kill
+ // the program. Use RAW_CHECK to avoid extra mmaps in reporting.
+ RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
}
-} // namespace __sanitizer
+void DecreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
+}
-using namespace __sanitizer; // NOLINT
+bool TemplateMatch(const char *templ, const char *str) {
+ if ((!str) || str[0] == 0)
+ return false;
+ bool start = false;
+ if (templ && templ[0] == '^') {
+ start = true;
+ templ++;
+ }
+ bool asterisk = false;
+ while (templ && templ[0]) {
+ if (templ[0] == '*') {
+ templ++;
+ start = false;
+ asterisk = true;
+ continue;
+ }
+ if (templ[0] == '$')
+ return str[0] == 0 || asterisk;
+ if (str[0] == 0)
+ return false;
+ char *tpos = (char*)internal_strchr(templ, '*');
+ char *tpos1 = (char*)internal_strchr(templ, '$');
+ if ((!tpos) || (tpos1 && tpos1 < tpos))
+ tpos = tpos1;
+ if (tpos)
+ tpos[0] = 0;
+ const char *str0 = str;
+ const char *spos = internal_strstr(str, templ);
+ str = spos + internal_strlen(templ);
+ templ = tpos;
+ if (tpos)
+ tpos[0] = tpos == tpos1 ? '$' : '*';
+ if (!spos)
+ return false;
+ if (start && spos != str0)
+ return false;
+ start = false;
+ asterisk = false;
+ }
+ return true;
+}
-extern "C" {
-void __sanitizer_set_report_path(const char *path) {
+static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
+
+char *FindPathToBinary(const char *name) {
+ const char *path = GetEnv("PATH");
if (!path)
- return;
- uptr len = internal_strlen(path);
- if (len > sizeof(report_path_prefix) - 100) {
- Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
- path[0], path[1], path[2], path[3],
- path[4], path[5], path[6], path[7]);
- Die();
+ return nullptr;
+ uptr name_len = internal_strlen(name);
+ InternalScopedBuffer<char> buffer(kMaxPathLength);
+ const char *beg = path;
+ while (true) {
+ const char *end = internal_strchrnul(beg, kPathSeparator);
+ uptr prefix_len = end - beg;
+ if (prefix_len + name_len + 2 <= kMaxPathLength) {
+ internal_memcpy(buffer.data(), beg, prefix_len);
+ buffer[prefix_len] = '/';
+ internal_memcpy(&buffer[prefix_len + 1], name, name_len);
+ buffer[prefix_len + 1 + name_len] = '\0';
+ if (FileExists(buffer.data()))
+ return internal_strdup(buffer.data());
+ }
+ if (*end == '\0') break;
+ beg = end + 1;
}
- if (report_fd != kStdoutFd &&
- report_fd != kStderrFd &&
- report_fd != kInvalidFd)
- internal_close(report_fd);
- report_fd = kInvalidFd;
- log_to_file = false;
- if (internal_strcmp(path, "stdout") == 0) {
- report_fd = kStdoutFd;
- } else if (internal_strcmp(path, "stderr") == 0) {
- report_fd = kStderrFd;
- } else {
- internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
- report_path_prefix[len] = '\0';
- log_to_file = true;
+ return nullptr;
+}
+
+static char binary_name_cache_str[kMaxPathLength];
+static char process_name_cache_str[kMaxPathLength];
+
+const char *GetProcessName() {
+ return process_name_cache_str;
+}
+
+static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {
+ ReadLongProcessName(buf, buf_len);
+ char *s = const_cast<char *>(StripModuleName(buf));
+ uptr len = internal_strlen(s);
+ if (s != buf) {
+ internal_memmove(buf, s, len);
+ buf[len] = '\0';
}
+ return len;
+}
+
+void UpdateProcessName() {
+ ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
}
-void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
- (void)reserved;
- PrepareForSandboxing();
+// Call once to make sure that binary_name_cache_str is initialized
+void CacheBinaryName() {
+ if (binary_name_cache_str[0] != '\0')
+ return;
+ ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));
+ ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
+ CacheBinaryName();
+ uptr name_len = internal_strlen(binary_name_cache_str);
+ name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1;
+ if (buf_len == 0)
+ return 0;
+ internal_memcpy(buf, binary_name_cache_str, name_len);
+ buf[name_len] = '\0';
+ return name_len;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_set_report_path(const char *path) {
+ report_file.SetReportPath(path);
}
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary);
}
-} // extern "C"
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
+} // extern "C"
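
The TemplateMatch() helper added above implements a small glob-like matcher: '^' anchors the pattern at the start of the string, '*' skips an arbitrary substring, '$' requires the end of the string, and plain text in between is located with strstr(). One subtlety is that the matcher temporarily writes a NUL into the template to split it at '*'/'$' (and restores it afterwards), so the template must point to writable memory. A minimal sketch (illustrative only, assuming the sanitizer_common environment):

  #include "sanitizer_common.h"
  using namespace __sanitizer;

  void Example() {
    char templ[] = "^foo*bar$";  // writable copy; a string literal would fault
    CHECK(TemplateMatch(templ, "foobazbar"));  // '*' spans "baz", '$' hits the end
    CHECK(!TemplateMatch(templ, "xfoobar"));   // '^' rejects a match at offset 1
  }
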
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 07d1b63db58..6fb2dd882aa 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -5,8 +5,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// This file is shared between run-time libraries of sanitizers.
+//
// It declares common functions and classes that are used in both runtimes.
// Implementation of some functions are provided in sanitizer_common, while
// others must be defined by run-time library itself.
@@ -14,27 +14,53 @@
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H
+#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
+#ifdef _MSC_VER
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+#endif
+
namespace __sanitizer {
struct StackTrace;
+struct AddressInfo;
// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
#if defined(__powerpc__) || defined(__powerpc64__)
-const uptr kCacheLineSize = 128;
+ const uptr kCacheLineSize = 128;
#else
-const uptr kCacheLineSize = 64;
+ const uptr kCacheLineSize = 64;
#endif
-const uptr kMaxPathLength = 512;
+const uptr kMaxPathLength = 4096;
+
+// 16K loaded modules should be enough for everyone.
+static const uptr kMaxNumberOfModules = 1 << 14;
+
+const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
+
+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external() will be called.
+const u64 kExternalPCBit = 1ULL << 60;
extern const char *SanitizerToolName; // Can be changed by the tool.
+extern atomic_uint32_t current_verbosity;
+INLINE void SetVerbosity(int verbosity) {
+ atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
+}
+INLINE int Verbosity() {
+ return atomic_load(&current_verbosity, memory_order_relaxed);
+}
+
uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
@@ -50,14 +76,27 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
// Memory management
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
+ const char *name = nullptr);
+void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
-void *Mprotect(uptr fixed_addr, uptr size);
+void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Disallow access to a memory range. Use MmapNoAccess to allocate an
+// inaccessible memory region.
+bool MprotectNoAccess(uptr addr, uptr size);
+
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
+void IncreaseTotalMmap(uptr size);
+void DecreaseTotalMmap(uptr size);
+uptr GetRSS();
+void NoHugePagesInRegion(uptr addr, uptr length);
+void DontDumpShadowMemory(uptr addr, uptr length);
+// Check if the built VMA size matches the runtime one.
+void CheckVMASize();
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
@@ -120,56 +159,136 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
void RawWrite(const char *buffer);
-bool PrintsToTty();
-// Caching version of PrintsToTty(). Not thread-safe.
-bool PrintsToTtyCached();
+bool ColorizeReports();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
+#define VReport(level, ...) \
+ do { \
+ if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
+ } while (0)
+#define VPrintf(level, ...) \
+ do { \
+ if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
+ } while (0)
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
-void MaybeOpenReportFile();
-extern fd_t report_fd;
-extern bool log_to_file;
-extern char report_path_prefix[4096];
-extern uptr report_fd_pid;
+
+struct ReportFile {
+ void Write(const char *buffer, uptr length);
+ bool SupportsColors();
+ void SetReportPath(const char *path);
+
+ // Don't use fields directly. They are only declared public to allow
+ // aggregate initialization.
+
+ // Protects fields below.
+ StaticSpinMutex *mu;
+ // Opened file descriptor. Defaults to stderr. It may be equal to
+ // kInvalidFd, in which case a new file will be opened when necessary.
+ fd_t fd;
+ // Path prefix of report file, set via __sanitizer_set_report_path.
+ char path_prefix[kMaxPathLength];
+ // Full path to report, obtained as <path_prefix>.PID
+ char full_path[kMaxPathLength];
+ // PID of the process that opened fd. If a fork() occurs,
+ // the PID of child will be different from fd_pid.
+ uptr fd_pid;
+
+ private:
+ void ReopenIfNecessary();
+};
+extern ReportFile report_file;
+
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;
-uptr OpenFile(const char *filename, bool write);
+enum FileAccessMode {
+ RdOnly,
+ WrOnly,
+ RdWr
+};
+
+// Returns kInvalidFd on error.
+fd_t OpenFile(const char *filename, FileAccessMode mode,
+ error_t *errno_p = nullptr);
+void CloseFile(fd_t);
+
+// Return true on success, false on error.
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
+ uptr *bytes_read = nullptr, error_t *error_p = nullptr);
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
+ uptr *bytes_written = nullptr, error_t *error_p = nullptr);
+
+bool RenameFile(const char *oldpath, const char *newpath,
+ error_t *error_p = nullptr);
+
+// Scoped file handle closer.
+struct FileCloser {
+ explicit FileCloser(fd_t fd) : fd(fd) {}
+ ~FileCloser() { CloseFile(fd); }
+ fd_t fd;
+};
+
+bool SupportsColoredOutput(fd_t fd);
+
 // Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
-// The size of the mmaped region is stored in '*buff_size',
-// Returns the number of read bytes or 0 if file can not be opened.
-uptr ReadFileToBuffer(const char *file_name, char **buff,
- uptr *buff_size, uptr max_len);
+// The size of the mmaped region is stored in '*buff_size'.
+// The total number of read bytes is stored in '*read_len'.
+// Returns true if the file was successfully opened and read.
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len = 1 << 26,
+ error_t *errno_p = nullptr);
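
A usage sketch for the new ReadFileToBuffer() signature (illustrative only; the path and the helper are hypothetical): success is now reported separately from the byte count, the buffer is mmap-ed and its size stored in '*buff_size', and the caller releases it with UnmapOrDie():

  #include "sanitizer_common.h"
  using namespace __sanitizer;

  void CountMapsLines() {            // hypothetical helper, Linux-only path
    char *buff;
    uptr buff_size, read_len;
    if (!ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len))
      return;                        // could not open or read the file
    uptr lines = 0;
    for (uptr i = 0; i < read_len; i++)
      if (buff[i] == '\n') lines++;
    Printf("read %zd bytes, %zd lines\n", read_len, lines);
    UnmapOrDie(buff, buff_size);
  }
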
// Maps given file to virtual memory, and returns pointer to it
-// (or NULL if the mapping failes). Stores the size of mmaped region
+// (or NULL if mapping fails). Stores the size of mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix);
-void PrintSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column);
-void PrintModuleAndOffset(InternalScopedString *buffer,
- const char *module, uptr offset);
+// Strip the directories from the module name.
+const char *StripModuleName(const char *module);
// OS
-void DisableCoreDumper();
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
+const char *GetProcessName();
+void UpdateProcessName();
+void CacheBinaryName();
+void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
+bool IsPathSeparator(const char c);
+bool IsAbsolutePath(const char *path);
+
u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
-void PrepareForSandboxing();
+bool AddressSpaceIsUnlimited();
+void SetAddressSpaceUnlimited();
+void AdjustStackSize(void *attr);
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void SetSandboxingCallback(void (*f)());
+
+void CoverageUpdateMapping();
+void CovBeforeFork();
+void CovAfterFork(int child_pid);
+
+void InitializeCoverage(bool enabled, const char *coverage_dir);
+void ReInitializeCoverage(bool enabled, const char *coverage_dir);
void InitTlsSize();
uptr GetTlsSize();
@@ -180,15 +299,15 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
-// Strip the directories from the module name, return a new string allocated
-// with internal_strdup.
-char *StripModuleName(const char *module);
+bool TemplateMatch(const char *templ, const char *str);
// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+ const char *mmap_type, error_t err);
 // Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
@@ -200,12 +319,34 @@ bool SanitizerGetThreadName(char *name, int max_len);
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);
-void SetDieCallback(DieCallbackType);
-DieCallbackType GetDieCallback();
+
+// It's possible to add several callbacks that will be run when "Die" is
+// called. The callbacks are run in reverse order of registration. Tools are
+// strongly recommended to set up all callbacks during initialization, when
+// there is only a single thread.
+bool AddDieCallback(DieCallbackType callback);
+bool RemoveDieCallback(DieCallbackType callback);
+
+void SetUserDieCallback(DieCallbackType callback);
+
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
+// Callback will be called if soft_rss_limit_mb is given and the limit is
+// exceeded (exceeded==true) or if rss went down below the limit
+// (exceeded==false).
+// The callback should be registered once at the tool init time.
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
+
+// Functions related to signal handling.
+typedef void (*SignalHandlerType)(int, void *, void *);
+bool IsDeadlySignal(int signum);
+void InstallDeadlySignalHandlers(SignalHandlerType handler);
+// Alternative signal stack (POSIX-only).
+void SetAlternateSignalStack();
+void UnsetAlternateSignalStack();
+
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
@@ -213,9 +354,9 @@ const int kMaxSummaryLength = 1024;
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
-// error_type: file:line function
-void ReportErrorSummary(const char *error_type, const char *file,
- int line, const char *function);
+// error_type file:line[:column][ function]
+void ReportErrorSummary(const char *error_type, const AddressInfo &info);
+// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, StackTrace *trace);
// Math
@@ -234,7 +375,11 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up; // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+# ifdef _WIN64
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
+# else
up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
+# endif
#elif defined(_WIN64)
_BitScanReverse64(&up, x);
#else
@@ -243,6 +388,23 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
return up;
}
+INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up; // NOLINT
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+# ifdef _WIN64
+ up = __builtin_ctzll(x);
+# else
+ up = __builtin_ctzl(x);
+# endif
+#elif defined(_WIN64)
+ _BitScanForward64(&up, x);
+#else
+ _BitScanForward(&up, x);
+#endif
+ return up;
+}
+
INLINE bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
@@ -254,7 +416,7 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
uptr up = MostSignificantSetBitIndex(size);
CHECK(size < (1ULL << (up + 1)));
CHECK(size > (1ULL << up));
- return 1UL << (up + 1);
+ return 1ULL << (up + 1);
}
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
@@ -272,17 +434,7 @@ INLINE bool IsAligned(uptr a, uptr alignment) {
INLINE uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
-#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
- return __builtin_ctzl(x);
-#elif defined(_WIN64)
- unsigned long ret; // NOLINT
- _BitScanForward64(&ret, x);
- return ret;
-#else
- unsigned long ret; // NOLINT
- _BitScanForward(&ret, x);
- return ret;
-#endif
+ return LeastSignificantSetBitIndex(x);
}
// Don't use std::min, std::max or std::swap, to minimize dependency
@@ -307,24 +459,18 @@ INLINE int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
-#if SANITIZER_WORDSIZE == 64
-# define FIRST_32_SECOND_64(a, b) (b)
-#else
-# define FIRST_32_SECOND_64(a, b) (a)
-#endif
-
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
-class InternalMmapVector {
+class InternalMmapVectorNoCtor {
public:
- explicit InternalMmapVector(uptr initial_capacity) {
+ void Initialize(uptr initial_capacity) {
capacity_ = Max(initial_capacity, (uptr)1);
size_ = 0;
- data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
+ data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
}
- ~InternalMmapVector() {
+ void Destroy() {
UnmapOrDie(data_, capacity_ * sizeof(T));
}
T &operator[](uptr i) {
@@ -357,11 +503,15 @@ class InternalMmapVector {
const T *data() const {
return data_;
}
+ T *data() {
+ return data_;
+ }
uptr capacity() const {
return capacity_;
}
void clear() { size_ = 0; }
+ bool empty() const { return size() == 0; }
private:
void Resize(uptr new_capacity) {
@@ -375,15 +525,24 @@ class InternalMmapVector {
UnmapOrDie(old_data, capacity_ * sizeof(T));
capacity_ = new_capacity;
}
- // Disallow evil constructors.
- InternalMmapVector(const InternalMmapVector&);
- void operator=(const InternalMmapVector&);
T *data_;
uptr capacity_;
uptr size_;
};
+template<typename T>
+class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
+ public:
+ explicit InternalMmapVector(uptr initial_capacity) {
+ InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
+ }
+ ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
+ // Disallow evil constructors.
+ InternalMmapVector(const InternalMmapVector&);
+ void operator=(const InternalMmapVector&);
+};
+
// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
@@ -441,23 +600,32 @@ uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
// executable or a shared object).
class LoadedModule {
public:
- LoadedModule(const char *module_name, uptr base_address);
- void addAddressRange(uptr beg, uptr end);
+ LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
+ void set(const char *module_name, uptr base_address);
+ void clear();
+ void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
- private:
struct AddressRange {
+ AddressRange *next;
uptr beg;
uptr end;
+ bool executable;
+
+ AddressRange(uptr beg, uptr end, bool executable)
+ : next(nullptr), beg(beg), end(end), executable(executable) {}
};
- char *full_name_;
+
+ typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
+ Iterator ranges() const { return Iterator(&ranges_); }
+
+ private:
+ char *full_name_; // Owned.
uptr base_address_;
- static const uptr kMaxNumberOfAddressRanges = 6;
- AddressRange ranges_[kMaxNumberOfAddressRanges];
- uptr n_ranges_;
+ IntrusiveList<AddressRange> ranges_;
};
// OS-dependent function that fills array with descriptions of at most
@@ -468,15 +636,80 @@ typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter);
-#if SANITIZER_POSIX
-const uptr kPthreadDestructorIterations = 4;
+// Callback type for iterating over a set of memory ranges.
+typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
+
+enum AndroidApiLevel {
+ ANDROID_NOT_ANDROID = 0,
+ ANDROID_KITKAT = 19,
+ ANDROID_LOLLIPOP_MR1 = 22,
+ ANDROID_POST_LOLLIPOP = 23
+};
+
+#if SANITIZER_LINUX
+// Initialize Android logging. Any writes before this are silently lost.
+void AndroidLogInit();
+void WriteToSyslog(const char *buffer);
+#else
+INLINE void AndroidLogInit() {}
+INLINE void WriteToSyslog(const char *buffer) {}
+#endif
+
+#if SANITIZER_ANDROID
+void GetExtraActivationFlags(char *buf, uptr size);
+void SanitizerInitializeUnwinder();
+AndroidApiLevel AndroidGetApiLevel();
+#else
+INLINE void AndroidLogWrite(const char *buffer_unused) {}
+INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
+INLINE void SanitizerInitializeUnwinder() {}
+INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
+#endif
+
+INLINE uptr GetPthreadDestructorIterations() {
+#if SANITIZER_ANDROID
+ return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
+#elif SANITIZER_POSIX
+ return 4;
#else
// Unused on Windows.
-const uptr kPthreadDestructorIterations = 0;
+ return 0;
#endif
+}
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
+void MaybeStartBackgroudThread();
+
+// Make the compiler think that something is going on there.
+// Use this inside a loop that looks like memset/memcpy/etc to prevent the
+// compiler from recognising it and turning it into an actual call to
+// memset/memcpy/etc.
+static inline void SanitizerBreakOptimization(void *arg) {
+#if _MSC_VER && !defined(__clang__)
+ _ReadWriteBarrier();
+#else
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
+}
+
+struct SignalContext {
+ void *context;
+ uptr addr;
+ uptr pc;
+ uptr sp;
+ uptr bp;
+
+ SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
+ context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
+ }
+
+ // Creates signal context in a platform-specific manner.
+ static SignalContext Create(void *siginfo, void *context);
+};
+
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
-// Callback type for iterating over a set of memory ranges.
-typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
@@ -484,4 +717,9 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
return alloc.Allocate(size);
}
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
#endif // SANITIZER_COMMON_H
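
The verbosity plumbing above replaces per-call checks of a verbosity flag: the level is kept in an atomic and the VReport/VPrintf macros only forward to Report/Printf when the current level is at least the requested one. A small sketch (illustrative; the init function is hypothetical):

  #include "sanitizer_common.h"
  using namespace __sanitizer;

  void InitExample() {
    SetVerbosity(2);                          // e.g. taken from a runtime flag
    VReport(1, "initialization started\n");   // printed: 2 >= 1
    VPrintf(3, "detailed trace\n");           // suppressed: 2 < 3
  }
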
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index e301dc17bd3..8223778cac4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -11,31 +11,66 @@
// This file should be included into the tool's interceptor file,
 // which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_ENTER_NOIGNORE
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_INITIALIZE_RANGE
+// COMMON_INTERCEPTOR_DIR_ACQUIRE
// COMMON_INTERCEPTOR_FD_ACQUIRE
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_FD_ACCESS
// COMMON_INTERCEPTOR_SET_THREAD_NAME
+// COMMON_INTERCEPTOR_ON_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
// COMMON_INTERCEPTOR_MUTEX_LOCK
// COMMON_INTERCEPTOR_MUTEX_UNLOCK
// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
//===----------------------------------------------------------------------===//
+
#include "interception/interception.h"
+#include "sanitizer_addrhashmap.h"
+#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
+#include "sanitizer_tls_get_addr.h"
#include <stdarg.h>
+#if SANITIZER_INTERCEPTOR_HOOKS
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ do { \
+ if (f) \
+ f(__VA_ARGS__); \
+ } while (false);
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ extern "C" { \
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); \
+ } // extern "C"
+#else
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
+
+#endif // SANITIZER_INTERCEPTOR_HOOKS
+
#if SANITIZER_WINDOWS && !defined(va_copy)
#define va_copy(dst, src) ((dst) = (src))
#endif // _WIN32
+#if SANITIZER_FREEBSD
+#define pthread_setname_np pthread_set_name_np
+#define inet_aton __inet_aton
+#define inet_pton __inet_pton
+#define iconv __bsd_iconv
+#endif
+
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
-#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, p, size) {}
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {}
#endif
#ifndef COMMON_INTERCEPTOR_FD_ACCESS
@@ -58,14 +93,111 @@
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif
+#ifndef COMMON_INTERCEPTOR_FILE_OPEN
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_CLOSE
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \
+ COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
+#endif
+
+#define COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n) \
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n) )
+
+#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
+ COMMON_INTERCEPTOR_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+
+#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) *begin = *end = 0;
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ACQUIRE
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_RELEASE
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) {}
+#endif
+
+struct FileMetadata {
+ // For open_memstream().
+ char **addr;
+ SIZE_T *size;
+};
+
+struct CommonInterceptorMetadata {
+ enum {
+ CIMT_INVALID = 0,
+ CIMT_FILE
+ } type;
+ union {
+ FileMetadata file;
+ };
+};
+
+typedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;
+
+static MetadataHashMap *interceptor_metadata_map;
+
+#if SI_NOT_WINDOWS
+UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
+ const FileMetadata &file) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
+ CHECK(h.created());
+ h->type = CommonInterceptorMetadata::CIMT_FILE;
+ h->file = file;
+}
+
+UNUSED static const FileMetadata *GetInterceptorMetadata(
+ __sanitizer_FILE *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,
+ /* remove */ false,
+ /* create */ false);
+ if (h.exists()) {
+ CHECK(!h.created());
+ CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);
+ return &h->file;
+ } else {
+ return 0;
+ }
+}
+
+UNUSED static void DeleteInterceptorMetadata(void *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
+ CHECK(h.exists());
+}
+#endif // SI_NOT_WINDOWS
+
#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
- char* domain = REAL(textdomain)(domainname);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
+ char *domain = REAL(textdomain)(domainname);
if (domain) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, domain,
- REAL(strlen)(domain) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
}
return domain;
}
@@ -79,9 +211,14 @@ static inline int CharCmpX(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
+ const char *s1, const char *s2)
+
INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
+ s2);
unsigned char c1, c2;
uptr i;
for (i = 0;; i++) {
@@ -89,14 +226,21 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, i + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
return CharCmpX(c1, c2);
}
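The weak hook declared above lets an external tool (a fuzzer, for example) observe every intercepted strcmp call without the sanitizer depending on it. A hypothetical client-side definition could look like the sketch below; uptr_t is an assumed stand-in for the sanitizer's pointer-sized integer type:

typedef unsigned long uptr_t;   // assumption: stands in for sanitizer uptr
extern "C" void __sanitizer_weak_hook_strcmp(uptr_t called_pc,
                                             const char *s1, const char *s2) {
  // The symbol is weak, so providing it is optional; a real client might
  // record (called_pc, s1, s2) to guide input generation.
  (void)called_pc; (void)s1; (void)s2;
}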
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc,
+ const char *s1, const char *s2, uptr n)
+
INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strncmp(s1, s2, size);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
+ s2, size);
unsigned char c1 = 0, c2 = 0;
uptr i;
for (i = 0; i < size; i++) {
@@ -133,8 +277,8 @@ INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, i + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
return CharCaseCmp(c1, c2);
}
@@ -160,12 +304,176 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
#define INIT_STRNCASECMP
#endif
+#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
+static inline void StrstrCheck(void *ctx, char *r, const char *s1,
+ const char *s2) {
+ uptr len1 = REAL(strlen)(s1);
+ uptr len2 = REAL(strlen)(s2);
+ COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s1, len1,
+ r ? r - s1 + len2 : len1 + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
+}
+#endif
+
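StrstrCheck reports the haystack as read only up to the end of a successful match, and in full (including the NUL) when there is no match; the needle is always checked in full. A standalone sketch of the haystack length computation, with local stand-in names:

#include <stddef.h>
#include <string.h>

// r is the value strstr(s1, s2) returned.
static size_t haystack_bytes_checked(const char *s1, const char *s2,
                                     const char *r) {
  size_t len1 = strlen(s1), len2 = strlen(s2);
  return r ? (size_t)(r - s1) + len2 : len1 + 1;
}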
+#if SANITIZER_INTERCEPT_STRSTR
+INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strstr(s1, s2);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strstr, s1, s2);
+ char *r = REAL(strstr)(s1, s2);
+ if (common_flags()->intercept_strstr)
+ StrstrCheck(ctx, r, s1, s2);
+ return r;
+}
+
+#define INIT_STRSTR COMMON_INTERCEPT_FUNCTION(strstr);
+#else
+#define INIT_STRSTR
+#endif
+
+#if SANITIZER_INTERCEPT_STRCASESTR
+INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2);
+ char *r = REAL(strcasestr)(s1, s2);
+ if (common_flags()->intercept_strstr)
+ StrstrCheck(ctx, r, s1, s2);
+ return r;
+}
+
+#define INIT_STRCASESTR COMMON_INTERCEPT_FUNCTION(strcasestr);
+#else
+#define INIT_STRCASESTR
+#endif
+
+#if SANITIZER_INTERCEPT_STRSPN
+INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);
+ SIZE_T r = REAL(strspn)(s1, s2);
+ if (common_flags()->intercept_strspn) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
+ }
+ return r;
+}
+
+INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);
+ SIZE_T r = REAL(strcspn)(s1, s2);
+ if (common_flags()->intercept_strspn) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
+ }
+ return r;
+}
+
+#define INIT_STRSPN \
+ COMMON_INTERCEPT_FUNCTION(strspn); \
+ COMMON_INTERCEPT_FUNCTION(strcspn);
+#else
+#define INIT_STRSPN
+#endif
+
+#if SANITIZER_INTERCEPT_STRPBRK
+INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);
+ char *r = REAL(strpbrk)(s1, s2);
+ if (common_flags()->intercept_strpbrk) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1,
+ r ? r - s1 + 1 : REAL(strlen)(s1) + 1);
+ }
+ return r;
+}
+
+#define INIT_STRPBRK COMMON_INTERCEPT_FUNCTION(strpbrk);
+#else
+#define INIT_STRPBRK
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCMP
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
+ const void *s1, const void *s2, uptr n)
+
+INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memcmp(a1, a2, size);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1,
+ a2, size);
+ if (common_flags()->intercept_memcmp) {
+ if (common_flags()->strict_memcmp) {
+ // Check the entire regions even if the first bytes of the buffers are
+ // different.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, a1, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, a2, size);
+ // Fallthrough to REAL(memcmp) below.
+ } else {
+ unsigned char c1 = 0, c2 = 0;
+ const unsigned char *s1 = (const unsigned char*)a1;
+ const unsigned char *s2 = (const unsigned char*)a2;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = s1[i];
+ c2 = s2[i];
+ if (c1 != c2) break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
+ return CharCmpX(c1, c2);
+ }
+ }
+ return REAL(memcmp)(a1, a2, size);
+}
+
+#define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp)
+#else
+#define INIT_MEMCMP
+#endif
+
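With strict_memcmp disabled, the interceptor above reports only the bytes a byte-wise comparison would actually inspect, i.e. up to and including the first mismatch. A standalone sketch of that count:

#include <stddef.h>

static size_t memcmp_bytes_inspected(const unsigned char *a,
                                     const unsigned char *b, size_t n) {
  size_t i = 0;
  while (i < n && a[i] == b[i]) ++i;   // stop at the first mismatch
  return i < n ? i + 1 : n;            // mirrors Min(i + 1, size) above
}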
+#if SANITIZER_INTERCEPT_MEMCHR
+INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memchr(s, c, n);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+ void *res = REAL(memchr)(s, c, n);
+ uptr len = res ? (char *)res - (const char *)s + 1 : n;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
+ return res;
+}
+
+#define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr)
+#else
+#define INIT_MEMCHR
+#endif
+
+#if SANITIZER_INTERCEPT_MEMRCHR
+INTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);
+ return REAL(memrchr)(s, c, n);
+}
+
+#define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr)
+#else
+#define INIT_MEMRCHR
+#endif
+
#if SANITIZER_INTERCEPT_FREXP
INTERCEPTOR(double, frexp, double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);
- double res = REAL(frexp)(x, exp);
+ // Assuming frexp() always writes to |exp|.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ double res = REAL(frexp)(x, exp);
return res;
}
@@ -178,6 +486,9 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
INTERCEPTOR(float, frexpf, float x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(frexpf)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
@@ -186,6 +497,9 @@ INTERCEPTOR(float, frexpf, float x, int *exp) {
INTERCEPTOR(long double, frexpl, long double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(frexpl)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
@@ -224,6 +538,9 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -239,6 +556,9 @@ INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -254,6 +574,9 @@ INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -432,9 +755,11 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2,
INTERCEPTOR(unsigned long, time, unsigned long *t) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, time, t);
- unsigned long res = REAL(time)(t);
+ unsigned long local_t;
+ unsigned long res = REAL(time)(&local_t);
if (t && res != (unsigned long)-1) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));
+ *t = local_t;
}
return res;
}
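The time() interceptor now lets the real call store into a local and copies the result out only on success, so the shadow update (WRITE_RANGE) and the store into user memory always happen together. The same pattern expressed against plain libc, as a sketch:

#include <time.h>

static time_t time_via_local(time_t *t) {
  time_t local_t;
  time_t res = time(&local_t);
  if (t && res != (time_t)-1)
    *t = local_t;   // user memory is written only when the call succeeded
  return res;
}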
@@ -449,7 +774,7 @@ static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
if (tm->tm_zone) {
// Can not use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
// can point to shared memory and tsan would report a data race.
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, tm->tm_zone,
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
REAL(strlen(tm->tm_zone)) + 1);
}
}
@@ -496,6 +821,9 @@ INTERCEPTOR(__sanitizer_tm *, gmtime_r, unsigned long *timep, void *result) {
INTERCEPTOR(char *, ctime, unsigned long *timep) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ctime)(timep);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
@@ -506,6 +834,9 @@ INTERCEPTOR(char *, ctime, unsigned long *timep) {
INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ctime_r)(timep, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
@@ -516,6 +847,9 @@ INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(asctime)(tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
@@ -526,6 +860,9 @@ INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(asctime_r)(tm, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
@@ -533,6 +870,20 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
}
return res;
}
+INTERCEPTOR(long, mktime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst));
+ long res = REAL(mktime)(tm);
+ if (res != -1) unpoison_tm(ctx, tm);
+ return res;
+}
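mktime() reads only the calendar input fields of struct tm (tm_wday and tm_yday are outputs), which is why the interceptor checks those seven members individually rather than the whole struct. A small, self-contained usage example of that contract:

#include <time.h>

static long timestamp_for(int year, int mon, int mday) {
  struct tm tm = {0};          // zero everything; mktime ignores the outputs
  tm.tm_year = year - 1900;    // the fields the interceptor READs above
  tm.tm_mon = mon - 1;
  tm.tm_mday = mday;
  tm.tm_isdst = -1;            // let libc decide whether DST applies
  return (long)mktime(&tm);    // tm_wday/tm_yday are filled in as outputs
}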
#define INIT_LOCALTIME_AND_FRIENDS \
COMMON_INTERCEPT_FUNCTION(localtime); \
COMMON_INTERCEPT_FUNCTION(localtime_r); \
@@ -541,7 +892,8 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
COMMON_INTERCEPT_FUNCTION(ctime); \
COMMON_INTERCEPT_FUNCTION(ctime_r); \
COMMON_INTERCEPT_FUNCTION(asctime); \
- COMMON_INTERCEPT_FUNCTION(asctime_r);
+ COMMON_INTERCEPT_FUNCTION(asctime_r); \
+ COMMON_INTERCEPT_FUNCTION(mktime);
#else
#define INIT_LOCALTIME_AND_FRIENDS
#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
@@ -552,13 +904,16 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
if (format)
COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(strptime)(s, format, tm);
- if (res) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, res - s);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0);
+ if (res && tm) {
// Do not call unpoison_tm here, because strptime does not, in fact,
// initialize the entire struct tm. For example, tm_zone pointer is left
// uninitialized.
- if (tm) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
}
return res;
}
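strptime() only writes the struct tm fields named by the format and leaves the rest untouched, which is why the interceptor marks the struct as written without calling unpoison_tm(). A minimal example of that partial initialization:

#define _XOPEN_SOURCE 700   // for strptime() on glibc
#include <time.h>

static int parse_date(const char *s, struct tm *tm) {
  // Only tm_year, tm_mon and tm_mday are written; other fields keep
  // whatever values they had before the call.
  return strptime(s, "%Y-%m-%d", tm) != 0;
}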
@@ -567,9 +922,23 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
#define INIT_STRPTIME
#endif
-#if SANITIZER_INTERCEPT_SCANF
+#if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF
+#include "sanitizer_common_interceptors_format.inc"
+
+#define FORMAT_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ va_list ap; \
+ va_start(ap, format); \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
+ int res = WRAP(vname)(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
+
+#endif
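FORMAT_INTERCEPTOR_IMPL packages the variadic arguments of a printf/scanf-style entry point into a va_list and forwards them to the corresponding v* interceptor via WRAP(). The same forwarding pattern in plain C, with hypothetical my_* names:

#include <stdarg.h>
#include <stdio.h>

static int my_vprintf(const char *format, va_list ap) {
  return vprintf(format, ap);
}

static int my_printf(const char *format, ...) {
  va_list ap;
  va_start(ap, format);              // what the macro does around WRAP(vname)
  int res = my_vprintf(format, ap);
  va_end(ap);
  return res;
}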
-#include "sanitizer_common_interceptors_scanf.inc"
+#if SANITIZER_INTERCEPT_SCANF
#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
{ \
@@ -605,35 +974,24 @@ INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
-#define SCANF_INTERCEPTOR_IMPL(name, vname, ...) \
- { \
- void *ctx; \
- va_list ap; \
- va_start(ap, format); \
- COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
- int res = vname(__VA_ARGS__, ap); \
- va_end(ap); \
- return res; \
- }
-
INTERCEPTOR(int, scanf, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(scanf, vscanf, format)
+FORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format)
INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+FORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+FORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
#if SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
-SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#endif
#endif
@@ -662,10 +1020,212 @@ SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#define INIT_ISOC99_SCANF
#endif
+#if SANITIZER_INTERCEPT_PRINTF
+
+#define VPRINTF_INTERCEPTOR_ENTER(vname, ...) \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap);
+
+#define VPRINTF_INTERCEPTOR_RETURN() \
+ va_end(aq);
+
+#define VPRINTF_INTERCEPTOR_IMPL(vname, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__); \
+ if (common_flags()->check_printf) \
+ printf_common(ctx, format, aq); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, size, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1))); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *)); \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(strp, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
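VPRINTF_INTERCEPTOR_ENTER va_copy()s the caller's argument list so that printf_common() can walk the arguments on the copy (aq) while the original (ap) remains valid for the REAL() call. A standalone sketch of why the copy matters:

#include <stdarg.h>
#include <stdio.h>

static int measure_then_print(const char *format, va_list ap) {
  va_list aq;
  va_copy(aq, ap);                            // independent copy for the first pass
  int needed = vsnprintf(0, 0, format, aq);   // consumes aq, not ap
  va_end(aq);
  if (needed >= 0)
    vprintf(format, ap);                      // ap is still usable here
  return needed;
}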
+INTERCEPTOR(int, vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap)
+
+INTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format,
+ va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+
+#if SANITIZER_INTERCEPT_PRINTF_L
+INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)
+
+INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)
+#endif // SANITIZER_INTERCEPT_PRINTF_L
+
+INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+
+INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)
+VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream,
+ const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format,
+ va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format,
+ ap)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+INTERCEPTOR(int, printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format)
+
+INTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format)
+
+INTERCEPTOR(int, sprintf, char *str, const char *format, ...) // NOLINT
+FORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format) // NOLINT
+
+INTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format)
+
+INTERCEPTOR(int, asprintf, char **strp, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)
+
+INTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format,
+ ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format)
+
+INTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
+ format)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+#endif // SANITIZER_INTERCEPT_PRINTF
+
+#if SANITIZER_INTERCEPT_PRINTF
+#define INIT_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(printf); \
+ COMMON_INTERCEPT_FUNCTION(sprintf); \
+ COMMON_INTERCEPT_FUNCTION(snprintf); \
+ COMMON_INTERCEPT_FUNCTION(asprintf); \
+ COMMON_INTERCEPT_FUNCTION(fprintf); \
+ COMMON_INTERCEPT_FUNCTION(vprintf); \
+ COMMON_INTERCEPT_FUNCTION(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION(vfprintf);
+#else
+#define INIT_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF_L
+#define INIT_PRINTF_L \
+ COMMON_INTERCEPT_FUNCTION(snprintf_l); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf_l);
+#else
+#define INIT_PRINTF_L
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define INIT_ISOC99_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf);
+#else
+#define INIT_ISOC99_PRINTF
+#endif
+
#if SANITIZER_INTERCEPT_IOCTL
#include "sanitizer_common_interceptors_ioctl.inc"
-INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
- void *ctx;
+INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
+ // We need a frame pointer, because we call into ioctl_common_[pre|post] which
+ // can trigger a report and we need to be able to unwind through this
+ // function. On Mac in debug mode we might not have a frame pointer, because
+ // ioctl_common_[pre|post] doesn't get inlined here.
+ ENABLE_FRAME_POINTER;
+
+ void *ctx;
+ va_list ap;
+ va_start(ap, request);
+ void *arg = va_arg(ap, void *);
+ va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);
CHECK(ioctl_initialized);
@@ -674,8 +1234,19 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
// This effectively disables ioctl handling in TSan.
if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
+ // Although request is unsigned long, the rest of the interceptor uses it
+ // as just "unsigned" to save space, because we know that all values fit in
+ // "unsigned" - they are compile-time constants.
+
const ioctl_desc *desc = ioctl_lookup(request);
- if (!desc) Printf("WARNING: unknown ioctl %x\n", request);
+ ioctl_desc decoded_desc;
+ if (!desc) {
+ VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ if (!ioctl_decode(request, &decoded_desc))
+ Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ else
+ desc = &decoded_desc;
+ }
if (desc) ioctl_common_pre(ctx, desc, d, request, arg);
int res = REAL(ioctl)(d, request, arg);
@@ -690,35 +1261,88 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
#define INIT_IOCTL
#endif
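The ioctl interceptor is now variadic, matching the libc prototype, and pulls the optional third argument out of the va_list as a single void*. In isolation the argument handling looks roughly like the sketch below (wrapped_ioctl is a hypothetical name; reading one pointer-sized slot even when the caller passed nothing is what the interceptor relies on for the supported ABIs):

#include <stdarg.h>
#include <sys/ioctl.h>

static int wrapped_ioctl(int d, unsigned long request, ...) {
  va_list ap;
  va_start(ap, request);
  void *arg = va_arg(ap, void *);   // the single optional argument
  va_end(ap);
  return ioctl(d, request, arg);
}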
+#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS || \
+ SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT || \
+ SANITIZER_INTERCEPT_GETPWENT_R || SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
+ if (pwd) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
+ if (pwd->pw_name)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_name,
+ REAL(strlen)(pwd->pw_name) + 1);
+ if (pwd->pw_passwd)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_passwd,
+ REAL(strlen)(pwd->pw_passwd) + 1);
+#if !SANITIZER_ANDROID
+ if (pwd->pw_gecos)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_gecos,
+ REAL(strlen)(pwd->pw_gecos) + 1);
+#endif
+#if SANITIZER_MAC
+ if (pwd->pw_class)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_class,
+ REAL(strlen)(pwd->pw_class) + 1);
+#endif
+ if (pwd->pw_dir)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_dir,
+ REAL(strlen)(pwd->pw_dir) + 1);
+ if (pwd->pw_shell)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(pwd->pw_shell,
+ REAL(strlen)(pwd->pw_shell) + 1);
+ }
+}
+
+static void unpoison_group(void *ctx, __sanitizer_group *grp) {
+ if (grp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
+ if (grp->gr_name)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_name,
+ REAL(strlen)(grp->gr_name) + 1);
+ if (grp->gr_passwd)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_passwd,
+ REAL(strlen)(grp->gr_passwd) + 1);
+ char **p = grp->gr_mem;
+ for (; *p; ++p) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*p, REAL(strlen)(*p) + 1);
+ }
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(grp->gr_mem,
+ (p - grp->gr_mem + 1) * sizeof(*p));
+ }
+}
+#endif // SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS ||
+ // SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT ||
+ // SANITIZER_INTERCEPT_GETPWENT_R ||
+ // SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+
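unpoison_group walks gr_mem, a NULL-terminated array of member-name strings, so every member string and the pointer array itself (including the terminating NULL entry) are covered. A standalone sketch of the sizes involved, with local stand-in names:

#include <stddef.h>
#include <string.h>

static size_t group_member_bytes(char *const *gr_mem, size_t *array_bytes) {
  size_t total = 0;
  char *const *p = gr_mem;
  for (; *p; ++p)
    total += strlen(*p) + 1;                             // each member string
  *array_bytes = (size_t)(p - gr_mem + 1) * sizeof(*p);  // pointers + NULL
  return total;
}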
#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-INTERCEPTOR(void *, getpwnam, const char *name) {
+INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
- void *res = REAL(getpwnam)(name);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ __sanitizer_passwd *res = REAL(getpwnam)(name);
+ if (res) unpoison_passwd(ctx, res);
return res;
}
-INTERCEPTOR(void *, getpwuid, u32 uid) {
+INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);
- void *res = REAL(getpwuid)(uid);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
+ __sanitizer_passwd *res = REAL(getpwuid)(uid);
+ if (res) unpoison_passwd(ctx, res);
return res;
}
-INTERCEPTOR(void *, getgrnam, const char *name) {
+INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
- void *res = REAL(getgrnam)(name);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ __sanitizer_group *res = REAL(getgrnam)(name);
+ if (res) unpoison_group(ctx, res);
return res;
}
-INTERCEPTOR(void *, getgrgid, u32 gid) {
+INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);
- void *res = REAL(getgrgid)(gid);
- if (res != 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz);
+ __sanitizer_group *res = REAL(getgrgid)(gid);
+ if (res) unpoison_group(ctx, res);
return res;
}
#define INIT_GETPWNAM_AND_FRIENDS \
@@ -731,50 +1355,66 @@ INTERCEPTOR(void *, getgrgid, u32 gid) {
#endif
#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
-INTERCEPTOR(int, getpwnam_r, const char *name, void *pwd, char *buf,
- SIZE_T buflen, void **result) {
+INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
+ char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ if (result && *result) unpoison_passwd(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getpwuid_r, u32 uid, void *pwd, char *buf, SIZE_T buflen,
- void **result) {
+INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz);
+ if (result && *result) unpoison_passwd(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getgrnam_r, const char *name, void *grp, char *buf,
- SIZE_T buflen, void **result) {
+INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
+ char *buf, SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ if (result && *result) unpoison_group(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
-INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, char *buf, SIZE_T buflen,
- void **result) {
+INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,
+ SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
if (!res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz);
+ if (result && *result) unpoison_group(ctx, *result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
}
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
}
#define INIT_GETPWNAM_R_AND_FRIENDS \
@@ -786,10 +1426,160 @@ INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, char *buf, SIZE_T buflen,
#define INIT_GETPWNAM_R_AND_FRIENDS
#endif
+#if SANITIZER_INTERCEPT_GETPWENT
+INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy);
+ __sanitizer_passwd *res = REAL(getpwent)(dummy);
+ if (res) unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy);
+ __sanitizer_group *res = REAL(getgrent)(dummy);
+ if (res) unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWENT \
+ COMMON_INTERCEPT_FUNCTION(getpwent); \
+ COMMON_INTERCEPT_FUNCTION(getgrent);
+#else
+#define INIT_GETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT
+INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp);
+ __sanitizer_passwd *res = REAL(fgetpwent)(fp);
+ if (res) unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp);
+ __sanitizer_group *res = REAL(fgetgrent)(fp);
+ if (res) unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_FGETPWENT \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent);
+#else
+#define INIT_FGETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT_R
+INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,
+ __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res) {
+ if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen);
+ }
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_GETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(getpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrent_r); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent_r);
+#else
+#define INIT_GETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_SETPWENT
+// The only thing these interceptors do is disable any nested interceptors.
+// These functions may open nss modules and call uninstrumented functions from
+// them, and we don't want things like strlen() to trigger.
+INTERCEPTOR(void, setpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy);
+ REAL(setpwent)(dummy);
+}
+INTERCEPTOR(void, endpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy);
+ REAL(endpwent)(dummy);
+}
+INTERCEPTOR(void, setgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy);
+ REAL(setgrent)(dummy);
+}
+INTERCEPTOR(void, endgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy);
+ REAL(endgrent)(dummy);
+}
+#define INIT_SETPWENT \
+ COMMON_INTERCEPT_FUNCTION(setpwent); \
+ COMMON_INTERCEPT_FUNCTION(endpwent); \
+ COMMON_INTERCEPT_FUNCTION(setgrent); \
+ COMMON_INTERCEPT_FUNCTION(endgrent);
+#else
+#define INIT_SETPWENT
+#endif
+
#if SANITIZER_INTERCEPT_CLOCK_GETTIME
INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(clock_getres)(clk_id, tp);
if (!res && tp) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
@@ -799,6 +1589,9 @@ INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(clock_gettime)(clk_id, tp);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
@@ -823,6 +1616,9 @@ INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getitimer)(which, curr_value);
if (!res && curr_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
@@ -834,6 +1630,9 @@ INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {
COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);
if (new_value)
COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(setitimer)(which, new_value, old_value);
if (!res && old_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
@@ -861,33 +1660,32 @@ static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
}
static THREADLOCAL __sanitizer_glob_t *pglob_copy;
-static THREADLOCAL void *glob_ctx;
static void wrapped_gl_closedir(void *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
pglob_copy->gl_closedir(dir);
}
static void *wrapped_gl_readdir(void *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
return pglob_copy->gl_readdir(dir);
}
static void *wrapped_gl_opendir(const char *s) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
return pglob_copy->gl_opendir(s);
}
static int wrapped_gl_lstat(const char *s, void *st) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
return pglob_copy->gl_lstat(s, st);
}
static int wrapped_gl_stat(const char *s, void *st) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(glob_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(glob_ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
return pglob_copy->gl_stat(s, st);
}
@@ -896,6 +1694,7 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
__sanitizer_glob_t *pglob) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
__sanitizer_glob_t glob_copy = {
0, 0, 0,
0, wrapped_gl_closedir, wrapped_gl_readdir,
@@ -907,7 +1706,6 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
Swap(pglob->gl_lstat, glob_copy.gl_lstat);
Swap(pglob->gl_stat, glob_copy.gl_stat);
pglob_copy = &glob_copy;
- glob_ctx = ctx;
}
int res = REAL(glob)(pattern, flags, errfunc, pglob);
if (flags & glob_altdirfunc) {
@@ -918,7 +1716,6 @@ INTERCEPTOR(int, glob, const char *pattern, int flags,
Swap(pglob->gl_stat, glob_copy.gl_stat);
}
pglob_copy = 0;
- glob_ctx = 0;
if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
return res;
}
@@ -928,6 +1725,7 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
__sanitizer_glob_t *pglob) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
__sanitizer_glob_t glob_copy = {
0, 0, 0,
0, wrapped_gl_closedir, wrapped_gl_readdir,
@@ -939,7 +1737,6 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
Swap(pglob->gl_lstat, glob_copy.gl_lstat);
Swap(pglob->gl_stat, glob_copy.gl_stat);
pglob_copy = &glob_copy;
- glob_ctx = ctx;
}
int res = REAL(glob64)(pattern, flags, errfunc, pglob);
if (flags & glob_altdirfunc) {
@@ -950,7 +1747,6 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
Swap(pglob->gl_stat, glob_copy.gl_stat);
}
pglob_copy = 0;
- glob_ctx = 0;
if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
return res;
}
@@ -968,15 +1764,27 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait)(status);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
return res;
}
+// On FreeBSD id_t is always 64-bit wide.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop,
+ int options) {
+#else
INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
int options) {
+#endif
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(waitid)(idtype, id, infop, options);
if (res != -1 && infop)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
@@ -985,6 +1793,9 @@ INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(waitpid)(pid, status, options);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -993,6 +1804,9 @@ INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait3)(status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1000,9 +1814,28 @@ INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
}
return res;
}
+#if SANITIZER_ANDROID
+INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(__wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4);
+#else
INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1010,14 +1843,16 @@ INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
}
return res;
}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4);
+#endif // SANITIZER_ANDROID
#define INIT_WAIT \
COMMON_INTERCEPT_FUNCTION(wait); \
COMMON_INTERCEPT_FUNCTION(waitid); \
COMMON_INTERCEPT_FUNCTION(waitpid); \
- COMMON_INTERCEPT_FUNCTION(wait3); \
- COMMON_INTERCEPT_FUNCTION(wait4);
+ COMMON_INTERCEPT_FUNCTION(wait3);
#else
#define INIT_WAIT
+#define INIT_WAIT4
#endif
#if SANITIZER_INTERCEPT_INET
@@ -1027,6 +1862,9 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
uptr sz = __sanitizer_in_addr_sz(af);
if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);
// FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(inet_ntop)(af, src, dst, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1034,7 +1872,11 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, src, 0);
// FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(inet_pton)(af, src, dst);
if (res == 1) {
uptr sz = __sanitizer_in_addr_sz(af);
@@ -1054,6 +1896,9 @@ INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(inet_aton)(cp, dst);
if (res != 0) {
uptr sz = __sanitizer_in_addr_sz(af_inet);
@@ -1070,6 +1915,9 @@ INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_getschedparam)(thread, policy, param);
if (res == 0) {
if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
@@ -1094,6 +1942,9 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
if (hints)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getaddrinfo)(node, service, hints, out);
if (res == 0 && out) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
@@ -1123,6 +1974,9 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
serv, servlen, flags);
// FIXME: consider adding READ_RANGE(sockaddr, salen)
// There is padding in in_addr that may make this too noisy
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
if (res == 0) {
@@ -1144,6 +1998,9 @@ INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
int addrlen_in = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getsockname)(sock_fd, addr, addrlen);
if (res == 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
@@ -1221,23 +2078,54 @@ INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {
#endif
#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
+ char *buf, SIZE_T buflen, __sanitizer_hostent **result,
+ int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);
+#else
+#define INIT_GETHOSTBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTENT_R
INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
+#define INIT_GETHOSTENT_R \
+ COMMON_INTERCEPT_FUNCTION(gethostent_r);
+#else
+#define INIT_GETHOSTENT_R
+#endif
+#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R
INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
__sanitizer_hostent **result, int *h_errnop) {
@@ -1245,62 +2133,49 @@ INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
buflen, result, h_errnop);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
- }
- return res;
-}
-
-INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
- char *buf, SIZE_T buflen, __sanitizer_hostent **result,
- int *h_errnop) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
- h_errnop);
- int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
+#define INIT_GETHOSTBYADDR_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);
+#else
+#define INIT_GETHOSTBYADDR_R
+#endif
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
__sanitizer_hostent **result, int *h_errnop) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
result, h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
- if (res == 0) {
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (*result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
}
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
-#define INIT_GETHOSTBYNAME_R \
- COMMON_INTERCEPT_FUNCTION(gethostent_r); \
- COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r); \
- COMMON_INTERCEPT_FUNCTION(gethostbyname_r); \
+#define INIT_GETHOSTBYNAME2_R \
COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);
#else
-#define INIT_GETHOSTBYNAME_R
+#define INIT_GETHOSTBYNAME2_R
#endif
#if SANITIZER_INTERCEPT_GETSOCKOPT
@@ -1310,6 +2185,9 @@ INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
optlen);
if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
if (res == 0)
if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
@@ -1324,7 +2202,7 @@ INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, accept, fd, addr, addrlen);
- unsigned addrlen0;
+ unsigned addrlen0 = 0;
if (addrlen) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
addrlen0 = *addrlen;
@@ -1346,11 +2224,14 @@ INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, accept4, fd, addr, addrlen, f);
- unsigned addrlen0;
+ unsigned addrlen0 = 0;
if (addrlen) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
addrlen0 = *addrlen;
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int fd2 = REAL(accept4)(fd, addr, addrlen, f);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
@@ -1368,6 +2249,9 @@ INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
INTERCEPTOR(double, modf, double x, double *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(modf)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -1377,6 +2261,9 @@ INTERCEPTOR(double, modf, double x, double *iptr) {
INTERCEPTOR(float, modff, float x, float *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(modff)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -1386,6 +2273,9 @@ INTERCEPTOR(float, modff, float x, float *iptr) {
INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(modfl)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -1418,6 +2308,9 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
if (res >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -1439,6 +2332,9 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
unsigned addr_sz;
if (addrlen) addr_sz = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpeername)(sockfd, addr, addrlen);
if (!res && addr && addrlen)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
@@ -1452,6 +2348,9 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
#if SANITIZER_INTERCEPT_SYSINFO
INTERCEPTOR(int, sysinfo, void *info) {
void *ctx;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
int res = REAL(sysinfo)(info);
if (!res && info)
@@ -1464,9 +2363,22 @@ INTERCEPTOR(int, sysinfo, void *info) {
#endif
#if SANITIZER_INTERCEPT_READDIR
+INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ __sanitizer_dirent *res = REAL(opendir)(path);
+ if (res)
+ COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
+ return res;
+}
+
INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
@@ -1476,6 +2388,9 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
__sanitizer_dirent **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(readdir_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -1486,6 +2401,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
}
#define INIT_READDIR \
+ COMMON_INTERCEPT_FUNCTION(opendir); \
COMMON_INTERCEPT_FUNCTION(readdir); \
COMMON_INTERCEPT_FUNCTION(readdir_r);
#else
@@ -1496,6 +2412,9 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
@@ -1505,6 +2424,9 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
__sanitizer_dirent64 **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(readdir64_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -1524,6 +2446,7 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ptrace, request, pid, addr, data);
+ __sanitizer_iovec local_iovec;
if (data) {
if (request == ptrace_setregs)
@@ -1532,18 +2455,29 @@ INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpregs_struct_sz);
else if (request == ptrace_setfpxregs)
COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
+ else if (request == ptrace_setvfpregs)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
else if (request == ptrace_setsiginfo)
COMMON_INTERCEPTOR_READ_RANGE(ctx, data, siginfo_t_sz);
- else if (request == ptrace_setregset) {
- __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, iov->iov_base, iov->iov_len);
+      // Some kernels might zero iovec::iov_base on an invalid write access.
+      // Copy the iovec up front so the original address is still available
+      // for inspection afterwards.
+ else if (request == ptrace_setregset || request == ptrace_getregset) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec));
+ local_iovec = *iovec;
+ if (request == ptrace_setregset)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec->iov_base, iovec->iov_len);
}
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
uptr res = REAL(ptrace)(request, pid, addr, data);
if (!res && data) {
- // Note that PEEK* requests assing different meaning to the return value.
+ // Note that PEEK* requests assign different meaning to the return value.
// This function does not handle them (nor does it need to).
if (request == ptrace_getregs)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);
@@ -1551,11 +2485,17 @@ INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpregs_struct_sz);
else if (request == ptrace_getfpxregs)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
+ else if (request == ptrace_getvfpregs)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
else if (request == ptrace_getsiginfo)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);
+ else if (request == ptrace_geteventmsg)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));
else if (request == ptrace_getregset) {
- __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iov->iov_base, iov->iov_len);
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec, sizeof(*iovec));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, local_iovec.iov_base,
+ local_iovec.iov_len);
}
}
return res;
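For the PTRACE_GETREGSET/SETREGSET handling above: the interceptor snapshots the user's iovec into local_iovec before calling the real ptrace, because the kernel updates iov_len (and, per the comment, may clobber iov_base), so the post-call shadow update has to rely on the saved copy. A standalone sketch of the same save-before-call pattern against the raw ptrace(2) API (the helper name and buffer here are illustrative, not part of the patch):

    #include <elf.h>          // NT_PRSTATUS
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>      // struct iovec
    #include <sys/user.h>     // user_regs_struct

    // Illustrative only: snapshot the iovec before the call, since the
    // kernel may rewrite it; report using the saved copy afterwards.
    long get_gp_regs(pid_t pid, user_regs_struct *regs) {
      iovec iov = {regs, sizeof(*regs)};
      iovec saved = iov;  // what the interceptor keeps in local_iovec
      long res = ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
      // On success the kernel may have rewritten iov.iov_len (and, on some
      // kernels, iov_base); the filled-in registers live at saved.iov_base.
      (void)saved;
      return res;
    }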
@@ -1586,6 +2526,9 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(getcwd)(buf, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1599,6 +2542,9 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
INTERCEPTOR(char *, get_current_dir_name, int fake) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(get_current_dir_name)(fake);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1610,20 +2556,59 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) {
#define INIT_GET_CURRENT_DIR_NAME
#endif
+UNUSED static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
+ CHECK(endptr);
+ if (nptr == *endptr) {
+    // No digits were found by the strtol call, so we need to find the last
+    // symbol accessed by strtol on our own.
+    // We get this symbol by skipping leading blanks and an optional +/- sign.
+ while (IsSpace(*nptr)) nptr++;
+ if (*nptr == '+' || *nptr == '-') nptr++;
+ *endptr = const_cast<char *>(nptr);
+ }
+ CHECK(*endptr >= nptr);
+}
+
+UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
+ char **endptr, char *real_endptr, int base) {
+ if (endptr) {
+ *endptr = real_endptr;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
+ }
+  // If base has an unsupported value, strtol can exit with EINVAL
+  // without reading any characters. So do the additional checks only
+  // if base is valid.
+ bool is_valid_base = (base == 0) || (2 <= base && base <= 36);
+ if (is_valid_base) {
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, nptr, is_valid_base ?
+ (real_endptr - nptr) + 1 : 0);
+}
+
+
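The two helpers above decide how many input bytes the strto* interceptors report as read: when real_endptr == nptr the conversion consumed no digits, so FixRealStrtolEndptr re-derives the last byte strtol would still have inspected by skipping leading blanks and an optional sign, and StrtolFixAndCheck then checks (real_endptr - nptr) + 1 bytes only when the base is valid. A small standalone illustration of that endptr computation (the function name below is mine):

    #include <cctype>

    // Illustrative only: mirrors what FixRealStrtolEndptr computes when the
    // conversion consumed no digits ("  +xyz" -> the last byte touched is
    // the one right after the sign, not the first byte of the string).
    const char *LastByteTouched(const char *nptr, const char *real_endptr) {
      if (real_endptr != nptr) return real_endptr;     // digits were consumed
      const char *p = nptr;
      while (std::isspace((unsigned char)*p)) ++p;     // skip leading blanks
      if (*p == '+' || *p == '-') ++p;                 // skip an optional sign
      return p;
    }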
#if SANITIZER_INTERCEPT_STRTOIMAX
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
- INTMAX_T res = REAL(strtoimax)(nptr, endptr, base);
- if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ char *real_endptr;
+ INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
}
INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
- INTMAX_T res = REAL(strtoumax)(nptr, endptr, base);
- if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ char *real_endptr;
+ INTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
}
@@ -1638,6 +2623,9 @@ INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbstowcs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
@@ -1652,6 +2640,9 @@ INTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,
COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);
if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
// This function, and several others, may or may not write the terminating
@@ -1679,6 +2670,9 @@ INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
}
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -1696,6 +2690,9 @@ INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcstombs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
@@ -1710,6 +2707,9 @@ INTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,
COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);
if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
if (res != (SIZE_T) - 1 && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -1735,8 +2735,11 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
}
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
- if (res != (SIZE_T) - 1 && dest && src) {
+ if (res != ((SIZE_T)-1) && dest && src) {
SIZE_T write_cnt = res + !*src;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
}
@@ -1748,10 +2751,35 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
#define INIT_WCSNRTOMBS
#endif
+
+#if SANITIZER_INTERCEPT_WCRTOMB
+INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcrtomb, dest, src, ps);
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SIZE_T res = REAL(wcrtomb)(dest, src, ps);
+ if (res != ((SIZE_T)-1) && dest) {
+ SIZE_T write_cnt = res;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+#define INIT_WCRTOMB COMMON_INTERCEPT_FUNCTION(wcrtomb);
+#else
+#define INIT_WCRTOMB
+#endif
+
#if SANITIZER_INTERCEPT_TCGETATTR
INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(tcgetattr)(fd, termios_p);
if (!res && termios_p)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
@@ -1773,7 +2801,7 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
// version of a versioned symbol. For realpath(), this gives us something
// (called __old_realpath) that does not handle NULL in the second argument.
// Handle it as part of the interceptor.
- char *allocated_path = 0;
+ char *allocated_path = nullptr;
if (!resolved_path)
allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
@@ -1806,6 +2834,9 @@ INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(confstr)(name, buf, len);
if (buf && res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
@@ -1820,6 +2851,9 @@ INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
return res;
@@ -1829,12 +2863,25 @@ INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
#define INIT_SCHED_GETAFFINITY
#endif
+#if SANITIZER_INTERCEPT_SCHED_GETPARAM
+INTERCEPTOR(int, sched_getparam, int pid, void *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
+ int res = REAL(sched_getparam)(pid, param);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
+ return res;
+}
+#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
+#else
+#define INIT_SCHED_GETPARAM
+#endif
+
#if SANITIZER_INTERCEPT_STRERROR
INTERCEPTOR(char *, strerror, int errnum) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
char *res = REAL(strerror)(errnum);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
@@ -1846,6 +2893,9 @@ INTERCEPTOR(char *, strerror, int errnum) {
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
// There are 2 versions of strerror_r:
// * POSIX version returns 0 on success, negative error code on failure,
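The (truncated) comment above refers to the two strerror_r flavors the interceptor has to support: the XSI/POSIX one returns an int and fills buf, while the GNU one returns a char* that may point either into buf or at a static message, so the interceptor cannot assume buf was written. A hedged caller-side sketch of the difference (the feature-test check is a simplification of the real glibc rules):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    void print_errno_message(int errnum, char *buf, size_t buflen) {
    #if defined(__GLIBC__) && defined(_GNU_SOURCE)
      // GNU flavor: the result may alias buf or a static string.
      const char *msg = strerror_r(errnum, buf, buflen);
      std::printf("%s\n", msg);
    #else
      // XSI/POSIX flavor: 0 on success, message written into buf.
      if (strerror_r(errnum, buf, buflen) == 0) std::printf("%s\n", buf);
    #endif
    }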
@@ -1874,6 +2924,9 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
// This version always returns a null-terminated string.
if (buf && buflen)
@@ -1890,23 +2943,22 @@ typedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);
typedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,
const struct __sanitizer_dirent **);
-static THREADLOCAL void *scandir_ctx;
static THREADLOCAL scandir_filter_f scandir_filter;
static THREADLOCAL scandir_compar_f scandir_compar;
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
return scandir_filter(dir);
}
static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
const struct __sanitizer_dirent **b) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, a, sizeof(*a));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, *a, (*a)->d_reclen);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, b, sizeof(*b));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir_ctx, *b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
return scandir_compar(a, b);
}
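What survives of the scandir plumbing after this change is a plain callback-trampoline pattern: the user's filter/compar pointers are parked in thread-local slots, the real scandir is handed the wrappers above, and each wrapper marks the dirents produced by uninstrumented libc as initialized before forwarding to the user callback (the old thread-local ctx is gone because UNPOISON_PARAM and INITIALIZE_RANGE no longer need it). A standalone sketch of that shape, with all names below illustrative rather than taken from the patch:

    // Illustrative only: the trampoline shape used by the scandir interceptor.
    // C-style callbacks carry no user-data pointer, so the real callback is
    // parked in a thread-local slot the wrapper can reach.
    typedef int (*name_filter_fn)(const char *name);

    static thread_local name_filter_fn g_user_filter = nullptr;

    static int FilterTrampoline(const char *name) {
      // A sanitizer wrapper would mark `name` as initialized here, because it
      // was produced by uninstrumented library code.
      return g_user_filter(name);
    }

    static int CountMatches(const char *const *names, int n, name_filter_fn f) {
      g_user_filter = f;            // analogous to `scandir_filter = filter;`
      int matches = 0;
      for (int i = 0; i < n; ++i)
        if (FilterTrampoline(names[i])) ++matches;
      g_user_filter = nullptr;      // reset, as after REAL(scandir) returns
      return matches;
    }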
@@ -1915,15 +2967,16 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
- CHECK_EQ(0, scandir_ctx);
- scandir_ctx = ctx;
scandir_filter = filter;
scandir_compar = compar;
- int res = REAL(scandir)(dirp, namelist, filter ? wrapped_scandir_filter : 0,
- compar ? wrapped_scandir_compar : 0);
- scandir_ctx = 0;
- scandir_filter = 0;
- scandir_compar = 0;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(scandir)(dirp, namelist,
+ filter ? wrapped_scandir_filter : nullptr,
+ compar ? wrapped_scandir_compar : nullptr);
+ scandir_filter = nullptr;
+ scandir_compar = nullptr;
if (namelist && res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
@@ -1943,23 +2996,22 @@ typedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);
typedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,
const struct __sanitizer_dirent64 **);
-static THREADLOCAL void *scandir64_ctx;
static THREADLOCAL scandir64_filter_f scandir64_filter;
static THREADLOCAL scandir64_compar_f scandir64_compar;
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir64_ctx, 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
return scandir64_filter(dir);
}
static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
const struct __sanitizer_dirent64 **b) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(scandir64_ctx, 2);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, a, sizeof(*a));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, *a, (*a)->d_reclen);
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, b, sizeof(*b));
- COMMON_INTERCEPTOR_WRITE_RANGE(scandir64_ctx, *b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
return scandir64_compar(a, b);
}
@@ -1968,16 +3020,17 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
- CHECK_EQ(0, scandir64_ctx);
- scandir64_ctx = ctx;
scandir64_filter = filter;
scandir64_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
- REAL(scandir64)(dirp, namelist, filter ? wrapped_scandir64_filter : 0,
- compar ? wrapped_scandir64_compar : 0);
- scandir64_ctx = 0;
- scandir64_filter = 0;
- scandir64_compar = 0;
+ REAL(scandir64)(dirp, namelist,
+ filter ? wrapped_scandir64_filter : nullptr,
+ compar ? wrapped_scandir64_compar : nullptr);
+ scandir64_filter = nullptr;
+ scandir64_compar = nullptr;
if (namelist && res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
@@ -1996,6 +3049,9 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
INTERCEPTOR(int, getgroups, int size, u32 *lst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgroups)(size, lst);
if (res && lst) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
return res;
@@ -2059,6 +3115,9 @@ INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wordexp)(s, p, flags);
if (!res && p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
@@ -2082,6 +3141,9 @@ INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res;
@@ -2096,6 +3158,9 @@ INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
@@ -2112,6 +3177,9 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
@@ -2125,6 +3193,9 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigemptyset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -2133,6 +3204,9 @@ INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigfillset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -2148,6 +3222,9 @@ INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigpending)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -2163,6 +3240,9 @@ INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigprocmask)(how, set, oldset);
if (!res && oldset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
@@ -2177,6 +3257,9 @@ INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
INTERCEPTOR(int, backtrace, void **buffer, int size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(backtrace)(buffer, size);
if (res && buffer)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
@@ -2188,6 +3271,9 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
if (buffer && size)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
@@ -2243,53 +3329,6 @@ INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
#define INIT_PTHREAD_MUTEX_UNLOCK
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_COND
-INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_wait, c, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- int res = REAL(pthread_cond_wait)(c, m);
- COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_init, c, a);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_init)(c, a);
-}
-
-INTERCEPTOR(int, pthread_cond_signal, void *c) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_signal, c);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_signal)(c);
-}
-
-INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_cond_broadcast, c);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, c, pthread_cond_t_sz);
- return REAL(pthread_cond_broadcast)(c);
-}
-
-#define INIT_PTHREAD_COND_WAIT \
- INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_INIT \
- INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_SIGNAL \
- INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2")
-#define INIT_PTHREAD_COND_BROADCAST \
- INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2")
-#else
-#define INIT_PTHREAD_COND_WAIT
-#define INIT_PTHREAD_COND_INIT
-#define INIT_PTHREAD_COND_SIGNAL
-#define INIT_PTHREAD_COND_BROADCAST
-#endif
-
#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R
static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
@@ -2340,6 +3379,9 @@ INTERCEPTOR(int, statfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
@@ -2347,6 +3389,9 @@ INTERCEPTOR(int, statfs, char *path, void *buf) {
INTERCEPTOR(int, fstatfs, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
@@ -2363,6 +3408,9 @@ INTERCEPTOR(int, statfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
@@ -2370,6 +3418,9 @@ INTERCEPTOR(int, statfs64, char *path, void *buf) {
INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
@@ -2386,6 +3437,9 @@ INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statvfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -2393,6 +3447,9 @@ INTERCEPTOR(int, statvfs, char *path, void *buf) {
INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatvfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -2409,6 +3466,9 @@ INTERCEPTOR(int, statvfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statvfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
@@ -2416,6 +3476,9 @@ INTERCEPTOR(int, statvfs64, char *path, void *buf) {
INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatvfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
@@ -2440,13 +3503,13 @@ INTERCEPTOR(int, initgroups, char *user, u32 group) {
#define INIT_INITGROUPS
#endif
-#if SANITIZER_INTERCEPT_ETHER
+#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON
INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
char *res = REAL(ether_ntoa)(addr);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
@@ -2454,13 +3517,24 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
__sanitizer_ether_addr *res = REAL(ether_aton)(buf);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, sizeof(*res));
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
return res;
}
+#define INIT_ETHER_NTOA_ATON \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton);
+#else
+#define INIT_ETHER_NTOA_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_HOST
INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntohost, hostname, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_ntohost)(hostname, addr);
if (!res && hostname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
@@ -2471,6 +3545,9 @@ INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
if (hostname)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_hostton)(hostname, addr);
if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
return res;
@@ -2480,6 +3557,9 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_line)(line, addr, hostname);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
@@ -2488,14 +3568,12 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
}
return res;
}
-#define INIT_ETHER \
- COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
- COMMON_INTERCEPT_FUNCTION(ether_aton); \
+#define INIT_ETHER_HOST \
COMMON_INTERCEPT_FUNCTION(ether_ntohost); \
COMMON_INTERCEPT_FUNCTION(ether_hostton); \
COMMON_INTERCEPT_FUNCTION(ether_line);
#else
-#define INIT_ETHER
+#define INIT_ETHER_HOST
#endif
#if SANITIZER_INTERCEPT_ETHER_R
@@ -2503,6 +3581,9 @@ INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ether_ntoa_r)(addr, buf);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -2512,6 +3593,9 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));
return res;
@@ -2527,6 +3611,9 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(shmctl)(shmid, cmd, buf);
if (res >= 0) {
unsigned sz = 0;
@@ -2549,6 +3636,9 @@ INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
INTERCEPTOR(int, random_r, void *buf, u32 *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(random_r)(buf, result);
if (!res && result)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -2559,16 +3649,33 @@ INTERCEPTOR(int, random_r, void *buf, u32 *result) {
#define INIT_RANDOM_R
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
- SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED
-#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
- INTERCEPTOR(int, pthread_attr_get##what, void *attr, void *r) { \
- void *ctx; \
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_get##what, attr, r); \
- int res = REAL(pthread_attr_get##what)(attr, r); \
- if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
- return res; \
+// FIXME: under ASan the REAL() call below may write to freed memory and corrupt
+// its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET
+#define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz) \
+ INTERCEPTOR(int, fn, void *attr, void *r) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r); \
+ int res = REAL(fn)(attr, r); \
+ if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
+ return res; \
}
+#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz)
#endif
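The generalized INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET macro lets one template serve the attr, mutexattr, rwlockattr, condattr and barrierattr getters alike. For reference, an instantiation such as INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int)) comes out to roughly the following (hand-written approximation of the expansion, not literal preprocessor output):

    INTERCEPTOR(int, pthread_mutexattr_gettype, void *attr, void *r) {
      void *ctx;
      COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutexattr_gettype, attr, r);
      int res = REAL(pthread_mutexattr_gettype)(attr, r);
      if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sizeof(int));
      return res;
    }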
#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
@@ -2581,6 +3688,9 @@ INTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T))
INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_attr_getstack)(attr, addr, size);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
@@ -2592,9 +3702,13 @@ INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
// We may need to call the real pthread_attr_getstack from the run-time
// in sanitizer_common, but we don't want to include the interception headers
// there. So, just define this function here.
-int __sanitizer_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
+namespace __sanitizer {
+extern "C" {
+int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
return REAL(pthread_attr_getstack)(attr, addr, size);
}
+} // extern "C"
+} // namespace __sanitizer
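Renaming the helper to real_pthread_attr_getstack and wrapping it in namespace __sanitizer with C linkage is what lets the rest of the runtime reach the underlying libc call without including the interception headers: a caller elsewhere only needs a matching extern declaration. A hedged sketch of such a caller (everything except the declaration itself is hypothetical):

    namespace __sanitizer {
    extern "C" int real_pthread_attr_getstack(void *attr, void **addr,
                                              SIZE_T *size);

    // Hypothetical user inside the runtime: compute stack bounds from a
    // pthread_attr_t without touching the interception machinery.
    static void GetAttrStackBounds(void *attr, uptr *lo, uptr *hi) {
      void *base = nullptr;
      SIZE_T size = 0;
      if (real_pthread_attr_getstack(attr, &base, &size) == 0) {
        *lo = (uptr)base;
        *hi = (uptr)base + size;
      }
    }
    }  // namespace __sanitizer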
#define INIT_PTHREAD_ATTR_GET \
COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \
@@ -2623,6 +3737,9 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize,
cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);
if (!res && cpusetsize && cpuset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
@@ -2635,6 +3752,94 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
#endif
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared);
+#else
+#define INIT_PTHREAD_CONDATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+INTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETCLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock);
+#else
+#define INIT_PTHREAD_CONDATTR_GETCLOCK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int)) // !mac !android
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared);
+#else
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED
+#endif
+
#if SANITIZER_INTERCEPT_TMPNAM
INTERCEPTOR(char *, tmpnam, char *s) {
void *ctx;
@@ -2642,9 +3847,12 @@ INTERCEPTOR(char *, tmpnam, char *s) {
char *res = REAL(tmpnam)(s);
if (res) {
if (s)
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
}
return res;
}
@@ -2657,6 +3865,9 @@ INTERCEPTOR(char *, tmpnam, char *s) {
INTERCEPTOR(char *, tmpnam_r, char *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(tmpnam_r)(s);
if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
return res;
@@ -2673,7 +3884,7 @@ INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
char *res = REAL(tempnam)(dir, pfx);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
@@ -2685,6 +3896,7 @@ INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name);
return REAL(pthread_setname_np)(thread, name);
}
@@ -2697,6 +3909,9 @@ INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) {
INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincos)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -2704,6 +3919,9 @@ INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincosf)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -2711,6 +3929,9 @@ INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincosl)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -2727,6 +3948,9 @@ INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
INTERCEPTOR(double, remquo, double x, double y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(remquo)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -2734,6 +3958,9 @@ INTERCEPTOR(double, remquo, double x, double y, int *quo) {
INTERCEPTOR(float, remquof, float x, float y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(remquof)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -2741,6 +3968,9 @@ INTERCEPTOR(float, remquof, float x, float y, int *quo) {
INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(remquol)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -2788,6 +4018,9 @@ INTERCEPTOR(long double, lgammal, long double x) {
INTERCEPTOR(double, lgamma_r, double x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(lgamma_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
@@ -2795,29 +4028,43 @@ INTERCEPTOR(double, lgamma_r, double x, int *signp) {
INTERCEPTOR(float, lgammaf_r, float x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(lgammaf_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
+#define INIT_LGAMMA_R \
+ COMMON_INTERCEPT_FUNCTION(lgamma_r); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf_r);
+#else
+#define INIT_LGAMMA_R
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL_R
INTERCEPTOR(long double, lgammal_r, long double x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(lgammal_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
-#define INIT_LGAMMA_R \
- COMMON_INTERCEPT_FUNCTION(lgamma_r); \
- COMMON_INTERCEPT_FUNCTION(lgammaf_r); \
- COMMON_INTERCEPT_FUNCTION(lgammal_r);
+#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION(lgammal_r);
#else
-#define INIT_LGAMMA_R
+#define INIT_LGAMMAL_R
#endif
#if SANITIZER_INTERCEPT_DRAND48_R
INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(drand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
@@ -2825,6 +4072,9 @@ INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(lrand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
@@ -2836,10 +4086,25 @@ INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
#define INIT_DRAND48_R
#endif
+#if SANITIZER_INTERCEPT_RAND_R
+INTERCEPTOR(int, rand_r, unsigned *seedp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp));
+ return REAL(rand_r)(seedp);
+}
+#define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r);
+#else
+#define INIT_RAND_R
+#endif
+
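The only check in the rand_r interceptor is COMMON_INTERCEPTOR_READ_RANGE on the seed, so its main payoff is catching callers that pass an uninitialized seed. A minimal sketch of such a caller, written for illustration and not part of the patch:

#include <stdlib.h>

int main() {
  unsigned seed;          // deliberately left uninitialized
  int r = rand_r(&seed);  // the READ_RANGE check above lets MSan flag this read
  return r != 0;
}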
#if SANITIZER_INTERCEPT_GETLINE
INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(getline)(lineptr, n, stream);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
@@ -2848,20 +4113,36 @@ INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
}
return res;
}
-INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
- void *stream) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getdelim, lineptr, n, delim, stream);
- SSIZE_T res = REAL(getdelim)(lineptr, n, delim, stream);
- if (res > 0) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1);
+
+// FIXME: under ASan the call below may write to freed memory and corrupt its
+// metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define GETDELIM_INTERCEPTOR_IMPL(vname) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, lineptr, n, delim, stream); \
+ SSIZE_T res = REAL(vname)(lineptr, n, delim, stream); \
+ if (res > 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); \
+ } \
+ return res; \
}
- return res;
-}
-#define INIT_GETLINE \
- COMMON_INTERCEPT_FUNCTION(getline); \
+
+INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream)
+GETDELIM_INTERCEPTOR_IMPL(__getdelim)
+
+// There's no __getdelim() on FreeBSD so we supply the getdelim() interceptor
+// with its own body.
+INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream)
+GETDELIM_INTERCEPTOR_IMPL(getdelim)
+
+#define INIT_GETLINE \
+ COMMON_INTERCEPT_FUNCTION(getline); \
+ COMMON_INTERCEPT_FUNCTION(__getdelim); \
COMMON_INTERCEPT_FUNCTION(getdelim);
#else
#define INIT_GETLINE
@@ -2879,7 +4160,10 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
COMMON_INTERCEPTOR_READ_RANGE(ctx, *inbuf, *inbytesleft);
if (outbytesleft)
COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft));
- void *outbuf_orig = outbuf ? *outbuf : 0;
+ void *outbuf_orig = outbuf ? *outbuf : nullptr;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
@@ -2896,6 +4180,9 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, times, tms);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_clock_t res = REAL(times)(tms);
if (res != (__sanitizer_clock_t)-1 && tms)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);
@@ -2906,117 +4193,1267 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#define INIT_TIMES
#endif
-#define SANITIZER_COMMON_INTERCEPTORS_INIT \
- INIT_TEXTDOMAIN; \
- INIT_STRCMP; \
- INIT_STRNCMP; \
- INIT_STRCASECMP; \
- INIT_STRNCASECMP; \
- INIT_READ; \
- INIT_PREAD; \
- INIT_PREAD64; \
- INIT_READV; \
- INIT_PREADV; \
- INIT_PREADV64; \
- INIT_WRITE; \
- INIT_PWRITE; \
- INIT_PWRITE64; \
- INIT_WRITEV; \
- INIT_PWRITEV; \
- INIT_PWRITEV64; \
- INIT_PRCTL; \
- INIT_LOCALTIME_AND_FRIENDS; \
- INIT_STRPTIME; \
- INIT_SCANF; \
- INIT_ISOC99_SCANF; \
- INIT_FREXP; \
- INIT_FREXPF_FREXPL; \
- INIT_GETPWNAM_AND_FRIENDS; \
- INIT_GETPWNAM_R_AND_FRIENDS; \
- INIT_CLOCK_GETTIME; \
- INIT_GETITIMER; \
- INIT_TIME; \
- INIT_GLOB; \
- INIT_WAIT; \
- INIT_INET; \
- INIT_PTHREAD_GETSCHEDPARAM; \
- INIT_GETADDRINFO; \
- INIT_GETNAMEINFO; \
- INIT_GETSOCKNAME; \
- INIT_GETHOSTBYNAME; \
- INIT_GETHOSTBYNAME_R; \
- INIT_GETSOCKOPT; \
- INIT_ACCEPT; \
- INIT_ACCEPT4; \
- INIT_MODF; \
- INIT_RECVMSG; \
- INIT_GETPEERNAME; \
- INIT_IOCTL; \
- INIT_INET_ATON; \
- INIT_SYSINFO; \
- INIT_READDIR; \
- INIT_READDIR64; \
- INIT_PTRACE; \
- INIT_SETLOCALE; \
- INIT_GETCWD; \
- INIT_GET_CURRENT_DIR_NAME; \
- INIT_STRTOIMAX; \
- INIT_MBSTOWCS; \
- INIT_MBSNRTOWCS; \
- INIT_WCSTOMBS; \
- INIT_WCSNRTOMBS; \
- INIT_TCGETATTR; \
- INIT_REALPATH; \
- INIT_CANONICALIZE_FILE_NAME; \
- INIT_CONFSTR; \
- INIT_SCHED_GETAFFINITY; \
- INIT_STRERROR; \
- INIT_STRERROR_R; \
- INIT_XPG_STRERROR_R; \
- INIT_SCANDIR; \
- INIT_SCANDIR64; \
- INIT_GETGROUPS; \
- INIT_POLL; \
- INIT_PPOLL; \
- INIT_WORDEXP; \
- INIT_SIGWAIT; \
- INIT_SIGWAITINFO; \
- INIT_SIGTIMEDWAIT; \
- INIT_SIGSETOPS; \
- INIT_SIGPENDING; \
- INIT_SIGPROCMASK; \
- INIT_BACKTRACE; \
- INIT__EXIT; \
- INIT_PTHREAD_MUTEX_LOCK; \
- INIT_PTHREAD_MUTEX_UNLOCK; \
- INIT_PTHREAD_COND_WAIT; \
- INIT_PTHREAD_COND_INIT; \
- INIT_PTHREAD_COND_SIGNAL; \
- INIT_PTHREAD_COND_BROADCAST; \
- INIT_GETMNTENT; \
- INIT_GETMNTENT_R; \
- INIT_STATFS; \
- INIT_STATFS64; \
- INIT_STATVFS; \
- INIT_STATVFS64; \
- INIT_INITGROUPS; \
- INIT_ETHER; \
- INIT_ETHER_R; \
- INIT_SHMCTL; \
- INIT_RANDOM_R; \
- INIT_PTHREAD_ATTR_GET; \
- INIT_PTHREAD_ATTR_GETINHERITSCHED; \
- INIT_PTHREAD_ATTR_GETAFFINITY_NP; \
- INIT_TMPNAM; \
- INIT_TMPNAM_R; \
- INIT_TEMPNAM; \
- INIT_PTHREAD_SETNAME_NP; \
- INIT_SINCOS; \
- INIT_REMQUO; \
- INIT_LGAMMA; \
- INIT_LGAMMA_R; \
- INIT_DRAND48_R; \
- INIT_GETLINE; \
- INIT_ICONV; \
- INIT_TIMES; \
-/**/
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+// If you see any crashes around this function, there are two known issues with
+// it: 1. __tls_get_addr can be called with a misaligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// 2. It can be called recursively if sanitizer code uses __tls_get_addr
+// to access thread-local variables (this should not normally happen,
+// because sanitizers use the initial-exec TLS model).
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
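For context, __tls_get_addr is only reached for dynamically allocated TLS, e.g. thread-locals in a dlopen'ed library, and the DTLS_on_tls_get_addr/INITIALIZE_RANGE pair above unpoisons each freshly allocated DTLS block. A rough sketch of code that exercises this path; the library name and symbol are made up for illustration:

// Assume libfoo.so was built from:  __thread int counter;
//                                   extern "C" int bump() { return ++counter; }
// Loading it at run time forces the dynamic-TLS path, so the first access to
// `counter` goes through the intercepted __tls_get_addr above. Link with -ldl.
#include <dlfcn.h>

int main() {
  void *h = dlopen("./libfoo.so", RTLD_NOW);   // hypothetical library
  if (!h) return 1;
  int (*bump)() = (int (*)())dlsym(h, "bump");
  int v = bump ? bump() : -1;                  // triggers __tls_get_addr
  dlclose(h);
  return v == 1 ? 0 : 1;
}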
+#if SANITIZER_INTERCEPT_LISTXATTR
+INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(listxattr)(path, list, size);
+ // Here and below, size == 0 is a special case where nothing is written to the
+ // buffer, and res contains the desired buffer size.
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(llistxattr)(path, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(flistxattr)(fd, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+#define INIT_LISTXATTR \
+ COMMON_INTERCEPT_FUNCTION(listxattr); \
+ COMMON_INTERCEPT_FUNCTION(llistxattr); \
+ COMMON_INTERCEPT_FUNCTION(flistxattr);
+#else
+#define INIT_LISTXATTR
+#endif
+
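The size == 0 special case handled above corresponds to the usual probe-then-fetch idiom for extended attributes. A short sketch of a caller, for illustration only:

#include <sys/xattr.h>
#include <stdlib.h>

// Probe for the required size, then fetch the attribute name list.
long list_attrs(const char *path) {              // illustrative helper
  ssize_t need = listxattr(path, NULL, 0);       // size == 0: nothing written
  if (need <= 0) return need;
  char *buf = (char *)malloc((size_t)need);
  if (!buf) return -1;
  ssize_t got = listxattr(path, buf, (size_t)need);  // interceptor unpoisons buf[0..got)
  free(buf);
  return got;
}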
+#if SANITIZER_INTERCEPT_GETXATTR
+INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(getxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+#define INIT_GETXATTR \
+ COMMON_INTERCEPT_FUNCTION(getxattr); \
+ COMMON_INTERCEPT_FUNCTION(lgetxattr); \
+ COMMON_INTERCEPT_FUNCTION(fgetxattr);
+#else
+#define INIT_GETXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETRESID
+INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getresuid)(ruid, euid, suid);
+ if (res >= 0) {
+ if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
+ if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz);
+ if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getresgid)(rgid, egid, sgid);
+ if (res >= 0) {
+ if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
+ if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz);
+ if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz);
+ }
+ return res;
+}
+#define INIT_GETRESID \
+ COMMON_INTERCEPT_FUNCTION(getresuid); \
+ COMMON_INTERCEPT_FUNCTION(getresgid);
+#else
+#define INIT_GETRESID
+#endif
+
+#if SANITIZER_INTERCEPT_GETIFADDRS
+// As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to
+// intercept freeifaddrs(). If that ceases to be the case, we might need to
+// intercept it to poison the memory again.
+INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(getifaddrs)(ifap);
+ if (res == 0 && ifap) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
+ __sanitizer_ifaddrs *p = *ifap;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
+ if (p->ifa_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
+ REAL(strlen)(p->ifa_name) + 1);
+ if (p->ifa_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
+ if (p->ifa_netmask)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz);
+ // On Linux this is a union, but the other member also points to a
+ // struct sockaddr, so the following is sufficient.
+ if (p->ifa_dstaddr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz);
+ // FIXME(smatveev): Unpoison p->ifa_data as well.
+ p = p->ifa_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETIFADDRS \
+ COMMON_INTERCEPT_FUNCTION(getifaddrs);
+#else
+#define INIT_GETIFADDRS
+#endif
+
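The loop above unpoisons every list node, its name and its sockaddrs, which is exactly what a typical consumer touches. An illustrative caller, not part of the patch:

#include <ifaddrs.h>
#include <stdio.h>

int main() {
  struct ifaddrs *ifa = NULL;
  if (getifaddrs(&ifa) != 0) return 1;
  // Every field read here was unpoisoned by the interceptor above.
  for (struct ifaddrs *p = ifa; p; p = p->ifa_next)
    if (p->ifa_name) printf("%s\n", p->ifa_name);
  freeifaddrs(ifa);   // calloc()/free() based, hence no freeifaddrs interceptor
  return 0;
}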
+#if SANITIZER_INTERCEPT_IF_INDEXTONAME
+INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ char *res = REAL(if_indextoname)(ifindex, ifname);
+ if (res && ifname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return res;
+}
+INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
+ if (ifname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return REAL(if_nametoindex)(ifname);
+}
+#define INIT_IF_INDEXTONAME \
+ COMMON_INTERCEPT_FUNCTION(if_indextoname); \
+ COMMON_INTERCEPT_FUNCTION(if_nametoindex);
+#else
+#define INIT_IF_INDEXTONAME
+#endif
+
+#if SANITIZER_INTERCEPT_CAPGET
+INTERCEPTOR(int, capget, void *hdrp, void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(capget)(hdrp, datap);
+ if (res == 0 && datap)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ // We can also return -1 and write to hdrp->version if the version passed in
+ // hdrp->version is unsupported. But that's not a trivial condition to check,
+ // and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
+ return res;
+}
+INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ if (datap)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ return REAL(capset)(hdrp, datap);
+}
+#define INIT_CAPGET \
+ COMMON_INTERCEPT_FUNCTION(capget); \
+ COMMON_INTERCEPT_FUNCTION(capset);
+#else
+#define INIT_CAPGET
+#endif
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr)
+DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr)
+DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr)
+
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ return WRAP(memmove)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ return WRAP(memcpy)(to, from, size);
+}
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ return WRAP(memset)(block, c, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
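The "Note the argument order" comment is the subtle part: the ARM EABI helpers take (dest, size, value) while memset takes (dest, value, size). A minimal sketch of the same delegation pattern, using a stand-in name since __aeabi_memset itself is emitted by the compiler rather than called by hand:

#include <cstring>
#include <cstddef>

// EABI-style entry point: (dest, size, value) forwarded to memset's
// (dest, value, size) order, mirroring the wrappers above.
static void *eabi_style_memset(void *dest, size_t n, int c) {
  return memset(dest, c, n);
}

int main() {
  char buf[8];
  eabi_style_memset(buf, sizeof(buf), 0);
  return buf[0];   // 0 on success
}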
+#if SANITIZER_INTERCEPT___BZERO
+DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
+
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ return WRAP(memset)(block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_FTIME
+INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(ftime)(tp);
+ if (tp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
+ return res;
+}
+#define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime);
+#else
+#define INIT_FTIME
+#endif // SANITIZER_INTERCEPT_FTIME
+
+#if SANITIZER_INTERCEPT_XDR
+INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,
+ unsigned size, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ REAL(xdrmem_create)(xdrs, addr, size, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+ if (op == __sanitizer_XDR_ENCODE) {
+ // It's not obvious how much data individual xdr_ routines write.
+ // Simply unpoison the entire target buffer in advance.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size);
+ }
+}
+
+INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ REAL(xdrstdio_create)(xdrs, file, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+}
+
+// FIXME: under ASan the call below may write to freed memory and corrupt
+// its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#define XDR_INTERCEPTOR(F, T) \
+ INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p); \
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); \
+ int res = REAL(F)(xdrs, p); \
+ if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \
+ return res; \
+ }
+
+XDR_INTERCEPTOR(xdr_short, short)
+XDR_INTERCEPTOR(xdr_u_short, unsigned short)
+XDR_INTERCEPTOR(xdr_int, int)
+XDR_INTERCEPTOR(xdr_u_int, unsigned)
+XDR_INTERCEPTOR(xdr_long, long)
+XDR_INTERCEPTOR(xdr_u_long, unsigned long)
+XDR_INTERCEPTOR(xdr_hyper, long long)
+XDR_INTERCEPTOR(xdr_u_hyper, unsigned long long)
+XDR_INTERCEPTOR(xdr_longlong_t, long long)
+XDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_int8_t, u8)
+XDR_INTERCEPTOR(xdr_uint8_t, u8)
+XDR_INTERCEPTOR(xdr_int16_t, u16)
+XDR_INTERCEPTOR(xdr_uint16_t, u16)
+XDR_INTERCEPTOR(xdr_int32_t, u32)
+XDR_INTERCEPTOR(xdr_uint32_t, u32)
+XDR_INTERCEPTOR(xdr_int64_t, u64)
+XDR_INTERCEPTOR(xdr_uint64_t, u64)
+XDR_INTERCEPTOR(xdr_quad_t, long long)
+XDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_bool, bool)
+XDR_INTERCEPTOR(xdr_enum, int)
+XDR_INTERCEPTOR(xdr_char, char)
+XDR_INTERCEPTOR(xdr_u_char, unsigned char)
+XDR_INTERCEPTOR(xdr_float, float)
+XDR_INTERCEPTOR(xdr_double, double)
+
+// FIXME: intercept xdr_array, opaque, union, vector, reference, pointer,
+// wrapstring, sizeof
+
+INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep));
+ if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(xdr_string)(xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (res && *p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ return res;
+}
+
+#define INIT_XDR \
+ COMMON_INTERCEPT_FUNCTION(xdrmem_create); \
+ COMMON_INTERCEPT_FUNCTION(xdrstdio_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bool); \
+ COMMON_INTERCEPT_FUNCTION(xdr_enum); \
+ COMMON_INTERCEPT_FUNCTION(xdr_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_float); \
+ COMMON_INTERCEPT_FUNCTION(xdr_double); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bytes); \
+ COMMON_INTERCEPT_FUNCTION(xdr_string);
+#else
+#define INIT_XDR
+#endif // SANITIZER_INTERCEPT_XDR
+
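The x_op checks above mean encode paths get READ_RANGE on the argument and decode paths get WRITE_RANGE. A small round trip through the Sun RPC xdrmem interface, shown only to illustrate the calls being intercepted (header location assumes the classic glibc <rpc/xdr.h>; newer systems may ship it via libtirpc):

#include <rpc/xdr.h>

int roundtrip() {
  char buf[64];
  int in = 42, out = 0;

  XDR enc;
  xdrmem_create(&enc, buf, sizeof(buf), XDR_ENCODE);
  if (!xdr_int(&enc, &in)) return 1;     // READ_RANGE on &in above

  XDR dec;
  xdrmem_create(&dec, buf, sizeof(buf), XDR_DECODE);
  if (!xdr_int(&dec, &out)) return 1;    // WRITE_RANGE on &out above
  return out == 42 ? 0 : 1;
}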
+#if SANITIZER_INTERCEPT_TSEARCH
+INTERCEPTOR(void *, tsearch, void *key, void **rootp,
+ int (*compar)(const void *, const void *)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ void *res = REAL(tsearch)(key, rootp, compar);
+ if (res && *(void **)res == key)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
+ return res;
+}
+#define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch);
+#else
+#define INIT_TSEARCH
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \
+ SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+void unpoison_file(__sanitizer_FILE *fp) {
+#if SANITIZER_HAS_STRUCT_FILE
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp));
+ if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
+ fp->_IO_read_end - fp->_IO_read_base);
+#endif // SANITIZER_HAS_STRUCT_FILE
+}
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS
+// These functions are called when a .c source is built with -O2.
+INTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp);
+ int res = REAL(__uflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp);
+ int res = REAL(__underflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch);
+ int res = REAL(__overflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp);
+ int res = REAL(__wuflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp);
+ int res = REAL(__wunderflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch);
+ int res = REAL(__woverflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+#define INIT_LIBIO_INTERNALS \
+ COMMON_INTERCEPT_FUNCTION(__uflow); \
+ COMMON_INTERCEPT_FUNCTION(__underflow); \
+ COMMON_INTERCEPT_FUNCTION(__overflow); \
+ COMMON_INTERCEPT_FUNCTION(__wuflow); \
+ COMMON_INTERCEPT_FUNCTION(__wunderflow); \
+ COMMON_INTERCEPT_FUNCTION(__woverflow);
+#else
+#define INIT_LIBIO_INTERNALS
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN
+INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fdopen)(fd, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN \
+ COMMON_INTERCEPT_FUNCTION(fopen); \
+ COMMON_INTERCEPT_FUNCTION(fdopen); \
+ COMMON_INTERCEPT_FUNCTION(freopen);
+#else
+#define INIT_FOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN64
+INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen64)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN64 \
+ COMMON_INTERCEPT_FUNCTION(fopen64); \
+ COMMON_INTERCEPT_FUNCTION(freopen64);
+#else
+#define INIT_FOPEN64
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr,
+ SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc);
+ __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {(char **)ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,
+ const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_OPEN_MEMSTREAM \
+ COMMON_INTERCEPT_FUNCTION(open_memstream); \
+ COMMON_INTERCEPT_FUNCTION(open_wmemstream); \
+ COMMON_INTERCEPT_FUNCTION(fmemopen);
+#else
+#define INIT_OPEN_MEMSTREAM
+#endif
+
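The FileMetadata recorded here is what lets the fflush/fclose interceptors further down unpoison the user-visible buffer, which only becomes meaningful after a flush. A short usage sketch, for illustration:

#include <stdio.h>
#include <stdlib.h>

int main() {
  char *buf = NULL;
  size_t size = 0;
  FILE *f = open_memstream(&buf, &size);
  if (!f) return 1;
  fprintf(f, "hello");
  fclose(f);            // fclose interceptor unpoisons buf[0..size]
  int ok = (size == 5);
  free(buf);
  return ok ? 0 : 1;
}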
+#if SANITIZER_INTERCEPT_OBSTACK
+static void initialize_obstack(__sanitizer_obstack *obstack) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack));
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk,
+ sizeof(*obstack->chunk));
+}
+
+INTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr arg, uptr sz),
+ void (*free_fn)(uptr arg, void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length);
+ REAL(_obstack_newchunk)(obstack, length);
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(
+ obstack->chunk, obstack->next_free - (char *)obstack->chunk);
+}
+#define INIT_OBSTACK \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_newchunk);
+#else
+#define INIT_OBSTACK
+#endif
+
+#if SANITIZER_INTERCEPT_FFLUSH
+INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ int res = REAL(fflush)(fp);
+ // FIXME: handle fp == NULL
+ if (fp) {
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ }
+ return res;
+}
+#define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush);
+#else
+#define INIT_FFLUSH
+#endif
+
+#if SANITIZER_INTERCEPT_FCLOSE
+INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ int res = REAL(fclose)(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ return res;
+}
+#define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose);
+#else
+#define INIT_FCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+ if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
+ COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
+ void *res = REAL(dlopen)(filename, flag);
+ COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
+ return res;
+}
+
+INTERCEPTOR(int, dlclose, void *handle) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);
+ int res = REAL(dlclose)(handle);
+ COMMON_INTERCEPTOR_LIBRARY_UNLOADED();
+ return res;
+}
+#define INIT_DLOPEN_DLCLOSE \
+ COMMON_INTERCEPT_FUNCTION(dlopen); \
+ COMMON_INTERCEPT_FUNCTION(dlclose);
+#else
+#define INIT_DLOPEN_DLCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_GETPASS
+INTERCEPTOR(char *, getpass, const char *prompt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
+ if (prompt)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ char *res = REAL(getpass)(prompt);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ return res;
+}
+
+#define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass);
+#else
+#define INIT_GETPASS
+#endif
+
+#if SANITIZER_INTERCEPT_TIMERFD
+INTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value,
+ void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value,
+ old_value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz);
+ int res = REAL(timerfd_settime)(fd, flags, new_value, old_value);
+ if (res != -1 && old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz);
+ return res;
+}
+
+INTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value);
+ int res = REAL(timerfd_gettime)(fd, curr_value);
+ if (res != -1 && curr_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz);
+ return res;
+}
+#define INIT_TIMERFD \
+ COMMON_INTERCEPT_FUNCTION(timerfd_settime); \
+ COMMON_INTERCEPT_FUNCTION(timerfd_gettime);
+#else
+#define INIT_TIMERFD
+#endif
+
+#if SANITIZER_INTERCEPT_MLOCKX
+// The Linux kernel has a bug that leads to a kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ VPrintf(1, "%s ignores mlock/mlockall/munlock/munlockall\n",
+ SanitizerToolName);
+}
+
+INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+#define INIT_MLOCKX \
+ COMMON_INTERCEPT_FUNCTION(mlock); \
+ COMMON_INTERCEPT_FUNCTION(munlock); \
+ COMMON_INTERCEPT_FUNCTION(mlockall); \
+ COMMON_INTERCEPT_FUNCTION(munlockall);
+
+#else
+#define INIT_MLOCKX
+#endif // SANITIZER_INTERCEPT_MLOCKX
+
+#if SANITIZER_INTERCEPT_FOPENCOOKIE
+struct WrappedCookie {
+ void *real_cookie;
+ __sanitizer_cookie_io_functions_t real_io_funcs;
+};
+
+static uptr wrapped_read(void *cookie, char *buf, uptr size) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_read real_read = wrapped_cookie->real_io_funcs.read;
+ return real_read ? real_read(wrapped_cookie->real_cookie, buf, size) : 0;
+}
+
+static uptr wrapped_write(void *cookie, const char *buf, uptr size) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_write real_write = wrapped_cookie->real_io_funcs.write;
+ return real_write ? real_write(wrapped_cookie->real_cookie, buf, size) : size;
+}
+
+static int wrapped_seek(void *cookie, u64 *offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(offset, sizeof(*offset));
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_seek real_seek = wrapped_cookie->real_io_funcs.seek;
+ return real_seek ? real_seek(wrapped_cookie->real_cookie, offset, whence)
+ : -1;
+}
+
+static int wrapped_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_close real_close = wrapped_cookie->real_io_funcs.close;
+ int res = real_close ? real_close(wrapped_cookie->real_cookie) : 0;
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, fopencookie, void *cookie, const char *mode,
+ __sanitizer_cookie_io_functions_t io_funcs) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopencookie, cookie, mode, io_funcs);
+ WrappedCookie *wrapped_cookie =
+ (WrappedCookie *)InternalAlloc(sizeof(WrappedCookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_io_funcs = io_funcs;
+ __sanitizer_FILE *res =
+ REAL(fopencookie)(wrapped_cookie, mode, {wrapped_read, wrapped_write,
+ wrapped_seek, wrapped_close});
+ return res;
+}
+
+#define INIT_FOPENCOOKIE COMMON_INTERCEPT_FUNCTION(fopencookie);
+#else
+#define INIT_FOPENCOOKIE
+#endif // SANITIZER_INTERCEPT_FOPENCOOKIE
+
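The WrappedCookie indirection above swaps in wrapped_read/wrapped_write/wrapped_seek/wrapped_close so the callbacks' arguments can be unpoisoned before the user's functions run; the application-side API is unchanged. A minimal cookie stream for reference (glibc-specific; my own example, not part of the patch):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#endif
#include <stdio.h>
#include <sys/types.h>

// A write-only stream that counts bytes; under the sanitizer this function is
// reached through WrappedCookie::real_io_funcs.write.
static ssize_t count_write(void *cookie, const char *buf, size_t size) {
  (void)buf;
  *(size_t *)cookie += size;
  return (ssize_t)size;
}

int main() {
  size_t written = 0;
  cookie_io_functions_t io = {};
  io.write = count_write;
  FILE *f = fopencookie(&written, "w", io);
  if (!f) return 1;
  fputs("hello", f);
  fclose(f);            // flush drives count_write through the wrapper
  return written == 5 ? 0 : 1;
}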
+#if SANITIZER_INTERCEPT_SEM
+INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
+ // Work around a bug in glibc's "old" semaphore implementation by
+ // zero-initializing the sem_t contents. This has to be done here because
+ // interceptors bind to the lowest symbol version by default, hitting the
+ // buggy code path, while the non-sanitized build of the same code works fine.
+ REAL(memset)(s, 0, sizeof(*s));
+ int res = REAL(sem_init)(s, pshared, value);
+ return res;
+}
+
+INTERCEPTOR(int, sem_destroy, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_destroy, s);
+ int res = REAL(sem_destroy)(s);
+ return res;
+}
+
+INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_wait, s);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_wait)(s);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_timedwait, __sanitizer_sem_t *s, void *abstime) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_timedwait, s, abstime);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, abstime, struct_timespec_sz);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_timedwait)(s, abstime);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_post, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_post, s);
+ COMMON_INTERCEPTOR_RELEASE(ctx, (uptr)s);
+ int res = REAL(sem_post)(s);
+ return res;
+}
+
+INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_getvalue, s, sval);
+ int res = REAL(sem_getvalue)(s, sval);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sval, sizeof(*sval));
+ }
+ return res;
+}
+#define INIT_SEM \
+ COMMON_INTERCEPT_FUNCTION(sem_init); \
+ COMMON_INTERCEPT_FUNCTION(sem_destroy); \
+ COMMON_INTERCEPT_FUNCTION(sem_wait); \
+ COMMON_INTERCEPT_FUNCTION(sem_trywait); \
+ COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
+ COMMON_INTERCEPT_FUNCTION(sem_post); \
+ COMMON_INTERCEPT_FUNCTION(sem_getvalue);
+#else
+#define INIT_SEM
+#endif // SANITIZER_INTERCEPT_SEM
+
+#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcancelstate, state, oldstate);
+ int res = REAL(pthread_setcancelstate)(state, oldstate);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldstate, sizeof(*oldstate));
+ return res;
+}
+
+INTERCEPTOR(int, pthread_setcanceltype, int type, int *oldtype) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcanceltype, type, oldtype);
+ int res = REAL(pthread_setcanceltype)(type, oldtype);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldtype, sizeof(*oldtype));
+ return res;
+}
+#define INIT_PTHREAD_SETCANCEL \
+ COMMON_INTERCEPT_FUNCTION(pthread_setcancelstate); \
+ COMMON_INTERCEPT_FUNCTION(pthread_setcanceltype);
+#else
+#define INIT_PTHREAD_SETCANCEL
+#endif
+
+#if SANITIZER_INTERCEPT_MINCORE
+INTERCEPTOR(int, mincore, void *addr, uptr length, unsigned char *vec) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mincore, addr, length, vec);
+ int res = REAL(mincore)(addr, length, vec);
+ if (res == 0) {
+ uptr page_size = GetPageSizeCached();
+ uptr vec_size = ((length + page_size - 1) & (~(page_size - 1))) / page_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, vec, vec_size);
+ }
+ return res;
+}
+#define INIT_MINCORE COMMON_INTERCEPT_FUNCTION(mincore);
+#else
+#define INIT_MINCORE
+#endif
+
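The vec_size expression rounds length up to a whole number of pages and then divides, i.e. one vec byte per page touched. A tiny worked example of that arithmetic, with illustrative values:

#include <cstdio>

int main() {
  unsigned long page_size = 4096, length = 8193;
  unsigned long vec_size =
      ((length + page_size - 1) & ~(page_size - 1)) / page_size;
  printf("%lu\n", vec_size);   // 8193 bytes span 3 pages, so prints 3
  return vec_size == 3 ? 0 : 1;
}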
+#if SANITIZER_INTERCEPT_PROCESS_VM_READV
+INTERCEPTOR(SSIZE_T, process_vm_readv, int pid, __sanitizer_iovec *local_iov,
+ uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,
+ uptr flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, process_vm_readv, pid, local_iov, liovcnt,
+ remote_iov, riovcnt, flags);
+ SSIZE_T res = REAL(process_vm_readv)(pid, local_iov, liovcnt, remote_iov,
+ riovcnt, flags);
+ if (res > 0)
+ write_iovec(ctx, local_iov, liovcnt, res);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, process_vm_writev, int pid, __sanitizer_iovec *local_iov,
+ uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,
+ uptr flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, process_vm_writev, pid, local_iov, liovcnt,
+ remote_iov, riovcnt, flags);
+ SSIZE_T res = REAL(process_vm_writev)(pid, local_iov, liovcnt, remote_iov,
+ riovcnt, flags);
+ if (res > 0)
+ read_iovec(ctx, local_iov, liovcnt, res);
+ return res;
+}
+#define INIT_PROCESS_VM_READV \
+ COMMON_INTERCEPT_FUNCTION(process_vm_readv); \
+ COMMON_INTERCEPT_FUNCTION(process_vm_writev);
+#else
+#define INIT_PROCESS_VM_READV
+#endif
+
+static void InitializeCommonInterceptors() {
+ static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
+ interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
+
+ INIT_TEXTDOMAIN;
+ INIT_STRCMP;
+ INIT_STRNCMP;
+ INIT_STRCASECMP;
+ INIT_STRNCASECMP;
+ INIT_STRSTR;
+ INIT_STRCASESTR;
+ INIT_STRSPN;
+ INIT_STRPBRK;
+ INIT_MEMCHR;
+ INIT_MEMCMP;
+ INIT_MEMRCHR;
+ INIT_READ;
+ INIT_PREAD;
+ INIT_PREAD64;
+ INIT_READV;
+ INIT_PREADV;
+ INIT_PREADV64;
+ INIT_WRITE;
+ INIT_PWRITE;
+ INIT_PWRITE64;
+ INIT_WRITEV;
+ INIT_PWRITEV;
+ INIT_PWRITEV64;
+ INIT_PRCTL;
+ INIT_LOCALTIME_AND_FRIENDS;
+ INIT_STRPTIME;
+ INIT_SCANF;
+ INIT_ISOC99_SCANF;
+ INIT_PRINTF;
+ INIT_PRINTF_L;
+ INIT_ISOC99_PRINTF;
+ INIT_FREXP;
+ INIT_FREXPF_FREXPL;
+ INIT_GETPWNAM_AND_FRIENDS;
+ INIT_GETPWNAM_R_AND_FRIENDS;
+ INIT_GETPWENT;
+ INIT_FGETPWENT;
+ INIT_GETPWENT_R;
+ INIT_SETPWENT;
+ INIT_CLOCK_GETTIME;
+ INIT_GETITIMER;
+ INIT_TIME;
+ INIT_GLOB;
+ INIT_WAIT;
+ INIT_WAIT4;
+ INIT_INET;
+ INIT_PTHREAD_GETSCHEDPARAM;
+ INIT_GETADDRINFO;
+ INIT_GETNAMEINFO;
+ INIT_GETSOCKNAME;
+ INIT_GETHOSTBYNAME;
+ INIT_GETHOSTBYNAME_R;
+ INIT_GETHOSTBYNAME2_R;
+ INIT_GETHOSTBYADDR_R;
+ INIT_GETHOSTENT_R;
+ INIT_GETSOCKOPT;
+ INIT_ACCEPT;
+ INIT_ACCEPT4;
+ INIT_MODF;
+ INIT_RECVMSG;
+ INIT_GETPEERNAME;
+ INIT_IOCTL;
+ INIT_INET_ATON;
+ INIT_SYSINFO;
+ INIT_READDIR;
+ INIT_READDIR64;
+ INIT_PTRACE;
+ INIT_SETLOCALE;
+ INIT_GETCWD;
+ INIT_GET_CURRENT_DIR_NAME;
+ INIT_STRTOIMAX;
+ INIT_MBSTOWCS;
+ INIT_MBSNRTOWCS;
+ INIT_WCSTOMBS;
+ INIT_WCSNRTOMBS;
+ INIT_WCRTOMB;
+ INIT_TCGETATTR;
+ INIT_REALPATH;
+ INIT_CANONICALIZE_FILE_NAME;
+ INIT_CONFSTR;
+ INIT_SCHED_GETAFFINITY;
+ INIT_SCHED_GETPARAM;
+ INIT_STRERROR;
+ INIT_STRERROR_R;
+ INIT_XPG_STRERROR_R;
+ INIT_SCANDIR;
+ INIT_SCANDIR64;
+ INIT_GETGROUPS;
+ INIT_POLL;
+ INIT_PPOLL;
+ INIT_WORDEXP;
+ INIT_SIGWAIT;
+ INIT_SIGWAITINFO;
+ INIT_SIGTIMEDWAIT;
+ INIT_SIGSETOPS;
+ INIT_SIGPENDING;
+ INIT_SIGPROCMASK;
+ INIT_BACKTRACE;
+ INIT__EXIT;
+ INIT_PTHREAD_MUTEX_LOCK;
+ INIT_PTHREAD_MUTEX_UNLOCK;
+ INIT_GETMNTENT;
+ INIT_GETMNTENT_R;
+ INIT_STATFS;
+ INIT_STATFS64;
+ INIT_STATVFS;
+ INIT_STATVFS64;
+ INIT_INITGROUPS;
+ INIT_ETHER_NTOA_ATON;
+ INIT_ETHER_HOST;
+ INIT_ETHER_R;
+ INIT_SHMCTL;
+ INIT_RANDOM_R;
+ INIT_PTHREAD_ATTR_GET;
+ INIT_PTHREAD_ATTR_GETINHERITSCHED;
+ INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_MUTEXATTR_GETPSHARED;
+ INIT_PTHREAD_MUTEXATTR_GETTYPE;
+ INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
+ INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST_NP;
+ INIT_PTHREAD_RWLOCKATTR_GETPSHARED;
+ INIT_PTHREAD_RWLOCKATTR_GETKIND_NP;
+ INIT_PTHREAD_CONDATTR_GETPSHARED;
+ INIT_PTHREAD_CONDATTR_GETCLOCK;
+ INIT_PTHREAD_BARRIERATTR_GETPSHARED;
+ INIT_TMPNAM;
+ INIT_TMPNAM_R;
+ INIT_TEMPNAM;
+ INIT_PTHREAD_SETNAME_NP;
+ INIT_SINCOS;
+ INIT_REMQUO;
+ INIT_LGAMMA;
+ INIT_LGAMMA_R;
+ INIT_LGAMMAL_R;
+ INIT_DRAND48_R;
+ INIT_RAND_R;
+ INIT_GETLINE;
+ INIT_ICONV;
+ INIT_TIMES;
+ INIT_TLS_GET_ADDR;
+ INIT_LISTXATTR;
+ INIT_GETXATTR;
+ INIT_GETRESID;
+ INIT_GETIFADDRS;
+ INIT_IF_INDEXTONAME;
+ INIT_CAPGET;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_FTIME;
+ INIT_XDR;
+ INIT_TSEARCH;
+ INIT_LIBIO_INTERNALS;
+ INIT_FOPEN;
+ INIT_FOPEN64;
+ INIT_OPEN_MEMSTREAM;
+ INIT_OBSTACK;
+ INIT_FFLUSH;
+ INIT_FCLOSE;
+ INIT_DLOPEN_DLCLOSE;
+ INIT_GETPASS;
+ INIT_TIMERFD;
+ INIT_MLOCKX;
+ INIT_FOPENCOOKIE;
+ INIT_SEM;
+ INIT_PTHREAD_SETCANCEL;
+ INIT_MINCORE;
+ INIT_PROCESS_VM_READV;
+}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
new file mode 100644
index 00000000000..9133be7097d
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -0,0 +1,558 @@
+//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf/printf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
+// with a few common GNU extensions.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdarg.h>
+
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
+}
+
+static const char *maybe_parse_param_index(const char *p, int *out) {
+ // n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ CHECK(q);
+ if (*q == '$') {
+ *out = number;
+ p = q + 1;
+ }
+ }
+
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ return p;
+}
+
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
+
+static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
+ if (char_is_one_of(*p, "jztLq")) {
+ ll[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ ll[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ ll[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ ll[0] = 'l';
+ ++p;
+ if (*p == 'l') {
+ ll[1] = 'l';
+ ++p;
+ }
+ }
+ return p;
+}
+
+// Returns true if the character is an integer conversion specifier.
+static bool format_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is a floating-point conversion specifier.
+static bool format_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns string output character size for string-like conversions,
+// or 0 if the conversion is invalid.
+static int format_get_char_size(char convSpecifier,
+ const char lengthModifier[2]) {
+ if (char_is_one_of(convSpecifier, "CS")) {
+ return sizeof(wchar_t);
+ }
+
+ if (char_is_one_of(convSpecifier, "cs[")) {
+ if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
+ return sizeof(wchar_t);
+ else if (lengthModifier[0] == '\0')
+ return sizeof(char);
+ }
+
+ return 0;
+}
+
+enum FormatStoreSize {
+ // Store size not known in advance; can be calculated as wcslen() of the
+ // destination buffer.
+ FSS_WCSLEN = -2,
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ FSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ FSS_INVALID = 0
+};
+
+// Returns the memory size of a format directive (if >0), or a value of
+// FormatStoreSize.
+static int format_get_value_size(char convSpecifier,
+ const char lengthModifier[2],
+ bool promote_float) {
+ if (format_is_integer_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'h':
+ return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'q':
+ return sizeof(long long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (format_is_float_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ // Printf promotes floats to doubles but scanf does not
+ return promote_float ? sizeof(double) : sizeof(float);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (convSpecifier == 'p') {
+ if (lengthModifier[0] != 0)
+ return FSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return FSS_INVALID;
+}
+
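A few spot checks of the mapping above, assuming common LP64 sizes; they are written as comments rather than a compiled test because the helper is file-static inside this .inc:

// format_get_value_size('d', "hh", false) == sizeof(char)     // %hhd
// format_get_value_size('d', "l",  false) == sizeof(long)     // %ld
// format_get_value_size('f', "",   true)  == sizeof(double)   // printf %f (promoted)
// format_get_value_size('f', "",   false) == sizeof(float)    // scanf %f
// format_get_value_size('p', "",   false) == sizeof(void *)   // %p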
+struct ScanfDirective {
+ int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ const char *begin;
+ const char *end;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
+};
+
+// Parse scanf format string. If a valid directive is encountered, it is
+// returned in dir. This function returns a pointer to the first
+// unprocessed character, or 0 in case of error.
+// At the end of the string, a pointer to the closing \0 is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return nullptr;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->argIdx);
+ CHECK(p);
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
+ ++p;
+ }
+ // Field width
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ CHECK(p);
+ if (dir->fieldWidth <= 0) // A width, if specified, must be non-zero.
+ return nullptr;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
+ ++p;
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return nullptr; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+ // This is unfortunately ambiguous between the old GNU extension
+ // of %as, %aS and %a[...] and the newer POSIX %a followed by
+ // the letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+ // Watch for %a[h-j%d], if % appears in the
+ // [...] range, then we need to give up, we don't know
+ // if scanf will parse it as POSIX %a [h-j %d ] or
+ // GNU allocation of string with range dh-j plus %.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return nullptr;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int scanf_get_value_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return FSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return FSS_INVALID;
+ // This is ambiguous, so check the smaller size of char * (if it is
+ // a GNU extension of %as, %aS or %a[...]) and float (if it is
+ // POSIX %a followed by s, S or [ letters).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
+ bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (dir->fieldWidth == 0) {
+ if (!needsTerminator)
+ return charSize;
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return (dir->fieldWidth + needsTerminator) * charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
+}
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ while (*p) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
+ continue;
+ int size = scanf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("WARNING: unexpected format specifier in scanf interceptor: "
+ "%.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (n_inputs < 0)
+ break;
+ if (size == FSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
+ } else if (size == FSS_WCSLEN) {
+ // FIXME: actually use wcslen() to calculate it.
+ size = 0;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ }
+}
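As an orientation aid (illustrative sketch only, not part of the patch), a vscanf-style interceptor copies the va_list, runs the real function first, and only then calls scanf_common with the number of successfully converted items; the real wrappers in sanitizer_common_interceptors.inc are macro-generated and may differ in detail:

  INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap) {
    void *ctx;
    COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap);
    va_list aq;
    va_copy(aq, ap);                  // scanf_common consumes its own copy
    int res = REAL(vsscanf)(str, format, ap);
    if (res > 0)                      // res = number of items converted
      scanf_common(ctx, res, /*allowGnuMalloc=*/true, format, aq);
    va_end(aq);
    return res;
  }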
+
+#if SANITIZER_INTERCEPT_PRINTF
+
+struct PrintfDirective {
+ int fieldWidth;
+ int fieldPrecision;
+ int argIdx; // width argument index, or -1 if not specified ("%*n$")
+ int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
+ const char *begin;
+ const char *end;
+ bool starredWidth;
+ bool starredPrecision;
+ char lengthModifier[2];
+ char convSpecifier;
+};
+
+static const char *maybe_parse_number(const char *p, int *out) {
+ if (*p >= '0' && *p <= '9')
+ p = parse_number(p, out);
+ return p;
+}
+
+static const char *maybe_parse_number_or_star(const char *p, int *out,
+ bool *star) {
+ if (*p == '*') {
+ *star = true;
+ ++p;
+ } else {
+ *star = false;
+ p = maybe_parse_number(p, out);
+ }
+ return p;
+}
+
+// Parse printf format string. Same return convention as scanf_parse_next.
+static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+ dir->precisionIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return nullptr;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ // Flags
+ while (char_is_one_of(*p, "'-+ #0")) {
+ ++p;
+ }
+ // Field width
+ p = maybe_parse_number_or_star(p, &dir->fieldWidth,
+ &dir->starredWidth);
+ if (!p)
+ return nullptr;
+ // Precision
+ if (*p == '.') {
+ ++p;
+ // Actual precision is optional (surprise!)
+ p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
+ &dir->starredPrecision);
+ if (!p)
+ return nullptr;
+ // m$
+ if (dir->starredPrecision) {
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ }
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ dir->end = p;
+ break;
+ }
+ return p;
+}
+
+static int printf_get_value_size(PrintfDirective *dir) {
+ if (dir->convSpecifier == 'm') {
+ return sizeof(char *);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS")) {
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (char_is_one_of(dir->convSpecifier, "sS")) {
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
+}
+
+#define SKIP_SCALAR_ARG(aq, convSpecifier, size) \
+ do { \
+ if (format_is_float_conv(convSpecifier)) { \
+ switch (size) { \
+ case 8: \
+ va_arg(*aq, double); \
+ break; \
+ case 12: \
+ va_arg(*aq, long double); \
+ break; \
+ case 16: \
+ va_arg(*aq, long double); \
+ break; \
+ default: \
+ Report("WARNING: unexpected floating-point arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } else { \
+ switch (size) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ va_arg(*aq, u32); \
+ break; \
+ case 8: \
+ va_arg(*aq, u64); \
+ break; \
+ default: \
+ Report("WARNING: unexpected arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } \
+ } while (0)
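The size buckets above follow from C's default argument promotions: a variadic callee never receives a char, short or float, so sizes 1, 2 and 4 are all read as a u32 and any float argument arrives as a double. A minimal standalone illustration (not part of the patch):

  #include <stdarg.h>
  #include <stdio.h>

  static void take(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    int c = va_arg(ap, int);        // the char argument was promoted to int
    double f = va_arg(ap, double);  // the float argument was promoted to double
    printf("%d %f\n", c, f);
    va_end(ap);
  }

  int main() {
    take("%hhd %f", (char)7, 1.5f);
    return 0;
  }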
+
+// Common part of *printf interceptors.
+// Process format string and va_list, and report all load ranges.
+static void printf_common(void *ctx, const char *format, va_list aq) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ const char *p = format;
+
+ while (*p) {
+ PrintfDirective dir;
+ p = printf_parse_next(p, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1 || dir.precisionIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.starredWidth) {
+ // Dynamic width
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ if (dir.starredPrecision) {
+ // Dynamic precision
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ int size = printf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("WARNING: unexpected format specifier in printf "
+ "interceptor: %.*s\n", dir.end - dir.begin, dir.begin);
+ break;
+ }
+ if (dir.convSpecifier == 'n') {
+ void *argp = va_arg(aq, void *);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ continue;
+ } else if (size == FSS_STRLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ if (dir.starredPrecision) {
+ // FIXME: properly support starred precision for strings.
+ size = 0;
+ } else if (dir.fieldPrecision > 0) {
+ // Won't read more than "precision" symbols.
+ size = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (size < dir.fieldPrecision) size++;
+ } else {
+ // Whole string will be accessed.
+ size = internal_strlen((const char *)argp) + 1;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else if (size == FSS_WCSLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ // FIXME: Properly support wide-character strings (via wcsrtombs).
+ size = 0;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else {
+ // Skip non-pointer args
+ SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
+ }
+ }
+}
+
+#endif // SANITIZER_INTERCEPT_PRINTF
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index ac8cdae5b81..6c5fda09fbf 100755
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -12,14 +12,18 @@
struct ioctl_desc {
unsigned req;
- // FIXME: support read+write arguments. Those are currently marked as WRITE.
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+ // XXX: The declarations below may use WRITE instead of READWRITE, unless
+ // explicitly noted.
enum {
NONE,
READ,
WRITE,
+ READWRITE,
CUSTOM
- } type : 2;
- unsigned size : 30;
+ } type : 3;
+ unsigned size : 29;
const char* name;
};
@@ -487,11 +491,15 @@ static void ioctl_init() {
// Handle the most evil ioctls that encode argument value as part of request id.
static unsigned ioctl_request_fixup(unsigned req) {
#if SANITIZER_LINUX
- if ((req & ~0x3fff001fU) == IOCTL_EVIOCGBIT)
+ // Strip size and event number.
+ const unsigned kEviocgbitMask =
+ (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
+ if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
return IOCTL_EVIOCGBIT;
- if ((req & ~0x3fU) == IOCTL_EVIOCGABS)
+ // Strip absolute axis number.
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
return IOCTL_EVIOCGABS;
- if ((req & ~0x3fU) == IOCTL_EVIOCSABS)
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
return IOCTL_EVIOCSABS;
#endif
return req;
@@ -510,7 +518,38 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) {
if (left == right && ioctl_table[left].req == req)
return ioctl_table + left;
else
- return 0;
+ return nullptr;
+}
+
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 0xFFFF) return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ // Size can be 0 iff type is NONE.
+ if ((desc->type == IOC_NONE) != (desc->size == 0)) return false;
+ // Sanity check.
+ if (IOC_TYPE(req) == 0) return false;
+ return true;
}
static const ioctl_desc *ioctl_lookup(unsigned req) {
@@ -519,48 +558,41 @@ static const ioctl_desc *ioctl_lookup(unsigned req) {
if (desc) return desc;
// Try stripping access size from the request id.
- desc = ioctl_table_lookup(req & ~0x3fff0000U);
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
// Sanity check: requests that encode access size are either read or write and
// have size of 0 in the table.
if (desc && desc->size == 0 &&
- (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READ))
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
return desc;
- return 0;
+ return nullptr;
}
static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
unsigned request, void *arg) {
- if (desc->type == ioctl_desc::READ) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
}
if (desc->type != ioctl_desc::CUSTOM)
return;
- switch (request) {
- case 0x00008912: { // SIOCGIFCONF
- struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
- break;
- }
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
}
- return;
}
static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
unsigned request, void *arg) {
- if (desc->type == ioctl_desc::WRITE) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
// FIXME: add verbose output
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
}
if (desc->type != ioctl_desc::CUSTOM)
return;
- switch (request) {
- case 0x00008912: { // SIOCGIFCONF
- struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
- break;
- }
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
}
- return;
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
deleted file mode 100644
index 2660dada2be..00000000000
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
+++ /dev/null
@@ -1,309 +0,0 @@
-//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Scanf implementation for use in *Sanitizer interceptors.
-// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
-// with a few common GNU extensions.
-//
-//===----------------------------------------------------------------------===//
-#include <stdarg.h>
-
-struct ScanfDirective {
- int argIdx; // argument index, or -1 of not specified ("%n$")
- int fieldWidth;
- bool suppressed; // suppress assignment ("*")
- bool allocate; // allocate space ("m")
- char lengthModifier[2];
- char convSpecifier;
- bool maybeGnuMalloc;
-};
-
-static const char *parse_number(const char *p, int *out) {
- *out = internal_atoll(p);
- while (*p >= '0' && *p <= '9')
- ++p;
- return p;
-}
-
-static bool char_is_one_of(char c, const char *s) {
- return !!internal_strchr(s, c);
-}
-
-// Parse scanf format string. If a valid directive in encountered, it is
-// returned in dir. This function returns the pointer to the first
-// unprocessed character, or 0 in case of error.
-// In case of the end-of-string, a pointer to the closing \0 is returned.
-static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
- ScanfDirective *dir) {
- internal_memset(dir, 0, sizeof(*dir));
- dir->argIdx = -1;
-
- while (*p) {
- if (*p != '%') {
- ++p;
- continue;
- }
- ++p;
- // %%
- if (*p == '%') {
- ++p;
- continue;
- }
- if (*p == '\0') {
- return 0;
- }
- // %n$
- if (*p >= '0' && *p <= '9') {
- int number;
- const char *q = parse_number(p, &number);
- if (*q == '$') {
- dir->argIdx = number;
- p = q + 1;
- }
- // Otherwise, do not change p. This will be re-parsed later as the field
- // width.
- }
- // *
- if (*p == '*') {
- dir->suppressed = true;
- ++p;
- }
- // Field width.
- if (*p >= '0' && *p <= '9') {
- p = parse_number(p, &dir->fieldWidth);
- if (dir->fieldWidth <= 0)
- return 0;
- }
- // m
- if (*p == 'm') {
- dir->allocate = true;
- ++p;
- }
- // Length modifier.
- if (char_is_one_of(*p, "jztLq")) {
- dir->lengthModifier[0] = *p;
- ++p;
- } else if (*p == 'h') {
- dir->lengthModifier[0] = 'h';
- ++p;
- if (*p == 'h') {
- dir->lengthModifier[1] = 'h';
- ++p;
- }
- } else if (*p == 'l') {
- dir->lengthModifier[0] = 'l';
- ++p;
- if (*p == 'l') {
- dir->lengthModifier[1] = 'l';
- ++p;
- }
- }
- // Conversion specifier.
- dir->convSpecifier = *p++;
- // Consume %[...] expression.
- if (dir->convSpecifier == '[') {
- if (*p == '^')
- ++p;
- if (*p == ']')
- ++p;
- while (*p && *p != ']')
- ++p;
- if (*p == 0)
- return 0; // unexpected end of string
- // Consume the closing ']'.
- ++p;
- }
- // This is unfortunately ambiguous between old GNU extension
- // of %as, %aS and %a[...] and newer POSIX %a followed by
- // letters s, S or [.
- if (allowGnuMalloc && dir->convSpecifier == 'a' &&
- !dir->lengthModifier[0]) {
- if (*p == 's' || *p == 'S') {
- dir->maybeGnuMalloc = true;
- ++p;
- } else if (*p == '[') {
- // Watch for %a[h-j%d], if % appears in the
- // [...] range, then we need to give up, we don't know
- // if scanf will parse it as POSIX %a [h-j %d ] or
- // GNU allocation of string with range dh-j plus %.
- const char *q = p + 1;
- if (*q == '^')
- ++q;
- if (*q == ']')
- ++q;
- while (*q && *q != ']' && *q != '%')
- ++q;
- if (*q == 0 || *q == '%')
- return 0;
- p = q + 1; // Consume the closing ']'.
- dir->maybeGnuMalloc = true;
- }
- }
- break;
- }
- return p;
-}
-
-// Returns true if the character is an integer conversion specifier.
-static bool scanf_is_integer_conv(char c) {
- return char_is_one_of(c, "diouxXn");
-}
-
-// Returns true if the character is an floating point conversion specifier.
-static bool scanf_is_float_conv(char c) {
- return char_is_one_of(c, "aAeEfFgG");
-}
-
-// Returns string output character size for string-like conversions,
-// or 0 if the conversion is invalid.
-static int scanf_get_char_size(ScanfDirective *dir) {
- if (char_is_one_of(dir->convSpecifier, "CS")) {
- // wchar_t
- return 0;
- }
-
- if (char_is_one_of(dir->convSpecifier, "cs[")) {
- if (dir->lengthModifier[0] == 'l')
- // wchar_t
- return 0;
- else if (dir->lengthModifier[0] == 0)
- return sizeof(char);
- else
- return 0;
- }
-
- return 0;
-}
-
-enum ScanfStoreSize {
- // Store size not known in advance; can be calculated as strlen() of the
- // destination buffer.
- SSS_STRLEN = -1,
- // Invalid conversion specifier.
- SSS_INVALID = 0
-};
-
-// Returns the store size of a scanf directive (if >0), or a value of
-// ScanfStoreSize.
-static int scanf_get_store_size(ScanfDirective *dir) {
- if (dir->allocate) {
- if (!char_is_one_of(dir->convSpecifier, "cCsS["))
- return SSS_INVALID;
- return sizeof(char *);
- }
-
- if (dir->maybeGnuMalloc) {
- if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
- return SSS_INVALID;
- // This is ambiguous, so check the smaller size of char * (if it is
- // a GNU extension of %as, %aS or %a[...]) and float (if it is
- // POSIX %a followed by s, S or [ letters).
- return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
- }
-
- if (scanf_is_integer_conv(dir->convSpecifier)) {
- switch (dir->lengthModifier[0]) {
- case 'h':
- return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
- case 'l':
- return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
- case 'L':
- return sizeof(long long);
- case 'j':
- return sizeof(INTMAX_T);
- case 'z':
- return sizeof(SIZE_T);
- case 't':
- return sizeof(PTRDIFF_T);
- case 0:
- return sizeof(int);
- default:
- return SSS_INVALID;
- }
- }
-
- if (scanf_is_float_conv(dir->convSpecifier)) {
- switch (dir->lengthModifier[0]) {
- case 'L':
- case 'q':
- return sizeof(long double);
- case 'l':
- return dir->lengthModifier[1] == 'l' ? sizeof(long double)
- : sizeof(double);
- case 0:
- return sizeof(float);
- default:
- return SSS_INVALID;
- }
- }
-
- if (char_is_one_of(dir->convSpecifier, "sS[")) {
- unsigned charSize = scanf_get_char_size(dir);
- if (charSize == 0)
- return SSS_INVALID;
- if (dir->fieldWidth == 0)
- return SSS_STRLEN;
- return (dir->fieldWidth + 1) * charSize;
- }
-
- if (char_is_one_of(dir->convSpecifier, "cC")) {
- unsigned charSize = scanf_get_char_size(dir);
- if (charSize == 0)
- return SSS_INVALID;
- if (dir->fieldWidth == 0)
- return charSize;
- return dir->fieldWidth * charSize;
- }
-
- if (dir->convSpecifier == 'p') {
- if (dir->lengthModifier[1] != 0)
- return SSS_INVALID;
- return sizeof(void *);
- }
-
- return SSS_INVALID;
-}
-
-// Common part of *scanf interceptors.
-// Process format string and va_list, and report all store ranges.
-// Stops when "consuming" n_inputs input items.
-static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
- const char *format, va_list aq) {
- CHECK_GT(n_inputs, 0);
- const char *p = format;
-
- while (*p) {
- ScanfDirective dir;
- p = scanf_parse_next(p, allowGnuMalloc, &dir);
- if (!p)
- break;
- if (dir.convSpecifier == 0) {
- // This can only happen at the end of the format string.
- CHECK_EQ(*p, 0);
- break;
- }
- // Here the directive is valid. Do what it says.
- if (dir.argIdx != -1) {
- // Unsupported.
- break;
- }
- if (dir.suppressed)
- continue;
- int size = scanf_get_store_size(&dir);
- if (size == SSS_INVALID)
- break;
- void *argp = va_arg(aq, void *);
- if (dir.convSpecifier != 'n')
- --n_inputs;
- if (n_inputs < 0)
- break;
- if (size == SSS_STRLEN) {
- size = internal_strlen((const char *)argp) + 1;
- }
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- }
-}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
index 215a61deab6..5a76c4ebd8b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
@@ -10,26 +10,125 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_stackdepot.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+
+#if SANITIZER_POSIX
+#include "sanitizer_posix.h"
+#endif
namespace __sanitizer {
-bool PrintsToTty() {
- MaybeOpenReportFile();
- return internal_isatty(report_fd) != 0;
+bool ReportFile::SupportsColors() {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ return SupportsColoredOutput(fd);
}
-bool PrintsToTtyCached() {
+bool ColorizeReports() {
// FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
// printing on Windows.
if (SANITIZER_WINDOWS)
- return 0;
+ return false;
+
+ const char *flag = common_flags()->color;
+ return internal_strcmp(flag, "always") == 0 ||
+ (internal_strcmp(flag, "auto") == 0 && report_file.SupportsColors());
+}
+
+static void (*sandboxing_callback)();
+void SetSandboxingCallback(void (*f)()) {
+ sandboxing_callback = f;
+}
- static int cached = 0;
- static bool prints_to_tty;
- if (!cached) { // Not thread-safe.
- prints_to_tty = PrintsToTty();
- cached = 1;
+void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+ if (!common_flags()->print_summary)
+ return;
+ if (stack->size == 0) {
+ ReportErrorSummary(error_type);
+ return;
}
- return prints_to_tty;
+ // Currently, we include the first stack frame in the report summary.
+ // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
+ uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ ReportErrorSummary(error_type, frame->info);
+ frame->ClearAll();
+}
+
+static void (*SoftRssLimitExceededCallback)(bool exceeded);
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
+ CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
+ SoftRssLimitExceededCallback = Callback;
}
+
+void BackgroundThread(void *arg) {
+ uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
+ uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ uptr prev_reported_rss = 0;
+ uptr prev_reported_stack_depot_size = 0;
+ bool reached_soft_rss_limit = false;
+ while (true) {
+ SleepForMillis(100);
+ uptr current_rss_mb = GetRSS() >> 20;
+ if (Verbosity()) {
+ // If RSS has grown 10% since last time, print some information.
+ if (prev_reported_rss * 11 / 10 < current_rss_mb) {
+ Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
+ prev_reported_rss = current_rss_mb;
+ }
+ // If stack depot has grown 10% since last time, print it too.
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ if (prev_reported_stack_depot_size * 11 / 10 <
+ stack_depot_stats->allocated) {
+ Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
+ SanitizerToolName,
+ stack_depot_stats->n_uniq_ids,
+ stack_depot_stats->allocated >> 20);
+ prev_reported_stack_depot_size = stack_depot_stats->allocated;
+ }
+ }
+ // Check RSS against the limit.
+ if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
+ Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
+ DumpProcessMap();
+ Die();
+ }
+ if (soft_rss_limit_mb) {
+ if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
+ reached_soft_rss_limit = true;
+ Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
+ if (SoftRssLimitExceededCallback)
+ SoftRssLimitExceededCallback(true);
+ } else if (soft_rss_limit_mb >= current_rss_mb &&
+ reached_soft_rss_limit) {
+ reached_soft_rss_limit = false;
+ if (SoftRssLimitExceededCallback)
+ SoftRssLimitExceededCallback(false);
+ }
+ }
+ }
+}
+
+void MaybeStartBackgroudThread() {
+#if SANITIZER_LINUX // Need to implement/test on other platforms.
+ // Start the background thread if one of the rss limits is given.
+ if (!common_flags()->hard_rss_limit_mb &&
+ !common_flags()->soft_rss_limit_mb) return;
+ if (!&real_pthread_create) return; // Can't spawn the thread anyway.
+ internal_start_thread(BackgroundThread, nullptr);
+#endif
+}
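As a usage note (assuming the standard colon-separated common-flags parsing of ASAN_OPTIONS; illustrative only, not part of the patch), the background thread is enabled simply by setting one of the RSS limits, e.g.:

  ASAN_OPTIONS=hard_rss_limit_mb=4096:soft_rss_limit_mb=2048:verbosity=1 ./a.out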
+
} // namespace __sanitizer
+
+void NOINLINE
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
+ PrepareForSandboxing(args);
+ if (sandboxing_callback)
+ sandboxing_callback();
+}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
index 75f7d1d1e89..616b39cfa95 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
@@ -232,6 +232,7 @@ POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(adjtimex)(void *txc_p) {}
POST_SYSCALL(adjtimex)(long res, void *txc_p) {
@@ -239,6 +240,7 @@ POST_SYSCALL(adjtimex)(long res, void *txc_p) {
if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
}
}
+#endif
PRE_SYSCALL(times)(void *tbuf) {}
@@ -384,24 +386,21 @@ PRE_SYSCALL(acct)(const void *name) {
POST_SYSCALL(acct)(long res, const void *name) {}
-PRE_SYSCALL(capget)(void *header, void *dataptr) {}
+PRE_SYSCALL(capget)(void *header, void *dataptr) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+}
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
- if (res >= 0) {
- if (header) POST_WRITE(header, __user_cap_header_struct_sz);
+ if (res >= 0)
if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
- }
}
PRE_SYSCALL(capset)(void *header, const void *data) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
if (data) PRE_READ(data, __user_cap_data_struct_sz);
}
-POST_SYSCALL(capset)(long res, void *header, const void *data) {
- if (res >= 0) {
- if (header) POST_WRITE(header, __user_cap_header_struct_sz);
- }
-}
+POST_SYSCALL(capset)(long res, void *header, const void *data) {}
PRE_SYSCALL(personality)(long personality) {}
@@ -494,6 +493,7 @@ POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
@@ -501,6 +501,7 @@ POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
if (tx) POST_WRITE(tx, struct_timex_sz);
}
}
+#endif
PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
@@ -828,6 +829,7 @@ POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(statfs)(const void *path, void *buf) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
@@ -865,6 +867,7 @@ POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
if (buf) POST_WRITE(buf, struct_statfs64_sz);
}
}
+#endif // !SANITIZER_ANDROID
PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
if (filename)
@@ -918,6 +921,7 @@ POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
@@ -925,6 +929,7 @@ POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
}
}
+#endif // !SANITIZER_ANDROID
PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
if (filename)
@@ -1002,8 +1007,8 @@ PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
void *value, long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1017,8 +1022,8 @@ PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
void *value, long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1029,8 +1034,8 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
long size) {
- if (res >= 0) {
- if (value) POST_WRITE(value, size);
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
}
}
@@ -1040,8 +1045,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
@@ -1051,16 +1056,16 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
- if (res >= 0) {
- if (list) POST_WRITE(list, size);
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
}
}
@@ -1319,13 +1324,13 @@ PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
} else if (op == iocb_cmd_pread && buf && len) {
POST_WRITE(buf, len);
} else if (op == iocb_cmd_pwritev) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
for (uptr v = 0; v < len; v++)
- PRE_READ(iovec[i].iov_base, iovec[i].iov_len);
+ PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
} else if (op == iocb_cmd_preadv) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
for (uptr v = 0; v < len; v++)
- POST_WRITE(iovec[i].iov_base, iovec[i].iov_len);
+ POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
}
// See comment in io_getevents.
COMMON_SYSCALL_RELEASE(data);
@@ -2080,6 +2085,7 @@ POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
@@ -2087,6 +2093,7 @@ POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
}
}
+#endif
PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
@@ -2288,7 +2295,9 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if defined(__i386) || defined (__x86_64)
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__))
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2307,7 +2316,9 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if defined(__i386) || defined (__x86_64)
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__))
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage.cc b/libsanitizer/sanitizer_common/sanitizer_coverage.cc
deleted file mode 100644
index e87b76c0081..00000000000
--- a/libsanitizer/sanitizer_common/sanitizer_coverage.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-//===-- sanitizer_coverage.cc ---------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Sanitizer Coverage.
-// This file implements run-time support for a poor man's coverage tool.
-//
-// Compiler instrumentation:
-// For every function F the compiler injects the following code:
-// if (*Guard) {
-// __sanitizer_cov(&F);
-// *Guard = 1;
-// }
-// It's fine to call __sanitizer_cov more than once for a given function.
-//
-// Run-time:
-// - __sanitizer_cov(pc): record that we've executed a given PC.
-// - __sanitizer_cov_dump: dump the coverage data to disk.
-// For every module of the current process that has coverage data
-// this will create a file module_name.PID.sancov. The file format is simple:
-// it's just a sorted sequence of 4-byte offsets in the module.
-//
-// Eventually, this coverage implementation should be obsoleted by a more
-// powerful general purpose Clang/LLVM coverage instrumentation.
-// Consider this implementation as prototype.
-//
-// FIXME: support (or at least test with) dlclose.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_flags.h"
-
-struct CovData {
- BlockingMutex mu;
- InternalMmapVector<uptr> v;
-};
-
-static uptr cov_data_placeholder[sizeof(CovData) / sizeof(uptr)];
-COMPILER_CHECK(sizeof(cov_data_placeholder) >= sizeof(CovData));
-static CovData *cov_data = reinterpret_cast<CovData*>(cov_data_placeholder);
-
-namespace __sanitizer {
-
-// Simply add the pc into the vector under lock. If the function is called more
-// than once for a given PC it will be inserted multiple times, which is fine.
-static void CovAdd(uptr pc) {
- BlockingMutexLock lock(&cov_data->mu);
- cov_data->v.push_back(pc);
-}
-
-static inline bool CompareLess(const uptr &a, const uptr &b) {
- return a < b;
-}
-
-// Dump the coverage on disk.
-void CovDump() {
-#if !SANITIZER_WINDOWS
- BlockingMutexLock lock(&cov_data->mu);
- InternalMmapVector<uptr> &v = cov_data->v;
- InternalSort(&v, v.size(), CompareLess);
- InternalMmapVector<u32> offsets(v.size());
- const uptr *vb = v.data();
- const uptr *ve = vb + v.size();
- MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- uptr mb, me, off, prot;
- InternalScopedBuffer<char> module(4096);
- InternalScopedBuffer<char> path(4096 * 2);
- for (int i = 0;
- proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
- i++) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
- continue;
- if (vb >= ve) break;
- if (mb <= *vb && *vb < me) {
- offsets.clear();
- const uptr *old_vb = vb;
- CHECK_LE(off, *vb);
- for (; vb < ve && *vb < me; vb++) {
- uptr diff = *vb - (i ? mb : 0) + off;
- CHECK_LE(diff, 0xffffffffU);
- offsets.push_back(static_cast<u32>(diff));
- }
- char *module_name = StripModuleName(module.data());
- internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
- module_name, internal_getpid());
- InternalFree(module_name);
- uptr fd = OpenFile(path.data(), true);
- internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
- internal_close(fd);
- if (common_flags()->verbosity)
- Report(" CovDump: %s: %zd PCs written\n", path.data(), vb - old_vb);
- }
- }
-#endif // !SANITIZER_WINDOWS
-}
-
-} // namespace __sanitizer
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc) {
- CovAdd(reinterpret_cast<uptr>(pc));
-}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
-} // extern "C"
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
new file mode 100644
index 00000000000..c67880468b8
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -0,0 +1,956 @@
+//===-- sanitizer_coverage.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage.
+// This file implements run-time support for a poor man's coverage tool.
+//
+// Compiler instrumentation:
+// For every interesting basic block the compiler injects the following code:
+// if (Guard < 0) {
+// __sanitizer_cov(&Guard);
+// }
+// At the module start up time __sanitizer_cov_module_init sets the guards
+// to consecutive negative numbers (-1, -2, -3, ...).
+// It's fine to call __sanitizer_cov more than once for a given block.
+//
+// Run-time:
+// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
+// and atomically set Guard to -Guard.
+// - __sanitizer_cov_dump: dump the coverage data to disk.
+// For every module of the current process that has coverage data
+// this will create a file module_name.PID.sancov.
+//
+// The file format is simple: the first 8 bytes are the magic,
+// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
+// magic defines the size of the offsets that follow.
+// The rest of the data is the offsets within the module.
+//
+// Eventually, this coverage implementation should be obsoleted by a more
+// powerful general purpose Clang/LLVM coverage instrumentation.
+// Consider this implementation a prototype.
+//
+// FIXME: support (or at least test with) dlclose.
+//===----------------------------------------------------------------------===//
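A hedged sketch of a reader for the *.sancov format described above (offsets are 8 bytes wide when the magic ends in ...64 and 4 bytes wide when it ends in ...32); illustrative only, not part of the patch:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  static bool ReadSancov(const char *path, std::vector<uint64_t> *offsets) {
    FILE *f = fopen(path, "rb");
    if (!f) return false;
    uint64_t magic = 0;
    if (fread(&magic, sizeof(magic), 1, f) != 1) { fclose(f); return false; }
    size_t width = magic == 0xC0BFFFFFFFFFFF64ULL ? 8
                 : magic == 0xC0BFFFFFFFFFFF32ULL ? 4 : 0;
    if (width == 0) { fclose(f); return false; }  // not a sancov file
    while (true) {
      uint64_t off64 = 0;
      uint32_t off32 = 0;
      if (width == 8) {
        if (fread(&off64, 8, 1, f) != 1) break;
        offsets->push_back(off64);
      } else {
        if (fread(&off32, 4, 1, f) != 1) break;
        offsets->push_back(off32);
      }
    }
    fclose(f);
    return true;
  }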
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_flags.h"
+
+static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
+static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
+
+static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
+
+static atomic_uintptr_t coverage_counter;
+static atomic_uintptr_t caller_callee_counter;
+
+static void ResetGlobalCounters() {
+ atomic_store(&coverage_counter, 0, memory_order_relaxed);
+ atomic_store(&caller_callee_counter, 0, memory_order_relaxed);
+}
+
+// pc_array is the array containing the covered PCs.
+// To make the pc_array thread- and async-signal-safe it has to be large enough.
+// 128M counters "ought to be enough for anybody" (4M on 32-bit).
+
+// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
+// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
+// dumps the current memory layout to another file.
+
+static bool cov_sandboxed = false;
+static fd_t cov_fd = kInvalidFd;
+static unsigned int cov_max_block_size = 0;
+static bool coverage_enabled = false;
+static const char *coverage_dir;
+
+namespace __sanitizer {
+
+class CoverageData {
+ public:
+ void Init();
+ void Enable();
+ void Disable();
+ void ReInit();
+ void BeforeFork();
+ void AfterFork(int child_pid);
+ void Extend(uptr npcs);
+ void Add(uptr pc, u32 *guard);
+ void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
+ uptr cache_size);
+ void DumpCallerCalleePairs();
+ void DumpTrace();
+ void DumpAsBitSet();
+ void DumpCounters();
+ void DumpOffsets();
+ void DumpAll();
+
+ ALWAYS_INLINE
+ void TraceBasicBlock(s32 *id);
+
+ void InitializeGuardArray(s32 *guards);
+ void InitializeGuards(s32 *guards, uptr n, const char *module_name,
+ uptr caller_pc);
+ void InitializeCounters(u8 *counters, uptr n);
+ void ReinitializeGuards();
+ uptr GetNumberOf8bitCounters();
+ uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);
+
+ uptr *data();
+ uptr size();
+
+ private:
+ void DirectOpen();
+ void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
+
+ // Maximal size to which the pc array may ever grow.
+ // We MmapNoReserve this space to ensure that the array is contiguous.
+ static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
+ 1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
+ 1 << 27);
+ // The amount by which the file mapping for the pc array is grown.
+ static const uptr kPcArrayMmapSize = 64 * 1024;
+
+ // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
+ // much RAM as it really needs.
+ uptr *pc_array;
+ // Index of the first available pc_array slot.
+ atomic_uintptr_t pc_array_index;
+ // Array size.
+ atomic_uintptr_t pc_array_size;
+ // Current file mapped size of the pc array.
+ uptr pc_array_mapped_size;
+ // Descriptor of the file mapped pc array.
+ fd_t pc_fd;
+
+ // Vector of coverage guard arrays, protected by mu.
+ InternalMmapVectorNoCtor<s32*> guard_array_vec;
+
+ struct NamedPcRange {
+ const char *copied_module_name;
+ uptr beg, end; // elements [beg,end) in pc_array.
+ };
+
+ // Vector of module and compilation unit pc ranges.
+ InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
+ InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
+
+ struct CounterAndSize {
+ u8 *counters;
+ uptr n;
+ };
+
+ InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
+ uptr num_8bit_counters;
+
+ // Caller-Callee (cc) array, size and current index.
+ static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
+ uptr **cc_array;
+ atomic_uintptr_t cc_array_index;
+ atomic_uintptr_t cc_array_size;
+
+ // Tracing event array, size and current pointer.
+ // We record all events (basic block entries) in a global buffer of u32
+ // values. Each such value is the index in pc_array.
+ // So far the tracing is highly experimental:
+ // - not thread-safe;
+ // - does not support long traces;
+ // - not tuned for performance.
+ static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
+ u32 *tr_event_array;
+ uptr tr_event_array_size;
+ u32 *tr_event_pointer;
+ static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
+
+ StaticSpinMutex mu;
+};
+
+static CoverageData coverage_data;
+
+void CovUpdateMapping(const char *path, uptr caller_pc = 0);
+
+void CoverageData::DirectOpen() {
+ InternalScopedString path(kMaxPathLength);
+ internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
+ coverage_dir, internal_getpid());
+ pc_fd = OpenFile(path.data(), RdWr);
+ if (pc_fd == kInvalidFd) {
+ Report("Coverage: failed to open %s for reading/writing\n", path.data());
+ Die();
+ }
+
+ pc_array_mapped_size = 0;
+ CovUpdateMapping(coverage_dir);
+}
+
+void CoverageData::Init() {
+ pc_fd = kInvalidFd;
+}
+
+void CoverageData::Enable() {
+ if (pc_array)
+ return;
+ pc_array = reinterpret_cast<uptr *>(
+ MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ if (common_flags()->coverage_direct) {
+ atomic_store(&pc_array_size, 0, memory_order_relaxed);
+ } else {
+ atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
+ }
+
+ cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
+ sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
+ atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
+ atomic_store(&cc_array_index, 0, memory_order_relaxed);
+
+ // Allocate tr_event_array with a guard page at the end.
+ tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
+ sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
+ "CovInit::tr_event_array"));
+ MprotectNoAccess(
+ reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
+ GetMmapGranularity());
+ tr_event_array_size = kTrEventArrayMaxSize;
+ tr_event_pointer = tr_event_array;
+
+ num_8bit_counters = 0;
+}
+
+void CoverageData::InitializeGuardArray(s32 *guards) {
+ Enable(); // Make sure coverage is enabled at this point.
+ s32 n = guards[0];
+ for (s32 j = 1; j <= n; j++) {
+ uptr idx = atomic_load_relaxed(&pc_array_index);
+ atomic_store_relaxed(&pc_array_index, idx + 1);
+ guards[j] = -static_cast<s32>(idx + 1);
+ }
+}
+
+void CoverageData::Disable() {
+ if (pc_array) {
+ UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
+ pc_array = nullptr;
+ }
+ if (cc_array) {
+ UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
+ cc_array = nullptr;
+ }
+ if (tr_event_array) {
+ UnmapOrDie(tr_event_array,
+ sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
+ GetMmapGranularity());
+ tr_event_array = nullptr;
+ tr_event_pointer = nullptr;
+ }
+ if (pc_fd != kInvalidFd) {
+ CloseFile(pc_fd);
+ pc_fd = kInvalidFd;
+ }
+}
+
+void CoverageData::ReinitializeGuards() {
+ // Assuming single thread.
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ for (uptr i = 0; i < guard_array_vec.size(); i++)
+ InitializeGuardArray(guard_array_vec[i]);
+}
+
+void CoverageData::ReInit() {
+ Disable();
+ if (coverage_enabled) {
+ if (common_flags()->coverage_direct) {
+ // In memory-mapped mode we must extend the new file to the known array
+ // size.
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ uptr npcs = size / sizeof(uptr);
+ Enable();
+ if (size) Extend(npcs);
+ if (coverage_enabled) CovUpdateMapping(coverage_dir);
+ } else {
+ Enable();
+ }
+ }
+ // Re-initialize the guards.
+ // We are single-threaded now, no need to grab any lock.
+ CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
+ ReinitializeGuards();
+}
+
+void CoverageData::BeforeFork() {
+ mu.Lock();
+}
+
+void CoverageData::AfterFork(int child_pid) {
+ // We are single-threaded so it's OK to release the lock early.
+ mu.Unlock();
+ if (child_pid == 0) ReInit();
+}
+
+// Extend coverage PC array to fit additional npcs elements.
+void CoverageData::Extend(uptr npcs) {
+ if (!common_flags()->coverage_direct) return;
+ SpinMutexLock l(&mu);
+
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ size += npcs * sizeof(uptr);
+
+ if (coverage_enabled && size > pc_array_mapped_size) {
+ if (pc_fd == kInvalidFd) DirectOpen();
+ CHECK_NE(pc_fd, kInvalidFd);
+
+ uptr new_mapped_size = pc_array_mapped_size;
+ while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
+ CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
+
+ // Extend the file and map the new space at the end of pc_array.
+ uptr res = internal_ftruncate(pc_fd, new_mapped_size);
+ int err;
+ if (internal_iserror(res, &err)) {
+ Printf("failed to extend raw coverage file: %d\n", err);
+ Die();
+ }
+
+ uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
+ void *p = MapWritableFileToMemory((void *)next_map_base,
+ new_mapped_size - pc_array_mapped_size,
+ pc_fd, pc_array_mapped_size);
+ CHECK_EQ((uptr)p, next_map_base);
+ pc_array_mapped_size = new_mapped_size;
+ }
+
+ atomic_store(&pc_array_size, size, memory_order_release);
+}
+
+void CoverageData::InitializeCounters(u8 *counters, uptr n) {
+ if (!counters) return;
+ CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
+ n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
+ SpinMutexLock l(&mu);
+ counters_vec.push_back({counters, n});
+ num_8bit_counters += n;
+}
+
+void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
+ uptr range_end) {
+ auto sym = Symbolizer::GetOrInit();
+ if (!sym)
+ return;
+ const char *module_name = sym->GetModuleNameForPc(caller_pc);
+ if (!module_name) return;
+ if (module_name_vec.empty() ||
+ module_name_vec.back().copied_module_name != module_name)
+ module_name_vec.push_back({module_name, range_beg, range_end});
+ else
+ module_name_vec.back().end = range_end;
+}
+
+void CoverageData::InitializeGuards(s32 *guards, uptr n,
+ const char *comp_unit_name,
+ uptr caller_pc) {
+ // The array 'guards' has n+1 elements, we use the element zero
+ // to store 'n'.
+ CHECK_LT(n, 1 << 30);
+ guards[0] = static_cast<s32>(n);
+ InitializeGuardArray(guards);
+ SpinMutexLock l(&mu);
+ uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
+ uptr range_beg = range_end - n;
+ comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
+ guard_array_vec.push_back(guards);
+ UpdateModuleNameVec(caller_pc, range_beg, range_end);
+}
+
+static const uptr kBundleCounterBits = 16;
+
+// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64,
+// we insert the global counter into the upper 16 bits of the PC.
+uptr BundlePcAndCounter(uptr pc, uptr counter) {
+ if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
+ return pc;
+ static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
+ if (counter > kMaxCounter)
+ counter = kMaxCounter;
+ CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
+ return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
+}
+
+uptr UnbundlePc(uptr bundle) {
+ if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
+ return bundle;
+ return (bundle << kBundleCounterBits) >> kBundleCounterBits;
+}
+
+uptr UnbundleCounter(uptr bundle) {
+ if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
+ return 0;
+ return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
+}
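Worked example (illustrative): with SANITIZER_WORDSIZE == 64 and coverage_order_pcs enabled, for pc = 0x00007f1234567890 and counter = 5:

  //  BundlePcAndCounter(pc, 5) == 0x00057f1234567890  (counter in the top 16 bits)
  //  UnbundlePc(bundle)        == 0x00007f1234567890
  //  UnbundleCounter(bundle)   == 5
  // Counters larger than 0xffff are clamped to 0xffff before bundling.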
+
+// If guard is negative, atomically set it to -guard and store the PC in
+// pc_array.
+void CoverageData::Add(uptr pc, u32 *guard) {
+ atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
+ s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
+ if (guard_value >= 0) return;
+
+ atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
+ if (!pc_array) return;
+
+ uptr idx = -guard_value - 1;
+ if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
+ return; // May happen after fork when pc_array_index becomes 0.
+ CHECK_LT(idx * sizeof(uptr),
+ atomic_load(&pc_array_size, memory_order_acquire));
+ uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
+ pc_array[idx] = BundlePcAndCounter(pc, counter);
+}
+
+// Registers a pair caller=>callee.
+// When a given caller is seen for the first time, the callee_cache is added
+// to the global array cc_array, callee_cache[0] is set to caller and
+// callee_cache[1] is set to cache_size.
+// Then we are trying to add callee to callee_cache [2,cache_size) if it is
+// not there yet.
+// If the cache is full we drop the callee (may want to fix this later).
+void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
+ uptr cache_size) {
+ if (!cc_array) return;
+ atomic_uintptr_t *atomic_callee_cache =
+ reinterpret_cast<atomic_uintptr_t *>(callee_cache);
+ uptr zero = 0;
+ if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
+ memory_order_seq_cst)) {
+ uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
+ CHECK_LT(idx * sizeof(uptr),
+ atomic_load(&cc_array_size, memory_order_acquire));
+ callee_cache[1] = cache_size;
+ cc_array[idx] = callee_cache;
+ }
+ CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
+ for (uptr i = 2; i < cache_size; i++) {
+ uptr was = 0;
+ if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
+ memory_order_seq_cst)) {
+ atomic_fetch_add(&caller_callee_counter, 1, memory_order_relaxed);
+ return;
+ }
+ if (was == callee) // Already have this callee.
+ return;
+ }
+}
+
+uptr CoverageData::GetNumberOf8bitCounters() {
+ return num_8bit_counters;
+}
+
+// Map every 8-bit counter to an 8-bit bitset entry and clear the counter.
+uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
+ uptr num_new_bits = 0;
+ uptr cur = 0;
+ // For better speed we map 8 counters to 8 bytes of bitset at once.
+ static const uptr kBatchSize = 8;
+ CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
+ for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
+ u8 *c = counters_vec[i].counters;
+ uptr n = counters_vec[i].n;
+ CHECK_EQ(n % 16, 0);
+ CHECK_EQ(cur % kBatchSize, 0);
+ CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
+ if (!bitset) {
+ internal_bzero_aligned16(c, n);
+ cur += n;
+ continue;
+ }
+ for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
+ CHECK_LT(cur, num_8bit_counters);
+ u64 *pc64 = reinterpret_cast<u64*>(c + j);
+ u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
+ u64 c64 = *pc64;
+ u64 old_bits_64 = *pb64;
+ u64 new_bits_64 = old_bits_64;
+ if (c64) {
+ *pc64 = 0;
+ for (uptr k = 0; k < kBatchSize; k++) {
+ u64 x = (c64 >> (8 * k)) & 0xff;
+ if (x) {
+ u64 bit = 0;
+ /**/ if (x >= 128) bit = 128;
+ else if (x >= 32) bit = 64;
+ else if (x >= 16) bit = 32;
+ else if (x >= 8) bit = 16;
+ else if (x >= 4) bit = 8;
+ else if (x >= 3) bit = 4;
+ else if (x >= 2) bit = 2;
+ else if (x >= 1) bit = 1;
+ u64 mask = bit << (8 * k);
+ if (!(new_bits_64 & mask)) {
+ num_new_bits++;
+ new_bits_64 |= mask;
+ }
+ }
+ }
+ *pb64 = new_bits_64;
+ }
+ }
+ }
+ CHECK_EQ(cur, num_8bit_counters);
+ return num_new_bits;
+}
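Worked example (illustrative) of the counter-to-bit mapping above: each byte of the bitset records a rough magnitude class of the 8-bit execution counter, and only bits not previously set are counted as new:

  //  counter 1   -> bit 1      counter 5   -> bit 8    (4 <= x < 8)
  //  counter 2   -> bit 2      counter 20  -> bit 32   (16 <= x < 32)
  //  counter 3   -> bit 4      counter 200 -> bit 128  (x >= 128)
  // The new bits are OR-ed into the existing bitset byte and the counters
  // themselves are zeroed.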
+
+uptr *CoverageData::data() {
+ return pc_array;
+}
+
+uptr CoverageData::size() {
+ return atomic_load(&pc_array_index, memory_order_relaxed);
+}
+
+// Block layout for packed file format: header, followed by module name (no
+// trailing zero), followed by data blob.
+struct CovHeader {
+ int pid;
+ unsigned int module_name_length;
+ unsigned int data_length;
+};
+
+static void CovWritePacked(int pid, const char *module, const void *blob,
+ unsigned int blob_size) {
+ if (cov_fd == kInvalidFd) return;
+ unsigned module_name_length = internal_strlen(module);
+ CovHeader header = {pid, module_name_length, blob_size};
+
+ if (cov_max_block_size == 0) {
+ // Writing to a file. Just go ahead.
+ WriteToFile(cov_fd, &header, sizeof(header));
+ WriteToFile(cov_fd, module, module_name_length);
+ WriteToFile(cov_fd, blob, blob_size);
+ } else {
+ // Writing to a socket. We want to split the data into appropriately sized
+ // blocks.
+ InternalScopedBuffer<char> block(cov_max_block_size);
+ CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
+ uptr header_size_with_module = sizeof(header) + module_name_length;
+ CHECK_LT(header_size_with_module, cov_max_block_size);
+ unsigned int max_payload_size =
+ cov_max_block_size - header_size_with_module;
+ char *block_pos = block.data();
+ internal_memcpy(block_pos, &header, sizeof(header));
+ block_pos += sizeof(header);
+ internal_memcpy(block_pos, module, module_name_length);
+ block_pos += module_name_length;
+ char *block_data_begin = block_pos;
+ const char *blob_pos = (const char *)blob;
+ while (blob_size > 0) {
+ unsigned int payload_size = Min(blob_size, max_payload_size);
+ blob_size -= payload_size;
+ internal_memcpy(block_data_begin, blob_pos, payload_size);
+ blob_pos += payload_size;
+ ((CovHeader *)block.data())->data_length = payload_size;
+ WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
+ }
+ }
+}
+
+// If packed = false: <name>.<pid>.<sancov> (name = module name).
+// If packed = true and name == 0: <pid>.<sancov>.<packed>.
+// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
+// user-supplied).
+static fd_t CovOpenFile(InternalScopedString *path, bool packed,
+ const char *name, const char *extension = "sancov") {
+ path->clear();
+ if (!packed) {
+ CHECK(name);
+ path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
+ extension);
+ } else {
+ if (!name)
+ path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
+ extension);
+ else
+ path->append("%s/%s.%s.packed", coverage_dir, name, extension);
+ }
+ error_t err;
+ fd_t fd = OpenFile(path->data(), WrOnly, &err);
+ if (fd == kInvalidFd)
+ Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
+ path->data(), err);
+ return fd;
+}
+
+// Dump trace PCs and trace events into two separate files.
+void CoverageData::DumpTrace() {
+ uptr max_idx = tr_event_pointer - tr_event_array;
+ if (!max_idx) return;
+ auto sym = Symbolizer::GetOrInit();
+ if (!sym)
+ return;
+ InternalScopedString out(32 << 20);
+ for (uptr i = 0, n = size(); i < n; i++) {
+ const char *module_name = "<unknown>";
+ uptr module_address = 0;
+ sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
+ &module_address);
+ out.append("%s 0x%zx\n", module_name, module_address);
+ }
+ InternalScopedString path(kMaxPathLength);
+ fd_t fd = CovOpenFile(&path, false, "trace-points");
+ if (fd == kInvalidFd) return;
+ WriteToFile(fd, out.data(), out.length());
+ CloseFile(fd);
+
+ fd = CovOpenFile(&path, false, "trace-compunits");
+ if (fd == kInvalidFd) return;
+ out.clear();
+ for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
+ out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
+ WriteToFile(fd, out.data(), out.length());
+ CloseFile(fd);
+
+ fd = CovOpenFile(&path, false, "trace-events");
+ if (fd == kInvalidFd) return;
+ uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
+ u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
+ // The trace file could be huge, and may not be written with a single syscall.
+ while (bytes_to_write) {
+ uptr actually_written;
+ if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
+ actually_written <= bytes_to_write) {
+ bytes_to_write -= actually_written;
+ event_bytes += actually_written;
+ } else {
+ break;
+ }
+ }
+ CloseFile(fd);
+ VReport(1, " CovDump: Trace: %zd PCs written\n", size());
+ VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
+}
+
+// This function dumps the caller=>callee pairs into a file as a sequence of
+// lines like "module_name offset".
+void CoverageData::DumpCallerCalleePairs() {
+ uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
+ if (!max_idx) return;
+ auto sym = Symbolizer::GetOrInit();
+ if (!sym)
+ return;
+ InternalScopedString out(32 << 20);
+ uptr total = 0;
+ for (uptr i = 0; i < max_idx; i++) {
+ uptr *cc_cache = cc_array[i];
+ CHECK(cc_cache);
+ uptr caller = cc_cache[0];
+ uptr n_callees = cc_cache[1];
+ const char *caller_module_name = "<unknown>";
+ uptr caller_module_address = 0;
+ sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
+ &caller_module_address);
+ for (uptr j = 2; j < n_callees; j++) {
+ uptr callee = cc_cache[j];
+ if (!callee) break;
+ total++;
+ const char *callee_module_name = "<unknown>";
+ uptr callee_module_address = 0;
+ sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
+ &callee_module_address);
+ out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
+ caller_module_address, callee_module_name,
+ callee_module_address);
+ }
+ }
+ InternalScopedString path(kMaxPathLength);
+ fd_t fd = CovOpenFile(&path, false, "caller-callee");
+ if (fd == kInvalidFd) return;
+ WriteToFile(fd, out.data(), out.length());
+ CloseFile(fd);
+ VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
+}
+
+// Record the current basic block into the trace event buffer.
+// Every event is a u32 value (the zero-based basic block id, i.e. *id - 1)
+// appended to tr_event_array.
+//
+// This function will eventually be inlined by the compiler.
+void CoverageData::TraceBasicBlock(s32 *id) {
+ // Will trap here if
+ // 1. coverage is not enabled at run-time.
+ // 2. The array tr_event_array is full.
+ *tr_event_pointer = static_cast<u32>(*id - 1);
+ tr_event_pointer++;
+}
+
+void CoverageData::DumpCounters() {
+ if (!common_flags()->coverage_counters) return;
+ uptr n = coverage_data.GetNumberOf8bitCounters();
+ if (!n) return;
+ InternalScopedBuffer<u8> bitset(n);
+ coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
+ InternalScopedString path(kMaxPathLength);
+
+ for (uptr m = 0; m < module_name_vec.size(); m++) {
+ auto r = module_name_vec[m];
+ CHECK(r.copied_module_name);
+ CHECK_LE(r.beg, r.end);
+ CHECK_LE(r.end, size());
+ const char *base_name = StripModuleName(r.copied_module_name);
+ fd_t fd =
+ CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
+ if (fd == kInvalidFd) return;
+ WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
+ CloseFile(fd);
+ VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
+ base_name);
+ }
+}
+
+void CoverageData::DumpAsBitSet() {
+ if (!common_flags()->coverage_bitset) return;
+ if (!size()) return;
+ InternalScopedBuffer<char> out(size());
+ InternalScopedString path(kMaxPathLength);
+ for (uptr m = 0; m < module_name_vec.size(); m++) {
+ uptr n_set_bits = 0;
+ auto r = module_name_vec[m];
+ CHECK(r.copied_module_name);
+ CHECK_LE(r.beg, r.end);
+ CHECK_LE(r.end, size());
+ for (uptr i = r.beg; i < r.end; i++) {
+ uptr pc = UnbundlePc(pc_array[i]);
+ out[i] = pc ? '1' : '0';
+ if (pc)
+ n_set_bits++;
+ }
+ const char *base_name = StripModuleName(r.copied_module_name);
+ fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov");
+ if (fd == kInvalidFd) return;
+ WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
+ CloseFile(fd);
+ VReport(1,
+ " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
+ r.end - r.beg, base_name, n_set_bits);
+ }
+}
+
+void CoverageData::DumpOffsets() {
+ auto sym = Symbolizer::GetOrInit();
+ if (!common_flags()->coverage_pcs) return;
+ CHECK_NE(sym, nullptr);
+ InternalMmapVector<uptr> offsets(0);
+ InternalScopedString path(kMaxPathLength);
+ for (uptr m = 0; m < module_name_vec.size(); m++) {
+ offsets.clear();
+ uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
+ for (uptr i = 0; i < num_words_for_magic; i++)
+ offsets.push_back(0);
+ auto r = module_name_vec[m];
+ CHECK(r.copied_module_name);
+ CHECK_LE(r.beg, r.end);
+ CHECK_LE(r.end, size());
+ for (uptr i = r.beg; i < r.end; i++) {
+ uptr pc = UnbundlePc(pc_array[i]);
+ uptr counter = UnbundleCounter(pc_array[i]);
+ if (!pc) continue; // Not visited.
+ uptr offset = 0;
+ sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
+ offsets.push_back(BundlePcAndCounter(offset, counter));
+ }
+
+ CHECK_GE(offsets.size(), num_words_for_magic);
+ SortArray(offsets.data(), offsets.size());
+ for (uptr i = 0; i < offsets.size(); i++)
+ offsets[i] = UnbundlePc(offsets[i]);
+
+ uptr num_offsets = offsets.size() - num_words_for_magic;
+ u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
+ CHECK_EQ(*magic_p, 0ULL);
+ // FIXME: we may want to write 32-bit offsets even in 64-bit mode
+ // if all the offsets are small enough.
+ *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
+
+ const char *module_name = StripModuleName(r.copied_module_name);
+ if (cov_sandboxed) {
+ if (cov_fd != kInvalidFd) {
+ CovWritePacked(internal_getpid(), module_name, offsets.data(),
+ offsets.size() * sizeof(offsets[0]));
+ VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
+ }
+ } else {
+ // One file per module per process.
+ fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
+ if (fd == kInvalidFd) continue;
+ WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
+ CloseFile(fd);
+ VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
+ }
+ }
+ if (cov_fd != kInvalidFd)
+ CloseFile(cov_fd);
+}
+
+void CoverageData::DumpAll() {
+ if (!coverage_enabled || common_flags()->coverage_direct) return;
+ if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
+ return;
+ DumpAsBitSet();
+ DumpCounters();
+ DumpTrace();
+ DumpOffsets();
+ DumpCallerCalleePairs();
+}
+
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ if (!args) return;
+ if (!coverage_enabled) return;
+ cov_sandboxed = args->coverage_sandboxed;
+ if (!cov_sandboxed) return;
+ cov_max_block_size = args->coverage_max_block_size;
+ if (args->coverage_fd >= 0) {
+ cov_fd = (fd_t)args->coverage_fd;
+ } else {
+ InternalScopedString path(kMaxPathLength);
+ // Pre-open the file now. The sandbox won't allow us to do it later.
+ cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
+ }
+}
+
+fd_t MaybeOpenCovFile(const char *name) {
+ CHECK(name);
+ if (!coverage_enabled) return kInvalidFd;
+ InternalScopedString path(kMaxPathLength);
+ return CovOpenFile(&path, true /* packed */, name);
+}
+
+void CovBeforeFork() {
+ coverage_data.BeforeFork();
+}
+
+void CovAfterFork(int child_pid) {
+ coverage_data.AfterFork(child_pid);
+}
+
+static void MaybeDumpCoverage() {
+ if (common_flags()->coverage)
+ __sanitizer_cov_dump();
+}
+
+void InitializeCoverage(bool enabled, const char *dir) {
+ if (coverage_enabled)
+ return; // May happen if two sanitizers enable coverage in the same process.
+ coverage_enabled = enabled;
+ coverage_dir = dir;
+ coverage_data.Init();
+ if (enabled) coverage_data.Enable();
+ if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
+ AddDieCallback(MaybeDumpCoverage);
+}
+
+void ReInitializeCoverage(bool enabled, const char *dir) {
+ coverage_enabled = enabled;
+ coverage_dir = dir;
+ coverage_data.ReInit();
+}
+
+void CoverageUpdateMapping() {
+ if (coverage_enabled)
+ CovUpdateMapping(coverage_dir);
+}
+
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
+ coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
+ guard);
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
+ atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
+ if (static_cast<s32>(
+ __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
+ __sanitizer_cov(guard);
+}
+SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
+ coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
+ callee, callee_cache16, 16);
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
+ coverage_enabled = true;
+ coverage_dir = common_flags()->coverage_dir;
+ coverage_data.Init();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
+ coverage_data.DumpAll();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
+ const char *comp_unit_name) {
+ coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
+ coverage_data.InitializeCounters(counters, npcs);
+ if (!common_flags()->coverage_direct) return;
+ if (SANITIZER_ANDROID && coverage_enabled) {
+ // dlopen/dlclose interceptors do not work on Android, so we rely on
+ // Extend() calls to update .sancov.map.
+ CovUpdateMapping(coverage_dir, GET_CALLER_PC());
+ }
+ coverage_data.Extend(npcs);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+sptr __sanitizer_maybe_open_cov_file(const char *name) {
+ return (sptr)MaybeOpenCovFile(name);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_total_unique_coverage() {
+ return atomic_load(&coverage_counter, memory_order_relaxed);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_total_unique_caller_callee_pairs() {
+ return atomic_load(&caller_callee_counter, memory_order_relaxed);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_cov_trace_func_enter(s32 *id) {
+ coverage_data.TraceBasicBlock(id);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_cov_trace_basic_block(s32 *id) {
+ coverage_data.TraceBasicBlock(id);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_reset_coverage() {
+ ResetGlobalCounters();
+ coverage_data.ReinitializeGuards();
+ internal_bzero_aligned16(
+ coverage_data.data(),
+ RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_coverage_guards(uptr **data) {
+ *data = coverage_data.data();
+ return coverage_data.size();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_number_of_counters() {
+ return coverage_data.GetNumberOf8bitCounters();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
+ return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
+}
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_switch() {}
+} // extern "C"
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
new file mode 100644
index 00000000000..ebac6812881
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
@@ -0,0 +1,125 @@
+//===-- sanitizer_coverage_mapping.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Mmap-based implementation of sanitizer coverage.
+//
+// This is the part of the code coverage implementation that does not require
+// a __sanitizer_cov_dump() call. Data is stored in two files per process.
+//
+// $pid.sancov.map describes process memory layout in the following text-based
+// format:
+// <pointer size in bits> // 1 line, 32 or 64
+// <mapping start> <mapping end> <base address> <dso name> // repeated
+// ...
+// Mapping lines are NOT sorted. This file is updated every time memory layout
+// is changed (i.e. in dlopen() and dlclose() interceptors).
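+// E.g. for a 64-bit process the file might look like this (module names and
+// addresses below are made up):
+//   64
+//   400000 4a1000 400000 app
+//   7f2d40000000 7f2d40042000 7f2d40000000 libfoo.so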
+//
+// $pid.sancov.raw is a binary dump of PC values, sizeof(uptr) each. Again, not
+// sorted. This file is extended by 64Kb at a time and mapped into memory. It
+// contains one or more 0 words at the end, up to the next 64Kb aligned offset.
+//
+// To convert these 2 files to the usual .sancov format, run sancov.py rawunpack
+// $pid.sancov.raw.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+static const uptr kMaxTextSize = 64 * 1024;
+
+struct CachedMapping {
+ public:
+ bool NeedsUpdate(uptr pc) {
+ int new_pid = internal_getpid();
+ if (last_pid == new_pid && pc && pc >= last_range_start &&
+ pc < last_range_end)
+ return false;
+ last_pid = new_pid;
+ return true;
+ }
+
+ void SetModuleRange(uptr start, uptr end) {
+ last_range_start = start;
+ last_range_end = end;
+ }
+
+ private:
+ uptr last_range_start, last_range_end;
+ int last_pid;
+};
+
+static CachedMapping cached_mapping;
+static StaticSpinMutex mapping_mu;
+
+void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
+ if (!common_flags()->coverage_direct) return;
+
+ SpinMutexLock l(&mapping_mu);
+
+ if (!cached_mapping.NeedsUpdate(caller_pc))
+ return;
+
+ InternalScopedString text(kMaxTextSize);
+
+ {
+ InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
+ CHECK(modules.data());
+ int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
+ /* filter */ nullptr);
+
+ text.append("%d\n", sizeof(uptr) * 8);
+ for (int i = 0; i < n_modules; ++i) {
+ const char *module_name = StripModuleName(modules[i].full_name());
+ uptr base = modules[i].base_address();
+ for (auto iter = modules[i].ranges(); iter.hasNext();) {
+ const auto *range = iter.next();
+ if (range->executable) {
+ uptr start = range->beg;
+ uptr end = range->end;
+ text.append("%zx %zx %zx %s\n", start, end, base, module_name);
+ if (caller_pc && caller_pc >= start && caller_pc < end)
+ cached_mapping.SetModuleRange(start, end);
+ }
+ }
+ modules[i].clear();
+ }
+ }
+
+ error_t err;
+ InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
+ uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
+ "%s/%zd.sancov.map.tmp", coverage_dir,
+ internal_getpid());
+ CHECK_LE(res, tmp_path.size());
+ fd_t map_fd = OpenFile(tmp_path.data(), WrOnly, &err);
+ if (map_fd == kInvalidFd) {
+ Report("Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
+ err);
+ Die();
+ }
+
+ if (!WriteToFile(map_fd, text.data(), text.length(), nullptr, &err)) {
+ Printf("sancov.map write failed: %d\n", err);
+ Die();
+ }
+ CloseFile(map_fd);
+
+ InternalScopedString path(64 + internal_strlen(coverage_dir));
+ res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
+ coverage_dir, internal_getpid());
+ CHECK_LE(res, path.size());
+ if (!RenameFile(tmp_path.data(), path.data(), &err)) {
+ Printf("sancov.map rename failed: %d\n", err);
+ Die();
+ }
+}
+
+} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h
new file mode 100644
index 00000000000..949c486a14a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h
@@ -0,0 +1,410 @@
+//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// The deadlock detector maintains a directed graph of lock acquisitions.
+// When a lock event happens, the detector checks if the locks already held by
+// the current thread are reachable from the newly acquired lock.
+//
+// The detector can handle only a fixed amount of simultaneously live locks
+// (a lock is alive if it has been locked at least once and has not been
+// destroyed). When the maximal number of locks is reached the entire graph
+// is flushed and the new lock epoch is started. The node ids from the old
+// epochs can not be used with any of the detector methods except for
+// nodeBelongsToCurrentEpoch().
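+//
+// A rough usage sketch (test-only API; BV is a bit-vector type such as
+// TwoLevelBitVector<>, and the node payloads below are arbitrary):
+//   DeadlockDetector<BV> d;        d.clear();
+//   DeadlockDetectorTLS<BV> dtls;  dtls.clear();
+//   uptr a = d.newNode(0), b = d.newNode(0);
+//   d.onLock(&dtls, a);   // acquire a
+//   d.onLock(&dtls, b);   // acquire b, adds the edge a->b
+//   // A thread that later locks b and then a gets onLock() == true,
+//   // i.e. a potential lock-order inversion.
+//   d.onUnlock(&dtls, b); d.onUnlock(&dtls, a);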
+//
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_H
+#define SANITIZER_DEADLOCK_DETECTOR_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bvgraph.h"
+
+namespace __sanitizer {
+
+// Thread-local state for DeadlockDetector.
+// It contains the locks currently held by the owning thread.
+template <class BV>
+class DeadlockDetectorTLS {
+ public:
+ // No CTOR.
+ void clear() {
+ bv_.clear();
+ epoch_ = 0;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ bool empty() const { return bv_.empty(); }
+
+ void ensureCurrentEpoch(uptr current_epoch) {
+ if (epoch_ == current_epoch) return;
+ bv_.clear();
+ epoch_ = current_epoch;
+ }
+
+ uptr getEpoch() const { return epoch_; }
+
+ // Returns true if this is the first (non-recursive) acquisition of this lock.
+ bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
+ // Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
+ CHECK_EQ(epoch_, current_epoch);
+ if (!bv_.setBit(lock_id)) {
+ // The lock is already held by this thread, it must be recursive.
+ CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
+ recursive_locks[n_recursive_locks++] = lock_id;
+ return false;
+ }
+ CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
+ // lock_id < BV::kSize, can cast to a smaller int.
+ u32 lock_id_short = static_cast<u32>(lock_id);
+ LockWithContext l = {lock_id_short, stk};
+ all_locks_with_contexts_[n_all_locks_++] = l;
+ return true;
+ }
+
+ void removeLock(uptr lock_id) {
+ if (n_recursive_locks) {
+ for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
+ if (recursive_locks[i] == lock_id) {
+ n_recursive_locks--;
+ Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
+ return;
+ }
+ }
+ }
+ // Printf("remLock: %zx %zx\n", lock_id, epoch_);
+ CHECK(bv_.clearBit(lock_id));
+ if (n_all_locks_) {
+ for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
+ Swap(all_locks_with_contexts_[i],
+ all_locks_with_contexts_[n_all_locks_ - 1]);
+ n_all_locks_--;
+ break;
+ }
+ }
+ }
+ }
+
+ u32 findLockContext(uptr lock_id) {
+ for (uptr i = 0; i < n_all_locks_; i++)
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
+ return all_locks_with_contexts_[i].stk;
+ return 0;
+ }
+
+ const BV &getLocks(uptr current_epoch) const {
+ CHECK_EQ(epoch_, current_epoch);
+ return bv_;
+ }
+
+ uptr getNumLocks() const { return n_all_locks_; }
+ uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }
+
+ private:
+ BV bv_;
+ uptr epoch_;
+ uptr recursive_locks[64];
+ uptr n_recursive_locks;
+ struct LockWithContext {
+ u32 lock;
+ u32 stk;
+ };
+ LockWithContext all_locks_with_contexts_[64];
+ uptr n_all_locks_;
+};
+
+// DeadlockDetector.
+// For deadlock detection to work we need one global DeadlockDetector object
+// and one DeadlockDetectorTLS object per thread.
+// Most of the methods of this class are not thread-safe (i.e. they should
+// be protected by an external lock) unless explicitly stated otherwise.
+template <class BV>
+class DeadlockDetector {
+ public:
+ typedef BV BitVector;
+
+ uptr size() const { return g_.size(); }
+
+ // No CTOR.
+ void clear() {
+ current_epoch_ = 0;
+ available_nodes_.clear();
+ recycled_nodes_.clear();
+ g_.clear();
+ n_edges_ = 0;
+ }
+
+ // Allocate a new deadlock detector node.
+ // If we are out of available nodes, first try to recycle some.
+ // If there is nothing to recycle, flush the graph and increment the epoch.
+ // Associate 'data' (an opaque user object) with the new node.
+ uptr newNode(uptr data) {
+ if (!available_nodes_.empty())
+ return getAvailableNode(data);
+ if (!recycled_nodes_.empty()) {
+ // Printf("recycling: n_edges_ %zd\n", n_edges_);
+ for (sptr i = n_edges_ - 1; i >= 0; i--) {
+ if (recycled_nodes_.getBit(edges_[i].from) ||
+ recycled_nodes_.getBit(edges_[i].to)) {
+ Swap(edges_[i], edges_[n_edges_ - 1]);
+ n_edges_--;
+ }
+ }
+ CHECK(available_nodes_.empty());
+ // removeEdgesFrom was called in removeNode.
+ g_.removeEdgesTo(recycled_nodes_);
+ available_nodes_.setUnion(recycled_nodes_);
+ recycled_nodes_.clear();
+ return getAvailableNode(data);
+ }
+ // We are out of vacant nodes. Flush and increment the current_epoch_.
+ current_epoch_ += size();
+ recycled_nodes_.clear();
+ available_nodes_.setAll();
+ g_.clear();
+ return getAvailableNode(data);
+ }
+
+ // Get data associated with the node created by newNode().
+ uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }
+
+ bool nodeBelongsToCurrentEpoch(uptr node) {
+ return node && (node / size() * size()) == current_epoch_;
+ }
+
+ void removeNode(uptr node) {
+ uptr idx = nodeToIndex(node);
+ CHECK(!available_nodes_.getBit(idx));
+ CHECK(recycled_nodes_.setBit(idx));
+ g_.removeEdgesFrom(idx);
+ }
+
+ void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
+ dtls->ensureCurrentEpoch(current_epoch_);
+ }
+
+ // Returns true if there is a cycle in the graph after this lock event.
+ // Ideally should be called before the lock is acquired so that we can
+ // report a deadlock before a real deadlock happens.
+ bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+ }
+
+ u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ return dtls->findLockContext(nodeToIndex(node));
+ }
+
+ // Add cur_node to the set of locks held currently by dtls.
+ void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ }
+
+ // Experimental *racy* fast path function.
+ // Returns true if all edges from the currently held locks to cur_node exist.
+ bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ uptr local_epoch = dtls->getEpoch();
+ // Reading current_epoch_ is racy.
+ if (cur_node && local_epoch == current_epoch_ &&
+ local_epoch == nodeToEpoch(cur_node)) {
+ uptr cur_idx = nodeToIndexUnchecked(cur_node);
+ for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
+ if (!g_.hasEdge(dtls->getLock(i), cur_idx))
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Adds edges from currently held locks to cur_node,
+ // returns the number of added edges, and puts the sources of added edges
+ // into added_edges[].
+ // Should be called before onLockAfter.
+ uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
+ int unique_tid) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ uptr added_edges[40];
+ uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
+ added_edges, ARRAY_SIZE(added_edges));
+ for (uptr i = 0; i < n_added_edges; i++) {
+ if (n_edges_ < ARRAY_SIZE(edges_)) {
+ Edge e = {(u16)added_edges[i], (u16)cur_idx,
+ dtls->findLockContext(added_edges[i]), stk,
+ unique_tid};
+ edges_[n_edges_++] = e;
+ }
+ // Printf("Edge%zd: %u %zd=>%zd in T%d\n",
+ // n_edges_, stk, added_edges[i], cur_idx, unique_tid);
+ }
+ return n_added_edges;
+ }
+
+ bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
+ int *unique_tid) {
+ uptr from_idx = nodeToIndex(from_node);
+ uptr to_idx = nodeToIndex(to_node);
+ for (uptr i = 0; i < n_edges_; i++) {
+ if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
+ *stk_from = edges_[i].stk_from;
+ *stk_to = edges_[i].stk_to;
+ *unique_tid = edges_[i].unique_tid;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Test-only function. Handles the before/after lock events,
+ // returns true if there is a cycle.
+ bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
+ addEdges(dtls, cur_node, stk, 0);
+ onLockAfter(dtls, cur_node, stk);
+ return is_reachable;
+ }
+
+ // Handles the try_lock event, returns false.
+ // When a try_lock event happens (i.e. a try_lock call succeeds) we need
+ // to add this lock to the currently held locks, but we should not try to
+ // change the lock graph or to detect a cycle. We may want to investigate
+ // whether a more aggressive strategy is possible for try_lock.
+ bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ return false;
+ }
+
+ // Returns true iff dtls is empty (no locks are currently held) and we can
+ // add the node to the currently held locks w/o changing the global state.
+ // This operation is thread-safe as it only touches the dtls.
+ bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (!dtls->empty()) return false;
+ if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ // Finds a path between the lock 'cur_node' (currently not held in dtls)
+ // and some currently held lock; returns the length of the path
+ // or 0 on failure.
+ uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
+ uptr path_size) {
+ tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
+ uptr idx = nodeToIndex(cur_node);
+ CHECK(!tmp_bv_.getBit(idx));
+ uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
+ for (uptr i = 0; i < res; i++)
+ path[i] = indexToNode(path[i]);
+ if (res)
+ CHECK_EQ(path[0], cur_node);
+ return res;
+ }
+
+ // Handle the unlock event.
+ // This operation is thread-safe as it only touches the dtls.
+ void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ if (dtls->getEpoch() == nodeToEpoch(node))
+ dtls->removeLock(nodeToIndexUnchecked(node));
+ }
+
+ // Tries to handle the lock event w/o writing to global state.
+ // Returns true on success.
+ // This operation is thread-safe as it only touches the dtls
+ // (modulo racy nature of hasAllEdges).
+ bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (hasAllEdges(dtls, node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
+ return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
+ }
+
+ uptr testOnlyGetEpoch() const { return current_epoch_; }
+ bool testOnlyHasEdge(uptr l1, uptr l2) {
+ return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
+ }
+ // idx1 and idx2 are raw indices to g_, not lock IDs.
+ bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
+ return g_.hasEdge(idx1, idx2);
+ }
+
+ void Print() {
+ for (uptr from = 0; from < size(); from++)
+ for (uptr to = 0; to < size(); to++)
+ if (g_.hasEdge(from, to))
+ Printf(" %zx => %zx\n", from, to);
+ }
+
+ private:
+ void check_idx(uptr idx) const { CHECK_LT(idx, size()); }
+
+ void check_node(uptr node) const {
+ CHECK_GE(node, size());
+ CHECK_EQ(current_epoch_, nodeToEpoch(node));
+ }
+
+ uptr indexToNode(uptr idx) const {
+ check_idx(idx);
+ return idx + current_epoch_;
+ }
+
+ uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }
+
+ uptr nodeToIndex(uptr node) const {
+ check_node(node);
+ return nodeToIndexUnchecked(node);
+ }
+
+ uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
+
+ uptr getAvailableNode(uptr data) {
+ uptr idx = available_nodes_.getAndClearFirstOne();
+ data_[idx] = data;
+ return indexToNode(idx);
+ }
+
+ struct Edge {
+ u16 from;
+ u16 to;
+ u32 stk_from;
+ u32 stk_to;
+ int unique_tid;
+ };
+
+ uptr current_epoch_;
+ BV available_nodes_;
+ BV recycled_nodes_;
+ BV tmp_bv_;
+ BVGraph<BV> g_;
+ uptr data_[BV::kSize];
+ Edge edges_[BV::kSize * 32];
+ uptr n_edges_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
new file mode 100644
index 00000000000..7a318c963f7
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
@@ -0,0 +1,188 @@
+//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on NxN adjacency bit matrix.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_deadlock_detector.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+
+namespace __sanitizer {
+
+typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
+
+struct DDPhysicalThread {
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ DeadlockDetectorTLS<DDBV> dd;
+ DDReport rep;
+ bool report_pending;
+};
+
+struct DD : public DDetector {
+ SpinMutex mtx;
+ DeadlockDetector<DDBV> dd;
+ DDFlags flags;
+
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread *CreatePhysicalThread() override;
+ void DestroyPhysicalThread(DDPhysicalThread *pt) override;
+
+ DDLogicalThread *CreateLogicalThread(u64 ctx) override;
+ void DestroyLogicalThread(DDLogicalThread *lt) override;
+
+ void MutexInit(DDCallback *cb, DDMutex *m) override;
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) override;
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
+ void MutexDestroy(DDCallback *cb, DDMutex *m) override;
+
+ DDReport *GetReport(DDCallback *cb) override;
+
+ void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
+ void ReportDeadlock(DDCallback *cb, DDMutex *m);
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags) {
+ dd.clear();
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ return nullptr;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
+ lt->ctx = ctx;
+ lt->dd.clear();
+ lt->report_pending = false;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ m->id = 0;
+ m->stk = cb->Unwind();
+}
+
+void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
+ if (!dd.nodeBelongsToCurrentEpoch(m->id))
+ m->id = dd.newNode(reinterpret_cast<uptr>(m));
+ dd.ensureCurrentEpoch(&lt->dd);
+}
+
+void DD::MutexBeforeLock(DDCallback *cb,
+ DDMutex *m, bool wlock) {
+ DDLogicalThread *lt = cb->lt;
+ if (lt->dd.empty()) return; // This will be the first lock held by lt.
+ if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (dd.isHeld(&lt->dd, m->id))
+ return; // FIXME: allow this only for recursive locks.
+ if (dd.onLockBefore(&lt->dd, m->id)) {
+ // Actually add this edge now so that we have all the stack traces.
+ dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
+ ReportDeadlock(cb, m);
+ }
+}
+
+void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
+ DDLogicalThread *lt = cb->lt;
+ uptr path[10];
+ uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
+ CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
+ CHECK_EQ(m->id, path[0]);
+ lt->report_pending = true;
+ DDReport *rep = &lt->rep;
+ rep->n = len;
+ for (uptr i = 0; i < len; i++) {
+ uptr from = path[i];
+ uptr to = path[(i + 1) % len];
+ DDMutex *m0 = (DDMutex*)dd.getData(from);
+ DDMutex *m1 = (DDMutex*)dd.getData(to);
+
+ u32 stk_from = -1U, stk_to = -1U;
+ int unique_tid = 0;
+ dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
+ // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
+ // unique_tid);
+ rep->loop[i].thr_ctx = unique_tid;
+ rep->loop[i].mtx_ctx0 = m0->ctx;
+ rep->loop[i].mtx_ctx1 = m1->ctx;
+ rep->loop[i].stk[0] = stk_to;
+ rep->loop[i].stk[1] = stk_from;
+ }
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
+ DDLogicalThread *lt = cb->lt;
+ u32 stk = 0;
+ if (flags.second_deadlock_stack)
+ stk = cb->Unwind();
+ // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
+ if (dd.onFirstLock(&lt->dd, m->id, stk))
+ return;
+ if (dd.onLockFast(&lt->dd, m->id, stk))
+ return;
+
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (wlock) // Only a recursive rlock may be held.
+ CHECK(!dd.isHeld(&lt->dd, m->id));
+ if (!trylock)
+ dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
+ dd.onLockAfter(&lt->dd, m->id, stk);
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
+ dd.onUnlock(&cb->lt->dd, m->id);
+}
+
+void DD::MutexDestroy(DDCallback *cb,
+ DDMutex *m) {
+ if (!m->id) return;
+ SpinMutexLock lk(&mtx);
+ if (dd.nodeBelongsToCurrentEpoch(m->id))
+ dd.removeNode(m->id);
+ m->id = 0;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->lt->report_pending)
+ return nullptr;
+ cb->lt->report_pending = false;
+ return &cb->lt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc
new file mode 100644
index 00000000000..bc96ad230db
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector2.cc
@@ -0,0 +1,426 @@
+//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on adjacency lists.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+
+namespace __sanitizer {
+
+const int kMaxNesting = 64;
+const u32 kNoId = -1;
+const u32 kEndId = -2;
+const int kMaxLink = 8;
+const int kL1Size = 1024;
+const int kL2Size = 1024;
+const int kMaxMutex = kL1Size * kL2Size;
+
+struct Id {
+ u32 id;
+ u32 seq;
+
+ explicit Id(u32 id = 0, u32 seq = 0)
+ : id(id)
+ , seq(seq) {
+ }
+};
+
+struct Link {
+ u32 id;
+ u32 seq;
+ u32 tid;
+ u32 stk0;
+ u32 stk1;
+
+ explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
+ : id(id)
+ , seq(seq)
+ , tid(tid)
+ , stk0(s0)
+ , stk1(s1) {
+ }
+};
+
+struct DDPhysicalThread {
+ DDReport rep;
+ bool report_pending;
+ bool visited[kMaxMutex];
+ Link pending[kMaxMutex];
+ Link path[kMaxMutex];
+};
+
+struct ThreadMutex {
+ u32 id;
+ u32 stk;
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ ThreadMutex locked[kMaxNesting];
+ int nlocked;
+};
+
+struct Mutex {
+ StaticSpinMutex mtx;
+ u32 seq;
+ int nlink;
+ Link link[kMaxLink];
+};
+
+struct DD : public DDetector {
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
+ void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
+ u32 allocateId(DDCallback *cb);
+ Mutex *getMutex(u32 id);
+ u32 getMutexId(Mutex *m);
+
+ DDFlags flags;
+
+ Mutex* mutex[kL1Size];
+
+ SpinMutex mtx;
+ InternalMmapVector<u32> free_id;
+ int id_gen;
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags)
+ , free_id(1024) {
+ id_gen = 0;
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
+ "deadlock detector (physical thread)");
+ return pt;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+ pt->~DDPhysicalThread();
+ UnmapOrDie(pt, sizeof(DDPhysicalThread));
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
+ sizeof(DDLogicalThread));
+ lt->ctx = ctx;
+ lt->nlocked = 0;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
+ m->id = kNoId;
+ m->recursion = 0;
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+}
+
+Mutex *DD::getMutex(u32 id) {
+ return &mutex[id / kL2Size][id % kL2Size];
+}
+
+u32 DD::getMutexId(Mutex *m) {
+ for (int i = 0; i < kL1Size; i++) {
+ Mutex *tab = mutex[i];
+ if (tab == 0)
+ break;
+ if (m >= tab && m < tab + kL2Size)
+ return i * kL2Size + (m - tab);
+ }
+ return -1;
+}
+
+u32 DD::allocateId(DDCallback *cb) {
+ u32 id = -1;
+ SpinMutexLock l(&mtx);
+ if (free_id.size() > 0) {
+ id = free_id.back();
+ free_id.pop_back();
+ } else {
+ CHECK_LT(id_gen, kMaxMutex);
+ if ((id_gen % kL2Size) == 0) {
+ mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
+ "deadlock detector (mutex table)");
+ }
+ id = id_gen++;
+ }
+ CHECK_LE(id, kMaxMutex);
+ VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
+ return id;
+}
+
+void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDPhysicalThread *pt = cb->pt;
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+
+ // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+ if (lt->nlocked == 1) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ bool added = false;
+ Mutex *mtx = getMutex(m->id);
+ for (int i = 0; i < lt->nlocked - 1; i++) {
+ u32 id1 = lt->locked[i].id;
+ u32 stk1 = lt->locked[i].stk;
+ Mutex *mtx1 = getMutex(id1);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->nlink == kMaxLink) {
+ // FIXME(dvyukov): check stale links
+ continue;
+ }
+ int li = 0;
+ for (; li < mtx1->nlink; li++) {
+ Link *link = &mtx1->link[li];
+ if (link->id == m->id) {
+ if (link->seq != mtx->seq) {
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ break;
+ }
+ }
+ if (li == mtx1->nlink) {
+ // FIXME(dvyukov): check stale links
+ Link *link = &mtx1->link[mtx1->nlink++];
+ link->id = m->id;
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ }
+
+ if (!added || mtx->nlink == 0) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CycleCheck(pt, lt, m);
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {
+ VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
+ CHECK(wlock);
+ m->recursion++;
+ return;
+ }
+ CHECK_EQ(owner, 0);
+ if (wlock) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
+ CHECK_EQ(m->recursion, 0);
+ m->recursion = 1;
+ atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
+ }
+
+ if (!trylock)
+ return;
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
+ if (--m->recursion > 0)
+ return;
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+ }
+ CHECK_NE(m->id, kNoId);
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (cb->lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+}
+
+void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
+ cb->lt->ctx, m);
+ DDLogicalThread *lt = cb->lt;
+
+ if (m->id == kNoId)
+ return;
+
+ // Remove the mutex from lt->locked if it is there.
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+
+ // Clear and invalidate the mutex descriptor.
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ mtx->seq++;
+ mtx->nlink = 0;
+ }
+
+ // Return id to cache.
+ {
+ SpinMutexLock l(&mtx);
+ free_id.push_back(m->id);
+ }
+}
+
+void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
+ DDMutex *m) {
+ internal_memset(pt->visited, 0, sizeof(pt->visited));
+ int npath = 0;
+ int npending = 0;
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ for (int li = 0; li < mtx->nlink; li++)
+ pt->pending[npending++] = mtx->link[li];
+ }
+ while (npending > 0) {
+ Link link = pt->pending[--npending];
+ if (link.id == kEndId) {
+ npath--;
+ continue;
+ }
+ if (pt->visited[link.id])
+ continue;
+ Mutex *mtx1 = getMutex(link.id);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->seq != link.seq)
+ continue;
+ pt->visited[link.id] = true;
+ if (mtx1->nlink == 0)
+ continue;
+ pt->path[npath++] = link;
+ pt->pending[npending++] = Link(kEndId);
+ if (link.id == m->id)
+ return Report(pt, lt, npath); // Bingo!
+ for (int li = 0; li < mtx1->nlink; li++) {
+ Link *link1 = &mtx1->link[li];
+ // Mutex *mtx2 = getMutex(link->id);
+ // FIXME(dvyukov): fast seq check
+ // FIXME(dvyukov): fast nlink != 0 check
+ // FIXME(dvyukov): fast pending check?
+ // FIXME(dvyukov): npending can be larger than kMaxMutex
+ pt->pending[npending++] = *link1;
+ }
+ }
+}
+
+void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
+ DDReport *rep = &pt->rep;
+ rep->n = npath;
+ for (int i = 0; i < npath; i++) {
+ Link *link = &pt->path[i];
+ Link *link0 = &pt->path[i ? i - 1 : npath - 1];
+ rep->loop[i].thr_ctx = link->tid;
+ rep->loop[i].mtx_ctx0 = link0->id;
+ rep->loop[i].mtx_ctx1 = link->id;
+ rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
+ rep->loop[i].stk[1] = link->stk1;
+ }
+ pt->report_pending = true;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->pt->report_pending)
+ return 0;
+ cb->pt->report_pending = false;
+ return &cb->pt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
new file mode 100644
index 00000000000..07c3755155f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
@@ -0,0 +1,91 @@
+//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// Abstract deadlock detector interface.
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
+# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
+#endif
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+// dd - deadlock detector.
+// lt - logical (user) thread.
+// pt - physical (OS) thread.
+
+struct DDPhysicalThread;
+struct DDLogicalThread;
+
+struct DDMutex {
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+ uptr id;
+ u32 stk; // creation stack
+#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+ u32 id;
+ u32 recursion;
+ atomic_uintptr_t owner;
+#else
+# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
+#endif
+ u64 ctx;
+};
+
+struct DDFlags {
+ bool second_deadlock_stack;
+};
+
+struct DDReport {
+ enum { kMaxLoopSize = 8 };
+ int n; // number of entries in loop
+ struct {
+ u64 thr_ctx; // user thread context
+ u64 mtx_ctx0; // user mutex context, start of the edge
+ u64 mtx_ctx1; // user mutex context, end of the edge
+ u32 stk[2]; // stack ids for the edge
+ } loop[kMaxLoopSize];
+};
+
+struct DDCallback {
+ DDPhysicalThread *pt;
+ DDLogicalThread *lt;
+
+ virtual u32 Unwind() { return 0; }
+ virtual int UniqueTid() { return 0; }
+};
+
+struct DDetector {
+ static DDetector *Create(const DDFlags *flags);
+
+ virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }
+ virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
+
+ virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }
+ virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
+
+ virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
+ virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {}
+ virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
+
+ virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_flag_parser.cc b/libsanitizer/sanitizer_common/sanitizer_flag_parser.cc
new file mode 100644
index 00000000000..1fc6b2edb0c
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_flag_parser.cc
@@ -0,0 +1,169 @@
+//===-- sanitizer_flag_parser.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flag_parser.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+LowLevelAllocator FlagParser::Alloc;
+
+class UnknownFlags {
+ static const int kMaxUnknownFlags = 20;
+ const char *unknown_flags_[kMaxUnknownFlags];
+ int n_unknown_flags_;
+
+ public:
+ void Add(const char *name) {
+ CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
+ unknown_flags_[n_unknown_flags_++] = name;
+ }
+
+ void Report() {
+ if (!n_unknown_flags_) return;
+ Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
+ for (int i = 0; i < n_unknown_flags_; ++i)
+ Printf(" %s\n", unknown_flags_[i]);
+ n_unknown_flags_ = 0;
+ }
+};
+
+UnknownFlags unknown_flags;
+
+void ReportUnrecognizedFlags() {
+ unknown_flags.Report();
+}
+
+char *FlagParser::ll_strndup(const char *s, uptr n) {
+ uptr len = internal_strnlen(s, n);
+ char *s2 = (char*)Alloc.Allocate(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+void FlagParser::PrintFlagDescriptions() {
+ Printf("Available flags for %s:\n", SanitizerToolName);
+ for (int i = 0; i < n_flags_; ++i)
+ Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
+}
+
+void FlagParser::fatal_error(const char *err) {
+ Printf("ERROR: %s\n", err);
+ Die();
+}
+
+bool FlagParser::is_space(char c) {
+ return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
+ c == '\r';
+}
+
+void FlagParser::skip_whitespace() {
+ while (is_space(buf_[pos_])) ++pos_;
+}
+
+void FlagParser::parse_flag() {
+ uptr name_start = pos_;
+ while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != '=') fatal_error("expected '='");
+ char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
+
+ uptr value_start = ++pos_;
+ char *value;
+ if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
+ char quote = buf_[pos_++];
+ while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
+ if (buf_[pos_] == 0) fatal_error("unterminated string");
+ value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
+ ++pos_; // consume the closing quote
+ } else {
+ while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
+ fatal_error("expected separator or eol");
+ value = ll_strndup(buf_ + value_start, pos_ - value_start);
+ }
+
+ bool res = run_handler(name, value);
+ if (!res) fatal_error("Flag parsing failed.");
+}
+
+void FlagParser::parse_flags() {
+ while (true) {
+ skip_whitespace();
+ if (buf_[pos_] == 0) break;
+ parse_flag();
+ }
+
+ // Do a sanity check for certain flags.
+ if (common_flags_dont_use.malloc_context_size < 1)
+ common_flags_dont_use.malloc_context_size = 1;
+}
+
+void FlagParser::ParseString(const char *s) {
+ if (!s) return;
+ // Back up the current parser state to allow nested ParseString() calls.
+ const char *old_buf_ = buf_;
+ uptr old_pos_ = pos_;
+ buf_ = s;
+ pos_ = 0;
+
+ parse_flags();
+
+ buf_ = old_buf_;
+ pos_ = old_pos_;
+}
+
+bool FlagParser::ParseFile(const char *path, bool ignore_missing) {
+ static const uptr kMaxIncludeSize = 1 << 15;
+ char *data;
+ uptr data_mapped_size;
+ error_t err;
+ uptr len;
+ if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,
+ Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {
+ if (ignore_missing)
+ return true;
+ Printf("Failed to read options from '%s': error %d\n", path, err);
+ return false;
+ }
+ ParseString(data);
+ UnmapOrDie(data, data_mapped_size);
+ return true;
+}
+
+bool FlagParser::run_handler(const char *name, const char *value) {
+ for (int i = 0; i < n_flags_; ++i) {
+ if (internal_strcmp(name, flags_[i].name) == 0)
+ return flags_[i].handler->Parse(value);
+ }
+ // Unrecognized flag. This is not a fatal error; we may print a warning later.
+ unknown_flags.Add(name);
+ return true;
+}
+
+void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc) {
+ CHECK_LT(n_flags_, kMaxFlags);
+ flags_[n_flags_].name = name;
+ flags_[n_flags_].desc = desc;
+ flags_[n_flags_].handler = handler;
+ ++n_flags_;
+}
+
+FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
+ flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+}
+
+} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_flag_parser.h b/libsanitizer/sanitizer_common/sanitizer_flag_parser.h
new file mode 100644
index 00000000000..7827d735770
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_flag_parser.h
@@ -0,0 +1,120 @@
+//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAG_REGISTRY_H
+#define SANITIZER_FLAG_REGISTRY_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class FlagHandlerBase {
+ public:
+ virtual bool Parse(const char *value) { return false; }
+};
+
+template <typename T>
+class FlagHandler : public FlagHandlerBase {
+ T *t_;
+
+ public:
+ explicit FlagHandler(T *t) : t_(t) {}
+ bool Parse(const char *value) final;
+};
+
+template <>
+inline bool FlagHandler<bool>::Parse(const char *value) {
+ if (internal_strcmp(value, "0") == 0 ||
+ internal_strcmp(value, "no") == 0 ||
+ internal_strcmp(value, "false") == 0) {
+ *t_ = false;
+ return true;
+ }
+ if (internal_strcmp(value, "1") == 0 ||
+ internal_strcmp(value, "yes") == 0 ||
+ internal_strcmp(value, "true") == 0) {
+ *t_ = true;
+ return true;
+ }
+ Printf("ERROR: Invalid value for bool option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<const char *>::Parse(const char *value) {
+ *t_ = internal_strdup(value);
+ return true;
+}
+
+template <>
+inline bool FlagHandler<int>::Parse(const char *value) {
+ char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<uptr>::Parse(const char *value) {
+ char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
+ return ok;
+}
+
+class FlagParser {
+ static const int kMaxFlags = 200;
+ struct Flag {
+ const char *name;
+ const char *desc;
+ FlagHandlerBase *handler;
+ } *flags_;
+ int n_flags_;
+
+ const char *buf_;
+ uptr pos_;
+
+ public:
+ FlagParser();
+ void RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc);
+ void ParseString(const char *s);
+ bool ParseFile(const char *path, bool ignore_missing);
+ void PrintFlagDescriptions();
+
+ static LowLevelAllocator Alloc;
+
+ private:
+ void fatal_error(const char *err);
+ bool is_space(char c);
+ void skip_whitespace();
+ void parse_flags();
+ void parse_flag();
+ bool run_handler(const char *name, const char *value);
+ char *ll_strndup(const char *s, uptr n);
+};
+
+template <typename T>
+static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
+ T *var) {
+ FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT
+ parser->RegisterHandler(name, fh, desc);
+}
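+
+// A minimal usage sketch (flag names and values below are hypothetical):
+//   static bool my_bool;
+//   static int my_int;
+//   FlagParser parser;
+//   RegisterFlag(&parser, "my_bool", "Example bool flag.", &my_bool);
+//   RegisterFlag(&parser, "my_int", "Example int flag.", &my_int);
+//   parser.ParseString("my_bool=yes:my_int=42");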
+
+void ReportUnrecognizedFlags();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAG_REGISTRY_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.cc b/libsanitizer/sanitizer_common/sanitizer_flags.cc
index 90bb57dff98..a24ff900011 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.cc
@@ -13,129 +13,88 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_flag_parser.h"
namespace __sanitizer {
-void SetCommonFlagsDefaults(CommonFlags *f) {
- f->symbolize = true;
- f->external_symbolizer_path = 0;
- f->strip_path_prefix = "";
- f->fast_unwind_on_fatal = false;
- f->fast_unwind_on_malloc = true;
- f->handle_ioctl = false;
- f->malloc_context_size = 1;
- f->log_path = "stderr";
- f->verbosity = 0;
- f->detect_leaks = false;
- f->leak_check_at_exit = true;
- f->allocator_may_return_null = false;
- f->print_summary = true;
+CommonFlags common_flags_dont_use;
+
+struct FlagDescription {
+ const char *name;
+ const char *description;
+ FlagDescription *next;
+};
+
+IntrusiveList<FlagDescription> flag_descriptions;
+
+// If set, the tool will install its own SEGV signal handler by default.
+#ifndef SANITIZER_NEEDS_SEGV
+# define SANITIZER_NEEDS_SEGV 1
+#endif
+
+void CommonFlags::SetDefaults() {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
}
-void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
- ParseFlag(str, &f->symbolize, "symbolize");
- ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path");
- ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
- ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
- ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
- ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
- ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
- ParseFlag(str, &f->log_path, "log_path");
- ParseFlag(str, &f->verbosity, "verbosity");
- ParseFlag(str, &f->detect_leaks, "detect_leaks");
- ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
- ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
- ParseFlag(str, &f->print_summary, "print_summary");
-
- // Do a sanity check for certain flags.
- if (f->malloc_context_size < 1)
- f->malloc_context_size = 1;
+void CommonFlags::CopyFrom(const CommonFlags &other) {
+ internal_memcpy(this, &other, sizeof(*this));
}
-static bool GetFlagValue(const char *env, const char *name,
- const char **value, int *value_length) {
- if (env == 0)
- return false;
- const char *pos = 0;
- for (;;) {
- pos = internal_strstr(env, name);
- if (pos == 0)
- return false;
- if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) {
- // Seems to be middle of another flag name or value.
- env = pos + 1;
- continue;
- }
- break;
- }
- pos += internal_strlen(name);
- const char *end;
- if (pos[0] != '=') {
- end = pos;
- } else {
- pos += 1;
- if (pos[0] == '"') {
- pos += 1;
- end = internal_strchr(pos, '"');
- } else if (pos[0] == '\'') {
- pos += 1;
- end = internal_strchr(pos, '\'');
- } else {
- // Read until the next space or colon.
- end = pos + internal_strcspn(pos, " :");
- }
- if (end == 0)
- end = pos + internal_strlen(pos);
+// Copy the string from "s" to "out", replacing "%b" with the binary basename.
+static void SubstituteBinaryName(const char *s, char *out, uptr out_size) {
+ char *out_end = out + out_size;
+ while (*s && out < out_end - 1) {
+ if (s[0] != '%' || s[1] != 'b') { *out++ = *s++; continue; }
+ const char *base = GetProcessName();
+ CHECK(base);
+ while (*base && out < out_end - 1)
+ *out++ = *base++;
+ s += 2; // skip "%b"
}
- *value = pos;
- *value_length = end - pos;
- return true;
+ *out = '\0';
}
-static bool StartsWith(const char *flag, int flag_length, const char *value) {
- if (!flag || !value)
- return false;
- int value_length = internal_strlen(value);
- return (flag_length >= value_length) &&
- (0 == internal_strncmp(flag, value, value_length));
-}
+class FlagHandlerInclude : public FlagHandlerBase {
+ FlagParser *parser_;
+ bool ignore_missing_;
-void ParseFlag(const char *env, bool *flag, const char *name) {
- const char *value;
- int value_length;
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- if (StartsWith(value, value_length, "0") ||
- StartsWith(value, value_length, "no") ||
- StartsWith(value, value_length, "false"))
- *flag = false;
- if (StartsWith(value, value_length, "1") ||
- StartsWith(value, value_length, "yes") ||
- StartsWith(value, value_length, "true"))
- *flag = true;
-}
+ public:
+ explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
+ : parser_(parser), ignore_missing_(ignore_missing) {}
+ bool Parse(const char *value) final {
+ if (internal_strchr(value, '%')) {
+ char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
+ SubstituteBinaryName(value, buf, kMaxPathLength);
+ bool res = parser_->ParseFile(buf, ignore_missing_);
+ UnmapOrDie(buf, kMaxPathLength);
+ return res;
+ }
+ return parser_->ParseFile(value, ignore_missing_);
+ }
+};
-void ParseFlag(const char *env, int *flag, const char *name) {
- const char *value;
- int value_length;
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- *flag = static_cast<int>(internal_atoll(value));
+void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
+ FlagHandlerInclude *fh_include = new (FlagParser::Alloc) // NOLINT
+ FlagHandlerInclude(parser, /*ignore_missing*/ false);
+ parser->RegisterHandler("include", fh_include,
+ "read more options from the given file");
+ FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc) // NOLINT
+ FlagHandlerInclude(parser, /*ignore_missing*/ true);
+ parser->RegisterHandler(
+ "include_if_exists", fh_include_if_exists,
+ "read more options from the given file (if it exists)");
}
-static LowLevelAllocator allocator_for_flags;
-
-void ParseFlag(const char *env, const char **flag, const char *name) {
- const char *value;
- int value_length;
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- // Copy the flag value. Don't use locks here, as flags are parsed at
- // tool startup.
- char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
- internal_memcpy(value_copy, value, value_length);
- value_copy[value_length] = '\0';
- *flag = value_copy;
+void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &cf->Name);
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ RegisterIncludeFlags(parser, cf);
}
} // namespace __sanitizer
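Not part of the patch: a hedged sketch of the "include" handlers registered above. ExampleParseWithIncludes() and the /etc/%b.opts path are hypothetical.

#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"

namespace __sanitizer {

void ExampleParseWithIncludes(const char *user_options) {
  FlagParser parser;
  RegisterCommonFlags(&parser);       // registers the "include*" handlers too
  // "include_if_exists=/etc/%b.opts": FlagHandlerInclude expands "%b" to the
  // binary basename (via SubstituteBinaryName) and parses that file only if
  // it exists; a plain "include=" fails hard when the file is missing.
  parser.ParseString("include_if_exists=/etc/%b.opts");
  parser.ParseString(user_options);   // later assignments overwrite earlier ones
}

}  // namespace __sanitizer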
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.h b/libsanitizer/sanitizer_common/sanitizer_flags.h
index 46ec0928193..f1b872b79e2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.h
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.h
@@ -16,52 +16,38 @@
namespace __sanitizer {
-void ParseFlag(const char *env, bool *flag, const char *name);
-void ParseFlag(const char *env, int *flag, const char *name);
-void ParseFlag(const char *env, const char **flag, const char *name);
-
struct CommonFlags {
- // If set, use the online symbolizer from common sanitizer runtime.
- bool symbolize;
- // Path to external symbolizer. If it is NULL, symbolizer will be looked for
- // in PATH. If it is empty, external symbolizer will not be started.
- const char *external_symbolizer_path;
- // Strips this prefix from file paths in error reports.
- const char *strip_path_prefix;
- // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
- bool fast_unwind_on_fatal;
- // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
- bool fast_unwind_on_malloc;
- // Intercept and handle ioctl requests.
- bool handle_ioctl;
- // Max number of stack frames kept for each allocation/deallocation.
- int malloc_context_size;
- // Write logs to "log_path.pid".
- // The special values are "stdout" and "stderr".
- // The default is "stderr".
- const char *log_path;
- // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
- int verbosity;
- // Enable memory leak detection.
- bool detect_leaks;
- // Invoke leak checking in an atexit handler. Has no effect if
- // detect_leaks=false, or if __lsan_do_leak_check() is called before the
- // handler has a chance to run.
- bool leak_check_at_exit;
- // If false, the allocator will crash instead of returning 0 on out-of-memory.
- bool allocator_may_return_null;
- // If false, disable printing error summaries in addition to error reports.
- bool print_summary;
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ void SetDefaults();
+ void CopyFrom(const CommonFlags &other);
};
-inline CommonFlags *common_flags() {
- static CommonFlags f;
- return &f;
+// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
+extern CommonFlags common_flags_dont_use;
+inline const CommonFlags *common_flags() {
+ return &common_flags_dont_use;
}
-void SetCommonFlagsDefaults(CommonFlags *f);
-void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
+inline void SetCommonFlagsDefaults() {
+ common_flags_dont_use.SetDefaults();
+}
+
+// This function can only be used to set up tool-specific overrides for
+// CommonFlags defaults. Generally, it should only be used right after
+// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
+// only during the flags initialization (i.e. before they are used for
+// the first time).
+inline void OverrideCommonFlags(const CommonFlags &cf) {
+ common_flags_dont_use.CopyFrom(cf);
+}
+class FlagParser;
+void RegisterCommonFlags(FlagParser *parser,
+ CommonFlags *cf = &common_flags_dont_use);
+void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H
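Not part of the patch: a sketch of the initialization order the new interface expects (defaults, then tool overrides, then user options). ExampleInitializeCommonFlags(), the detect_leaks override and the EXAMPLE_TOOL_OPTIONS variable are hypothetical.

#include "sanitizer_common.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"

namespace __sanitizer {

void ExampleInitializeCommonFlags() {
  SetCommonFlagsDefaults();              // 1. built-in defaults
  CommonFlags cf;
  cf.CopyFrom(*common_flags());
  cf.detect_leaks = false;               // 2. tool-specific override
  OverrideCommonFlags(cf);
  FlagParser parser;
  RegisterCommonFlags(&parser);          // 3. expose the flags to the parser
  if (const char *opts = GetEnv("EXAMPLE_TOOL_OPTIONS"))
    parser.ParseString(opts);            // 4. user-supplied options
}

}  // namespace __sanitizer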
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.inc b/libsanitizer/sanitizer_common/sanitizer_flags.inc
new file mode 100644
index 00000000000..bca1c4b4d0f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.inc
@@ -0,0 +1,192 @@
+//===-- sanitizer_flags.inc -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes common flags available in all sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef COMMON_FLAG
+#error "Define COMMON_FLAG prior to including this file!"
+#endif
+
+// COMMON_FLAG(Type, Name, DefaultValue, Description)
+// Supported types: bool, const char *, int, uptr.
+// Default value must be a compile-time constant.
+// Description must be a string literal.
+
+COMMON_FLAG(
+ bool, symbolize, true,
+ "If set, use the online symbolizer from common sanitizer runtime to turn "
+ "virtual addresses to file/line locations.")
+COMMON_FLAG(
+ const char *, external_symbolizer_path, nullptr,
+ "Path to external symbolizer. If empty, the tool will search $PATH for "
+ "the symbolizer.")
+COMMON_FLAG(
+ bool, allow_addr2line, false,
+ "If set, allows online symbolizer to run addr2line binary to symbolize "
+ "stack traces (addr2line will only be used if llvm-symbolizer binary is "
+ "unavailable).")
+COMMON_FLAG(const char *, strip_path_prefix, "",
+ "Strips this prefix from file paths in error reports.")
+COMMON_FLAG(bool, fast_unwind_on_check, false,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "internal CHECK failures.")
+COMMON_FLAG(bool, fast_unwind_on_fatal, false,
+ "If available, use the fast frame-pointer-based unwinder on fatal "
+ "errors.")
+COMMON_FLAG(bool, fast_unwind_on_malloc, true,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "malloc/free.")
+COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
+COMMON_FLAG(int, malloc_context_size, 1,
+ "Max number of stack frames kept for each allocation/deallocation.")
+COMMON_FLAG(
+ const char *, log_path, "stderr",
+ "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
+ "\"stderr\". The default is \"stderr\".")
+COMMON_FLAG(
+ bool, log_exe_name, false,
+ "Mention name of executable when reporting error and "
+ "append executable name to logs (as in \"log_path.exe_name.pid\").")
+COMMON_FLAG(
+ bool, log_to_syslog, SANITIZER_ANDROID,
+ "Write all sanitizer output to syslog in addition to other means of "
+ "logging.")
+COMMON_FLAG(
+ int, verbosity, 0,
+ "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
+COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
+COMMON_FLAG(
+ bool, leak_check_at_exit, true,
+ "Invoke leak checking in an atexit handler. Has no effect if "
+ "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
+ "handler has a chance to run.")
+COMMON_FLAG(bool, allocator_may_return_null, false,
+ "If false, the allocator will crash instead of returning 0 on "
+ "out-of-memory.")
+COMMON_FLAG(bool, print_summary, true,
+ "If false, disable printing error summaries in addition to error "
+ "reports.")
+COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
+COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
+ "If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
+COMMON_FLAG(bool, handle_abort, false,
+ "If set, registers the tool's custom SIGABRT handler.")
+COMMON_FLAG(bool, handle_sigfpe, true,
+ "If set, registers the tool's custom SIGFPE handler.")
+COMMON_FLAG(bool, allow_user_segv_handler, false,
+ "If set, allows user to register a SEGV handler even if the tool "
+ "registers one.")
+COMMON_FLAG(bool, use_sigaltstack, true,
+ "If set, uses alternate stack for signal handling.")
+COMMON_FLAG(bool, detect_deadlocks, false,
+ "If set, deadlock detection is enabled.")
+COMMON_FLAG(
+ uptr, clear_shadow_mmap_threshold, 64 * 1024,
+ "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
+ "memset(). This is the threshold size in bytes.")
+COMMON_FLAG(const char *, color, "auto",
+ "Colorize reports: (always|never|auto).")
+COMMON_FLAG(
+ bool, legacy_pthread_cond, false,
+ "Enables support for dynamic libraries linked with libpthread 2.2.5.")
+COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
+COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
+COMMON_FLAG(uptr, mmap_limit_mb, 0,
+ "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
+ "not a user-facing flag, used mostly for testing the tools")
+COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
+ "Hard RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS and aborts the process if the"
+ " limit is reached")
+COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
+ "Soft RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS. If the limit is reached"
+ " all subsequent malloc/new calls will fail or return NULL"
+ " (depending on the value of allocator_may_return_null)"
+ " until the RSS goes below the soft limit."
+ " This limit does not affect memory allocations other than"
+ " malloc/new.")
+COMMON_FLAG(bool, can_use_proc_maps_statm, true,
+ "If false, do not attempt to read /proc/maps/statm."
+ " Mostly useful for testing sanitizers.")
+COMMON_FLAG(
+ bool, coverage, false,
+ "If set, coverage information will be dumped at program shutdown (if the "
+ "coverage instrumentation was enabled at compile time).")
+COMMON_FLAG(bool, coverage_pcs, true,
+ "If set (and if 'coverage' is set too), the coverage information "
+ "will be dumped as a set of PC offsets for every module.")
+COMMON_FLAG(bool, coverage_order_pcs, false,
+ "If true, the PCs will be dumped in the order they've"
+ " appeared during the execution.")
+COMMON_FLAG(bool, coverage_bitset, false,
+ "If set (and if 'coverage' is set too), the coverage information "
+ "will also be dumped as a bitset to a separate file.")
+COMMON_FLAG(bool, coverage_counters, false,
+ "If set (and if 'coverage' is set too), the bitmap that corresponds"
+ " to coverage counters will be dumped.")
+COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
+ "If set, coverage information will be dumped directly to a memory "
+ "mapped file. This way data is not lost even if the process is "
+ "suddenly killed.")
+COMMON_FLAG(const char *, coverage_dir, ".",
+ "Target directory for coverage dumps. Defaults to the current "
+ "directory.")
+COMMON_FLAG(bool, full_address_space, false,
+ "Sanitize complete address space; "
+ "by default, the kernel area on 32-bit platforms will not be sanitized")
+COMMON_FLAG(bool, print_suppressions, true,
+ "Print matched suppressions at exit.")
+COMMON_FLAG(
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
+ "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
+ "dumping a 16T+ core file. Ignored on OSes that don't dump core by "
+ "default and for sanitizers that don't reserve lots of virtual memory.")
+COMMON_FLAG(bool, use_madv_dontdump, true,
+ "If set, instructs kernel to not store the (huge) shadow "
+ "in core file.")
+COMMON_FLAG(bool, symbolize_inline_frames, true,
+ "Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(bool, symbolize_vs_style, false,
+ "Print file locations in Visual Studio style (e.g., "
+ " file(10,42): ...)")
+COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
+ "Format string used to render stack frames. "
+ "See sanitizer_stacktrace_printer.h for the format description. "
+ "Use DEFAULT to get default format.")
+COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
+ "If true, the shadow is not allowed to use huge pages. ")
+COMMON_FLAG(bool, strict_string_checks, false,
+ "If set, check that string arguments are properly null-terminated")
+COMMON_FLAG(bool, intercept_strstr, true,
+ "If set, uses custom wrappers for strstr and strcasestr functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strspn, true,
+ "If set, uses custom wrappers for strspn and strcspn function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strpbrk, true,
+ "If set, uses custom wrappers for strpbrk function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_memcmp, true,
+ "If set, uses custom wrappers for memcmp function "
+ "to find more errors.")
+COMMON_FLAG(bool, strict_memcmp, true,
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.")
+COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
+ "mappings in /proc/self/maps with "
+ "user-readable names")
+COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
+ "found an error")
+COMMON_FLAG(
+ bool, abort_on_error, SANITIZER_MAC,
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.")
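Not part of the patch: the patch expands this X-macro file three times (into CommonFlags fields, SetDefaults() assignments and RegisterCommonFlags() calls). As one more hedged illustration, a consumer could expand it into an enum of flag identifiers; the enum and the assumption that the .inc is on the include path are hypothetical.

enum ExampleCommonFlagId {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) kCommonFlag_##Name,
#include "sanitizer_flags.inc"
#undef COMMON_FLAG
  kCommonFlagCount  // total number of common flags
};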
diff --git a/libsanitizer/sanitizer_common/sanitizer_freebsd.h b/libsanitizer/sanitizer_common/sanitizer_freebsd.h
new file mode 100644
index 00000000000..47bb1313e6f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_freebsd.h
@@ -0,0 +1,135 @@
+//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FREEBSD_H
+#define SANITIZER_FREEBSD_H
+
+#include "sanitizer_internal_defs.h"
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# include <link.h>
+# include <sys/param.h>
+# include <ucontext.h>
+
+namespace __sanitizer {
+
+typedef unsigned long long __xuint64_t;
+
+typedef __int32_t __xregister_t;
+
+typedef struct __xmcontext {
+ __xregister_t mc_onstack;
+ __xregister_t mc_gs;
+ __xregister_t mc_fs;
+ __xregister_t mc_es;
+ __xregister_t mc_ds;
+ __xregister_t mc_edi;
+ __xregister_t mc_esi;
+ __xregister_t mc_ebp;
+ __xregister_t mc_isp;
+ __xregister_t mc_ebx;
+ __xregister_t mc_edx;
+ __xregister_t mc_ecx;
+ __xregister_t mc_eax;
+ __xregister_t mc_trapno;
+ __xregister_t mc_err;
+ __xregister_t mc_eip;
+ __xregister_t mc_cs;
+ __xregister_t mc_eflags;
+ __xregister_t mc_esp;
+ __xregister_t mc_ss;
+
+ int mc_len;
+ int mc_fpformat;
+ int mc_ownedfp;
+ __xregister_t mc_flags;
+
+ int mc_fpstate[128] __aligned(16);
+ __xregister_t mc_fsbase;
+ __xregister_t mc_gsbase;
+ __xregister_t mc_xfpustate;
+ __xregister_t mc_xfpustate_len;
+
+ int mc_spare2[4];
+} xmcontext_t;
+
+typedef struct __xucontext {
+ sigset_t uc_sigmask;
+ xmcontext_t uc_mcontext;
+
+ struct __ucontext *uc_link;
+ stack_t uc_stack;
+ int uc_flags;
+ int __spare__[4];
+} xucontext_t;
+
+struct xkinfo_vmentry {
+ int kve_structsize;
+ int kve_type;
+ __xuint64_t kve_start;
+ __xuint64_t kve_end;
+ __xuint64_t kve_offset;
+ __xuint64_t kve_vn_fileid;
+ __uint32_t kve_vn_fsid;
+ int kve_flags;
+ int kve_resident;
+ int kve_private_resident;
+ int kve_protection;
+ int kve_ref_count;
+ int kve_shadow_count;
+ int kve_vn_type;
+ __xuint64_t kve_vn_size;
+ __uint32_t kve_vn_rdev;
+ __uint16_t kve_vn_mode;
+ __uint16_t kve_status;
+ int _kve_ispare[12];
+ char kve_path[PATH_MAX];
+};
+
+typedef struct {
+ __uint32_t p_type;
+ __uint32_t p_offset;
+ __uint32_t p_vaddr;
+ __uint32_t p_paddr;
+ __uint32_t p_filesz;
+ __uint32_t p_memsz;
+ __uint32_t p_flags;
+ __uint32_t p_align;
+} XElf32_Phdr;
+
+struct xdl_phdr_info {
+ Elf_Addr dlpi_addr;
+ const char *dlpi_name;
+ const XElf32_Phdr *dlpi_phdr;
+ Elf_Half dlpi_phnum;
+ unsigned long long int dlpi_adds;
+ unsigned long long int dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info*, size_t, void*);
+typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void*);
+
+#define xdl_iterate_phdr(callback, param) \
+ (((xdl_iterate_phdr_t*) dl_iterate_phdr)((callback), (param)))
+
+} // namespace __sanitizer
+
+# endif // __FreeBSD_version <= 902001
+#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+
+#endif // SANITIZER_FREEBSD_H
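Not part of the patch: a sketch of the xdl_iterate_phdr() shim above in use on 32-bit FreeBSD <= 9.2; ExampleTotalPhdrs() and the counting callback are hypothetical.

#include "sanitizer_freebsd.h"

#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
    (__FreeBSD_version <= 902001)
namespace __sanitizer {

static int ExampleCountPhdrs(struct xdl_phdr_info *info, size_t /*size*/,
                             void *data) {
  *static_cast<int *>(data) += info->dlpi_phnum;
  return 0;  // returning 0 keeps dl_iterate_phdr iterating
}

int ExampleTotalPhdrs() {
  int total = 0;
  // The macro casts the system dl_iterate_phdr to the fixed-layout callback.
  xdl_iterate_phdr(ExampleCountPhdrs, &total);
  return total;
}

}  // namespace __sanitizer
#endif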
diff --git a/libsanitizer/sanitizer_common/sanitizer_interface_internal.h b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h
new file mode 100644
index 00000000000..0547f9927cb
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h
@@ -0,0 +1,59 @@
+//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+//
+// This header declares the sanitizer runtime interface functions.
+// The runtime library has to define these functions so the instrumented program
+// could call them.
+//
+// See also include/sanitizer/common_interface_defs.h
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_INTERFACE_INTERNAL_H
+#define SANITIZER_INTERFACE_INTERNAL_H
+
+#include "sanitizer_internal_defs.h"
+
+extern "C" {
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ // The special values are "stdout" and "stderr".
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_path(const char *path);
+
+ typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Notify the tools that the sandbox is going to be turned on.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_report_error_summary(const char *error_summary);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_annotate_contiguous_container(const void *beg,
+ const void *end,
+ const void *old_mid,
+ const void *new_mid);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg, const void *mid, const void *end);
+ } // extern "C"
+
+#endif // SANITIZER_INTERFACE_INTERNAL_H
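Not part of the patch: a sketch of the container-annotation entry points declared above, as a container that shrinks its used region might call them. ExampleShrinkContainer(), the buffer and the sizes are hypothetical.

#include "sanitizer_interface_internal.h"

// Marks [buf + new_size, buf + old_size) as unused for the tool, keeping
// [buf, buf + capacity) as the full allocation.
void ExampleShrinkContainer(char *buf, __sanitizer::uptr capacity,
                            __sanitizer::uptr old_size,
                            __sanitizer::uptr new_size) {
  __sanitizer_annotate_contiguous_container(buf, buf + capacity,
                                            buf + old_size, buf + new_size);
  // Sanity check: the annotation and the container should now agree.
  (void)__sanitizer_verify_contiguous_container(buf, buf + new_size,
                                                buf + capacity);
}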
diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
index 0dab7c2f6c7..d76ed7570a7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
+++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -13,6 +13,10 @@
#include "sanitizer_platform.h"
+#ifndef SANITIZER_DEBUG
+# define SANITIZER_DEBUG 0
+#endif
+
// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
@@ -26,16 +30,19 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if SANITIZER_LINUX && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
-#if __LP64__ || defined(_WIN64)
-# define SANITIZER_WORDSIZE 64
+// We can use .preinit_array section on Linux to call sanitizer initialization
+// functions very early in the process startup (unless PIC macro is defined).
+// FIXME: do we have anything like this on Mac?
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && !defined(PIC)
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 1
#else
-# define SANITIZER_WORDSIZE 32
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 0
#endif
// GCC does not understand __has_feature
@@ -57,7 +64,7 @@ typedef unsigned long uptr; // NOLINT
typedef signed long sptr; // NOLINT
#endif // defined(_WIN64)
#if defined(__x86_64__)
-// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
+// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
// 64-bit pointer to unwind stack frame.
typedef unsigned long long uhwptr; // NOLINT
#else
@@ -71,13 +78,22 @@ typedef signed char s8;
typedef signed short s16; // NOLINT
typedef signed int s32;
typedef signed long long s64; // NOLINT
+#if SANITIZER_WINDOWS
+// On Windows, files are HANDLE, which is a synonym of void*.
+// Use void* to avoid including <windows.h> everywhere.
+typedef void* fd_t;
+typedef unsigned error_t;
+#else
typedef int fd_t;
+typedef int error_t;
+#endif
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64.
-// Mac and Linux/x86-64 are special.
-#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
+// FreeBSD, Mac and Linux/x86-64 are special.
+#if SANITIZER_FREEBSD || SANITIZER_MAC || \
+ (SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
@@ -91,33 +107,6 @@ typedef u32 operator_new_size_type;
#endif
} // namespace __sanitizer
-extern "C" {
- // Tell the tools to write their reports to "path.<pid>" instead of stderr.
- // The special values are "stdout" and "stderr".
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_set_report_path(const char *path);
-
- // Notify the tools that the sandbox is going to be turned on. The reserved
- // parameter will be used in the future to hold a structure with functions
- // that the tools may call to bypass the sandbox.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_sandbox_on_notify(void *reserved);
-
- // This function is called by the tool when it has just finished reporting
- // an error. 'error_summary' is a one-line string that summarizes
- // the error message. This function can be overridden by the client.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_report_error_summary(const char *error_summary);
-
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc);
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_annotate_contiguous_container(const void *beg,
- const void *end,
- const void *old_mid,
- const void *new_mid);
-} // extern "C"
-
using namespace __sanitizer; // NOLINT
// ----------- ATTENTION -------------
@@ -138,11 +127,8 @@ using namespace __sanitizer; // NOLINT
# define NOINLINE __declspec(noinline)
# define NORETURN __declspec(noreturn)
# define THREADLOCAL __declspec(thread)
-# define NOTHROW
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
-# define UNUSED
-# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
@@ -154,11 +140,8 @@ using namespace __sanitizer; // NOLINT
# define NOINLINE __attribute__((noinline))
# define NORETURN __attribute__((noreturn))
# define THREADLOCAL __thread
-# define NOTHROW throw()
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
-# define UNUSED __attribute__((unused))
-# define USED __attribute__((used))
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetchnt0 on x86
# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
@@ -167,6 +150,20 @@ using namespace __sanitizer; // NOLINT
# endif
#endif // _MSC_VER
+#if !defined(_MSC_VER) || defined(__clang__)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+#else
+# define UNUSED
+# define USED
+#endif
+
+#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)
+# define NOEXCEPT noexcept
+#else
+# define NOEXCEPT throw()
+#endif
+
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@@ -197,7 +194,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
// Check macro
#define RAW_CHECK_MSG(expr, msg) do { \
- if (!(expr)) { \
+ if (UNLIKELY(!(expr))) { \
RawWrite(msg); \
Die(); \
} \
@@ -209,7 +206,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
do { \
__sanitizer::u64 v1 = (u64)(c1); \
__sanitizer::u64 v2 = (u64)(c2); \
- if (!(v1 op v2)) \
+ if (UNLIKELY(!(v1 op v2))) \
__sanitizer::CheckFailed(__FILE__, __LINE__, \
"(" #c1 ") " #op " (" #c2 ")", v1, v2); \
} while (false) \
@@ -223,7 +220,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
#define DCHECK(a) CHECK(a)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
@@ -303,4 +300,12 @@ extern "C" void* _ReturnAddress(void);
} while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
}
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ (void)enable_fp; \
+ } while (0)
+
#endif // SANITIZER_DEFS_H
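Not part of the patch: a small sketch of the CHECK_* macros touched above (their failure paths are now wrapped in UNLIKELY()); the rounding helper is hypothetical.

#include "sanitizer_internal_defs.h"

namespace __sanitizer {

// Hypothetical helper: round size up to a power-of-two boundary.
static uptr ExampleRoundUpTo(uptr size, uptr boundary) {
  CHECK_GT(boundary, 0);                   // calls CheckFailed() and dies if false
  CHECK_EQ(boundary & (boundary - 1), 0);  // boundary must be a power of two
  return (size + boundary - 1) & ~(boundary - 1);
}

}  // namespace __sanitizer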
diff --git a/libsanitizer/sanitizer_common/sanitizer_lfstack.h b/libsanitizer/sanitizer_common/sanitizer_lfstack.h
index 8033b9a7f9c..8bd0e91c400 100644
--- a/libsanitizer/sanitizer_common/sanitizer_lfstack.h
+++ b/libsanitizer/sanitizer_common/sanitizer_lfstack.h
@@ -47,8 +47,8 @@ struct LFStack {
u64 cmp = atomic_load(&head_, memory_order_acquire);
for (;;) {
T *cur = (T*)(uptr)(cmp & kPtrMask);
- if (cur == 0)
- return 0;
+ if (!cur)
+ return nullptr;
T *nxt = cur->next;
u64 cnt = (cmp & kCounterMask);
u64 xch = (u64)(uptr)nxt | cnt;
@@ -66,6 +66,6 @@ struct LFStack {
atomic_uint64_t head_;
};
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // #ifndef SANITIZER_LFSTACK_H
+#endif // SANITIZER_LFSTACK_H
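Not part of the patch: a sketch of LFStack with the nullptr-returning Pop() above; the node type and ExampleUse() are hypothetical.

#include "sanitizer_lfstack.h"

namespace __sanitizer {

struct ExampleNode {
  ExampleNode *next;  // LFStack is intrusive: nodes carry the link themselves
  int payload;
};

static LFStack<ExampleNode> example_stack;  // zero-initialized as a global

void ExampleUse(ExampleNode *node) {
  example_stack.Push(node);
  if (ExampleNode *popped = example_stack.Pop())  // nullptr when the stack is empty
    (void)popped->payload;
}

}  // namespace __sanitizer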
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc
index 53c87555092..05fdca622e4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -8,37 +8,37 @@
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//
+
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
namespace __sanitizer {
-// Make the compiler think that something is going on there.
-static inline void break_optimization(void *arg) {
-#if SANITIZER_WINDOWS
- // FIXME: make sure this is actually enough.
- __asm;
-#else
- __asm__ __volatile__("" : : "r" (arg) : "memory");
-#endif
-}
-
s64 internal_atoll(const char *nptr) {
- return internal_simple_strtoll(nptr, (char**)0, 10);
+ return internal_simple_strtoll(nptr, nullptr, 10);
}
void *internal_memchr(const void *s, int c, uptr n) {
- const char* t = (char*)s;
+ const char *t = (const char *)s;
for (uptr i = 0; i < n; ++i, ++t)
if (*t == c)
- return (void*)t;
- return 0;
+ return reinterpret_cast<void *>(const_cast<char *>(t));
+ return nullptr;
+}
+
+void *internal_memrchr(const void *s, int c, uptr n) {
+ const char *t = (const char *)s;
+ void *res = nullptr;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
+ }
+ return res;
}
int internal_memcmp(const void* s1, const void* s2, uptr n) {
- const char* t1 = (char*)s1;
- const char* t2 = (char*)s2;
+ const char *t1 = (const char *)s1;
+ const char *t2 = (const char *)s2;
for (uptr i = 0; i < n; ++i, ++t1, ++t2)
if (*t1 != *t2)
return *t1 < *t2 ? -1 : 1;
@@ -47,7 +47,7 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
void *internal_memcpy(void *dest, const void *src, uptr n) {
char *d = (char*)dest;
- char *s = (char*)src;
+ const char *s = (const char *)src;
for (uptr i = 0; i < n; ++i)
d[i] = s[i];
return dest;
@@ -55,7 +55,7 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
void *internal_memmove(void *dest, const void *src, uptr n) {
char *d = (char*)dest;
- char *s = (char*)src;
+ const char *s = (const char *)src;
sptr i, signed_n = (sptr)n;
CHECK_GE(signed_n, 0);
if (d < s) {
@@ -76,7 +76,8 @@ void internal_bzero_aligned16(void *s, uptr n) {
CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
p->a = p->b = 0;
- break_optimization(0); // Make sure this does not become memset.
+ // Make sure this does not become memset.
+ SanitizerBreakOptimization(nullptr);
}
}
@@ -95,7 +96,7 @@ void *internal_memset(void* s, int c, uptr n) {
uptr internal_strcspn(const char *s, const char *reject) {
uptr i;
for (i = 0; s[i]; i++) {
- if (internal_strchr(reject, s[i]) != 0)
+ if (internal_strchr(reject, s[i]))
return i;
}
return i;
@@ -109,6 +110,14 @@ char* internal_strdup(const char *s) {
return s2;
}
+char* internal_strndup(const char *s, uptr n) {
+ uptr len = internal_strnlen(s, n);
+ char *s2 = (char*)InternalAlloc(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
int internal_strcmp(const char *s1, const char *s2) {
while (true) {
unsigned c1 = *s1;
@@ -136,9 +145,9 @@ int internal_strncmp(const char *s1, const char *s2, uptr n) {
char* internal_strchr(const char *s, int c) {
while (true) {
if (*s == (char)c)
- return (char*)s;
+ return const_cast<char *>(s);
if (*s == 0)
- return 0;
+ return nullptr;
s++;
}
}
@@ -146,16 +155,16 @@ char* internal_strchr(const char *s, int c) {
char *internal_strchrnul(const char *s, int c) {
char *res = internal_strchr(s, c);
if (!res)
- res = (char*)s + internal_strlen(s);
+ res = const_cast<char *>(s) + internal_strlen(s);
return res;
}
char *internal_strrchr(const char *s, int c) {
- const char *res = 0;
+ const char *res = nullptr;
for (uptr i = 0; s[i]; i++) {
if (s[i] == c) res = s + i;
}
- return (char*)res;
+ return const_cast<char *>(res);
}
uptr internal_strlen(const char *s) {
@@ -191,12 +200,12 @@ char *internal_strstr(const char *haystack, const char *needle) {
// This is O(N^2), but we are not using it in hot places.
uptr len1 = internal_strlen(haystack);
uptr len2 = internal_strlen(needle);
- if (len1 < len2) return 0;
+ if (len1 < len2) return nullptr;
for (uptr pos = 0; pos <= len1 - len2; pos++) {
if (internal_memcmp(haystack + pos, needle, len2) == 0)
- return (char*)haystack + pos;
+ return const_cast<char *>(haystack) + pos;
}
- return 0;
+ return nullptr;
}
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
@@ -205,7 +214,7 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
int sgn = 1;
u64 res = 0;
bool have_digits = false;
- char *old_nptr = (char*)nptr;
+ char *old_nptr = const_cast<char *>(nptr);
if (*nptr == '+') {
sgn = 1;
nptr++;
@@ -220,8 +229,8 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
have_digits = true;
nptr++;
}
- if (endptr != 0) {
- *endptr = (have_digits) ? (char*)nptr : old_nptr;
+ if (endptr) {
+ *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;
}
if (sgn > 0) {
return (s64)(Min((u64)INT64_MAX, res));
@@ -249,4 +258,4 @@ bool mem_is_zero(const char *beg, uptr size) {
return all == 0;
}
-} // namespace __sanitizer
+} // namespace __sanitizer
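Not part of the patch: a sketch of the string helpers above (internal_strchr, the new internal_strndup, internal_simple_strtoll) in use; ExampleParsePair() and the "name=123" format are hypothetical.

#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

// Hypothetical helper: parse a "name=123" pair using the routines above.
void ExampleParsePair(const char *s) {
  const char *eq = internal_strchr(s, '=');
  if (!eq) return;
  char *name = internal_strndup(s, eq - s);   // NUL-terminated copy of "name"
  char *end;
  s64 value = internal_simple_strtoll(eq + 1, &end, 10);
  if (*end == '\0')
    Printf("parsed %s = %lld\n", name, value);
  InternalFree(name);
}

}  // namespace __sanitizer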
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
index ae23bc45e9d..1b3f8edfadb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -9,7 +9,9 @@
// run-time libraries.
// These tools can not use some of the libc functions directly because those
// functions are intercepted. Instead, we implement a tiny subset of libc here.
+// FIXME: Some of the functions declared in this file are in fact POSIX, not libc.
//===----------------------------------------------------------------------===//
+
#ifndef SANITIZER_LIBC_H
#define SANITIZER_LIBC_H
@@ -24,6 +26,7 @@ namespace __sanitizer {
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
+void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n);
@@ -36,6 +39,7 @@ char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
+char *internal_strndup(const char *s, uptr n);
uptr internal_strlen(const char *s);
char *internal_strncat(char *dst, const char *src, uptr n);
int internal_strncmp(const char *s1, const char *s2, uptr n);
@@ -52,40 +56,17 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...);
// Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size);
-
-// Memory
-uptr internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, u64 offset);
-uptr internal_munmap(void *addr, uptr length);
-
// I/O
-const fd_t kInvalidFd = -1;
+const fd_t kInvalidFd = (fd_t)-1;
const fd_t kStdinFd = 0;
-const fd_t kStdoutFd = 1;
-const fd_t kStderrFd = 2;
-uptr internal_close(fd_t fd);
-int internal_isatty(fd_t fd);
-
-// Use __sanitizer::OpenFile() instead.
-uptr internal_open(const char *filename, int flags);
-uptr internal_open(const char *filename, int flags, u32 mode);
+const fd_t kStdoutFd = (fd_t)1;
+const fd_t kStderrFd = (fd_t)2;
-uptr internal_read(fd_t fd, void *buf, uptr count);
-uptr internal_write(fd_t fd, const void *buf, uptr count);
+uptr internal_ftruncate(fd_t fd, uptr size);
// OS
-uptr internal_filesize(fd_t fd); // -1 on error.
-uptr internal_stat(const char *path, void *buf);
-uptr internal_lstat(const char *path, void *buf);
-uptr internal_fstat(fd_t fd, void *buf);
-uptr internal_dup2(int oldfd, int newfd);
-uptr internal_readlink(const char *path, char *buf, uptr bufsize);
-uptr internal_unlink(const char *path);
void NORETURN internal__exit(int exitcode);
-uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
-uptr internal_ptrace(int request, int pid, void *addr, void *data);
-uptr internal_waitpid(int pid, int *status, int options);
uptr internal_getpid();
uptr internal_getppid();
@@ -93,8 +74,8 @@ uptr internal_getppid();
uptr internal_sched_yield();
// Error handling
-bool internal_iserror(uptr retval, int *rverrno = 0);
+bool internal_iserror(uptr retval, int *rverrno = nullptr);
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_LIBC_H
+#endif // SANITIZER_LIBC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.cc b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
index 310e811df1d..4b8cbed5ee3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
@@ -6,10 +6,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
+#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
namespace __sanitizer {
@@ -17,35 +19,29 @@ namespace __sanitizer {
LibIgnore::LibIgnore(LinkerInitialized) {
}
-void LibIgnore::Init(const SuppressionContext &supp) {
+void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
BlockingMutexLock lock(&mutex_);
- CHECK_EQ(count_, 0);
- const uptr n = supp.SuppressionCount();
- for (uptr i = 0; i < n; i++) {
- const Suppression *s = supp.SuppressionAt(i);
- if (s->type != SuppressionLib)
- continue;
- if (count_ >= kMaxLibs) {
- Report("%s: too many called_from_lib suppressions (max: %d)\n",
- SanitizerToolName, kMaxLibs);
- Die();
- }
- Lib *lib = &libs_[count_++];
- lib->templ = internal_strdup(s->templ);
- lib->name = 0;
- lib->loaded = false;
+ if (count_ >= kMaxLibs) {
+ Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+ kMaxLibs);
+ Die();
}
+ Lib *lib = &libs_[count_++];
+ lib->templ = internal_strdup(name_templ);
+ lib->name = nullptr;
+ lib->real_name = nullptr;
+ lib->loaded = false;
}
void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target.
- InternalScopedBuffer<char> buf(4096);
- if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
- buf.data()[0]) {
+ InternalScopedString buf(kMaxPathLength);
+ if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
+ buf[0]) {
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
- if (!lib->loaded && lib->real_name == 0 &&
+ if (!lib->loaded && (!lib->real_name) &&
TemplateMatch(lib->templ, name))
lib->real_name = internal_strdup(buf.data());
}
@@ -53,7 +49,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
// Scan suppressions list and find newly loaded and unloaded libraries.
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedBuffer<char> module(4096);
+ InternalScopedString module(kMaxPathLength);
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
bool loaded = false;
@@ -63,7 +59,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
continue;
if (TemplateMatch(lib->templ, module.data()) ||
- (lib->real_name != 0 &&
+ (lib->real_name &&
internal_strcmp(lib->real_name, module.data()) == 0)) {
if (loaded) {
Report("%s: called_from_lib suppression '%s' is matched against"
@@ -74,9 +70,10 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
loaded = true;
if (lib->loaded)
continue;
- if (common_flags()->verbosity)
- Report("Matched called_from_lib suppression '%s' against library"
- " '%s'\n", lib->templ, module.data());
+ VReport(1,
+ "Matched called_from_lib suppression '%s' against library"
+ " '%s'\n",
+ lib->templ, module.data());
lib->loaded = true;
lib->name = internal_strdup(module.data());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
@@ -95,9 +92,9 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
}
void LibIgnore::OnLibraryUnloaded() {
- OnLibraryLoaded(0);
+ OnLibraryLoaded(nullptr);
}
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // #if SANITIZER_LINUX
+#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC
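Not part of the patch: a sketch of the reworked LibIgnore initialization, where ignored-library templates are now added one by one instead of being pulled from a SuppressionContext; the library name and ExampleInitLibIgnore() are hypothetical.

#include "sanitizer_libignore.h"

namespace __sanitizer {

static LibIgnore example_lib_ignore(LINKER_INITIALIZED);

void ExampleInitLibIgnore() {
  example_lib_ignore.AddIgnoredLibrary("libexample_plugin.so");
  // Rescan the loaded modules so code ranges of matching libraries are known.
  example_lib_ignore.OnLibraryLoaded(nullptr);
}

}  // namespace __sanitizer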
diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.h b/libsanitizer/sanitizer_common/sanitizer_libignore.h
index 2089c4bbeeb..84419d14fed 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.h
@@ -6,8 +6,8 @@
//===----------------------------------------------------------------------===//
//
// LibIgnore allows to ignore all interceptors called from a particular set
-// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions
-// from the provided SuppressionContext; finds code ranges for the libraries;
+// of dynamic libraries. LibIgnore can be initialized with several templates
+// of names of libraries to be ignored. It finds code ranges for the libraries;
// and checks whether the provided PC value belongs to the code ranges.
//
//===----------------------------------------------------------------------===//
@@ -17,7 +17,6 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
-#include "sanitizer_suppressions.h"
#include "sanitizer_atomic.h"
#include "sanitizer_mutex.h"
@@ -27,8 +26,8 @@ class LibIgnore {
public:
explicit LibIgnore(LinkerInitialized);
- // Fetches all "called_from_lib" suppressions from the SuppressionContext.
- void Init(const SuppressionContext &supp);
+ // Must be called during initialization.
+ void AddIgnoredLibrary(const char *name_templ);
// Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name);
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 69c9c1063f0..2cefa20a5f0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -11,9 +11,11 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
@@ -23,13 +25,26 @@
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
+#if !SANITIZER_FREEBSD
#include <asm/param.h>
+#endif
+
+// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
+// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
+// access stat from asm/stat.h, without conflicting with definition in
+// sys/stat.h, we use this trick.
+#if defined(__mips64)
+#include <asm/unistd.h>
+#include <sys/types.h>
+#define stat kernel_stat
+#include <asm/stat.h>
+#undef stat
+#endif
+
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
-#if !SANITIZER_ANDROID
#include <link.h>
-#endif
#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>
@@ -39,13 +54,25 @@
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <ucontext.h>
#include <unistd.h>
-#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+extern "C" {
+// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
+// FreeBSD 9.2 and 10.0.
+#include <sys/umtx.h>
+}
+extern char **environ; // provided by crt1
+#endif // SANITIZER_FREEBSD
#if !SANITIZER_ANDROID
#include <sys/signal.h>
#endif
+#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
@@ -55,11 +82,12 @@ struct kernel_timeval {
// <linux/futex.h> is broken on some linux distributions.
const int FUTEX_WAIT = 0;
const int FUTEX_WAKE = 1;
+#endif // SANITIZER_LINUX
-// Are we using 32-bit or 64-bit syscalls?
+// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_WORDSIZE == 64)
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@@ -67,57 +95,79 @@ const int FUTEX_WAKE = 1;
namespace __sanitizer {
-#ifdef __x86_64__
+#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
+#elif SANITIZER_LINUX && defined(__aarch64__)
+#include "sanitizer_syscall_linux_aarch64.inc"
#else
#include "sanitizer_syscall_generic.inc"
#endif
// --------------- sanitizer_libc.h
-uptr internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, u64 offset) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd,
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ OFF_T offset) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
- return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
+ // mmap2 specifies file offset in 4096-byte units.
+ CHECK(IsAligned(offset, 4096));
+ return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
+ offset / 4096);
#endif
}
uptr internal_munmap(void *addr, uptr length) {
- return internal_syscall(__NR_munmap, (uptr)addr, length);
+ return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
}
uptr internal_close(fd_t fd) {
- return internal_syscall(__NR_close, fd);
+ return internal_syscall(SYSCALL(close), fd);
}
uptr internal_open(const char *filename, int flags) {
- return internal_syscall(__NR_open, (uptr)filename, flags);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags);
+#endif
}
uptr internal_open(const char *filename, int flags, u32 mode) {
- return internal_syscall(__NR_open, (uptr)filename, flags, mode);
-}
-
-uptr OpenFile(const char *filename, bool write) {
- return internal_open(filename,
- write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
+ mode);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
+#endif
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, (uptr)buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf,
+ count));
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, (uptr)buf, count));
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf,
+ count));
return res;
}
-#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
+ (OFF_T)size));
+ return res;
+}
+
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -137,34 +187,72 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
}
#endif
+#if defined(__mips64)
+static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+ out->st_atime = in->st_atime_nsec;
+ out->st_mtime = in->st_mtime_nsec;
+ out->st_ctime = in->st_ctime_nsec;
+ out->st_ino = in->st_ino;
+}
+#endif
+
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_stat, (uptr)path, (uptr)buf);
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(stat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, 0);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if defined(__mips64)
+ // For mips64, stat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(stat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
+# endif
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_stat64, path, &buf64);
+ int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_lstat, (uptr)path, (uptr)buf);
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(lstat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_lstat64, path, &buf64);
+ int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
- return internal_syscall(__NR_fstat, fd, (uptr)buf);
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
#else
struct stat64 buf64;
- int res = internal_syscall(__NR_fstat64, fd, &buf64);
+ int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
@@ -178,77 +266,130 @@ uptr internal_filesize(fd_t fd) {
}
uptr internal_dup2(int oldfd, int newfd) {
- return internal_syscall(__NR_dup2, oldfd, newfd);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
+#else
+ return internal_syscall(SYSCALL(dup2), oldfd, newfd);
+#endif
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
- return internal_syscall(__NR_readlink, (uptr)path, (uptr)buf, bufsize);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD,
+ (uptr)path, (uptr)buf, bufsize);
+#else
+ return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
+#endif
}
uptr internal_unlink(const char *path) {
- return internal_syscall(__NR_unlink, (uptr)path);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
+#else
+ return internal_syscall(SYSCALL(unlink), (uptr)path);
+#endif
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath);
+#else
+ return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
+#endif
}
uptr internal_sched_yield() {
- return internal_syscall(__NR_sched_yield);
+ return internal_syscall(SYSCALL(sched_yield));
}
void internal__exit(int exitcode) {
- internal_syscall(__NR_exit_group, exitcode);
+#if SANITIZER_FREEBSD
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
Die(); // Unreachable.
}
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]) {
- return internal_syscall(__NR_execve, (uptr)filename, (uptr)argv, (uptr)envp);
+ return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
+ (uptr)envp);
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
+#else
if (internal_stat(filename, &st))
+#endif
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
uptr GetTid() {
- return internal_syscall(__NR_gettid);
+#if SANITIZER_FREEBSD
+ return (uptr)pthread_self();
+#else
+ return internal_syscall(SYSCALL(gettid));
+#endif
}
u64 NanoTime() {
+#if SANITIZER_FREEBSD
+ timeval tv;
+#else
kernel_timeval tv;
+#endif
internal_memset(&tv, 0, sizeof(tv));
- internal_syscall(__NR_gettimeofday, (uptr)&tv, 0);
+ internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
-// Like getenv, but reads env directly from /proc and does not use libc.
-// This function should be called first inside __asan_init.
+// Like getenv, but reads env directly from /proc (on Linux) or parses the
+// 'environ' array (on FreeBSD) and does not use libc. This function should be
+// called first inside __asan_init.
const char *GetEnv(const char *name) {
+#if SANITIZER_FREEBSD
+ if (::environ != 0) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = ::environ; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return 0; // Not found.
+#elif SANITIZER_LINUX
static char *environ;
static uptr len;
static bool inited;
if (!inited) {
inited = true;
uptr environ_size;
- len = ReadFileToBuffer("/proc/self/environ",
- &environ, &environ_size, 1 << 26);
+ if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len))
+ environ = nullptr;
}
- if (!environ || len == 0) return 0;
+ if (!environ || len == 0) return nullptr;
uptr namelen = internal_strlen(name);
const char *p = environ;
while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
// proc file has the format NAME=value\0NAME=value\0NAME=value\0...
const char* endp =
(char*)internal_memchr(p, '\0', len - (p - environ));
- if (endp == 0) // this entry isn't NUL terminated
- return 0;
+ if (!endp) // this entry isn't NUL terminated
+ return nullptr;
else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
return p + namelen + 1; // point after =
p = endp + 1;
}
- return 0; // Not found.
+ return nullptr; // Not found.
+#else
+#error "Unsupported platform"
+#endif
}
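
A hedged usage sketch: GetEnv() deliberately avoids libc, so it is safe to call from early initialization code before interceptors and the allocator are ready (the option name below is only an example):

  // Illustration: fetch an option string during early init.
  const char *options = GetEnv("ASAN_OPTIONS");  // may return nullptr
  if (options)
    Printf("ASAN_OPTIONS=%s\n", options);
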
extern "C" {
@@ -259,9 +400,13 @@ extern "C" {
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
- uptr buff_size = 0;
+ uptr buff_size;
+ uptr buff_len;
*arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
- ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
+ if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) {
+ (*arr)[0] = nullptr;
+ return;
+ }
(*arr)[0] = buff;
int count, i;
for (count = 1, i = 1; ; i++) {
@@ -272,7 +417,7 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
count++;
}
}
- (*arr)[count] = 0;
+ (*arr)[count] = nullptr;
}
#endif
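
For reference, the call sites above assume the updated ReadFileToBuffer() interface, which reports success separately from the number of bytes read. Its declaration is assumed to look roughly like this in sanitizer_common.h (the default maximum length is a guess):

  bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                        uptr *read_len, uptr max_len = 1 << 26);
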
@@ -303,254 +448,41 @@ void ReExec() {
Die();
}
-void PrepareForSandboxing() {
- // Some kinds of sandboxes may forbid filesystem access, so we won't be able
- // to read the file mappings from /proc/self/maps. Luckily, neither the
- // process will be able to load additional libraries, so it's fine to use the
- // cached mappings.
- MemoryMappingLayout::CacheMemoryMappings();
- // Same for /proc/self/exe in the symbolizer.
-#if !SANITIZER_GO
- if (Symbolizer *sym = Symbolizer::GetOrNull())
- sym->PrepareForSandboxing();
-#endif
-}
-
-// ----------------- sanitizer_procmaps.h
-// Linker initialized.
-ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
-StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- proc_self_maps_.len =
- ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
- &proc_self_maps_.mmaped_size, 1 << 26);
- if (cache_enabled) {
- if (proc_self_maps_.mmaped_size == 0) {
- LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
- }
- } else {
- CHECK_GT(proc_self_maps_.mmaped_size, 0);
- }
- Reset();
- // FIXME: in the future we may want to cache the mappings on demand only.
- if (cache_enabled)
- CacheMemoryMappings();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
- // Only unmap the buffer if it is different from the cached one. Otherwise
- // it will be unmapped when the cache is refreshed.
- if (proc_self_maps_.data != cached_proc_self_maps_.data) {
- UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
- }
-}
-
-void MemoryMappingLayout::Reset() {
- current_ = proc_self_maps_.data;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- SpinMutexLock l(&cache_lock_);
- // Don't invalidate the cache if the mappings are unavailable.
- ProcSelfMapsBuff old_proc_self_maps;
- old_proc_self_maps = cached_proc_self_maps_;
- cached_proc_self_maps_.len =
- ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
- &cached_proc_self_maps_.mmaped_size, 1 << 26);
- if (cached_proc_self_maps_.mmaped_size == 0) {
- cached_proc_self_maps_ = old_proc_self_maps;
- } else {
- if (old_proc_self_maps.mmaped_size) {
- UnmapOrDie(old_proc_self_maps.data,
- old_proc_self_maps.mmaped_size);
- }
- }
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- SpinMutexLock l(&cache_lock_);
- if (cached_proc_self_maps_.data) {
- proc_self_maps_ = cached_proc_self_maps_;
- }
-}
-
-// Parse a hex value in str and update str.
-static uptr ParseHex(char **str) {
- uptr x = 0;
- char *s;
- for (s = *str; ; s++) {
- char c = *s;
- uptr v = 0;
- if (c >= '0' && c <= '9')
- v = c - '0';
- else if (c >= 'a' && c <= 'f')
- v = c - 'a' + 10;
- else if (c >= 'A' && c <= 'F')
- v = c - 'A' + 10;
- else
- break;
- x = x * 16 + v;
- }
- *str = s;
- return x;
-}
-
-static bool IsOneOf(char c, char c1, char c2) {
- return c == c1 || c == c2;
-}
-
-static bool IsDecimal(char c) {
- return c >= '0' && c <= '9';
-}
-
-static bool IsHex(char c) {
- return (c >= '0' && c <= '9')
- || (c >= 'a' && c <= 'f');
-}
-
-static uptr ReadHex(const char *p) {
- uptr v = 0;
- for (; IsHex(p[0]); p++) {
- if (p[0] >= '0' && p[0] <= '9')
- v = v * 16 + p[0] - '0';
- else
- v = v * 16 + p[0] - 'a' + 10;
- }
- return v;
-}
-
-static uptr ReadDecimal(const char *p) {
- uptr v = 0;
- for (; IsDecimal(p[0]); p++)
- v = v * 10 + p[0] - '0';
- return v;
-}
-
-
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- char *last = proc_self_maps_.data + proc_self_maps_.len;
- if (current_ >= last) return false;
- uptr dummy;
- if (!start) start = &dummy;
- if (!end) end = &dummy;
- if (!offset) offset = &dummy;
- char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
- if (next_line == 0)
- next_line = last;
- // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
- *start = ParseHex(&current_);
- CHECK_EQ(*current_++, '-');
- *end = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- uptr local_protection = 0;
- CHECK(IsOneOf(*current_, '-', 'r'));
- if (*current_++ == 'r')
- local_protection |= kProtectionRead;
- CHECK(IsOneOf(*current_, '-', 'w'));
- if (*current_++ == 'w')
- local_protection |= kProtectionWrite;
- CHECK(IsOneOf(*current_, '-', 'x'));
- if (*current_++ == 'x')
- local_protection |= kProtectionExecute;
- CHECK(IsOneOf(*current_, 's', 'p'));
- if (*current_++ == 's')
- local_protection |= kProtectionShared;
- if (protection) {
- *protection = local_protection;
- }
- CHECK_EQ(*current_++, ' ');
- *offset = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ':');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- while (IsDecimal(*current_))
- current_++;
- // Qemu may lack the trailing space.
- // http://code.google.com/p/address-sanitizer/issues/detail?id=160
- // CHECK_EQ(*current_++, ' ');
- // Skip spaces.
- while (current_ < next_line && *current_ == ' ')
- current_++;
- // Fill in the filename.
- uptr i = 0;
- while (current_ < next_line) {
- if (filename && i < filename_size - 1)
- filename[i++] = *current_;
- current_++;
- }
- if (filename && i < filename_size)
- filename[i] = 0;
- current_ = next_line + 1;
- return true;
-}
-
-// Gets the object name and the offset by walking MemoryMappingLayout.
-bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size,
- uptr *protection) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
- protection);
-}
-
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
- char *smaps = 0;
- uptr smaps_cap = 0;
- uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
- &smaps, &smaps_cap, 64<<20);
- uptr start = 0;
- bool file = false;
- const char *pos = smaps;
- while (pos < smaps + smaps_len) {
- if (IsHex(pos[0])) {
- start = ReadHex(pos);
- for (; *pos != '/' && *pos > '\n'; pos++) {}
- file = *pos == '/';
- } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
- for (; *pos < '0' || *pos > '9'; pos++) {}
- uptr rss = ReadDecimal(pos) * 1024;
- cb(start, rss, file, stats, stats_size);
- }
- while (*pos++ != '\n') {}
- }
- UnmapOrDie(smaps, smaps_cap);
-}
-
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
MtxSleeping = 2
};
-BlockingMutex::BlockingMutex(LinkerInitialized) {
- CHECK_EQ(owner_, 0);
-}
-
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
- internal_syscall(__NR_futex, (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+#endif
+ }
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping)
- internal_syscall(__NR_futex, (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+ if (v == MtxSleeping) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+#endif
+ }
}
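
The lock follows the classic three-state futex protocol: 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible sleepers, so a wake is issued only when someone may actually be sleeping. A self-contained Linux-only sketch of the same protocol, for illustration rather than as the run-time's code:

  #include <linux/futex.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <atomic>

  static std::atomic<unsigned> word{0};  // 0 unlocked, 1 locked, 2 sleeping

  static void lock() {
    if (word.exchange(1, std::memory_order_acquire) == 0) return;  // fast path
    while (word.exchange(2, std::memory_order_acquire) != 0)
      syscall(SYS_futex, reinterpret_cast<unsigned *>(&word), FUTEX_WAIT, 2,
              nullptr, nullptr, 0);
  }

  static void unlock() {
    if (word.exchange(0, std::memory_order_release) == 2)  // had sleepers
      syscall(SYS_futex, reinterpret_cast<unsigned *>(&word), FUTEX_WAKE, 1,
              nullptr, nullptr, 0);
  }
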
void BlockingMutex::CheckLocked() {
@@ -563,71 +495,142 @@ void BlockingMutex::CheckLocked() {
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
struct linux_dirent {
+#if SANITIZER_X32 || defined(__aarch64__)
+ u64 d_ino;
+ u64 d_off;
+#else
unsigned long d_ino;
unsigned long d_off;
+#endif
unsigned short d_reclen;
+#ifdef __aarch64__
+ unsigned char d_type;
+#endif
char d_name[256];
};
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
- return internal_syscall(__NR_ptrace, request, pid, (uptr)addr, (uptr)data);
+ return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
+ (uptr)data);
}
uptr internal_waitpid(int pid, int *status, int options) {
- return internal_syscall(__NR_wait4, pid, (uptr)status, options,
+ return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
0 /* rusage */);
}
uptr internal_getpid() {
- return internal_syscall(__NR_getpid);
+ return internal_syscall(SYSCALL(getpid));
}
uptr internal_getppid() {
- return internal_syscall(__NR_getppid);
+ return internal_syscall(SYSCALL(getppid));
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
- return internal_syscall(__NR_getdents, fd, (uptr)dirp, count);
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
+#else
+ return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
+#endif
}
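
A hedged sketch of how a getdents buffer is typically walked (ThreadLister does essentially this when scanning /proc/<pid>/task; the buffer size and the already-open directory descriptor fd are illustrative):

  // Illustration: iterate one buffer's worth of entries from directory fd.
  char buf[4096];
  uptr nread = internal_getdents(fd, (struct linux_dirent *)buf, sizeof(buf));
  for (uptr off = 0; off < nread; ) {
    struct linux_dirent *e = (struct linux_dirent *)(buf + off);
    // e->d_name is NUL-terminated; d_reclen is the stride to the next entry.
    off += e->d_reclen;
  }
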
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
- return internal_syscall(__NR_lseek, fd, offset, whence);
+ return internal_syscall(SYSCALL(lseek), fd, offset, whence);
}
+#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
- return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
+ return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
+#endif
uptr internal_sigaltstack(const struct sigaltstack *ss,
struct sigaltstack *oss) {
- return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss);
+ return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
+}
+
+int internal_fork() {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
+#else
+ return internal_syscall(SYSCALL(fork));
+#endif
}
-uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
- return internal_syscall(__NR_rt_sigaction, signum, act, oldact,
- sizeof(__sanitizer_kernel_sigset_t));
+#if SANITIZER_LINUX
+#define SA_RESTORER 0x04000000
+// Doesn't set sa_restorer, use with caution (see below).
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
+ __sanitizer_kernel_sigaction_t k_act, k_oldact;
+ internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act;
+ __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
+ if (u_act) {
+ k_act.handler = u_act->handler;
+ k_act.sigaction = u_act->sigaction;
+ internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ // Without SA_RESTORER the kernel ignores the calls (probably returns EINVAL).
+ k_act.sa_flags = u_act->sa_flags | SA_RESTORER;
+ // FIXME: most often sa_restorer is unset, however the kernel requires it
+ // to point to a valid signal restorer that calls the rt_sigreturn syscall.
+ // If sa_restorer passed to the kernel is NULL, the program may crash upon
+ // signal delivery or fail to unwind the stack in the signal handler.
+ // The libc implementation of sigaction() passes its own restorer to
+ // rt_sigaction, so we need to do the same (we'll need to reimplement the
+ // restorers; for x86_64 the restorer address can be obtained from
+ // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact)).
+ k_act.sa_restorer = u_act->sa_restorer;
+ }
+
+ uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
+ (uptr)(u_act ? &k_act : nullptr),
+ (uptr)(u_oldact ? &k_oldact : nullptr),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
+
+ if ((result == 0) && u_oldact) {
+ u_oldact->handler = k_oldact.handler;
+ u_oldact->sigaction = k_oldact.sigaction;
+ internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ u_oldact->sa_flags = k_oldact.sa_flags;
+ u_oldact->sa_restorer = k_oldact.sa_restorer;
+ }
+ return result;
}
+#endif // SANITIZER_LINUX
-uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
- __sanitizer_kernel_sigset_t *oldset) {
- return internal_syscall(__NR_rt_sigprocmask, (uptr)how, &set->sig[0],
- &oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t));
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
+#else
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
+ return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
+ (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
+ sizeof(__sanitizer_kernel_sigset_t));
+#endif
}
-void internal_sigfillset(__sanitizer_kernel_sigset_t *set) {
+void internal_sigfillset(__sanitizer_sigset_t *set) {
internal_memset(set, 0xff, sizeof(*set));
}
-void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum) {
+#if SANITIZER_LINUX
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
CHECK_LT(signum, sizeof(*set) * 8);
- const uptr idx = signum / (sizeof(set->sig[0]) * 8);
- const uptr bit = signum % (sizeof(set->sig[0]) * 8);
- set->sig[idx] &= ~(1 << bit);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ k_set->sig[idx] &= ~(1 << bit);
}
+#endif // SANITIZER_LINUX
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
@@ -698,44 +701,52 @@ bool ThreadLister::GetDirectoryEntries() {
}
uptr GetPageSize() {
-#if defined(__x86_64__) || defined(__i386__)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
#endif
}
-static char proc_self_exe_cache_str[kMaxPathLength];
-static uptr proc_self_exe_cache_len = 0;
-
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+#if SANITIZER_FREEBSD
+ const int Mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+ const char *default_module_name = "kern.proc.pathname";
+ size_t Size = buf_len;
+ bool IsErr = (sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
+ int readlink_error = IsErr ? errno : 0;
+ uptr module_name_len = Size;
+#else
+ const char *default_module_name = "/proc/self/exe";
uptr module_name_len = internal_readlink(
- "/proc/self/exe", buf, buf_len);
+ default_module_name, buf, buf_len);
int readlink_error;
- if (internal_iserror(module_name_len, &readlink_error)) {
- if (proc_self_exe_cache_len) {
- // If available, use the cached module name.
- CHECK_LE(proc_self_exe_cache_len, buf_len);
- internal_strncpy(buf, proc_self_exe_cache_str, buf_len);
- module_name_len = internal_strlen(proc_self_exe_cache_str);
- } else {
- // We can't read /proc/self/exe for some reason, assume the name of the
- // binary is unknown.
- Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
- "some stack frames may not be symbolized\n", readlink_error);
- module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
- }
+ bool IsErr = internal_iserror(module_name_len, &readlink_error);
+#endif
+ if (IsErr) {
+ // We can't read binary name for some reason, assume it's unknown.
+ Report("WARNING: reading executable name failed with errno %d, "
+ "some stack frames may not be symbolized\n", readlink_error);
+ module_name_len = internal_snprintf(buf, buf_len, "%s",
+ default_module_name);
CHECK_LT(module_name_len, buf_len);
- buf[module_name_len] = '\0';
}
return module_name_len;
}
-void CacheBinaryName() {
- if (!proc_self_exe_cache_len) {
- proc_self_exe_cache_len =
- ReadBinaryName(proc_self_exe_cache_str, kMaxPathLength);
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+#if SANITIZER_LINUX
+ char *tmpbuf;
+ uptr tmpsize;
+ uptr tmplen;
+ if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen,
+ 1024 * 1024)) {
+ internal_strncpy(buf, tmpbuf, buf_len);
+ UnmapOrDie(tmpbuf, tmpsize);
+ return internal_strlen(buf);
}
+#endif
+ return ReadBinaryName(buf, buf_len);
}
// Match full names of the form /path/to/base_name{-,.}*
@@ -753,8 +764,11 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+ CHECK_NE(map, nullptr);
+#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
+#endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@@ -788,7 +802,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
#endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) && SANITIZER_LINUX
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@@ -807,7 +821,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
- /* %rax = syscall(%rax = __NR_clone,
+ /* %rax = syscall(%rax = SYSCALL(clone),
* %rdi = flags,
* %rsi = child_stack,
* %rdx = parent_tidptr,
@@ -841,7 +855,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
/* Return to parent. */
"1:\n"
: "=a" (res)
- : "a"(__NR_clone), "i"(__NR_exit),
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
"S"(child_stack),
"D"(flags),
"d"(parent_tidptr),
@@ -850,7 +864,283 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "rsp", "memory", "r11", "rcx");
return res;
}
-#endif // defined(__x86_64__)
-} // namespace __sanitizer
+#elif defined(__mips__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+ register void *a3 __asm__("$7") = newtls;
+ register int *a4 __asm__("$8") = child_tidptr;
+ // We don't have proper CFI directives here because it requires a lot of code
+ // for very marginal benefits.
+ __asm__ __volatile__(
+ /* $v0 = syscall($v0 = __NR_clone,
+ * $a0 = flags,
+ * $a1 = child_stack,
+ * $a2 = parent_tidptr,
+ * $a3 = new_tls,
+ * $a4 = child_tidptr)
+ */
+ ".cprestore 16;\n"
+ "move $4,%1;\n"
+ "move $5,%2;\n"
+ "move $6,%3;\n"
+ "move $7,%4;\n"
+ /* Store the fifth argument on the stack
+ * if we are using the 32-bit ABI.
+ */
+#if SANITIZER_WORDSIZE == 32
+ "lw %5,16($29);\n"
+#else
+ "move $8,%5;\n"
+#endif
+ "li $2,%6;\n"
+ "syscall;\n"
-#endif // SANITIZER_LINUX
+ /* if ($v0 != 0)
+ * return;
+ */
+ "bnez $2,1f;\n"
+
+ /* Call "fn(arg)". */
+ "ld $25,0($29);\n"
+ "ld $4,8($29);\n"
+ "jal $25;\n"
+
+ /* Call _exit($v0). */
+ "move $4,$2;\n"
+ "li $2,%7;\n"
+ "syscall;\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r" (res)
+ : "r"(flags),
+ "r"(child_stack),
+ "r"(parent_tidptr),
+ "r"(a3),
+ "r"(a4),
+ "i"(__NR_clone),
+ "i"(__NR_exit)
+ : "memory", "$29" );
+ return res;
+}
+#elif defined(__aarch64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+ register int (*__fn)(void *) __asm__("x0") = fn;
+ register void *__stack __asm__("x1") = child_stack;
+ register int __flags __asm__("x2") = flags;
+ register void *__arg __asm__("x3") = arg;
+ register int *__ptid __asm__("x4") = parent_tidptr;
+ register void *__tls __asm__("x5") = newtls;
+ register int *__ctid __asm__("x6") = child_tidptr;
+
+ __asm__ __volatile__(
+ "mov x0,x2\n" /* flags */
+ "mov x2,x4\n" /* ptid */
+ "mov x3,x5\n" /* tls */
+ "mov x4,x6\n" /* ctid */
+ "mov x8,%9\n" /* clone */
+
+ "svc 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp x0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldp x1, x0, [sp], #16\n"
+ "blr x1\n"
+
+ /* Call _exit(%r0). */
+ "mov x8, %10\n"
+ "svc 0x0\n"
+ "1:\n"
+
+ : "=r" (res)
+ : "i"(-EINVAL),
+ "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "x30", "memory");
+ return res;
+}
+#endif // defined(__x86_64__) && SANITIZER_LINUX
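
A hedged sketch of how internal_clone() is meant to be used (sanitizer_stoptheworld's tracer thread is the main client; the flag set and stack size below are illustrative, not the run-time's exact choice):

  // Illustration: spawn a helper that shares the address space but bypasses
  // glibc's TLS/TID bookkeeping, so it must stick to internal_* functions.
  static int TracerMain(void *arg) { return 0; }

  static const uptr kStackSize = 64 * 1024;
  char *stack = (char *)MmapOrDie(kStackSize, "tracer stack");
  uptr pid = internal_clone(TracerMain, stack + kStackSize,  // stack grows down
                            CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD,
                            nullptr /* arg */, nullptr /* parent_tidptr */,
                            nullptr /* newtls */, nullptr /* child_tidptr */);
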
+
+#if SANITIZER_ANDROID
+#define PROP_VALUE_MAX 92
+extern "C" SANITIZER_WEAK_ATTRIBUTE int __system_property_get(const char *name,
+ char *value);
+void GetExtraActivationFlags(char *buf, uptr size) {
+ CHECK(size > PROP_VALUE_MAX);
+ CHECK(&__system_property_get);
+ __system_property_get("asan.options", buf);
+}
+
+#if __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ // Any name starting with "lib" indicates a bug in L where library base names
+ // are returned instead of paths.
+ if (info->dlpi_name && info->dlpi_name[0] == 'l' &&
+ info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') {
+ *(bool *)data = true;
+ return 1;
+ }
+ return 0;
+}
+
+static atomic_uint32_t android_api_level;
+
+static AndroidApiLevel AndroidDetectApiLevel() {
+ if (!&dl_iterate_phdr)
+ return ANDROID_KITKAT; // K or lower
+ bool base_name_seen = false;
+ dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
+ if (base_name_seen)
+ return ANDROID_LOLLIPOP_MR1; // L MR1
+ return ANDROID_POST_LOLLIPOP; // post-L
+ // Plain L (API level 21) is completely broken wrt ASan and not very
+ // interesting to detect.
+}
+
+AndroidApiLevel AndroidGetApiLevel() {
+ AndroidApiLevel level =
+ (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
+ if (level) return level;
+ level = AndroidDetectApiLevel();
+ atomic_store(&android_api_level, level, memory_order_relaxed);
+ return level;
+}
+
+#endif
+
+bool IsDeadlySignal(int signum) {
+ if (common_flags()->handle_abort && signum == SIGABRT)
+ return true;
+ if (common_flags()->handle_sigfpe && signum == SIGFPE)
+ return true;
+ return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+}
+
+#ifndef SANITIZER_GO
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // Glibc uses the SIGSETXID signal during setuid calls. If this signal is
+ // blocked on any thread, the setuid call hangs (see test/tsan/setuid.c).
+ internal_sigdelset(&set, 33);
+#endif
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ void *th;
+ real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, nullptr);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ real_pthread_join(th, nullptr);
+}
+#else
+void *internal_start_thread(void (*func)(void *), void *arg) { return 0; }
+
+void internal_join_thread(void *th) {}
+#endif
+
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#if defined(__arm__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.arm_pc;
+ *bp = ucontext->uc_mcontext.arm_fp;
+ *sp = ucontext->uc_mcontext.arm_sp;
+#elif defined(__aarch64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.regs[29];
+ *sp = ucontext->uc_mcontext.sp;
+#elif defined(__hppa__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.sc_iaoq[0];
+ /* GCC uses %r3 whenever a frame pointer is needed. */
+ *bp = ucontext->uc_mcontext.sc_gr[3];
+ *sp = ucontext->uc_mcontext.sc_gr[30];
+#elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_rip;
+ *bp = ucontext->uc_mcontext.mc_rbp;
+ *sp = ucontext->uc_mcontext.mc_rsp;
+# else
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_RIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_RBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_RSP];
+# endif
+#elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_eip;
+ *bp = ucontext->uc_mcontext.mc_ebp;
+ *sp = ucontext->uc_mcontext.mc_esp;
+# else
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_EIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_EBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_ESP];
+# endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.regs->nip;
+ *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+ // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+ // pointer, but GCC always uses r31 when we need a frame pointer.
+ *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
+#elif defined(__sparc__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ uptr *stk_ptr;
+# if defined (__arch64__)
+ *pc = ucontext->uc_mcontext.mc_gregs[MC_PC];
+ *sp = ucontext->uc_mcontext.mc_gregs[MC_O6];
+ stk_ptr = (uptr *) (*sp + 2047);
+ *bp = stk_ptr[15];
+# else
+ *pc = ucontext->uc_mcontext.gregs[REG_PC];
+ *sp = ucontext->uc_mcontext.gregs[REG_O6];
+ stk_ptr = (uptr *) *sp;
+ *bp = stk_ptr[15];
+# endif
+#elif defined(__mips__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.gregs[30];
+ *sp = ucontext->uc_mcontext.gregs[29];
+#else
+# error "Unsupported arch"
+#endif
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h
index 6422df142e7..44977020bce 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.h
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.h
@@ -12,9 +12,10 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_posix.h"
#include "sanitizer_platform_limits_posix.h"
struct link_map; // Opaque type returned by dlopen().
@@ -27,20 +28,25 @@ struct linux_dirent;
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
-uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
-uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact);
-uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
- __sanitizer_kernel_sigset_t *oldset);
-void internal_sigfillset(__sanitizer_kernel_sigset_t *set);
-void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum);
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset);
+void internal_sigfillset(__sanitizer_sigset_t *set);
-#ifdef __x86_64__
+// Linux-only syscalls.
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
+// (like the process-wide error reporting SEGV handler) must use
+// internal_sigaction instead.
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
+#endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@@ -64,8 +70,6 @@ class ThreadLister {
int bytes_read_;
};
-void AdjustStackSizeLinux(void *attr);
-
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
@@ -75,14 +79,9 @@ uptr ThreadSelfOffset();
// information).
bool LibraryNameIs(const char *full_name, const char *base_name);
-// Read the name of the current binary from /proc/self/exe.
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
-// Cache the value of /proc/self/exe.
-void CacheBinaryName();
-
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_LINUX_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
index c9eb435be91..ff69664e7b9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -11,39 +11,69 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_freebsd.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
-#include "sanitizer_atomic.h"
-#include <dlfcn.h>
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <dlfcn.h> // for dlsym()
+#endif
+
+#include <link.h>
#include <pthread.h>
-#include <sys/prctl.h>
+#include <signal.h>
#include <sys/resource.h>
-#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <pthread_np.h>
+#include <osreldate.h>
+#define pthread_getattr_np pthread_attr_get_np
+#endif
+
+#if SANITIZER_LINUX
+#include <sys/prctl.h>
+#endif
+
+#if SANITIZER_ANDROID
+#include <android/api-level.h>
+#endif
+
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+#include <android/log.h>
+#else
+#include <syslog.h>
+#endif
#if !SANITIZER_ANDROID
#include <elf.h>
-#include <link.h>
#include <unistd.h>
#endif
-// This function is defined elsewhere if we intercepted pthread_attr_getstack.
-SANITIZER_WEAK_ATTRIBUTE
-int __sanitizer_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
- return pthread_attr_getstack((pthread_attr_t*)attr, addr, size);
-}
-
namespace __sanitizer {
+SANITIZER_WEAK_ATTRIBUTE int
+real_sigaction(int signum, const void *act, void *oldact);
+
+int internal_sigaction(int signum, const void *act, void *oldact) {
+#if !SANITIZER_GO
+ if (&real_sigaction)
+ return real_sigaction(signum, act, oldact);
+#endif
+ return sigaction(signum, (const struct sigaction *)act,
+ (struct sigaction *)oldact);
+}
+
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
- static const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
CHECK(stack_top);
CHECK(stack_bottom);
if (at_initialization) {
@@ -55,7 +85,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset;
uptr prev_end = 0;
- while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
+ while (proc_maps.Next(&start, &end, &offset, nullptr, 0,
+ /* protection */nullptr)) {
if ((uptr)&rl < end)
break;
prev_end = end;
@@ -77,10 +108,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
return;
}
pthread_attr_t attr;
+ pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
- void *stackaddr = 0;
- __sanitizer_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
+ void *stackaddr = nullptr;
+ my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
@@ -88,11 +120,10 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_bottom = (uptr)stackaddr;
}
-// Does not compile for Go because dlsym() requires -ldl
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
- if (f == 0)
+ if (!f)
return false;
typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
setenv_ft setenv_f;
@@ -123,61 +154,9 @@ bool SanitizerGetThreadName(char *name, int max_len) {
#endif
}
-#ifndef SANITIZER_GO
-//------------------------- SlowUnwindStack -----------------------------------
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
- uptr val;
- _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
- 15 /* r15 = PC */, _UVRSD_UINT32, &val);
- CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
- // Clear the Thumb bit.
- return val & ~(uptr)1;
-#else
- return _Unwind_GetIP(ctx);
-#endif
-}
-
-struct UnwindTraceArg {
- StackTrace *stack;
- uptr max_depth;
-};
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
- UnwindTraceArg *arg = (UnwindTraceArg*)param;
- CHECK_LT(arg->stack->size, arg->max_depth);
- uptr pc = Unwind_GetIP(ctx);
- arg->stack->trace[arg->stack->size++] = pc;
- if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
- return UNWIND_CONTINUE;
-}
-
-void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
- size = 0;
- if (max_depth == 0)
- return;
- UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
- _Unwind_Backtrace(Unwind_Trace, &arg);
- // We need to pop a few frames so that pc is on top.
- uptr to_pop = LocatePcInTrace(pc);
- // trace[0] belongs to the current function so we always pop it.
- if (to_pop == 0)
- to_pop = 1;
- PopStackFrames(to_pop);
- trace[0] = pc;
-}
-
-#endif // !SANITIZER_GO
-
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
static uptr g_tls_size;
+#endif
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
@@ -185,8 +164,28 @@ static uptr g_tls_size;
# define DL_INTERNAL_FUNCTION
#endif
+#if defined(__mips__) || defined(__powerpc64__)
+// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
+// head structure. It lies before the static tls blocks.
+static uptr TlsPreTcbSize() {
+# if defined(__mips__)
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+# elif defined(__powerpc64__)
+ const uptr kTcbHead = 88; // sizeof (tcbhead_t)
+# endif
+ const uptr kTlsAlign = 16;
+ const uptr kTlsPreTcbSize =
+ (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
+ InitTlsSize();
+ g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign - 1) & ~(kTlsAlign - 1);
+ return kTlsPreTcbSize;
+}
+#endif
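
A worked instance with the values used elsewhere in this patch: on powerpc64, ThreadDescriptorSize() is 1776 and kTcbHead is 88, so 1776 + 88 = 1864 rounds up to a 1872-byte pre-TCB area at 16-byte alignment; on 64-bit MIPS, 1776 + 16 = 1792 is already a multiple of 16.
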
+
void InitTlsSize() {
-#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
+ // All currently supported platforms use 16-byte stack alignment.
+ const size_t kStackAlign = 16;
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
@@ -197,31 +196,34 @@ void InitTlsSize() {
size_t tls_size = 0;
size_t tls_align = 0;
get_tls(&tls_size, &tls_align);
- g_tls_size = tls_size;
-#endif
-}
-
-uptr GetTlsSize() {
- return g_tls_size;
+ if (tls_align < kStackAlign)
+ tls_align = kStackAlign;
+ g_tls_size = RoundUpTo(tls_size, tls_align);
+#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
}
-#if defined(__x86_64__) || defined(__i386__)
-// sizeof(struct thread) from glibc.
+#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \
+ || defined(__aarch64__) || defined(__powerpc64__)) \
+ && SANITIZER_LINUX && !SANITIZER_ANDROID
+// sizeof(struct pthread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
uptr ThreadDescriptorSize() {
- char buf[64];
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
+#if defined(__x86_64__) || defined(__i386__)
#ifdef _CS_GNU_LIBC_VERSION
+ char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
char *end;
int minor = internal_simple_strtoll(buf + 8, &end, 10);
if (end != buf + 8 && (*end == '\0' || *end == '.')) {
- /* sizeof(struct thread) values from various glibc versions. */
- if (minor <= 3)
+ /* sizeof(struct pthread) values from various glibc versions. */
+ if (SANITIZER_X32)
+ val = 1728; // Assume only one particular version for x32.
+ else if (minor <= 3)
val = FIRST_32_SECOND_64(1104, 1696);
else if (minor == 4)
val = FIRST_32_SECOND_64(1120, 1728);
@@ -233,6 +235,8 @@ uptr ThreadDescriptorSize() {
val = FIRST_32_SECOND_64(1168, 1776);
else if (minor <= 12)
val = FIRST_32_SECOND_64(1168, 2288);
+ else if (minor == 13)
+ val = FIRST_32_SECOND_64(1168, 2304);
else
val = FIRST_32_SECOND_64(1216, 2304);
}
@@ -241,6 +245,22 @@ uptr ThreadDescriptorSize() {
return val;
}
#endif
+#elif defined(__mips__)
+ // TODO(sagarthakur): add more values as per different glibc versions.
+ val = FIRST_32_SECOND_64(1152, 1776);
+ if (val)
+ atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
+ return val;
+#elif defined(__aarch64__)
+ // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
+ val = 1776;
+ atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
+ return val;
+#elif defined(__powerpc64__)
+ val = 1776; // from glibc.ppc64le 2.20-8.fc21
+ atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
+ return val;
+#endif
return 0;
}
@@ -253,27 +273,116 @@ uptr ThreadSelfOffset() {
uptr ThreadSelf() {
uptr descr_addr;
-#ifdef __i386__
+# if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#else
+# elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#endif
+# elif defined(__mips__)
+ // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
+ // points to the end of the TCB + 0x7000. The pthread_descr structure is
+ // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
+ // TCB and the size of pthread_descr.
+ const uptr kTlsTcbOffset = 0x7000;
+ uptr thread_pointer;
+ asm volatile(".set push;\
+ .set mips64r2;\
+ rdhwr %0,$29;\
+ .set pop" : "=r" (thread_pointer));
+ descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
+# elif defined(__aarch64__)
+ descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
+# elif defined(__powerpc64__)
+ // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
+ // points to the end of the TCB + 0x7000. The pthread_descr structure is
+ // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
+ // TCB and the size of pthread_descr.
+ const uptr kTlsTcbOffset = 0x7000;
+ uptr thread_pointer;
+ asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
+ descr_addr = thread_pointer - TlsPreTcbSize();
+# else
+# error "unsupported CPU arch"
+# endif
return descr_addr;
}
-#endif // defined(__x86_64__) || defined(__i386__)
+#endif // (x86_64 || i386 || mips || aarch64 || powerpc64) && SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_FREEBSD
+static void **ThreadSelfSegbase() {
+ void **segbase = 0;
+# if defined(__i386__)
+ // sysarch(I386_GET_GSBASE, segbase);
+ __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
+# elif defined(__x86_64__)
+ // sysarch(AMD64_GET_FSBASE, segbase);
+ __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
+# else
+# error "unsupported CPU arch for FreeBSD platform"
+# endif
+ return segbase;
+}
+
+uptr ThreadSelf() {
+ return (uptr)ThreadSelfSegbase()[2];
+}
+#endif // SANITIZER_FREEBSD
+
+#if !SANITIZER_GO
+static void GetTls(uptr *addr, uptr *size) {
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if defined(__x86_64__) || defined(__i386__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
+ *addr -= *size;
+ *addr += ThreadDescriptorSize();
+# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
+# else
+ *addr = 0;
+ *size = 0;
+# endif
+#elif SANITIZER_FREEBSD
+ void** segbase = ThreadSelfSegbase();
+ *addr = 0;
+ *size = 0;
+ if (segbase != 0) {
+ // tcbalign = 16
+ // tls_size = round(tls_static_space, tcbalign);
+ // dtv = segbase[1];
+ // dtv[2] = segbase - tls_static_space;
+ void **dtv = (void**) segbase[1];
+ *addr = (uptr) dtv[2];
+ *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
+ }
+#elif SANITIZER_ANDROID
+ *addr = 0;
+ *size = 0;
+#else
+# error "Unknown OS"
+#endif
+}
+#endif
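
To make the x86 arithmetic concrete: glibc keeps the static TLS block immediately below the thread pointer, so with (say) a 4096-byte static TLS block and the 2304-byte struct pthread assumed above for recent 64-bit glibc, the reported range would start at tp - 4096 + 2304 = tp - 1792 and span 4096 bytes. The sizes here are assumptions; only the formula comes from the code.
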
+
+#if !SANITIZER_GO
+uptr GetTlsSize() {
+#if SANITIZER_FREEBSD || SANITIZER_ANDROID
+ uptr addr, size;
+ GetTls(&addr, &size);
+ return size;
+#else
+ return g_tls_size;
+#endif
+}
+#endif
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
-#if defined(__x86_64__) || defined(__i386__)
- *tls_addr = ThreadSelf();
- *tls_size = GetTlsSize();
- *tls_addr -= *tls_size;
- *tls_addr += ThreadDescriptorSize();
+#if SANITIZER_GO
+ // Stub implementation for Go.
+ *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#else
- *tls_addr = 0;
- *tls_size = 0;
-#endif
+ GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
@@ -289,45 +398,16 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*tls_addr = *stk_addr + *stk_size;
}
}
-#else // SANITIZER_GO
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-#endif // SANITIZER_GO
-}
-
-void AdjustStackSizeLinux(void *attr_) {
- pthread_attr_t *attr = (pthread_attr_t *)attr_;
- uptr stackaddr = 0;
- size_t stacksize = 0;
- __sanitizer_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
- // GLibC will return (0 - stacksize) as the stack address in the case when
- // stacksize is set, but stackaddr is not.
- bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
- // We place a lot of tool data into TLS, account for that.
- const uptr minstacksize = GetTlsSize() + 128*1024;
- if (stacksize < minstacksize) {
- if (!stack_set) {
- if (common_flags()->verbosity && stacksize != 0)
- Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
- minstacksize);
- pthread_attr_setstacksize(attr, minstacksize);
- } else {
- Printf("Sanitizer: pre-allocated stack size is insufficient: "
- "%zu < %zu\n", stacksize, minstacksize);
- Printf("Sanitizer: pthread_create is likely to fail.\n");
- }
- }
+#endif
}
-#if SANITIZER_ANDROID
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
- string_predicate_t filter) {
- return 0;
-}
-#else // SANITIZER_ANDROID
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
+# elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+# define Elf_Phdr XElf32_Phdr
+# define dl_phdr_info xdl_phdr_info
+# define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
+# endif
struct DlIteratePhdrData {
LoadedModule *modules;
@@ -341,43 +421,144 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
if (data->current_n == data->max_n)
return 0;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
- module_name.data()[0] = '\0';
+ InternalScopedString module_name(kMaxPathLength);
if (data->first) {
data->first = false;
// First module is the binary itself.
- ReadBinaryName(module_name.data(), module_name.size());
+ ReadBinaryNameCached(module_name.data(), module_name.size());
} else if (info->dlpi_name) {
- internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
+ module_name.append("%s", info->dlpi_name);
}
- if (module_name.data()[0] == '\0')
+ if (module_name[0] == '\0')
return 0;
if (data->filter && !data->filter(module_name.data()))
return 0;
- void *mem = &data->modules[data->current_n];
- LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
- info->dlpi_addr);
+ LoadedModule *cur_module = &data->modules[data->current_n];
+ cur_module->set(module_name.data(), info->dlpi_addr);
data->current_n++;
for (int i = 0; i < info->dlpi_phnum; i++) {
const Elf_Phdr *phdr = &info->dlpi_phdr[i];
if (phdr->p_type == PT_LOAD) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
- cur_module->addAddressRange(cur_beg, cur_end);
+ bool executable = phdr->p_flags & PF_X;
+ cur_module->addAddressRange(cur_beg, cur_end, executable);
}
}
return 0;
}
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
+#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
+ u32 api_level = AndroidGetApiLevel();
+ // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
+ // The runtime check allows the same library to work with
+ // both K and L (and future) Android releases.
+ if (api_level <= ANDROID_LOLLIPOP_MR1) { // L or earlier
+ MemoryMappingLayout memory_mapping(false);
+ return memory_mapping.DumpListOfModules(modules, max_modules, filter);
+ }
+#endif
CHECK(modules);
DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
dl_iterate_phdr(dl_iterate_phdr_cb, &data);
return data.current_n;
}
+
+// getrusage does not give us the current RSS, only the max RSS.
+// Still, this is better than nothing if /proc/self/statm is not available
+// for some reason, e.g. due to a sandbox.
+static uptr GetRSSFromGetrusage() {
+ struct rusage usage;
+ if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox.
+ return 0;
+ return usage.ru_maxrss << 10; // ru_maxrss is in Kb.
+}
+
+uptr GetRSS() {
+ if (!common_flags()->can_use_proc_maps_statm)
+ return GetRSSFromGetrusage();
+ fd_t fd = OpenFile("/proc/self/statm", RdOnly);
+ if (fd == kInvalidFd)
+ return GetRSSFromGetrusage();
+ char buf[64];
+ uptr len = internal_read(fd, buf, sizeof(buf) - 1);
+ internal_close(fd);
+ if ((sptr)len <= 0)
+ return 0;
+ buf[len] = 0;
+ // The format of the file is:
+ // 1084 89 69 11 0 79 0
+ // We need the second number which is RSS in pages.
+ char *pos = buf;
+ // Skip the first number.
+ while (*pos >= '0' && *pos <= '9')
+ pos++;
+ // Skip whitespaces.
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
+ pos++;
+ // Read the number.
+ uptr rss = 0;
+ while (*pos >= '0' && *pos <= '9')
+ rss = rss * 10 + *pos++ - '0';
+ return rss * GetPageSizeCached();
+}
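
A worked instance of the parse above, assuming 4 KiB pages: for the sample line "1084 89 69 11 0 79 0" the first field is skipped, rss is read as 89, and the function returns 89 * 4096 = 364544 bytes.
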
+
+// 64-bit Android targets don't provide the deprecated __android_log_write.
+// Starting with the L release, syslog() works and is preferable to
+// __android_log_write.
+#if SANITIZER_LINUX
+
+#if SANITIZER_ANDROID
+static atomic_uint8_t android_log_initialized;
+
+void AndroidLogInit() {
+ atomic_store(&android_log_initialized, 1, memory_order_release);
+}
+
+static bool IsSyslogAvailable() {
+ return atomic_load(&android_log_initialized, memory_order_acquire);
+}
+#else
+void AndroidLogInit() {}
+
+static bool IsSyslogAvailable() { return true; }
#endif // SANITIZER_ANDROID
-} // namespace __sanitizer
+static void WriteOneLineToSyslog(const char *s) {
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+ __android_log_write(ANDROID_LOG_INFO, NULL, s);
+#else
+ syslog(LOG_INFO, "%s", s);
+#endif
+}
+
+void WriteToSyslog(const char *buffer) {
+ if (!IsSyslogAvailable())
+ return;
+ char *copy = internal_strdup(buffer);
+ char *p = copy;
+ char *q;
+ // syslog, at least on Android, has an implicit message length limit.
+ // Print one line at a time.
+ do {
+ q = internal_strchr(p, '\n');
+ if (q)
+ *q = '\0';
+ WriteOneLineToSyslog(p);
+ if (q)
+ p = q + 1;
+ } while (q);
+ InternalFree(copy);
+}
+#endif // SANITIZER_LINUX
+
+} // namespace __sanitizer
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_list.h b/libsanitizer/sanitizer_common/sanitizer_list.h
index 9692e01b8e0..9216ede67fb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_list.h
+++ b/libsanitizer/sanitizer_common/sanitizer_list.h
@@ -9,6 +9,7 @@
// ThreadSanitizer, etc run-times.
//
//===----------------------------------------------------------------------===//
+
#ifndef SANITIZER_LIST_H
#define SANITIZER_LIST_H
@@ -24,8 +25,10 @@ namespace __sanitizer {
// non-zero-initialized objects before using.
template<class Item>
struct IntrusiveList {
+ friend class Iterator;
+
void clear() {
- first_ = last_ = 0;
+ first_ = last_ = nullptr;
size_ = 0;
}
@@ -34,11 +37,11 @@ struct IntrusiveList {
void push_back(Item *x) {
if (empty()) {
- x->next = 0;
+ x->next = nullptr;
first_ = last_ = x;
size_ = 1;
} else {
- x->next = 0;
+ x->next = nullptr;
last_->next = x;
last_ = x;
size_++;
@@ -47,7 +50,7 @@ struct IntrusiveList {
void push_front(Item *x) {
if (empty()) {
- x->next = 0;
+ x->next = nullptr;
first_ = last_ = x;
size_ = 1;
} else {
@@ -60,8 +63,8 @@ struct IntrusiveList {
void pop_front() {
CHECK(!empty());
first_ = first_->next;
- if (first_ == 0)
- last_ = 0;
+ if (!first_)
+ last_ = nullptr;
size_--;
}
@@ -111,12 +114,31 @@ struct IntrusiveList {
}
}
+ template<class ListTy, class ItemTy>
+ class IteratorBase {
+ public:
+ explicit IteratorBase(ListTy *list)
+ : list_(list), current_(list->first_) { }
+ ItemTy *next() {
+ ItemTy *ret = current_;
+ if (current_) current_ = current_->next;
+ return ret;
+ }
+ bool hasNext() const { return current_ != nullptr; }
+ private:
+ ListTy *list_;
+ ItemTy *current_;
+ };
+
+ typedef IteratorBase<IntrusiveList<Item>, Item> Iterator;
+ typedef IteratorBase<const IntrusiveList<Item>, const Item> ConstIterator;
+
// private, don't use directly.
uptr size_;
Item *first_;
Item *last_;
};
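
A hedged usage sketch of the new iterator (the item type is illustrative; any type with a public 'next' pointer works):

  // Illustration: walk an IntrusiveList with the added Iterator.
  struct Node { Node *next; int value; };

  static void IterateExample() {
    IntrusiveList<Node> list;
    list.clear();                     // lists are zero/clear()-initialized
    Node a = { nullptr, 1 }, b = { nullptr, 2 };
    list.push_back(&a);
    list.push_back(&b);
    IntrusiveList<Node>::Iterator it(&list);
    while (it.hasNext()) {
      Node *n = it.next();            // yields &a, then &b
      (void)n->value;
    }
  }
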
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_LIST_H
+#endif // SANITIZER_LIST_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index 288e31ca078..159db76f01e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -5,9 +5,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries and implements mac-specific functions from
-// sanitizer_libc.h.
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
@@ -21,24 +20,36 @@
#include <stdio.h>
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_mac.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_procmaps.h"
+#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetEnviron
+#else
+extern char **environ;
+#endif
+
+#include <errno.h>
#include <fcntl.h>
+#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
-#include <mach-o/loader.h>
+#include <mach/mach.h>
+#include <mach/vm_statistics.h>
#include <pthread.h>
#include <sched.h>
+#include <signal.h>
+#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
+#include <sys/sysctl.h>
#include <sys/types.h>
#include <unistd.h>
-#include <libkern/OSAtomic.h>
-#include <errno.h>
namespace __sanitizer {
@@ -47,6 +58,7 @@ namespace __sanitizer {
// ---------------------- sanitizer_libc.h
uptr internal_mmap(void *addr, size_t length, int prot, int flags,
int fd, u64 offset) {
+ if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL);
return (uptr)mmap(addr, length, prot, flags, fd, offset);
}
@@ -54,6 +66,10 @@ uptr internal_munmap(void *addr, uptr length) {
return munmap(addr, length);
}
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return mprotect(addr, length, prot);
+}
+
uptr internal_close(fd_t fd) {
return close(fd);
}
@@ -66,11 +82,6 @@ uptr internal_open(const char *filename, int flags, u32 mode) {
return open(filename, flags, mode);
}
-uptr OpenFile(const char *filename, bool write) {
- return internal_open(filename,
- write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
-}
-
uptr internal_read(fd_t fd, void *buf, uptr count) {
return read(fd, buf, count);
}
@@ -106,6 +117,10 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
return readlink(path, buf, bufsize);
}
+uptr internal_unlink(const char *path) {
+ return unlink(path);
+}
+
uptr internal_sched_yield() {
return sched_yield();
}
@@ -118,6 +133,31 @@ uptr internal_getpid() {
return getpid();
}
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ return sigaction(signum,
+ (struct sigaction *)act, (struct sigaction *)oldact);
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ return sigprocmask(how, set, oldset);
+}
+
+int internal_fork() {
+ // TODO(glider): this may call user's pthread_atfork() handlers which is bad.
+ return fork();
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ return rename(oldpath, newpath);
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@@ -136,12 +176,27 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK(stack_top);
CHECK(stack_bottom);
uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ // pthread_get_stacksize_np() returns an incorrect stack size for the main
+ // thread on Mavericks. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=261
+ if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
+ stacksize == (1 << 19)) {
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+ // Most often rl.rlim_cur will be the desired 8M.
+ if (rl.rlim_cur < kMaxThreadStackSize) {
+ stacksize = rl.rlim_cur;
+ } else {
+ stacksize = kMaxThreadStackSize;
+ }
+ }
void *stackaddr = pthread_get_stackaddr_np(pthread_self());
*stack_top = (uptr)stackaddr;
*stack_bottom = *stack_top - stacksize;
}
-const char *GetEnv(const char *name) {
+char **GetEnviron() {
+#if !SANITIZER_IOS
char ***env_ptr = _NSGetEnviron();
if (!env_ptr) {
Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
@@ -149,178 +204,53 @@ const char *GetEnv(const char *name) {
CHECK(env_ptr);
}
char **environ = *env_ptr;
+#endif
CHECK(environ);
+ return environ;
+}
+
+const char *GetEnv(const char *name) {
+ char **env = GetEnviron();
uptr name_len = internal_strlen(name);
- while (*environ != 0) {
- uptr len = internal_strlen(*environ);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
if (len > name_len) {
- const char *p = *environ;
+ const char *p = *env;
if (!internal_memcmp(p, name, name_len) &&
p[name_len] == '=') { // Match.
- return *environ + name_len + 1; // String starting after =.
+ return *env + name_len + 1; // String starting after =.
}
}
- environ++;
+ env++;
}
return 0;
}
-void ReExec() {
- UNIMPLEMENTED();
-}
-
-void PrepareForSandboxing() {
- // Nothing here for now.
-}
-
-uptr GetPageSize() {
- return sysconf(_SC_PAGESIZE);
-}
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ CHECK_LE(kMaxPathLength, buf_len);
-// ----------------- sanitizer_procmaps.h
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- Reset();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
-}
-
-// More information about Mach-O headers can be found in mach-o/loader.h
-// Each Mach-O image has a header (mach_header or mach_header_64) starting with
-// a magic number, and a list of linker load commands directly following the
-// header.
-// A load command is at least two 32-bit words: the command type and the
-// command size in bytes. We're interested only in segment load commands
-// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
-// into the task's address space.
-// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
-// segment_command_64 correspond to the memory address, memory size and the
-// file offset of the current memory segment.
-// Because these fields are taken from the images as is, one needs to add
-// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
-
-void MemoryMappingLayout::Reset() {
- // Count down from the top.
- // TODO(glider): as per man 3 dyld, iterating over the headers with
- // _dyld_image_count is thread-unsafe. We need to register callbacks for
- // adding and removing images which will invalidate the MemoryMappingLayout
- // state.
- current_image_ = _dyld_image_count();
- current_load_cmd_count_ = -1;
- current_load_cmd_addr_ = 0;
- current_magic_ = 0;
- current_filetype_ = 0;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- // No-op on Mac for now.
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- // No-op on Mac for now.
-}
-
-// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
-// Google Perftools, http://code.google.com/p/google-perftools.
-
-// NextSegmentLoad scans the current image for the next segment load command
-// and returns the start and end addresses and file offset of the corresponding
-// segment.
-// Note that the segment addresses are not necessarily sorted.
-template<u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(
- uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection) {
- if (protection)
- UNIMPLEMENTED();
- const char* lc = current_load_cmd_addr_;
- current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
- if (((const load_command *)lc)->cmd == kLCSegment) {
- const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
- const SegmentCommand* sc = (const SegmentCommand *)lc;
- if (start) *start = sc->vmaddr + dlloff;
- if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
- if (offset) {
- if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
- *offset = sc->vmaddr;
- } else {
- *offset = sc->fileoff;
- }
- }
- if (filename) {
- internal_strncpy(filename, _dyld_get_image_name(current_image_),
- filename_size);
- }
- return true;
+ // On OS X the executable path is saved to the stack by dyld. Reading it
+ // from there is much faster than calling dladdr, especially for large
+ // binaries with symbols.
+ InternalScopedString exe_path(kMaxPathLength);
+ uint32_t size = exe_path.size();
+ if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
+ realpath(exe_path.data(), buf) != 0) {
+ return internal_strlen(buf);
}
- return false;
-}
-
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- for (; current_image_ >= 0; current_image_--) {
- const mach_header* hdr = _dyld_get_image_header(current_image_);
- if (!hdr) continue;
- if (current_load_cmd_count_ < 0) {
- // Set up for this image;
- current_load_cmd_count_ = hdr->ncmds;
- current_magic_ = hdr->magic;
- current_filetype_ = hdr->filetype;
- switch (current_magic_) {
-#ifdef MH_MAGIC_64
- case MH_MAGIC_64: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
- break;
- }
-#endif
- case MH_MAGIC: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
- break;
- }
- default: {
- continue;
- }
- }
- }
+ return 0;
+}
- for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
- switch (current_magic_) {
- // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
-#ifdef MH_MAGIC_64
- case MH_MAGIC_64: {
- if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size, protection))
- return true;
- break;
- }
-#endif
- case MH_MAGIC: {
- if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size, protection))
- return true;
- break;
- }
- }
- }
- // If we get here, no more load_cmd's in this image talk about
- // segments. Go on to the next image.
- }
- return false;
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
}
-bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[],
- uptr filename_size,
- uptr *protection) {
- return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
- protection);
+void ReExec() {
+ UNIMPLEMENTED();
}
-BlockingMutex::BlockingMutex(LinkerInitialized) {
- // We assume that OS_SPINLOCK_INIT is zero
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
}
BlockingMutex::BlockingMutex() {
@@ -377,32 +307,106 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
MemoryMappingLayout memory_mapping(false);
- memory_mapping.Reset();
- uptr cur_beg, cur_end, cur_offset;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
- uptr n_modules = 0;
- for (uptr i = 0;
- n_modules < max_modules &&
- memory_mapping.Next(&cur_beg, &cur_end, &cur_offset,
- module_name.data(), module_name.size(), 0);
- i++) {
- const char *cur_name = module_name.data();
- if (cur_name[0] == '\0')
- continue;
- if (filter && !filter(cur_name))
- continue;
- LoadedModule *cur_module = 0;
- if (n_modules > 0 &&
- 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
- cur_module = &modules[n_modules - 1];
- } else {
- void *mem = &modules[n_modules];
- cur_module = new(mem) LoadedModule(cur_name, cur_beg);
- n_modules++;
+ return memory_mapping.DumpListOfModules(modules, max_modules, filter);
+}
+
+bool IsDeadlySignal(int signum) {
+ return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+}
+
+MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
+
+MacosVersion GetMacosVersionInternal() {
+ int mib[2] = { CTL_KERN, KERN_OSRELEASE };
+ char version[100];
+ uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
+ for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
+ // Get the version length.
+ CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
+ CHECK_LT(len, maxlen);
+ CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
+ switch (version[0]) {
+ case '9': return MACOS_VERSION_LEOPARD;
+ case '1': {
+ switch (version[1]) {
+ case '0': return MACOS_VERSION_SNOW_LEOPARD;
+ case '1': return MACOS_VERSION_LION;
+ case '2': return MACOS_VERSION_MOUNTAIN_LION;
+ case '3': return MACOS_VERSION_MAVERICKS;
+ case '4': return MACOS_VERSION_YOSEMITE;
+ default:
+ if (IsDigit(version[1]))
+ return MACOS_VERSION_UNKNOWN_NEWER;
+ else
+ return MACOS_VERSION_UNKNOWN;
+ }
}
- cur_module->addAddressRange(cur_beg, cur_end);
+ default: return MACOS_VERSION_UNKNOWN;
+ }
+}
+
+MacosVersion GetMacosVersion() {
+ atomic_uint32_t *cache =
+ reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
+ MacosVersion result =
+ static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
+ if (result == MACOS_VERSION_UNINITIALIZED) {
+ result = GetMacosVersionInternal();
+ atomic_store(cache, result, memory_order_release);
+ }
+ return result;
+}
+
+uptr GetRSS() {
+ struct task_basic_info info;
+ unsigned count = TASK_BASIC_INFO_COUNT;
+ kern_return_t result =
+ task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count);
+ if (UNLIKELY(result != KERN_SUCCESS)) {
+ Report("Cannot get task info. Error: %d\n", result);
+ Die();
}
- return n_modules;
+ return info.resident_size;
+}
+
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ pthread_t th;
+ pthread_create(&th, 0, (void*(*)(void *arg))func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
+
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if defined(__aarch64__)
+ *pc = ucontext->uc_mcontext->__ss.__pc;
+# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
+ *bp = ucontext->uc_mcontext->__ss.__fp;
+# else
+ *bp = ucontext->uc_mcontext->__ss.__lr;
+# endif
+ *sp = ucontext->uc_mcontext->__ss.__sp;
+# elif defined(__x86_64__)
+ *pc = ucontext->uc_mcontext->__ss.__rip;
+ *bp = ucontext->uc_mcontext->__ss.__rbp;
+ *sp = ucontext->uc_mcontext->__ss.__rsp;
+# elif defined(__arm__)
+ *pc = ucontext->uc_mcontext->__ss.__pc;
+ *bp = ucontext->uc_mcontext->__ss.__r[7];
+ *sp = ucontext->uc_mcontext->__ss.__sp;
+# elif defined(__i386__)
+ *pc = ucontext->uc_mcontext->__ss.__eip;
+ *bp = ucontext->uc_mcontext->__ss.__ebp;
+ *sp = ucontext->uc_mcontext->__ss.__esp;
+# else
+# error "Unknown architecture"
+# endif
}
} // namespace __sanitizer
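
For reference, the GetMacosVersion() pair added above memoizes the Darwin kernel release (queried through sysctl(CTL_KERN, KERN_OSRELEASE)) in an atomic so the sysctl runs at most once. The standalone sketch below, which is not part of the patch and only builds on Darwin, restates that probe outside the runtime; the "10.(major - 4)" mapping in the printf is an illustrative shorthand that holds for the releases the MacosVersion enum covers.

// Standalone sketch (Darwin only, not sanitizer code): the same KERN_OSRELEASE
// probe that GetMacosVersionInternal() performs, reduced to a tiny program.
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  int mib[2] = { CTL_KERN, KERN_OSRELEASE };
  char release[100] = {};
  size_t len = 0;
  // The first call reports the required length, the second fills the buffer.
  if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1 || len >= sizeof(release))
    return 1;
  if (sysctl(mib, 2, release, &len, NULL, 0) == -1)
    return 1;
  int darwin_major = atoi(release);  // e.g. "13.4.0" -> 13 (Mavericks)
  // For the versions enumerated in the patch, OS X 10.x ships Darwin x + 4.
  printf("Darwin %s -> OS X 10.%d\n", release, darwin_major - 4);
  return 0;
}
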
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
new file mode 100644
index 00000000000..2aac83e3657
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -0,0 +1,39 @@
+//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for OSX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_MAC_H
+#define SANITIZER_MAC_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_posix.h"
+
+namespace __sanitizer {
+
+enum MacosVersion {
+ MACOS_VERSION_UNINITIALIZED = 0,
+ MACOS_VERSION_UNKNOWN,
+ MACOS_VERSION_LEOPARD,
+ MACOS_VERSION_SNOW_LEOPARD,
+ MACOS_VERSION_LION,
+ MACOS_VERSION_MOUNTAIN_LION,
+ MACOS_VERSION_MAVERICKS,
+ MACOS_VERSION_YOSEMITE,
+ MACOS_VERSION_UNKNOWN_NEWER
+};
+
+MacosVersion GetMacosVersion();
+
+char **GetEnviron();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc b/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc
new file mode 100644
index 00000000000..6ed0f69de97
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc
@@ -0,0 +1,337 @@
+//===-- sanitizer_malloc_mac.inc --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains Mac-specific malloc interceptors and a custom zone
+// implementation, which together replace the system allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if !SANITIZER_MAC
+#error "This file should only be compiled on Darwin."
+#endif
+
+#include <AvailabilityMacros.h>
+#include <CoreFoundation/CFBase.h>
+#include <dlfcn.h>
+#include <malloc/malloc.h>
+#include <sys/mman.h>
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_mac.h"
+
+// Similar code is used in Google Perftools,
+// http://code.google.com/p/google-perftools.
+
+static malloc_zone_t sanitizer_zone;
+
+INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
+ vm_size_t start_size, unsigned zone_flags) {
+ COMMON_MALLOC_ENTER();
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ COMMON_MALLOC_MEMALIGN(page_size, allocated_size);
+ malloc_zone_t *new_zone = (malloc_zone_t *)p;
+ internal_memcpy(new_zone, &sanitizer_zone, sizeof(sanitizer_zone));
+ new_zone->zone_name = NULL; // The name will be changed anyway.
+ if (GetMacosVersion() >= MACOS_VERSION_LION) {
+ // Prevent the client app from overwriting the zone contents.
+ // Library functions that need to modify the zone will set PROT_WRITE on it.
+ // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
+ mprotect(new_zone, allocated_size, PROT_READ);
+ }
+ return new_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
+ COMMON_MALLOC_ENTER();
+ return &sanitizer_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
+ // FIXME: ASan should support purgeable allocations.
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=139
+ COMMON_MALLOC_ENTER();
+ return &sanitizer_zone;
+}
+
+INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ COMMON_MALLOC_ENTER();
+}
+
+INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ COMMON_MALLOC_ENTER();
+ // Must return 0 if the contents were not purged since the last call to
+ // malloc_make_purgeable().
+ return 0;
+}
+
+INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
+ COMMON_MALLOC_ENTER();
+ // Allocate |sizeof(COMMON_MALLOC_ZONE_NAME "-") + internal_strlen(name)|
+ // bytes.
+ size_t buflen =
+ sizeof(COMMON_MALLOC_ZONE_NAME "-") + (name ? internal_strlen(name) : 0);
+ InternalScopedString new_name(buflen);
+ if (name && zone->introspect == sanitizer_zone.introspect) {
+ new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
+ name = new_name.data();
+ }
+
+ // Call the system malloc's implementation for both external and our zones,
+ // since that appropriately changes VM region protections on the zone.
+ REAL(malloc_set_zone_name)(zone, name);
+}
+
+INTERCEPTOR(void *, malloc, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MALLOC(size);
+ return p;
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+ COMMON_MALLOC_ENTER();
+ if (!ptr) return;
+ COMMON_MALLOC_FREE(ptr);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_REALLOC(ptr, size);
+ return p;
+}
+
+INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_CALLOC(nmemb, size);
+ return p;
+}
+
+INTERCEPTOR(void *, valloc, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_VALLOC(size);
+ return p;
+}
+
+INTERCEPTOR(size_t, malloc_good_size, size_t size) {
+ COMMON_MALLOC_ENTER();
+ return sanitizer_zone.introspect->good_size(&sanitizer_zone, size);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
+ COMMON_MALLOC_ENTER();
+ CHECK(memptr);
+ COMMON_MALLOC_MEMALIGN(alignment, size);
+ if (p) {
+ *memptr = p;
+ return 0;
+ }
+ return -1;
+}
+
+namespace {
+
+// TODO(glider): the __sanitizer_mz_* functions should be united with the Linux
+// wrappers, as they are basically copied from there.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+size_t __sanitizer_mz_size(malloc_zone_t* zone, const void* ptr) {
+ COMMON_MALLOC_SIZE(ptr);
+ return size;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MALLOC(size);
+ return p;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
+ if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ const size_t kCallocPoolSize = 1024;
+ static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+ static size_t allocated;
+ size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+ void *mem = (void*)&calloc_memory_for_dlsym[allocated];
+ allocated += size_in_words;
+ CHECK(allocated < kCallocPoolSize);
+ return mem;
+ }
+ COMMON_MALLOC_CALLOC(nmemb, size);
+ return p;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_valloc(malloc_zone_t *zone, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_VALLOC(size);
+ return p;
+}
+
+#define GET_ZONE_FOR_PTR(ptr) \
+ malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
+ const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name
+
+void ALWAYS_INLINE free_common(void *context, void *ptr) {
+ if (!ptr) return;
+ // FIXME: need to retire this flag.
+ if (!COMMON_MALLOC_IGNORE_INVALID_FREE) {
+ COMMON_MALLOC_FREE(ptr);
+ } else {
+ GET_ZONE_FOR_PTR(ptr);
+ COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name);
+ }
+}
+
+// TODO(glider): the allocation callbacks need to be refactored.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
+ free_common(zone, ptr);
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_realloc(malloc_zone_t *zone, void *ptr, size_t new_size) {
+ if (!ptr) {
+ COMMON_MALLOC_MALLOC(new_size);
+ return p;
+ } else {
+ COMMON_MALLOC_SIZE(ptr);
+ if (size) {
+ COMMON_MALLOC_REALLOC(ptr, new_size);
+ return p;
+ } else {
+ // We can't recover from reallocating an unknown address, because
+ // this would require reading at most |new_size| bytes from
+      // potentially inaccessible memory.
+ GET_ZONE_FOR_PTR(ptr);
+ COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name);
+ return nullptr;
+ }
+ }
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_mz_destroy(malloc_zone_t* zone) {
+ // A no-op -- we will not be destroyed!
+ Report("__sanitizer_mz_destroy() called -- ignoring\n");
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MEMALIGN(align, size);
+ return p;
+}
+
+// This function is currently unused, and we build with -Werror.
+#if 0
+void __sanitizer_mz_free_definite_size(
+ malloc_zone_t* zone, void *ptr, size_t size) {
+ // TODO(glider): check that |size| is valid.
+ UNIMPLEMENTED();
+}
+#endif
+
+kern_return_t mi_enumerator(task_t task, void *,
+ unsigned type_mask, vm_address_t zone_address,
+ memory_reader_t reader,
+ vm_range_recorder_t recorder) {
+ // Should enumerate all the pointers we have. Seems like a lot of work.
+ return KERN_FAILURE;
+}
+
+size_t mi_good_size(malloc_zone_t *zone, size_t size) {
+  // I think it's always safe to return size, but maybe we could do better.
+ return size;
+}
+
+boolean_t mi_check(malloc_zone_t *zone) {
+ UNIMPLEMENTED();
+}
+
+void mi_print(malloc_zone_t *zone, boolean_t verbose) {
+ UNIMPLEMENTED();
+}
+
+void mi_log(malloc_zone_t *zone, void *address) {
+ // I don't think we support anything like this
+}
+
+void mi_force_lock(malloc_zone_t *zone) {
+ COMMON_MALLOC_FORCE_LOCK();
+}
+
+void mi_force_unlock(malloc_zone_t *zone) {
+ COMMON_MALLOC_FORCE_UNLOCK();
+}
+
+void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
+ COMMON_MALLOC_FILL_STATS(zone, stats);
+}
+
+boolean_t mi_zone_locked(malloc_zone_t *zone) {
+ // UNIMPLEMENTED();
+ return false;
+}
+
+} // unnamed namespace
+
+namespace COMMON_MALLOC_NAMESPACE {
+
+void ReplaceSystemMalloc() {
+ static malloc_introspection_t sanitizer_zone_introspection;
+ // Ok to use internal_memset, these places are not performance-critical.
+ internal_memset(&sanitizer_zone_introspection, 0,
+ sizeof(sanitizer_zone_introspection));
+
+ sanitizer_zone_introspection.enumerator = &mi_enumerator;
+ sanitizer_zone_introspection.good_size = &mi_good_size;
+ sanitizer_zone_introspection.check = &mi_check;
+ sanitizer_zone_introspection.print = &mi_print;
+ sanitizer_zone_introspection.log = &mi_log;
+ sanitizer_zone_introspection.force_lock = &mi_force_lock;
+ sanitizer_zone_introspection.force_unlock = &mi_force_unlock;
+ sanitizer_zone_introspection.statistics = &mi_statistics;
+ sanitizer_zone_introspection.zone_locked = &mi_zone_locked;
+
+ internal_memset(&sanitizer_zone, 0, sizeof(malloc_zone_t));
+
+ // Use version 6 for OSX >= 10.6.
+ sanitizer_zone.version = 6;
+ sanitizer_zone.zone_name = COMMON_MALLOC_ZONE_NAME;
+ sanitizer_zone.size = &__sanitizer_mz_size;
+ sanitizer_zone.malloc = &__sanitizer_mz_malloc;
+ sanitizer_zone.calloc = &__sanitizer_mz_calloc;
+ sanitizer_zone.valloc = &__sanitizer_mz_valloc;
+ sanitizer_zone.free = &__sanitizer_mz_free;
+ sanitizer_zone.realloc = &__sanitizer_mz_realloc;
+ sanitizer_zone.destroy = &__sanitizer_mz_destroy;
+ sanitizer_zone.batch_malloc = 0;
+ sanitizer_zone.batch_free = 0;
+ sanitizer_zone.free_definite_size = 0;
+ sanitizer_zone.memalign = &__sanitizer_mz_memalign;
+ sanitizer_zone.introspect = &sanitizer_zone_introspection;
+
+ // Register the zone.
+ malloc_zone_register(&sanitizer_zone);
+}
+
+} // namespace COMMON_MALLOC_NAMESPACE
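
For context, ReplaceSystemMalloc() above works by filling in a malloc_zone_t (version 6, backed by a custom malloc_introspection_t) and handing it to malloc_zone_register(). The sketch below, assuming a Darwin host and using purely illustrative names (demo_zone, my_malloc, ...), registers a minimal zone the same way; unlike the patch, every callback simply defers to libc instead of a sanitizer allocator.

// Standalone sketch (Darwin only, illustrative, not sanitizer code): register
// a minimal pass-through malloc zone in the same shape ReplaceSystemMalloc()
// uses. Error handling and the full introspection table are omitted.
#include <malloc/malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t my_size(malloc_zone_t *, const void *p) { return malloc_size(p); }
static void *my_malloc(malloc_zone_t *, size_t size) { return malloc(size); }
static void *my_calloc(malloc_zone_t *, size_t n, size_t sz) { return calloc(n, sz); }
static void *my_valloc(malloc_zone_t *, size_t size) { return valloc(size); }
static void my_free(malloc_zone_t *, void *p) { free(p); }
static void *my_realloc(malloc_zone_t *, void *p, size_t sz) { return realloc(p, sz); }
static void my_destroy(malloc_zone_t *) {}
static void my_lock(malloc_zone_t *) {}    // no-op force_lock/force_unlock so
static void my_unlock(malloc_zone_t *) {}  // fork() handlers have something to call

int main() {
  static malloc_introspection_t intro;
  memset(&intro, 0, sizeof(intro));
  intro.force_lock = my_lock;
  intro.force_unlock = my_unlock;

  static malloc_zone_t zone;
  memset(&zone, 0, sizeof(zone));
  zone.version = 6;                        // same zone version the patch targets
  zone.zone_name = "demo_zone";
  zone.size = my_size;
  zone.malloc = my_malloc;
  zone.calloc = my_calloc;
  zone.valloc = my_valloc;
  zone.free = my_free;
  zone.realloc = my_realloc;
  zone.destroy = my_destroy;
  zone.introspect = &intro;

  malloc_zone_register(&zone);
  void *p = malloc_zone_malloc(&zone, 32);
  printf("%zu bytes from zone '%s'\n", malloc_size(p), zone.zone_name);
  malloc_zone_free(&zone, p);
  return 0;
}
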
diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h
index d78f43edaae..75f495a4d65 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mutex.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h
@@ -71,7 +71,13 @@ class SpinMutex : public StaticSpinMutex {
class BlockingMutex {
public:
+#if SANITIZER_WINDOWS
+ // Windows does not currently support LinkerInitialized
explicit BlockingMutex(LinkerInitialized);
+#else
+ explicit constexpr BlockingMutex(LinkerInitialized)
+ : opaque_storage_ {0, }, owner_(0) {}
+#endif
BlockingMutex();
void Lock();
void Unlock();
@@ -81,6 +87,88 @@ class BlockingMutex {
uptr owner_; // for debugging
};
+// Reader-writer spin mutex.
+class RWMutex {
+ public:
+ RWMutex() {
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+ }
+
+ ~RWMutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ void Lock() {
+ u32 cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ LockSlow();
+ }
+
+ void Unlock() {
+ u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ DCHECK_NE(prev & kWriteLock, 0);
+ (void)prev;
+ }
+
+ void ReadLock() {
+ u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ ReadLockSlow();
+ }
+
+ void ReadUnlock() {
+ u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+ (void)prev;
+ }
+
+ void CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ private:
+ atomic_uint32_t state_;
+
+ enum {
+ kUnlocked = 0,
+ kWriteLock = 1,
+ kReadLock = 2
+ };
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 cmp = atomic_load(&state_, memory_order_relaxed);
+ if (cmp == kUnlocked &&
+ atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ }
+ }
+
+ void NOINLINE ReadLockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ }
+ }
+
+ RWMutex(const RWMutex&);
+ void operator = (const RWMutex&);
+};
+
template<typename MutexType>
class GenericScopedLock {
public:
@@ -121,6 +209,8 @@ class GenericScopedReadLock {
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
} // namespace __sanitizer
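
The RWMutex added here packs its whole state into one 32-bit word: bit 0 is the writer lock and each reader adds kReadLock (2), so a set low bit means "writer present" and the remaining bits count readers. The standalone sketch below restates that encoding with std::atomic and a plain busy-spin in place of proc_yield()/internal_sched_yield(); it is an illustration, not the runtime's code.

// Standalone sketch of the same single-word reader/writer encoding.
#include <atomic>
#include <stdint.h>
#include <stdio.h>

class TinyRWLock {
  enum : uint32_t { kUnlocked = 0, kWriteLock = 1, kReadLock = 2 };
  std::atomic<uint32_t> state_{0};
 public:
  void Lock() {                      // writer: 0 -> kWriteLock, else spin
    uint32_t expected = kUnlocked;
    while (!state_.compare_exchange_weak(expected, kWriteLock,
                                         std::memory_order_acquire))
      expected = kUnlocked;          // the real code yields while spinning
  }
  void Unlock() { state_.fetch_sub(kWriteLock, std::memory_order_release); }
  void ReadLock() {
    // Optimistically add a reader; if a writer holds bit 0, wait it out.
    uint32_t prev = state_.fetch_add(kReadLock, std::memory_order_acquire);
    while (prev & kWriteLock)
      prev = state_.load(std::memory_order_acquire);
  }
  void ReadUnlock() { state_.fetch_sub(kReadLock, std::memory_order_release); }
};

int main() {
  TinyRWLock mu;
  mu.ReadLock(); mu.ReadLock();      // several readers may hold the lock at once
  mu.ReadUnlock(); mu.ReadUnlock();
  mu.Lock();                         // a writer gets it exclusively
  mu.Unlock();
  printf("ok\n");
  return 0;
}
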
diff --git a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc
new file mode 100644
index 00000000000..b989ed0c90f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.cc
@@ -0,0 +1,17 @@
+//===-- sanitizer_persistent_allocator.cc -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h
new file mode 100644
index 00000000000..71185033eff
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h
@@ -0,0 +1,70 @@
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A fast memory allocator that does not support free() nor realloc().
+// All allocations are forever.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class PersistentAllocator {
+ public:
+ void *alloc(uptr size);
+
+ private:
+ void *tryAlloc(uptr size);
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for Node's.
+ atomic_uintptr_t region_end;
+};
+
+inline void *PersistentAllocator::tryAlloc(uptr size) {
+  // Optimistic lock-free allocation: essentially try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+ uptr end = atomic_load(&region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + size > end) return nullptr;
+ if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+ memory_order_acquire))
+ return (void *)cmp;
+ }
+}
+
+inline void *PersistentAllocator::alloc(uptr size) {
+  // First, try to allocate optimistically.
+ void *s = tryAlloc(size);
+ if (s) return s;
+  // If that failed, lock, retry, and allocate a new superblock.
+ SpinMutexLock l(&mtx);
+ for (;;) {
+ s = tryAlloc(size);
+ if (s) return s;
+ atomic_store(&region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < size) allocsz = size;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ atomic_store(&region_end, mem + allocsz, memory_order_release);
+ atomic_store(&region_pos, mem, memory_order_release);
+ }
+}
+
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+ return thePersistentAllocator.alloc(sz);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
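
PersistentAllocator's fast path is a lock-free bump of region_pos via compare-and-swap; only when the current region is exhausted does a thread take the spin mutex and map a fresh 64 KB superblock, publishing region_end before region_pos. A simplified standalone restatement, with std::mutex and malloc standing in for StaticSpinMutex and MmapOrDie, might look like this:

// Standalone sketch of the bump-pointer scheme (not sanitizer code).
#include <atomic>
#include <mutex>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static std::atomic<uintptr_t> region_pos{0}, region_end{0};
static std::mutex region_mtx;

static void *TryAlloc(size_t size) {
  for (;;) {
    uintptr_t cur = region_pos.load(std::memory_order_acquire);
    uintptr_t end = region_end.load(std::memory_order_acquire);
    if (cur == 0 || cur + size > end) return nullptr;  // no region, or full
    if (region_pos.compare_exchange_weak(cur, cur + size,
                                         std::memory_order_acquire))
      return reinterpret_cast<void *>(cur);
  }
}

static void *PersistentAlloc(size_t size) {
  if (void *p = TryAlloc(size)) return p;          // lock-free fast path
  std::lock_guard<std::mutex> l(region_mtx);       // slow path: one refiller
  for (;;) {
    if (void *p = TryAlloc(size)) return p;
    // Retire the old region, then publish a fresh one: end first, pos last,
    // so a concurrent TryAlloc never sees a valid pos with a stale end.
    region_pos.store(0, std::memory_order_relaxed);
    size_t region_size = size > 64 * 1024 ? size : 64 * 1024;
    uintptr_t mem = reinterpret_cast<uintptr_t>(malloc(region_size));
    region_end.store(mem + region_size, std::memory_order_release);
    region_pos.store(mem, std::memory_order_release);
  }
}

int main() {
  char *a = static_cast<char *>(PersistentAlloc(16));
  char *b = static_cast<char *>(PersistentAlloc(16));
  printf("allocations are %td bytes apart\n", b - a);  // typically 16: pure bump
  return 0;
}
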
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 7693fe7ff17..7d0ff2896e9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -11,7 +11,8 @@
#ifndef SANITIZER_PLATFORM_H
#define SANITIZER_PLATFORM_H
-#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported"
#endif
@@ -21,6 +22,12 @@
# define SANITIZER_LINUX 0
#endif
+#if defined(__FreeBSD__)
+# define SANITIZER_FREEBSD 1
+#else
+# define SANITIZER_FREEBSD 0
+#endif
+
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
@@ -29,9 +36,15 @@
# else
# define SANITIZER_IOS 0
# endif
+# if TARGET_IPHONE_SIMULATOR
+# define SANITIZER_IOSSIM 1
+# else
+# define SANITIZER_IOSSIM 0
+# endif
#else
# define SANITIZER_MAC 0
# define SANITIZER_IOS 0
+# define SANITIZER_IOSSIM 0
#endif
#if defined(_WIN32)
@@ -40,12 +53,111 @@
# define SANITIZER_WINDOWS 0
#endif
-#if defined(__ANDROID__) || defined(ANDROID)
+#if defined(__ANDROID__)
# define SANITIZER_ANDROID 1
#else
# define SANITIZER_ANDROID 0
#endif
-#define SANITIZER_POSIX (SANITIZER_LINUX || SANITIZER_MAC)
+#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC)
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+#if SANITIZER_WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+# define SANITIZER_X32 1
+#else
+# define SANITIZER_X32 0
+#endif
+
+// VMA size definition for architectures that support multiple sizes.
+// AArch64 has 3 VMA sizes: 39, 42 and 48.
+#if !defined(SANITIZER_AARCH64_VMA)
+# define SANITIZER_AARCH64_VMA 39
+#else
+# if SANITIZER_AARCH64_VMA != 39 && SANITIZER_AARCH64_VMA != 42
+# error "invalid SANITIZER_AARCH64_VMA size"
+# endif
+#endif
+
+// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
+// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
+// does not work well and we need to fall back to SizeClassAllocator32.
+// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
+// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
+#ifndef SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__mips64) || defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
+#endif
+
+// The range of addresses which can be returned by mmap.
+// FIXME: this value should be different on different platforms. Larger values
+// will still work but will consume more memory for TwoLevelByteMap.
+#if defined(__mips__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// The AArch64 linux port uses the canonical syscall set as mandated by
+// the upstream linux community for all new ports. Other ports may still
+// use legacy syscalls.
+#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if defined(__aarch64__) && SANITIZER_LINUX
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+# else
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+# endif
+#endif
+
+// uid16 syscalls can only be used when the following conditions are
+// met:
+// * target is one of arm32, x86-32, sparc32, sh or m68k
+// * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15
+// built against > linux-2.2 kernel headers
+// Since we don't want to include libc headers here, we check the
+// target only.
+#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
+#define SANITIZER_USES_UID16_SYSCALLS 1
+#else
+#define SANITIZER_USES_UID16_SYSCALLS 0
+#endif
+
+#if defined(__mips__) || (defined(__aarch64__) && SANITIZER_AARCH64_VMA == 39)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+#elif defined(__aarch64__) && SANITIZER_AARCH64_VMA == 42
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 11)
+#else
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+#endif
+
+// Assume obsolete RPC headers are available by default
+#if !defined(HAVE_RPC_XDR_H) && !defined(HAVE_TIRPC_RPC_XDR_H)
+# define HAVE_RPC_XDR_H (SANITIZER_LINUX && !SANITIZER_ANDROID)
+# define HAVE_TIRPC_RPC_XDR_H 0
+#endif
+
+/// \macro MSC_PREREQ
+/// \brief Is the compiler MSVC of at least the specified version?
+/// The common \param version values to check for are:
+/// * 1800: Microsoft Visual Studio 2013 / 12.0
+/// * 1900: Microsoft Visual Studio 2015 / 14.0
+#ifdef _MSC_VER
+# define MSC_PREREQ(version) (_MSC_VER >= (version))
+#else
+# define MSC_PREREQ(version) 0
+#endif
#endif // SANITIZER_PLATFORM_H
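
The word-size helpers introduced above (SANITIZER_WORDSIZE, FIRST_32_SECOND_64) are the building blocks for definitions such as SANITIZER_MMAP_RANGE_SIZE. A tiny self-contained illustration of how they expand; the macros are copied from the hunk, the main() is hypothetical:

#include <stdio.h>

#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
#endif

#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
#endif

int main() {
  // Same expression as SANITIZER_MMAP_RANGE_SIZE for non-MIPS targets:
  // 2^32 bytes of mappable address space on 32-bit hosts, 2^47 on 64-bit.
  unsigned long long mmap_range = FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47);
  printf("wordsize=%d mmap range=%llu\n", SANITIZER_WORDSIZE, mmap_range);
  return 0;
}
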
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index f37d84b041d..040e030e088 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -27,6 +27,12 @@
# define SI_LINUX_NOT_ANDROID 0
#endif
+#if SANITIZER_FREEBSD
+# define SI_FREEBSD 1
+#else
+# define SI_FREEBSD 0
+#endif
+
#if SANITIZER_LINUX
# define SI_LINUX 1
#else
@@ -45,14 +51,21 @@
# define SI_IOS 0
#endif
-# define SANITIZER_INTERCEPT_STRCMP 1
-# define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRCMP 1
+#define SANITIZER_INTERCEPT_STRSTR 1
+#define SANITIZER_INTERCEPT_STRCASESTR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRSPN 1
+#define SANITIZER_INTERCEPT_STRPBRK 1
+#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MEMCMP 1
+#define SANITIZER_INTERCEPT_MEMCHR 1
+#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
-# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
@@ -60,114 +73,192 @@
#define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_PREADV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PRCTL SI_LINUX
-
-# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
-
-# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
-
-# define SANITIZER_INTERCEPT_FREXP 1
-# define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
-
-# define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
- SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
-# define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
-# define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
-# define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
-# define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
-# define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
-# define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
-# define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_WORDEXP SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
-# define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_STATFS64 \
- (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_SHMCTL \
- (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
-# define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
+#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
+
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
+
+#define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
+
+#ifndef SANITIZER_INTERCEPT_PRINTF
+# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
+#endif
+
+#define SANITIZER_INTERCEPT_FREXP 1
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
+
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX
+#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
+#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__))
+#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCRTOMB \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_CONFSTR \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WORDEXP \
+ SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGSETOPS \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_BACKTRACE SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
+#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS64 \
+ (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_HOST \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SHMCTL \
+ ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64)
+#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
SI_MAC || SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_SINCOS SI_LINUX
-# define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
-# define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
+#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX
+#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_RAND_R \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ICONV SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
// FIXME: getline seems to be available on OSX 10.7
-# define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETLINE SI_FREEBSD || SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD || SI_MAC
+
+#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
+#define SANITIZER_INTERCEPT_GETIFADDRS \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_AEABI_MEM SI_LINUX && defined(__arm__)
+#define SANITIZER_INTERCEPT___BZERO SI_MAC
+#define SANITIZER_INTERCEPT_FTIME !SI_FREEBSD && SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
-# define SANITIZER_INTERCEPT__EXIT SI_LINUX
+#define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SEM SI_LINUX || SI_FREEBSD
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MINCORE SI_LINUX
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
-# define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_COND SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
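
Note that the SANITIZER_INTERCEPT_* values above are preprocessor expressions (for example "SI_FREEBSD || SI_LINUX") rather than pre-evaluated 0/1 constants; they are meant to be tested with #if at each interceptor definition site so that wrapper code compiles away on platforms that lack the function. The self-contained illustration below uses a hypothetical wrapper, not the real interceptor machinery:

#include <stddef.h>
#include <stdio.h>

// Copied shape of the configuration macros, with fixed example values.
#define SI_FREEBSD 0
#define SI_LINUX 1
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX

#if SANITIZER_INTERCEPT_MEMRCHR
// Hypothetical stand-in for an interceptor body: present only on the
// platforms listed in the macro's expression.
static void demo_memrchr_check(const void *s, size_t n) {
  printf("memrchr hook active for %p (%zu bytes)\n", (void *)s, n);
}
#endif

int main() {
  char buf[8] = {0};
#if SANITIZER_INTERCEPT_MEMRCHR
  demo_memrchr_check(buf, sizeof(buf));
#else
  (void)buf;  // on other platforms the hook does not exist at all
#endif
  return 0;
}
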
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
index ac56f33c843..a1f04325033 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -27,7 +27,7 @@
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <asm/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
@@ -49,21 +49,19 @@
#include <linux/aio_abi.h>
-#if SANITIZER_ANDROID
-#include <asm/statfs.h>
-#else
-#include <sys/statfs.h>
-#endif
-
#if !SANITIZER_ANDROID
+#include <sys/statfs.h>
#include <linux/perf_event.h>
#endif
namespace __sanitizer {
+#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif
} // namespace __sanitizer
-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__sparc__)
+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+ && !defined(__mips__) && !defined(__sparc__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
index 196eb3b3c64..4793bc18625 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -10,27 +10,35 @@
// Sizes and layouts of platform-specific POSIX data structures.
//===----------------------------------------------------------------------===//
-
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
+// Tests in this file assume that off_t-dependent data structures match the
+// libc ABI. For example, struct dirent here is what the readdir() function (as
+// exported from libc) returns, and not the user-facing "dirent", which
+// depends on the _FILE_OFFSET_BITS setting.
+// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
+#ifdef _FILE_OFFSET_BITS
+#undef _FILE_OFFSET_BITS
+#endif
+#if SANITIZER_FREEBSD
+#define _WANT_RTENTRY
+#include <sys/param.h>
+#include <sys/socketvar.h>
+#endif
#include <arpa/inet.h>
#include <dirent.h>
#include <errno.h>
#include <grp.h>
#include <limits.h>
#include <net/if.h>
-#include <net/if_arp.h>
-#include <net/route.h>
#include <netdb.h>
#include <poll.h>
#include <pthread.h>
#include <pwd.h>
#include <signal.h>
#include <stddef.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
@@ -42,12 +50,19 @@
#include <time.h>
#include <wchar.h>
+#if !SANITIZER_IOS
+#include <net/route.h>
+#endif
+
+#if !SANITIZER_ANDROID
+#include <sys/mount.h>
+#include <sys/timeb.h>
+#endif
+
#if SANITIZER_LINUX
+#include <malloc.h>
#include <mntent.h>
#include <netinet/ether.h>
-#include <utime.h>
-#include <sys/mount.h>
-#include <sys/ptrace.h>
#include <sys/sysinfo.h>
#include <sys/vt.h>
#include <linux/cdrom.h>
@@ -60,26 +75,85 @@
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <linux/posix_types.h>
+#include <net/if_arp.h>
+#endif
+
+#if SANITIZER_FREEBSD
+# include <sys/mount.h>
+# include <sys/sockio.h>
+# include <sys/socket.h>
+# include <sys/filio.h>
+# include <sys/signal.h>
+# include <sys/timespec.h>
+# include <sys/timex.h>
+# include <sys/mqueue.h>
+# include <sys/msg.h>
+# include <sys/ipc.h>
+# include <sys/msg.h>
+# include <sys/statvfs.h>
+# include <sys/soundcard.h>
+# include <sys/mtio.h>
+# include <sys/consio.h>
+# include <sys/kbio.h>
+# include <sys/link_elf.h>
+# include <netinet/ip_mroute.h>
+# include <netinet/in.h>
+# include <net/ethernet.h>
+# include <net/ppp_defs.h>
+# include <glob.h>
+# include <term.h>
+
+#define _KERNEL // to declare 'shminfo' structure
+# include <sys/shm.h>
+#undef _KERNEL
+
+#undef INLINE // to avoid clashes with sanitizers' definitions
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_IOS
+#undef IOC_DIRMASK
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+# include <utime.h>
+# include <sys/ptrace.h>
+# if defined(__mips64) || defined(__aarch64__) || defined(__arm__)
+# include <asm/ptrace.h>
+# ifdef __arm__
+typedef struct user_fpregs elf_fpregset_t;
+# endif
+# endif
+# include <semaphore.h>
#endif
#if !SANITIZER_ANDROID
+#include <ifaddrs.h>
#include <sys/ucontext.h>
#include <wordexp.h>
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include <glob.h>
+#include <obstack.h>
#include <mqueue.h>
#include <net/if_ppp.h>
#include <netax25/ax25.h>
#include <netipx/ipx.h>
#include <netrom/netrom.h>
+#if HAVE_RPC_XDR_H
+# include <rpc/xdr.h>
+#elif HAVE_TIRPC_RPC_XDR_H
+# include <tirpc/rpc/xdr.h>
+#endif
#include <scsi/scsi.h>
#include <sys/mtio.h>
#include <sys/kd.h>
#include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h>
+#if defined(__mips64)
+# include <sys/procfs.h>
+#endif
#include <sys/user.h>
#include <sys/ustat.h>
#include <linux/cyclades.h>
@@ -92,7 +166,6 @@
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
-#include <sys/shm.h>
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_ANDROID
@@ -112,16 +185,19 @@
#if SANITIZER_MAC
#include <net/ethernet.h>
#include <sys/filio.h>
-#include <sys/mount.h>
#include <sys/sockio.h>
#endif
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
-#if !SANITIZER_IOS
+#if !SANITIZER_IOS && !SANITIZER_FREEBSD
unsigned struct_stat64_sz = sizeof(struct stat64);
-#endif // !SANITIZER_IOS
+#endif // !SANITIZER_IOS && !SANITIZER_FREEBSD
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@@ -134,46 +210,55 @@ namespace __sanitizer {
unsigned pid_t_sz = sizeof(pid_t);
unsigned timeval_sz = sizeof(timeval);
unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
unsigned mbstate_t_sz = sizeof(mbstate_t);
unsigned sigset_t_sz = sizeof(sigset_t);
unsigned struct_timezone_sz = sizeof(struct timezone);
unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
- unsigned struct_statfs_sz = sizeof(struct statfs);
+
#if SANITIZER_MAC && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // SANITIZER_MAC && !SANITIZER_IOS
#if !SANITIZER_ANDROID
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
- unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
- unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
- unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
- unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
- unsigned struct_ustat_sz = sizeof(struct ustat);
#endif // SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+ unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+ unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_ustat_sz = sizeof(struct ustat);
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
unsigned struct_statvfs_sz = sizeof(struct statvfs);
- unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
uptr sig_ign = (uptr)SIG_IGN;
uptr sig_dfl = (uptr)SIG_DFL;
@@ -184,15 +269,17 @@ namespace __sanitizer {
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
unsigned struct_shminfo_sz = sizeof(struct shminfo);
unsigned struct_shm_info_sz = sizeof(struct shm_info);
int shmctl_ipc_stat = (int)IPC_STAT;
int shmctl_ipc_info = (int)IPC_INFO;
int shmctl_shm_info = (int)SHM_INFO;
- int shmctl_shm_stat = (int)SHM_INFO;
+ int shmctl_shm_stat = (int)SHM_STAT;
#endif
+ int map_fixed = MAP_FIXED;
+
int af_inet = (int)AF_INET;
int af_inet6 = (int)AF_INET6;
@@ -205,50 +292,103 @@ namespace __sanitizer {
return 0;
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_LINUX
+unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
+#elif SANITIZER_FREEBSD
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+#endif
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__))
+#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
+#elif defined(__aarch64__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
+#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
-#ifdef __x86_64
+#endif // __mips64 || __powerpc64__ || __aarch64__
+#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
+ defined(__aarch64__) || defined(__arm__)
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
+#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
+#ifdef __arm__
+ unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
+#else
+ unsigned struct_user_vfpregs_struct_sz = 0;
#endif
int ptrace_peektext = PTRACE_PEEKTEXT;
int ptrace_peekdata = PTRACE_PEEKDATA;
int ptrace_peekuser = PTRACE_PEEKUSER;
+#if (defined(PTRACE_GETREGS) && defined(PTRACE_SETREGS)) || \
+ (defined(PT_GETREGS) && defined(PT_SETREGS))
int ptrace_getregs = PTRACE_GETREGS;
int ptrace_setregs = PTRACE_SETREGS;
+#else
+ int ptrace_getregs = -1;
+ int ptrace_setregs = -1;
+#endif
+#if (defined(PTRACE_GETFPREGS) && defined(PTRACE_SETFPREGS)) || \
+ (defined(PT_GETFPREGS) && defined(PT_SETFPREGS))
int ptrace_getfpregs = PTRACE_GETFPREGS;
int ptrace_setfpregs = PTRACE_SETFPREGS;
+#else
+ int ptrace_getfpregs = -1;
+ int ptrace_setfpregs = -1;
+#endif
+#if (defined(PTRACE_GETFPXREGS) && defined(PTRACE_SETFPXREGS)) || \
+ (defined(PT_GETFPXREGS) && defined(PT_SETFPXREGS))
int ptrace_getfpxregs = PTRACE_GETFPXREGS;
int ptrace_setfpxregs = PTRACE_SETFPXREGS;
+#else
+ int ptrace_getfpxregs = -1;
+ int ptrace_setfpxregs = -1;
+#endif // PTRACE_GETFPXREGS/PTRACE_SETFPXREGS
+#if defined(PTRACE_GETVFPREGS) && defined(PTRACE_SETVFPREGS)
+ int ptrace_getvfpregs = PTRACE_GETVFPREGS;
+ int ptrace_setvfpregs = PTRACE_SETVFPREGS;
+#else
+ int ptrace_getvfpregs = -1;
+ int ptrace_setvfpregs = -1;
+#endif
+ int ptrace_geteventmsg = PTRACE_GETEVENTMSG;
+#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \
+ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO))
int ptrace_getsiginfo = PTRACE_GETSIGINFO;
int ptrace_setsiginfo = PTRACE_SETSIGINFO;
+#else
+ int ptrace_getsiginfo = -1;
+ int ptrace_setsiginfo = -1;
+#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO
#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
int ptrace_getregset = PTRACE_GETREGSET;
int ptrace_setregset = PTRACE_SETREGSET;
#else
int ptrace_getregset = -1;
int ptrace_setregset = -1;
-#endif
+#endif // PTRACE_GETREGSET/PTRACE_SETREGSET
#endif
unsigned path_max = PATH_MAX;
// ioctl arguments
- unsigned struct_arpreq_sz = sizeof(struct arpreq);
unsigned struct_ifreq_sz = sizeof(struct ifreq);
unsigned struct_termios_sz = sizeof(struct termios);
unsigned struct_winsize_sz = sizeof(struct winsize);
#if SANITIZER_LINUX
+ unsigned struct_arpreq_sz = sizeof(struct arpreq);
unsigned struct_cdrom_msf_sz = sizeof(struct cdrom_msf);
unsigned struct_cdrom_multisession_sz = sizeof(struct cdrom_multisession);
unsigned struct_cdrom_read_audio_sz = sizeof(struct cdrom_read_audio);
@@ -257,15 +397,6 @@ namespace __sanitizer {
unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry);
unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr);
unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl);
-#if SOUND_VERSION >= 0x040000
- unsigned struct_copr_buffer_sz = 0;
- unsigned struct_copr_debug_buf_sz = 0;
- unsigned struct_copr_msg_sz = 0;
-#else
- unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
- unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
- unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
-#endif
unsigned struct_ff_effect_sz = sizeof(struct ff_effect);
unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params);
unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct);
@@ -279,23 +410,34 @@ namespace __sanitizer {
unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry);
unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo);
unsigned struct_input_id_sz = sizeof(struct input_id);
+ unsigned struct_mtpos_sz = sizeof(struct mtpos);
+ unsigned struct_termio_sz = sizeof(struct termio);
+ unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
+ unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
+ unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#if SOUND_VERSION >= 0x040000
+ unsigned struct_copr_buffer_sz = 0;
+ unsigned struct_copr_debug_buf_sz = 0;
+ unsigned struct_copr_msg_sz = 0;
+#else
+ unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+ unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+ unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
unsigned struct_midi_info_sz = sizeof(struct midi_info);
unsigned struct_mtget_sz = sizeof(struct mtget);
unsigned struct_mtop_sz = sizeof(struct mtop);
- unsigned struct_mtpos_sz = sizeof(struct mtpos);
unsigned struct_rtentry_sz = sizeof(struct rtentry);
unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
unsigned struct_synth_info_sz = sizeof(struct synth_info);
- unsigned struct_termio_sz = sizeof(struct termio);
- unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
- unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
- unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000)
@@ -310,7 +452,6 @@ namespace __sanitizer {
unsigned struct_kbsentry_sz = sizeof(struct kbsentry);
unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo);
unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct);
- unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
unsigned struct_scc_modem_sz = sizeof(struct scc_modem);
unsigned struct_scc_stat_sz = sizeof(struct scc_stat);
unsigned struct_serial_multiport_struct_sz
@@ -319,14 +460,19 @@ namespace __sanitizer {
unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+ unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
- unsigned IOCTL_NOT_PRESENT = 0;
+ const unsigned IOCTL_NOT_PRESENT = 0;
unsigned IOCTL_FIOASYNC = FIOASYNC;
unsigned IOCTL_FIOCLEX = FIOCLEX;
@@ -372,10 +518,11 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
unsigned IOCTL_TIOCSTI = TIOCSTI;
unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#if ((SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID)
unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
#endif
+
#if SANITIZER_LINUX
unsigned IOCTL_EVIOCGABS = EVIOCGABS(0);
unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0);
@@ -466,9 +613,7 @@ namespace __sanitizer {
unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT;
unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR;
unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR;
- unsigned IOCTL_MTIOCGET = MTIOCGET;
unsigned IOCTL_MTIOCPOS = MTIOCPOS;
- unsigned IOCTL_MTIOCTOP = MTIOCTOP;
unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG;
unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
@@ -501,7 +646,7 @@ namespace __sanitizer {
unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM;
unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE;
unsigned IOCTL_SIOCSRARP = SIOCSRARP;
-#if SOUND_VERSION >= 0x040000
+# if SOUND_VERSION >= 0x040000
unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT;
unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT;
unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT;
@@ -518,7 +663,7 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT;
unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT;
-#else
+# else // SOUND_VERSION
unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT;
unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD;
unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE;
@@ -535,7 +680,39 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS;
unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
-#endif
+#endif // SOUND_VERSION
+ unsigned IOCTL_TCFLSH = TCFLSH;
+ unsigned IOCTL_TCGETA = TCGETA;
+ unsigned IOCTL_TCGETS = TCGETS;
+ unsigned IOCTL_TCSBRK = TCSBRK;
+ unsigned IOCTL_TCSBRKP = TCSBRKP;
+ unsigned IOCTL_TCSETA = TCSETA;
+ unsigned IOCTL_TCSETAF = TCSETAF;
+ unsigned IOCTL_TCSETAW = TCSETAW;
+ unsigned IOCTL_TCSETS = TCSETS;
+ unsigned IOCTL_TCSETSF = TCSETSF;
+ unsigned IOCTL_TCSETSW = TCSETSW;
+ unsigned IOCTL_TCXONC = TCXONC;
+ unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
+ unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
+ unsigned IOCTL_TIOCINQ = TIOCINQ;
+ unsigned IOCTL_TIOCLINUX = TIOCLINUX;
+ unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
+ unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
+ unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
+ unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
+ unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
+ unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
+ unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
+ unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+ unsigned IOCTL_VT_RESIZE = VT_RESIZE;
+ unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
+ unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
@@ -620,40 +797,14 @@ namespace __sanitizer {
unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
- unsigned IOCTL_TCFLSH = TCFLSH;
- unsigned IOCTL_TCGETA = TCGETA;
- unsigned IOCTL_TCGETS = TCGETS;
- unsigned IOCTL_TCSBRK = TCSBRK;
- unsigned IOCTL_TCSBRKP = TCSBRKP;
- unsigned IOCTL_TCSETA = TCSETA;
- unsigned IOCTL_TCSETAF = TCSETAF;
- unsigned IOCTL_TCSETAW = TCSETAW;
- unsigned IOCTL_TCSETS = TCSETS;
- unsigned IOCTL_TCSETSF = TCSETSF;
- unsigned IOCTL_TCSETSW = TCSETSW;
- unsigned IOCTL_TCXONC = TCXONC;
- unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
- unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
- unsigned IOCTL_TIOCINQ = TIOCINQ;
- unsigned IOCTL_TIOCLINUX = TIOCLINUX;
- unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
- unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
- unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
- unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
- unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
- unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
- unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
unsigned IOCTL_VT_GETMODE = VT_GETMODE;
- unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
unsigned IOCTL_VT_RELDISP = VT_RELDISP;
- unsigned IOCTL_VT_RESIZE = VT_RESIZE;
- unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
- unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
unsigned IOCTL_VT_SETMODE = VT_SETMODE;
unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
@@ -685,37 +836,25 @@ namespace __sanitizer {
unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
unsigned IOCTL_GIO_CMAP = GIO_CMAP;
unsigned IOCTL_GIO_FONT = GIO_FONT;
- unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP;
unsigned IOCTL_KDADDIO = KDADDIO;
unsigned IOCTL_KDDELIO = KDDELIO;
- unsigned IOCTL_KDDISABIO = KDDISABIO;
- unsigned IOCTL_KDENABIO = KDENABIO;
unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE;
- unsigned IOCTL_KDGETLED = KDGETLED;
- unsigned IOCTL_KDGETMODE = KDGETMODE;
unsigned IOCTL_KDGKBDIACR = KDGKBDIACR;
unsigned IOCTL_KDGKBENT = KDGKBENT;
unsigned IOCTL_KDGKBLED = KDGKBLED;
unsigned IOCTL_KDGKBMETA = KDGKBMETA;
- unsigned IOCTL_KDGKBMODE = KDGKBMODE;
unsigned IOCTL_KDGKBSENT = KDGKBSENT;
- unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
unsigned IOCTL_KDMAPDISP = KDMAPDISP;
- unsigned IOCTL_KDMKTONE = KDMKTONE;
unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE;
- unsigned IOCTL_KDSETLED = KDSETLED;
- unsigned IOCTL_KDSETMODE = KDSETMODE;
unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT;
unsigned IOCTL_KDSKBDIACR = KDSKBDIACR;
unsigned IOCTL_KDSKBENT = KDSKBENT;
unsigned IOCTL_KDSKBLED = KDSKBLED;
unsigned IOCTL_KDSKBMETA = KDSKBMETA;
- unsigned IOCTL_KDSKBMODE = KDSKBMODE;
unsigned IOCTL_KDSKBSENT = KDSKBSENT;
unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP;
- unsigned IOCTL_KIOCSOUND = KIOCSOUND;
unsigned IOCTL_LPABORT = LPABORT;
unsigned IOCTL_LPABORTOPEN = LPABORTOPEN;
unsigned IOCTL_LPCAREFUL = LPCAREFUL;
@@ -730,7 +869,6 @@ namespace __sanitizer {
unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG;
unsigned IOCTL_PIO_CMAP = PIO_CMAP;
unsigned IOCTL_PIO_FONT = PIO_FONT;
- unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
@@ -752,21 +890,41 @@ namespace __sanitizer {
unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
- unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
- unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+ unsigned IOCTL_KDDISABIO = KDDISABIO;
+ unsigned IOCTL_KDENABIO = KDENABIO;
+ unsigned IOCTL_KDGETLED = KDGETLED;
+ unsigned IOCTL_KDGETMODE = KDGETMODE;
+ unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+ unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+ unsigned IOCTL_KDMKTONE = KDMKTONE;
+ unsigned IOCTL_KDSETLED = KDSETLED;
+ unsigned IOCTL_KDSETMODE = KDSETMODE;
+ unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+ unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+ unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+ unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+ unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+
+ const int errno_EINVAL = EINVAL;
// EOWNERDEAD is not present in some older platforms.
#if defined(EOWNERDEAD)
- extern const int errno_EOWNERDEAD = EOWNERDEAD;
+ const int errno_EOWNERDEAD = EOWNERDEAD;
#else
- extern const int errno_EOWNERDEAD = -1;
+ const int errno_EOWNERDEAD = -1;
#endif
-} // namespace __sanitizer
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
@@ -774,6 +932,31 @@ COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
CHECK_TYPE_SIZE(pthread_key_t);
#if SANITIZER_LINUX
+// FIXME: We define those on Linux and Mac, but only check on Linux.
+COMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS);
+COMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS);
+COMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS);
+COMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS);
+COMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK);
+COMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK);
+COMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK);
+COMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK);
+COMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT);
+COMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT);
+COMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT);
+COMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT);
+COMPILER_CHECK(IOC_NONE == _IOC_NONE);
+COMPILER_CHECK(IOC_WRITE == _IOC_WRITE);
+COMPILER_CHECK(IOC_READ == _IOC_READ);
+COMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX);
+COMPILER_CHECK(EVIOC_EV_MAX == EV_MAX);
+COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
+COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678));
+COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678));
+COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678));
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
// There are more undocumented fields in dl_phdr_info that we are not interested
// in.
COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
@@ -781,11 +964,9 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
-COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(glob_t);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
@@ -837,6 +1018,8 @@ COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
#if SANITIZER_MAC
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
+#elif SANITIZER_FREEBSD
+// There is no 'd_off' field on FreeBSD.
#else
CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
@@ -921,15 +1104,26 @@ CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
CHECK_TYPE_SIZE(ether_addr);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(ipc_perm);
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+# else
CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+# endif
CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+#if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)
+/* On aarch64, glibc 2.20 and earlier provided an incorrect mode field. */
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
-CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+#endif
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
@@ -944,4 +1138,124 @@ CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
CHECK_TYPE_SIZE(clock_t);
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(clockid_t);
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+# else
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)nullptr)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+# endif // SANITIZER_FREEBSD
+#else
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+#endif // SANITIZER_LINUX
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+#endif
+
+#if SANITIZER_LINUX
+COMPILER_CHECK(sizeof(__sanitizer_mallinfo) == sizeof(struct mallinfo));
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+#endif
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+#if !SANITIZER_ANDROID
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+#endif
+
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(passwd, pw_change);
+CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
+CHECK_SIZE_AND_OFFSET(passwd, pw_class);
+#endif
+
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if HAVE_RPC_XDR_H || HAVE_TIRPC_RPC_XDR_H
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
+CHECK_SIZE_AND_OFFSET(FILE, _markers);
+CHECK_SIZE_AND_OFFSET(FILE, _chain);
+CHECK_SIZE_AND_OFFSET(FILE, _fileno);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
+CHECK_TYPE_SIZE(obstack);
+CHECK_SIZE_AND_OFFSET(obstack, chunk_size);
+CHECK_SIZE_AND_OFFSET(obstack, chunk);
+CHECK_SIZE_AND_OFFSET(obstack, object_base);
+CHECK_SIZE_AND_OFFSET(obstack, next_free);
+
+CHECK_TYPE_SIZE(cookie_io_functions_t);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+CHECK_TYPE_SIZE(sem_t);
+#endif
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
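Most of the hunks above only move CHECK_TYPE_SIZE and CHECK_SIZE_AND_OFFSET invocations between platform guards. For readers unfamiliar with these macros, the sketch below uses hypothetical struct names and shows roughly what each invocation verifies at compile time; the real macros are defined in sanitizer_platform_limits_posix.h (the diff of which follows) and use COMPILER_CHECK rather than static_assert.

    #include <cstddef>   // offsetof

    struct real_type   { long a; char *b; };   // stand-in for a libc struct
    struct shadow_type { long a; char *b; };   // stand-in for its __sanitizer_* mirror

    // CHECK_TYPE_SIZE(real_type) is roughly:
    static_assert(sizeof(shadow_type) == sizeof(real_type),
                  "type size mismatch");

    // CHECK_SIZE_AND_OFFSET(real_type, b) is roughly:
    static_assert(sizeof(((shadow_type *)nullptr)->b) ==
                  sizeof(((real_type *)nullptr)->b),
                  "member size mismatch");
    static_assert(offsetof(shadow_type, b) == offsetof(real_type, b),
                  "member offset mismatch");

The manual COMPILER_CHECKs added above for ifaddrs.ifa_dstaddr follow the same pattern, spelled out by hand because the Linux field is a union member.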
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index aec950454b3..fc1fb8071f7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -16,15 +16,22 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD
+// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
+// incorporates the map structure.
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
+#else
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
+#endif // !SANITIZER_FREEBSD
+
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if !SANITIZER_IOS
+#if !SANITIZER_FREEBSD && !SANITIZER_IOS
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
- extern unsigned struct_passwd_sz;
- extern unsigned struct_group_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
extern unsigned pthread_t_sz;
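A minimal usage sketch for the GET_LINK_MAP_BY_DLOPEN_HANDLE macro added above (not part of the patch; the soname is hypothetical and glibc behaviour is assumed). On glibc the handle returned by dlopen() already points at a link_map, which is what the non-FreeBSD branch of the macro relies on; the FreeBSD branch instead skips the 544-byte Obj_Entry prefix that wraps the map.

    #include <dlfcn.h>
    #include <link.h>
    #include <cstdio>

    int main() {
      void *handle = dlopen("libm.so.6", RTLD_NOW);   // hypothetical soname
      if (!handle) return 1;
      // Equivalent of GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) on glibc:
      link_map *map = reinterpret_cast<link_map *>(handle);
      std::printf("%s loaded at 0x%lx\n", map->l_name,
                  static_cast<unsigned long>(map->l_addr));
      dlclose(handle);
      return 0;
    }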
@@ -32,40 +39,54 @@ namespace __sanitizer {
extern unsigned pid_t_sz;
extern unsigned timeval_sz;
extern unsigned uid_t_sz;
+ extern unsigned gid_t_sz;
extern unsigned mbstate_t_sz;
extern unsigned struct_timezone_sz;
extern unsigned struct_tms_sz;
extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_sched_param_sz;
- extern unsigned struct_statfs_sz;
extern unsigned struct_statfs64_sz;
#if !SANITIZER_ANDROID
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
#if defined(__x86_64__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 96;
#elif defined(__arm__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__aarch64__)
+ const unsigned struct_kernel_stat_sz = 128;
+ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc__) && !defined(__powerpc64__)
- const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 72;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc64__)
+ const unsigned struct_kernel_stat_sz = 144;
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__sparc__) && defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 104;
+ const unsigned struct_kernel_stat64_sz = 144;
+#elif defined(__sparc__) && !defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 64;
+#elif defined(__mips__)
+ #if SANITIZER_WORDSIZE == 64
+ const unsigned struct_kernel_stat_sz = 216;
+ #else
const unsigned struct_kernel_stat_sz = 144;
+ #endif
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__sparc__) && defined(__arch64__)
const unsigned struct___old_kernel_stat_sz = 0;
@@ -82,23 +103,28 @@ namespace __sanitizer {
// More fields that vary with the kernel version.
};
- extern unsigned struct_utimbuf_sz;
+ extern unsigned struct_epoll_event_sz;
+ extern unsigned struct_sysinfo_sz;
+ extern unsigned __user_cap_header_struct_sz;
+ extern unsigned __user_cap_data_struct_sz;
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
- extern unsigned struct_msqid_ds_sz;
- extern unsigned struct_mq_attr_sz;
- extern unsigned struct_timex_sz;
- extern unsigned struct_ustat_sz;
+
+ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if defined(__powerpc64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+#elif !defined(__sparc__)
+ const unsigned struct___old_kernel_stat_sz = 32;
+#endif
extern unsigned struct_rlimit_sz;
- extern unsigned struct_epoll_event_sz;
- extern unsigned struct_sysinfo_sz;
+ extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
- extern unsigned __user_cap_header_struct_sz;
- extern unsigned __user_cap_data_struct_sz;
- const unsigned old_sigset_t_sz = sizeof(unsigned long);
- const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
struct __sanitizer_iocb {
u64 aio_data;
@@ -135,11 +161,35 @@ namespace __sanitizer {
uptr newlen;
unsigned long ___unused[4];
};
-#endif // SANITIZER_LINUX
+
+ const unsigned old_sigset_t_sz = sizeof(unsigned long);
+
+ struct __sanitizer_sem_t {
+#if SANITIZER_ANDROID && defined(_LP64)
+ int data[4];
+#elif SANITIZER_ANDROID && !defined(_LP64)
+ int data;
+#elif SANITIZER_LINUX
+ uptr data[4];
+#elif SANITIZER_FREEBSD
+ u32 data[4];
+#endif
+ };
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if SANITIZER_ANDROID
+ struct __sanitizer_mallinfo {
+ uptr v[10];
+ };
+#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_mallinfo {
+ int v[10];
+ };
+
+ extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
- extern unsigned struct_statvfs_sz;
extern unsigned struct_statvfs64_sz;
struct __sanitizer_ipc_perm {
@@ -153,6 +203,12 @@ namespace __sanitizer {
unsigned __seq;
u64 __unused1;
u64 __unused2;
+#elif defined(__mips__) || defined(__aarch64__)
+ unsigned int mode;
+ unsigned short __seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
#elif defined(__sparc__)
# if defined(__arch64__)
unsigned mode;
@@ -213,15 +269,15 @@ namespace __sanitizer {
u64 shm_ctime;
#else
uptr shm_atime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused1;
#endif
uptr shm_dtime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused2;
#endif
uptr shm_ctime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused3;
#endif
#endif
@@ -241,19 +297,121 @@ namespace __sanitizer {
#endif
#endif
};
- #endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_ipc_perm {
+ unsigned int cuid;
+ unsigned int cgid;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned short mode;
+ unsigned short seq;
+ long key;
+ };
+
+ struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ unsigned int shm_lpid;
+ unsigned int shm_cpid;
+ int shm_nattch;
+ unsigned long shm_atime;
+ unsigned long shm_dtime;
+ unsigned long shm_ctime;
+ };
+#endif
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned struct_msqid_ds_sz;
+ extern unsigned struct_mq_attr_sz;
+ extern unsigned struct_timex_sz;
+ extern unsigned struct_statvfs_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
struct __sanitizer_iovec {
- void *iov_base;
+ void *iov_base;
uptr iov_len;
};
+#if !SANITIZER_ANDROID
+ struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ };
+#endif // !SANITIZER_ANDROID
+
#if SANITIZER_MAC
typedef unsigned long __sanitizer_pthread_key_t;
#else
typedef unsigned __sanitizer_pthread_key_t;
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+ };
+
+ const int __sanitizer_XDR_ENCODE = 0;
+ const int __sanitizer_XDR_DECODE = 1;
+ const int __sanitizer_XDR_FREE = 2;
+#endif
+
+ struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+#if SANITIZER_MAC || SANITIZER_FREEBSD
+ long pw_change;
+ char *pw_class;
+#endif
+#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32))
+ char *pw_gecos;
+#endif
+ char *pw_dir;
+ char *pw_shell;
+#if SANITIZER_MAC || SANITIZER_FREEBSD
+ long pw_expire;
+#endif
+#if SANITIZER_FREEBSD
+ int pw_fields;
+#endif
+ };
+
+ struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+ };
+
+#if defined(__x86_64__) && !defined(_LP64)
+ typedef long long __sanitizer_time_t;
+#else
+ typedef long __sanitizer_time_t;
+#endif
+
+ struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+ };
+
struct __sanitizer_ether_addr {
u8 octet[6];
};
@@ -283,7 +441,7 @@ namespace __sanitizer {
};
#endif
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_MAC || SANITIZER_FREEBSD
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@@ -322,6 +480,12 @@ namespace __sanitizer {
unsigned short d_reclen;
// more fields that we don't care about
};
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_dirent {
+ unsigned int d_fileno;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
#elif SANITIZER_ANDROID || defined(__x86_64__)
struct __sanitizer_dirent {
unsigned long long d_ino;
@@ -347,14 +511,22 @@ namespace __sanitizer {
};
#endif
-#if defined(__x86_64__) && !defined(_LP64)
+// 'clock_t' is 32 bits wide on x64 FreeBSD
+#if SANITIZER_FREEBSD
+ typedef int __sanitizer_clock_t;
+#elif defined(__x86_64__) && !defined(_LP64)
typedef long long __sanitizer_clock_t;
#else
typedef long __sanitizer_clock_t;
#endif
#if SANITIZER_LINUX
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)
+ typedef int __sanitizer_clockid_t;
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)\
+ || defined(__mips__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
@@ -367,7 +539,7 @@ namespace __sanitizer {
typedef long __sanitizer___kernel_off_t;
#endif
-#if defined(__powerpc__)
+#if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
@@ -398,32 +570,82 @@ namespace __sanitizer {
// The size is determined by looking at sizeof of real sigset_t on linux.
uptr val[128 / sizeof(uptr)];
};
+#elif SANITIZER_FREEBSD
+ struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+ };
#endif
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64)
struct __sanitizer_sigaction {
+ unsigned sa_flags;
union {
- void (*sa_handler)(int sig);
- void (*sa_sigaction)(int sig, void *siginfo, void *uctx);
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ void (*handler)(int sig);
};
__sanitizer_sigset_t sa_mask;
+ void (*sa_restorer)();
+ };
+#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)
+ struct __sanitizer_sigaction {
+ union {
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ void (*handler)(int sig);
+ };
+ __sanitizer_sigset_t sa_mask;
+ uptr sa_flags;
+ void (*sa_restorer)();
+ };
+#else // !SANITIZER_ANDROID
+ struct __sanitizer_sigaction {
+#if defined(__mips__) && !SANITIZER_FREEBSD
+ unsigned int sa_flags;
+#endif
+ union {
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ void (*handler)(int sig);
+ };
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
+ __sanitizer_sigset_t sa_mask;
+#ifndef __mips__
#if defined(__sparc__)
unsigned long sa_flags;
#else
int sa_flags;
#endif
+#endif
+#endif
#if SANITIZER_LINUX
void (*sa_restorer)();
#endif
+#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
+ int sa_resv[1];
+#endif
};
+#endif // !SANITIZER_ANDROID
+#if SANITIZER_FREEBSD
+ typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+#elif defined(__mips__)
+ struct __sanitizer_kernel_sigset_t {
+ u8 sig[16];
+ };
+#else
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
};
+#endif
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
struct __sanitizer_kernel_sigaction_t {
union {
- void (*sigaction)(int signo, void *info, void *ctx);
void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
};
unsigned long sa_flags;
void (*sa_restorer)(void);
@@ -442,13 +664,15 @@ namespace __sanitizer {
extern int af_inet6;
uptr __sanitizer_in_addr_sz(int af);
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
struct __sanitizer_dl_phdr_info {
uptr dlpi_addr;
const char *dlpi_name;
const void *dlpi_phdr;
short dlpi_phnum;
};
+
+ extern unsigned struct_ElfW_Phdr_sz;
#endif
struct __sanitizer_addrinfo {
@@ -456,7 +680,7 @@ namespace __sanitizer {
int ai_family;
int ai_socktype;
int ai_protocol;
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
@@ -482,13 +706,14 @@ namespace __sanitizer {
short revents;
};
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD
typedef unsigned __sanitizer_nfds_t;
#else
typedef unsigned long __sanitizer_nfds_t;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if !SANITIZER_ANDROID
+# if SANITIZER_LINUX
struct __sanitizer_glob_t {
uptr gl_pathc;
char **gl_pathv;
@@ -501,10 +726,27 @@ namespace __sanitizer {
int (*gl_lstat)(const char *, void *);
int (*gl_stat)(const char *, void *);
};
+# elif SANITIZER_FREEBSD
+ struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char*, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char*);
+ int (*gl_lstat)(const char*, void* /* struct stat* */);
+ int (*gl_stat)(const char*, void* /* struct stat* */);
+ };
+# endif // SANITIZER_FREEBSD
+# if SANITIZER_LINUX || SANITIZER_FREEBSD
extern int glob_nomatch;
extern int glob_altdirfunc;
-#endif
+# endif
+#endif // !SANITIZER_ANDROID
extern unsigned path_max;
@@ -512,13 +754,43 @@ namespace __sanitizer {
uptr we_wordc;
char **we_wordv;
uptr we_offs;
+#if SANITIZER_FREEBSD
+ char *we_strings;
+ uptr we_nbytes;
+#endif
};
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_FILE {
+ int _flags;
+ char *_IO_read_ptr;
+ char *_IO_read_end;
+ char *_IO_read_base;
+ char *_IO_write_base;
+ char *_IO_write_ptr;
+ char *_IO_write_end;
+ char *_IO_buf_base;
+ char *_IO_buf_end;
+ char *_IO_save_base;
+ char *_IO_backup_base;
+ char *_IO_save_end;
+ void *_markers;
+ __sanitizer_FILE *_chain;
+ int _fileno;
+ };
+# define SANITIZER_HAS_STRUCT_FILE 1
+#else
+ typedef void __sanitizer_FILE;
+# define SANITIZER_HAS_STRUCT_FILE 0
+#endif
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__))
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
+ extern unsigned struct_user_vfpregs_struct_sz;
extern int ptrace_peektext;
extern int ptrace_peekdata;
@@ -529,13 +801,16 @@ namespace __sanitizer {
extern int ptrace_setfpregs;
extern int ptrace_getfpxregs;
extern int ptrace_setfpxregs;
+ extern int ptrace_getvfpregs;
+ extern int ptrace_setvfpregs;
extern int ptrace_getsiginfo;
extern int ptrace_setsiginfo;
extern int ptrace_getregset;
extern int ptrace_setregset;
+ extern int ptrace_geteventmsg;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
extern unsigned struct_shminfo_sz;
extern unsigned struct_shm_info_sz;
extern int shmctl_ipc_stat;
@@ -544,6 +819,8 @@ namespace __sanitizer {
extern int shmctl_shm_stat;
#endif
+ extern int map_fixed;
+
// ioctl arguments
struct __sanitizer_ifconf {
int ifc_len;
@@ -556,6 +833,68 @@ namespace __sanitizer {
};
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer__obstack_chunk {
+ char *limit;
+ struct __sanitizer__obstack_chunk *prev;
+};
+
+struct __sanitizer_obstack {
+ long chunk_size;
+ struct __sanitizer__obstack_chunk *chunk;
+ char *object_base;
+ char *next_free;
+ uptr more_fields[7];
+};
+
+typedef uptr (*__sanitizer_cookie_io_read)(void *cookie, char *buf, uptr size);
+typedef uptr (*__sanitizer_cookie_io_write)(void *cookie, const char *buf,
+ uptr size);
+typedef int (*__sanitizer_cookie_io_seek)(void *cookie, u64 *offset,
+ int whence);
+typedef int (*__sanitizer_cookie_io_close)(void *cookie);
+
+struct __sanitizer_cookie_io_functions_t {
+ __sanitizer_cookie_io_read read;
+ __sanitizer_cookie_io_write write;
+ __sanitizer_cookie_io_seek seek;
+ __sanitizer_cookie_io_close close;
+};
+#endif
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || defined(__sparc__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
#if defined(__sparc__)
// In sparc the 14 bits SIZE field overlaps with the
// least significant bit of DIR, so either IOC_READ or
@@ -564,15 +903,15 @@ namespace __sanitizer {
((((((nr) >> 29) & 0x7) & (4U|2U)) == 0)? \
0 : (((nr) >> 16) & 0x3fff))
#else
-# define IOC_SIZE(nr) (((nr) >> 16) & 0x3fff)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
#endif
- extern unsigned struct_arpreq_sz;
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
extern unsigned struct_winsize_sz;
#if SANITIZER_LINUX
+ extern unsigned struct_arpreq_sz;
extern unsigned struct_cdrom_msf_sz;
extern unsigned struct_cdrom_multisession_sz;
extern unsigned struct_cdrom_read_audio_sz;
@@ -581,9 +920,6 @@ namespace __sanitizer {
extern unsigned struct_cdrom_tocentry_sz;
extern unsigned struct_cdrom_tochdr_sz;
extern unsigned struct_cdrom_volctrl_sz;
- extern unsigned struct_copr_buffer_sz;
- extern unsigned struct_copr_debug_buf_sz;
- extern unsigned struct_copr_msg_sz;
extern unsigned struct_ff_effect_sz;
extern unsigned struct_floppy_drive_params_sz;
extern unsigned struct_floppy_drive_struct_sz;
@@ -597,23 +933,28 @@ namespace __sanitizer {
extern unsigned struct_hd_geometry_sz;
extern unsigned struct_input_absinfo_sz;
extern unsigned struct_input_id_sz;
+ extern unsigned struct_mtpos_sz;
+ extern unsigned struct_termio_sz;
+ extern unsigned struct_vt_consize_sz;
+ extern unsigned struct_vt_sizes_sz;
+ extern unsigned struct_vt_stat_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ extern unsigned struct_copr_buffer_sz;
+ extern unsigned struct_copr_debug_buf_sz;
+ extern unsigned struct_copr_msg_sz;
extern unsigned struct_midi_info_sz;
extern unsigned struct_mtget_sz;
extern unsigned struct_mtop_sz;
- extern unsigned struct_mtpos_sz;
extern unsigned struct_rtentry_sz;
extern unsigned struct_sbi_instrument_sz;
extern unsigned struct_seq_event_rec_sz;
extern unsigned struct_synth_info_sz;
- extern unsigned struct_termio_sz;
- extern unsigned struct_vt_consize_sz;
extern unsigned struct_vt_mode_sz;
- extern unsigned struct_vt_sizes_sz;
- extern unsigned struct_vt_stat_sz;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ax25_parms_struct_sz;
extern unsigned struct_cyclades_monitor_sz;
extern unsigned struct_input_keymap_entry_sz;
@@ -624,7 +965,6 @@ namespace __sanitizer {
extern unsigned struct_kbsentry_sz;
extern unsigned struct_mtconfiginfo_sz;
extern unsigned struct_nr_parms_struct_sz;
- extern unsigned struct_ppp_stats_sz;
extern unsigned struct_scc_modem_sz;
extern unsigned struct_scc_stat_sz;
extern unsigned struct_serial_multiport_struct_sz;
@@ -632,7 +972,12 @@ namespace __sanitizer {
extern unsigned struct_sockaddr_ax25_sz;
extern unsigned struct_unimapdesc_sz;
extern unsigned struct_unimapinit_sz;
-#endif
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned struct_audio_buf_info_sz;
+ extern unsigned struct_ppp_stats_sz;
+#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
extern unsigned struct_sioc_sg_req_sz;
@@ -643,7 +988,7 @@ namespace __sanitizer {
// A special value to mark ioctls that are not present on the target platform,
// when it can not be determined without including any system headers.
- extern unsigned IOCTL_NOT_PRESENT;
+ extern const unsigned IOCTL_NOT_PRESENT;
extern unsigned IOCTL_FIOASYNC;
extern unsigned IOCTL_FIOCLEX;
@@ -689,7 +1034,7 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSPGRP;
extern unsigned IOCTL_TIOCSTI;
extern unsigned IOCTL_TIOCSWINSZ;
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
extern unsigned IOCTL_SIOCGETSGCNT;
extern unsigned IOCTL_SIOCGETVIFCNT;
#endif
@@ -783,9 +1128,7 @@ namespace __sanitizer {
extern unsigned IOCTL_HDIO_SET_MULTCOUNT;
extern unsigned IOCTL_HDIO_SET_NOWERR;
extern unsigned IOCTL_HDIO_SET_UNMASKINTR;
- extern unsigned IOCTL_MTIOCGET;
extern unsigned IOCTL_MTIOCPOS;
- extern unsigned IOCTL_MTIOCTOP;
extern unsigned IOCTL_PPPIOCGASYNCMAP;
extern unsigned IOCTL_PPPIOCGDEBUG;
extern unsigned IOCTL_PPPIOCGFLAGS;
@@ -797,9 +1140,7 @@ namespace __sanitizer {
extern unsigned IOCTL_PPPIOCSMAXCID;
extern unsigned IOCTL_PPPIOCSMRU;
extern unsigned IOCTL_PPPIOCSXASYNCMAP;
- extern unsigned IOCTL_SIOCADDRT;
extern unsigned IOCTL_SIOCDARP;
- extern unsigned IOCTL_SIOCDELRT;
extern unsigned IOCTL_SIOCDRARP;
extern unsigned IOCTL_SIOCGARP;
extern unsigned IOCTL_SIOCGIFENCAP;
@@ -828,6 +1169,39 @@ namespace __sanitizer {
extern unsigned IOCTL_SNDCTL_COPR_SENDMSG;
extern unsigned IOCTL_SNDCTL_COPR_WCODE;
extern unsigned IOCTL_SNDCTL_COPR_WDATA;
+ extern unsigned IOCTL_TCFLSH;
+ extern unsigned IOCTL_TCGETA;
+ extern unsigned IOCTL_TCGETS;
+ extern unsigned IOCTL_TCSBRK;
+ extern unsigned IOCTL_TCSBRKP;
+ extern unsigned IOCTL_TCSETA;
+ extern unsigned IOCTL_TCSETAF;
+ extern unsigned IOCTL_TCSETAW;
+ extern unsigned IOCTL_TCSETS;
+ extern unsigned IOCTL_TCSETSF;
+ extern unsigned IOCTL_TCSETSW;
+ extern unsigned IOCTL_TCXONC;
+ extern unsigned IOCTL_TIOCGLCKTRMIOS;
+ extern unsigned IOCTL_TIOCGSOFTCAR;
+ extern unsigned IOCTL_TIOCINQ;
+ extern unsigned IOCTL_TIOCLINUX;
+ extern unsigned IOCTL_TIOCSERCONFIG;
+ extern unsigned IOCTL_TIOCSERGETLSR;
+ extern unsigned IOCTL_TIOCSERGWILD;
+ extern unsigned IOCTL_TIOCSERSWILD;
+ extern unsigned IOCTL_TIOCSLCKTRMIOS;
+ extern unsigned IOCTL_TIOCSSOFTCAR;
+ extern unsigned IOCTL_VT_DISALLOCATE;
+ extern unsigned IOCTL_VT_GETSTATE;
+ extern unsigned IOCTL_VT_RESIZE;
+ extern unsigned IOCTL_VT_RESIZEX;
+ extern unsigned IOCTL_VT_SENDSIG;
+#endif // SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+ extern unsigned IOCTL_MTIOCGET;
+ extern unsigned IOCTL_MTIOCTOP;
+ extern unsigned IOCTL_SIOCADDRT;
+ extern unsigned IOCTL_SIOCDELRT;
extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
@@ -918,40 +1292,14 @@ namespace __sanitizer {
extern unsigned IOCTL_SOUND_PCM_READ_RATE;
extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
- extern unsigned IOCTL_TCFLSH;
- extern unsigned IOCTL_TCGETA;
- extern unsigned IOCTL_TCGETS;
- extern unsigned IOCTL_TCSBRK;
- extern unsigned IOCTL_TCSBRKP;
- extern unsigned IOCTL_TCSETA;
- extern unsigned IOCTL_TCSETAF;
- extern unsigned IOCTL_TCSETAW;
- extern unsigned IOCTL_TCSETS;
- extern unsigned IOCTL_TCSETSF;
- extern unsigned IOCTL_TCSETSW;
- extern unsigned IOCTL_TCXONC;
- extern unsigned IOCTL_TIOCGLCKTRMIOS;
- extern unsigned IOCTL_TIOCGSOFTCAR;
- extern unsigned IOCTL_TIOCINQ;
- extern unsigned IOCTL_TIOCLINUX;
- extern unsigned IOCTL_TIOCSERCONFIG;
- extern unsigned IOCTL_TIOCSERGETLSR;
- extern unsigned IOCTL_TIOCSERGWILD;
- extern unsigned IOCTL_TIOCSERSWILD;
- extern unsigned IOCTL_TIOCSLCKTRMIOS;
- extern unsigned IOCTL_TIOCSSOFTCAR;
extern unsigned IOCTL_VT_ACTIVATE;
- extern unsigned IOCTL_VT_DISALLOCATE;
extern unsigned IOCTL_VT_GETMODE;
- extern unsigned IOCTL_VT_GETSTATE;
extern unsigned IOCTL_VT_OPENQRY;
extern unsigned IOCTL_VT_RELDISP;
- extern unsigned IOCTL_VT_RESIZE;
- extern unsigned IOCTL_VT_RESIZEX;
- extern unsigned IOCTL_VT_SENDSIG;
extern unsigned IOCTL_VT_SETMODE;
extern unsigned IOCTL_VT_WAITACTIVE;
-#endif
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned IOCTL_CYGETDEFTHRESH;
extern unsigned IOCTL_CYGETDEFTIMEOUT;
@@ -977,37 +1325,25 @@ namespace __sanitizer {
extern unsigned IOCTL_FS_IOC_SETVERSION;
extern unsigned IOCTL_GIO_CMAP;
extern unsigned IOCTL_GIO_FONT;
- extern unsigned IOCTL_GIO_SCRNMAP;
extern unsigned IOCTL_GIO_UNIMAP;
extern unsigned IOCTL_GIO_UNISCRNMAP;
extern unsigned IOCTL_KDADDIO;
extern unsigned IOCTL_KDDELIO;
- extern unsigned IOCTL_KDDISABIO;
- extern unsigned IOCTL_KDENABIO;
extern unsigned IOCTL_KDGETKEYCODE;
- extern unsigned IOCTL_KDGETLED;
- extern unsigned IOCTL_KDGETMODE;
extern unsigned IOCTL_KDGKBDIACR;
extern unsigned IOCTL_KDGKBENT;
extern unsigned IOCTL_KDGKBLED;
extern unsigned IOCTL_KDGKBMETA;
- extern unsigned IOCTL_KDGKBMODE;
extern unsigned IOCTL_KDGKBSENT;
- extern unsigned IOCTL_KDGKBTYPE;
extern unsigned IOCTL_KDMAPDISP;
- extern unsigned IOCTL_KDMKTONE;
extern unsigned IOCTL_KDSETKEYCODE;
- extern unsigned IOCTL_KDSETLED;
- extern unsigned IOCTL_KDSETMODE;
extern unsigned IOCTL_KDSIGACCEPT;
extern unsigned IOCTL_KDSKBDIACR;
extern unsigned IOCTL_KDSKBENT;
extern unsigned IOCTL_KDSKBLED;
extern unsigned IOCTL_KDSKBMETA;
- extern unsigned IOCTL_KDSKBMODE;
extern unsigned IOCTL_KDSKBSENT;
extern unsigned IOCTL_KDUNMAPDISP;
- extern unsigned IOCTL_KIOCSOUND;
extern unsigned IOCTL_LPABORT;
extern unsigned IOCTL_LPABORTOPEN;
extern unsigned IOCTL_LPCAREFUL;
@@ -1022,7 +1358,6 @@ namespace __sanitizer {
extern unsigned IOCTL_MTIOCSETCONFIG;
extern unsigned IOCTL_PIO_CMAP;
extern unsigned IOCTL_PIO_FONT;
- extern unsigned IOCTL_PIO_SCRNMAP;
extern unsigned IOCTL_PIO_UNIMAP;
extern unsigned IOCTL_PIO_UNIMAPCLR;
extern unsigned IOCTL_PIO_UNISCRNMAP;
@@ -1050,9 +1385,29 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSERGETMULTI;
extern unsigned IOCTL_TIOCSERSETMULTI;
extern unsigned IOCTL_TIOCSSERIAL;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+ extern unsigned IOCTL_GIO_SCRNMAP;
+ extern unsigned IOCTL_KDDISABIO;
+ extern unsigned IOCTL_KDENABIO;
+ extern unsigned IOCTL_KDGETLED;
+ extern unsigned IOCTL_KDGETMODE;
+ extern unsigned IOCTL_KDGKBMODE;
+ extern unsigned IOCTL_KDGKBTYPE;
+ extern unsigned IOCTL_KDMKTONE;
+ extern unsigned IOCTL_KDSETLED;
+ extern unsigned IOCTL_KDSETMODE;
+ extern unsigned IOCTL_KDSKBMODE;
+ extern unsigned IOCTL_KIOCSOUND;
+ extern unsigned IOCTL_PIO_SCRNMAP;
#endif
+ extern const int errno_EINVAL;
extern const int errno_EOWNERDEAD;
+
+ extern const int si_SEGV_MAPERR;
+ extern const int si_SEGV_ACCERR;
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc
index ef5cb0b03b8..ed44633bc18 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -7,19 +7,38 @@
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
-// sanitizer_libc.h.
+// sanitizer_posix.h.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+
+#if SANITIZER_POSIX
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
+#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
+#include <fcntl.h>
+#include <signal.h>
#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/utsname.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <sys/personality.h>
+#endif
+
+#if SANITIZER_FREEBSD
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
namespace __sanitizer {
// ------------- sanitizer_common.h
@@ -27,44 +46,79 @@ uptr GetMmapGranularity() {
return GetPageSize();
}
+#if SANITIZER_WORDSIZE == 32
+// Take care of unusable kernel area in top gigabyte.
+static uptr GetKernelAreaSize() {
+#if SANITIZER_LINUX && !SANITIZER_X32
+ const uptr gbyte = 1UL << 30;
+
+  // First, check whether there are writable segments
+  // mapped to the top gigabyte (e.g. the stack).
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr end, prot;
+ while (proc_maps.Next(/*start*/nullptr, &end,
+ /*offset*/nullptr, /*filename*/nullptr,
+ /*filename_size*/0, &prot)) {
+ if ((end >= 3 * gbyte)
+ && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
+ return 0;
+ }
+
+#if !SANITIZER_ANDROID
+  // Even if nothing is mapped, the top gigabyte may still be accessible
+  // if we are running on a 64-bit kernel.
+  // uname() may report misleading results if the personality type
+  // is modified (e.g. under schroot), so check that as well.
+ struct utsname uname_info;
+ int pers = personality(0xffffffffUL);
+ if (!(pers & PER_MASK)
+ && uname(&uname_info) == 0
+ && internal_strstr(uname_info.machine, "64"))
+ return 0;
+#endif // !SANITIZER_ANDROID
+
+ // Top gigabyte is reserved for kernel.
+ return gbyte;
+#else
+ return 0;
+#endif // SANITIZER_LINUX && !SANITIZER_X32
+}
+#endif // SANITIZER_WORDSIZE == 32
+
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__)
+# if defined(__powerpc64__) || defined(__aarch64__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
- // We somehow need to figure our which one we are using now and choose
+ // We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough.
- return (1ULL << 44) - 1; // 0x00000fffffffffffUL
+  // This should (and does) work for both PowerPC64 endian modes.
+ // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+ return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+# elif defined(__mips64)
+ return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif
#else // SANITIZER_WORDSIZE == 32
- // FIXME: We can probably lower this on Android?
- return (1ULL << 32) - 1; // 0xffffffff;
+ uptr res = (1ULL << 32) - 1; // 0xffffffff;
+ if (!common_flags()->full_address_space)
+ res -= GetKernelAreaSize();
+ CHECK_LT(reinterpret_cast<uptr>(&res), res);
+ return res;
#endif // SANITIZER_WORDSIZE
}
void *MmapOrDie(uptr size, const char *mem_type) {
size = RoundUpTo(size, GetPageSizeCached());
- uptr res = internal_mmap(0, size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
+ uptr res = internal_mmap(nullptr, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
int reserrno;
- if (internal_iserror(res, &reserrno)) {
- static int recursion_count;
- if (recursion_count) {
- // The Report() and CHECK calls below may call mmap recursively and fail.
- // If we went into recursion, just die.
- RawWrite("ERROR: Failed to mmap\n");
- Die();
- }
- recursion_count++;
- Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %d\n",
- SanitizerToolName, size, size, mem_type, reserrno);
- DumpProcessMap();
- CHECK("unable to mmap" && 0);
- }
+ if (internal_iserror(res, &reserrno))
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ IncreaseTotalMmap(size);
return (void *)res;
}
@@ -76,20 +130,20 @@ void UnmapOrDie(void *addr, uptr size) {
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
}
+ DecreaseTotalMmap(size);
}
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
uptr PageSize = GetPageSizeCached();
- uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
- RoundUpTo(size, PageSize),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
- -1, 0);
+ uptr p = internal_mmap(nullptr,
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ -1, 0);
int reserrno;
if (internal_iserror(p, &reserrno))
- Report("ERROR: "
- "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
- SanitizerToolName, size, size, fixed_addr, reserrno);
+ ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
+ IncreaseTotalMmap(size);
return (void *)p;
}
@@ -102,33 +156,84 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
-1, 0);
int reserrno;
if (internal_iserror(p, &reserrno)) {
- Report("ERROR:"
- " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
- SanitizerToolName, size, size, fixed_addr, reserrno);
- CHECK("unable to mmap" && 0);
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
}
+ IncreaseTotalMmap(size);
return (void *)p;
}
-void *Mprotect(uptr fixed_addr, uptr size) {
- return (void *)internal_mmap((void*)fixed_addr, size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED |
- MAP_NORESERVE, -1, 0);
+bool MprotectNoAccess(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
+}
+
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
+ int flags;
+ switch (mode) {
+ case RdOnly: flags = O_RDONLY; break;
+ case WrOnly: flags = O_WRONLY | O_CREAT; break;
+ case RdWr: flags = O_RDWR | O_CREAT; break;
+ }
+ fd_t res = internal_open(filename, flags, 0660);
+ if (internal_iserror(res, errno_p))
+ return kInvalidFd;
+ return res;
+}
+
+void CloseFile(fd_t fd) {
+ internal_close(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ uptr res = internal_read(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_read)
+ *bytes_read = res;
+ return true;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ uptr res = internal_write(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_written)
+ *bytes_written = res;
+ return true;
+}
+
+bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
+ uptr res = internal_rename(oldpath, newpath);
+ return !internal_iserror(res, error_p);
}
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
- uptr openrv = OpenFile(file_name, false);
- CHECK(!internal_iserror(openrv));
- fd_t fd = openrv;
+ fd_t fd = OpenFile(file_name, RdOnly);
+ CHECK(fd != kInvalidFd);
uptr fsize = internal_filesize(fd);
CHECK_NE(fsize, (uptr)-1);
CHECK_GT(fsize, 0);
*buff_size = RoundUpTo(fsize, GetPageSizeCached());
- uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
- return internal_iserror(map) ? 0 : (void *)map;
+ uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return internal_iserror(map) ? nullptr : (void *)map;
}
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
+ uptr flags = MAP_SHARED;
+ if (addr) flags |= MAP_FIXED;
+ uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
+ int mmap_errno = 0;
+ if (internal_iserror(p, &mmap_errno)) {
+ Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
+ fd, (long long)offset, size, p, mmap_errno);
+ return nullptr;
+ }
+ return (void *)p;
+}
static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
uptr start2, uptr end2) {
@@ -145,9 +250,11 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
while (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
- if (!IntervalsAreSeparate(start, end, range_start, range_end))
+ /*offset*/nullptr, /*filename*/nullptr,
+ /*filename_size*/0, /*protection*/nullptr)) {
+ if (start == end) continue; // Empty range.
+ CHECK_NE(0, end);
+ if (!IntervalsAreSeparate(start, end - 1, range_start, range_end))
return false;
}
return true;
@@ -157,10 +264,10 @@ void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
const sptr kBufSize = 4095;
- char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
+ char *filename = (char*)MmapOrDie(kBufSize, __func__);
Report("Process memory map follows:\n");
- while (proc_maps.Next(&start, &end, /* file_offset */0,
- filename, kBufSize, /* protection */0)) {
+ while (proc_maps.Next(&start, &end, /* file_offset */nullptr,
+ filename, kBufSize, /* protection */nullptr)) {
Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
}
Report("End of process memory map.\n");
@@ -171,69 +278,28 @@ const char *GetPwd() {
return GetEnv("PWD");
}
-char *FindPathToBinary(const char *name) {
- const char *path = GetEnv("PATH");
- if (!path)
- return 0;
- uptr name_len = internal_strlen(name);
- InternalScopedBuffer<char> buffer(kMaxPathLength);
- const char *beg = path;
- while (true) {
- const char *end = internal_strchrnul(beg, ':');
- uptr prefix_len = end - beg;
- if (prefix_len + name_len + 2 <= kMaxPathLength) {
- internal_memcpy(buffer.data(), beg, prefix_len);
- buffer[prefix_len] = '/';
- internal_memcpy(&buffer[prefix_len + 1], name, name_len);
- buffer[prefix_len + 1 + name_len] = '\0';
- if (FileExists(buffer.data()))
- return internal_strdup(buffer.data());
- }
- if (*end == '\0') break;
- beg = end + 1;
- }
- return 0;
+bool IsPathSeparator(const char c) {
+ return c == '/';
}
-void MaybeOpenReportFile() {
- if (!log_to_file) return;
- uptr pid = internal_getpid();
- // If in tracer, use the parent's file.
- if (pid == stoptheworld_tracer_pid)
- pid = stoptheworld_tracer_ppid;
- if (report_fd_pid == pid) return;
- InternalScopedBuffer<char> report_path_full(4096);
- internal_snprintf(report_path_full.data(), report_path_full.size(),
- "%s.%d", report_path_prefix, pid);
- uptr openrv = OpenFile(report_path_full.data(), true);
- if (internal_iserror(openrv)) {
- report_fd = kStderrFd;
- log_to_file = false;
- Report("ERROR: Can't open file: %s\n", report_path_full.data());
- Die();
- }
- if (report_fd != kInvalidFd) {
- // We're in the child. Close the parent's log.
- internal_close(report_fd);
- }
- report_fd = openrv;
- report_fd_pid = pid;
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsPathSeparator(path[0]);
}
-void RawWrite(const char *buffer) {
- static const char *kRawWriteError =
- "RawWrite can't output requested buffer!\n";
- uptr length = (uptr)internal_strlen(buffer);
- MaybeOpenReportFile();
- if (length != internal_write(report_fd, buffer, length)) {
- internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ static const char *kWriteError =
+ "ReportFile::Write() can't output requested buffer!\n";
+ ReopenIfNecessary();
+ if (length != internal_write(fd, buffer, length)) {
+ internal_write(fd, kWriteError, internal_strlen(kWriteError));
Die();
}
}
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
uptr s, e, off, prot;
- InternalScopedString buff(4096);
+ InternalScopedString buff(kMaxPathLength);
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
@@ -246,6 +312,29 @@ bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
return false;
}
-} // namespace __sanitizer
+SignalContext SignalContext::Create(void *siginfo, void *context) {
+ uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
+ uptr pc, sp, bp;
+ GetPcSpBp(context, &pc, &sp, &bp);
+ return SignalContext(context, addr, pc, sp, bp);
+}
+
+// This function checks whether the built VMA matches the runtime one for
+// architectures with multiple VMA sizes.
+void CheckVMASize() {
+#ifdef __aarch64__
+ static const unsigned kBuiltVMA = SANITIZER_AARCH64_VMA;
+ unsigned maxRuntimeVMA =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+ if (kBuiltVMA != maxRuntimeVMA) {
+ Printf("WARNING: %s runtime VMA is not the one built for.\n",
+ SanitizerToolName);
+ Printf("\tBuilt VMA: %u bits\n", kBuiltVMA);
+ Printf("\tRuntime VMA: %u bits\n", maxRuntimeVMA);
+ }
+#endif
+}
+
+} // namespace __sanitizer
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_POSIX
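
The hunk above replaces the fixed PowerPC64 constant with a runtime probe: MostSignificantSetBitIndex(GET_CURRENT_FRAME()) infers how many VA bits the kernel actually provides, which also covers aarch64's 39/42/47-bit layouts, and CheckVMASize() reuses the same probe to warn about a build/runtime VMA mismatch. A minimal standalone sketch of that trick for 64-bit targets follows; GuessMaxVirtualAddress is an illustrative name, not part of the runtime:

#include <cstdint>
#include <cstdio>

// Standalone equivalent of the GET_CURRENT_FRAME() probe used by
// GetMaxVirtualAddress() and CheckVMASize(): the stack sits near the top of
// the usable address space, so the most significant set bit of a frame
// address tells us how many VA bits the kernel actually provides.
// Heuristic only: 'ulimit -s unlimited' can move the stack lower.
static uintptr_t GuessMaxVirtualAddress() {
  uintptr_t frame = (uintptr_t)__builtin_frame_address(0);
  unsigned msb = 63u - (unsigned)__builtin_clzll((unsigned long long)frame);
  return ((uintptr_t)1 << (msb + 1)) - 1;
}

int main() {
  printf("max user VA (guess): 0x%zx\n", (size_t)GuessMaxVirtualAddress());
  return 0;
}
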
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.h b/libsanitizer/sanitizer_common/sanitizer_posix.h
new file mode 100644
index 00000000000..8dd259e32ba
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.h
@@ -0,0 +1,81 @@
+//===-- sanitizer_posix.h -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and declares some useful POSIX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_POSIX_H
+#define SANITIZER_POSIX_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_internal_defs.h"
+
+#if !SANITIZER_POSIX
+// Make it hard to accidentally use any of the functions declared in this file:
+#error This file should only be included on POSIX
+#endif
+
+namespace __sanitizer {
+
+// I/O
+// Don't use directly, use __sanitizer::OpenFile() instead.
+uptr internal_open(const char *filename, int flags);
+uptr internal_open(const char *filename, int flags, u32 mode);
+uptr internal_close(fd_t fd);
+
+uptr internal_read(fd_t fd, void *buf, uptr count);
+uptr internal_write(fd_t fd, const void *buf, uptr count);
+
+// Memory
+uptr internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, OFF_T offset);
+uptr internal_munmap(void *addr, uptr length);
+int internal_mprotect(void *addr, uptr length, int prot);
+
+// OS
+uptr internal_filesize(fd_t fd); // -1 on error.
+uptr internal_stat(const char *path, void *buf);
+uptr internal_lstat(const char *path, void *buf);
+uptr internal_fstat(fd_t fd, void *buf);
+uptr internal_dup2(int oldfd, int newfd);
+uptr internal_readlink(const char *path, char *buf, uptr bufsize);
+uptr internal_unlink(const char *path);
+uptr internal_rename(const char *oldpath, const char *newpath);
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
+
+uptr internal_ptrace(int request, int pid, void *addr, void *data);
+uptr internal_waitpid(int pid, int *status, int options);
+
+int internal_fork();
+
+// These functions call appropriate pthread_ functions directly, bypassing
+// the interceptor. They are weak and may not be present in some tools.
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_create(void *th, void *attr, void *(*callback)(void *),
+ void *param);
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_join(void *th, void **ret);
+
+#define DEFINE_REAL_PTHREAD_FUNCTIONS \
+ namespace __sanitizer { \
+ int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \
+ void *param) { \
+ return REAL(pthread_create)(th, attr, callback, param); \
+ } \
+ int real_pthread_join(void *th, void **ret) { \
+ return REAL(pthread_join(th, ret)); \
+ } \
+ } // namespace __sanitizer
+
+int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+
+int internal_sigaction(int signum, const void *act, void *oldact);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX_H
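
The new file helpers defined in sanitizer_posix.cc above (OpenFile, ReadFromFile, WriteToFile, RenameFile) share one convention: return a handle or a bool and hand the OS error back through an optional out-parameter instead of leaving it in errno at the call site. A small self-contained sketch of that convention using plain POSIX calls; ReadWholeFile and my_error_t are illustrative names, not sanitizer APIs:

#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <string>

typedef int my_error_t;  // stand-in for the runtime's error_t

// Read up to max_len bytes of 'path' into 'out'. On failure return false and,
// if the caller asked for it, store the errno value in *err -- the same
// "optional error out-parameter" convention as ReadFromFile()/WriteToFile().
static bool ReadWholeFile(const char *path, std::string *out, size_t max_len,
                          my_error_t *err) {
  int fd = open(path, O_RDONLY);
  if (fd < 0) {
    if (err) *err = errno;
    return false;
  }
  char buf[4096];
  out->clear();
  while (out->size() < max_len) {
    ssize_t n = read(fd, buf, sizeof(buf));
    if (n < 0) {
      if (err) *err = errno;
      close(fd);
      return false;
    }
    if (n == 0) break;  // EOF
    out->append(buf, (size_t)n);
  }
  close(fd);
  return true;
}

int main() {
  std::string data;
  my_error_t err = 0;
  if (!ReadWholeFile("/etc/hostname", &data, 1 << 20, &err))
    fprintf(stderr, "read failed: errno=%d\n", err);
  else
    printf("read %zu bytes\n", data.size());
  return 0;
}
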
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
index ae782ac39cb..4b7273b4cc0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -12,19 +12,35 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_POSIX
+
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
#include <errno.h>
+#include <fcntl.h>
#include <pthread.h>
+#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
+#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
+#if SANITIZER_FREEBSD
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
namespace __sanitizer {
u32 GetUid() {
@@ -39,30 +55,61 @@ void FlushUnneededShadowMemory(uptr addr, uptr size) {
madvise((void*)addr, size, MADV_DONTNEED);
}
-void DisableCoreDumper() {
- struct rlimit nocore;
- nocore.rlim_cur = 0;
- nocore.rlim_max = 0;
- setrlimit(RLIMIT_CORE, &nocore);
+void NoHugePagesInRegion(uptr addr, uptr size) {
+#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
+ madvise((void *)addr, size, MADV_NOHUGEPAGE);
+#endif // MADV_NOHUGEPAGE
}
-bool StackSizeIsUnlimited() {
- struct rlimit rlim;
- CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
- return (rlim.rlim_cur == (uptr)-1);
+void DontDumpShadowMemory(uptr addr, uptr length) {
+#ifdef MADV_DONTDUMP
+ madvise((void *)addr, length, MADV_DONTDUMP);
+#endif
}
-void SetStackSizeLimitInBytes(uptr limit) {
- struct rlimit rlim;
- rlim.rlim_cur = limit;
- rlim.rlim_max = limit;
- if (setrlimit(RLIMIT_STACK, &rlim)) {
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile struct rlimit rlim;
+ rlim.rlim_cur = lim;
+ rlim.rlim_max = lim;
+ if (setrlimit(res, const_cast<struct rlimit *>(&rlim))) {
Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
Die();
}
+}
+
+void DisableCoreDumperIfNecessary() {
+ if (common_flags()->disable_coredump) {
+ setlim(RLIMIT_CORE, 0);
+ }
+}
+
+bool StackSizeIsUnlimited() {
+ rlim_t stack_size = getlim(RLIMIT_STACK);
+ return (stack_size == RLIM_INFINITY);
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ setlim(RLIMIT_STACK, (rlim_t)limit);
CHECK(!StackSizeIsUnlimited());
}
+bool AddressSpaceIsUnlimited() {
+ rlim_t as_size = getlim(RLIMIT_AS);
+ return (as_size == RLIM_INFINITY);
+}
+
+void SetAddressSpaceUnlimited() {
+ setlim(RLIMIT_AS, RLIM_INFINITY);
+ CHECK(AddressSpaceIsUnlimited());
+}
+
void SleepForSeconds(int seconds) {
sleep(seconds);
}
@@ -83,10 +130,193 @@ int Atexit(void (*function)(void)) {
#endif
}
-int internal_isatty(fd_t fd) {
- return isatty(fd);
+bool SupportsColoredOutput(fd_t fd) {
+ return isatty(fd) != 0;
+}
+
+#ifndef SANITIZER_GO
+// TODO(glider): different tools may require different altstack sizes.
+static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+
+void SetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ CHECK_EQ(0, sigaltstack(nullptr, &oldstack));
+ // If the alternate stack is already in place, do nothing.
+ // Android always sets an alternate stack, but it's too small for us.
+ if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
+ // TODO(glider): the mapped stack should have the MAP_STACK flag in the
+ // future. It is not required by man 2 sigaltstack now (they're using
+ // malloc()).
+ void* base = MmapOrDie(kAltStackSize, __func__);
+ altstack.ss_sp = (char*) base;
+ altstack.ss_flags = 0;
+ altstack.ss_size = kAltStackSize;
+ CHECK_EQ(0, sigaltstack(&altstack, nullptr));
+}
+
+void UnsetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ altstack.ss_sp = nullptr;
+ altstack.ss_flags = SS_DISABLE;
+ altstack.ss_size = kAltStackSize; // Some sane value required on Darwin.
+ CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
+ UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
+}
+
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+static void MaybeInstallSigaction(int signum,
+ SignalHandlerType handler) {
+ if (!IsDeadlySignal(signum))
+ return;
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)handler;
+ // Do not block the signal from being received in that signal's handler.
+ // Clients are responsible for handling this correctly.
+ sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
+ if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
+ CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));
+ VReport(1, "Installed the sigaction for signal %d\n", signum);
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ // Set the alternate signal stack for the main thread.
+ // This will cause SetAlternateSignalStack to be called twice, but the stack
+ // will be actually set only once.
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+ MaybeInstallSigaction(SIGSEGV, handler);
+ MaybeInstallSigaction(SIGBUS, handler);
+ MaybeInstallSigaction(SIGABRT, handler);
+ MaybeInstallSigaction(SIGFPE, handler);
+}
+#endif // SANITIZER_GO
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ uptr page_size = GetPageSizeCached();
+ // Checking too large memory ranges is slow.
+ CHECK_LT(size, page_size * 10);
+ int sock_pair[2];
+ if (pipe(sock_pair))
+ return false;
+ uptr bytes_written =
+ internal_write(sock_pair[1], reinterpret_cast<void *>(beg), size);
+ int write_errno;
+ bool result;
+ if (internal_iserror(bytes_written, &write_errno)) {
+ CHECK_EQ(EFAULT, write_errno);
+ result = false;
+ } else {
+ result = (bytes_written == size);
+ }
+ internal_close(sock_pair[0]);
+ internal_close(sock_pair[1]);
+ return result;
+}
+
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+  // to read the file mappings from /proc/self/maps. Luckily, the process won't
+  // be able to load additional libraries either, so it's fine to use the
+  // cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+ // Same for /proc/self/exe in the symbolizer.
+#if !SANITIZER_GO
+ Symbolizer::GetOrInit()->PrepareForSandboxing();
+ CovPrepareForSandboxing(args);
+#endif
+}
+
+#if SANITIZER_ANDROID || SANITIZER_GO
+int GetNamedMappingFd(const char *name, uptr size) {
+ return -1;
+}
+#else
+int GetNamedMappingFd(const char *name, uptr size) {
+ if (!common_flags()->decorate_proc_maps)
+ return -1;
+ char shmname[200];
+ CHECK(internal_strlen(name) < sizeof(shmname) - 10);
+ internal_snprintf(shmname, sizeof(shmname), "%zu [%s]", internal_getpid(),
+ name);
+ int fd = shm_open(shmname, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU);
+ CHECK_GE(fd, 0);
+ int res = internal_ftruncate(fd, size);
+ CHECK_EQ(0, res);
+ res = shm_unlink(shmname);
+ CHECK_EQ(0, res);
+ return fd;
+}
+#endif
+
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
+ int fd = name ? GetNamedMappingFd(name, size) : -1;
+ unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
+ if (fd == -1) flags |= MAP_ANON;
+
+ uptr PageSize = GetPageSizeCached();
+ uptr p = internal_mmap((void *)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE,
+ flags, fd, 0);
+ int reserrno;
+ if (internal_iserror(p, &reserrno))
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, reserrno);
+ IncreaseTotalMmap(size);
+ return (void *)p;
+}
+
+void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ int fd = name ? GetNamedMappingFd(name, size) : -1;
+ unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
+ if (fd == -1) flags |= MAP_ANON;
+
+ return (void *)internal_mmap((void *)fixed_addr, size, PROT_NONE, flags, fd,
+ 0);
}
-} // namespace __sanitizer
+// This function is defined elsewhere if we intercepted pthread_attr_getstack.
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE int
+real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
+} // extern "C"
+int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
+#if !SANITIZER_GO && !SANITIZER_MAC
+ if (&real_pthread_attr_getstack)
+ return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
+ (size_t *)size);
#endif
+ return pthread_attr_getstack((pthread_attr_t *)attr, addr, (size_t *)size);
+}
+
+#if !SANITIZER_GO
+void AdjustStackSize(void *attr_) {
+ pthread_attr_t *attr = (pthread_attr_t *)attr_;
+ uptr stackaddr = 0;
+ uptr stacksize = 0;
+ my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+ // GLibC will return (0 - stacksize) as the stack address in the case when
+ // stacksize is set, but stackaddr is not.
+ bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
+ // We place a lot of tool data into TLS, account for that.
+ const uptr minstacksize = GetTlsSize() + 128*1024;
+ if (stacksize < minstacksize) {
+ if (!stack_set) {
+ if (stacksize != 0) {
+ VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
+ minstacksize);
+ pthread_attr_setstacksize(attr, minstacksize);
+ }
+ } else {
+ Printf("Sanitizer: pre-allocated stack size is insufficient: "
+ "%zu < %zu\n", stacksize, minstacksize);
+ Printf("Sanitizer: pthread_create is likely to fail.\n");
+ }
+ }
+}
+#endif // !SANITIZER_GO
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
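
IsAccessibleMemoryRange() above probes readability without installing a SIGSEGV handler: it writes the bytes into a pipe and treats EFAULT as "not readable". A self-contained sketch of the same trick (plain POSIX, no sanitizer internals; IsReadable is an illustrative name):

#include <unistd.h>
#include <cerrno>
#include <cstdio>

// Probe whether [p, p+size) is readable without taking a signal: write() into
// a pipe fails with EFAULT when the source buffer is bad. Like the runtime,
// keep 'size' small (well under the pipe buffer size).
static bool IsReadable(const void *p, size_t size) {
  int fds[2];
  if (pipe(fds)) return false;
  ssize_t n = write(fds[1], p, size);
  bool ok = (n >= 0 && (size_t)n == size);
  close(fds[0]);
  close(fds[1]);
  return ok;
}

int main() {
  int x = 42;
  printf("&x readable:      %d\n", IsReadable(&x, sizeof(x)));
  printf("nullptr readable: %d\n", IsReadable(nullptr, 1));
  return 0;
}
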
diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cc b/libsanitizer/sanitizer_common/sanitizer_printf.cc
index 08951c7e247..6688610bf0f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_printf.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_printf.cc
@@ -12,14 +12,15 @@
// inside it.
//===----------------------------------------------------------------------===//
-
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include <stdio.h>
#include <stdarg.h>
-#if SANITIZER_WINDOWS && !defined(va_copy)
+#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
+ !defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
#endif
@@ -92,11 +93,14 @@ static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
minimal_num_length, pad_with_zero, negative);
}
-static int AppendString(char **buff, const char *buff_end, const char *s) {
- if (s == 0)
+static int AppendString(char **buff, const char *buff_end, int precision,
+ const char *s) {
+ if (!s)
s = "<null>";
int result = 0;
for (; *s; s++) {
+ if (precision >= 0 && result >= precision)
+ break;
result += AppendChar(buff, buff_end, *s);
}
return result;
@@ -104,16 +108,16 @@ static int AppendString(char **buff, const char *buff_end, const char *s) {
static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int result = 0;
- result += AppendString(buff, buff_end, "0x");
+ result += AppendString(buff, buff_end, -1, "0x");
result += AppendUnsigned(buff, buff_end, ptr_value, 16,
- (SANITIZER_WORDSIZE == 64) ? 12 : 8, true);
+ SANITIZER_POINTER_FORMAT_LENGTH, true);
return result;
}
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %(\\.\\*)?s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@@ -133,6 +137,12 @@ int VSNPrintf(char *buff, int buff_length,
width = width * 10 + *cur++ - '0';
}
}
+ bool have_precision = (cur[0] == '.' && cur[1] == '*');
+ int precision = -1;
+ if (have_precision) {
+ cur += 2;
+ precision = va_arg(args, int);
+ }
bool have_z = (*cur == 'z');
cur += have_z;
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
@@ -140,6 +150,8 @@ int VSNPrintf(char *buff, int buff_length,
s64 dval;
u64 uval;
bool have_flags = have_width | have_z | have_ll;
+ // Only %s supports precision for now
+ CHECK(!(precision >= 0 && *cur != 's'));
switch (*cur) {
case 'd': {
dval = have_ll ? va_arg(args, s64)
@@ -165,7 +177,7 @@ int VSNPrintf(char *buff, int buff_length,
}
case 's': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
- result += AppendString(&buff, buff_end, va_arg(args, char*));
+ result += AppendString(&buff, buff_end, precision, va_arg(args, char*));
break;
}
case 'c': {
@@ -236,28 +248,36 @@ static void SharedPrintfCode(bool append_pid, const char *format,
buffer_size = kLen;
}
needed_length = 0;
+ // Check that data fits into the current buffer.
+# define CHECK_NEEDED_LENGTH \
+ if (needed_length >= buffer_size) { \
+ if (!use_mmap) continue; \
+ RAW_CHECK_MSG(needed_length < kLen, \
+ "Buffer in Report is too short!\n"); \
+ }
if (append_pid) {
int pid = internal_getpid();
- needed_length += internal_snprintf(buffer, buffer_size, "==%d==", pid);
- if (needed_length >= buffer_size) {
- // The pid doesn't fit into the current buffer.
- if (!use_mmap)
- continue;
- RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ needed_length += internal_snprintf(buffer, buffer_size,
+ "==%s", exe_name);
+ CHECK_NEEDED_LENGTH
}
+ needed_length += internal_snprintf(buffer + needed_length,
+ buffer_size - needed_length,
+ "==%d==", pid);
+ CHECK_NEEDED_LENGTH
}
needed_length += VSNPrintf(buffer + needed_length,
buffer_size - needed_length, format, args);
- if (needed_length >= buffer_size) {
- // The message doesn't fit into the current buffer.
- if (!use_mmap)
- continue;
- RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n");
- }
+ CHECK_NEEDED_LENGTH
// If the message fit into the buffer, print it and exit.
break;
+# undef CHECK_NEEDED_LENGTH
}
RawWrite(buffer);
+ if (common_flags()->log_to_syslog)
+ WriteToSyslog(buffer);
CallPrintfAndReportCallback(buffer);
// If we had mapped any memory, clean up.
if (buffer != local_buffer)
@@ -265,6 +285,7 @@ static void SharedPrintfCode(bool append_pid, const char *format,
va_end(args2);
}
+FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -273,6 +294,7 @@ void Printf(const char *format, ...) {
}
// Like Printf, but prints the current PID before the output string.
+FORMAT(1, 2)
void Report(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -284,6 +306,7 @@ void Report(const char *format, ...) {
// Returns the number of symbols that should have been written to buffer
// (not including trailing '\0'). Thus, the string is truncated
// iff return value is not less than "length".
+FORMAT(3, 4)
int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
va_list args;
va_start(args, format);
@@ -292,6 +315,7 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
+FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
CHECK_LT(length_, size());
va_list args;
@@ -299,6 +323,7 @@ void InternalScopedString::append(const char *format, ...) {
VSNPrintf(data() + length_, size() - length_, format, args);
va_end(args);
length_ += internal_strlen(data() + length_);
+ CHECK_LT(length_, size());
}
-} // namespace __sanitizer
+} // namespace __sanitizer
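
The printf changes add a '%.*s' form: a precision taken from the argument list that caps how many characters AppendString() copies. The semantics match the standard snprintf precision, e.g.:

#include <cstdio>

int main() {
  const char *name = "sanitizer_common";
  char buf[64];
  // Print at most 9 characters of 'name'; the precision comes from the int
  // argument preceding the string, exactly like the new %.*s support.
  snprintf(buf, sizeof(buf), "module: %.*s", 9, name);
  printf("%s\n", buf);  // -> "module: sanitizer"
  return 0;
}
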
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
index 87887f6b74b..7477abf30b6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps.h
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
@@ -12,49 +12,38 @@
#ifndef SANITIZER_PROCMAPS_H
#define SANITIZER_PROCMAPS_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
-#if SANITIZER_WINDOWS
-class MemoryMappingLayout {
- public:
- explicit MemoryMappingLayout(bool cache_enabled) {
- (void)cache_enabled;
- }
- bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- UNIMPLEMENTED();
- }
-};
-
-#else // SANITIZER_WINDOWS
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
struct ProcSelfMapsBuff {
char *data;
uptr mmaped_size;
uptr len;
};
-#endif // SANITIZER_LINUX
+
+// Reads the process memory map in an OS-specific way.
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
class MemoryMappingLayout {
public:
explicit MemoryMappingLayout(bool cache_enabled);
+ ~MemoryMappingLayout();
bool Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size, uptr *protection);
void Reset();
- // Gets the object file name and the offset in that object for a given
- // address 'addr'. Returns true on success.
- bool GetObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection);
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
static void CacheMemoryMappings();
- ~MemoryMappingLayout();
+
+ // Stores the list of mapped objects into an array.
+ uptr DumpListOfModules(LoadedModule *modules, uptr max_modules,
+ string_predicate_t filter);
// Memory protection masks.
static const uptr kProtectionRead = 1;
@@ -64,41 +53,12 @@ class MemoryMappingLayout {
private:
void LoadFromCache();
- // Default implementation of GetObjectNameAndOffset.
- // Quite slow, because it iterates through the whole process map for each
- // lookup.
- bool IterateForObjectNameAndOffset(uptr addr, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- Reset();
- uptr start, end, file_offset;
- for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size,
- protection);
- i++) {
- if (addr >= start && addr < end) {
- // Don't subtract 'start' for the first entry:
- // * If a binary is compiled w/o -pie, then the first entry in
- // process maps is likely the binary itself (all dynamic libs
- // are mapped higher in address space). For such a binary,
- // instruction offset in binary coincides with the actual
- // instruction address in virtual memory (as code section
- // is mapped to a fixed memory range).
- // * If a binary is compiled with -pie, all the modules are
- // mapped high at address space (in particular, higher than
- // shadow memory of the tool), so the module can't be the
- // first entry.
- *offset = (addr - (i ? start : 0)) + file_offset;
- return true;
- }
- }
- if (filename_size)
- filename[0] = '\0';
- return false;
- }
-
-# if SANITIZER_LINUX
+
+ // FIXME: Hide implementation details for different platforms in
+ // platform-specific files.
+# if SANITIZER_FREEBSD || SANITIZER_LINUX
ProcSelfMapsBuff proc_self_maps_;
- char *current_;
+ const char *current_;
// Static mappings cache.
static ProcSelfMapsBuff cached_proc_self_maps_;
@@ -127,7 +87,10 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
// Returns code range for the specified module.
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
-#endif // SANITIZER_WINDOWS
+bool IsDecimal(char c);
+uptr ParseDecimal(const char **p);
+bool IsHex(char c);
+uptr ParseHex(const char **p);
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
new file mode 100644
index 00000000000..1cf3f8510fb
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
@@ -0,0 +1,179 @@
+//===-- sanitizer_procmaps_common.cc --------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (common parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+
+static int TranslateDigit(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+// Parse a number and advance 'p' to the first non-digit character.
+static uptr ParseNumber(const char **p, int base) {
+ uptr n = 0;
+ int d;
+ CHECK(base >= 2 && base <= 16);
+ while ((d = TranslateDigit(**p)) >= 0 && d < base) {
+ n = n * base + d;
+ (*p)++;
+ }
+ return n;
+}
+
+bool IsDecimal(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 10;
+}
+
+uptr ParseDecimal(const char **p) {
+ return ParseNumber(p, 10);
+}
+
+bool IsHex(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 16;
+}
+
+uptr ParseHex(const char **p) {
+ return ParseNumber(p, 16);
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ ReadProcMaps(&proc_self_maps_);
+ if (cache_enabled) {
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ } else {
+ CHECK_GT(proc_self_maps_.mmaped_size, 0);
+ }
+ Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ if (cache_enabled)
+ CacheMemoryMappings();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
+}
+
+void MemoryMappingLayout::Reset() {
+ current_ = proc_self_maps_.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+ ProcSelfMapsBuff old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps_;
+ ReadProcMaps(&cached_proc_self_maps_);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end, cur_offset, prot;
+ InternalScopedString module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
+ module_name.size(), &prot);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ // Don't subtract 'cur_beg' from the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high at address space (in particular, higher than
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ uptr base_address = (i ? cur_beg : 0) - cur_offset;
+ LoadedModule *cur_module = &modules[n_modules];
+ cur_module->set(cur_name, base_address);
+ cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ n_modules++;
+ }
+ return n_modules;
+}
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+ char *smaps = nullptr;
+ uptr smaps_cap = 0;
+ uptr smaps_len = 0;
+ if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len))
+ return;
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (IsHex(pos[0])) {
+ start = ParseHex(&pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ while (!IsDecimal(*pos)) pos++;
+ uptr rss = ParseDecimal(&pos) * 1024;
+ cb(start, rss, file, stats, stats_size);
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
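
GetMemoryProfile() above scans /proc/self/smaps, remembering each mapping's start address and whether it is file-backed, and passes the per-mapping Rss to a callback. A standalone sketch that simply sums Rss the same way (Linux only):

#include <fstream>
#include <iostream>
#include <string>

int main() {
  std::ifstream smaps("/proc/self/smaps");
  std::string line;
  unsigned long long rss_kb = 0;
  while (std::getline(smaps, line)) {
    // Per-mapping lines look like: "Rss:                 128 kB"
    if (line.compare(0, 4, "Rss:") == 0)
      rss_kb += std::stoull(line.substr(4));
  }
  std::cout << "total RSS: " << rss_kb << " kB\n";
  return 0;
}
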
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc
new file mode 100644
index 00000000000..fbc55203ab4
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_freebsd.cc
@@ -0,0 +1,86 @@
+//===-- sanitizer_procmaps_freebsd.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (FreeBSD-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD
+#include "sanitizer_common.h"
+#include "sanitizer_freebsd.h"
+#include "sanitizer_procmaps.h"
+
+#include <unistd.h>
+#include <sys/sysctl.h>
+#include <sys/user.h>
+
+// Fix 'kinfo_vmentry' definition on FreeBSD prior to v9.2 in 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# define kinfo_vmentry xkinfo_vmentry
+# endif
+#endif
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
+ size_t Size = 0;
+ int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ CHECK_GT(Size, 0);
+
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = MmapedSize;
+ Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+
+ proc_maps->data = (char*)VmMap;
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ if (!protection) protection = &dummy;
+ struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
+
+ *start = (uptr)VmEntry->kve_start;
+ *end = (uptr)VmEntry->kve_end;
+ *offset = (uptr)VmEntry->kve_offset;
+
+ *protection = 0;
+ if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+ *protection |= kProtectionRead;
+ if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+ *protection |= kProtectionWrite;
+ if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+ *protection |= kProtectionExecute;
+
+ if (filename != NULL && filename_size > 0) {
+ internal_snprintf(filename,
+ Min(filename_size, (uptr)PATH_MAX),
+ "%s", VmEntry->kve_path);
+ }
+
+ current_ += VmEntry->kve_structsize;
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
new file mode 100644
index 00000000000..e77e33fb2d7
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -0,0 +1,88 @@
+//===-- sanitizer_procmaps_linux.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Linux-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ CHECK(ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
+ &proc_maps->mmaped_size, &proc_maps->len));
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+ return c == c1 || c == c2;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ if (!protection) protection = &dummy;
+ char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ *start = ParseHex(&current_);
+ CHECK_EQ(*current_++, '-');
+ *end = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ CHECK(IsOneOf(*current_, '-', 'r'));
+ *protection = 0;
+ if (*current_++ == 'r')
+ *protection |= kProtectionRead;
+ CHECK(IsOneOf(*current_, '-', 'w'));
+ if (*current_++ == 'w')
+ *protection |= kProtectionWrite;
+ CHECK(IsOneOf(*current_, '-', 'x'));
+ if (*current_++ == 'x')
+ *protection |= kProtectionExecute;
+ CHECK(IsOneOf(*current_, 's', 'p'));
+ if (*current_++ == 's')
+ *protection |= kProtectionShared;
+ CHECK_EQ(*current_++, ' ');
+ *offset = ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ':');
+ ParseHex(&current_);
+ CHECK_EQ(*current_++, ' ');
+ while (IsDecimal(*current_))
+ current_++;
+ // Qemu may lack the trailing space.
+ // http://code.google.com/p/address-sanitizer/issues/detail?id=160
+ // CHECK_EQ(*current_++, ' ');
+ // Skip spaces.
+ while (current_ < next_line && *current_ == ' ')
+ current_++;
+ // Fill in the filename.
+ uptr i = 0;
+ while (current_ < next_line) {
+ if (filename && i < filename_size - 1)
+ filename[i++] = *current_;
+ current_++;
+ }
+ if (filename && i < filename_size)
+ filename[i] = 0;
+ current_ = next_line + 1;
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
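
The Linux Next() above parses /proc/self/maps by hand (ParseHex() plus explicit CHECKs) because it must not call into libc. Outside that constraint, the same line format can be pulled apart with sscanf; a minimal sketch:

#include <cstdio>

int main() {
  FILE *f = fopen("/proc/self/maps", "r");
  if (!f) return 1;
  char line[512];
  while (fgets(line, sizeof(line), f)) {
    unsigned long start, end, offset;
    char prot[5], path[256];
    path[0] = '\0';
    // Example line: 08048000-08056000 r-xp 00000000 03:0c 64593  /foo/bar
    int n = sscanf(line, "%lx-%lx %4s %lx %*s %*s %255s",
                   &start, &end, prot, &offset, path);
    if (n >= 4)  // anonymous mappings have no path
      printf("%#lx-%#lx %s %s\n", start, end, prot, path);
  }
  fclose(f);
  return 0;
}
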
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
new file mode 100644
index 00000000000..393243b7c28
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -0,0 +1,188 @@
+//===-- sanitizer_procmaps_mac.cc -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Mac-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+
+namespace __sanitizer {
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ Reset();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
+// into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
+
+void MemoryMappingLayout::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
+ current_image_ = _dyld_image_count();
+ current_load_cmd_count_ = -1;
+ current_load_cmd_addr_ = 0;
+ current_magic_ = 0;
+ current_filetype_ = 0;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, http://code.google.com/p/google-perftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template<u32 kLCSegment, typename SegmentCommand>
+bool MemoryMappingLayout::NextSegmentLoad(
+ uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size, uptr *protection) {
+ const char* lc = current_load_cmd_addr_;
+ current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ if (start) *start = sc->vmaddr + dlloff;
+ if (protection) {
+ // Return the initial protection.
+ *protection = sc->initprot;
+ }
+ if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
+ if (offset) {
+ if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
+ *offset = sc->vmaddr;
+ } else {
+ *offset = sc->fileoff;
+ }
+ }
+ if (filename) {
+ internal_strncpy(filename, _dyld_get_image_name(current_image_),
+ filename_size);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ for (; current_image_ >= 0; current_image_--) {
+ const mach_header* hdr = _dyld_get_image_header(current_image_);
+ if (!hdr) continue;
+ if (current_load_cmd_count_ < 0) {
+ // Set up for this image;
+ current_load_cmd_count_ = hdr->ncmds;
+ current_magic_ = hdr->magic;
+ current_filetype_ = hdr->filetype;
+ switch (current_magic_) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ }
+
+ for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
+ switch (current_magic_) {
+ // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ start, end, offset, filename, filename_size, protection))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ start, end, offset, filename, filename_size, protection))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load_cmd's in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end, prot;
+ InternalScopedString module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, 0, module_name.data(),
+ module_name.size(), &prot);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ LoadedModule *cur_module = nullptr;
+ if (n_modules > 0 &&
+ 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
+ cur_module = &modules[n_modules - 1];
+ } else {
+ cur_module = &modules[n_modules];
+ cur_module->set(cur_name, cur_beg);
+ n_modules++;
+ }
+ cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ }
+ return n_modules;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
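
On Mac the mapping information comes from dyld's image list rather than a /proc file, as the long comment above explains. A small sketch (macOS only) that walks the same dyld API and prints each image's name, ASLR slide, and load-command count:

#include <mach-o/dyld.h>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t n = _dyld_image_count();
  for (uint32_t i = 0; i < n; i++) {
    const char *name = _dyld_get_image_name(i);
    intptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const struct mach_header *hdr = _dyld_get_image_header(i);
    printf("%2u: slide=%#lx ncmds=%u %s\n", i, (long)slide,
           hdr ? hdr->ncmds : 0u, name ? name : "<unknown>");
  }
  return 0;
}
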
diff --git a/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
index 1e8d056a187..a635871be45 100644
--- a/libsanitizer/sanitizer_common/sanitizer_quarantine.h
+++ b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
@@ -47,11 +47,14 @@ class Quarantine {
}
void Init(uptr size, uptr cache_size) {
- max_size_ = size;
- min_size_ = size / 10 * 9; // 90% of max size.
+ atomic_store(&max_size_, size, memory_order_release);
+ atomic_store(&min_size_, size / 10 * 9,
+ memory_order_release); // 90% of max size.
max_cache_size_ = cache_size;
}
+ uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }
+
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
if (c->Size() > max_cache_size_)
@@ -63,15 +66,15 @@ class Quarantine {
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
- if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
+ if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
Recycle(cb);
}
private:
// Read-only data.
char pad0_[kCacheLineSize];
- uptr max_size_;
- uptr min_size_;
+ atomic_uintptr_t max_size_;
+ atomic_uintptr_t min_size_;
uptr max_cache_size_;
char pad1_[kCacheLineSize];
SpinMutex cache_mutex_;
@@ -81,9 +84,10 @@ class Quarantine {
void NOINLINE Recycle(Callback cb) {
Cache tmp;
+ uptr min_size = atomic_load(&min_size_, memory_order_acquire);
{
SpinMutexLock l(&cache_mutex_);
- while (cache_.Size() > min_size_) {
+ while (cache_.Size() > min_size) {
QuarantineBatch *b = cache_.DequeueBatch();
tmp.EnqueueBatch(b);
}
@@ -128,6 +132,7 @@ class QuarantineCache {
size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
}
QuarantineBatch *b = list_.back();
+ CHECK(b);
b->batch[b->count++] = ptr;
b->size += size;
SizeAdd(size);
@@ -146,7 +151,7 @@ class QuarantineCache {
QuarantineBatch *DequeueBatch() {
if (list_.empty())
- return 0;
+ return nullptr;
QuarantineBatch *b = list_.front();
list_.pop_front();
SizeSub(b->size);
@@ -166,12 +171,13 @@ class QuarantineCache {
NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ CHECK(b);
b->count = 0;
b->size = 0;
list_.push_back(b);
return b;
}
};
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // #ifndef SANITIZER_QUARANTINE_H
+#endif // SANITIZER_QUARANTINE_H
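
The quarantine change above turns the size limits into atomics (and adds a couple of CHECKs); the recycling policy itself is unchanged: freed blocks are queued until the total exceeds the configured maximum, then the oldest blocks are released until the total drops to 90% of that maximum. A minimal, self-contained sketch of that policy (hypothetical ToyQuarantine class, not the sanitizer's allocator-integrated one):

    #include <cstddef>
    #include <cstdlib>
    #include <deque>
    #include <functional>

    class ToyQuarantine {
     public:
      explicit ToyQuarantine(size_t max_size)
          : max_size_(max_size), min_size_(max_size / 10 * 9) {}  // 90% of max.
      // Queue a freed block; once the total exceeds max_size_, release the
      // oldest blocks until the total drops back below min_size_.
      void Put(void *ptr, size_t size,
               const std::function<void(void *)> &release) {
        queue_.push_back({ptr, size});
        total_ += size;
        if (total_ <= max_size_) return;
        while (total_ > min_size_) {
          Entry e = queue_.front();
          queue_.pop_front();
          total_ -= e.size;
          release(e.ptr);  // The memory becomes reusable only at this point.
        }
      }
     private:
      struct Entry { void *ptr; size_t size; };
      std::deque<Entry> queue_;
      size_t total_ = 0;
      size_t max_size_, min_size_;
    };

    int main() {
      ToyQuarantine q(1 << 20);  // 1 MiB quarantine
      for (int i = 0; i < 1000; i++)
        q.Put(std::malloc(4096), 4096, [](void *p) { std::free(p); });
      return 0;
    }
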
diff --git a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
index c6e79ad0c7c..e9be29fb3d5 100644
--- a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
@@ -15,13 +15,19 @@
#ifndef SANITIZER_REPORT_DECORATOR_H
#define SANITIZER_REPORT_DECORATOR_H
+#include "sanitizer_common.h"
+
namespace __sanitizer {
-class AnsiColorDecorator {
+class SanitizerCommonDecorator {
// FIXME: This is not portable. It assumes the special strings are printed to
// stdout, which is not the case on Windows (see SetConsoleTextAttribute()).
public:
- explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+ SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}
const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
+ const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
+ const char *Warning() { return Red(); }
+ const char *EndWarning() { return Default(); }
+ protected:
const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; }
@@ -30,10 +36,10 @@ class AnsiColorDecorator {
const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; }
const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; }
const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; }
- const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
private:
bool ansi_;
};
+
} // namespace __sanitizer
#endif // SANITIZER_REPORT_DECORATOR_H
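
The strings returned by the decorator are ordinary ANSI SGR escape sequences; whether they are emitted at all now depends on ColorizeReports() rather than on a constructor argument. A tiny standalone illustration of what those strings do on an ANSI terminal (plain printf here; the sanitizers use their internal Printf):

    #include <cstdio>
    int main() {
      const bool ansi = true;  // the decorator derives this from ColorizeReports()
      const char *warning = ansi ? "\033[1m\033[31m" : "";  // bold red, as Warning()
      const char *reset   = ansi ? "\033[1m\033[0m"  : "";  // as Default()/EndWarning()
      std::printf("%sERROR: example warning text%s\n", warning, reset);
      return 0;
    }
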
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
index e8d9f01e7f9..3c5313c3ae4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
@@ -10,193 +10,121 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"
+
#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
+#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
-const int kTabSize = 1024 * 1024; // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits; // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
-
-struct StackDesc {
- StackDesc *link;
+struct StackDepotNode {
+ StackDepotNode *link;
u32 id;
- u32 hash;
- uptr size;
+ atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
+ u32 size;
+ u32 tag;
uptr stack[1]; // [size]
-};
-static struct {
- StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
- atomic_uintptr_t region_pos; // Region allocator for StackDesc's.
- atomic_uintptr_t region_end;
- atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's.
- atomic_uint32_t seq[kPartCount]; // Unique id generators.
-} depot;
+ static const u32 kTabSizeLog = 20;
+ // Lower kTabSizeLog bits are equal for all items in one bucket.
+ // We use these bits to store the per-stack use counter.
+ static const u32 kUseCountBits = kTabSizeLog;
+ static const u32 kMaxUseCount = 1 << kUseCountBits;
+ static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+ static const u32 kHashMask = ~kUseCountMask;
+
+ typedef StackTrace args_type;
+ bool eq(u32 hash, const args_type &args) const {
+ u32 hash_bits =
+ atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+ if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
+ return false;
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != args.trace[i]) return false;
+ }
+ return true;
+ }
+ static uptr storage_size(const args_type &args) {
+ return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ }
+ static u32 hash(const args_type &args) {
+ // murmur2
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed ^ (args.size * sizeof(uptr));
+ for (uptr i = 0; i < args.size; i++) {
+ u32 k = args.trace[i];
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+ }
+ static bool is_valid(const args_type &args) {
+ return args.size > 0 && args.trace;
+ }
+ void store(const args_type &args, u32 hash) {
+ atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+ size = args.size;
+ tag = args.tag;
+ internal_memcpy(stack, args.trace, size * sizeof(uptr));
+ }
+ args_type load() const {
+ return args_type(&stack[0], size, tag);
+ }
+ StackDepotHandle get_handle() { return StackDepotHandle(this); }
-static StackDepotStats stats;
+ typedef StackDepotHandle handle_type;
+};
-StackDepotStats *StackDepotGetStats() {
- return &stats;
-}
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
-static u32 hash(const uptr *stack, uptr size) {
- // murmur2
- const u32 m = 0x5bd1e995;
- const u32 seed = 0x9747b28c;
- const u32 r = 24;
- u32 h = seed ^ (size * sizeof(uptr));
- for (uptr i = 0; i < size; i++) {
- u32 k = stack[i];
- k *= m;
- k ^= k >> r;
- k *= m;
- h *= m;
- h ^= k;
- }
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
- return h;
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+ return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
}
-
-static StackDesc *tryallocDesc(uptr memsz) {
- // Optimisic lock-free allocation, essentially try to bump the region ptr.
- for (;;) {
- uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
- uptr end = atomic_load(&depot.region_end, memory_order_acquire);
- if (cmp == 0 || cmp + memsz > end)
- return 0;
- if (atomic_compare_exchange_weak(
- &depot.region_pos, &cmp, cmp + memsz,
- memory_order_acquire))
- return (StackDesc*)cmp;
- }
+void StackDepotHandle::inc_use_count_unsafe() {
+ u32 prev =
+ atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
+ CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
-static StackDesc *allocDesc(uptr size) {
- // First, try to allocate optimisitically.
- uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
- StackDesc *s = tryallocDesc(memsz);
- if (s)
- return s;
- // If failed, lock, retry and alloc new superblock.
- SpinMutexLock l(&depot.mtx);
- for (;;) {
- s = tryallocDesc(memsz);
- if (s)
- return s;
- atomic_store(&depot.region_pos, 0, memory_order_relaxed);
- uptr allocsz = 64 * 1024;
- if (allocsz < memsz)
- allocsz = memsz;
- uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
- stats.mapped += allocsz;
- atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
- atomic_store(&depot.region_pos, mem, memory_order_release);
- }
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
+ StackDepot;
+static StackDepot theDepot;
+
+StackDepotStats *StackDepotGetStats() {
+ return theDepot.GetStats();
}
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
- // Searches linked list s for the stack, returns its id.
- for (; s; s = s->link) {
- if (s->hash == hash && s->size == size) {
- uptr i = 0;
- for (; i < size; i++) {
- if (stack[i] != s->stack[i])
- break;
- }
- if (i == size)
- return s->id;
- }
- }
- return 0;
+u32 StackDepotPut(StackTrace stack) {
+ StackDepotHandle h = theDepot.Put(stack);
+ return h.valid() ? h.id() : 0;
}
-static StackDesc *lock(atomic_uintptr_t *p) {
- // Uses the pointer lsb as mutex.
- for (int i = 0;; i++) {
- uptr cmp = atomic_load(p, memory_order_relaxed);
- if ((cmp & 1) == 0
- && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
- memory_order_acquire))
- return (StackDesc*)cmp;
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- }
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
+ return theDepot.Put(stack);
}
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
- DCHECK_EQ((uptr)s & 1, 0);
- atomic_store(p, (uptr)s, memory_order_release);
+StackTrace StackDepotGet(u32 id) {
+ return theDepot.Get(id);
}
-u32 StackDepotPut(const uptr *stack, uptr size) {
- if (stack == 0 || size == 0)
- return 0;
- uptr h = hash(stack, size);
- atomic_uintptr_t *p = &depot.tab[h % kTabSize];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- // First, try to find the existing stack.
- u32 id = find(s, stack, size, h);
- if (id)
- return id;
- // If failed, lock, retry and insert new.
- StackDesc *s2 = lock(p);
- if (s2 != s) {
- id = find(s2, stack, size, h);
- if (id) {
- unlock(p, s2);
- return id;
- }
- }
- uptr part = (h % kTabSize) / kPartSize;
- id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
- stats.n_uniq_ids++;
- CHECK_LT(id, kMaxId);
- id |= part << kPartShift;
- CHECK_NE(id, 0);
- CHECK_EQ(id & (1u << 31), 0);
- s = allocDesc(size);
- s->id = id;
- s->hash = h;
- s->size = size;
- internal_memcpy(s->stack, stack, size * sizeof(uptr));
- s->link = s2;
- unlock(p, s);
- return id;
+void StackDepotLockAll() {
+ theDepot.LockAll();
}
-const uptr *StackDepotGet(u32 id, uptr *size) {
- if (id == 0)
- return 0;
- CHECK_EQ(id & (1u << 31), 0);
- // High kPartBits contain part id, so we need to scan at most kPartSize lists.
- uptr part = id >> kPartShift;
- for (int i = 0; i != kPartSize; i++) {
- uptr idx = part * kPartSize + i;
- CHECK_LT(idx, kTabSize);
- atomic_uintptr_t *p = &depot.tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- for (; s; s = s->link) {
- if (s->id == id) {
- *size = s->size;
- return s->stack;
- }
- }
- }
- *size = 0;
- return 0;
+void StackDepotUnlockAll() {
+ theDepot.UnlockAll();
}
bool StackDepotReverseMap::IdDescPair::IdComparator(
@@ -207,10 +135,10 @@ bool StackDepotReverseMap::IdDescPair::IdComparator(
StackDepotReverseMap::StackDepotReverseMap()
: map_(StackDepotGetStats()->n_uniq_ids + 100) {
- for (int idx = 0; idx < kTabSize; idx++) {
- atomic_uintptr_t *p = &depot.tab[idx];
+ for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+ atomic_uintptr_t *p = &theDepot.tab[idx];
uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
+ StackDepotNode *s = (StackDepotNode*)(v & ~1);
for (; s; s = s->link) {
IdDescPair pair = {s->id, s};
map_.push_back(pair);
@@ -219,18 +147,15 @@ StackDepotReverseMap::StackDepotReverseMap()
InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}
-const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
- if (!map_.size()) return 0;
- IdDescPair pair = {id, 0};
+StackTrace StackDepotReverseMap::Get(u32 id) {
+ if (!map_.size())
+ return StackTrace();
+ IdDescPair pair = {id, nullptr};
uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
IdDescPair::IdComparator);
- if (idx > map_.size()) {
- *size = 0;
- return 0;
- }
- StackDesc *desc = map_[idx].desc;
- *size = desc->size;
- return desc->stack;
+ if (idx > map_.size())
+ return StackTrace();
+ return map_[idx].desc->load();
}
-} // namespace __sanitizer
+} // namespace __sanitizer
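
A worked illustration of the hash_and_use_count packing introduced in StackDepotNode: a node's bucket is chosen by the low kTabSizeLog bits of its hash, so those bits carry no information within a bucket and can be reused as a per-stack use counter, leaving the high 12 bits for hash comparison. Standalone check with the same constants (the hash value is made up):

    #include <cassert>
    #include <cstdint>
    int main() {
      const uint32_t kTabSizeLog   = 20;                       // 1M-entry hash table
      const uint32_t kUseCountMask = (1u << kTabSizeLog) - 1;  // low 20 bits: use count
      const uint32_t kHashMask     = ~kUseCountMask;           // high 12 bits: hash
      uint32_t hash = 0xABCDE123u;                 // hypothetical murmur2 result
      uint32_t word = hash & kHashMask;            // store(): counter starts at 0
      word += 1;                                   // inc_use_count_unsafe()
      assert((word & kUseCountMask) == 1);                    // counter incremented
      assert((word & kHashMask) == (hash & kHashMask));       // hash bits untouched
      assert((hash & kUseCountMask) == hash % (1u << kTabSizeLog));  // bucket index
      return 0;
    }
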
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
index c2c04ef9d64..dfb5349de9f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
@@ -8,29 +8,38 @@
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
+
#ifndef SANITIZER_STACKDEPOT_H
#define SANITIZER_STACKDEPOT_H
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_stacktrace.h"
namespace __sanitizer {
// StackDepot efficiently stores huge amounts of stack traces.
-
-// Maps stack trace to an unique id.
-u32 StackDepotPut(const uptr *stack, uptr size);
-// Retrieves a stored stack trace by the id.
-const uptr *StackDepotGet(u32 id, uptr *size);
-
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr mapped;
+struct StackDepotNode;
+struct StackDepotHandle {
+ StackDepotNode *node_;
+ StackDepotHandle() : node_(nullptr) {}
+ explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id();
+ int use_count();
+ void inc_use_count_unsafe();
};
+const int kStackDepotMaxUseCount = 1U << 20;
+
StackDepotStats *StackDepotGetStats();
+u32 StackDepotPut(StackTrace stack);
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
+// Retrieves a stored stack trace by the id.
+StackTrace StackDepotGet(u32 id);
-struct StackDesc;
+void StackDepotLockAll();
+void StackDepotUnlockAll();
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with
@@ -39,12 +48,12 @@ struct StackDesc;
class StackDepotReverseMap {
public:
StackDepotReverseMap();
- const uptr *Get(u32 id, uptr *size);
+ StackTrace Get(u32 id);
private:
struct IdDescPair {
u32 id;
- StackDesc *desc;
+ StackDepotNode *desc;
static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
};
@@ -55,6 +64,7 @@ class StackDepotReverseMap {
StackDepotReverseMap(const StackDepotReverseMap&);
void operator=(const StackDepotReverseMap&);
};
-} // namespace __sanitizer
-#endif // SANITIZER_STACKDEPOT_H
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOT_H
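
A hypothetical caller-side sketch of the new StackTrace-based depot API declared above (only compilable inside the sanitizer sources, and not part of the patch): a trace is stored once, identified by a compact 32-bit id, and later retrieved by that id.

    #include "sanitizer_stackdepot.h"

    namespace __sanitizer {

    void DepotRoundTripExample(const uptr *pcs, uptr n) {
      StackTrace st(pcs, (u32)n);
      u32 id = StackDepotPut(st);      // 0 means the trace was empty/invalid
      if (id == 0)
        return;
      StackTrace back = StackDepotGet(id);
      CHECK_EQ(back.size, st.size);    // the same frames come back out
    }

    }  // namespace __sanitizer
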
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
new file mode 100644
index 00000000000..ab4932823a3
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
@@ -0,0 +1,176 @@
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+class StackDepotBase {
+ public:
+ typedef typename Node::args_type args_type;
+ typedef typename Node::handle_type handle_type;
+  // Maps stack trace to a unique id.
+ handle_type Put(args_type args, bool *inserted = nullptr);
+ // Retrieves a stored stack trace by the id.
+ args_type Get(u32 id);
+
+ StackDepotStats *GetStats() { return &stats; }
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ static Node *find(Node *s, args_type args, u32 hash);
+ static Node *lock(atomic_uintptr_t *p);
+ static void unlock(atomic_uintptr_t *p, Node *s);
+
+ static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static const int kPartBits = 8;
+ static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+ static const int kPartCount =
+ 1 << kPartBits; // Number of subparts in the table.
+ static const int kPartSize = kTabSize / kPartCount;
+ static const int kMaxId = 1 << kPartShift;
+
+ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+
+ StackDepotStats stats;
+
+ friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
+ args_type args,
+ u32 hash) {
+ // Searches linked list s for the stack, returns its id.
+ for (; s; s = s->link) {
+ if (s->eq(hash, args)) {
+ return s;
+ }
+ }
+ return nullptr;
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
+ atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+ return (Node *)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
+ atomic_uintptr_t *p, Node *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted) *inserted = false;
+ if (!Node::is_valid(args)) return handle_type();
+ uptr h = Node::hash(args);
+ atomic_uintptr_t *p = &tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ // First, try to find the existing stack.
+ Node *node = find(s, args, h);
+ if (node) return node->get_handle();
+ // If failed, lock, retry and insert new.
+ Node *s2 = lock(p);
+ if (s2 != s) {
+ node = find(s2, args, h);
+ if (node) {
+ unlock(p, s2);
+ return node->get_handle();
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ uptr memsz = Node::storage_size(args);
+ s = (Node *)PersistentAlloc(memsz);
+ stats.allocated += memsz;
+ s->id = id;
+ s->store(args, h);
+ s->link = s2;
+ unlock(p, s);
+ if (inserted) *inserted = true;
+ return s->get_handle();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
+ if (id == 0) {
+ return args_type();
+ }
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ // High kPartBits contain part id, so we need to scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ return s->load();
+ }
+ }
+ }
+ return args_type();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ lock(&tab[i]);
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ uptr s = atomic_load(p, memory_order_relaxed);
+ unlock(p, (Node *)(s & ~1UL));
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOTBASE_H
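
A worked illustration of the id layout Put() produces: with kPartBits == 8 and, for the stack depot instantiation, kReservedBits == 1, kPartShift is 32 - 8 - 1 = 23, so an id carries the per-part sequence number in its low 23 bits, the part index in the next 8 bits, and the reserved top bit left clear. Standalone check of that arithmetic (part and sequence values are made up):

    #include <cassert>
    #include <cstdint>
    int main() {
      const int kReservedBits = 1;                      // reserved bit used by TSan
      const int kPartBits     = 8;
      const int kPartShift    = 32 - kPartBits - kReservedBits;  // 23
      uint32_t part = 5;                                // which of 256 sub-tables
      uint32_t seq  = 42;                               // per-part counter, starts at 1
      uint32_t id   = seq | (part << kPartShift);
      assert(id >> kPartShift == part);                 // Get() recovers the part
      assert((id & ((1u << kPartShift) - 1)) == seq);   // ...and the sequence number
      assert((id & (~0u >> kReservedBits)) == id);      // the CHECK_EQ in Put() holds
      return 0;
    }
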
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 3a9e902537a..796d472a1eb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -11,144 +11,93 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
-#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
-#ifdef __arm__
- // Cancel Thumb bit.
- pc = pc & (~1);
-#endif
-#if defined(__powerpc__) || defined(__powerpc64__)
- // PCs are always 4 byte aligned.
- return pc - 4;
-#elif defined(__sparc__)
- return pc - 8;
+uptr StackTrace::GetNextInstructionPc(uptr pc) {
+#if defined(__mips__)
+ return pc + 8;
+#elif defined(__powerpc__)
+ return pc + 4;
#else
- return pc - 1;
+ return pc + 1;
#endif
}
-static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num,
- uptr pc) {
- buffer->append(" #%zu 0x%zx", frame_num, pc);
+uptr StackTrace::GetCurrentPc() {
+ return GET_CALLER_PC();
}
-void StackTrace::PrintStack(const uptr *addr, uptr size,
- SymbolizeCallback symbolize_callback) {
- if (addr == 0 || size == 0) {
- Printf(" <empty stack>\n\n");
- return;
- }
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
- InternalScopedBuffer<AddressInfo> addr_frames(64);
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
- uptr frame_num = 0;
- for (uptr i = 0; i < size && addr[i]; i++) {
- // PCs in stack traces are actually the return addresses, that is,
- // addresses of the next instructions after the call.
- uptr pc = GetPreviousInstructionPc(addr[i]);
- uptr addr_frames_num = 0; // The number of stack frames for current
- // instruction address.
- if (symbolize_callback) {
- if (symbolize_callback((void*)pc, buff.data(), buff.size())) {
- addr_frames_num = 1;
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- // We can't know anything about the string returned by external
- // symbolizer, but if it starts with filename, try to strip path prefix
- // from it.
- frame_desc.append(
- " %s",
- StripPathPrefix(buff.data(), common_flags()->strip_path_prefix));
- Printf("%s\n", frame_desc.data());
- frame_num++;
- }
- }
- if (common_flags()->symbolize && addr_frames_num == 0) {
- // Use our own (online) symbolizer, if necessary.
- if (Symbolizer *sym = Symbolizer::GetOrNull())
- addr_frames_num =
- sym->SymbolizeCode(pc, addr_frames.data(), addr_frames.size());
- for (uptr j = 0; j < addr_frames_num; j++) {
- AddressInfo &info = addr_frames[j];
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- if (info.function) {
- frame_desc.append(" in %s", info.function);
- }
- if (info.file) {
- frame_desc.append(" ");
- PrintSourceLocation(&frame_desc, info.file, info.line, info.column);
- } else if (info.module) {
- frame_desc.append(" ");
- PrintModuleAndOffset(&frame_desc, info.module, info.module_offset);
- }
- Printf("%s\n", frame_desc.data());
- frame_num++;
- info.Clear();
- }
- }
- if (addr_frames_num == 0) {
- // If online symbolization failed, try to output at least module and
- // offset for instruction.
- frame_desc.clear();
- PrintStackFramePrefix(&frame_desc, frame_num, pc);
- uptr offset;
- if (proc_maps.GetObjectNameAndOffset(pc, &offset,
- buff.data(), buff.size(),
- /* protection */0)) {
- frame_desc.append(" ");
- PrintModuleAndOffset(&frame_desc, buff.data(), offset);
- }
- Printf("%s\n", frame_desc.data());
- frame_num++;
- }
- }
- // Always print a trailing empty line after stack trace.
- Printf("\n");
+void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ size = cnt + !!extra_top_pc;
+ CHECK_LE(size, kStackTraceMax);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+ top_frame_bp = 0;
}
-uptr StackTrace::GetCurrentPc() {
- return GET_CALLER_PC();
+// Check if given pointer points into allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+ return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
}
-void StackTrace::FastUnwindStack(uptr pc, uptr bp,
- uptr stack_top, uptr stack_bottom,
- uptr max_depth) {
- if (max_depth == 0) {
- size = 0;
- return;
- }
- trace[0] = pc;
+// In GCC on ARM bp points to the saved lr, not fp, so we should check whether
+// the next cell on the stack holds a saved frame pointer. GetCanonicFrame
+// returns the pointer to the saved frame pointer in either case.
+static inline uhwptr *GetCanonicFrame(uptr bp,
+ uptr stack_top,
+ uptr stack_bottom) {
+#ifdef __arm__
+ if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
+ uhwptr *bp_prev = (uhwptr *)bp;
+ if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
+ // The next frame pointer does not look right. This could be a GCC frame, step
+ // back by 1 word and try again.
+ if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
+ return bp_prev - 1;
+ // Nope, this does not look right either. This means the frame after next does
+ // not have a valid frame pointer, but we can still extract the caller PC.
+ // Unfortunately, there is no way to decide between GCC and LLVM frame
+ // layouts. Assume GCC.
+ return bp_prev - 1;
+#else
+ return (uhwptr*)bp;
+#endif
+}
+
+void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
+ uptr stack_bottom, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ trace_buffer[0] = pc;
size = 1;
- uhwptr *frame = (uhwptr *)bp;
- uhwptr *prev_frame = frame - 1;
if (stack_top < 4096) return; // Sanity check for stack top.
+ uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
+ // Lowest possible address that makes sense as the next frame pointer.
+ // Goes up as we walk the stack.
+ uptr bottom = stack_bottom;
// Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
- while (frame > prev_frame &&
- frame < (uhwptr *)stack_top - 2 &&
- frame > (uhwptr *)stack_bottom &&
+ while (IsValidFrame((uptr)frame, stack_top, bottom) &&
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
+#ifdef __powerpc__
+ // PowerPC ABIs specify that the return address is saved at offset
+ // 16 of the *caller's* stack frame. Thus we must dereference the
+ // back chain to find the caller frame before extracting it.
+ uhwptr *caller_frame = (uhwptr*)frame[0];
+ if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
+ !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
+ break;
+ uhwptr pc1 = caller_frame[2];
+#else
uhwptr pc1 = frame[1];
+#endif
if (pc1 != pc) {
- trace[size++] = (uptr) pc1;
+ trace_buffer[size++] = (uptr) pc1;
}
- prev_frame = frame;
- frame = (uhwptr *)frame[0];
- }
-}
-
-void StackTrace::PopStackFrames(uptr count) {
- CHECK(size >= count);
- size -= count;
- for (uptr i = 0; i < size; i++) {
- trace[i] = trace[i + count];
+ bottom = (uptr)frame;
+ frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
}
}
@@ -156,10 +105,18 @@ static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
}
-uptr StackTrace::LocatePcInTrace(uptr pc) {
+void BufferedStackTrace::PopStackFrames(uptr count) {
+ CHECK_LT(count, size);
+ size -= count;
+ for (uptr i = 0; i < size; ++i) {
+ trace_buffer[i] = trace_buffer[i + count];
+ }
+}
+
+uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
- const int kPcThreshold = 192;
+ const int kPcThreshold = 304;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;
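
The FastUnwindStack/GetCanonicFrame changes above walk the chain of saved frame pointers: the return address is read from frame[1] and the caller's frame pointer from frame[0] (PowerPC dereferences the back chain first), and the walk stops as soon as a candidate frame pointer falls outside the stack. A safe, self-contained toy of that walk over a synthetic stack array (the real code additionally checks alignment and disambiguates the GCC/LLVM ARM frame layouts):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t uhwptr;

    // Same shape as the check in the patch: a frame pointer must lie inside the
    // stack and leave room for the two words we read from it.
    static bool IsValidFrame(uhwptr frame, uhwptr stack_top, uhwptr stack_bottom) {
      return frame > stack_bottom && frame < stack_top - 2 * sizeof(uhwptr);
    }

    int main() {
      // A synthetic "stack": frames hold {saved fp, return pc} pairs.
      uhwptr stack[8] = {};
      uhwptr bottom = (uhwptr)&stack[0], top = (uhwptr)&stack[8];
      stack[2] = (uhwptr)&stack[5]; stack[3] = 0x1002;   // innermost frame
      stack[5] = 0;                 stack[6] = 0x1003;   // outermost: fp chain ends
      uhwptr *frame = &stack[2];
      while (IsValidFrame((uhwptr)frame, top, bottom)) {
        std::printf("pc = 0x%zx\n", (size_t)frame[1]);   // frame[1]: return address
        frame = (uhwptr *)frame[0];                      // frame[0]: caller's fp
      }
      return 0;
    }
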
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
index d06db5ffa7a..7f22455ac9e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -15,12 +15,9 @@
namespace __sanitizer {
-static const uptr kStackTraceMax = 256;
+static const u32 kStackTraceMax = 256;
-#if SANITIZER_LINUX && (defined(__arm__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__sparc__) || \
- defined(__mips__))
+#if SANITIZER_LINUX && (defined(__sparc__) || defined(__mips__))
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
@@ -28,46 +25,87 @@ static const uptr kStackTraceMax = 256;
# define SANITIZER_CAN_FAST_UNWIND 1
#endif
+// Fast unwind is the only option on Mac for now; we will need to
+// revisit this macro when slow unwind works on Mac, see
+// https://code.google.com/p/address-sanitizer/issues/detail?id=137
+#if SANITIZER_MAC
+# define SANITIZER_CAN_SLOW_UNWIND 0
+#else
+# define SANITIZER_CAN_SLOW_UNWIND 1
+#endif
+
struct StackTrace {
- typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
- int out_size);
- uptr top_frame_bp;
- uptr size;
- uptr trace[kStackTraceMax];
+ const uptr *trace;
+ u32 size;
+ u32 tag;
+
+ static const int TAG_UNKNOWN = 0;
+ static const int TAG_ALLOC = 1;
+ static const int TAG_DEALLOC = 2;
+ static const int TAG_CUSTOM = 100; // Tool specific tags start here.
+
+ StackTrace() : trace(nullptr), size(0), tag(0) {}
+ StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
+ StackTrace(const uptr *trace, u32 size, u32 tag)
+ : trace(trace), size(size), tag(tag) {}
// Prints a symbolized stacktrace, followed by an empty line.
- static void PrintStack(const uptr *addr, uptr size,
- SymbolizeCallback symbolize_callback = 0);
-
- void CopyFrom(const uptr *src, uptr src_size) {
- top_frame_bp = 0;
- size = src_size;
- if (size > kStackTraceMax) size = kStackTraceMax;
- for (uptr i = 0; i < size; i++)
- trace[i] = src[i];
- }
+ void Print() const;
static bool WillUseFastUnwind(bool request_fast_unwind) {
- // Check if fast unwind is available. Fast unwind is the only option on Mac.
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
- else if (SANITIZER_MAC)
+ else if (!SANITIZER_CAN_SLOW_UNWIND)
return true;
return request_fast_unwind;
}
- void Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
- uptr stack_bottom, bool request_fast_unwind);
-
static uptr GetCurrentPc();
- static uptr GetPreviousInstructionPc(uptr pc);
+ static inline uptr GetPreviousInstructionPc(uptr pc);
+ static uptr GetNextInstructionPc(uptr pc);
+ typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
+ int out_size);
+};
+
+// Performance-critical, must be in the header.
+ALWAYS_INLINE
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
+#if defined(__arm__)
+ // Cancel Thumb bit.
+ pc = pc & (~1);
+#endif
+#if defined(__powerpc__) || defined(__powerpc64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__) || defined(__mips__)
+ return pc - 8;
+#else
+ return pc - 1;
+#endif
+}
+
+// StackTrace that owns the buffer used to store the addresses.
+struct BufferedStackTrace : public StackTrace {
+ uptr trace_buffer[kStackTraceMax];
+ uptr top_frame_bp; // Optional bp of a top frame.
+
+ BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
+
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+ void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
+ uptr stack_bottom, bool request_fast_unwind);
private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
- uptr max_depth);
- void SlowUnwindStack(uptr pc, uptr max_depth);
+ u32 max_depth);
+ void SlowUnwindStack(uptr pc, u32 max_depth);
+ void SlowUnwindStackWithContext(uptr pc, void *context,
+ u32 max_depth);
void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc);
+
+ BufferedStackTrace(const BufferedStackTrace &);
+ void operator=(const BufferedStackTrace &);
};
} // namespace __sanitizer
@@ -80,6 +118,10 @@ struct StackTrace {
uptr local_stack; \
uptr sp = (uptr)&local_stack
+#define GET_CALLER_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC();
+
// Use this macro if you want to print stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP_SP \
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index ea2f9d07771..addf44f7327 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -9,18 +9,66 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
- uptr stack_bottom, bool request_fast_unwind) {
- if (!WillUseFastUnwind(request_fast_unwind))
- SlowUnwindStack(pc, max_depth);
- else
- FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
+void StackTrace::Print() const {
+ if (trace == nullptr || size == 0) {
+ Printf(" <empty stack>\n\n");
+ return;
+ }
+ InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && trace[i]; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(trace[i]);
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ CHECK(frames);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ frame_desc.clear();
+ RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
+ cur->info, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ Printf("%s\n", frame_desc.data());
+ }
+ frames->ClearAll();
+ }
+ // Always print a trailing empty line after stack trace.
+ Printf("\n");
+}
- top_frame_bp = size ? bp : 0;
+void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
+ uptr stack_top, uptr stack_bottom,
+ bool request_fast_unwind) {
+ top_frame_bp = (max_depth > 0) ? bp : 0;
+ // Avoid doing any work for small max_depth.
+ if (max_depth == 0) {
+ size = 0;
+ return;
+ }
+ if (max_depth == 1) {
+ size = 1;
+ trace_buffer[0] = pc;
+ return;
+ }
+ if (!WillUseFastUnwind(request_fast_unwind)) {
+#if SANITIZER_CAN_SLOW_UNWIND
+ if (context)
+ SlowUnwindStackWithContext(pc, context, max_depth);
+ else
+ SlowUnwindStack(pc, max_depth);
+#else
+ UNREACHABLE("slow unwind requested but not available");
+#endif
+ } else {
+ FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
+ }
}
} // namespace __sanitizer
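
Print() above steps each stored return address back into its call instruction before symbolizing, so reports point at the call line rather than at the statement after it. A simplified standalone version of that adjustment (the ARM Thumb-bit handling is omitted), applied to a made-up return address:

    #include <cstdint>
    #include <cstdio>

    // Simplified GetPreviousInstructionPc: step a return address back so it
    // falls inside the call instruction itself.
    static uint64_t PreviousInstructionPc(uint64_t pc) {
    #if defined(__powerpc__) || defined(__powerpc64__)
      return pc - 4;        // fixed 4-byte instructions
    #elif defined(__sparc__) || defined(__mips__)
      return pc - 8;        // account for the branch delay slot
    #else
      return pc - 1;        // variable-width ISA: any byte inside the call works
    #endif
    }

    int main() {
      uint64_t return_addr = 0x400a10;  // hypothetical return address from a trace
      std::printf("symbolize 0x%llx instead of 0x%llx\n",
                  (unsigned long long)PreviousInstructionPc(return_addr),
                  (unsigned long long)return_addr);
      return 0;
    }
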
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
new file mode 100644
index 00000000000..bcc9de78ec1
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
@@ -0,0 +1,142 @@
+//===-- sanitizer_stacktrace_printer.cc ------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stacktrace_printer.h"
+
+namespace __sanitizer {
+
+static const char *StripFunctionName(const char *function, const char *prefix) {
+ if (!function) return nullptr;
+ if (!prefix) return function;
+ uptr prefix_len = internal_strlen(prefix);
+ if (0 == internal_strncmp(function, prefix, prefix_len))
+ return function + prefix_len;
+ return function;
+}
+
+static const char kDefaultFormat[] = " #%n %p %F %L";
+
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix, const char *strip_func_prefix) {
+ if (0 == internal_strcmp(format, "DEFAULT"))
+ format = kDefaultFormat;
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ // Frame number and all fields of AddressInfo structure.
+ case 'n':
+ buffer->append("%zu", frame_no);
+ break;
+ case 'p':
+ buffer->append("0x%zx", info.address);
+ break;
+ case 'm':
+ buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
+ break;
+ case 'o':
+ buffer->append("0x%zx", info.module_offset);
+ break;
+ case 'f':
+ buffer->append("%s", StripFunctionName(info.function, strip_func_prefix));
+ break;
+ case 'q':
+ buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
+ ? info.function_offset
+ : 0x0);
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", info.line);
+ break;
+ case 'c':
+ buffer->append("%d", info.column);
+ break;
+ // Smarter special cases.
+ case 'F':
+ // Function name and offset, if file is unknown.
+ if (info.function) {
+ buffer->append("in %s",
+ StripFunctionName(info.function, strip_func_prefix));
+ if (!info.file && info.function_offset != AddressInfo::kUnknown)
+ buffer->append("+0x%zx", info.function_offset);
+ }
+ break;
+ case 'S':
+ // File/line information.
+ RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style,
+ strip_path_prefix);
+ break;
+ case 'L':
+ // Source location, or module location.
+ if (info.file) {
+ RenderSourceLocation(buffer, info.file, info.line, info.column,
+ vs_style, strip_path_prefix);
+ } else if (info.module) {
+ RenderModuleLocation(buffer, info.module, info.module_offset,
+ strip_path_prefix);
+ } else {
+ buffer->append("(<unknown module>)");
+ }
+ break;
+ case 'M':
+ // Module basename and offset, or PC.
+ if (info.address & kExternalPCBit)
+        {}  // These PCs are not meaningful.
+ else if (info.module)
+ buffer->append("(%s+%p)", StripModuleName(info.module),
+ (void *)info.module_offset);
+ else
+ buffer->append("(%p)", (void *)info.address);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
+ *p);
+ Die();
+ }
+ }
+}
+
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix) {
+ if (vs_style && line > 0) {
+ buffer->append("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
+ if (column > 0)
+ buffer->append(",%d", column);
+ buffer->append(")");
+ return;
+ }
+
+ buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
+ if (line > 0) {
+ buffer->append(":%d", line);
+ if (column > 0)
+ buffer->append(":%d", column);
+ }
+}
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, const char *strip_path_prefix) {
+ buffer->append("(%s+0x%zx)", StripPathPrefix(module, strip_path_prefix),
+ offset);
+}
+
+} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
new file mode 100644
index 00000000000..a553568ea6f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -0,0 +1,62 @@
+//===-- sanitizer_stacktrace_printer.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_PRINTER_H
+#define SANITIZER_STACKTRACE_PRINTER_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// Render the contents of the "info" structure, which represents the contents
+// of stack frame "frame_no", and append the result to "buffer". "format" is a
+// string with placeholders, which is copied to the output with the
+// placeholders substituted with the contents of "info". For example, the
+// format string
+// " frame %n: function %F at %S"
+// will be turned into
+// " frame 10: function foo::bar() at my/file.cc:10"
+// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
+// source files and modules, and "strip_func_prefix" to strip prefixes of
+// function names.
+// Here's the full list of available placeholders:
+// %% - represents a '%' character;
+// %n - frame number (copy of frame_no);
+// %p - PC in hex format;
+// %m - path to module (binary or shared object);
+// %o - offset in the module in hex format;
+// %f - function name;
+// %q - offset in the function in hex format (*if available*);
+// %s - path to source file;
+// %l - line in the source file;
+// %c - column in the source file;
+// %F - if function is known to be <foo>, prints "in <foo>", possibly
+// followed by the offset in this function, but only if source file
+// is unknown;
+// %S - prints file/line/column information;
+// %L - prints location information: file/line/column, if it is known, or
+// module+offset if it is known, or (<unknown module>) string.
+// %M - prints module basename and offset, if it is known, or PC.
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix = "",
+ const char *strip_func_prefix = "");
+
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix);
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, const char *strip_path_prefix);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKTRACE_PRINTER_H
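
To make the placeholder list above concrete: with the default format "    #%n %p %F %L" and a hypothetical frame whose function is main and whose source location is foo.cc:7:3, RenderFrame appends "    #0 0x4008a2 in main foo.cc:7:3". The snippet below just prints that already-expanded form (it does not reimplement RenderFrame, and all values are invented):

    #include <cstdio>
    int main() {
      int frame_no = 0;                   // %n
      unsigned long pc = 0x4008a2;        // %p
      const char *function = "main";      // %F expands to "in main"
      const char *file = "foo.cc";        // %L expands to "foo.cc:7:3" when known
      int line = 7, column = 3;
      std::printf("    #%d 0x%lx in %s %s:%d:%d\n",
                  frame_no, pc, function, file, line, column);
      return 0;
    }
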
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h b/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h
index b1241da1f73..c3245266fb9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h
@@ -57,7 +57,8 @@ typedef void (*StopTheWorldCallback)(
// Suspend all threads in the current process and run the callback on the list
// of suspended threads. This function will resume the threads before returning.
-// The callback should not call any libc functions.
+// The callback should not call any libc functions. The callback must not call
+// exit() nor _exit() and instead return to the caller.
// This function should NOT be called from multiple threads simultaneously.
void StopTheWorld(StopTheWorldCallback callback, void *argument);
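
A hypothetical caller sketch for the API documented above (again only compilable inside the sanitizer sources, and not part of the patch): the callback runs in the tracer thread while every other thread in the process is ptrace-stopped, so it must avoid libc and must return rather than exit.

    #include "sanitizer_stoptheworld.h"

    namespace __sanitizer {

    static void CountCallback(const SuspendedThreadsList &threads, void *arg) {
      // Runs with all other threads suspended; no libc, no exit().
      *(uptr *)arg = threads.thread_count();
    }

    void CountThreadsWhileStopped() {
      uptr n = 0;
      StopTheWorld(CountCallback, &n);
      // All threads have been resumed by the time StopTheWorld returns.
      (void)n;
    }

    }  // namespace __sanitizer
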
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index 6ee63ec3168..c919e4f6e97 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -10,13 +10,15 @@
//
//===----------------------------------------------------------------------===//
-
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX && defined(__x86_64__)
+
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
+ defined(__aarch64__))
#include "sanitizer_stoptheworld.h"
#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_atomic.h"
#include <errno.h>
#include <sched.h> // for CLONE_* definitions
@@ -24,9 +26,15 @@
#include <sys/prctl.h> // for PR_* definitions
#include <sys/ptrace.h> // for PTRACE_* definitions
#include <sys/types.h> // for pid_t
+#include <sys/uio.h> // for iovec
+#include <elf.h> // for NT_PRSTATUS
#if SANITIZER_ANDROID && defined(__arm__)
# include <linux/user.h> // for pt_regs
#else
+# ifdef __aarch64__
+// GLIBC 2.20+ sys/user does not include asm/ptrace.h
+# include <asm/ptrace.h>
+# endif
# include <sys/user.h> // for user_regs_struct
#endif
#include <sys/wait.h> // for signal-related stuff
@@ -68,11 +76,25 @@
COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
namespace __sanitizer {
+
+// Structure for passing arguments into the tracer thread.
+struct TracerThreadArgument {
+ StopTheWorldCallback callback;
+ void *callback_argument;
+ // The tracer thread waits on this mutex while the parent finishes its
+ // preparations.
+ BlockingMutex mutex;
+ // Tracer thread signals its completion by setting done.
+ atomic_uintptr_t done;
+ uptr parent_pid;
+};
+
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
public:
- explicit ThreadSuspender(pid_t pid)
- : pid_(pid) {
+ explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
+ : arg(arg)
+ , pid_(pid) {
CHECK_GE(pid, 0);
}
bool SuspendAllThreads();
@@ -81,44 +103,58 @@ class ThreadSuspender {
SuspendedThreadsList &suspended_threads_list() {
return suspended_threads_list_;
}
+ TracerThreadArgument *arg;
private:
SuspendedThreadsList suspended_threads_list_;
pid_t pid_;
bool SuspendThread(SuspendedThreadID thread_id);
};
-bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
+bool ThreadSuspender::SuspendThread(SuspendedThreadID tid) {
// Are we already attached to this thread?
// Currently this check takes linear time, however the number of threads is
// usually small.
- if (suspended_threads_list_.Contains(thread_id))
+ if (suspended_threads_list_.Contains(tid))
return false;
int pterrno;
- if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL),
+ if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
&pterrno)) {
// Either the thread is dead, or something prevented us from attaching.
// Log this event and move on.
- if (common_flags()->verbosity)
- Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
+ VReport(1, "Could not attach to thread %d (errno %d).\n", tid, pterrno);
return false;
} else {
- if (common_flags()->verbosity)
- Report("Attached to thread %d.\n", thread_id);
+ VReport(2, "Attached to thread %d.\n", tid);
// The thread is not guaranteed to stop before ptrace returns, so we must
- // wait on it.
- uptr waitpid_status;
- HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL));
- int wperrno;
- if (internal_iserror(waitpid_status, &wperrno)) {
- // Got a ECHILD error. I don't think this situation is possible, but it
- // doesn't hurt to report it.
- if (common_flags()->verbosity)
- Report("Waiting on thread %d failed, detaching (errno %d).\n",
- thread_id, wperrno);
- internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
- return false;
+ // wait on it. Note: if the thread receives a signal concurrently,
+ // we can get notification about the signal before notification about stop.
+      // In such a case we need to forward the signal to the thread, otherwise
+ // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
+ // any logic relying on signals will break. After forwarding we need to
+ // continue to wait for stopping, because the thread is not stopped yet.
+ // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
+ // as invisible as possible.
+ for (;;) {
+ int status;
+ uptr waitpid_status;
+ HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
+ int wperrno;
+ if (internal_iserror(waitpid_status, &wperrno)) {
+        // Got an ECHILD error. I don't think this situation is possible, but it
+ // doesn't hurt to report it.
+ VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
+ tid, wperrno);
+ internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
+ return false;
+ }
+ if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
+ internal_ptrace(PTRACE_CONT, tid, nullptr,
+ (void*)(uptr)WSTOPSIG(status));
+ continue;
+ }
+ break;
}
- suspended_threads_list_.Append(thread_id);
+ suspended_threads_list_.Append(tid);
return true;
}
}
@@ -127,16 +163,14 @@ void ThreadSuspender::ResumeAllThreads() {
for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
pid_t tid = suspended_threads_list_.GetThreadID(i);
int pterrno;
- if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
+ if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
&pterrno)) {
- if (common_flags()->verbosity)
- Report("Detached from thread %d.\n", tid);
+ VReport(2, "Detached from thread %d.\n", tid);
} else {
// Either the thread is dead, or we are already detached.
// The latter case is possible, for instance, if this function was called
// from a signal handler.
- if (common_flags()->verbosity)
- Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
+ VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
}
}
}
@@ -144,7 +178,7 @@ void ThreadSuspender::ResumeAllThreads() {
void ThreadSuspender::KillAllThreads() {
for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
- NULL, NULL);
+ nullptr, nullptr);
}
bool ThreadSuspender::SuspendAllThreads() {
@@ -170,35 +204,11 @@ bool ThreadSuspender::SuspendAllThreads() {
}
// Pointer to the ThreadSuspender instance for use in signal handler.
-static ThreadSuspender *thread_suspender_instance = NULL;
-
-// Signals that should not be blocked (this is used in the parent thread as well
-// as the tracer thread).
-static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
- SIGBUS, SIGXCPU, SIGXFSZ };
-
-// Structure for passing arguments into the tracer thread.
-struct TracerThreadArgument {
- StopTheWorldCallback callback;
- void *callback_argument;
- // The tracer thread waits on this mutex while the parent finishes its
- // preparations.
- BlockingMutex mutex;
- uptr parent_pid;
-};
-
-static DieCallbackType old_die_callback;
+static ThreadSuspender *thread_suspender_instance = nullptr;
-// Signal handler to wake up suspended threads when the tracer thread dies.
-void TracerThreadSignalHandler(int signum, void *siginfo, void *) {
- if (thread_suspender_instance != NULL) {
- if (signum == SIGABRT)
- thread_suspender_instance->KillAllThreads();
- else
- thread_suspender_instance->ResumeAllThreads();
- }
- internal__exit((signum == SIGABRT) ? 1 : 2);
-}
+// Synchronous signals that should not be blocked.
+static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
+ SIGXCPU, SIGXFSZ };
static void TracerThreadDieCallback() {
// Generally a call to Die() in the tracer thread should be fatal to the
@@ -207,10 +217,29 @@ static void TracerThreadDieCallback() {
// point. So we correctly handle calls to Die() from within the callback, but
// not those that happen before or after the callback. Hopefully there aren't
// a lot of opportunities for that to happen...
- if (thread_suspender_instance)
- thread_suspender_instance->KillAllThreads();
- if (old_die_callback)
- old_die_callback();
+ ThreadSuspender *inst = thread_suspender_instance;
+ if (inst && stoptheworld_tracer_pid == internal_getpid()) {
+ inst->KillAllThreads();
+ thread_suspender_instance = nullptr;
+ }
+}
+
+// Signal handler to wake up suspended threads when the tracer thread dies.
+static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) {
+ SignalContext ctx = SignalContext::Create(siginfo, uctx);
+ VPrintf(1, "Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n",
+ signum, ctx.addr, ctx.pc, ctx.sp);
+ ThreadSuspender *inst = thread_suspender_instance;
+ if (inst) {
+ if (signum == SIGABRT)
+ inst->KillAllThreads();
+ else
+ inst->ResumeAllThreads();
+ RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+ thread_suspender_instance = nullptr;
+ atomic_store(&inst->arg->done, 1, memory_order_relaxed);
+ }
+ internal__exit((signum == SIGABRT) ? 1 : 2);
}
// Size of alternative stack for signal handlers in the tracer thread.
@@ -230,9 +259,9 @@ static int TracerThread(void* argument) {
tracer_thread_argument->mutex.Lock();
tracer_thread_argument->mutex.Unlock();
- SetDieCallback(TracerThreadDieCallback);
+ RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
- ThreadSuspender thread_suspender(internal_getppid());
+ ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
// Global pointer for the signal handler.
thread_suspender_instance = &thread_suspender;
@@ -242,24 +271,21 @@ static int TracerThread(void* argument) {
internal_memset(&handler_stack, 0, sizeof(handler_stack));
handler_stack.ss_sp = handler_stack_memory.data();
handler_stack.ss_size = kHandlerStackSize;
- internal_sigaltstack(&handler_stack, NULL);
-
- // Install our handler for fatal signals. Other signals should be blocked by
- // the mask we inherited from the caller thread.
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
- signal_index++) {
- __sanitizer_kernel_sigaction_t new_sigaction;
- internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
- new_sigaction.sigaction = TracerThreadSignalHandler;
- new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
- internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
+ internal_sigaltstack(&handler_stack, nullptr);
+
+ // Install our handler for synchronous signals. Other signals should be
+ // blocked by the mask we inherited from the parent thread.
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
+ __sanitizer_sigaction act;
+ internal_memset(&act, 0, sizeof(act));
+ act.sigaction = TracerThreadSignalHandler;
+ act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
}
int exit_code = 0;
if (!thread_suspender.SuspendAllThreads()) {
- if (common_flags()->verbosity)
- Report("Failed suspending threads.\n");
+ VReport(1, "Failed suspending threads.\n");
exit_code = 3;
} else {
tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
@@ -267,9 +293,9 @@ static int TracerThread(void* argument) {
thread_suspender.ResumeAllThreads();
exit_code = 0;
}
- thread_suspender_instance = NULL;
- handler_stack.ss_flags = SS_DISABLE;
- internal_sigaltstack(&handler_stack, NULL);
+ RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+ thread_suspender_instance = nullptr;
+ atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
return exit_code;
}
@@ -282,7 +308,7 @@ class ScopedStackSpaceWithGuard {
// in the future.
guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
"ScopedStackWithGuard");
- CHECK_EQ(guard_start_, (uptr)Mprotect((uptr)guard_start_, guard_size_));
+ CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
}
~ScopedStackSpaceWithGuard() {
UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
@@ -299,55 +325,23 @@ class ScopedStackSpaceWithGuard {
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
-static __sanitizer_kernel_sigset_t blocked_sigset;
-static __sanitizer_kernel_sigset_t old_sigset;
-static __sanitizer_kernel_sigaction_t old_sigactions
- [ARRAY_SIZE(kUnblockedSignals)];
+static __sanitizer_sigset_t blocked_sigset;
+static __sanitizer_sigset_t old_sigset;
class StopTheWorldScope {
public:
StopTheWorldScope() {
- // Block all signals that can be blocked safely, and install
- // default handlers for the remaining signals.
- // We cannot allow user-defined handlers to run while the ThreadSuspender
- // thread is active, because they could conceivably call some libc functions
- // which modify errno (which is shared between the two threads).
- internal_sigfillset(&blocked_sigset);
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
- signal_index++) {
- // Remove the signal from the set of blocked signals.
- internal_sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
- // Install the default handler.
- __sanitizer_kernel_sigaction_t new_sigaction;
- internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
- new_sigaction.handler = SIG_DFL;
- internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction,
- &old_sigactions[signal_index]);
- }
- int sigprocmask_status =
- internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
- CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail
// Make this process dumpable. Processes that are not dumpable cannot be
// attached to.
process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
if (!process_was_dumpable_)
internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
- old_die_callback = GetDieCallback();
}
~StopTheWorldScope() {
- SetDieCallback(old_die_callback);
// Restore the dumpable flag.
if (!process_was_dumpable_)
internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
- // Restore the signal handlers.
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
- signal_index++) {
- internal_sigaction(kUnblockedSignals[signal_index],
- &old_sigactions[signal_index], NULL);
- }
- internal_sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
private:
@@ -375,20 +369,45 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
tracer_thread_argument.callback = callback;
tracer_thread_argument.callback_argument = argument;
tracer_thread_argument.parent_pid = internal_getpid();
+ atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
const uptr kTracerStackSize = 2 * 1024 * 1024;
ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
// Block the execution of TracerThread until after we have set ptrace
// permissions.
tracer_thread_argument.mutex.Lock();
+ // Signal handling story.
+ // We don't want async signals to be delivered to the tracer thread,
+ // so we block all async signals before creating the thread. An async signal
+ // handler can temporarily modify errno, which is shared with this thread.
+ // We ought to use pthread_sigmask here, because sigprocmask has undefined
+ // behavior in multithreaded programs. However, on Linux sigprocmask is
+ // equivalent to pthread_sigmask, with the exception that pthread_sigmask
+ // does not allow blocking some signals used internally in the pthread
+ // implementation. We are fine with blocking them here; we are really not
+ // going to pthread_cancel the thread.
+ // The tracer thread should not raise any synchronous signals. But in case it
+ // does, we set up a special handler for sync signals that properly kills the
+ // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
+ // in the tracer thread won't interfere with the user program. Double note:
+ // if a user does something along the lines of 'kill -11 pid', that can kill
+ // the process even if the user has set up their own handler for SEGV.
+ // One thing to watch out for: this code must not change the behavior of user
+ // code in any observable way. In particular, it must not override user
+ // signal handlers.
+ internal_sigfillset(&blocked_sigset);
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
+ internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
+ int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
+ CHECK_EQ(rv, 0);
uptr tracer_pid = internal_clone(
TracerThread, tracer_stack.Bottom(),
CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
- &tracer_thread_argument, 0 /* parent_tidptr */, 0 /* newtls */, 0
- /* child_tidptr */);
+ &tracer_thread_argument, nullptr /* parent_tidptr */,
+ nullptr /* newtls */, nullptr /* child_tidptr */);
+ internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
int local_errno = 0;
if (internal_iserror(tracer_pid, &local_errno)) {
- if (common_flags()->verbosity)
- Report("Failed spawning a tracer thread (errno %d).\n", local_errno);
+ VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
tracer_thread_argument.mutex.Unlock();
} else {
ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
@@ -399,15 +418,26 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
#endif
// Allow the tracer thread to start.
tracer_thread_argument.mutex.Unlock();
- // Since errno is shared between this thread and the tracer thread, we
- // must avoid using errno while the tracer thread is running.
- // At this point, any signal will either be blocked or kill us, so waitpid
- // should never return (and set errno) while the tracer thread is alive.
- uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
- if (internal_iserror(waitpid_status, &local_errno)) {
- if (common_flags()->verbosity)
- Report("Waiting on the tracer thread failed (errno %d).\n",
- local_errno);
+ // NOTE: errno is shared between this thread and the tracer thread.
+ // internal_waitpid() may call syscall() which can access/spoil errno,
+ // so we can't call it now. Instead we wait for the tracer thread to finish
+ // using the spin loop below. The man page for sched_yield() says "In the Linux
+ // implementation, sched_yield() always succeeds", so let's hope it does not
+ // spoil errno. Note that this spin loop runs only for brief periods before
+ // the tracer thread has suspended us and when it starts unblocking threads.
+ while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
+ sched_yield();
+ // Now the tracer thread is about to exit and does not touch errno,
+ // wait for it.
+ for (;;) {
+ uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
+ if (!internal_iserror(waitpid_status, &local_errno))
+ break;
+ if (local_errno == EINTR)
+ continue;
+ VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
+ local_errno);
+ break;
}
}
}
@@ -437,6 +467,11 @@ typedef pt_regs regs_struct;
typedef struct user regs_struct;
#define REG_SP regs[EF_REG29]
+#elif defined(__aarch64__)
+typedef struct user_pt_regs regs_struct;
+#define REG_SP sp
+#define ARCH_IOVEC_FOR_GETREGSET
+
#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
@@ -447,11 +482,20 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index,
pid_t tid = GetThreadID(index);
regs_struct regs;
int pterrno;
- if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
- &pterrno)) {
- if (common_flags()->verbosity)
- Report("Could not get registers from thread %d (errno %d).\n",
- tid, pterrno);
+#ifdef ARCH_IOVEC_FOR_GETREGSET
+ struct iovec regset_io;
+ regset_io.iov_base = &regs;
+ regset_io.iov_len = sizeof(regs_struct);
+ bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
+ (void*)NT_PRSTATUS, (void*)&regset_io),
+ &pterrno);
+#else
+ bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
+ &regs), &pterrno);
+#endif
+ if (isErr) {
+ VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
+ pterrno);
return -1;
}
@@ -463,6 +507,7 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index,
uptr SuspendedThreadsList::RegisterCount() {
return sizeof(regs_struct) / sizeof(uptr);
}
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_LINUX && defined(__x86_64__)
+#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
+ // || defined(__aarch64__))
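
The rewritten StopTheWorld() above follows a simple discipline: block async signals, clone the tracer with a handler installed for sync signals, restore the signal mask, spin on an atomic done flag so that errno is never touched while the tracer is running, and only then reap the tracer with an EINTR-tolerant waitpid. A minimal standalone sketch of that wait pattern (illustration only, not part of the patch: fork(), mmap() and std::atomic stand in for internal_clone() and the sanitizer-internal atomics):

// Illustrative sketch only: the "spin on a shared done flag, then reap with
// an EINTR-tolerant waitpid" pattern from StopTheWorld(), using portable
// primitives instead of the sanitizer-internal ones.
#include <atomic>
#include <cerrno>
#include <cstdio>
#include <new>
#include <sched.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main() {
  // A lock-free flag in shared memory, visible to both parent and child.
  void *mem = mmap(nullptr, sizeof(std::atomic<int>), PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  auto *done = new (mem) std::atomic<int>(0);

  pid_t pid = fork();
  if (pid == 0) {
    // "Tracer": do the suspend/callback/resume work, then announce completion.
    done->store(1, std::memory_order_relaxed);
    _exit(0);
  }

  // Parent: avoid errno-touching syscalls until the child says it is done.
  while (done->load(std::memory_order_relaxed) == 0)
    sched_yield();

  // Now it is safe to reap the child; retry if interrupted by a signal.
  for (;;) {
    if (waitpid(pid, nullptr, 0) >= 0) break;
    if (errno == EINTR) continue;
    perror("waitpid");
    break;
  }
  munmap(mem, sizeof(std::atomic<int>));
  return 0;
}
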
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
index 14f13e62081..2b6a78155fd 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
@@ -5,7 +5,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Suppression parsing/matching code shared between TSan and LSan.
+// Suppression parsing/matching code.
//
//===----------------------------------------------------------------------===//
@@ -13,67 +13,79 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
namespace __sanitizer {
-static const char *const kTypeStrings[SuppressionTypeCount] = {
- "none", "race", "mutex", "thread", "signal", "leak", "called_from_lib"
-};
+SuppressionContext::SuppressionContext(const char *suppression_types[],
+ int suppression_types_num)
+ : suppression_types_(suppression_types),
+ suppression_types_num_(suppression_types_num), suppressions_(1),
+ can_parse_(true) {
+ CHECK_LE(suppression_types_num_, kMaxSuppressionTypes);
+ internal_memset(has_suppression_type_, 0, suppression_types_num_);
+}
-bool TemplateMatch(char *templ, const char *str) {
- if (str == 0 || str[0] == 0)
- return false;
- bool start = false;
- if (templ && templ[0] == '^') {
- start = true;
- templ++;
+static bool GetPathAssumingFileIsRelativeToExec(const char *file_path,
+ /*out*/char *new_file_path,
+ uptr new_file_path_size) {
+ InternalScopedString exec(kMaxPathLength);
+ if (ReadBinaryNameCached(exec.data(), exec.size())) {
+ const char *file_name_pos = StripModuleName(exec.data());
+ uptr path_to_exec_len = file_name_pos - exec.data();
+ internal_strncat(new_file_path, exec.data(),
+ Min(path_to_exec_len, new_file_path_size - 1));
+ internal_strncat(new_file_path, file_path,
+ new_file_path_size - internal_strlen(new_file_path) - 1);
+ return true;
}
- bool asterisk = false;
- while (templ && templ[0]) {
- if (templ[0] == '*') {
- templ++;
- start = false;
- asterisk = true;
- continue;
- }
- if (templ[0] == '$')
- return str[0] == 0 || asterisk;
- if (str[0] == 0)
- return false;
- char *tpos = (char*)internal_strchr(templ, '*');
- char *tpos1 = (char*)internal_strchr(templ, '$');
- if (tpos == 0 || (tpos1 && tpos1 < tpos))
- tpos = tpos1;
- if (tpos != 0)
- tpos[0] = 0;
- const char *str0 = str;
- const char *spos = internal_strstr(str, templ);
- str = spos + internal_strlen(templ);
- templ = tpos;
- if (tpos)
- tpos[0] = tpos == tpos1 ? '$' : '*';
- if (spos == 0)
- return false;
- if (start && spos != str0)
- return false;
- start = false;
- asterisk = false;
+ return false;
+}
+
+void SuppressionContext::ParseFromFile(const char *filename) {
+ if (filename[0] == '\0')
+ return;
+
+ // If we cannot find the file, check if its location is relative to
+ // the location of the executable.
+ InternalScopedString new_file_path(kMaxPathLength);
+ if (!FileExists(filename) && !IsAbsolutePath(filename) &&
+ GetPathAssumingFileIsRelativeToExec(filename, new_file_path.data(),
+ new_file_path.size())) {
+ filename = new_file_path.data();
+ }
+
+ // Read the file.
+ VPrintf(1, "%s: reading suppressions file at %s\n",
+ SanitizerToolName, filename);
+ char *file_contents;
+ uptr buffer_size;
+ uptr contents_size;
+ if (!ReadFileToBuffer(filename, &file_contents, &buffer_size,
+ &contents_size)) {
+ Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName,
+ filename);
+ Die();
}
- return true;
+
+ Parse(file_contents);
}
-bool SuppressionContext::Match(const char *str, SuppressionType type,
+bool SuppressionContext::Match(const char *str, const char *type,
Suppression **s) {
can_parse_ = false;
- uptr i;
- for (i = 0; i < suppressions_.size(); i++)
- if (type == suppressions_[i].type &&
- TemplateMatch(suppressions_[i].templ, str))
- break;
- if (i == suppressions_.size()) return false;
- *s = &suppressions_[i];
- return true;
+ if (!HasSuppressionType(type))
+ return false;
+ for (uptr i = 0; i < suppressions_.size(); i++) {
+ Suppression &cur = suppressions_[i];
+ if (0 == internal_strcmp(cur.type, type) && TemplateMatch(cur.templ, str)) {
+ *s = &cur;
+ return true;
+ }
+ }
+ return false;
}
static const char *StripPrefix(const char *str, const char *prefix) {
@@ -98,28 +110,28 @@ void SuppressionContext::Parse(const char *str) {
end = line + internal_strlen(line);
if (line != end && line[0] != '#') {
const char *end2 = end;
- while (line != end2 && (end2[-1] == ' ' || end2[-1] == '\t'))
+ while (line != end2 &&
+ (end2[-1] == ' ' || end2[-1] == '\t' || end2[-1] == '\r'))
end2--;
int type;
- for (type = 0; type < SuppressionTypeCount; type++) {
- const char *next_char = StripPrefix(line, kTypeStrings[type]);
+ for (type = 0; type < suppression_types_num_; type++) {
+ const char *next_char = StripPrefix(line, suppression_types_[type]);
if (next_char && *next_char == ':') {
line = ++next_char;
break;
}
}
- if (type == SuppressionTypeCount) {
+ if (type == suppression_types_num_) {
Printf("%s: failed to parse suppressions\n", SanitizerToolName);
Die();
}
- Suppression s;
- s.type = static_cast<SuppressionType>(type);
+ Suppression s = {};
+ s.type = suppression_types_[type];
s.templ = (char*)InternalAlloc(end2 - line + 1);
internal_memcpy(s.templ, line, end2 - line);
s.templ[end2 - line] = 0;
- s.hit_count = 0;
- s.weight = 0;
suppressions_.push_back(s);
+ has_suppression_type_[type] = true;
}
if (end[0] == 0)
break;
@@ -131,6 +143,14 @@ uptr SuppressionContext::SuppressionCount() const {
return suppressions_.size();
}
+bool SuppressionContext::HasSuppressionType(const char *type) const {
+ for (int i = 0; i < suppression_types_num_; i++) {
+ if (0 == internal_strcmp(type, suppression_types_[i]))
+ return has_suppression_type_[i];
+ }
+ return false;
+}
+
const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
CHECK_LT(i, suppressions_.size());
return &suppressions_[i];
@@ -139,13 +159,8 @@ const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
void SuppressionContext::GetMatched(
InternalMmapVector<Suppression *> *matched) {
for (uptr i = 0; i < suppressions_.size(); i++)
- if (suppressions_[i].hit_count)
+ if (atomic_load_relaxed(&suppressions_[i].hit_count))
matched->push_back(&suppressions_[i]);
}
-const char *SuppressionTypeString(SuppressionType t) {
- CHECK(t < SuppressionTypeCount);
- return kTypeStrings[t];
-}
-
} // namespace __sanitizer
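
SuppressionContext::ParseFromFile() above adds a lookup fallback: when the suppressions file is given as a relative path and cannot be found, the same relative path is retried against the directory containing the running executable. A rough standalone sketch of that rule (illustration only, not part of the patch: ResolveSuppressionsPath is a hypothetical helper using std::filesystem and the Linux-specific /proc/self/exe, neither of which the runtime itself uses):

// Illustrative sketch only: retry a relative suppressions path next to the
// executable when it is not found as given.
#include <cstdio>
#include <filesystem>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

static fs::path ResolveSuppressionsPath(const std::string &filename) {
  fs::path given(filename);
  if (fs::exists(given) || given.is_absolute())
    return given;
  // Not found and relative: try the directory of the executable (Linux-only).
  std::error_code ec;
  fs::path exe = fs::read_symlink("/proc/self/exe", ec);
  if (ec) return given;
  fs::path candidate = exe.parent_path() / given;
  return fs::exists(candidate) ? candidate : given;
}

int main(int argc, char **argv) {
  fs::path chosen = ResolveSuppressionsPath(argc > 1 ? argv[1] : "supp.txt");
  std::printf("using suppressions file: %s\n", chosen.c_str());
  return 0;
}
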
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.h b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
index b4c719cb187..efec926476b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.h
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
@@ -5,55 +5,50 @@
//
//===----------------------------------------------------------------------===//
//
-// Suppression parsing/matching code shared between TSan and LSan.
+// Suppression parsing/matching code.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SUPPRESSIONS_H
#define SANITIZER_SUPPRESSIONS_H
#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
-enum SuppressionType {
- SuppressionNone,
- SuppressionRace,
- SuppressionMutex,
- SuppressionThread,
- SuppressionSignal,
- SuppressionLeak,
- SuppressionLib,
- SuppressionTypeCount
-};
-
struct Suppression {
- SuppressionType type;
+ const char *type;
char *templ;
- unsigned hit_count;
+ atomic_uint32_t hit_count;
uptr weight;
};
class SuppressionContext {
public:
- SuppressionContext() : suppressions_(1), can_parse_(true) {}
+ // Create new SuppressionContext capable of parsing given suppression types.
+ SuppressionContext(const char *suppression_types[],
+ int suppression_types_num);
+
+ void ParseFromFile(const char *filename);
void Parse(const char *str);
- bool Match(const char* str, SuppressionType type, Suppression **s);
+
+ bool Match(const char *str, const char *type, Suppression **s);
uptr SuppressionCount() const;
+ bool HasSuppressionType(const char *type) const;
const Suppression *SuppressionAt(uptr i) const;
void GetMatched(InternalMmapVector<Suppression *> *matched);
private:
+ static const int kMaxSuppressionTypes = 16;
+ const char **const suppression_types_;
+ const int suppression_types_num_;
+
InternalMmapVector<Suppression> suppressions_;
+ bool has_suppression_type_[kMaxSuppressionTypes];
bool can_parse_;
-
- friend class SuppressionContextTest;
};
-const char *SuppressionTypeString(SuppressionType t);
-
-bool TemplateMatch(char *templ, const char *str);
-
} // namespace __sanitizer
#endif // SANITIZER_SUPPRESSIONS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
index f417b087ae2..0e58d5fed6e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
@@ -9,35 +9,63 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_platform.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
-#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
-Symbolizer *Symbolizer::symbolizer_;
-StaticSpinMutex Symbolizer::init_mu_;
-LowLevelAllocator Symbolizer::symbolizer_allocator_;
+AddressInfo::AddressInfo() {
+ internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
+}
+
+void AddressInfo::Clear() {
+ InternalFree(module);
+ InternalFree(function);
+ InternalFree(file);
+ internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
+}
-Symbolizer *Symbolizer::GetOrNull() {
- SpinMutexLock l(&init_mu_);
- return symbolizer_;
+void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset) {
+ module = internal_strdup(mod_name);
+ module_offset = mod_offset;
}
-Symbolizer *Symbolizer::Get() {
- SpinMutexLock l(&init_mu_);
- RAW_CHECK_MSG(symbolizer_ != 0, "Using uninitialized symbolizer!");
- return symbolizer_;
+SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
+
+SymbolizedStack *SymbolizedStack::New(uptr addr) {
+ void *mem = InternalAlloc(sizeof(SymbolizedStack));
+ SymbolizedStack *res = new(mem) SymbolizedStack();
+ res->info.address = addr;
+ return res;
}
-Symbolizer *Symbolizer::Disable() {
- CHECK_EQ(0, symbolizer_);
- // Initialize a dummy symbolizer.
- symbolizer_ = new(symbolizer_allocator_) Symbolizer;
- return symbolizer_;
+void SymbolizedStack::ClearAll() {
+ info.Clear();
+ if (next)
+ next->ClearAll();
+ InternalFree(this);
}
+DataInfo::DataInfo() {
+ internal_memset(this, 0, sizeof(DataInfo));
+}
+
+void DataInfo::Clear() {
+ InternalFree(module);
+ InternalFree(name);
+ internal_memset(this, 0, sizeof(DataInfo));
+}
+
+Symbolizer *Symbolizer::symbolizer_;
+StaticSpinMutex Symbolizer::init_mu_;
+LowLevelAllocator Symbolizer::symbolizer_allocator_;
+
void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,
Symbolizer::EndSymbolizationHook end_hook) {
CHECK(start_hook_ == 0 && end_hook_ == 0);
@@ -45,7 +73,29 @@ void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,
end_hook_ = end_hook;
}
-Symbolizer::Symbolizer() : start_hook_(0), end_hook_(0) {}
+const char *Symbolizer::ModuleNameOwner::GetOwnedCopy(const char *str) {
+ mu_->CheckLocked();
+
+ // 'str' will be the same string multiple times in a row, optimize this case.
+ if (last_match_ && !internal_strcmp(last_match_, str))
+ return last_match_;
+
+ // FIXME: this is linear search.
+ // We should optimize this further if this turns out to be a bottleneck later.
+ for (uptr i = 0; i < storage_.size(); ++i) {
+ if (!internal_strcmp(storage_[i], str)) {
+ last_match_ = storage_[i];
+ return last_match_;
+ }
+ }
+ last_match_ = internal_strdup(str);
+ storage_.push_back(last_match_);
+ return last_match_;
+}
+
+Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
+ : module_names_(&mu_), n_modules_(0), modules_fresh_(false), tools_(tools),
+ start_hook_(0), end_hook_(0) {}
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
: sym_(sym) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index af93de75081..0a443a70115 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -17,91 +17,89 @@
#ifndef SANITIZER_SYMBOLIZER_H
#define SANITIZER_SYMBOLIZER_H
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
struct AddressInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
uptr address;
+
char *module;
uptr module_offset;
+
+ static const uptr kUnknown = ~(uptr)0;
char *function;
+ uptr function_offset;
+
char *file;
int line;
int column;
- AddressInfo() {
- internal_memset(this, 0, sizeof(AddressInfo));
- }
+ AddressInfo();
+ // Deletes all strings and resets all fields.
+ void Clear();
+ void FillModuleInfo(const char *mod_name, uptr mod_offset);
+};
- // Deletes all strings and sets all fields to zero.
- void Clear() {
- InternalFree(module);
- InternalFree(function);
- InternalFree(file);
- internal_memset(this, 0, sizeof(AddressInfo));
- }
+// Linked list of symbolized frames (each frame is described by AddressInfo).
+struct SymbolizedStack {
+ SymbolizedStack *next;
+ AddressInfo info;
+ static SymbolizedStack *New(uptr addr);
+ // Deletes current, and all subsequent frames in the linked list.
+ // The object cannot be accessed after the call to this function.
+ void ClearAll();
- void FillAddressAndModuleInfo(uptr addr, const char *mod_name,
- uptr mod_offset) {
- address = addr;
- module = internal_strdup(mod_name);
- module_offset = mod_offset;
- }
+ private:
+ SymbolizedStack();
};
+// For now, DataInfo is used to describe global variable.
struct DataInfo {
- uptr address;
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
char *module;
uptr module_offset;
char *name;
uptr start;
uptr size;
+
+ DataInfo();
+ void Clear();
};
-class Symbolizer {
+class SymbolizerTool;
+
+class Symbolizer final {
public:
- /// Returns platform-specific implementation of Symbolizer. The symbolizer
- /// must be initialized (with init or disable) before calling this function.
- static Symbolizer *Get();
- /// Returns platform-specific implementation of Symbolizer, or null if not
- /// initialized.
- static Symbolizer *GetOrNull();
- /// Returns platform-specific implementation of Symbolizer. Will
- /// automatically initialize symbolizer as if by calling Init(0) if needed.
+ /// Initialize and return platform-specific implementation of symbolizer
+ /// (if it wasn't already initialized).
static Symbolizer *GetOrInit();
- /// Initialize and return the symbolizer, given an optional path to an
- /// external symbolizer. The path argument is only required for legacy
- /// reasons as this function will check $PATH for an external symbolizer. Not
- /// thread safe.
- static Symbolizer *Init(const char* path_to_external = 0);
- /// Initialize the symbolizer in a disabled state. Not thread safe.
- static Symbolizer *Disable();
- // Fills at most "max_frames" elements of "frames" with descriptions
- // for a given address (in all inlined functions). Returns the number
- // of descriptions actually filled.
- virtual uptr SymbolizeCode(uptr address, AddressInfo *frames,
- uptr max_frames) {
- return 0;
- }
- virtual bool SymbolizeData(uptr address, DataInfo *info) {
- return false;
- }
- virtual bool IsAvailable() {
- return false;
- }
- virtual bool IsExternalAvailable() {
- return false;
+ // Returns a list of symbolized frames for a given address (containing
+ // all inlined functions, if necessary).
+ SymbolizedStack *SymbolizePC(uptr address);
+ bool SymbolizeData(uptr address, DataInfo *info);
+
+ // The module names Symbolizer returns are stable and unique for every given
+ // module. It is safe to store and compare them as pointers.
+ bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address);
+ const char *GetModuleNameForPc(uptr pc) {
+ const char *module_name = nullptr;
+ uptr unused;
+ if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused))
+ return module_name;
+ return nullptr;
}
+
// Release internal caches (if any).
- virtual void Flush() {}
+ void Flush();
// Attempts to demangle the provided C++ mangled name.
- virtual const char *Demangle(const char *name) {
- return name;
- }
- virtual void PrepareForSandboxing() {}
+ const char *Demangle(const char *name);
+ void PrepareForSandboxing();
// Allow user to install hooks that would be called before/after Symbolizer
// does the actual file/line info fetching. Specific sanitizers may need this
@@ -114,17 +112,53 @@ class Symbolizer {
EndSymbolizationHook end_hook);
private:
+ // GetModuleNameAndOffsetForPC has to return a string to the caller.
+ // Since the corresponding module might get unloaded later, we should create
+ // our owned copies of the strings that we can safely return.
+ // ModuleNameOwner does not provide any synchronization, thus calls to
+ // its methods should be protected by |mu_|.
+ class ModuleNameOwner {
+ public:
+ explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+ : storage_(kInitialCapacity), last_match_(nullptr),
+ mu_(synchronized_by) {}
+ const char *GetOwnedCopy(const char *str);
+
+ private:
+ static const uptr kInitialCapacity = 1000;
+ InternalMmapVector<const char*> storage_;
+ const char *last_match_;
+
+ BlockingMutex *mu_;
+ } module_names_;
+
/// Platform-specific function for creating a Symbolizer object.
- static Symbolizer *PlatformInit(const char *path_to_external);
- /// Create a symbolizer and store it to symbolizer_ without checking if one
- /// already exists. Not thread safe.
- static Symbolizer *CreateAndStore(const char *path_to_external);
+ static Symbolizer *PlatformInit();
+
+ bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
+ uptr *module_offset);
+ LoadedModule *FindModuleForAddress(uptr address);
+ LoadedModule modules_[kMaxNumberOfModules];
+ uptr n_modules_;
+ // If stale, need to reload the modules before looking up addresses.
+ bool modules_fresh_;
+
+ // Platform-specific default demangler, must not return nullptr.
+ const char *PlatformDemangle(const char *name);
+ void PlatformPrepareForSandboxing();
static Symbolizer *symbolizer_;
static StaticSpinMutex init_mu_;
- protected:
- Symbolizer();
+ // Mutex locked from public methods of |Symbolizer|, so that the internals
+ // (including individual symbolizer tools and platform-specific methods) are
+ // always synchronized.
+ BlockingMutex mu_;
+
+ typedef IntrusiveList<SymbolizerTool>::Iterator Iterator;
+ IntrusiveList<SymbolizerTool> tools_;
+
+ explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);
static LowLevelAllocator symbolizer_allocator_;
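
The ModuleNameOwner comment above spells out why module names are interned: a returned pointer must stay valid even if the module is later unloaded, and identical names must map to the same pointer so callers can store and compare them directly. A small standalone sketch of the same interning idea (illustration only, not part of the patch: NameInterner is a hypothetical class using std::vector and unique_ptr instead of the internal allocator, and it omits the external locking the real class depends on):

// Illustrative sketch only: a tiny string interner returning stable pointers.
#include <cstdio>
#include <cstring>
#include <memory>
#include <vector>

class NameInterner {
 public:
  // Returns a pointer that stays valid and is unique per distinct string.
  const char *GetOwnedCopy(const char *str) {
    if (last_match_ && std::strcmp(last_match_, str) == 0)
      return last_match_;               // fast path: repeated queries
    for (const auto &owned : storage_)
      if (std::strcmp(owned.get(), str) == 0)
        return last_match_ = owned.get();
    storage_.emplace_back(MakeCopy(str));
    return last_match_ = storage_.back().get();
  }

 private:
  static std::unique_ptr<char[]> MakeCopy(const char *s) {
    std::unique_ptr<char[]> copy(new char[std::strlen(s) + 1]);
    std::strcpy(copy.get(), s);
    return copy;
  }
  std::vector<std::unique_ptr<char[]>> storage_;
  const char *last_match_ = nullptr;
};

int main() {
  NameInterner names;
  const char *a = names.GetOwnedCopy("libfoo.so");
  const char *b = names.GetOwnedCopy("libfoo.so");
  std::printf("same pointer: %d\n", a == b);  // prints 1
}
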
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h
new file mode 100644
index 00000000000..a87964b4636
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -0,0 +1,149 @@
+//===-- sanitizer_symbolizer_internal.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Header for internal classes and functions to be used by implementations of
+// symbolizers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
+#define SANITIZER_SYMBOLIZER_INTERNAL_H
+
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr
+// is extracted. When extracting a string, a newly allocated (using
+// InternalAlloc) and null-terminated buffer is returned. They return a pointer
+// to the next character after the found delimiter.
+const char *ExtractToken(const char *str, const char *delims, char **result);
+const char *ExtractInt(const char *str, const char *delims, int *result);
+const char *ExtractUptr(const char *str, const char *delims, uptr *result);
+const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
+ char **result);
+
+const char *DemangleCXXABI(const char *name);
+
+// SymbolizerTool is an interface that is implemented by individual "tools"
+// that can perform symbolication (external llvm-symbolizer, libbacktrace,
+// Windows DbgHelp symbolizer, etc.).
+class SymbolizerTool {
+ public:
+ // The main |Symbolizer| class implements a "fallback chain" of symbolizer
+ // tools. In a request to symbolize an address, if one tool returns false,
+ // the next tool in the chain will be tried.
+ SymbolizerTool *next;
+
+ SymbolizerTool() : next(nullptr) { }
+
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable.
+
+ // The |stack| parameter is inout. It is pre-filled with the address,
+ // module base and module offset values and is to be used to construct
+ // other stack frames.
+ virtual bool SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ UNIMPLEMENTED();
+ }
+
+ // The |info| parameter is inout. It is pre-filled with the module base
+ // and module offset values.
+ virtual bool SymbolizeData(uptr addr, DataInfo *info) {
+ UNIMPLEMENTED();
+ }
+
+ virtual void Flush() {}
+
+ // Return nullptr to fallback to the default platform-specific demangler.
+ virtual const char *Demangle(const char *name) {
+ return nullptr;
+ }
+};
+
+// SymbolizerProcess encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess.
+// SymbolizerProcess may not be used from two threads simultaneously.
+class SymbolizerProcess {
+ public:
+ explicit SymbolizerProcess(const char *path, bool use_forkpty = false);
+ const char *SendCommand(const char *command);
+
+ protected:
+ virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ UNIMPLEMENTED();
+ }
+
+ /// The maximum number of arguments required to invoke a tool process.
+ enum { kArgVMax = 6 };
+
+ /// Fill in an argv array to invoke the child process.
+ virtual void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool ReadFromSymbolizer(char *buffer, uptr max_length);
+
+ private:
+ bool Restart();
+ const char *SendCommandImpl(const char *command);
+ bool WriteToSymbolizer(const char *buffer, uptr length);
+ bool StartSymbolizerSubprocess();
+
+ const char *path_;
+ fd_t input_fd_;
+ fd_t output_fd_;
+
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
+ static const uptr kMaxTimesRestarted = 5;
+ static const int kSymbolizerStartupTimeMillis = 10;
+ uptr times_restarted_;
+ bool failed_to_start_;
+ bool reported_invalid_path_;
+ bool use_forkpty_;
+};
+
+class LLVMSymbolizerProcess;
+
+// This tool invokes llvm-symbolizer in a subprocess. It should be as portable
+// as the llvm-symbolizer tool is.
+class LLVMSymbolizer : public SymbolizerTool {
+ public:
+ explicit LLVMSymbolizer(const char *path, LowLevelAllocator *allocator);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ private:
+ const char *SendCommand(bool is_data, const char *module_name,
+ uptr module_offset);
+
+ LLVMSymbolizerProcess *symbolizer_process_;
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+};
+
+// Parses one or more two-line strings in the following format:
+// <function_name>
+// <file_name>:<line_number>[:<column_number>]
+// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
+// them use the same output format.
+void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);
+
+// Parses a two-line string in the following format:
+// <symbol_name>
+// <start_address> <size>
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeDataOutput(const char *str, DataInfo *info);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_INTERNAL_H
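
The SymbolizerTool comment above describes the fallback chain: the Symbolizer walks its list of tools in order and stops at the first one that reports success for a request. A compact standalone sketch of that dispatch (illustration only, not part of the patch: Tool, Frame and SymbolizeWithFallback are hypothetical stand-ins, and std::vector replaces IntrusiveList):

// Illustrative sketch only: try each registered tool until one succeeds.
#include <cstdio>
#include <vector>

struct Frame { const char *function = nullptr; };

class Tool {
 public:
  virtual ~Tool() = default;
  // Returns true if this tool produced useful information for |addr|.
  virtual bool SymbolizePC(unsigned long addr, Frame *frame) = 0;
};

class AlwaysFails : public Tool {
  bool SymbolizePC(unsigned long, Frame *) override { return false; }
};

class FakeResolver : public Tool {
  bool SymbolizePC(unsigned long, Frame *frame) override {
    frame->function = "main";
    return true;
  }
};

static bool SymbolizeWithFallback(const std::vector<Tool *> &tools,
                                  unsigned long addr, Frame *frame) {
  for (Tool *tool : tools)
    if (tool->SymbolizePC(addr, frame))  // first success wins
      return true;
  return false;                          // every tool declined
}

int main() {
  AlwaysFails first;
  FakeResolver second;
  Frame frame;
  std::vector<Tool *> chain = {&first, &second};
  if (SymbolizeWithFallback(chain, 0x400000, &frame))
    std::printf("resolved to %s\n", frame.function);
}
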
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
index 3023002af47..949ad7845f7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
@@ -31,11 +31,13 @@
namespace __sanitizer {
+static char *DemangleAlloc(const char *name, bool always_alloc);
+
#if SANITIZER_LIBBACKTRACE
namespace {
-#if SANITIZER_CP_DEMANGLE
+# if SANITIZER_CP_DEMANGLE
struct CplusV3DemangleData {
char *buf;
uptr size, allocated;
@@ -62,13 +64,13 @@ static void CplusV3DemangleCallback(const char *s, size_t l, void *vdata) {
}
} // extern "C"
-char *CplusV3Demangle(const char *name, bool always_alloc) {
+char *CplusV3Demangle(const char *name) {
CplusV3DemangleData data;
data.buf = 0;
data.size = 0;
data.allocated = 0;
if (cplus_demangle_v3_callback(name, DMGL_PARAMS | DMGL_ANSI,
- CplusV3DemangleCallback, &data)) {
+ CplusV3DemangleCallback, &data)) {
if (data.size + 64 > data.allocated)
return data.buf;
char *buf = internal_strdup(data.buf);
@@ -77,56 +79,53 @@ char *CplusV3Demangle(const char *name, bool always_alloc) {
}
if (data.buf)
InternalFree(data.buf);
- if (always_alloc)
- return internal_strdup(name);
- return 0;
-}
-#else
-const char *CplusV3Demangle(const char *name, bool always_alloc) {
- if (always_alloc)
- return internal_strdup(name);
return 0;
}
-#endif
-
-struct SymbolizeCodeData {
- AddressInfo *frames;
- uptr n_frames;
- uptr max_frames;
- const char *module_name;
- uptr module_offset;
+# endif // SANITIZER_CP_DEMANGLE
+
+struct SymbolizeCodeCallbackArg {
+ SymbolizedStack *first;
+ SymbolizedStack *last;
+ uptr frames_symbolized;
+
+ AddressInfo *get_new_frame(uintptr_t addr) {
+ CHECK(last);
+ if (frames_symbolized > 0) {
+ SymbolizedStack *cur = SymbolizedStack::New(addr);
+ AddressInfo *info = &cur->info;
+ info->FillModuleInfo(first->info.module, first->info.module_offset);
+ last->next = cur;
+ last = cur;
+ }
+ CHECK_EQ(addr, first->info.address);
+ CHECK_EQ(addr, last->info.address);
+ return &last->info;
+ }
};
extern "C" {
static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
const char *filename, int lineno,
const char *function) {
- SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata;
+ SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
if (function) {
- AddressInfo *info = &cdata->frames[cdata->n_frames++];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, cdata->module_name,
- cdata->module_offset);
- info->function = CplusV3Demangle(function, true);
+ AddressInfo *info = cdata->get_new_frame(addr);
+ info->function = DemangleAlloc(function, /*always_alloc*/ true);
if (filename)
info->file = internal_strdup(filename);
info->line = lineno;
- if (cdata->n_frames == cdata->max_frames)
- return 1;
+ cdata->frames_symbolized++;
}
return 0;
}
static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
const char *symname, uintptr_t, uintptr_t) {
- SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata;
+ SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
if (symname) {
- AddressInfo *info = &cdata->frames[0];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, cdata->module_name,
- cdata->module_offset);
- info->function = CplusV3Demangle(symname, true);
- cdata->n_frames = 1;
+ AddressInfo *info = cdata->get_new_frame(addr);
+ info->function = DemangleAlloc(symname, /*always_alloc*/ true);
+ cdata->frames_symbolized++;
}
}
@@ -134,7 +133,7 @@ static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,
uintptr_t symval, uintptr_t symsize) {
DataInfo *info = (DataInfo *)vdata;
if (symname && symval) {
- info->name = CplusV3Demangle(symname, true);
+ info->name = DemangleAlloc(symname, /*always_alloc*/ true);
info->start = symval;
info->size = symsize;
}
@@ -154,64 +153,55 @@ LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
return new(*alloc) LibbacktraceSymbolizer(state);
}
-uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
- uptr max_frames,
- const char *module_name,
- uptr module_offset) {
- SymbolizeCodeData data;
- data.frames = frames;
- data.n_frames = 0;
- data.max_frames = max_frames;
- data.module_name = module_name;
- data.module_offset = module_offset;
+bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ SymbolizeCodeCallbackArg data;
+ data.first = stack;
+ data.last = stack;
+ data.frames_symbolized = 0;
backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback,
ErrorCallback, &data);
- if (data.n_frames)
- return data.n_frames;
+ if (data.frames_symbolized > 0)
+ return true;
backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback,
ErrorCallback, &data);
- return data.n_frames;
+ return (data.frames_symbolized > 0);
}
-bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
- backtrace_syminfo((backtrace_state *)state_, info->address,
- SymbolizeDataCallback, ErrorCallback, info);
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeDataCallback,
+ ErrorCallback, info);
return true;
}
-const char *LibbacktraceSymbolizer::Demangle(const char *name) {
-#if SANITIZER_CP_DEMANGLE
- const char *demangled = CplusV3Demangle(name, false);
- if (demangled)
- return demangled;
- return name;
-#else
- return 0;
-#endif
-}
-
#else // SANITIZER_LIBBACKTRACE
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
return 0;
}
-uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
- uptr max_frames,
- const char *module_name,
- uptr module_offset) {
+bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
(void)state_;
- return 0;
+ return false;
}
-bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return false;
}
-const char *LibbacktraceSymbolizer::Demangle(const char *name) {
+#endif // SANITIZER_LIBBACKTRACE
+
+static char *DemangleAlloc(const char *name, bool always_alloc) {
+#if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE
+ if (char *demangled = CplusV3Demangle(name))
+ return demangled;
+#endif
+ if (always_alloc)
+ return internal_strdup(name);
return 0;
}
-#endif // SANITIZER_LIBBACKTRACE
+const char *LibbacktraceSymbolizer::Demangle(const char *name) {
+ return DemangleAlloc(name, /*always_alloc*/ false);
+}
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
index 05f0558c3d8..ab1d6f99163 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
@@ -1,4 +1,4 @@
-//===-- sanitizer_symbolizer_libbacktrace.h -------------------------------===//
+//===-- sanitizer_symbolizer_libbacktrace.h ---------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -9,27 +9,34 @@
// run-time libraries.
// Header for libbacktrace symbolizer.
//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+#define SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
#include "sanitizer_platform.h"
#include "sanitizer_common.h"
-#include "sanitizer_symbolizer.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_symbolizer_internal.h"
#ifndef SANITIZER_LIBBACKTRACE
# define SANITIZER_LIBBACKTRACE 0
#endif
+#ifndef SANITIZER_CP_DEMANGLE
+# define SANITIZER_CP_DEMANGLE 0
+#endif
+
namespace __sanitizer {
-class LibbacktraceSymbolizer {
+class LibbacktraceSymbolizer : public SymbolizerTool {
public:
static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);
- uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames,
- const char *module_name, uptr module_offset);
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
- bool SymbolizeData(DataInfo *info);
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
- const char *Demangle(const char *name);
+ // May return NULL if demangling failed.
+ const char *Demangle(const char *name) override;
private:
explicit LibbacktraceSymbolizer(void *state) : state_(state) {}
@@ -38,3 +45,4 @@ class LibbacktraceSymbolizer {
};
} // namespace __sanitizer
+#endif // SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index 2d9caaf4e85..4264b5e3ddc 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -9,29 +9,418 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_internal_defs.h"
-#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
-Symbolizer *Symbolizer::CreateAndStore(const char *path_to_external) {
- Symbolizer *platform_symbolizer = PlatformInit(path_to_external);
- if (!platform_symbolizer)
- return Disable();
- symbolizer_ = platform_symbolizer;
- return platform_symbolizer;
+const char *ExtractToken(const char *str, const char *delims, char **result) {
+ uptr prefix_len = internal_strcspn(str, delims);
+ *result = (char*)InternalAlloc(prefix_len + 1);
+ internal_memcpy(*result, str, prefix_len);
+ (*result)[prefix_len] = '\0';
+ const char *prefix_end = str + prefix_len;
+ if (*prefix_end != '\0') prefix_end++;
+ return prefix_end;
}
-Symbolizer *Symbolizer::Init(const char *path_to_external) {
- CHECK_EQ(0, symbolizer_);
- return CreateAndStore(path_to_external);
+const char *ExtractInt(const char *str, const char *delims, int *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (int)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+const char *ExtractUptr(const char *str, const char *delims, uptr *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (uptr)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
+ char **result) {
+ const char *found_delimiter = internal_strstr(str, delimiter);
+ uptr prefix_len =
+ found_delimiter ? found_delimiter - str : internal_strlen(str);
+ *result = (char *)InternalAlloc(prefix_len + 1);
+ internal_memcpy(*result, str, prefix_len);
+ (*result)[prefix_len] = '\0';
+ const char *prefix_end = str + prefix_len;
+ if (*prefix_end != '\0') prefix_end += internal_strlen(delimiter);
+ return prefix_end;
+}
+
+SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
+ BlockingMutexLock l(&mu_);
+ const char *module_name;
+ uptr module_offset;
+ SymbolizedStack *res = SymbolizedStack::New(addr);
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
+ return res;
+ // Always fill data about module name and offset.
+ res->info.FillModuleInfo(module_name, module_offset);
+ for (auto iter = Iterator(&tools_); iter.hasNext();) {
+ auto *tool = iter.next();
+ SymbolizerScope sym_scope(this);
+ if (tool->SymbolizePC(addr, res)) {
+ return res;
+ }
+ }
+ return res;
+}
+
+bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ BlockingMutexLock l(&mu_);
+ const char *module_name;
+ uptr module_offset;
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
+ return false;
+ info->Clear();
+ info->module = internal_strdup(module_name);
+ info->module_offset = module_offset;
+ for (auto iter = Iterator(&tools_); iter.hasNext();) {
+ auto *tool = iter.next();
+ SymbolizerScope sym_scope(this);
+ if (tool->SymbolizeData(addr, info)) {
+ return true;
+ }
+ }
+ return true;
+}
+
+bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ BlockingMutexLock l(&mu_);
+ const char *internal_module_name = nullptr;
+ if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
+ module_address))
+ return false;
+
+ if (module_name)
+ *module_name = module_names_.GetOwnedCopy(internal_module_name);
+ return true;
+}
+
+void Symbolizer::Flush() {
+ BlockingMutexLock l(&mu_);
+ for (auto iter = Iterator(&tools_); iter.hasNext();) {
+ auto *tool = iter.next();
+ SymbolizerScope sym_scope(this);
+ tool->Flush();
+ }
+}
+
+const char *Symbolizer::Demangle(const char *name) {
+ BlockingMutexLock l(&mu_);
+ for (auto iter = Iterator(&tools_); iter.hasNext();) {
+ auto *tool = iter.next();
+ SymbolizerScope sym_scope(this);
+ if (const char *demangled = tool->Demangle(name))
+ return demangled;
+ }
+ return PlatformDemangle(name);
+}
+
+void Symbolizer::PrepareForSandboxing() {
+ BlockingMutexLock l(&mu_);
+ PlatformPrepareForSandboxing();
+}
+
+bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
+ const char **module_name,
+ uptr *module_offset) {
+ LoadedModule *module = FindModuleForAddress(address);
+ if (module == 0)
+ return false;
+ *module_name = module->full_name();
+ *module_offset = address - module->base_address();
+ return true;
+}
+
+LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
+ bool modules_were_reloaded = false;
+ if (!modules_fresh_) {
+ for (uptr i = 0; i < n_modules_; i++)
+ modules_[i].clear();
+ n_modules_ =
+ GetListOfModules(modules_, kMaxNumberOfModules, /* filter */ nullptr);
+ CHECK_GT(n_modules_, 0);
+ CHECK_LT(n_modules_, kMaxNumberOfModules);
+ modules_fresh_ = true;
+ modules_were_reloaded = true;
+ }
+ for (uptr i = 0; i < n_modules_; i++) {
+ if (modules_[i].containsAddress(address)) {
+ return &modules_[i];
+ }
+ }
+ // Reload the modules and look up again, if we haven't tried it yet.
+ if (!modules_were_reloaded) {
+ // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors.
+ // It's too aggressive to reload the list of modules each time we fail
+ // to find a module for a given address.
+ modules_fresh_ = false;
+ return FindModuleForAddress(address);
+ }
+ return 0;
}
Symbolizer *Symbolizer::GetOrInit() {
SpinMutexLock l(&init_mu_);
- if (symbolizer_ == 0)
- return CreateAndStore(0);
+ if (symbolizer_)
+ return symbolizer_;
+ symbolizer_ = PlatformInit();
+ CHECK(symbolizer_);
return symbolizer_;
}
+// For now we assume the following protocol:
+// For each request of the form
+// <module_name> <module_offset>
+// passed to STDIN, external symbolizer prints to STDOUT response:
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// ...
+// <empty line>
+class LLVMSymbolizerProcess : public SymbolizerProcess {
+ public:
+ explicit LLVMSymbolizerProcess(const char *path) : SymbolizerProcess(path) {}
+
+ private:
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
+ // Empty line marks the end of llvm-symbolizer output.
+ return length >= 2 && buffer[length - 1] == '\n' &&
+ buffer[length - 2] == '\n';
+ }
+
+ void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const override {
+#if defined(__x86_64h__)
+ const char* const kSymbolizerArch = "--default-arch=x86_64h";
+#elif defined(__x86_64__)
+ const char* const kSymbolizerArch = "--default-arch=x86_64";
+#elif defined(__i386__)
+ const char* const kSymbolizerArch = "--default-arch=i386";
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ const char* const kSymbolizerArch = "--default-arch=powerpc64";
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ const char* const kSymbolizerArch = "--default-arch=powerpc64le";
+#else
+ const char* const kSymbolizerArch = "--default-arch=unknown";
+#endif
+
+ const char *const inline_flag = common_flags()->symbolize_inline_frames
+ ? "--inlining=true"
+ : "--inlining=false";
+ int i = 0;
+ argv[i++] = path_to_binary;
+ argv[i++] = inline_flag;
+ argv[i++] = kSymbolizerArch;
+ argv[i++] = nullptr;
+ }
+};
+
+LLVMSymbolizer::LLVMSymbolizer(const char *path, LowLevelAllocator *allocator)
+ : symbolizer_process_(new(*allocator) LLVMSymbolizerProcess(path)) {}
+
+// Parse a <file>:<line>[:<column>] buffer. The file path may contain colons on
+// Windows, so extract tokens from the right hand side first. The column info is
+// also optional.
+static const char *ParseFileLineInfo(AddressInfo *info, const char *str) {
+ char *file_line_info = 0;
+ str = ExtractToken(str, "\n", &file_line_info);
+ CHECK(file_line_info);
+ // Parse the last :<int>, which must be there.
+ char *last_colon = internal_strrchr(file_line_info, ':');
+ CHECK(last_colon);
+ int line_or_column = internal_atoll(last_colon + 1);
+ // Truncate the string at the last colon and find the next-to-last colon.
+ *last_colon = '\0';
+ last_colon = internal_strrchr(file_line_info, ':');
+ if (last_colon && IsDigit(last_colon[1])) {
+ // If the second-to-last colon is followed by a digit, it must be the line
+ // number, and the previous parsed number was a column.
+ info->line = internal_atoll(last_colon + 1);
+ info->column = line_or_column;
+ *last_colon = '\0';
+ } else {
+ // Otherwise, we have line info but no column info.
+ info->line = line_or_column;
+ info->column = 0;
+ }
+ ExtractToken(file_line_info, "", &info->file);
+ InternalFree(file_line_info);
+ return str;
+}
+
+// Parses one or more two-line strings in the following format:
+// <function_name>
+// <file_name>:<line_number>[:<column_number>]
+// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
+// them use the same output format.
+void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
+ bool top_frame = true;
+ SymbolizedStack *last = res;
+ while (true) {
+ char *function_name = 0;
+ str = ExtractToken(str, "\n", &function_name);
+ CHECK(function_name);
+ if (function_name[0] == '\0') {
+ // There are no more frames.
+ InternalFree(function_name);
+ break;
+ }
+ SymbolizedStack *cur;
+ if (top_frame) {
+ cur = res;
+ top_frame = false;
+ } else {
+ cur = SymbolizedStack::New(res->info.address);
+ cur->info.FillModuleInfo(res->info.module, res->info.module_offset);
+ last->next = cur;
+ last = cur;
+ }
+
+ AddressInfo *info = &cur->info;
+ info->function = function_name;
+ str = ParseFileLineInfo(info, str);
+
+ // Functions and filenames can be "??", in which case we write 0
+ // to address info to mark that names are unknown.
+ if (0 == internal_strcmp(info->function, "??")) {
+ InternalFree(info->function);
+ info->function = 0;
+ }
+ if (0 == internal_strcmp(info->file, "??")) {
+ InternalFree(info->file);
+ info->file = 0;
+ }
+ }
+}
+
+// Parses a two-line string in the following format:
+// <symbol_name>
+// <start_address> <size>
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
+ str = ExtractToken(str, "\n", &info->name);
+ str = ExtractUptr(str, " ", &info->start);
+ str = ExtractUptr(str, "\n", &info->size);
+}
+
+bool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ if (const char *buf = SendCommand(/*is_data*/ false, stack->info.module,
+ stack->info.module_offset)) {
+ ParseSymbolizePCOutput(buf, stack);
+ return true;
+ }
+ return false;
+}
+
+bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ if (const char *buf =
+ SendCommand(/*is_data*/ true, info->module, info->module_offset)) {
+ ParseSymbolizeDataOutput(buf, info);
+ info->start += (addr - info->module_offset); // Add the base address.
+ return true;
+ }
+ return false;
+}
+
+const char *LLVMSymbolizer::SendCommand(bool is_data, const char *module_name,
+ uptr module_offset) {
+ CHECK(module_name);
+ internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
+ is_data ? "DATA " : "", module_name, module_offset);
+ return symbolizer_process_->SendCommand(buffer_);
+}
+
+SymbolizerProcess::SymbolizerProcess(const char *path, bool use_forkpty)
+ : path_(path),
+ input_fd_(kInvalidFd),
+ output_fd_(kInvalidFd),
+ times_restarted_(0),
+ failed_to_start_(false),
+ reported_invalid_path_(false),
+ use_forkpty_(use_forkpty) {
+ CHECK(path_);
+ CHECK_NE(path_[0], '\0');
+}
+
+const char *SymbolizerProcess::SendCommand(const char *command) {
+ for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
+ // Start or restart symbolizer if we failed to send command to it.
+ if (const char *res = SendCommandImpl(command))
+ return res;
+ Restart();
+ }
+ if (!failed_to_start_) {
+ Report("WARNING: Failed to use and restart external symbolizer!\n");
+ failed_to_start_ = true;
+ }
+ return 0;
+}
+
+const char *SymbolizerProcess::SendCommandImpl(const char *command) {
+ if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
+ return 0;
+ if (!WriteToSymbolizer(command, internal_strlen(command)))
+ return 0;
+ if (!ReadFromSymbolizer(buffer_, kBufferSize))
+ return 0;
+ return buffer_;
+}
+
+bool SymbolizerProcess::Restart() {
+ if (input_fd_ != kInvalidFd)
+ CloseFile(input_fd_);
+ if (output_fd_ != kInvalidFd)
+ CloseFile(output_fd_);
+ return StartSymbolizerSubprocess();
+}
+
+bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
+ if (max_length == 0)
+ return true;
+ uptr read_len = 0;
+ while (true) {
+ uptr just_read = 0;
+ bool success = ReadFromFile(input_fd_, buffer + read_len,
+ max_length - read_len - 1, &just_read);
+ // We can't read 0 bytes, as we don't expect external symbolizer to close
+ // its stdout.
+ if (!success || just_read == 0) {
+ Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
+ return false;
+ }
+ read_len += just_read;
+ if (ReachedEndOfOutput(buffer, read_len))
+ break;
+ }
+ buffer[read_len] = '\0';
+ return true;
+}
+
+bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {
+ if (length == 0)
+ return true;
+ uptr write_len = 0;
+ bool success = WriteToFile(output_fd_, buffer, length, &write_len);
+ if (!success || write_len != length) {
+ Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
+ return false;
+ }
+ return true;
+}
+
} // namespace __sanitizer
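
ParseSymbolizePCOutput()/ParseFileLineInfo() above decode llvm-symbolizer's reply format: pairs of lines, '<function_name>' followed by '<file_name>:<line>[:<column>]', terminated by an empty line, with "??" standing for unknown names and the numeric fields parsed from the right because file paths may themselves contain colons. A rough standalone parser for a single frame in that format (illustration only, not part of the patch: ParseOneFrame and FrameInfo are hypothetical, and std::string replaces the internal token helpers):

// Illustrative sketch only: parse one llvm-symbolizer frame of the form
//   <function_name>\n<file_name>:<line_number>[:<column_number>]\n
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <string>

struct FrameInfo {
  std::string function, file;
  int line = 0, column = 0;
};

static bool ParseOneFrame(const std::string &reply, FrameInfo *out) {
  size_t nl = reply.find('\n');
  if (nl == std::string::npos) return false;
  out->function = reply.substr(0, nl);

  std::string file_line = reply.substr(nl + 1);
  if (size_t end = file_line.find('\n'); end != std::string::npos)
    file_line.resize(end);

  // Parse from the right: the last ':<int>' is always present; if the field
  // before it is also numeric, we have both a line and a column.
  size_t last = file_line.rfind(':');
  if (last == std::string::npos) return false;
  int trailing = std::atoi(file_line.c_str() + last + 1);
  file_line.resize(last);
  size_t prev = file_line.rfind(':');
  if (prev != std::string::npos &&
      std::isdigit((unsigned char)file_line[prev + 1])) {
    out->line = std::atoi(file_line.c_str() + prev + 1);
    out->column = trailing;
    file_line.resize(prev);
  } else {
    out->line = trailing;
    out->column = 0;
  }
  out->file = file_line;
  // llvm-symbolizer prints "??" when a name is unknown.
  if (out->function == "??") out->function.clear();
  if (out->file == "??") out->file.clear();
  return true;
}

int main() {
  FrameInfo info;
  if (ParseOneFrame("main\n/src/app.cc:42:7\n", &info))
    std::printf("%s at %s:%d:%d\n", info.function.c_str(), info.file.c_str(),
                info.line, info.column);
}
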
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
new file mode 100644
index 00000000000..e65976c18d0
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -0,0 +1,182 @@
+//===-- sanitizer_symbolizer_mac.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Implementation of Mac-specific "atos" symbolizer.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_mac.h"
+#include "sanitizer_symbolizer_mac.h"
+
+namespace __sanitizer {
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <util.h>
+
+bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ Dl_info info;
+ int result = dladdr((const void *)addr, &info);
+ if (!result) return false;
+ const char *demangled = DemangleCXXABI(info.dli_sname);
+ stack->info.function = internal_strdup(demangled);
+ return true;
+}
+
+bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
+ Dl_info info;
+ int result = dladdr((const void *)addr, &info);
+ if (!result) return false;
+ const char *demangled = DemangleCXXABI(info.dli_sname);
+ datainfo->name = internal_strdup(demangled);
+ datainfo->start = (uptr)info.dli_saddr;
+ return true;
+}
+
+class AtosSymbolizerProcess : public SymbolizerProcess {
+ public:
+ explicit AtosSymbolizerProcess(const char *path, pid_t parent_pid)
+ : SymbolizerProcess(path, /*use_forkpty*/ true) {
+ // Put the string command line argument in the object so that it outlives
+ // the call to GetArgV.
+ internal_snprintf(pid_str_, sizeof(pid_str_), "%d", parent_pid);
+ }
+
+ private:
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
+ return (length >= 1 && buffer[length - 1] == '\n');
+ }
+
+ void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const override {
+ int i = 0;
+ argv[i++] = path_to_binary;
+ argv[i++] = "-p";
+ argv[i++] = &pid_str_[0];
+ if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) {
+ // On Mavericks atos prints a deprecation warning which we suppress by
+ // passing -d. The warning isn't present on other OSX versions, even the
+ // newer ones.
+ argv[i++] = "-d";
+ }
+ argv[i++] = nullptr;
+ }
+
+ char pid_str_[16];
+};
+
+static const char *kAtosErrorMessages[] = {
+ "atos cannot examine process",
+ "unable to get permission to examine process",
+ "An admin user name and password is required",
+ "could not load inserted library",
+ "architecture mismatch between analysis process",
+};
+
+static bool IsAtosErrorMessage(const char *str) {
+ for (uptr i = 0; i < ARRAY_SIZE(kAtosErrorMessages); i++) {
+ if (internal_strstr(str, kAtosErrorMessages[i])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool ParseCommandOutput(const char *str, uptr addr, char **out_name,
+ char **out_module, char **out_file, uptr *line,
+ uptr *start_address) {
+ // Trim ending newlines.
+ char *trim;
+ ExtractTokenUpToDelimiter(str, "\n", &trim);
+
+ // The line from `atos` is in one of these formats:
+ // myfunction (in library.dylib) (sourcefile.c:17)
+ // myfunction (in library.dylib) + 0x1fe
+ // myfunction (in library.dylib) + 15
+ // 0xdeadbeef (in library.dylib) + 0x1fe
+ // 0xdeadbeef (in library.dylib) + 15
+ // 0xdeadbeef (in library.dylib)
+ // 0xdeadbeef
+
+ if (IsAtosErrorMessage(trim)) {
+ Report("atos returned an error: %s\n", trim);
+ InternalFree(trim);
+ return false;
+ }
+
+ const char *rest = trim;
+ char *symbol_name;
+ rest = ExtractTokenUpToDelimiter(rest, " (in ", &symbol_name);
+ if (internal_strncmp(symbol_name, "0x", 2) != 0)
+ *out_name = symbol_name;
+ else
+ InternalFree(symbol_name);
+ rest = ExtractTokenUpToDelimiter(rest, ") ", out_module);
+
+ if (rest[0] == '(') {
+ if (out_file) {
+ rest++;
+ rest = ExtractTokenUpToDelimiter(rest, ":", out_file);
+ char *extracted_line_number;
+ rest = ExtractTokenUpToDelimiter(rest, ")", &extracted_line_number);
+ if (line) *line = (uptr)internal_atoll(extracted_line_number);
+ InternalFree(extracted_line_number);
+ }
+ } else if (rest[0] == '+') {
+ rest += 2;
+ uptr offset = internal_atoll(rest);
+ if (start_address) *start_address = addr - offset;
+ }
+
+ InternalFree(trim);
+ return true;
+}
+
+AtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator)
+ : process_(new(*allocator) AtosSymbolizerProcess(path, getpid())) {}
+
+bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ if (!process_) return false;
+ char command[32];
+ internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
+ const char *buf = process_->SendCommand(command);
+ if (!buf) return false;
+ uptr line;
+ if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
+ &stack->info.file, &line, nullptr)) {
+ process_ = nullptr;
+ return false;
+ }
+ stack->info.line = (int)line;
+ return true;
+}
+
+bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ if (!process_) return false;
+ char command[32];
+ internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
+ const char *buf = process_->SendCommand(command);
+ if (!buf) return false;
+ if (!ParseCommandOutput(buf, addr, &info->name, &info->module, nullptr,
+ nullptr, &info->start)) {
+ process_ = nullptr;
+ return false;
+ }
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
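
The format list inside ParseCommandOutput() above is the whole contract with `atos`. As a rough standalone illustration of the same tokenization — not the runtime code, which must avoid libc allocation and therefore uses ExtractTokenUpToDelimiter with the internal allocator — the parsing can be sketched with the C++ standard library. ParseAtosLine and its field names are inventions of this sketch, and the "+ offset" forms (which the real code uses to recover start_address) are simply ignored here:

```cpp
#include <cstdio>
#include <cstdlib>
#include <string>

struct ParsedFrame {
  std::string function, module, file;
  unsigned long line = 0;
};

// Parses lines such as "myfunction (in library.dylib) (sourcefile.c:17)".
static bool ParseAtosLine(const std::string &s, ParsedFrame *out) {
  size_t in_pos = s.find(" (in ");
  if (in_pos == std::string::npos) return false;  // bare "0xdeadbeef" line
  std::string name = s.substr(0, in_pos);
  if (name.compare(0, 2, "0x") != 0) out->function = name;  // else unnamed PC
  size_t mod_beg = in_pos + 5;
  size_t mod_end = s.find(") ", mod_beg);
  if (mod_end == std::string::npos) mod_end = s.rfind(')');
  if (mod_end == std::string::npos) return false;  // malformed line
  out->module = s.substr(mod_beg, mod_end - mod_beg);
  size_t file_beg = s.find('(', mod_end + 1);
  if (file_beg != std::string::npos) {  // "(sourcefile.c:17)" suffix present
    size_t colon = s.find(':', file_beg);
    if (colon == std::string::npos) return true;  // no line number
    out->file = s.substr(file_beg + 1, colon - file_beg - 1);
    out->line = strtoul(s.c_str() + colon + 1, nullptr, 10);
  }
  return true;
}

int main() {
  ParsedFrame f;
  if (ParseAtosLine("myfunction (in library.dylib) (sourcefile.c:17)", &f))
    std::printf("%s %s %s:%lu\n", f.function.c_str(), f.module.c_str(),
                f.file.c_str(), f.line);
  return 0;
}
```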
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h
new file mode 100644
index 00000000000..240c538d93d
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h
@@ -0,0 +1,46 @@
+//===-- sanitizer_symbolizer_mac.h ------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Header for Mac-specific "atos" symbolizer.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_MAC_H
+#define SANITIZER_SYMBOLIZER_MAC_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+class DlAddrSymbolizer : public SymbolizerTool {
+ public:
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+};
+
+class AtosSymbolizerProcess;
+
+class AtosSymbolizer : public SymbolizerTool {
+ public:
+ explicit AtosSymbolizer(const char *path, LowLevelAllocator *allocator);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ private:
+ AtosSymbolizerProcess *process_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+
+#endif // SANITIZER_SYMBOLIZER_MAC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index 7aead970382..e4ff525d6b0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -14,18 +14,25 @@
#if SANITIZER_POSIX
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
-#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_internal.h"
#include "sanitizer_symbolizer_libbacktrace.h"
+#include "sanitizer_symbolizer_mac.h"
#include <errno.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
+#if SANITIZER_MAC
+#include <util.h> // for forkpty()
+#endif // SANITIZER_MAC
+
// C++ demangling function, as required by Itanium C++ ABI. This is weak,
// because we do not require a C++ ABI library to be linked to a program
// using sanitizers; if it's not present, we'll just use the mangled name.
@@ -38,7 +45,7 @@ namespace __cxxabiv1 {
namespace __sanitizer {
// Attempts to demangle the name via __cxa_demangle from __cxxabiv1.
-static const char *DemangleCXXABI(const char *name) {
+const char *DemangleCXXABI(const char *name) {
// FIXME: __cxa_demangle aggressively insists on allocating memory.
// There's not much we can do about that, short of providing our
// own demangler (libc++abi's implementation could be adapted so that
@@ -52,94 +59,118 @@ static const char *DemangleCXXABI(const char *name) {
return name;
}
-#if defined(__x86_64__)
-static const char* const kSymbolizerArch = "--default-arch=x86_64";
-#elif defined(__i386__)
-static const char* const kSymbolizerArch = "--default-arch=i386";
-#elif defined(__powerpc64__)
-static const char* const kSymbolizerArch = "--default-arch=powerpc64";
-#else
-static const char* const kSymbolizerArch = "--default-arch=unknown";
-#endif
-
-static const int kSymbolizerStartupTimeMillis = 10;
-
-// Creates external symbolizer connected via pipe, user should write
-// to output_fd and read from input_fd.
-static bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
- int *input_fd, int *output_fd) {
- if (!FileExists(path_to_symbolizer)) {
- Report("WARNING: invalid path to external symbolizer!\n");
+bool SymbolizerProcess::StartSymbolizerSubprocess() {
+ if (!FileExists(path_)) {
+ if (!reported_invalid_path_) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ reported_invalid_path_ = true;
+ }
return false;
}
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
- for (int j = 0; j < i; j++) {
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
+ int pid;
+ if (use_forkpty_) {
+#if SANITIZER_MAC
+ fd_t fd = kInvalidFd;
+ // Use forkpty to disable buffering in the new terminal.
+ pid = forkpty(&fd, 0, 0, 0);
+ if (pid == -1) {
+ // forkpty() failed.
+ Report("WARNING: failed to fork external symbolizer (errno: %d)\n",
+ errno);
return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
+ } else if (pid == 0) {
+ // Child subprocess.
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ execv(path_, const_cast<char **>(&argv[0]));
+ internal__exit(1);
+ }
+
+ // Continue execution in parent process.
+ input_fd_ = output_fd_ = fd;
+
+ // Disable echo in the new terminal, disable CR.
+ struct termios termflags;
+ tcgetattr(fd, &termflags);
+ termflags.c_oflag &= ~ONLCR;
+ termflags.c_lflag &= ~ECHO;
+ tcsetattr(fd, TCSANOW, &termflags);
+#else // SANITIZER_MAC
+ UNIMPLEMENTED();
+#endif // SANITIZER_MAC
+ } else {
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+ // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
internal_close(sock_pair[j][0]);
internal_close(sock_pair[j][1]);
}
- break;
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
}
}
- }
- CHECK(infd);
- CHECK(outfd);
+ CHECK(infd);
+ CHECK(outfd);
+
+ // Real fork() may call user callbacks registered with pthread_atfork().
+ pid = internal_fork();
+ if (pid == -1) {
+ // Fork() failed.
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ Report("WARNING: failed to fork external symbolizer "
+ " (errno: %d)\n", errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--)
+ internal_close(fd);
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ execv(path_, const_cast<char **>(&argv[0]));
+ internal__exit(1);
+ }
- int pid = fork();
- if (pid == -1) {
- // Fork() failed.
- internal_close(infd[0]);
- internal_close(infd[1]);
+ // Continue execution in parent process.
internal_close(outfd[0]);
- internal_close(outfd[1]);
- Report("WARNING: failed to fork external symbolizer "
- " (errno: %d)\n", errno);
- return false;
- } else if (pid == 0) {
- // Child subprocess.
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl(path_to_symbolizer, path_to_symbolizer, kSymbolizerArch, (char*)0);
- internal__exit(1);
+ input_fd_ = infd[0];
+ output_fd_ = outfd[1];
}
- // Continue execution in parent process.
- internal_close(outfd[0]);
- internal_close(infd[1]);
- *input_fd = infd[0];
- *output_fd = outfd[1];
-
// Check that symbolizer subprocess started successfully.
int pid_status;
SleepForMillis(kSymbolizerStartupTimeMillis);
@@ -153,154 +184,110 @@ static bool StartSymbolizerSubprocess(const char *path_to_symbolizer,
return true;
}
-// Extracts the prefix of "str" that consists of any characters not
-// present in "delims" string, and copies this prefix to "result", allocating
-// space for it.
-// Returns a pointer to "str" after skipping extracted prefix and first
-// delimiter char.
-static const char *ExtractToken(const char *str, const char *delims,
- char **result) {
- uptr prefix_len = internal_strcspn(str, delims);
- *result = (char*)InternalAlloc(prefix_len + 1);
- internal_memcpy(*result, str, prefix_len);
- (*result)[prefix_len] = '\0';
- const char *prefix_end = str + prefix_len;
- if (*prefix_end != '\0') prefix_end++;
- return prefix_end;
-}
+class Addr2LineProcess : public SymbolizerProcess {
+ public:
+ Addr2LineProcess(const char *path, const char *module_name)
+ : SymbolizerProcess(path), module_name_(internal_strdup(module_name)) {}
+
+ const char *module_name() const { return module_name_; }
-// Same as ExtractToken, but converts extracted token to integer.
-static const char *ExtractInt(const char *str, const char *delims,
- int *result) {
- char *buff;
- const char *ret = ExtractToken(str, delims, &buff);
- if (buff != 0) {
- *result = (int)internal_atoll(buff);
+ private:
+ void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const override {
+ int i = 0;
+ argv[i++] = path_to_binary;
+ argv[i++] = "-iCfe";
+ argv[i++] = module_name_;
+ argv[i++] = nullptr;
}
- InternalFree(buff);
- return ret;
-}
-static const char *ExtractUptr(const char *str, const char *delims,
- uptr *result) {
- char *buff;
- const char *ret = ExtractToken(str, delims, &buff);
- if (buff != 0) {
- *result = (uptr)internal_atoll(buff);
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const override;
+
+ bool ReadFromSymbolizer(char *buffer, uptr max_length) override {
+ if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))
+ return false;
+ // We need to cut output_terminator_ off the end of the buffer; addr2line
+ // appends it to mark the end of its meaningful output. We cannot scan the
+ // buffer from its beginning, because it may legitimately start with
+ // output_terminator_ when the given offset is invalid, so start scanning
+ // from the second character.
+ char *garbage = internal_strstr(buffer + 1, output_terminator_);
+ // This should never be NULL, since the buffer must end with
+ // output_terminator_.
+ CHECK(garbage);
+ // Trim the buffer.
+ garbage[0] = '\0';
+ return true;
}
- InternalFree(buff);
- return ret;
+
+ const char *module_name_; // Owned, leaked.
+ static const char output_terminator_[];
+};
+
+const char Addr2LineProcess::output_terminator_[] = "??\n??:0\n";
+
+bool Addr2LineProcess::ReachedEndOfOutput(const char *buffer,
+ uptr length) const {
+ const size_t kTerminatorLen = sizeof(output_terminator_) - 1;
+ // Skip if we have read only kTerminatorLen bytes, because addr2line output
+ // must consist of at least two pairs of lines:
+ // 1. The first pair corresponds to the offset being symbolized
+ // (and may itself equal output_terminator_ if the offset is invalid).
+ // 2. The second pair is output_terminator_ itself, marking the end of output.
+ if (length <= kTerminatorLen) return false;
+ // addr2line output must end with output_terminator_.
+ return !internal_memcmp(buffer + length - kTerminatorLen,
+ output_terminator_, kTerminatorLen);
}
-// ExternalSymbolizer encapsulates communication between the tool and
-// external symbolizer program, running in a different subprocess,
-// For now we assume the following protocol:
-// For each request of the form
-// <module_name> <module_offset>
-// passed to STDIN, external symbolizer prints to STDOUT response:
-// <function_name>
-// <file_name>:<line_number>:<column_number>
-// <function_name>
-// <file_name>:<line_number>:<column_number>
-// ...
-// <empty line>
-// ExternalSymbolizer may not be used from two threads simultaneously.
-class ExternalSymbolizer {
+class Addr2LinePool : public SymbolizerTool {
public:
- explicit ExternalSymbolizer(const char *path)
- : path_(path),
- input_fd_(kInvalidFd),
- output_fd_(kInvalidFd),
- times_restarted_(0),
- failed_to_start_(false) {
- CHECK(path_);
- CHECK_NE(path[0], '\0');
- }
-
- char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
- for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
- // Start or restart symbolizer if we failed to send command to it.
- if (char *res = SendCommandImpl(is_data, module_name, module_offset))
- return res;
- Restart();
- }
- if (!failed_to_start_) {
- Report("WARNING: Failed to use and restart external symbolizer!\n");
- failed_to_start_ = true;
+ explicit Addr2LinePool(const char *addr2line_path,
+ LowLevelAllocator *allocator)
+ : addr2line_path_(addr2line_path), allocator_(allocator),
+ addr2line_pool_(16) {}
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
+ if (const char *buf =
+ SendCommand(stack->info.module, stack->info.module_offset)) {
+ ParseSymbolizePCOutput(buf, stack);
+ return true;
}
- return 0;
+ return false;
}
- void Flush() {
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
+ return false;
}
private:
- bool Restart() {
- if (input_fd_ != kInvalidFd)
- internal_close(input_fd_);
- if (output_fd_ != kInvalidFd)
- internal_close(output_fd_);
- return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
- }
-
- char *SendCommandImpl(bool is_data, const char *module_name,
- uptr module_offset) {
- if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
- return 0;
- CHECK(module_name);
- internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
- is_data ? "DATA " : "", module_name, module_offset);
- if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
- return 0;
- if (!readFromSymbolizer(buffer_, kBufferSize))
- return 0;
- return buffer_;
- }
-
- bool readFromSymbolizer(char *buffer, uptr max_length) {
- if (max_length == 0)
- return true;
- uptr read_len = 0;
- while (true) {
- uptr just_read = internal_read(input_fd_, buffer + read_len,
- max_length - read_len);
- // We can't read 0 bytes, as we don't expect external symbolizer to close
- // its stdout.
- if (just_read == 0 || just_read == (uptr)-1) {
- Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
- return false;
- }
- read_len += just_read;
- // Empty line marks the end of symbolizer output.
- if (read_len >= 2 && buffer[read_len - 1] == '\n' &&
- buffer[read_len - 2] == '\n') {
+ const char *SendCommand(const char *module_name, uptr module_offset) {
+ Addr2LineProcess *addr2line = 0;
+ for (uptr i = 0; i < addr2line_pool_.size(); ++i) {
+ if (0 ==
+ internal_strcmp(module_name, addr2line_pool_[i]->module_name())) {
+ addr2line = addr2line_pool_[i];
break;
}
}
- return true;
- }
-
- bool writeToSymbolizer(const char *buffer, uptr length) {
- if (length == 0)
- return true;
- uptr write_len = internal_write(output_fd_, buffer, length);
- if (write_len == 0 || write_len == (uptr)-1) {
- Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
- return false;
+ if (!addr2line) {
+ addr2line =
+ new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);
+ addr2line_pool_.push_back(addr2line);
}
- return true;
+ CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name()));
+ char buffer[kBufferSize];
+ internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n",
+ module_offset, dummy_address_);
+ return addr2line->SendCommand(buffer);
}
- const char *path_;
- int input_fd_;
- int output_fd_;
-
- static const uptr kBufferSize = 16 * 1024;
- char buffer_[kBufferSize];
-
- static const uptr kMaxTimesRestarted = 5;
- uptr times_restarted_;
- bool failed_to_start_;
+ static const uptr kBufferSize = 64;
+ const char *addr2line_path_;
+ LowLevelAllocator *allocator_;
+ InternalMmapVector<Addr2LineProcess*> addr2line_pool_;
+ static const uptr dummy_address_ =
+ FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
};
#if SANITIZER_SUPPORTS_WEAK_HOOKS
@@ -318,10 +305,8 @@ int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
int MaxLength);
} // extern "C"
-class InternalSymbolizer {
+class InternalSymbolizer : public SymbolizerTool {
public:
- typedef bool (*SanitizerSymbolizeFn)(const char*, u64, char*, int);
-
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
if (__sanitizer_symbolize_code != 0 &&
__sanitizer_symbolize_data != 0) {
@@ -330,20 +315,29 @@ class InternalSymbolizer {
return 0;
}
- char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
- SanitizerSymbolizeFn symbolize_fn = is_data ? __sanitizer_symbolize_data
- : __sanitizer_symbolize_code;
- if (symbolize_fn(module_name, module_offset, buffer_, kBufferSize))
- return buffer_;
- return 0;
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
+ bool result = __sanitizer_symbolize_code(
+ stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
+ if (result) ParseSymbolizePCOutput(buffer_, stack);
+ return result;
}
- void Flush() {
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
+ bool result = __sanitizer_symbolize_data(info->module, info->module_offset,
+ buffer_, kBufferSize);
+ if (result) {
+ ParseSymbolizeDataOutput(buffer_, info);
+ info->start += (addr - info->module_offset); // Add the base address.
+ }
+ return result;
+ }
+
+ void Flush() override {
if (__sanitizer_symbolize_flush)
__sanitizer_symbolize_flush();
}
- const char *Demangle(const char *name) {
+ const char *Demangle(const char *name) override {
if (__sanitizer_symbolize_demangle) {
for (uptr res_length = 1024;
res_length <= InternalSizeClassMap::kMaxSize;) {
@@ -370,247 +364,103 @@ class InternalSymbolizer {
};
#else // SANITIZER_SUPPORTS_WEAK_HOOKS
-class InternalSymbolizer {
+class InternalSymbolizer : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
- char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
- return 0;
- }
- void Flush() { }
- const char *Demangle(const char *name) { return name; }
};
#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
-class POSIXSymbolizer : public Symbolizer {
- public:
- POSIXSymbolizer(ExternalSymbolizer *external_symbolizer,
- InternalSymbolizer *internal_symbolizer,
- LibbacktraceSymbolizer *libbacktrace_symbolizer)
- : Symbolizer(),
- external_symbolizer_(external_symbolizer),
- internal_symbolizer_(internal_symbolizer),
- libbacktrace_symbolizer_(libbacktrace_symbolizer) {}
-
- uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
- BlockingMutexLock l(&mu_);
- if (max_frames == 0)
- return 0;
- LoadedModule *module = FindModuleForAddress(addr);
- if (module == 0)
- return 0;
- const char *module_name = module->full_name();
- uptr module_offset = addr - module->base_address();
- // First, try to use libbacktrace symbolizer (if it's available).
- if (libbacktrace_symbolizer_ != 0) {
- mu_.CheckLocked();
- uptr res = libbacktrace_symbolizer_->SymbolizeCode(
- addr, frames, max_frames, module_name, module_offset);
- if (res > 0)
- return res;
- }
- const char *str = SendCommand(false, module_name, module_offset);
- if (str == 0) {
- // Symbolizer was not initialized or failed. Fill only data
- // about module name and offset.
- AddressInfo *info = &frames[0];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- return 1;
- }
- uptr frame_id = 0;
- for (frame_id = 0; frame_id < max_frames; frame_id++) {
- AddressInfo *info = &frames[frame_id];
- char *function_name = 0;
- str = ExtractToken(str, "\n", &function_name);
- CHECK(function_name);
- if (function_name[0] == '\0') {
- // There are no more frames.
- break;
- }
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- info->function = function_name;
- // Parse <file>:<line>:<column> buffer.
- char *file_line_info = 0;
- str = ExtractToken(str, "\n", &file_line_info);
- CHECK(file_line_info);
- const char *line_info = ExtractToken(file_line_info, ":", &info->file);
- line_info = ExtractInt(line_info, ":", &info->line);
- line_info = ExtractInt(line_info, "", &info->column);
- InternalFree(file_line_info);
-
- // Functions and filenames can be "??", in which case we write 0
- // to address info to mark that names are unknown.
- if (0 == internal_strcmp(info->function, "??")) {
- InternalFree(info->function);
- info->function = 0;
- }
- if (0 == internal_strcmp(info->file, "??")) {
- InternalFree(info->file);
- info->file = 0;
- }
- }
- if (frame_id == 0) {
- // Make sure we return at least one frame.
- AddressInfo *info = &frames[0];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- frame_id = 1;
- }
- return frame_id;
- }
+const char *Symbolizer::PlatformDemangle(const char *name) {
+ return DemangleCXXABI(name);
+}
- bool SymbolizeData(uptr addr, DataInfo *info) {
- BlockingMutexLock l(&mu_);
- LoadedModule *module = FindModuleForAddress(addr);
- if (module == 0)
- return false;
- const char *module_name = module->full_name();
- uptr module_offset = addr - module->base_address();
- internal_memset(info, 0, sizeof(*info));
- info->address = addr;
- info->module = internal_strdup(module_name);
- info->module_offset = module_offset;
- if (libbacktrace_symbolizer_ != 0) {
- mu_.CheckLocked();
- if (libbacktrace_symbolizer_->SymbolizeData(info))
- return true;
- }
- const char *str = SendCommand(true, module_name, module_offset);
- if (str == 0)
- return true;
- str = ExtractToken(str, "\n", &info->name);
- str = ExtractUptr(str, " ", &info->start);
- str = ExtractUptr(str, "\n", &info->size);
- info->start += module->base_address();
- return true;
+void Symbolizer::PlatformPrepareForSandboxing() {}
+
+static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
+ const char *path = common_flags()->external_symbolizer_path;
+ const char *binary_name = path ? StripModuleName(path) : "";
+ if (path && path[0] == '\0') {
+ VReport(2, "External symbolizer is explicitly disabled.\n");
+ return nullptr;
+ } else if (!internal_strcmp(binary_name, "llvm-symbolizer")) {
+ VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
+ return new(*allocator) LLVMSymbolizer(path, allocator);
+ } else if (!internal_strcmp(binary_name, "atos")) {
+#if SANITIZER_MAC
+ VReport(2, "Using atos at user-specified path: %s\n", path);
+ return new(*allocator) AtosSymbolizer(path, allocator);
+#else // SANITIZER_MAC
+ Report("ERROR: Using `atos` is only supported on Darwin.\n");
+ Die();
+#endif // SANITIZER_MAC
+ } else if (!internal_strcmp(binary_name, "addr2line")) {
+ VReport(2, "Using addr2line at user-specified path: %s\n", path);
+ return new(*allocator) Addr2LinePool(path, allocator);
+ } else if (path) {
+ Report("ERROR: External symbolizer path is set to '%s' which isn't "
+ "a known symbolizer. Please set the path to the llvm-symbolizer "
+ "binary or other known tool.\n", path);
+ Die();
}
- bool IsAvailable() {
- return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
- libbacktrace_symbolizer_ != 0;
+ // Otherwise no external symbolizer was specified; search $PATH for known tools.
+ CHECK(path == nullptr);
+ if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
+ VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
+ return new(*allocator) LLVMSymbolizer(found_path, allocator);
}
-
- bool IsExternalAvailable() {
- return external_symbolizer_ != 0;
+#if SANITIZER_MAC
+ if (const char *found_path = FindPathToBinary("atos")) {
+ VReport(2, "Using atos found at: %s\n", found_path);
+ return new(*allocator) AtosSymbolizer(found_path, allocator);
}
-
- void Flush() {
- BlockingMutexLock l(&mu_);
- if (internal_symbolizer_ != 0) {
- SymbolizerScope sym_scope(this);
- internal_symbolizer_->Flush();
+#endif // SANITIZER_MAC
+ if (common_flags()->allow_addr2line) {
+ if (const char *found_path = FindPathToBinary("addr2line")) {
+ VReport(2, "Using addr2line found at: %s\n", found_path);
+ return new(*allocator) Addr2LinePool(found_path, allocator);
}
- if (external_symbolizer_ != 0)
- external_symbolizer_->Flush();
}
+ return nullptr;
+}
- const char *Demangle(const char *name) {
- BlockingMutexLock l(&mu_);
- // Run hooks even if we don't use internal symbolizer, as cxxabi
- // demangle may call system functions.
- SymbolizerScope sym_scope(this);
- if (internal_symbolizer_ != 0)
- return internal_symbolizer_->Demangle(name);
- if (libbacktrace_symbolizer_ != 0) {
- const char *demangled = libbacktrace_symbolizer_->Demangle(name);
- if (demangled)
- return demangled;
- }
- return DemangleCXXABI(name);
+static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
+ LowLevelAllocator *allocator) {
+ if (!common_flags()->symbolize) {
+ VReport(2, "Symbolizer is disabled.\n");
+ return;
}
-
- void PrepareForSandboxing() {
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
- BlockingMutexLock l(&mu_);
- // Cache /proc/self/exe on Linux.
- CacheBinaryName();
-#endif
+ if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+ VReport(2, "Using internal symbolizer.\n");
+ list->push_back(tool);
+ return;
}
-
- private:
- char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
- mu_.CheckLocked();
- // First, try to use internal symbolizer.
- if (internal_symbolizer_) {
- SymbolizerScope sym_scope(this);
- return internal_symbolizer_->SendCommand(is_data, module_name,
- module_offset);
- }
- // Otherwise, fall back to external symbolizer.
- if (external_symbolizer_) {
- return external_symbolizer_->SendCommand(is_data, module_name,
- module_offset);
- }
- return 0;
+ if (SymbolizerTool *tool = LibbacktraceSymbolizer::get(allocator)) {
+ VReport(2, "Using libbacktrace symbolizer.\n");
+ list->push_back(tool);
+ return;
}
- LoadedModule *FindModuleForAddress(uptr address) {
- mu_.CheckLocked();
- bool modules_were_reloaded = false;
- if (modules_ == 0 || !modules_fresh_) {
- modules_ = (LoadedModule*)(symbolizer_allocator_.Allocate(
- kMaxNumberOfModuleContexts * sizeof(LoadedModule)));
- CHECK(modules_);
- n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts,
- /* filter */ 0);
- // FIXME: Return this check when GetListOfModules is implemented on Mac.
- // CHECK_GT(n_modules_, 0);
- CHECK_LT(n_modules_, kMaxNumberOfModuleContexts);
- modules_fresh_ = true;
- modules_were_reloaded = true;
- }
- for (uptr i = 0; i < n_modules_; i++) {
- if (modules_[i].containsAddress(address)) {
- return &modules_[i];
- }
- }
- // Reload the modules and look up again, if we haven't tried it yet.
- if (!modules_were_reloaded) {
- // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors.
- // It's too aggressive to reload the list of modules each time we fail
- // to find a module for a given address.
- modules_fresh_ = false;
- return FindModuleForAddress(address);
- }
- return 0;
+ if (SymbolizerTool *tool = ChooseExternalSymbolizer(allocator)) {
+ list->push_back(tool);
}
- // 16K loaded modules should be enough for everyone.
- static const uptr kMaxNumberOfModuleContexts = 1 << 14;
- LoadedModule *modules_; // Array of module descriptions is leaked.
- uptr n_modules_;
- // If stale, need to reload the modules before looking up addresses.
- bool modules_fresh_;
- BlockingMutex mu_;
-
- ExternalSymbolizer *external_symbolizer_; // Leaked.
- InternalSymbolizer *const internal_symbolizer_; // Leaked.
- LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
-};
+#if SANITIZER_MAC
+ VReport(2, "Using dladdr symbolizer.\n");
+ list->push_back(new(*allocator) DlAddrSymbolizer());
+#endif // SANITIZER_MAC
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
- InternalSymbolizer* internal_symbolizer =
- InternalSymbolizer::get(&symbolizer_allocator_);
- ExternalSymbolizer *external_symbolizer = 0;
- LibbacktraceSymbolizer *libbacktrace_symbolizer = 0;
-
- if (!internal_symbolizer) {
- libbacktrace_symbolizer =
- LibbacktraceSymbolizer::get(&symbolizer_allocator_);
- if (!libbacktrace_symbolizer) {
- // Find path to llvm-symbolizer if it's not provided.
- if (!path_to_external)
- path_to_external = FindPathToBinary("llvm-symbolizer");
- if (path_to_external && path_to_external[0] != '\0')
- external_symbolizer = new(symbolizer_allocator_)
- ExternalSymbolizer(path_to_external);
- }
+ if (list->size() == 0) {
+ Report("WARNING: no internal or external symbolizer found.\n");
}
+}
- return new(symbolizer_allocator_) POSIXSymbolizer(
- external_symbolizer, internal_symbolizer, libbacktrace_symbolizer);
+Symbolizer *Symbolizer::PlatformInit() {
+ IntrusiveList<SymbolizerTool> list;
+ list.clear();
+ ChooseSymbolizerTools(&list, &symbolizer_allocator_);
+ return new(symbolizer_allocator_) Symbolizer(list);
}
} // namespace __sanitizer
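
The Addr2LinePool/Addr2LineProcess pair above defines a tiny protocol: every request writes the real module offset followed by dummy_address_ (an address expected to be invalid), so the reply is known to be complete once it ends with the "??\n??:0\n" answer produced for that dummy. A standalone restatement of the completeness check, illustrative only and outside the sanitizer runtime:

```cpp
#include <cstdio>
#include <cstring>

static const char kTerminator[] = "??\n??:0\n";

static bool ReachedEnd(const char *buffer, size_t length) {
  const size_t kTerminatorLen = sizeof(kTerminator) - 1;
  // A reply holds at least one real answer plus the terminator, so a buffer
  // that is nothing but the terminator cannot be complete yet.
  if (length <= kTerminatorLen) return false;
  return !std::memcmp(buffer + length - kTerminatorLen, kTerminator,
                      kTerminatorLen);
}

int main() {
  const char reply[] = "main\n/src/app.c:42\n??\n??:0\n";
  std::printf("complete: %d\n", ReachedEnd(reply, sizeof(reply) - 1));  // 1
  return 0;
}
```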
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
index 446de8af293..dadb0eaaf6e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -12,11 +12,270 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#include "sanitizer_symbolizer.h"
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+#pragma comment(lib, "dbghelp.lib")
+
+#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) { return 0; }
+namespace {
+
+class WinSymbolizerTool : public SymbolizerTool {
+ public:
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
+ return false;
+ }
+ const char *Demangle(const char *name) override;
+};
+
+bool is_dbghelp_initialized = false;
+
+bool TrySymInitialize() {
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
+ return SymInitialize(GetCurrentProcess(), 0, TRUE);
+ // FIXME: We don't call SymCleanup() on exit yet - should we?
+}
+
+// Initializes DbgHelp library, if it's not yet initialized. Calls to this
+// function should be synchronized with respect to other calls to DbgHelp API
+// (e.g. from WinSymbolizerTool).
+void InitializeDbgHelpIfNeeded() {
+ if (is_dbghelp_initialized)
+ return;
+ if (!TrySymInitialize()) {
+ // OK, maybe the client app has called SymInitialize already.
+ // That's a bit unfortunate for us as all the DbgHelp functions are
+ // single-threaded and we can't coordinate with the app.
+ // FIXME: Can we stop the other threads at this point?
+ // Anyways, we have to reconfigure stuff to make sure that SymInitialize
+ // has all the appropriate options set.
+ // Cross our fingers and reinitialize DbgHelp.
+ Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
+ Report("*** Most likely this means that the app is already ***\n");
+ Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
+ Report("*** Due to technical reasons, symbolization might crash ***\n");
+ Report("*** or produce wrong results. ***\n");
+ SymCleanup(GetCurrentProcess());
+ TrySymInitialize();
+ }
+ is_dbghelp_initialized = true;
+
+ // When an executable is run from a location different from the one where it
+ // was originally built, we may not see the nearby PDB files.
+ // To work around this, let's append the directory of the main module
+ // to the symbol search path. All the failures below are not fatal.
+ const size_t kSymPathSize = 2048;
+ static wchar_t path_buffer[kSymPathSize + 1 + MAX_PATH];
+ if (!SymGetSearchPathW(GetCurrentProcess(), path_buffer, kSymPathSize)) {
+ Report("*** WARNING: Failed to SymGetSearchPathW ***\n");
+ return;
+ }
+ size_t sz = wcslen(path_buffer);
+ if (sz) {
+ CHECK_EQ(0, wcscat_s(path_buffer, L";"));
+ sz++;
+ }
+ DWORD res = GetModuleFileNameW(NULL, path_buffer + sz, MAX_PATH);
+ if (res == 0 || res == MAX_PATH) {
+ Report("*** WARNING: Failed to getting the EXE directory ***\n");
+ return;
+ }
+ // Write the zero character in place of the last backslash to get the
+ // directory of the main module at the end of path_buffer.
+ wchar_t *last_bslash = wcsrchr(path_buffer + sz, L'\\');
+ CHECK_NE(last_bslash, 0);
+ *last_bslash = L'\0';
+ if (!SymSetSearchPathW(GetCurrentProcess(), path_buffer)) {
+ Report("*** WARNING: Failed to SymSetSearchPathW\n");
+ return;
+ }
+}
+
+} // namespace
+
+bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
+ InitializeDbgHelpIfNeeded();
+
+ // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ DWORD64 offset = 0;
+ BOOL got_objname = SymFromAddr(GetCurrentProcess(),
+ (DWORD64)addr, &offset, symbol);
+ if (!got_objname)
+ return false;
+
+ DWORD unused;
+ IMAGEHLP_LINE64 line_info;
+ line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,
+ &unused, &line_info);
+ frame->info.function = internal_strdup(symbol->Name);
+ frame->info.function_offset = (uptr)offset;
+ if (got_fileline) {
+ frame->info.file = internal_strdup(line_info.FileName);
+ frame->info.line = line_info.LineNumber;
+ }
+ // Only consider this a successful symbolization attempt if we got file info.
+ // Otherwise, try llvm-symbolizer.
+ return got_fileline;
+}
+
+const char *WinSymbolizerTool::Demangle(const char *name) {
+ CHECK(is_dbghelp_initialized);
+ static char demangle_buffer[1000];
+ if (name[0] == '\01' &&
+ UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer),
+ UNDNAME_NAME_ONLY))
+ return demangle_buffer;
+ else
+ return name;
+}
+
+const char *Symbolizer::PlatformDemangle(const char *name) {
+ return name;
+}
+
+void Symbolizer::PlatformPrepareForSandboxing() {
+ // Do nothing.
+}
+
+namespace {
+struct ScopedHandle {
+ ScopedHandle() : h_(nullptr) {}
+ explicit ScopedHandle(HANDLE h) : h_(h) {}
+ ~ScopedHandle() {
+ if (h_)
+ ::CloseHandle(h_);
+ }
+ HANDLE get() { return h_; }
+ HANDLE *receive() { return &h_; }
+ HANDLE release() {
+ HANDLE h = h_;
+ h_ = nullptr;
+ return h;
+ }
+ HANDLE h_;
+};
+} // namespace
+
+bool SymbolizerProcess::StartSymbolizerSubprocess() {
+ // Create inherited pipes for stdin and stdout.
+ ScopedHandle stdin_read, stdin_write;
+ ScopedHandle stdout_read, stdout_write;
+ SECURITY_ATTRIBUTES attrs;
+ attrs.nLength = sizeof(SECURITY_ATTRIBUTES);
+ attrs.bInheritHandle = TRUE;
+ attrs.lpSecurityDescriptor = nullptr;
+ if (!::CreatePipe(stdin_read.receive(), stdin_write.receive(), &attrs, 0) ||
+ !::CreatePipe(stdout_read.receive(), stdout_write.receive(), &attrs, 0)) {
+ VReport(2, "WARNING: %s CreatePipe failed (error code: %d)\n",
+ SanitizerToolName, path_, GetLastError());
+ return false;
+ }
+
+ // Don't inherit the writing end of stdin or the reading end of stdout.
+ if (!SetHandleInformation(stdin_write.get(), HANDLE_FLAG_INHERIT, 0) ||
+ !SetHandleInformation(stdout_read.get(), HANDLE_FLAG_INHERIT, 0)) {
+ VReport(2, "WARNING: %s SetHandleInformation failed (error code: %d)\n",
+ SanitizerToolName, path_, GetLastError());
+ return false;
+ }
+
+ // Compute the command line. Wrap double quotes around everything.
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ InternalScopedString command_line(kMaxPathLength * 3);
+ for (int i = 0; argv[i]; i++) {
+ const char *arg = argv[i];
+ int arglen = internal_strlen(arg);
+ // Check that tool command lines are simple and that complete escaping is
+ // unnecessary.
+ CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
+ CHECK(!internal_strstr(arg, "\\\\") &&
+ "double backslashes in args unsupported");
+ CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
+ "args ending in backslash and empty args unsupported");
+ command_line.append("\"%s\" ", arg);
+ }
+ VReport(3, "Launching symbolizer command: %s\n", command_line.data());
+
+ // Launch llvm-symbolizer with stdin and stdout redirected.
+ STARTUPINFOA si;
+ memset(&si, 0, sizeof(si));
+ si.cb = sizeof(si);
+ si.dwFlags |= STARTF_USESTDHANDLES;
+ si.hStdInput = stdin_read.get();
+ si.hStdOutput = stdout_write.get();
+ PROCESS_INFORMATION pi;
+ memset(&pi, 0, sizeof(pi));
+ if (!CreateProcessA(path_, // Executable
+ command_line.data(), // Command line
+ nullptr, // Process handle not inheritable
+ nullptr, // Thread handle not inheritable
+ TRUE, // Set handle inheritance to TRUE
+ 0, // Creation flags
+ nullptr, // Use parent's environment block
+ nullptr, // Use parent's starting directory
+ &si, &pi)) {
+ VReport(2, "WARNING: %s failed to create process for %s (error code: %d)\n",
+ SanitizerToolName, path_, GetLastError());
+ return false;
+ }
+
+ // Process creation succeeded, so transfer handle ownership into the fields.
+ input_fd_ = stdout_read.release();
+ output_fd_ = stdin_write.release();
+
+ // The llvm-symbolizer process is responsible for quitting itself when the
+ // stdin pipe is closed, so we don't need these handles. Close them to prevent
+ // leaks. If we ever want to try to kill the symbolizer process from the
+ // parent, we'll want to hang on to these handles.
+ CloseHandle(pi.hProcess);
+ CloseHandle(pi.hThread);
+ return true;
+}
+
+static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
+ LowLevelAllocator *allocator) {
+ if (!common_flags()->symbolize) {
+ VReport(2, "Symbolizer is disabled.\n");
+ return;
+ }
+
+ // Add llvm-symbolizer in case the binary has dwarf.
+ const char *user_path = common_flags()->external_symbolizer_path;
+ const char *path =
+ user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
+ if (path) {
+ VReport(2, "Using llvm-symbolizer at %spath: %s\n",
+ user_path ? "user-specified " : "", path);
+ list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));
+ } else {
+ if (user_path && user_path[0] == '\0') {
+ VReport(2, "External symbolizer is explicitly disabled.\n");
+ } else {
+ VReport(2, "External symbolizer is not present.\n");
+ }
+ }
+
+ // Add the dbghelp based symbolizer.
+ list->push_back(new(*allocator) WinSymbolizerTool());
+}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ IntrusiveList<SymbolizerTool> list;
+ list.clear();
+ ChooseSymbolizerTools(&list, &symbolizer_allocator_);
+
+ return new(symbolizer_allocator_) Symbolizer(list);
+}
} // namespace __sanitizer
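
StartSymbolizerSubprocess() above builds the child command line by wrapping every argument in double quotes; the CHECKs are what make that naive quoting safe, since symbolizer arguments are known to contain no quotes, no double backslashes, and no trailing backslash. A standalone restatement of that construction — the BuildCommandLine helper is an invention of this sketch, not part of the patch:

```cpp
#include <cassert>
#include <cstdio>
#include <cstring>
#include <string>

static std::string BuildCommandLine(const char *const *argv) {
  std::string cmd;
  for (int i = 0; argv[i]; i++) {
    const char *arg = argv[i];
    size_t len = std::strlen(arg);
    // Mirror the CHECKs above: simple arguments only, no escaping needed.
    assert(!std::strchr(arg, '"'));
    assert(!std::strstr(arg, "\\\\"));
    assert(len > 0 && arg[len - 1] != '\\');
    cmd += '"';
    cmd += arg;
    cmd += "\" ";
  }
  return cmd;
}

int main() {
  const char *argv[] = {"llvm-symbolizer.exe", "--demangle", nullptr};
  std::printf("%s\n", BuildCommandLine(argv).c_str());
  // Prints: "llvm-symbolizer.exe" "--demangle"
  return 0;
}
```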
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
index 6b2c915a3bc..3bc50d972a4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc
@@ -9,7 +9,17 @@
//
//===----------------------------------------------------------------------===//
-#define internal_syscall syscall
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+# define SYSCALL(name) SYS_ ## name
+#else
+# define SYSCALL(name) __NR_ ## name
+#endif
+
+#if (SANITIZER_FREEBSD || SANITIZER_MAC) && defined(__x86_64__)
+# define internal_syscall __syscall
+# else
+# define internal_syscall syscall
+#endif
bool internal_iserror(uptr retval, int *rverrno) {
if (retval == (uptr)-1) {
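
The SYSCALL() name mapping and the internal_syscall fallback above let platform-independent code spell raw system calls uniformly: __NR_* numbers on Linux, SYS_* on FreeBSD/Mac, with __syscall used on x86_64 FreeBSD/Mac. A hypothetical Linux-only illustration of the intended call-site shape; the two defines are repeated only to make the snippet self-contained:

```cpp
#include <sys/syscall.h>
#include <unistd.h>

#define SYSCALL(name) __NR_ ## name  // Linux spelling; SYS_ ## name elsewhere.
#define internal_syscall syscall     // generic fallback, as defined above

int main() {
  // Equivalent to write(1, "hi\n", 3), routed through the raw syscall layer.
  internal_syscall(SYSCALL(write), 1, "hi\n", 3);
  return 0;
}
```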
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc
new file mode 100644
index 00000000000..64d6322651a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc
@@ -0,0 +1,136 @@
+//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/aarch64.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr __internal_syscall(u64 nr) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0");
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall0(n) \
+ (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall1(n, a1) \
+ (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4, long arg5) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ register u64 x4 asm("x4") = arg5;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4, long arg5, long arg6) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ register u64 x4 asm("x4") = arg5;
+ register u64 x5 asm("x5") = arg6;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
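
The macro block at the end of this file selects the right fixed-arity __internal_syscall overload at preprocessing time: __SYSCALL_NARGS counts the macro arguments (the count excludes the syscall number itself, which is why internal_syscall(nr) dispatches to the 0-argument variant), and __SYSCALL_CONCAT pastes that count onto the overload name. A minimal standalone demonstration of the same trick, with hypothetical demo0/demo1 functions:

```cpp
#include <cstdio>

#define NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define NARGS(...) NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define CONCAT_X(a, b) a##b
#define CONCAT(a, b) CONCAT_X(a, b)
// DISPATCH(demo, x) expands to demo0(x); DISPATCH(demo, x, y) to demo1(x, y).
#define DISPATCH(base, ...) CONCAT(base, NARGS(__VA_ARGS__))(__VA_ARGS__)

static long demo0(long nr) { return nr; }
static long demo1(long nr, long a1) { return nr + a1; }

#define demo(...) DISPATCH(demo, __VA_ARGS__)

int main() {
  std::printf("%ld %ld\n", demo(10), demo(10, 5));  // prints "10 15"
  return 0;
}
```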
diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
index 8810c7faa4b..b610d66be4f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
@@ -9,6 +9,8 @@
//
//===----------------------------------------------------------------------===//
+#define SYSCALL(name) __NR_ ## name
+
static uptr internal_syscall(u64 nr) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11",
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
index 666955f6c9a..bfa610443c1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
@@ -15,8 +15,9 @@
namespace __sanitizer {
ThreadContextBase::ThreadContextBase(u32 tid)
- : tid(tid), unique_id(0), os_id(0), user_id(0), status(ThreadStatusInvalid),
- detached(false), reuse_count(0), parent_tid(0), next(0) {
+ : tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
+ status(ThreadStatusInvalid),
+ detached(false), parent_tid(0), next(0) {
name[0] = '\0';
}
@@ -76,7 +77,6 @@ void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
void ThreadContextBase::Reset() {
status = ThreadStatusInvalid;
- reuse_count++;
SetName(0);
OnReset();
}
@@ -86,10 +86,11 @@ void ThreadContextBase::Reset() {
const u32 ThreadRegistry::kUnknownTid = ~0U;
ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size)
+ u32 thread_quarantine_size, u32 max_reuse)
: context_factory_(factory),
max_threads_(max_threads),
thread_quarantine_size_(thread_quarantine_size),
+ max_reuse_(max_reuse),
mtx_(),
n_contexts_(0),
total_threads_(0),
@@ -128,8 +129,13 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
+#ifndef SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
+#else
+ Printf("race: limit on %u simultaneously alive goroutines is exceeded,"
+ " dying\n", max_threads_);
+#endif
Die();
}
CHECK_NE(tctx, 0);
@@ -210,7 +216,7 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
}
}
-void ThreadRegistry::DetachThread(u32 tid) {
+void ThreadRegistry::DetachThread(u32 tid, void *arg) {
BlockingMutexLock l(&mtx_);
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
@@ -219,6 +225,7 @@ void ThreadRegistry::DetachThread(u32 tid) {
Report("%s: Detach of non-existent thread\n", SanitizerToolName);
return;
}
+ tctx->OnDetached(arg);
if (tctx->status == ThreadStatusFinished) {
tctx->SetDead();
QuarantinePush(tctx);
@@ -275,6 +282,9 @@ void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
dead_threads_.pop_front();
CHECK_EQ(tctx->status, ThreadStatusDead);
tctx->Reset();
+ tctx->reuse_count++;
+ if (max_reuse_ > 0 && tctx->reuse_count >= max_reuse_)
+ return;
invalid_threads_.push_back(tctx);
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
index 81c270945d3..8d5f1ea3988 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h
@@ -36,13 +36,13 @@ class ThreadContextBase {
const u32 tid; // Thread ID. Main thread should have tid = 0.
u64 unique_id; // Unique thread ID.
+ u32 reuse_count; // Number of times this tid was reused.
uptr os_id; // PID (used for reporting).
uptr user_id; // Some opaque user thread id (e.g. pthread_t).
char name[64]; // As annotated by user.
ThreadStatus status;
bool detached;
- int reuse_count;
u32 parent_tid;
ThreadContextBase *next; // For storing thread contexts in a list.
@@ -66,6 +66,7 @@ class ThreadContextBase {
virtual void OnStarted(void *arg) {}
virtual void OnCreated(void *arg) {}
virtual void OnReset() {}
+ virtual void OnDetached(void *arg) {}
};
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
@@ -75,8 +76,9 @@ class ThreadRegistry {
static const u32 kUnknownTid;
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size);
- void GetNumberOfThreads(uptr *total = 0, uptr *running = 0, uptr *alive = 0);
+ u32 thread_quarantine_size, u32 max_reuse = 0);
+ void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
+ uptr *alive = nullptr);
uptr GetMaxAliveThreads();
void Lock() { mtx_.Lock(); }
@@ -108,7 +110,7 @@ class ThreadRegistry {
void SetThreadName(u32 tid, const char *name);
void SetThreadNameByUserId(uptr user_id, const char *name);
- void DetachThread(u32 tid);
+ void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
void FinishThread(u32 tid);
void StartThread(u32 tid, uptr os_id, void *arg);
@@ -117,6 +119,7 @@ class ThreadRegistry {
const ThreadContextFactory context_factory_;
const u32 max_threads_;
const u32 thread_quarantine_size_;
+ const u32 max_reuse_;
BlockingMutex mtx_;
@@ -138,6 +141,6 @@ class ThreadRegistry {
typedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock;
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_THREAD_REGISTRY_H
+#endif // SANITIZER_THREAD_REGISTRY_H
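
The new max_reuse constructor parameter caps how many times a quarantined thread slot may be recycled before QuarantinePush() retires it for good (0 keeps the old unlimited behaviour). A hedged sketch of how a tool inside the sanitizer tree might wire this up; the factory, the limits, and the heap allocation are placeholders rather than code from this patch:

```cpp
static ThreadContextBase *MyThreadContextFactory(u32 tid) {
  // A real tool allocates its own ThreadContextBase subclass here, usually
  // from a LowLevelAllocator rather than the C++ heap.
  return new ThreadContextBase(tid);
}

static ThreadRegistry *CreateThreadRegistry() {
  static const u32 kMaxTid = 1 << 13;           // simultaneous threads allowed
  static const u32 kThreadQuarantineSize = 64;  // finished contexts kept alive
  static const u32 kMaxTidReuse = 1 << 8;       // retire a tid after 256 reuses
  return new ThreadRegistry(MyThreadContextFactory, kMaxTid,
                            kThreadQuarantineSize, kMaxTidReuse);
}
```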
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
new file mode 100644
index 00000000000..7c6ef4f9028
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
@@ -0,0 +1,144 @@
+//===-- sanitizer_tls_get_addr.cc -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_tls_get_addr.h"
+
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_interceptors.h"
+
+namespace __sanitizer {
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+// The actual parameter that comes to __tls_get_addr
+// is a pointer to a struct with two words in it:
+struct TlsGetAddrParam {
+ uptr dso_id;
+ uptr offset;
+};
+
+// Glibc starting from 2.19 allocates TLS using __signal_safe_memalign,
+// which prepends a header of the following form.
+struct Glibc_2_19_tls_header {
+ uptr size;
+ uptr start;
+};
+
+// This must be static TLS
+__attribute__((tls_model("initial-exec")))
+static __thread DTLS dtls;
+
+// Make sure we properly destroy the DTLS objects:
+// this counter should never get too large.
+static atomic_uintptr_t number_of_live_dtls;
+
+static const uptr kDestroyedThread = -1;
+
+static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
+ if (!size) return;
+ VPrintf(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
+ UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+ atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
+}
+
+static inline void DTLS_Resize(uptr new_size) {
+ if (dtls.dtv_size >= new_size) return;
+ new_size = RoundUpToPowerOfTwo(new_size);
+ new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
+ DTLS::DTV *new_dtv =
+ (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+ uptr num_live_dtls =
+ atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
+ VPrintf(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
+ CHECK_LT(num_live_dtls, 1 << 20);
+ uptr old_dtv_size = dtls.dtv_size;
+ DTLS::DTV *old_dtv = dtls.dtv;
+ if (old_dtv_size)
+ internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
+ dtls.dtv = new_dtv;
+ dtls.dtv_size = new_size;
+ if (old_dtv_size)
+ DTLS_Deallocate(old_dtv, old_dtv_size);
+}
+
+void DTLS_Destroy() {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VPrintf(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
+ uptr s = dtls.dtv_size;
+ dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
+ DTLS_Deallocate(dtls.dtv, s);
+}
+
+#if defined(__powerpc64__)
+// This is glibc's TLS_DTV_OFFSET:
+// "Dynamic thread vector pointers point 0x8000 past the start of each
+// TLS block."
+static const uptr kDtvOffset = 0x8000;
+#else
+static const uptr kDtvOffset = 0;
+#endif
+
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
+ uptr static_tls_begin, uptr static_tls_end) {
+ if (!common_flags()->intercept_tls_get_addr) return 0;
+ TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
+ uptr dso_id = arg->dso_id;
+ if (dtls.dtv_size == kDestroyedThread) return 0;
+ DTLS_Resize(dso_id + 1);
+ if (dtls.dtv[dso_id].beg) return 0;
+ uptr tls_size = 0;
+ uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
+ VPrintf(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
+ "num_live_dtls %zd\n",
+ arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ atomic_load(&number_of_live_dtls, memory_order_relaxed));
+ if (dtls.last_memalign_ptr == tls_beg) {
+ tls_size = dtls.last_memalign_size;
+ VPrintf(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
+ tls_beg, tls_size);
+ } else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
+ // This is the static TLS block which was initialized / unpoisoned at thread
+ // creation.
+ VPrintf(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+ tls_size = 0;
+ } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
+ // We may want to check gnu_get_libc_version().
+ Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
+ tls_size = header->size;
+ tls_beg = header->start;
+ VPrintf(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
+ tls_beg, tls_size);
+ } else {
+ VPrintf(2, "__tls_get_addr: Can't guess glibc version\n");
+ // This may happen inside the DTOR of main thread, so just ignore it.
+ tls_size = 0;
+ }
+ dtls.dtv[dso_id].beg = tls_beg;
+ dtls.dtv[dso_id].size = tls_size;
+ return dtls.dtv + dso_id;
+}
+
+void DTLS_on_libc_memalign(void *ptr, uptr size) {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VPrintf(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
+ dtls.last_memalign_size = size;
+}
+
+DTLS *DTLS_Get() { return &dtls; }
+
+#else
+void DTLS_on_libc_memalign(void *ptr, uptr size) {}
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res) { return 0; }
+DTLS *DTLS_Get() { return 0; }
+void DTLS_Destroy() {}
+#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+} // namespace __sanitizer
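Editorial note: the glibc >= 2.19 probe in DTLS_on_tls_get_addr above relies on __signal_safe_memalign storing a {size, start} header immediately before the chunk it returns, which is what the `tls_beg % 4096 == sizeof(Glibc_2_19_tls_header)` check exploits. A condensed, standalone sketch of just that probe (the TlsHeader struct and GuessDtlsBounds helper are illustrative names, not part of the patch):

// Hypothetical restatement of the glibc >= 2.19 probe shown above.
#include <cstdint>

struct TlsHeader { uintptr_t size, start; };  // mirrors Glibc_2_19_tls_header

// tls_beg is the guessed chunk start: res - arg->offset - kDtvOffset.
static bool GuessDtlsBounds(uintptr_t tls_beg, uintptr_t *beg, uintptr_t *size) {
  if (tls_beg % 4096 == sizeof(TlsHeader)) {
    // __signal_safe_memalign placed {size, start} right before the chunk.
    const TlsHeader *hdr = reinterpret_cast<const TlsHeader *>(tls_beg) - 1;
    *beg = hdr->start;
    *size = hdr->size;
    return true;
  }
  return false;  // glibc <= 2.18: fall back to the __libc_memalign interceptor
}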
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h
new file mode 100644
index 00000000000..e4f8c0c535e
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h
@@ -0,0 +1,59 @@
+//===-- sanitizer_tls_get_addr.h --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+// All this magic is specific to glibc and is required to work around
+// the lack of an interface that would tell us about the Dynamic TLS (DTLS).
+// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
+//
+// Matters get worse because the glibc implementation changed between
+// 2.18 and 2.19:
+// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
+//
+// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// which we intercept and thus know where the DTLS is.
+// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
+// which is an internal function that wraps a mmap call, neither of which
+// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
+// header which we can use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_TLS_GET_ADDR_H
+#define SANITIZER_TLS_GET_ADDR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+struct DTLS {
+ // Array of DTLS chunks for the current thread.
+ // If beg == 0, the chunk is unused.
+ struct DTV {
+ uptr beg, size;
+ };
+
+ uptr dtv_size;
+ DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+
+ // Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cc
+ uptr last_memalign_size;
+ uptr last_memalign_ptr;
+};
+
+// Returns pointer and size of a linker-allocated TLS block.
+// Each block is returned exactly once.
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,
+ uptr static_tls_end);
+void DTLS_on_libc_memalign(void *ptr, uptr size);
+DTLS *DTLS_Get();
+void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_TLS_GET_ADDR_H
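Editorial note: the header above is the whole public surface of the DTLS bookkeeping. Purely as an illustration of how a tool might consume it (ReportDynamicTls is a hypothetical function, not part of the patch or of the sanitizer API):

#include "sanitizer_tls_get_addr.h"

namespace __sanitizer {

// Hypothetical consumer: enumerate the live dynamic TLS chunks of this thread.
void ReportDynamicTls() {
  DTLS *dtls = DTLS_Get();
  if (!dtls || !dtls->dtv) return;
  if (dtls->dtv_size == (uptr)-1) return;  // thread already ran DTLS_Destroy()
  for (uptr i = 0; i < dtls->dtv_size; i++) {
    const DTLS::DTV &d = dtls->dtv[i];
    if (d.beg == 0) continue;  // unused slot
    // e.g. treat [d.beg, d.beg + d.size) as a root region for leak scanning
  }
}

}  // namespace __sanitizer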
diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
new file mode 100644
index 00000000000..408c21c913b
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
@@ -0,0 +1,156 @@
+//===-- sanitizer_unwind_linux_libcdep.cc ---------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the unwind.h-based (aka "slow") stack unwinding routines
+// available to the tools on Linux, Android, and FreeBSD.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
+#if SANITIZER_ANDROID
+#include <dlfcn.h> // for dlopen()
+#endif
+
+#if SANITIZER_FREEBSD
+#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
+#endif
+#include <unwind.h>
+
+namespace __sanitizer {
+
+//------------------------- SlowUnwindStack -----------------------------------
+
+typedef struct {
+ uptr absolute_pc;
+ uptr stack_top;
+ uptr stack_size;
+} backtrace_frame_t;
+
+extern "C" {
+typedef void *(*acquire_my_map_info_list_func)();
+typedef void (*release_my_map_info_list_func)(void *map);
+typedef sptr (*unwind_backtrace_signal_arch_func)(
+ void *siginfo, void *sigcontext, void *map_info_list,
+ backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
+acquire_my_map_info_list_func acquire_my_map_info_list;
+release_my_map_info_list_func release_my_map_info_list;
+unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
+} // extern "C"
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder() {
+ void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
+ if (!p) {
+ VReport(1,
+ "Failed to open libcorkscrew.so. You may see broken stack traces "
+ "in SEGV reports.");
+ return;
+ }
+ acquire_my_map_info_list =
+ (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
+ release_my_map_info_list =
+ (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
+ unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
+ p, "unwind_backtrace_signal_arch");
+ if (!acquire_my_map_info_list || !release_my_map_info_list ||
+ !unwind_backtrace_signal_arch) {
+ VReport(1,
+ "Failed to find one of the required symbols in libcorkscrew.so. "
+ "You may see broken stack traces in SEGV reports.");
+ acquire_my_map_info_list = 0;
+ unwind_backtrace_signal_arch = 0;
+ release_my_map_info_list = 0;
+ }
+}
+#endif
+
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#if defined(__arm__) && !SANITIZER_MAC
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ u32 max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = (UnwindTraceArg*)param;
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = Unwind_GetIP(ctx);
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it,
+ // unless there is only 1 frame in the stack trace (1 frame is always better
+ // than 0!).
+ // 1-frame stacks don't normally happen, but this depends on the actual
+ // unwinder implementation (libgcc, libunwind, etc) which is outside of our
+ // control.
+ if (to_pop == 0 && size > 1)
+ to_pop = 1;
+ PopStackFrames(to_pop);
+ trace_buffer[0] = pc;
+}
+
+void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ if (!unwind_backtrace_signal_arch) {
+ SlowUnwindStack(pc, max_depth);
+ return;
+ }
+
+ void *map = acquire_my_map_info_list();
+ CHECK(map);
+ InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
+ // siginfo argument appears to be unused.
+ sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map,
+ frames.data(),
+ /* ignore_depth */ 0, max_depth);
+ release_my_map_info_list(map);
+ if (res < 0) return;
+ CHECK_LE((uptr)res, kStackTraceMax);
+
+ size = 0;
+ // The +2 compensates for the libcorkscrew unwinder returning addresses of
+ // call instructions instead of raw return addresses.
+ for (sptr i = 0; i < res; ++i)
+ trace_buffer[size++] = frames[i].absolute_pc + 2;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
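Editorial note: the non-Android path above is plain _Unwind_Backtrace with a per-frame callback. A minimal self-contained sketch of that API, assuming the common Itanium-ABI <unwind.h> (on ARM EHABI, _Unwind_GetIP needs the _Unwind_VRS_Get workaround shown in the patch); the TraceState and CollectFrame names are ours for illustration:

#include <unwind.h>
#include <cstdint>
#include <cstdio>

struct TraceState {
  uintptr_t pcs[64];
  int count = 0;
};

static _Unwind_Reason_Code CollectFrame(struct _Unwind_Context *ctx, void *param) {
  TraceState *s = static_cast<TraceState *>(param);
  if (s->count >= 64) return _URC_END_OF_STACK;  // any non-_URC_NO_REASON stops
  s->pcs[s->count++] = _Unwind_GetIP(ctx);
  return _URC_NO_REASON;
}

int main() {
  TraceState s;
  _Unwind_Backtrace(CollectFrame, &s);
  for (int i = 0; i < s.count; i++)
    std::printf("#%d %p\n", i, reinterpret_cast<void *>(s.pcs[i]));
}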
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc
index c48274e3642..0c0a29cea44 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -15,9 +15,11 @@
#define WIN32_LEAN_AND_MEAN
#define NOGDI
-#include <stdlib.h>
-#include <io.h>
#include <windows.h>
+#include <dbghelp.h>
+#include <io.h>
+#include <psapi.h>
+#include <stdlib.h>
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -31,7 +33,9 @@ namespace __sanitizer {
// --------------------- sanitizer_common.h
uptr GetPageSize() {
- return 1U << 14; // FIXME: is this configurable?
+ // FIXME: there is an API for getting the system page size (GetSystemInfo or
+ // GetNativeSystemInfo), but if we use it here we get test failures elsewhere.
+ return 1U << 14;
}
uptr GetMmapGranularity() {
@@ -45,7 +49,7 @@ uptr GetMaxVirtualAddress() {
}
bool FileExists(const char *filename) {
- UNIMPLEMENTED();
+ return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}
uptr internal_getpid() {
@@ -62,6 +66,7 @@ uptr GetThreadSelf() {
return GetTid();
}
+#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
CHECK(stack_top);
@@ -74,33 +79,37 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
*stack_bottom = (uptr)mbi.AllocationBase;
}
+#endif // #if !SANITIZER_GO
void *MmapOrDie(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
- if (rv == 0) {
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s\n",
- size, size, mem_type);
- CHECK("unable to mmap" && 0);
- }
+ if (rv == 0)
+ ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
return rv;
}
void UnmapOrDie(void *addr, uptr size) {
+ if (!size || !addr)
+ return;
+
if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
- Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- size, size, addr);
+ Report("ERROR: %s failed to "
+ "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
+ SanitizerToolName, size, size, addr, GetLastError());
CHECK("unable to unmap" && 0);
}
}
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
+ (void)name; // unsupported
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (p == 0)
- Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at %p (%d)\n",
- size, size, fixed_addr, GetLastError());
+ Report("ERROR: %s failed to "
+ "allocate %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
return p;
}
@@ -108,25 +117,57 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return MmapFixedNoReserve(fixed_addr, size);
}
-void *Mprotect(uptr fixed_addr, uptr size) {
- return VirtualAlloc((LPVOID)fixed_addr, size,
- MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ // FIXME: make this really NoReserve?
+ return MmapOrDie(size, mem_type);
+}
+
+void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ (void)name; // unsupported
+ void *res = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
+ return res;
}
+bool MprotectNoAccess(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
+}
+
+
void FlushUnneededShadowMemory(uptr addr, uptr size) {
// This is almost useless on 32-bits.
- // FIXME: add madvice-analog when we move to 64-bits.
+ // FIXME: add madvise-analog when we move to 64-bits.
+}
+
+void NoHugePagesInRegion(uptr addr, uptr size) {
+ // FIXME: probably similar to FlushUnneededShadowMemory.
+}
+
+void DontDumpShadowMemory(uptr addr, uptr length) {
+ // This is almost useless on 32-bits.
+ // FIXME: add madvise-analog when we move to 64-bits.
}
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
- // FIXME: shall we do anything here on Windows?
- return true;
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
+ return mbi.Protect == PAGE_NOACCESS &&
+ (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
UNIMPLEMENTED();
}
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
+ UNIMPLEMENTED();
+}
+
static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;
@@ -170,20 +211,65 @@ u32 GetUid() {
UNIMPLEMENTED();
}
+namespace {
+struct ModuleInfo {
+ const char *filepath;
+ uptr base_address;
+ uptr end_address;
+};
+
+#ifndef SANITIZER_GO
+int CompareModulesBase(const void *pl, const void *pr) {
+ const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
+ if (l->base_address < r->base_address)
+ return -1;
+ return l->base_address > r->base_address;
+}
+#endif
+} // namespace
+
+#ifndef SANITIZER_GO
void DumpProcessMap() {
- UNIMPLEMENTED();
+ Report("Dumping process modules:\n");
+ InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
+ uptr num_modules =
+ GetListOfModules(modules.data(), kMaxNumberOfModules, nullptr);
+
+ InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
+ for (size_t i = 0; i < num_modules; ++i) {
+ module_infos[i].filepath = modules[i].full_name();
+ module_infos[i].base_address = modules[i].base_address();
+ module_infos[i].end_address = modules[i].ranges().next()->end;
+ }
+ qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
+ CompareModulesBase);
+
+ for (size_t i = 0; i < num_modules; ++i) {
+ const ModuleInfo &mi = module_infos[i];
+ if (mi.end_address != 0) {
+ Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
+ mi.filepath[0] ? mi.filepath : "[no name]");
+ } else if (mi.filepath[0]) {
+ Printf("\t??\?-??? %s\n", mi.filepath);
+ } else {
+ Printf("\t???\n");
+ }
+ }
}
+#endif
-void DisableCoreDumper() {
- UNIMPLEMENTED();
+void DisableCoreDumperIfNecessary() {
+ // Do nothing.
}
void ReExec() {
UNIMPLEMENTED();
}
-void PrepareForSandboxing() {
- // Nothing here for now.
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+#if !SANITIZER_GO
+ CovPrepareForSandboxing(args);
+#endif
}
bool StackSizeIsUnlimited() {
@@ -194,9 +280,20 @@ void SetStackSizeLimitInBytes(uptr limit) {
UNIMPLEMENTED();
}
-char *FindPathToBinary(const char *name) {
- // Nothing here for now.
- return 0;
+bool AddressSpaceIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetAddressSpaceUnlimited() {
+ UNIMPLEMENTED();
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '\\' || c == '/';
+}
+
+bool IsAbsolutePath(const char *path) {
+ UNIMPLEMENTED();
}
void SleepForSeconds(int seconds) {
@@ -212,99 +309,256 @@ u64 NanoTime() {
}
void Abort() {
- abort();
- internal__exit(-1); // abort is not NORETURN on Windows.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ internal__exit(3);
+}
+
+// Read the file to extract the ImageBase field from the PE header. If ASLR is
+// disabled and this virtual address is available, the loader will typically
+// load the image at this address. Therefore, we call it the preferred base. Any
+// addresses in the DWARF typically assume that the object has been loaded at
+// this address.
+static uptr GetPreferredBase(const char *modname) {
+ fd_t fd = OpenFile(modname, RdOnly, nullptr);
+ if (fd == kInvalidFd)
+ return 0;
+ FileCloser closer(fd);
+
+ // Read just the DOS header.
+ IMAGE_DOS_HEADER dos_header;
+ uptr bytes_read;
+ if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
+ bytes_read != sizeof(dos_header))
+ return 0;
+
+ // The file should start with the right signature.
+ if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
+ return 0;
+
+ // The layout at e_lfanew is:
+ // "PE\0\0"
+ // IMAGE_FILE_HEADER
+ // IMAGE_OPTIONAL_HEADER
+ // Seek to e_lfanew and read all that data.
+ char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
+ if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
+ INVALID_SET_FILE_POINTER)
+ return 0;
+ if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
+ bytes_read != sizeof(buf))
+ return 0;
+
+ // Check for "PE\0\0" before the PE header.
+ char *pe_sig = &buf[0];
+ if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
+ return 0;
+
+ // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
+ IMAGE_OPTIONAL_HEADER *pe_header =
+ (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));
+
+ // Check for more magic in the PE header.
+ if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
+ return 0;
+
+ // Finally, return the ImageBase.
+ return (uptr)pe_header->ImageBase;
}
+#ifndef SANITIZER_GO
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
- UNIMPLEMENTED();
+ HANDLE cur_process = GetCurrentProcess();
+
+ // Query the list of modules. Start by assuming there are no more than 256
+ // modules and retry if that's not sufficient.
+ HMODULE *hmodules = 0;
+ uptr modules_buffer_size = sizeof(HMODULE) * 256;
+ DWORD bytes_required;
+ while (!hmodules) {
+ hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
+ CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
+ &bytes_required));
+ if (bytes_required > modules_buffer_size) {
+ // Either there turned out to be more than 256 hmodules, or new hmodules
+ // could have loaded since the last try. Retry.
+ UnmapOrDie(hmodules, modules_buffer_size);
+ hmodules = 0;
+ modules_buffer_size = bytes_required;
+ }
+ }
+
+ // |num_modules| is the number of modules actually present,
+ // |count| is the number of modules we return.
+ size_t num_modules = bytes_required / sizeof(HMODULE),
+ count = 0;
+ for (size_t i = 0; i < num_modules && count < max_modules; ++i) {
+ HMODULE handle = hmodules[i];
+ MODULEINFO mi;
+ if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
+ continue;
+
+ // Get the UTF-16 path and convert to UTF-8.
+ wchar_t modname_utf16[kMaxPathLength];
+ int modname_utf16_len =
+ GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
+ if (modname_utf16_len == 0)
+ modname_utf16[0] = '\0';
+ char module_name[kMaxPathLength];
+ int module_name_len =
+ ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
+ &module_name[0], kMaxPathLength, NULL, NULL);
+ module_name[module_name_len] = '\0';
+
+ if (filter && !filter(module_name))
+ continue;
+
+ uptr base_address = (uptr)mi.lpBaseOfDll;
+ uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
+
+ // Adjust the base address of the module so that we get a VA instead of an
+ // RVA when computing the module offset. This helps llvm-symbolizer find the
+ // right DWARF CU. In the common case that the image is loaded at its
+ // preferred address, we will now print normal virtual addresses.
+ uptr preferred_base = GetPreferredBase(&module_name[0]);
+ uptr adjusted_base = base_address - preferred_base;
+
+ LoadedModule *cur_module = &modules[count];
+ cur_module->set(module_name, adjusted_base);
+ // We add the whole module as one single address range.
+ cur_module->addAddressRange(base_address, end_address, /*executable*/ true);
+ count++;
+ }
+ UnmapOrDie(hmodules, modules_buffer_size);
+
+ return count;
};
-#ifndef SANITIZER_GO
+// We can't use atexit() directly at __asan_init time as the CRT is not fully
+// initialized at this point. Place the functions into a vector and use
+// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
+InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
+
int Atexit(void (*function)(void)) {
- return atexit(function);
+ atexit_functions.push_back(function);
+ return 0;
+}
+
+static int RunAtexit() {
+ int ret = 0;
+ for (uptr i = 0; i < atexit_functions.size(); ++i) {
+ ret |= atexit(atexit_functions[i]);
+ }
+ return ret;
}
+
+#pragma section(".CRT$XID", long, read) // NOLINT
+__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif
// ------------------ sanitizer_libc.h
-uptr internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, u64 offset) {
- UNIMPLEMENTED();
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
+ fd_t res;
+ if (mode == RdOnly) {
+ res = CreateFile(filename, GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+ } else if (mode == WrOnly) {
+ res = CreateFile(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, nullptr);
+ } else {
+ UNIMPLEMENTED();
+ }
+ CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
+ CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
+ if (res == kInvalidFd && last_error)
+ *last_error = GetLastError();
+ return res;
}
-uptr internal_munmap(void *addr, uptr length) {
- UNIMPLEMENTED();
+void CloseFile(fd_t fd) {
+ CloseHandle(fd);
}
-uptr internal_close(fd_t fd) {
- UNIMPLEMENTED();
-}
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ CHECK(fd != kInvalidFd);
-int internal_isatty(fd_t fd) {
- return _isatty(fd);
-}
+ // bytes_read can't be passed directly to ReadFile:
+ // uptr is unsigned long long on 64-bit Windows.
+ unsigned long num_read_long;
-uptr internal_open(const char *filename, int flags) {
- UNIMPLEMENTED();
+ bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
+ if (!success && error_p)
+ *error_p = GetLastError();
+ if (bytes_read)
+ *bytes_read = num_read_long;
+ return success;
}
-uptr internal_open(const char *filename, int flags, u32 mode) {
- UNIMPLEMENTED();
+bool SupportsColoredOutput(fd_t fd) {
+ // FIXME: support colored output.
+ return false;
}
-uptr OpenFile(const char *filename, bool write) {
- UNIMPLEMENTED();
-}
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ CHECK(fd != kInvalidFd);
-uptr internal_read(fd_t fd, void *buf, uptr count) {
- UNIMPLEMENTED();
-}
+ // Handle null optional parameters.
+ error_t dummy_error;
+ error_p = error_p ? error_p : &dummy_error;
+ uptr dummy_bytes_written;
+ bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;
-uptr internal_write(fd_t fd, const void *buf, uptr count) {
- if (fd != kStderrFd)
- UNIMPLEMENTED();
- HANDLE err = GetStdHandle(STD_ERROR_HANDLE);
- if (err == 0)
- return 0; // FIXME: this might not work on some apps.
- DWORD ret;
- if (!WriteFile(err, buf, count, &ret, 0))
- return 0;
- return ret;
-}
+ // Initialize output parameters in case we fail.
+ *error_p = 0;
+ *bytes_written = 0;
-uptr internal_stat(const char *path, void *buf) {
- UNIMPLEMENTED();
-}
+ // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
+ // closed, in which case this will fail.
+ if (fd == kStdoutFd || fd == kStderrFd) {
+ fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
+ if (fd == 0) {
+ *error_p = ERROR_INVALID_HANDLE;
+ return false;
+ }
+ }
-uptr internal_lstat(const char *path, void *buf) {
- UNIMPLEMENTED();
+ DWORD bytes_written_32;
+ if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
+ *error_p = GetLastError();
+ return false;
+ } else {
+ *bytes_written = bytes_written_32;
+ return true;
+ }
}
-uptr internal_fstat(fd_t fd, void *buf) {
+bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
UNIMPLEMENTED();
}
-uptr internal_filesize(fd_t fd) {
- UNIMPLEMENTED();
+uptr internal_sched_yield() {
+ Sleep(0);
+ return 0;
}
-uptr internal_dup2(int oldfd, int newfd) {
- UNIMPLEMENTED();
+void internal__exit(int exitcode) {
+ ExitProcess(exitcode);
}
-uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+uptr internal_ftruncate(fd_t fd, uptr size) {
UNIMPLEMENTED();
}
-uptr internal_sched_yield() {
- Sleep(0);
+uptr GetRSS() {
return 0;
}
-void internal__exit(int exitcode) {
- ExitProcess(exitcode);
-}
+void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) { }
// ---------------------- BlockingMutex ---------------- {{{1
const uptr LOCK_UNINITIALIZED = 0;
@@ -374,28 +628,55 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif
}
-void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+#if !SANITIZER_GO
+void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
size = CaptureStackBackTrace(2, Min(max_depth, kStackTraceMax),
(void**)trace, 0);
+ if (size == 0)
+ return;
+
// Skip the RTL frames by searching for the PC in the stacktrace.
uptr pc_location = LocatePcInTrace(pc);
PopStackFrames(pc_location);
}
-void MaybeOpenReportFile() {
- // Windows doesn't have native fork, and we don't support Cygwin or other
- // environments that try to fake it, so the initial report_fd will always be
- // correct.
+void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ u32 max_depth) {
+ CONTEXT ctx = *(CONTEXT *)context;
+ STACKFRAME64 stack_frame;
+ memset(&stack_frame, 0, sizeof(stack_frame));
+ size = 0;
+#if defined(_WIN64)
+ int machine_type = IMAGE_FILE_MACHINE_AMD64;
+ stack_frame.AddrPC.Offset = ctx.Rip;
+ stack_frame.AddrFrame.Offset = ctx.Rbp;
+ stack_frame.AddrStack.Offset = ctx.Rsp;
+#else
+ int machine_type = IMAGE_FILE_MACHINE_I386;
+ stack_frame.AddrPC.Offset = ctx.Eip;
+ stack_frame.AddrFrame.Offset = ctx.Ebp;
+ stack_frame.AddrStack.Offset = ctx.Esp;
+#endif
+ stack_frame.AddrPC.Mode = AddrModeFlat;
+ stack_frame.AddrFrame.Mode = AddrModeFlat;
+ stack_frame.AddrStack.Mode = AddrModeFlat;
+ while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
+ &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
+ &SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
+ trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
+ }
}
+#endif // #if !SANITIZER_GO
-void RawWrite(const char *buffer) {
- static const char *kRawWriteError =
- "RawWrite can't output requested buffer!\n";
- uptr length = (uptr)internal_strlen(buffer);
- if (length != internal_write(report_fd, buffer, length)) {
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ if (!WriteToFile(fd, buffer, length)) {
// stderr may be closed, but we may be able to print to the debugger
// instead. This is the case when launching a program from Visual Studio,
// and the following routine should write to its console.
@@ -403,6 +684,81 @@ void RawWrite(const char *buffer) {
}
}
+void SetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void UnsetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ (void)handler;
+ // FIXME: Decide what to do on Windows.
+}
+
+bool IsDeadlySignal(int signum) {
+ // FIXME: Decide what to do on Windows.
+ return false;
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ SYSTEM_INFO si;
+ GetNativeSystemInfo(&si);
+ uptr page_size = si.dwPageSize;
+ uptr page_mask = ~(page_size - 1);
+
+ for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
+ page <= end;) {
+ MEMORY_BASIC_INFORMATION info;
+ if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
+ return false;
+
+ if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
+ info.Protect == PAGE_EXECUTE)
+ return false;
+
+ if (info.RegionSize == 0)
+ return false;
+
+ page += info.RegionSize;
+ }
+
+ return true;
+}
+
+SignalContext SignalContext::Create(void *siginfo, void *context) {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD*)siginfo;
+ CONTEXT *context_record = (CONTEXT*)context;
+
+ uptr pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+ uptr bp = (uptr)context_record->Rbp;
+ uptr sp = (uptr)context_record->Rsp;
+#else
+ uptr bp = (uptr)context_record->Ebp;
+ uptr sp = (uptr)context_record->Esp;
+#endif
+ uptr access_addr = exception_record->ExceptionInformation[1];
+
+ return SignalContext(context, access_addr, pc, sp, bp);
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ // FIXME: Actually implement this function.
+ CHECK_GT(buf_len, 0);
+ buf[0] = 0;
+ return 0;
+}
+
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+void CheckVMASize() {
+ // Do nothing.
+}
+
} // namespace __sanitizer
#endif // _WIN32
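Editorial note: GetPreferredBase above walks the PE headers by hand through the sanitizer's own file API. The same on-disk ImageBase lookup, sketched with the CRT and the structures from <windows.h> (ReadImageBase is an illustrative name, not the patch's code):

#include <windows.h>
#include <cstdint>
#include <cstdio>

// Returns the on-disk ImageBase of a PE file, or 0 on any failure.
static uintptr_t ReadImageBase(const char *path) {
  std::FILE *f = std::fopen(path, "rb");
  if (!f) return 0;
  uintptr_t base = 0;
  IMAGE_DOS_HEADER dos;
  if (std::fread(&dos, sizeof(dos), 1, f) == 1 &&
      dos.e_magic == IMAGE_DOS_SIGNATURE &&
      std::fseek(f, dos.e_lfanew, SEEK_SET) == 0) {
    IMAGE_NT_HEADERS nt;  // "PE\0\0" signature + file header + optional header
    if (std::fread(&nt, sizeof(nt), 1, f) == 1 &&
        nt.Signature == IMAGE_NT_SIGNATURE &&
        nt.OptionalHeader.Magic == IMAGE_NT_OPTIONAL_HDR_MAGIC)
      base = (uintptr_t)nt.OptionalHeader.ImageBase;
  }
  std::fclose(f);
  return base;
}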
diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am
index 39ed2529c2c..6776923363a 100644
--- a/libsanitizer/tsan/Makefile.am
+++ b/libsanitizer/tsan/Makefile.am
@@ -3,45 +3,53 @@ AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DCAN_SANITIZE_UB=0 @OBSTACK_DEFS@
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
- tsan_clock.cc \
- tsan_interface_atomic.cc \
- tsan_mutex.cc \
- tsan_report.cc \
- tsan_rtl_thread.cc \
- tsan_symbolize.cc \
- tsan_flags.cc \
- tsan_interface.cc \
- tsan_platform_linux.cc \
- tsan_rtl.cc \
- tsan_stat.cc \
- tsan_sync.cc \
- tsan_ignoreset.cc \
- tsan_interceptors.cc \
- tsan_md5.cc \
- tsan_platform_mac.cc \
- tsan_rtl_mutex.cc \
- tsan_suppressions.cc \
- tsan_interface_ann.cc \
- tsan_mman.cc \
- tsan_rtl_report.cc \
+ tsan_clock.cc \
tsan_fd.cc \
- tsan_interface_java.cc \
- tsan_mutexset.cc \
- tsan_symbolize_addr2line_linux.cc \
- tsan_rtl_amd64.S
+ tsan_flags.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_interface_ann.cc \
+ tsan_interface_atomic.cc \
+ tsan_interface.cc \
+ tsan_interface_java.cc \
+ tsan_libdispatch_mac.cc \
+ tsan_malloc_mac.cc \
+ tsan_md5.cc \
+ tsan_mman.cc \
+ tsan_mutex.cc \
+ tsan_mutexset.cc \
+ tsan_new_delete.cc \
+ tsan_platform_linux.cc \
+ tsan_platform_mac.cc \
+ tsan_platform_posix.cc \
+ tsan_platform_windows.cc \
+ tsan_report.cc \
+ tsan_rtl.cc \
+ tsan_rtl_mutex.cc \
+ tsan_rtl_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_stack_trace.cc \
+ tsan_stat.cc \
+ tsan_suppressions.cc \
+ tsan_symbolize.cc \
+ tsan_sync.cc
-libtsan_la_SOURCES = $(tsan_files)
-libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la
+libtsan_la_SOURCES = $(tsan_files)
+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S
+libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
+libtsan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
if LIBBACKTRACE_SUPPORTED
libtsan_la_LIBADD += $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+libtsan_la_DEPENDENCIES +=$(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
endif
libtsan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in
index 01c27b9b7d4..d3fe98a94f2 100644
--- a/libsanitizer/tsan/Makefile.in
+++ b/libsanitizer/tsan/Makefile.in
@@ -36,6 +36,7 @@ build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
@LIBBACKTRACE_SUPPORTED_TRUE@am__append_1 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
+@LIBBACKTRACE_SUPPORTED_TRUE@am__append_2 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la
subdir = tsan
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -80,19 +81,17 @@ am__base_list = \
am__installdirs = "$(DESTDIR)$(toolexeclibdir)"
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
-libtsan_la_DEPENDENCIES = \
- $(top_builddir)/sanitizer_common/libsanitizer_common.la \
- $(top_builddir)/interception/libinterception.la \
- $(am__append_1) $(am__DEPENDENCIES_1)
-am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
- tsan_report.lo tsan_rtl_thread.lo tsan_symbolize.lo \
- tsan_flags.lo tsan_interface.lo tsan_platform_linux.lo \
- tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_ignoreset.lo \
- tsan_interceptors.lo tsan_md5.lo tsan_platform_mac.lo \
- tsan_rtl_mutex.lo tsan_suppressions.lo tsan_interface_ann.lo \
- tsan_mman.lo tsan_rtl_report.lo tsan_fd.lo \
- tsan_interface_java.lo tsan_mutexset.lo \
- tsan_symbolize_addr2line_linux.lo tsan_rtl_amd64.lo
+am__objects_1 = tsan_clock.lo tsan_fd.lo tsan_flags.lo \
+ tsan_ignoreset.lo tsan_interceptors.lo tsan_interface_ann.lo \
+ tsan_interface_atomic.lo tsan_interface.lo \
+ tsan_interface_java.lo tsan_libdispatch_mac.lo \
+ tsan_malloc_mac.lo tsan_md5.lo tsan_mman.lo tsan_mutex.lo \
+ tsan_mutexset.lo tsan_new_delete.lo tsan_platform_linux.lo \
+ tsan_platform_mac.lo tsan_platform_posix.lo \
+ tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \
+ tsan_rtl_mutex.lo tsan_rtl_report.lo tsan_rtl_thread.lo \
+ tsan_stack_trace.lo tsan_stat.lo tsan_suppressions.lo \
+ tsan_symbolize.lo tsan_sync.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -125,7 +124,7 @@ CCLD = $(CC)
LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
-SOURCES = $(libtsan_la_SOURCES)
+SOURCES = $(libtsan_la_SOURCES) $(EXTRA_libtsan_la_SOURCES)
ETAGS = etags
CTAGS = ctags
ACLOCAL = @ACLOCAL@
@@ -152,7 +151,7 @@ CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DCAN_SANITIZE_UB=0 @OBSTACK_DEFS@
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
@@ -186,6 +185,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -197,10 +197,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -276,42 +278,53 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
tsan_files = \
- tsan_clock.cc \
- tsan_interface_atomic.cc \
- tsan_mutex.cc \
- tsan_report.cc \
- tsan_rtl_thread.cc \
- tsan_symbolize.cc \
- tsan_flags.cc \
- tsan_interface.cc \
- tsan_platform_linux.cc \
- tsan_rtl.cc \
- tsan_stat.cc \
- tsan_sync.cc \
- tsan_ignoreset.cc \
- tsan_interceptors.cc \
- tsan_md5.cc \
- tsan_platform_mac.cc \
- tsan_rtl_mutex.cc \
- tsan_suppressions.cc \
- tsan_interface_ann.cc \
- tsan_mman.cc \
- tsan_rtl_report.cc \
+ tsan_clock.cc \
tsan_fd.cc \
- tsan_interface_java.cc \
- tsan_mutexset.cc \
- tsan_symbolize_addr2line_linux.cc \
- tsan_rtl_amd64.S
-
-libtsan_la_SOURCES = $(tsan_files)
+ tsan_flags.cc \
+ tsan_ignoreset.cc \
+ tsan_interceptors.cc \
+ tsan_interface_ann.cc \
+ tsan_interface_atomic.cc \
+ tsan_interface.cc \
+ tsan_interface_java.cc \
+ tsan_libdispatch_mac.cc \
+ tsan_malloc_mac.cc \
+ tsan_md5.cc \
+ tsan_mman.cc \
+ tsan_mutex.cc \
+ tsan_mutexset.cc \
+ tsan_new_delete.cc \
+ tsan_platform_linux.cc \
+ tsan_platform_mac.cc \
+ tsan_platform_posix.cc \
+ tsan_platform_windows.cc \
+ tsan_report.cc \
+ tsan_rtl.cc \
+ tsan_rtl_mutex.cc \
+ tsan_rtl_report.cc \
+ tsan_rtl_thread.cc \
+ tsan_stack_trace.cc \
+ tsan_stat.cc \
+ tsan_suppressions.cc \
+ tsan_symbolize.cc \
+ tsan_sync.cc
+
+libtsan_la_SOURCES = $(tsan_files)
+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S
libtsan_la_LIBADD = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/interception/libinterception.la \
- $(am__append_1) $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+ $(TSAN_TARGET_DEPENDENT_OBJECTS) $(am__append_1) \
+ $(LIBSTDCXX_RAW_CXX_LDFLAGS)
+libtsan_la_DEPENDENCIES = \
+ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
+ $(top_builddir)/interception/libinterception.la \
+ $(TSAN_TARGET_DEPENDENT_OBJECTS) $(am__append_2)
libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
@@ -436,22 +449,28 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_ann.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_atomic.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_java.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_libdispatch_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_malloc_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_md5.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mman.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_new_delete.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_posix.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_windows.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_aarch64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_thread.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stack_trace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stat.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize_addr2line_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_sync.Plo@am__quote@
.S.o:
diff --git a/libsanitizer/tsan/tsan_clock.cc b/libsanitizer/tsan/tsan_clock.cc
index 5d45a5d15fb..037afc83fc6 100644
--- a/libsanitizer/tsan/tsan_clock.cc
+++ b/libsanitizer/tsan/tsan_clock.cc
@@ -10,100 +10,416 @@
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
-// It's possible to optimize clock operations for some important cases
-// so that they are O(1). The cases include singletons, once's, local mutexes.
-// First, SyncClock must be re-implemented to allow indexing by tid.
-// It must not necessarily be a full vector clock, though. For example it may
-// be a multi-level table.
-// Then, each slot in SyncClock must contain a dirty bit (it's united with
-// the clock value, so no space increase). The acquire algorithm looks
-// as follows:
-// void acquire(thr, tid, thr_clock, sync_clock) {
-// if (!sync_clock[tid].dirty)
-// return; // No new info to acquire.
-// // This handles constant reads of singleton pointers and
-// // stop-flags.
-// acquire_impl(thr_clock, sync_clock); // As usual, O(N).
-// sync_clock[tid].dirty = false;
-// sync_clock.dirty_count--;
-// }
-// The release operation looks as follows:
-// void release(thr, tid, thr_clock, sync_clock) {
-// // thr->sync_cache is a simple fixed-size hash-based cache that holds
-// // several previous sync_clock's.
-// if (thr->sync_cache[sync_clock] >= thr->last_acquire_epoch) {
-// // The thread did no acquire operations since last release on this clock.
-// // So update only the thread's slot (other slots can't possibly change).
-// sync_clock[tid].clock = thr->epoch;
-// if (sync_clock.dirty_count == sync_clock.cnt
-// || (sync_clock.dirty_count == sync_clock.cnt - 1
-// && sync_clock[tid].dirty == false))
-// // All dirty flags are set, bail out.
-// return;
-// set all dirty bits, but preserve the thread's bit. // O(N)
-// update sync_clock.dirty_count;
-// return;
+// SyncClock and ThreadClock implement vector clocks for sync variables
+// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
+// ThreadClock contains a fixed-size vector clock for the maximum number of
+// threads; SyncClock contains a growable vector clock for the currently
+// necessary number of threads.
+// Together they implement a very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+// for (int i = 0; i < kMaxThreads; i++)
+// clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = clock[i];
// }
-// release_impl(thr_clock, sync_clock); // As usual, O(N).
-// set all dirty bits, but preserve the thread's bit.
-// // The previous step is combined with release_impl(), so that
-// // we scan the arrays only once.
-// update sync_clock.dirty_count;
-// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+// acquire(dst);
+// release(dst);
+// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cc.
+// However, the implementation is significantly more complex. The complexity
+// allows implementing important classes of use cases in O(1) instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+// by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adapts to the workload. So if an atomic is in
+// a read-only phase, reads will be O(1); if it later switches to a read/write
+// phase, the implementation will correctly handle that by switching to O(N).
+//
+// Thread-safety note: all const operations on SyncClock's are conducted under
+// a shared lock; all non-const operations on SyncClock's are conducted under
+// an exclusive lock; ThreadClock's are private to respective threads and so
+// do not need any protection.
+//
+// Description of ThreadClock state:
+// clk_ - fixed size vector clock.
+// nclk_ - effective size of the vector clock (the rest is zeros).
+// tid_ - index of the thread associated with the clock ("current thread").
+// last_acquire_ - current thread time when it acquired something from
+// other threads.
+//
+// Description of SyncClock state:
+// clk_ - variable size vector clock, low kClkBits hold timestamp,
+// the remaining bits hold "acquired" flag (the actual value is thread's
+// reused counter);
+// if acquired == thr->reused_, then the respective thread has already
+// acquired this clock (except possibly dirty_tids_).
+// dirty_tids_ - holds up to two indices in the vector clock that other threads
+// need to acquire regardless of "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is a result of
+// release-store operation by the thread with release_store_tid_ index.
+// release_store_reused_ - reuse count of release_store_tid_.
+
+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
+#ifndef SANITIZER_GO
+# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
+#else
+# define CPP_STAT_INC(typ) (void)0
+#endif
namespace __tsan {
-ThreadClock::ThreadClock() {
- nclk_ = 0;
- for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
- clk_[i] = 0;
+ThreadClock::ThreadClock(unsigned tid, unsigned reused)
+ : tid_(tid)
+ , reused_(reused + 1) { // 0 has special meaning
+ CHECK_LT(tid, kMaxTidInClock);
+ CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
+ nclk_ = tid_ + 1;
+ last_acquire_ = 0;
+ internal_memset(clk_, 0, sizeof(clk_));
+ clk_[tid_].reused = reused_;
}
-void ThreadClock::acquire(const SyncClock *src) {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(src->clk_.Size() <= kMaxTid);
+void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(src->size_, kMaxTid);
+ CPP_STAT_INC(StatClockAcquire);
- const uptr nclk = src->clk_.Size();
- if (nclk == 0)
+ // Check if it's empty -> no need to do anything.
+ const uptr nclk = src->size_;
+ if (nclk == 0) {
+ CPP_STAT_INC(StatClockAcquireEmpty);
return;
+ }
+
+ // Check if we've already acquired src after the last release operation on src
+ bool acquired = false;
+ if (nclk > tid_) {
+ CPP_STAT_INC(StatClockAcquireLarge);
+ if (src->elem(tid_).reused == reused_) {
+ CPP_STAT_INC(StatClockAcquireRepeat);
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ u64 epoch = src->elem(tid).epoch;
+ if (clk_[tid].epoch < epoch) {
+ clk_[tid].epoch = epoch;
+ acquired = true;
+ }
+ }
+ }
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
+ }
+ return;
+ }
+ }
+
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
- if (clk_[i] < src->clk_[i])
- clk_[i] = src->clk_[i];
+ u64 epoch = src->elem(i).epoch;
+ if (clk_[i].epoch < epoch) {
+ clk_[i].epoch = epoch;
+ acquired = true;
+ }
+ }
+
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->elem(tid_).reused = reused_;
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_].epoch;
}
}
-void ThreadClock::release(SyncClock *dst) const {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+
+ if (dst->size_ == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(c, dst);
+ return;
+ }
+
+ CPP_STAT_INC(StatClockRelease);
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ // Check whether we have acquired anything from other threads
+ // since the last release on dst. If we have not, we only need
+ // to update dst->elem(tid_).
+ if (dst->elem(tid_).epoch > last_acquire_) {
+ UpdateCurrentThread(dst);
+ if (dst->release_store_tid_ != tid_ ||
+ dst->release_store_reused_ != reused_)
+ dst->release_store_tid_ = kInvalidTid;
+ return;
+ }
- if (dst->clk_.Size() < nclk_)
- dst->clk_.Resize(nclk_);
+ // O(N) release.
+ CPP_STAT_INC(StatClockReleaseFull);
+ // First, remember whether we've acquired dst.
+ bool acquired = IsAlreadyAcquired(dst);
+ if (acquired)
+ CPP_STAT_INC(StatClockReleaseAcquired);
+ // Update dst->clk_.
for (uptr i = 0; i < nclk_; i++) {
- if (dst->clk_[i] < clk_[i])
- dst->clk_[i] = clk_[i];
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = max(ce.epoch, clk_[i].epoch);
+ ce.reused = 0;
}
+ // Clear 'acquired' flag in the remaining elements.
+ if (nclk_ < dst->size_)
+ CPP_STAT_INC(StatClockReleaseClearTail);
+ for (uptr i = nclk_; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = kInvalidTid;
+ dst->release_store_reused_ = 0;
+ // If we've acquired dst, remember this fact,
+ // so that we don't need to acquire it on next acquire.
+ if (acquired)
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::ReleaseStore(SyncClock *dst) const {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+ CPP_STAT_INC(StatClockStore);
- if (dst->clk_.Size() < nclk_)
- dst->clk_.Resize(nclk_);
- for (uptr i = 0; i < nclk_; i++)
- dst->clk_[i] = clk_[i];
- for (uptr i = nclk_; i < dst->clk_.Size(); i++)
- dst->clk_[i] = 0;
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ if (dst->release_store_tid_ == tid_ &&
+ dst->release_store_reused_ == reused_ &&
+ dst->elem(tid_).epoch > last_acquire_) {
+ CPP_STAT_INC(StatClockStoreFast);
+ UpdateCurrentThread(dst);
+ return;
+ }
+
+ // O(N) release-store.
+ CPP_STAT_INC(StatClockStoreFull);
+ for (uptr i = 0; i < nclk_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = clk_[i].epoch;
+ ce.reused = 0;
+ }
+ // Clear the tail of dst->clk_.
+ if (nclk_ < dst->size_) {
+ for (uptr i = nclk_; i < dst->size_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = 0;
+ ce.reused = 0;
+ }
+ CPP_STAT_INC(StatClockStoreTail);
+ }
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->elem(tid_).reused = reused_;
+}
+
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
+ CPP_STAT_INC(StatClockAcquireRelease);
+ acquire(c, dst);
+ ReleaseStore(c, dst);
+}
+
+// Updates only single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
+ // Update the thread's time, but preserve the 'acquired' flag.
+ dst->elem(tid_).epoch = clk_[tid_].epoch;
+
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dst->dirty_tids_[i] == tid_) {
+ CPP_STAT_INC(StatClockReleaseFast1);
+ return;
+ }
+ if (dst->dirty_tids_[i] == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast2);
+ dst->dirty_tids_[i] = tid_;
+ return;
+ }
+ }
+ // Reset all 'acquired' flags, O(N).
+ CPP_STAT_INC(StatClockReleaseSlow);
+ for (uptr i = 0; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dst->dirty_tids_[i] = kInvalidTid;
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+ if (src->elem(tid_).reused != reused_)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ unsigned tid = src->dirty_tids_[i];
+ if (tid != kInvalidTid) {
+ if (clk_[tid].epoch < src->elem(tid).epoch)
+ return false;
+ }
+ }
+ return true;
+}
+
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+ RoundUpTo(size_, ClockBlock::kClockCount)) {
+ // Growing within the same block.
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (nclk <= ClockBlock::kClockCount) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ size_ = nclk;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ return;
+ }
+ // Growing two-level table.
+ if (size_ == 0) {
+ // Allocate first level table.
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // Transform one-level table to two-level table.
+ u32 old = tab_idx_;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ tab_->table[0] = old;
+ }
+ // At this point we have first level table allocated.
+ // Add second level tables as necessary.
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
+ i < nclk; i += ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
+ tab_->table[i/ClockBlock::kClockCount] = idx;
+ }
+ size_ = nclk;
+}
+
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid].epoch);
+ clk_[tid].epoch = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_].epoch;
}
-void ThreadClock::acq_rel(SyncClock *dst) {
- acquire(dst);
- release(dst);
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
+ printf("] tid=%u/%u last_acq=%llu",
+ tid_, reused_, last_acquire_);
}
SyncClock::SyncClock()
- : clk_(MBlockClock) {
+ : release_store_tid_(kInvalidTid)
+ , release_store_reused_()
+ , tab_()
+ , tab_idx_()
+ , size_() {
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_ == 0) {
+ // nothing
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // One-level table.
+ ctx->clock_alloc.Free(c, tab_idx_);
+ } else {
+ // Two-level table.
+ for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
+ ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
+ ctx->clock_alloc.Free(c, tab_idx_);
+ }
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_tids_[i] = kInvalidTid;
+}
+
+ClockElem &SyncClock::elem(unsigned tid) const {
+ DCHECK_LT(tid, size_);
+ if (size_ <= ClockBlock::kClockCount)
+ return tab_->clock[tid];
+ u32 idx = tab_->table[tid / ClockBlock::kClockCount];
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ return cb->clock[tid % ClockBlock::kClockCount];
+}
+
+void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
+ printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
+ release_store_tid_, release_store_reused_,
+ dirty_tids_[0], dirty_tids_[1]);
}
} // namespace __tsan
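Editorial note: the long comment at the top of tsan_clock.cc above spells out the reference semantics that the optimized clocks are verified against. A plain O(N) sketch of exactly that model, with std::vector standing in for the fixed-size and two-level storage (SimpleSyncClock and SimpleThreadClock are illustrative names, not the optimized implementation):

#include <algorithm>
#include <cstdint>
#include <vector>

struct SimpleSyncClock { std::vector<uint64_t> clk; };

struct SimpleThreadClock {
  std::vector<uint64_t> clk;
  explicit SimpleThreadClock(size_t max_threads) : clk(max_threads, 0) {}

  void acquire(const SimpleSyncClock &src) {   // elementwise max into the thread
    size_t n = std::min(clk.size(), src.clk.size());
    for (size_t i = 0; i < n; i++)
      clk[i] = std::max(clk[i], src.clk[i]);
  }
  void release(SimpleSyncClock &dst) const {   // elementwise max into the sync var
    dst.clk.resize(clk.size(), 0);
    for (size_t i = 0; i < clk.size(); i++)
      dst.clk[i] = std::max(dst.clk[i], clk[i]);
  }
  void release_store(SimpleSyncClock &dst) const { dst.clk = clk; }  // overwrite
  void acq_rel(SimpleSyncClock &dst) { acquire(dst); release(dst); }
};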
diff --git a/libsanitizer/tsan/tsan_clock.h b/libsanitizer/tsan/tsan_clock.h
index 8e4bf99ca89..3deb7f5198c 100644
--- a/libsanitizer/tsan/tsan_clock.h
+++ b/libsanitizer/tsan/tsan_clock.h
@@ -12,65 +12,114 @@
#define TSAN_CLOCK_H
#include "tsan_defs.h"
-#include "tsan_vector.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits;
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
+
+typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAllocCache ClockCache;
+
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
SyncClock();
+ ~SyncClock();
uptr size() const {
- return clk_.Size();
+ return size_;
}
- void Reset() {
- clk_.Reset();
+ u64 get(unsigned tid) const {
+ return elem(tid).epoch;
}
+ void Resize(ClockCache *c, uptr nclk);
+ void Reset(ClockCache *c);
+
+ void DebugDump(int(*printf)(const char *s, ...));
+
private:
- Vector<u64> clk_;
friend struct ThreadClock;
+ static const uptr kDirtyTids = 2;
+
+ unsigned release_store_tid_;
+ unsigned release_store_reused_;
+ unsigned dirty_tids_[kDirtyTids];
+  // tab_ contains an indirect pointer to a 512b block using DenseSlabAlloc.
+ // If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
+ // Otherwise, tab_ points to an array with 128 u32 elements,
+ // each pointing to the second-level 512b block with 64 ClockElem's.
+ ClockBlock *tab_;
+ u32 tab_idx_;
+ u32 size_;
+
+ ClockElem &elem(unsigned tid) const;
};
// The clock that lives in threads.
struct ThreadClock {
public:
- ThreadClock();
+ typedef DenseSlabAllocCache Cache;
+
+ explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid];
+ return clk_[tid].epoch;
}
- void set(unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid]);
- clk_[tid] = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void set(unsigned tid, u64 v);
+
+ void set(u64 v) {
+ DCHECK_GE(v, clk_[tid_].epoch);
+ clk_[tid_].epoch = v;
}
- void tick(unsigned tid) {
- DCHECK_LT(tid, kMaxTid);
- clk_[tid]++;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
+ void tick() {
+ clk_[tid_].epoch++;
}
uptr size() const {
return nclk_;
}
- void acquire(const SyncClock *src);
- void release(SyncClock *dst) const;
- void acq_rel(SyncClock *dst);
- void ReleaseStore(SyncClock *dst) const;
+ void acquire(ClockCache *c, const SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst) const;
+ void acq_rel(ClockCache *c, SyncClock *dst);
+ void ReleaseStore(ClockCache *c, SyncClock *dst) const;
+
+ void DebugReset();
+ void DebugDump(int(*printf)(const char *s, ...));
private:
+ static const uptr kDirtyTids = SyncClock::kDirtyTids;
+ const unsigned tid_;
+ const unsigned reused_;
+ u64 last_acquire_;
uptr nclk_;
- u64 clk_[kMaxTidInClock];
+ ClockElem clk_[kMaxTidInClock];
+
+ bool IsAlreadyAcquired(const SyncClock *src) const;
+ void UpdateCurrentThread(SyncClock *dst) const;
};
} // namespace __tsan
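
The comment above tab_ quotes specific capacities: 64 ClockElem entries in the one-level case and 128 first-level u32 slots otherwise. Below is a minimal standalone sketch that re-derives those numbers, assuming kClkBits = 42 as set in tsan_defs.h later in this patch; the names mirror the patch, but the snippet is illustrative only and not part of it.

#include <cstdint>

namespace clock_layout_sketch {
const int kClkBits = 42;
struct ClockElem {                     // 42-bit epoch plus 22-bit reuse count
  uint64_t epoch  : kClkBits;
  uint64_t reused : 64 - kClkBits;
};
struct ClockBlock {
  static const unsigned kSize = 512;                              // one block
  static const unsigned kTableSize  = kSize / sizeof(uint32_t);   // 128 slots
  static const unsigned kClockCount = kSize / sizeof(ClockElem);  //  64 clocks
};
static_assert(sizeof(ClockElem) == 8, "8 bytes per element");
static_assert(ClockBlock::kClockCount == 64, "one-level table: 64 clocks");
static_assert(ClockBlock::kTableSize == 128, "two-level table: 128 slots");
// Two levels therefore address up to 128 * 64 = 8192 elements; elem(tid) in
// tsan_clock.cc above picks block tid / 64 and slot tid % 64.
}  // namespace clock_layout_sketch
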
diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h
index 3f20797ff4b..259b30bee5d 100644
--- a/libsanitizer/tsan/tsan_defs.h
+++ b/libsanitizer/tsan/tsan_defs.h
@@ -15,14 +15,24 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_stat.h"
+#include "ubsan/ubsan_platform.h"
-#ifndef TSAN_DEBUG
-#define TSAN_DEBUG 0
-#endif // TSAN_DEBUG
+// Setup defaults for compile definitions.
+#ifndef TSAN_NO_HISTORY
+# define TSAN_NO_HISTORY 0
+#endif
+
+#ifndef TSAN_COLLECT_STATS
+# define TSAN_COLLECT_STATS 0
+#endif
+
+#ifndef TSAN_CONTAINS_UBSAN
+# define TSAN_CONTAINS_UBSAN (CAN_SANITIZE_UB && !defined(SANITIZER_GO))
+#endif
namespace __tsan {
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
const bool kGoMode = true;
const bool kCppMode = false;
const char *const kTsanOptionsEnv = "GORACE";
@@ -37,22 +47,17 @@ const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
+#ifndef SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
+#else
+const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
+#endif
const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
-const uptr kTraceStackSize = 256;
-
-#ifdef TSAN_SHADOW_COUNT
-# if TSAN_SHADOW_COUNT == 2 \
- || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8
-const uptr kShadowCnt = TSAN_SHADOW_COUNT;
-# else
-# error "TSAN_SHADOW_COUNT must be one of 2,4,8"
-# endif
-#else
+
// Count of shadow values in a shadow cell.
const uptr kShadowCnt = 4;
-#endif
// That many user bytes are mapped onto a single shadow cell.
const uptr kShadowCell = 8;
@@ -63,16 +68,25 @@ const uptr kShadowSize = 8;
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
-#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
-const bool kCollectStats = true;
+// That many user bytes are mapped onto a single meta shadow cell.
+// Must be less than or equal to the minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
+#if TSAN_NO_HISTORY
+const bool kCollectHistory = false;
#else
-const bool kCollectStats = false;
+const bool kCollectHistory = true;
#endif
+const unsigned kInvalidTid = (unsigned)-1;
+
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// hard to debug crashes.
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
void build_consistency_debug();
#else
void build_consistency_release();
@@ -84,18 +98,8 @@ void build_consistency_stats();
void build_consistency_nostats();
#endif
-#if TSAN_SHADOW_COUNT == 1
-void build_consistency_shadow1();
-#elif TSAN_SHADOW_COUNT == 2
-void build_consistency_shadow2();
-#elif TSAN_SHADOW_COUNT == 4
-void build_consistency_shadow4();
-#else
-void build_consistency_shadow8();
-#endif
-
static inline void USED build_consistency() {
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
build_consistency_debug();
#else
build_consistency_release();
@@ -105,15 +109,6 @@ static inline void USED build_consistency() {
#else
build_consistency_nostats();
#endif
-#if TSAN_SHADOW_COUNT == 1
- build_consistency_shadow1();
-#elif TSAN_SHADOW_COUNT == 2
- build_consistency_shadow2();
-#elif TSAN_SHADOW_COUNT == 4
- build_consistency_shadow4();
-#else
- build_consistency_shadow8();
-#endif
}
template<typename T>
@@ -157,8 +152,15 @@ struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
-class StackTrace;
-struct MBlock;
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz;
+ u32 stk;
+ u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
} // namespace __tsan
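
The COMPILER_CHECK above holds through ordinary struct padding. A tiny illustrative snippet (not part of the patch) showing the same arithmetic with standard types:

#include <cstdint>

struct MBlockSketch {   // mirrors MBlock above: u64 + u32 + u16
  uint64_t siz;         // bytes 0..7
  uint32_t stk;         // bytes 8..11
  uint16_t tid;         // bytes 12..13, then 2 bytes of tail padding
};
static_assert(sizeof(MBlockSketch) == 16,
              "14 bytes of data rounded up to a multiple of the u64 alignment");
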
diff --git a/libsanitizer/tsan/tsan_dense_alloc.h b/libsanitizer/tsan/tsan_dense_alloc.h
new file mode 100644
index 00000000000..651c112c78a
--- /dev/null
+++ b/libsanitizer/tsan/tsan_dense_alloc.h
@@ -0,0 +1,135 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// the index onto the real pointer. The index is u32, that is, half the size
+// of uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ DenseSlabAlloc() {
+ // Check that kL1Size and kL2Size are sane.
+ CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+ CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+ CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+ // Check that it makes sense to use the dense alloc.
+ CHECK_GE(sizeof(T), sizeof(IndexT));
+ internal_memset(map_, 0, sizeof(map_));
+ freelist_ = 0;
+ fillpos_ = 0;
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_;
+ uptr fillpos_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ if (fillpos_ == kL1Size) {
+ Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+ Die();
+ }
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos_ == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T();
+ *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos_ * kL2Size + start;
+ map_[fillpos_++] = batch;
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
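
A hedged usage sketch of the allocator introduced above, assuming it runs inside namespace __tsan with this header included: a per-thread DenseSlabAllocCache is initialized once, Alloc() hands out u32 indices, Map() turns an index into a pointer, Free() returns it, and FlushCache() drains leftovers back to the shared freelist. Node, NodeAlloc and node_alloc are invented names for the illustration; the real instantiation visible in this patch is ClockAlloc in tsan_clock.h.

// Illustration only -- invented element type and allocator instance.
struct Node {
  u64 payload;  // sizeof(T) must be >= sizeof(u32) so the freelist fits in-place
};

typedef DenseSlabAlloc<Node, 1 << 16, 1 << 10> NodeAlloc;
static NodeAlloc node_alloc;

void Example(NodeAlloc::Cache *cache) {
  node_alloc.InitCache(cache);          // once per thread-local cache
  u32 idx = node_alloc.Alloc(cache);    // O(1); refills from the shared freelist
  Node *n = node_alloc.Map(idx);        // u32 index -> real pointer (0 is reserved)
  n->payload = 42;
  node_alloc.Free(cache, idx);          // back into the local cache
  node_alloc.FlushCache(cache);         // and, here, back to the allocator
}
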
diff --git a/libsanitizer/tsan/tsan_fd.cc b/libsanitizer/tsan/tsan_fd.cc
index b7ac3111c89..8f75a28200d 100644
--- a/libsanitizer/tsan/tsan_fd.cc
+++ b/libsanitizer/tsan/tsan_fd.cc
@@ -45,8 +45,9 @@ static bool bogusfd(int fd) {
return fd < 0 || fd >= kTableSize;
}
-static FdSync *allocsync() {
- FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
+ false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -63,10 +64,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
- SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
- if (v)
- DestroyAndFree(v);
- internal_free(s);
+ user_free(thr, pc, s, false);
}
}
}
@@ -79,19 +77,20 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size);
+ void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
l1 = (uptr)p;
else
- user_free(thr, pc, p);
+ user_free(thr, pc, p, false);
}
return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
}
// pd must be already ref'ed.
-static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
+ bool write = true) {
FdDesc *d = fddesc(thr, pc, fd);
// As a matter of fact, we don't intercept all close calls.
// See e.g. libc __res_iclose().
@@ -109,8 +108,13 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
}
d->creation_tid = thr->tid;
d->creation_stack = CurrentStackId(thr, pc);
- // To catch races between fd usage and open.
- MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+ if (write) {
+ // To catch races between fd usage and open.
+ MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+ } else {
+ // See the dup-related comment in FdClose.
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ }
}
void FdInit() {
@@ -181,13 +185,25 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) {
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
-void FdClose(ThreadState *thr, uptr pc, int fd) {
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
- // To catch races between fd usage and close.
- MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+ } else {
+ // This path is used only by dup2/dup3 calls.
+    // We do read instead of write because there are a number of legitimate
+ // cases where write would lead to false positives:
+ // 1. Some software dups a closed pipe in place of a socket before closing
+ // the socket (to prevent races actually).
+ // 2. Some daemons dup /dev/null in place of stdin/stdout.
+    // On the other hand we have not seen cases where write here catches real
+ // bugs.
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ }
// We need to clear it, because if we do not intercept any call out there
   // that creates an fd, we will hit false positives.
MemoryResetRange(thr, pc, (uptr)d, 8);
@@ -204,20 +220,20 @@ void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, &fdctx.filesync);
}
-void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
if (bogusfd(oldfd) || bogusfd(newfd))
return;
// Ignore the case when user dups not yet connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
MemoryRead(thr, pc, (uptr)od, kSizeLog8);
- FdClose(thr, pc, newfd);
- init(thr, pc, newfd, ref(od->sync));
+ FdClose(thr, pc, newfd, write);
+ init(thr, pc, newfd, ref(od->sync), write);
}
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
- FdSync *s = allocsync();
+ FdSync *s = allocsync(thr, pc);
init(thr, pc, rfd, ref(s));
init(thr, pc, wfd, ref(s));
unref(thr, pc, s);
@@ -227,7 +243,7 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
@@ -248,7 +264,7 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
@@ -283,13 +299,13 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, &fdctx.socksync);
}
-uptr File2addr(char *path) {
+uptr File2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
}
-uptr Dir2addr(char *path) {
+uptr Dir2addr(const char *path) {
(void)path;
static u64 addr;
return (uptr)&addr;
diff --git a/libsanitizer/tsan/tsan_fd.h b/libsanitizer/tsan/tsan_fd.h
index 3306873223e..4d9236c9903 100644
--- a/libsanitizer/tsan/tsan_fd.h
+++ b/libsanitizer/tsan/tsan_fd.h
@@ -40,9 +40,9 @@ void FdInit();
void FdAcquire(ThreadState *thr, uptr pc, int fd);
void FdRelease(ThreadState *thr, uptr pc, int fd);
void FdAccess(ThreadState *thr, uptr pc, int fd);
-void FdClose(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write = true);
void FdFileCreate(ThreadState *thr, uptr pc, int fd);
-void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write);
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
void FdEventCreate(ThreadState *thr, uptr pc, int fd);
void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
@@ -55,8 +55,8 @@ void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
void FdOnFork(ThreadState *thr, uptr pc);
-uptr File2addr(char *path);
-uptr Dir2addr(char *path);
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
} // namespace __tsan
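
A hedged sketch of how the new write parameter is meant to be used for dup-style calls, following the FdClose comment in tsan_fd.cc above: the dup2/dup3 interceptors pass write = false so the target descriptor slot is only read, avoiding the false positives listed there. This shows the call pattern only; the actual interceptor lives in tsan_interceptors.cc and may differ in detail.

// Illustration only -- the real dup2 interceptor may differ.
TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, /*write=*/false);  // read-only treatment of newfd2
  return newfd2;
}
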
diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc
index 158e24f8241..4cec0ac09f4 100644
--- a/libsanitizer/tsan/tsan_flags.cc
+++ b/libsanitizer/tsan/tsan_flags.cc
@@ -10,99 +10,94 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
+#include "ubsan/ubsan_flags.h"
namespace __tsan {
Flags *flags() {
- return &CTX()->flags;
+ return &ctx->flags;
}
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-void OverrideFlags(Flags *f);
extern "C" const char* __tsan_default_options();
#else
-void WEAK OverrideFlags(Flags *f) {
- (void)f;
-}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
const char *WEAK __tsan_default_options() {
return "";
}
#endif
-static void ParseFlags(Flags *f, const char *env) {
- ParseFlag(env, &f->enable_annotations, "enable_annotations");
- ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
- ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
- ParseFlag(env, &f->suppress_java, "suppress_java");
- ParseFlag(env, &f->report_bugs, "report_bugs");
- ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
- ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
- ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
- ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
- ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
- ParseFlag(env, &f->suppressions, "suppressions");
- ParseFlag(env, &f->print_suppressions, "print_suppressions");
- ParseFlag(env, &f->print_benign, "print_benign");
- ParseFlag(env, &f->exitcode, "exitcode");
- ParseFlag(env, &f->halt_on_error, "halt_on_error");
- ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
- ParseFlag(env, &f->profile_memory, "profile_memory");
- ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
- ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
- ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb");
- ParseFlag(env, &f->stop_on_start, "stop_on_start");
- ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind");
- ParseFlag(env, &f->history_size, "history_size");
- ParseFlag(env, &f->io_sync, "io_sync");
+void Flags::SetDefaults() {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ second_deadlock_stack = false;
+}
+
+void RegisterTsanFlags(FlagParser *parser, Flags *f) {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ RegisterFlag(parser, "second_deadlock_stack",
+ "Report where each mutex is locked in deadlock reports",
+ &f->second_deadlock_stack);
}
void InitializeFlags(Flags *f, const char *env) {
- internal_memset(f, 0, sizeof(*f));
-
- // Default values.
- f->enable_annotations = true;
- f->suppress_equal_stacks = true;
- f->suppress_equal_addresses = true;
- f->suppress_java = false;
- f->report_bugs = true;
- f->report_thread_leaks = true;
- f->report_destroy_locked = true;
- f->report_signal_unsafe = true;
- f->report_atomic_races = true;
- f->force_seq_cst_atomics = false;
- f->suppressions = "";
- f->print_suppressions = false;
- f->print_benign = false;
- f->exitcode = 66;
- f->halt_on_error = false;
- f->atexit_sleep_ms = 1000;
- f->profile_memory = "";
- f->flush_memory_ms = 0;
- f->flush_symbolizer_ms = 5000;
- f->memory_limit_mb = 0;
- f->stop_on_start = false;
- f->running_on_valgrind = false;
- f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
- f->io_sync = 1;
-
- SetCommonFlagsDefaults(f);
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.allow_addr2line = true;
+ if (kGoMode) {
+ // Does not work as expected for Go: runtime handles SIGABRT and crashes.
+ cf.abort_on_error = false;
+ // Go does not have mutexes.
+ } else {
+ cf.detect_deadlocks = true;
+ }
+ cf.print_suppressions = false;
+ cf.stack_trace_format = " #%n %f %S %M";
+ cf.exitcode = 66;
+ OverrideCommonFlags(cf);
+ }
+
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterTsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
// Let a frontend override.
- OverrideFlags(f);
- ParseFlags(f, __tsan_default_options());
- ParseCommonFlagsFromString(f, __tsan_default_options());
+ parser.ParseString(__tsan_default_options());
+#if TSAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
// Override from command line.
- ParseFlags(f, env);
- ParseCommonFlagsFromString(f, env);
-
- // Copy back to common flags.
- *common_flags() = *f;
+ parser.ParseString(env);
+#if TSAN_CONTAINS_UBSAN
+ ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
+#endif
// Sanity check.
if (!f->report_bugs) {
@@ -111,6 +106,12 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = false;
}
+ SetVerbosity(common_flags()->verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
" (must be [0..7])\n");
diff --git a/libsanitizer/tsan/tsan_flags.h b/libsanitizer/tsan/tsan_flags.h
index 05d11a451c6..3d58ff3793e 100644
--- a/libsanitizer/tsan/tsan_flags.h
+++ b/libsanitizer/tsan/tsan_flags.h
@@ -12,79 +12,18 @@
#ifndef TSAN_FLAGS_H
#define TSAN_FLAGS_H
-// ----------- ATTENTION -------------
-// ThreadSanitizer user may provide its implementation of weak
-// symbol __tsan::OverrideFlags(__tsan::Flags). Therefore, this
-// header may be included in the user code, and shouldn't include
-// other headers from TSan or common sanitizer runtime.
-
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
namespace __tsan {
-struct Flags : CommonFlags {
- // Enable dynamic annotations, otherwise they are no-ops.
- bool enable_annotations;
- // Supress a race report if we've already output another race report
- // with the same stack.
- bool suppress_equal_stacks;
- // Supress a race report if we've already output another race report
- // on the same address.
- bool suppress_equal_addresses;
- // Suppress weird race reports that can be seen if JVM is embed
- // into the process.
- bool suppress_java;
- // Turns off bug reporting entirely (useful for benchmarking).
- bool report_bugs;
- // Report thread leaks at exit?
- bool report_thread_leaks;
- // Report destruction of a locked mutex?
- bool report_destroy_locked;
- // Report violations of async signal-safety
- // (e.g. malloc() call from a signal handler).
- bool report_signal_unsafe;
- // Report races between atomic and plain memory accesses.
- bool report_atomic_races;
- // If set, all atomics are effectively sequentially consistent (seq_cst),
- // regardless of what user actually specified.
- bool force_seq_cst_atomics;
- // Suppressions filename.
- const char *suppressions;
- // Print matched suppressions at exit.
- bool print_suppressions;
- // Print matched "benign" races at exit.
- bool print_benign;
- // Override exit status if something was reported.
- int exitcode;
- // Exit after first reported error.
- bool halt_on_error;
- // Sleep in main thread before exiting for that many ms
- // (useful to catch "at exit" races).
- int atexit_sleep_ms;
- // If set, periodically write memory profile to that file.
- const char *profile_memory;
- // Flush shadow memory every X ms.
- int flush_memory_ms;
- // Flush symbolizer caches every X ms.
- int flush_symbolizer_ms;
- // Resident memory limit in MB to aim at.
- // If the process consumes more memory, then TSan will flush shadow memory.
- int memory_limit_mb;
- // Stops on start until __tsan_resume() is called (for debugging).
- bool stop_on_start;
- // Controls whether RunningOnValgrind() returns true or false.
- bool running_on_valgrind;
- // Per-thread history size, controls how many previous memory accesses
- // are remembered per thread. Possible values are [0..7].
- // history_size=0 amounts to 32K memory accesses. Each next value doubles
- // the amount of memory accesses, up to history_size=7 that amounts to
- // 4M memory accesses. The default value is 2 (128K memory accesses).
- int history_size;
- // Controls level of synchronization implied by IO operations.
- // 0 - no synchronization
- // 1 - reasonable level of synchronization (write->read)
- // 2 - global synchronization of all IO operations
- int io_sync;
+struct Flags : DDFlags {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+
+ void SetDefaults();
+ void ParseFromString(const char *str);
};
Flags *flags();
diff --git a/libsanitizer/tsan/tsan_flags.inc b/libsanitizer/tsan/tsan_flags.inc
new file mode 100644
index 00000000000..822e560b622
--- /dev/null
+++ b/libsanitizer/tsan/tsan_flags.inc
@@ -0,0 +1,76 @@
+//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FLAG
+# error "Define TSAN_FLAG prior to including this file!"
+#endif
+
+// TSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+TSAN_FLAG(bool, enable_annotations, true,
+ "Enable dynamic annotations, otherwise they are no-ops.")
+// Suppress a race report if we've already output another race report
+// with the same stack.
+TSAN_FLAG(bool, suppress_equal_stacks, true,
+ "Suppress a race report if we've already output another race report "
+ "with the same stack.")
+TSAN_FLAG(bool, suppress_equal_addresses, true,
+ "Suppress a race report if we've already output another race report "
+ "on the same address.")
+
+TSAN_FLAG(bool, report_bugs, true,
+ "Turns off bug reporting entirely (useful for benchmarking).")
+TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
+TSAN_FLAG(bool, report_destroy_locked, true,
+ "Report destruction of a locked mutex?")
+TSAN_FLAG(bool, report_mutex_bugs, true,
+ "Report incorrect usages of mutexes and mutex annotations?")
+TSAN_FLAG(bool, report_signal_unsafe, true,
+ "Report violations of async signal-safety "
+ "(e.g. malloc() call from a signal handler).")
+TSAN_FLAG(bool, report_atomic_races, true,
+ "Report races between atomic and plain memory accesses.")
+TSAN_FLAG(
+ bool, force_seq_cst_atomics, false,
+ "If set, all atomics are effectively sequentially consistent (seq_cst), "
+ "regardless of what user actually specified.")
+TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
+TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
+TSAN_FLAG(int, atexit_sleep_ms, 1000,
+ "Sleep in main thread before exiting for that many ms "
+ "(useful to catch \"at exit\" races).")
+TSAN_FLAG(const char *, profile_memory, "",
+ "If set, periodically write memory profile to that file.")
+TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.")
+TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.")
+TSAN_FLAG(
+ int, memory_limit_mb, 0,
+    "Resident memory limit in MB to aim at. "
+ "If the process consumes more memory, then TSan will flush shadow memory.")
+TSAN_FLAG(bool, stop_on_start, false,
+ "Stops on start until __tsan_resume() is called (for debugging).")
+TSAN_FLAG(bool, running_on_valgrind, false,
+ "Controls whether RunningOnValgrind() returns true or false.")
+TSAN_FLAG(
+ int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go.
+ "Per-thread history size, controls how many previous memory accesses "
+ "are remembered per thread. Possible values are [0..7]. "
+ "history_size=0 amounts to 32K memory accesses. Each next value doubles "
+ "the amount of memory accesses, up to history_size=7 that amounts to "
+    "4M memory accesses. The default value is 3 (256K memory accesses).")
+TSAN_FLAG(int, io_sync, 1,
+ "Controls level of synchronization implied by IO operations. "
+ "0 - no synchronization "
+    "1 - reasonable level of synchronization (write->read) "
+ "2 - global synchronization of all IO operations.")
+TSAN_FLAG(bool, die_after_fork, true,
+ "Die after multi-threaded fork if the child creates new threads.")
+TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
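
tsan_flags.inc is consumed through an X-macro: tsan_flags.h expands each TSAN_FLAG into a struct field, while SetDefaults() and RegisterTsanFlags() in tsan_flags.cc expand the same list into assignments and parser registrations. A minimal self-contained sketch of the pattern (the real code pulls the list in with #include "tsan_flags.inc" rather than a list macro; every name below is invented):

// Minimal X-macro demo, invented names only.
#define DEMO_FLAG_LIST(X)                                       \
  X(bool, report_bugs, true, "Turn bug reporting on or off.")   \
  X(int,  io_sync,     1,    "Synchronization implied by IO.")

struct DemoFlags {
#define DEMO_DECLARE(Type, Name, Default, Desc) Type Name;
  DEMO_FLAG_LIST(DEMO_DECLARE)
#undef DEMO_DECLARE

  void SetDefaults() {
#define DEMO_DEFAULT(Type, Name, Default, Desc) Name = Default;
    DEMO_FLAG_LIST(DEMO_DEFAULT)
#undef DEMO_DEFAULT
  }
};
// DemoFlags f; f.SetDefaults();  // -> f.report_bugs == true, f.io_sync == 1
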
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index 0574beb91df..0c71c008d70 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "interception/interception.h"
+#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
@@ -25,19 +26,62 @@
#include "tsan_mman.h"
#include "tsan_fd.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+
using namespace __tsan; // NOLINT
-const int kSigCount = 64;
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+#define __errno_location __error
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
+#if SANITIZER_FREEBSD
+#define __libc_realloc __realloc
+#define __libc_calloc __calloc
+#elif SANITIZER_MAC
+#define __libc_malloc REAL(malloc)
+#define __libc_realloc REAL(realloc)
+#define __libc_calloc REAL(calloc)
+#define __libc_free REAL(free)
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+#define PTHREAD_CREATE_DETACHED 1
+#elif SANITIZER_MAC
+#define PTHREAD_CREATE_DETACHED 2
+#endif
+
+
+#ifdef __mips__
+const int kSigCount = 129;
+#else
+const int kSigCount = 65;
+#endif
struct my_siginfo_t {
// The size is determined by looking at sizeof of real siginfo_t on linux.
u64 opaque[128 / sizeof(u64)];
};
+#ifdef __mips__
+struct ucontext_t {
+ u64 opaque[768 / sizeof(u64) + 1];
+};
+#else
struct ucontext_t {
// The size is determined by looking at sizeof of real ucontext_t on linux.
u64 opaque[936 / sizeof(u64) + 1];
};
+#endif
+
+#if defined(__x86_64__) || defined(__mips__)
+#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
+#elif defined(__aarch64__)
+#define PTHREAD_ABI_BASE "GLIBC_2.17"
+#endif
extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
@@ -45,36 +89,50 @@ DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
-extern "C" int pthread_mutexattr_gettype(void *a, int *type);
-extern "C" int pthread_yield();
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
// REAL(sigfillset) defined in common interceptors.
DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
extern "C" int fileno_unlocked(void *stream);
-extern "C" void *__libc_malloc(uptr size);
extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
-extern "C" void __libc_free(void *ptr);
+extern "C" int dirfd(void *dirp);
+#if !SANITIZER_FREEBSD
extern "C" int mallopt(int param, int value);
+#endif
+extern __sanitizer_FILE *stdout, *stderr;
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
const int EINVAL = 22;
const int EBUSY = 16;
const int EOWNERDEAD = 130;
+#if !SANITIZER_MAC
const int EPOLL_CTL_ADD = 1;
+#endif
const int SIGILL = 4;
const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
+const int SIGTERM = 15;
+#ifdef __mips__
+const int SIGBUS = 10;
+const int SIGSYS = 12;
+#else
const int SIGBUS = 7;
const int SIGSYS = 31;
+#endif
void *const MAP_FAILED = (void*)-1;
+#if !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
+#endif
const int MAP_FIXED = 0x10;
typedef long long_t; // NOLINT
@@ -84,29 +142,47 @@ typedef long long_t; // NOLINT
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-typedef void (*sighandler_t)(int sig);
-
#define errno (*__errno_location())
+typedef void (*sighandler_t)(int sig);
+typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
+
struct sigaction_t {
+#ifdef __mips__
+ u32 sa_flags;
+#endif
union {
sighandler_t sa_handler;
- void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
+ sigactionhandler_t sa_sigaction;
};
+#if SANITIZER_FREEBSD
+ int sa_flags;
__sanitizer_sigset_t sa_mask;
+#else
+ __sanitizer_sigset_t sa_mask;
+#ifndef __mips__
int sa_flags;
+#endif
void (*sa_restorer)();
+#endif
};
const sighandler_t SIG_DFL = (sighandler_t)0;
const sighandler_t SIG_IGN = (sighandler_t)1;
const sighandler_t SIG_ERR = (sighandler_t)-1;
+#if SANITIZER_FREEBSD
+const int SA_SIGINFO = 0x40;
+const int SIG_SETMASK = 3;
+#elif defined(__mips__)
+const int SA_SIGINFO = 8;
+const int SIG_SETMASK = 3;
+#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
+#endif
-namespace std {
-struct nothrow_t {};
-} // namespace std
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (!cur_thread()->is_inited)
static sigaction_t sigactions[kSigCount];
@@ -118,10 +194,10 @@ struct SignalDesc {
ucontext_t ctx;
};
-struct SignalContext {
- int in_blocking_func;
+struct ThreadSignalContext {
int int_signal_send;
- int pending_signal_count;
+ atomic_uintptr_t in_blocking_func;
+ atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
};
@@ -133,17 +209,22 @@ static LibIgnore *libignore() {
}
void InitializeLibIgnore() {
- libignore()->Init(*GetSuppressionContext());
+ const SuppressionContext &supp = *Suppressions();
+ const uptr n = supp.SuppressionCount();
+ for (uptr i = 0; i < n; i++) {
+ const Suppression *s = supp.SuppressionAt(i);
+ if (0 == internal_strcmp(s->type, kSuppressionLib))
+ libignore()->AddIgnoredLibrary(s->templ);
+ }
libignore()->OnLibraryLoaded(0);
}
} // namespace __tsan
-static SignalContext *SigCtx(ThreadState *thr) {
- SignalContext *ctx = (SignalContext*)thr->signal_ctx;
- if (ctx == 0 && thr->is_alive) {
- ScopedInRtl in_rtl;
- ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
+static ThreadSignalContext *SigCtx(ThreadState *thr) {
+ ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
+ if (ctx == 0 && !thr->is_dead) {
+ ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
thr->signal_ctx = ctx;
}
@@ -152,31 +233,16 @@ static SignalContext *SigCtx(ThreadState *thr) {
static unsigned g_thread_finalize_key;
-class ScopedInterceptor {
- public:
- ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
- ~ScopedInterceptor();
- private:
- ThreadState *const thr_;
- const uptr pc_;
- const int in_rtl_;
- bool in_ignored_lib_;
-};
-
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
, pc_(pc)
- , in_rtl_(thr->in_rtl)
, in_ignored_lib_(false) {
- if (thr_->in_rtl == 0) {
+ if (!thr_->ignore_interceptors) {
Initialize(thr);
FuncEntry(thr, pc);
- thr_->in_rtl++;
- DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
- } else {
- thr_->in_rtl++;
}
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
@@ -189,50 +255,55 @@ ScopedInterceptor::~ScopedInterceptor() {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
}
- thr_->in_rtl--;
- if (thr_->in_rtl == 0) {
- FuncExit(thr_);
+ if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
}
- CHECK_EQ(in_rtl_, thr_->in_rtl);
}
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
- ThreadState *thr = cur_thread(); \
- StatInc(thr, StatInterceptor); \
- StatInc(thr, StatInt_##func); \
- const uptr caller_pc = GET_CALLER_PC(); \
- ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- (void)pc; \
-/**/
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#endif
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
- SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- if (REAL(func) == 0) { \
- Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
- Die(); \
- } \
- if (thr->in_rtl > 1 || thr->in_ignored_lib) \
- return REAL(func)(__VA_ARGS__); \
-/**/
+#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
+ MemoryAccessRange((thr), (pc), (uptr)(s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n), false)
-#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
-#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#define READ_STRING(thr, pc, s, n) \
+ READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
struct BlockingCall {
explicit BlockingCall(ThreadState *thr)
- : ctx(SigCtx(thr)) {
- ctx->in_blocking_func++;
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
}
~BlockingCall() {
- ctx->in_blocking_func--;
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
}
- SignalContext *ctx;
+ ThreadState *thr;
+ ThreadSignalContext *ctx;
};
TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
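
The BlockingCall/BLOCK_REAL machinery above is the pattern used by interceptors of blocking libc calls: pending signals are processed before entering the real call, and while inside it newly arriving signals are handled immediately instead of being deferred. A hedged sketch of the shape such an interceptor takes; blocking_read is an invented name, and the real sleep/nanosleep interceptors in this file use the macro in the same way.

// Illustration only -- invented function name.
TSAN_INTERCEPTOR(int, blocking_read, int fd, void *buf, uptr sz) {
  SCOPED_TSAN_INTERCEPTOR(blocking_read, fd, buf, sz);
  // BLOCK_REAL constructs a temporary BlockingCall first: pending signals are
  // drained, then the thread is marked as inside a blocking call so signals
  // that arrive during the real call are handled right in the signal handler.
  int res = BLOCK_REAL(blocking_read)(fd, buf, sz);
  return res;
}
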
@@ -256,117 +327,81 @@ TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
return res;
}
-TSAN_INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
- SCOPED_INTERCEPTOR_RAW(dlopen, filename, flag);
- // dlopen will execute global constructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
- void *res = REAL(dlopen)(filename, flag);
- thr->in_rtl = 1;
- libignore()->OnLibraryLoaded(filename);
- return res;
-}
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+};
-TSAN_INTERCEPTOR(int, dlclose, void *handle) {
- SCOPED_INTERCEPTOR_RAW(dlclose, handle);
- // dlclose will execute global destructors, so it must be not in rtl.
- CHECK_EQ(thr->in_rtl, 1);
- thr->in_rtl = 0;
- int res = REAL(dlclose)(handle);
- thr->in_rtl = 1;
- libignore()->OnLibraryUnloaded();
- return res;
+static void at_exit_wrapper(void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ __libc_free(ctx);
}
-class AtExitContext {
- public:
- AtExitContext()
- : mtx_(MutexTypeAtExit, StatMtxAtExit)
- , pos_() {
- }
-
- typedef void(*atexit_t)();
-
- int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
- atexit_t f, void *arg) {
- Lock l(&mtx_);
- if (pos_ == kMaxAtExit)
- return 1;
- Release(thr, pc, (uptr)this);
- stack_[pos_] = f;
- args_[pos_] = arg;
- is_on_exits_[pos_] = is_on_exit;
- pos_++;
- return 0;
- }
-
- void exit(ThreadState *thr, uptr pc) {
- CHECK_EQ(thr->in_rtl, 0);
- for (;;) {
- atexit_t f = 0;
- void *arg = 0;
- bool is_on_exit = false;
- {
- Lock l(&mtx_);
- if (pos_) {
- pos_--;
- f = stack_[pos_];
- arg = args_[pos_];
- is_on_exit = is_on_exits_[pos_];
- ScopedInRtl in_rtl;
- Acquire(thr, pc, (uptr)this);
- }
- }
- if (f == 0)
- break;
- DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
- CHECK_EQ(thr->in_rtl, 0);
- if (is_on_exit)
- ((void(*)(int status, void *arg))f)(0, arg);
- else
- ((void(*)(void *arg, void *dso))f)(arg, 0);
- }
- }
-
- private:
- static const int kMaxAtExit = 128;
- Mutex mtx_;
- atexit_t stack_[kMaxAtExit];
- void *args_[kMaxAtExit];
- bool is_on_exits_[kMaxAtExit];
- int pos_;
-};
-
-static AtExitContext *atexit_ctx;
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(atexit, f);
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
+ // We want to setup the atexit callback even if we are in ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
-TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg);
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}
-TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(__cxa_atexit)(at_exit_wrapper, ctx, dso);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+
+#if !SANITIZER_MAC
+static void on_exit_wrapper(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ __libc_free(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
- SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- if (dso) {
- // Memory allocation in __cxa_atexit will race with free during exit,
- // because we do not see synchronization around atexit callback list.
- ThreadIgnoreBegin(thr, pc);
- int res = REAL(__cxa_atexit)(f, arg, dso);
- ThreadIgnoreEnd(thr, pc);
- return res;
- }
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg);
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(on_exit)(on_exit_wrapper, ctx);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
}
+#endif
// Cleanup old bufs.
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
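
The Release() in setup_at_exit_wrapper and the Acquire() in at_exit_wrapper/on_exit_wrapper above are what give callback registration and callback execution a happens-before edge, which the comment at the top of this hunk names as the sole reason for wrapping. A hedged user-level illustration of the guarantee (not part of the patch):

// Illustrative user program: the write to g_data is ordered before the body
// of dump() via the Release/Acquire pair in the wrappers above, so the
// registration point acts as a synchronization point for TSan.
#include <cstdio>
#include <cstdlib>

static int g_data;

static void dump() { std::printf("%d\n", g_data); }

int main() {
  g_data = 42;        // happens-before the callback...
  std::atexit(dump);  // ...because registration releases and the wrapper acquires
  return 0;
}
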
@@ -382,7 +417,7 @@ static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
}
static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
- if (thr->shadow_stack_pos == 0) // called from libc guts during bootstrap
+ if (!thr->is_inited) // called from libc guts during bootstrap
return;
// Cleanup old bufs.
JmpBufGarbageCollect(thr, sp);
@@ -391,10 +426,25 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
buf->sp = sp;
buf->mangled_sp = mangled_sp;
buf->shadow_stack_pos = thr->shadow_stack_pos;
+ ThreadSignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
}
static void LongJmp(ThreadState *thr, uptr *env) {
+#if SANITIZER_FREEBSD
+ uptr mangled_sp = env[2];
+#elif defined(SANITIZER_LINUX)
+# ifdef __aarch64__
+ uptr mangled_sp = env[13];
+# else
uptr mangled_sp = env[6];
+# endif
+#endif // SANITIZER_FREEBSD
// Find the saved buf by mangled_sp.
for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
JmpBuf *buf = &thr->jmp_bufs[i];
@@ -403,6 +453,14 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// Unwind the stack.
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
return;
}
@@ -413,7 +471,6 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
- ScopedInRtl in_rtl;
SetJmp(cur_thread(), sp, mangled_sp);
}
@@ -472,6 +529,7 @@ TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
REAL(siglongjmp)(env, val);
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
if (cur_thread()->in_symbolizer)
return __libc_malloc(size);
@@ -492,14 +550,10 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
if (cur_thread()->in_symbolizer)
return __libc_calloc(size, n);
- if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n))
- return AllocatorReturnNull();
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(calloc, size, n);
- p = user_alloc(thr, pc, n * size);
- if (p)
- internal_memset(p, 0, n * size);
+ p = user_calloc(thr, pc, size, n);
}
invoke_malloc_hook(p, n * size);
return p;
@@ -540,75 +594,9 @@ TSAN_INTERCEPTOR(void, cfree, void *p) {
TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
- return user_alloc_usable_size(thr, pc, p);
-}
-
-#define OPERATOR_NEW_BODY(mangled_name) \
- if (cur_thread()->in_symbolizer) \
- return __libc_malloc(size); \
- void *p = 0; \
- { \
- SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
- p = user_alloc(thr, pc, size); \
- } \
- invoke_malloc_hook(p, size); \
- return p;
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void *operator new(__sanitizer::uptr size);
-void *operator new(__sanitizer::uptr size) {
- OPERATOR_NEW_BODY(_Znwm);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void *operator new[](__sanitizer::uptr size);
-void *operator new[](__sanitizer::uptr size) {
- OPERATOR_NEW_BODY(_Znam);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
-void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
-void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t);
-}
-
-#define OPERATOR_DELETE_BODY(mangled_name) \
- if (ptr == 0) return; \
- if (cur_thread()->in_symbolizer) \
- return __libc_free(ptr); \
- invoke_free_hook(ptr); \
- SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
- user_free(thr, pc, ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete(void *ptr);
-void operator delete(void *ptr) {
- OPERATOR_DELETE_BODY(_ZdlPv);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete[](void *ptr);
-void operator delete[](void *ptr) {
- OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&);
-void operator delete(void *ptr, std::nothrow_t const&) {
- OPERATOR_DELETE_BODY(_ZdaPv);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&);
-void operator delete[](void *ptr, std::nothrow_t const&) {
- OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t);
+ return user_alloc_usable_size(p);
}
+#endif
TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
SCOPED_TSAN_INTERCEPTOR(strlen, s);
@@ -618,67 +606,54 @@ TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
}
TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
- MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ // On FreeBSD we get here from libthr internals on thread initialization.
+ if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
+ SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
+ MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ }
return internal_memset(dst, v, size);
}
TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
- MemoryAccessRange(thr, pc, (uptr)dst, size, true);
- MemoryAccessRange(thr, pc, (uptr)src, size, false);
- return internal_memcpy(dst, src, size);
-}
-
-TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memcmp, s1, s2, n);
- int res = 0;
- uptr len = 0;
- for (; len < n; len++) {
- if ((res = ((unsigned char*)s1)[len] - ((unsigned char*)s2)[len]))
- break;
+ // On FreeBSD we get here from libthr internals on thread initialization.
+ if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
+ SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
+ MemoryAccessRange(thr, pc, (uptr)dst, size, true);
+ MemoryAccessRange(thr, pc, (uptr)src, size, false);
}
- MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false);
- MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false);
- return res;
-}
-
-TSAN_INTERCEPTOR(void*, memchr, void *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memchr, s, c, n);
- void *res = REAL(memchr)(s, c, n);
- uptr len = res ? (char*)res - (char*)s + 1 : n;
- MemoryAccessRange(thr, pc, (uptr)s, len, false);
- return res;
-}
-
-TSAN_INTERCEPTOR(void*, memrchr, char *s, int c, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memrchr, s, c, n);
- MemoryAccessRange(thr, pc, (uptr)s, n, false);
- return REAL(memrchr)(s, c, n);
+ // On OS X, calling internal_memcpy here will cause memory corruptions,
+ // because memcpy and memmove are actually aliases of the same implementation.
+ // We need to use internal_memmove here.
+ return internal_memmove(dst, src, size);
}
TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
- SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
- MemoryAccessRange(thr, pc, (uptr)dst, n, true);
- MemoryAccessRange(thr, pc, (uptr)src, n, false);
+ if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
+ SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, n, false);
+ }
return REAL(memmove)(dst, src, n);
}
TSAN_INTERCEPTOR(char*, strchr, char *s, int c) {
SCOPED_TSAN_INTERCEPTOR(strchr, s, c);
char *res = REAL(strchr)(s, c);
- uptr len = res ? (char*)res - (char*)s + 1 : internal_strlen(s) + 1;
- MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ uptr len = internal_strlen(s);
+ uptr n = res ? (char*)res - (char*)s + 1 : len + 1;
+ READ_STRING_OF_LEN(thr, pc, s, len, n);
return res;
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) {
SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c);
char *res = REAL(strchrnul)(s, c);
uptr len = (char*)res - (char*)s + 1;
- MemoryAccessRange(thr, pc, (uptr)s, len, false);
+ READ_STRING(thr, pc, s, len);
return res;
}
+#endif
TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) {
SCOPED_TSAN_INTERCEPTOR(strrchr, s, c);
@@ -702,16 +677,6 @@ TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
return REAL(strncpy)(dst, src, n);
}
-TSAN_INTERCEPTOR(const char*, strstr, const char *s1, const char *s2) {
- SCOPED_TSAN_INTERCEPTOR(strstr, s1, s2);
- const char *res = REAL(strstr)(s1, s2);
- uptr len1 = internal_strlen(s1);
- uptr len2 = internal_strlen(s2);
- MemoryAccessRange(thr, pc, (uptr)s1, len1 + 1, false);
- MemoryAccessRange(thr, pc, (uptr)s2, len2 + 1, false);
- return res;
-}
-
TSAN_INTERCEPTOR(char*, strdup, const char *str) {
SCOPED_TSAN_INTERCEPTOR(strdup, str);
// strdup will call malloc, so no instrumentation is required here.
@@ -732,8 +697,8 @@ static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
return true;
}
-TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
- int flags, int fd, unsigned off) {
+TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
+ int fd, OFF_T off) {
SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
if (!fix_mmap_addr(&addr, sz, flags))
return MAP_FAILED;
@@ -746,8 +711,9 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot,
return res;
}
-TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
- int flags, int fd, u64 off) {
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
+ int fd, OFF64_T off) {
SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off);
if (!fix_mmap_addr(&addr, sz, flags))
return MAP_FAILED;
@@ -759,35 +725,62 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot,
}
return res;
}
+#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
+#else
+#define TSAN_MAYBE_INTERCEPT_MMAP64
+#endif
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
- DontNeedShadowFor((uptr)addr, sz);
+ if (sz != 0) {
+    // If sz == 0, munmap will return EINVAL and won't unmap any memory.
+ DontNeedShadowFor((uptr)addr, sz);
+ ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
+ }
int res = REAL(munmap)(addr, sz);
return res;
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
return user_alloc(thr, pc, sz, align);
}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
sz = RoundUp(sz, GetPageSizeCached());
return user_alloc(thr, pc, sz, GetPageSizeCached());
}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
*memptr = user_alloc(thr, pc, sz, align);
return 0;
}
+#endif
// Used in thread-safe function static initialization.
extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) {
@@ -817,6 +810,19 @@ extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) {
atomic_store(g, 0, memory_order_relaxed);
}
+namespace __tsan {
+void DestroyThreadState() {
+ ThreadState *thr = cur_thread();
+ ThreadFinish(thr);
+ ThreadSignalContext *sctx = thr->signal_ctx;
+ if (sctx) {
+ thr->signal_ctx = 0;
+ UnmapOrDie(sctx, sizeof(*sctx));
+ }
+ cur_thread_finalize();
+}
+} // namespace __tsan
+
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
@@ -826,16 +832,7 @@ static void thread_finalize(void *v) {
}
return;
}
- {
- ScopedInRtl in_rtl;
- ThreadState *thr = cur_thread();
- ThreadFinish(thr);
- SignalContext *sctx = thr->signal_ctx;
- if (sctx) {
- thr->signal_ctx = 0;
- UnmapOrDie(sctx, sizeof(*sctx));
- }
- }
+ DestroyThreadState();
}
@@ -852,17 +849,19 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
int tid = 0;
{
ThreadState *thr = cur_thread();
- ScopedInRtl in_rtl;
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(g_thread_finalize_key,
- (void *)kPthreadDestructorIterations)) {
+ (void *)GetPthreadDestructorIterations())) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
+ ThreadIgnoreEnd(thr, 0);
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
- pthread_yield();
- atomic_store(&p->tid, 0, memory_order_release);
+ internal_sched_yield();
ThreadStart(thr, tid, GetTid());
- CHECK_EQ(thr->in_rtl, 1);
+ atomic_store(&p->tid, 0, memory_order_release);
}
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -875,6 +874,17 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
TSAN_INTERCEPTOR(int, pthread_create,
void *th, void *attr, void *(*callback)(void*), void * param) {
SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %d). Continuing because of "
+ "die_after_fork=0, but you are on your own\n", internal_getpid());
+ }
+ }
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
@@ -882,19 +892,34 @@ TSAN_INTERCEPTOR(int, pthread_create,
}
int detached = 0;
REAL(pthread_attr_getdetachstate)(attr, &detached);
- AdjustStackSizeLinux(attr);
+ AdjustStackSize(attr);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
- int res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr, pc);
+ }
if (res == 0) {
- int tid = ThreadCreate(thr, pc, *(uptr*)th, detached);
+ int tid = ThreadCreate(thr, pc, *(uptr*)th,
+ detached == PTHREAD_CREATE_DETACHED);
CHECK_NE(tid, 0);
+ // Synchronization on p.tid serves two purposes:
+ // 1. ThreadCreate must finish before the new thread starts.
+ // Otherwise the new thread can call pthread_detach, but the pthread_t
+ // identifier is not yet registered in ThreadRegistry by ThreadCreate.
+ // 2. ThreadStart must finish before this thread continues.
+ // Otherwise, this thread can call pthread_detach and reset thr->sync
+ // before the new thread got a chance to acquire from it in ThreadStart.
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
- pthread_yield();
+ internal_sched_yield();
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
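The two spin loops around p.tid above form a small two-way handshake on a single atomic: the creator publishes the freshly registered tid only after ThreadCreate, and the child zeroes it again only after ThreadStart. A minimal standalone sketch of the same pattern, using plain pthreads and std::atomic rather than tsan code (ThreadParam, thread_func and the dummy id 42 are illustrative):

#include <atomic>
#include <cstdio>
#include <pthread.h>
#include <sched.h>

struct ThreadParam {
  std::atomic<int> tid{0};  // 0 means "not published yet" / "start acknowledged"
};

static void *thread_func(void *arg) {
  ThreadParam *p = static_cast<ThreadParam *>(arg);
  int tid;
  // Wait until the creator has registered this thread and published its id
  // (the ThreadCreate side of the handshake).
  while ((tid = p->tid.load(std::memory_order_acquire)) == 0)
    sched_yield();
  std::printf("child running as tid %d\n", tid);
  // Acknowledge the start (the ThreadStart side), letting the creator return;
  // until then it cannot, for example, race ahead and detach us.
  p->tid.store(0, std::memory_order_release);
  return nullptr;
}

int main() {
  ThreadParam p;
  pthread_t th;
  pthread_create(&th, nullptr, thread_func, &p);
  // Creator: register the thread, then publish its id...
  p.tid.store(42, std::memory_order_release);
  // ...and wait for the acknowledgement before returning from the wrapper.
  while (p.tid.load(std::memory_order_acquire) != 0)
    sched_yield();
  pthread_join(th, nullptr);
}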
@@ -904,13 +929,17 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
if (res == 0) {
ThreadJoin(thr, pc, tid);
}
return res;
}
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
int tid = ThreadTid(thr, pc, (uptr)th);
@@ -921,6 +950,143 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
return res;
}
+// Problem:
+// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
+// pthread_cond_t has different size in the different versions.
+// If we call new REAL functions for an old pthread_cond_t, they will corrupt memory
+// after pthread_cond_t (old cond is smaller).
+// If we call old REAL functions for new pthread_cond_t, we will lose some
+// functionality (e.g. old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require having 2 versions of interceptors as well.
+// But this is messy, in particular requires linker scripts when sanitizer
+// runtime is linked into a shared library.
+// Instead we assume we don't have dynamic libraries built against old
+// pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
+// that allows working with old libraries (but this mode does not support
+// some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side large enough to hold
+ // any pthread_cond_t object. Always call new REAL functions, but pass
+ // the aux object to them.
+ // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
+ // the first word of pthread_cond_t to zero.
+ // It's all relevant only for linux.
+ if (!common_flags()->legacy_pthread_cond)
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
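init_cond above lazily allocates a full-size condition object on the side and installs it into the first word of the user's pthread_cond_t with a compare-and-swap, freeing its allocation if another thread won the race. A minimal standalone sketch of that idiom, assuming a stand-in BigCond type and plain malloc/std::atomic instead of the tsan internals:

#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <cstring>

struct BigCond { char storage[64]; };  // stand-in for the "new", larger object

// The first word of the user's (possibly too small) object is reused as a
// pointer to the side allocation; 0 means "not initialized yet".
static BigCond *get_or_create(void *user_obj) {
  auto *slot = reinterpret_cast<std::atomic<std::uintptr_t> *>(user_obj);
  std::uintptr_t cur = slot->load(std::memory_order_acquire);
  if (cur != 0)
    return reinterpret_cast<BigCond *>(cur);
  auto *fresh = static_cast<BigCond *>(std::malloc(sizeof(BigCond)));
  std::memset(fresh, 0, sizeof(BigCond));
  if (slot->compare_exchange_strong(cur, reinterpret_cast<std::uintptr_t>(fresh),
                                    std::memory_order_acq_rel))
    return fresh;    // we won the race; our object is now installed
  std::free(fresh);  // somebody else installed theirs first; use that one
  return reinterpret_cast<BigCond *>(cur);
}

int main() {
  // Pretend this is an "old, small" pthread_cond_t whose first word is zero.
  alignas(sizeof(void *)) unsigned char user_cond[sizeof(void *)] = {};
  BigCond *c1 = get_or_create(user_cond);
  BigCond *c2 = get_or_create(user_cond);
  int ok = (c1 == c2);  // the second call finds the already-installed object
  std::free(c1);
  return ok ? 0 : 1;
}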
+
+struct CondMutexUnlockCtx {
+ ScopedInterceptor *si;
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+};
+
+static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+ // pthread_cond_wait interceptor has enabled async signal delivery
+ // (see BlockingCall below). Disable async signals since we are running
+ // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
+ // since the thread is cancelled, so we have to manually execute them
+ // (the thread still can run some user code due to pthread_cleanup_push).
+ ThreadSignalContext *ctx = SigCtx(arg->thr);
+ CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ MutexLock(arg->thr, arg->pc, (uptr)arg->m);
+ // Undo BlockingCall ctor effects.
+ arg->thr->ignore_interceptors--;
+ arg->si->~ScopedInterceptor();
+}
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ MutexUnlock(thr, pc, (uptr)m);
+ CondMutexUnlockCtx arg = {&si, thr, pc, m};
+ int res = 0;
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ {
+ // Enable signal delivery while the thread is blocked.
+ BlockingCall bc(thr);
+ res = call_pthread_cancel_with_cleanup(
+ (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
+ cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+ }
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
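The call_pthread_cancel_with_cleanup/cond_mutex_unlock pair exists because pthread_cond_wait is a cancellation point: if the waiter is cancelled, the code after the wait never runs, yet the mutex has been re-acquired by the implementation, so the lock bookkeeping has to be redone in a cleanup handler. A standalone sketch of the underlying POSIX mechanism (plain pthread_cleanup_push/pop, no tsan code; the 100 ms sleep is just for the demo):

#include <pthread.h>
#include <cstdio>
#include <time.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;

static void on_cancel(void *arg) {
  // Runs if the waiter is cancelled inside pthread_cond_wait. At this point
  // the mutex has been re-acquired by the implementation, so any bookkeeping
  // that mirrors "mutex is locked" (MutexLock in the interceptor) must happen
  // here, because the code after pthread_cond_wait never executes.
  std::printf("cancelled while waiting; mutex is held again\n");
  pthread_mutex_unlock(static_cast<pthread_mutex_t *>(arg));
}

static void *waiter(void *) {
  pthread_mutex_lock(&mu);
  pthread_cleanup_push(on_cancel, &mu);
  pthread_cond_wait(&cv, &mu);  // cancellation point
  pthread_cleanup_pop(0);
  pthread_mutex_unlock(&mu);
  return nullptr;
}

int main() {
  pthread_t th;
  pthread_create(&th, nullptr, waiter, nullptr);
  struct timespec ts = {0, 100 * 1000 * 1000};  // give the waiter time to block
  nanosleep(&ts, nullptr);
  pthread_cancel(th);
  pthread_join(th, nullptr);
}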
+
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ MutexUnlock(thr, pc, (uptr)m);
+ CondMutexUnlockCtx arg = {&si, thr, pc, m};
+ int res = 0;
+ // This ensures that we handle mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ {
+ BlockingCall bc(thr);
+ res = call_pthread_cancel_with_cleanup(
+ REAL(pthread_cond_timedwait), cond, m, abstime,
+ (void(*)(void *arg))cond_mutex_unlock, &arg);
+ }
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
int res = REAL(pthread_mutex_init)(m, a);
@@ -928,7 +1094,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
bool recursive = false;
if (a) {
int type = 0;
- if (pthread_mutexattr_gettype(a, &type) == 0)
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
recursive = (type == PTHREAD_MUTEX_RECURSIVE
|| type == PTHREAD_MUTEX_RECURSIVE_NP);
}
@@ -952,10 +1118,11 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
if (res == EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
if (res == 0 || res == EOWNERDEAD)
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
return res;
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
int res = REAL(pthread_mutex_timedlock)(m, abstime);
@@ -964,7 +1131,9 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
}
return res;
}
+#endif
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
int res = REAL(pthread_spin_init)(m, pshared);
@@ -996,7 +1165,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
@@ -1007,6 +1176,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
int res = REAL(pthread_spin_unlock)(m);
return res;
}
+#endif
TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
@@ -1039,11 +1209,12 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
}
return res;
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
@@ -1052,6 +1223,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
}
return res;
}
+#endif
TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
@@ -1066,11 +1238,12 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
}
return res;
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
@@ -1079,6 +1252,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
}
return res;
}
+#endif
TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
@@ -1087,23 +1261,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
- MemoryWrite(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_destroy)(c);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m,
- void *abstime) {
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, c, m, abstime);
- MutexUnlock(thr, pc, (uptr)m);
- MemoryRead(thr, pc, (uptr)c, kSizeLog1);
- int res = REAL(pthread_cond_timedwait)(c, m, abstime);
- MutexLock(thr, pc, (uptr)m);
- return res;
-}
-
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
@@ -1129,28 +1287,27 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
}
return res;
}
+#endif
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
- // Using SCOPED_INTERCEPTOR_RAW, because if we are called from an ignored lib,
- // the user callback must be executed with thr->in_rtl == 0.
if (o == 0 || f == 0)
return EINVAL;
- atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
+ atomic_uint32_t *a;
+ if (!SANITIZER_MAC)
+ a = static_cast<atomic_uint32_t*>(o);
+ else // On OS X, pthread_once_t has a header with a long-sized signature.
+ a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
memory_order_relaxed)) {
- const int old_in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
(*f)();
- CHECK_EQ(thr->in_rtl, 0);
- thr->in_rtl = old_in_rtl;
if (!thr->in_ignored_lib)
Release(thr, pc, (uptr)o);
atomic_store(a, 2, memory_order_release);
} else {
while (v != 2) {
- pthread_yield();
+ internal_sched_yield();
v = atomic_load(a, memory_order_acquire);
}
if (!thr->in_ignored_lib)
@@ -1159,166 +1316,197 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
return 0;
}
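The interceptor above is the classic three-state "once" machine: 0 (not started), 1 (running), 2 (done), with a compare-and-swap electing the thread that runs the initializer and an acquire load so waiters observe the initializer's writes. A compact sketch of the same state machine, assuming std::atomic in place of the sanitizer atomics:

#include <atomic>
#include <cstdio>
#include <sched.h>

static std::atomic<unsigned> once_state{0};  // 0: not run, 1: running, 2: done

template <typename F>
void call_once_sketch(F f) {
  unsigned v = once_state.load(std::memory_order_acquire);
  if (v == 0 &&
      once_state.compare_exchange_strong(v, 1, std::memory_order_relaxed)) {
    f();                                             // this thread runs the init
    once_state.store(2, std::memory_order_release);  // publish its side effects
  } else {
    while (once_state.load(std::memory_order_acquire) != 2)
      sched_yield();                                 // wait for the winner
  }
}

int main() {
  call_once_sketch([] { std::printf("initialized once\n"); });
  call_once_sketch([] { std::printf("never printed\n"); });
}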
-TSAN_INTERCEPTOR(int, sem_init, void *s, int pshared, unsigned value) {
- SCOPED_TSAN_INTERCEPTOR(sem_init, s, pshared, value);
- int res = REAL(sem_init)(s, pshared, value);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, sem_destroy, void *s) {
- SCOPED_TSAN_INTERCEPTOR(sem_destroy, s);
- int res = REAL(sem_destroy)(s);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, sem_wait, void *s) {
- SCOPED_TSAN_INTERCEPTOR(sem_wait, s);
- int res = BLOCK_REAL(sem_wait)(s);
- if (res == 0) {
- Acquire(thr, pc, (uptr)s);
- }
- return res;
-}
-
-TSAN_INTERCEPTOR(int, sem_trywait, void *s) {
- SCOPED_TSAN_INTERCEPTOR(sem_trywait, s);
- int res = BLOCK_REAL(sem_trywait)(s);
- if (res == 0) {
- Acquire(thr, pc, (uptr)s);
- }
- return res;
-}
-
-TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) {
- SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime);
- int res = BLOCK_REAL(sem_timedwait)(s, abstime);
- if (res == 0) {
- Acquire(thr, pc, (uptr)s);
- }
- return res;
-}
-
-TSAN_INTERCEPTOR(int, sem_post, void *s) {
- SCOPED_TSAN_INTERCEPTOR(sem_post, s);
- Release(thr, pc, (uptr)s);
- int res = REAL(sem_post)(s);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
- SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval);
- int res = REAL(sem_getvalue)(s, sval);
- if (res == 0) {
- Acquire(thr, pc, (uptr)s);
- }
- return res;
-}
-
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__xstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT
+#endif
TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+ SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
+ READ_STRING(thr, pc, path, 0);
+ return REAL(stat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__xstat)(0, path, buf);
+#endif
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__xstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___XSTAT64
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__xstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_STAT64
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__lxstat)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT
+#endif
TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+ SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
+ READ_STRING(thr, pc, path, 0);
+ return REAL(lstat)(path, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__lxstat)(0, path, buf);
+#endif
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__lxstat64)(version, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___LXSTAT64
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
+ READ_STRING(thr, pc, path, 0);
return REAL(__lxstat64)(0, path, buf);
}
+#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_LSTAT64
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(0, fd, buf);
+#endif
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(version, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat64)(0, fd, buf);
}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+ READ_STRING(thr, pc, name, 0);
int fd = REAL(open)(name, flags, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+ READ_STRING(thr, pc, name, 0);
int fd = REAL(open64)(name, flags, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
+ READ_STRING(thr, pc, name, 0);
int fd = REAL(creat)(name, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
+ READ_STRING(thr, pc, name, 0);
int fd = REAL(creat64)(name, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
TSAN_INTERCEPTOR(int, dup, int oldfd) {
SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
int newfd = REAL(dup)(oldfd);
if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
- FdDup(thr, pc, oldfd, newfd);
+ FdDup(thr, pc, oldfd, newfd, true);
return newfd;
}
@@ -1326,18 +1514,21 @@ TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
int newfd2 = REAL(dup2)(oldfd, newfd);
if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
- FdDup(thr, pc, oldfd, newfd2);
+ FdDup(thr, pc, oldfd, newfd2, false);
return newfd2;
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
int newfd2 = REAL(dup3)(oldfd, newfd, flags);
if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
- FdDup(thr, pc, oldfd, newfd2);
+ FdDup(thr, pc, oldfd, newfd2, false);
return newfd2;
}
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
int fd = REAL(eventfd)(initval, flags);
@@ -1345,7 +1536,12 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
FdEventCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
if (fd >= 0)
@@ -1355,7 +1551,12 @@ TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
FdSignalCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
int fd = REAL(inotify_init)(fake);
@@ -1363,7 +1564,12 @@ TSAN_INTERCEPTOR(int, inotify_init, int fake) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
int fd = REAL(inotify_init1)(flags);
@@ -1371,6 +1577,10 @@ TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
FdInotifyCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
@@ -1413,6 +1623,7 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
return res;
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_create, int size) {
SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
int fd = REAL(epoll_create)(size);
@@ -1420,7 +1631,12 @@ TSAN_INTERCEPTOR(int, epoll_create, int size) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
int fd = REAL(epoll_create1)(flags);
@@ -1428,6 +1644,10 @@ TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
FdPollCreate(thr, pc, fd);
return fd;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
+#endif
TSAN_INTERCEPTOR(int, close, int fd) {
SCOPED_TSAN_INTERCEPTOR(close, fd);
@@ -1436,14 +1656,20 @@ TSAN_INTERCEPTOR(int, close, int fd) {
return REAL(close)(fd);
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
SCOPED_TSAN_INTERCEPTOR(__close, fd);
if (fd >= 0)
FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
// glibc guts
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
int fds[64];
@@ -1454,6 +1680,10 @@ TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
}
REAL(__res_iclose)(state, free_addr);
}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
@@ -1463,6 +1693,7 @@ TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
return res;
}
+#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
int res = REAL(pipe2)(pipefd, flags);
@@ -1470,6 +1701,7 @@ TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
return res;
}
+#endif
TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
@@ -1509,10 +1741,9 @@ TSAN_INTERCEPTOR(int, unlink, char *path) {
return res;
}
-TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
- SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
- void *res = REAL(fopen)(path, mode);
- Acquire(thr, pc, File2addr(path));
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1521,15 +1752,10 @@ TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
return res;
}
-TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
- SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- void *res = REAL(freopen)(path, mode, stream);
- Acquire(thr, pc, File2addr(path));
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
if (res) {
int fd = fileno_unlocked(res);
if (fd >= 0)
@@ -1537,19 +1763,10 @@ TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
}
return res;
}
-
-TSAN_INTERCEPTOR(int, fclose, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fclose, stream);
- if (stream) {
- int fd = fileno_unlocked(stream);
- if (fd >= 0)
- FdClose(thr, pc, fd);
- }
- }
- return REAL(fclose)(stream);
-}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
// libc file streams can call user-supplied functions, see fopencookie.
@@ -1569,17 +1786,16 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
return REAL(fwrite)(p, size, nmemb, f);
}
-TSAN_INTERCEPTOR(int, fflush, void *stream) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fflush, stream);
- }
- return REAL(fflush)(stream);
+static void FlushStreams() {
+ // Flushing all streams here (fflush(0)) may freeze the process if a child thread
+ // is performing file stream operations at the same time, so flush only stdout and stderr.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
}
TSAN_INTERCEPTOR(void, abort, int fake) {
SCOPED_TSAN_INTERCEPTOR(abort, fake);
- REAL(fflush)(0);
+ FlushStreams();
REAL(abort)(fake);
}
@@ -1596,14 +1812,14 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) {
return res;
}
-TSAN_INTERCEPTOR(void*, opendir, char *path) {
- SCOPED_TSAN_INTERCEPTOR(opendir, path);
- void *res = REAL(opendir)(path);
- if (res != 0)
- Acquire(thr, pc, Dir2addr(path));
- return res;
+TSAN_INTERCEPTOR(int, closedir, void *dirp) {
+ SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ int fd = dirfd(dirp);
+ FdClose(thr, pc, fd);
+ return REAL(closedir)(dirp);
}
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
if (epfd >= 0)
@@ -1615,7 +1831,12 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
+#endif
+#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
if (epfd >= 0)
@@ -1625,31 +1846,121 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
FdAcquire(thr, pc, epfd);
return res;
}
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
+
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ bool sigact, int sig, my_siginfo_t *info, void *uctx) {
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
+ // Ensure that the handler does not spoil errno.
+ const int saved_errno = errno;
+ errno = 99;
+ // This code races with sigaction. Be careful to not read sa_sigaction twice.
+ // Also need to remember pc for reporting before the call,
+ // because the handler can reset it.
+ volatile uptr pc = sigact ?
+ (uptr)sigactions[sig].sa_sigaction :
+ (uptr)sigactions[sig].sa_handler;
+ if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
+ if (sigact)
+ ((sigactionhandler_t)pc)(sig, info, uctx);
+ else
+ ((sighandler_t)pc)(sig);
+ }
+ // We do not detect errno spoiling for SIGTERM,
+ // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
+ // and tsan would report a false positive in such cases.
+ // It's difficult to properly detect this situation (the reraise),
+ // because in the async signal processing case (when the handler is called
+ // directly from rtl_generic_sighandler) we have not yet received the reraised
+ // signal, and it looks too fragile to intercept all ways to reraise a signal.
+ if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ VarSizeStackTrace stack;
+ // StackTrace::GetNextInstructionPc(pc) is used because a return address is
+ // expected; OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+ }
+ errno = saved_errno;
+}
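The saved_errno/99 dance above is a sentinel trick: errno is set to an unlikely value before the user handler runs, and if it differs afterwards the handler "spoiled" errno for the interrupted code. A standalone illustration of just that check (the handler and the diagnostic are invented for the example):

#include <cerrno>
#include <cstdio>

static void handler_that_spoils_errno(int) {
  // Fails, sets errno (e.g. to ENOENT) and never restores it.
  if (std::FILE *f = std::fopen("/nonexistent/path", "r"))
    std::fclose(f);
}

static void call_handler_checked(void (*handler)(int), int sig) {
  const int saved_errno = errno;
  errno = 99;  // sentinel value, same as in the interceptor above
  handler(sig);
  if (errno != 99)
    std::fprintf(stderr, "signal handler spoiled errno (now %d)\n", errno);
  errno = saved_errno;  // restore it for the interrupted code
}

int main() {
  call_handler_checked(handler_that_spoils_errno, 15 /* SIGTERM-like */);
}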
+
+void ProcessPendingSignals(ThreadState *thr) {
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 ||
+ atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ return;
+ atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ // These are too big for stack.
+ static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
+ CHECK_EQ(0, REAL(sigfillset)(&emptyset));
+ CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &emptyset, &oldset));
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
+ &signal->siginfo, &signal->ctx);
+ }
+ }
+ CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &oldset, 0));
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+} // namespace __tsan
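ProcessPendingSignals drains signals that the asynchronous handler merely recorded: the real work is deferred to a safe point, and all signals are blocked while draining so handlers cannot re-enter. A minimal POSIX sketch of that record-then-drain pattern (kSigCount and the printf stand in for the runtime's table and handler dispatch; sigprocmask keeps the single-threaded demo simple where the runtime uses pthread_sigmask):

#include <signal.h>
#include <cstdio>

static const int kSigCount = 64;                 // mirrors kSigCount in the runtime
static volatile sig_atomic_t pending[kSigCount];
static volatile sig_atomic_t have_pending;

static void async_handler(int sig) {
  // Async-signal-safe: only record the signal; the real work happens later.
  if (sig > 0 && sig < kSigCount) {
    pending[sig] = 1;
    have_pending = 1;
  }
}

static void drain_pending_signals() {
  if (!have_pending)
    return;
  have_pending = 0;
  // Block everything while draining so handlers cannot re-enter this code.
  sigset_t all, old;
  sigfillset(&all);
  sigprocmask(SIG_SETMASK, &all, &old);
  for (int sig = 1; sig < kSigCount; sig++) {
    if (pending[sig]) {
      pending[sig] = 0;
      std::printf("processing deferred signal %d\n", sig);  // user handler runs here
    }
  }
  sigprocmask(SIG_SETMASK, &old, nullptr);
}

int main() {
  signal(SIGUSR1, async_handler);
  raise(SIGUSR1);            // the handler only sets the flags
  drain_pending_signals();   // safe point: the deferred signal is processed now
}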
+
+static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
my_siginfo_t *info, void *ctx) {
ThreadState *thr = cur_thread();
- SignalContext *sctx = SigCtx(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
// Don't mess with synchronous signals.
- if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- // If we are sending signal to ourselves, we must process it now.
- (sctx && sig == sctx->int_signal_send) ||
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
// If we are in blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) {
- int in_rtl = thr->in_rtl;
- thr->in_rtl = 0;
- CHECK_EQ(thr->in_signal_handler, false);
- thr->in_signal_handler = true;
- if (sigact)
- sigactions[sig].sa_sigaction(sig, info, ctx);
- else
- sigactions[sig].sa_handler(sig);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
- thr->in_rtl = in_rtl;
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+ // We ignore interceptors in blocking functions,
+ // temporarily enable them again while we are calling the user function.
+ int const i = thr->ignore_interceptors;
+ thr->ignore_interceptors = 0;
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+ thr->ignore_interceptors = i;
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ } else {
+ // Be very conservative about when we do an acquire in this case.
+ // It's unsafe to do an acquire in async handlers, because ThreadState
+ // can be in an inconsistent state.
+ // SIGSYS looks relatively safe -- it's synchronous and can actually
+ // need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ }
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
return;
}
@@ -1663,7 +1974,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
internal_memcpy(&signal->siginfo, info, sizeof(*info));
if (ctx)
internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
- sctx->pending_signal_count++;
+ atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
}
}
@@ -1681,7 +1992,19 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
internal_memcpy(old, &sigactions[sig], sizeof(*old));
if (act == 0)
return 0;
- internal_memcpy(&sigactions[sig], act, sizeof(*act));
+ // Copy act into sigactions[sig].
+ // Can't use struct copy, because the compiler can emit a call to memcpy.
+ // Can't use internal_memcpy, because it copies byte-by-byte,
+ // and the signal handler reads sa_handler concurrently, so it could read
+ // some bytes from the old value and some bytes from the new value.
+ // Use volatile to prevent insertion of memcpy.
+ sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
+ sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
+ internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
+ sizeof(sigactions[sig].sa_mask));
+#if !SANITIZER_FREEBSD
+ sigactions[sig].sa_restorer = act->sa_restorer;
+#endif
sigaction_t newact;
internal_memcpy(&newact, act, sizeof(newact));
REAL(sigfillset)(&newact.sa_mask);
@@ -1691,6 +2014,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
else
newact.sa_handler = rtl_sighandler;
}
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
int res = REAL(sigaction)(sig, &newact, 0);
return res;
}
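The field-by-field copy above avoids a compiler-emitted (or byte-wise) memcpy for the one field that a concurrently delivered signal may read, by forcing a single pointer-sized store through a volatile lvalue. A simplified sketch of the idiom, with handler_slot and install as illustrative stand-ins for sigactions[] and the interceptor:

#include <signal.h>
#include <cstring>

typedef void (*sig_handler_fn)(int);

struct handler_slot {
  sig_handler_fn handler;  // read concurrently on the signal delivery path
  sigset_t mask;           // not read concurrently; a plain copy is fine
  int flags;
};

static handler_slot slots[64];

static void install(int sig, const struct sigaction *act) {
  // Write the concurrently-read field with one pointer-sized store (forced
  // via a volatile lvalue) instead of letting the compiler turn the copy
  // into a memcpy that a reader could observe half-written.
  slots[sig].handler = *(volatile const sig_handler_fn *)&act->sa_handler;
  slots[sig].flags = act->sa_flags;
  std::memcpy(&slots[sig].mask, &act->sa_mask, sizeof(slots[sig].mask));
}

int main() {
  struct sigaction sa = {};
  sa.sa_handler = SIG_IGN;
  install(SIGTERM, &sa);
  return slots[SIGTERM].handler == SIG_IGN ? 0 : 1;
}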
@@ -1714,7 +2038,7 @@ TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
- SignalContext *sctx = SigCtx(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
CHECK_NE(sctx, 0);
int prev = sctx->int_signal_send;
sctx->int_signal_send = sig;
@@ -1726,7 +2050,7 @@ TSAN_INTERCEPTOR(int, raise, int sig) {
TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
- SignalContext *sctx = SigCtx(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
CHECK_NE(sctx, 0);
int prev = sctx->int_signal_send;
if (pid == (int)internal_getpid()) {
@@ -1742,7 +2066,7 @@ TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
- SignalContext *sctx = SigCtx(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
CHECK_NE(sctx, 0);
int prev = sctx->int_signal_send;
if (tid == pthread_self()) {
@@ -1769,60 +2093,97 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
// and can report false race between malloc and free
// inside of getaddrinfo. So ignore memory accesses.
ThreadIgnoreBegin(thr, pc);
- // getaddrinfo calls fopen, which can be intercepted by user.
- thr->in_rtl--;
- CHECK_EQ(thr->in_rtl, 0);
int res = REAL(getaddrinfo)(node, service, hints, rv);
- thr->in_rtl++;
ThreadIgnoreEnd(thr, pc);
return res;
}
-// Linux kernel has a bug that leads to kernel deadlock if a process
-// maps TBs of memory and then calls mlock().
-static void MlockIsUnsupported() {
- static atomic_uint8_t printed;
- if (atomic_exchange(&printed, 1, memory_order_relaxed))
- return;
- if (flags()->verbosity > 0)
- Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
-}
-
-TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
-}
-
TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (cur_thread()->in_symbolizer)
+ return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
+ ForkBefore(thr, pc);
int pid = REAL(fork)(fake);
if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
} else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
return pid;
}
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+ // Some programs (e.g. openjdk) call close for all file descriptors
+ // in the child process. Under tsan it leads to false positives, because
+ // address space is shared, so the parent process also thinks that
+ // the descriptors are closed (while they are actually not).
+ // This leads to false positives due to missed synchronization.
+ // Strictly speaking this is undefined behavior, because the vfork child is not
+ // allowed to call any functions other than exec/exit. But this is what
+ // openjdk does, so we want to handle it.
+ // We could disable interceptors in the child process. But it's not possible
+ // to simply intercept and wrap vfork, because vfork child is not allowed
+ // to return from the function that calls vfork, and that's exactly what
+ // we would do. So this would require some assembly trickery as well.
+ // Instead we simply turn vfork into fork.
+ return WRAP(fork)(fake);
+}
+
+#if !SANITIZER_MAC
+typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data);
+struct dl_iterate_phdr_data {
+ ThreadState *thr;
+ uptr pc;
+ dl_iterate_phdr_cb_t cb;
+ void *data;
+};
+
+static bool IsAppNotRodata(uptr addr) {
+ return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
+}
+
+static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data) {
+ dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
+ // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
+ // accessible in dl_iterate_phdr callback. But we don't see synchronization
+ // inside of dynamic linker, so we "unpoison" it here in order to not
+ // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
+ // because some libc functions call __libc_dlopen.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ int res = cbdata->cb(info, size, cbdata->data);
+ // Perform the check one more time in case info->dlpi_name was overwritten
+ // by user callback.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
+ SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
+ dl_iterate_phdr_data cbdata;
+ cbdata.thr = thr;
+ cbdata.pc = pc;
+ cbdata.cb = cb;
+ cbdata.data = data;
+ int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
+ return res;
+}
+#endif
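The dl_iterate_phdr interceptor uses the standard callback-wrapping idiom: the real callback and any extra state are packed into a struct and passed through the void *data parameter, while a private trampoline runs hooks around the user callback. A self-contained example against the real glibc dl_iterate_phdr (count_objects and the printf hooks are invented for illustration):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <link.h>
#include <cstddef>
#include <cstdio>

struct wrap_data {
  int (*user_cb)(struct dl_phdr_info *, size_t, void *);
  void *user_data;
};

static int trampoline(struct dl_phdr_info *info, size_t size, void *data) {
  wrap_data *w = static_cast<wrap_data *>(data);
  // Pre-hook: this is where the interceptor "unpoisons" info->dlpi_name.
  std::printf("visiting %s\n", info->dlpi_name ? info->dlpi_name : "");
  int res = w->user_cb(info, size, w->user_data);
  // Post-hook: the interceptor re-checks dlpi_name here, in case the user
  // callback overwrote it.
  return res;
}

static int count_objects(struct dl_phdr_info *, size_t, void *data) {
  ++*static_cast<int *>(data);
  return 0;  // keep iterating
}

int main() {
  int n = 0;
  wrap_data w = {count_objects, &n};
  dl_iterate_phdr(trampoline, &w);
  std::printf("%d loaded objects\n", n);
}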
+
static int OnExit(ThreadState *thr) {
int status = Finalize(thr);
- REAL(fflush)(0);
+ FlushStreams();
return status;
}
@@ -1832,6 +2193,7 @@ struct TsanInterceptorContext {
const uptr pc;
};
+#if !SANITIZER_MAC
static void HandleRecvmsg(ThreadState *thr, uptr pc,
__sanitizer_msghdr *msg) {
int fds[64];
@@ -1839,21 +2201,40 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
for (int i = 0; i < cnt; i++)
FdEventCreate(thr, pc, fds[i]);
}
+#endif
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// Causes interceptor recursion (getpwuid_r() calls fopen())
-#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
-#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
-#undef SANITIZER_INTERCEPT_GETNAMEINFO
-// Causes interceptor recursion (glob64() calls lstat64())
-#undef SANITIZER_INTERCEPT_GLOB
+// These interceptors do not seem to be strictly necessary for tsan.
+// But we see cases where the interceptors consume 70% of execution time.
+// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
+// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
+// function "writes to" the buffer. Then, the same memory is "written to"
+// twice, first as buf and then as pwbufp (both of them refer to the same
+// addresses).
+#undef SANITIZER_INTERCEPT_GETPWENT
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+// __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// There are two potential issues:
+// 1. Sanitizer code contains a MOVDQA spill (it does not seem to be the case
+// right now), or 2. ProcessPendingSignals calls a user handler which contains
+// a MOVDQA spill (this happens right now).
+// Since the interceptor only initializes memory for msan, the simplest solution
+// is to disable the interceptor in tsan (other sanitizers do not call
+// signal handlers from COMMON_INTERCEPTOR_ENTER).
+// As __tls_get_addr has been intercepted in the past, to avoid breaking
+// libtsan ABI, keep it around, but just call the real function.
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define NEED_TLS_GET_ADDR
+#endif
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
-#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
- do { \
- } while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -1871,6 +2252,40 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
ctx = (void *)&_ctx; \
(void) ctx;
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
+ Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
+
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
@@ -1887,7 +2302,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- CTX()->thread_registry->SetThreadNameByUserId(thread, name)
+ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
@@ -1906,14 +2321,26 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, msg)
+#endif
+
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (TsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
ScopedSyscall scoped_syscall(thr) \
/**/
@@ -1922,18 +2349,15 @@ struct ScopedSyscall {
explicit ScopedSyscall(ThreadState *thr)
: thr(thr) {
- if (thr->in_rtl == 0)
- Initialize(thr);
- thr->in_rtl++;
+ Initialize(thr);
}
~ScopedSyscall() {
- thr->in_rtl--;
- if (thr->in_rtl == 0)
- ProcessPendingSignals(thr);
+ ProcessPendingSignals(thr);
}
};
+#if !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -1942,12 +2366,12 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
static void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
- Printf("syscall_acquire(%p)\n", addr);
+ DPrintf("syscall_acquire(%p)\n", addr);
}
static void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
- Printf("syscall_release(%p)\n", addr);
+ DPrintf("syscall_release(%p)\n", addr);
Release(thr, pc, addr);
}
@@ -1959,28 +2383,35 @@ static void syscall_fd_close(uptr pc, int fd) {
static USED void syscall_fd_acquire(uptr pc, int fd) {
TSAN_SYSCALL();
FdAcquire(thr, pc, fd);
- Printf("syscall_fd_acquire(%p)\n", fd);
+ DPrintf("syscall_fd_acquire(%p)\n", fd);
}
static USED void syscall_fd_release(uptr pc, int fd) {
TSAN_SYSCALL();
- Printf("syscall_fd_release(%p)\n", fd);
+ DPrintf("syscall_fd_release(%p)\n", fd);
FdRelease(thr, pc, fd);
}
static void syscall_pre_fork(uptr pc) {
TSAN_SYSCALL();
+ ForkBefore(thr, pc);
}
-static void syscall_post_fork(uptr pc, int res) {
+static void syscall_post_fork(uptr pc, int pid) {
TSAN_SYSCALL();
- if (res == 0) {
+ if (pid == 0) {
// child
+ ForkChildAfter(thr, pc);
FdOnFork(thr, pc);
- } else if (res > 0) {
+ } else if (pid > 0) {
// parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
}
}
+#endif
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
@@ -2020,86 +2451,46 @@ static void syscall_post_fork(uptr pc, int res) {
#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#ifdef NEED_TLS_GET_ADDR
+TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ return REAL(__tls_get_addr)(arg);
+}
+#endif
+
namespace __tsan {
static void finalize(void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- atexit_ctx->exit(thr, pc);
int status = Finalize(thr);
- REAL(fflush)(0);
+ // Make sure the output is not lost.
+ FlushStreams();
if (status)
- REAL(_exit)(status);
-}
-
-void ProcessPendingSignals(ThreadState *thr) {
- CHECK_EQ(thr->in_rtl, 0);
- SignalContext *sctx = SigCtx(thr);
- if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
- return;
- Context *ctx = CTX();
- thr->in_signal_handler = true;
- sctx->pending_signal_count = 0;
- // These are too big for stack.
- static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
- REAL(sigfillset)(&emptyset);
- pthread_sigmask(SIG_SETMASK, &emptyset, &oldset);
- for (int sig = 0; sig < kSigCount; sig++) {
- SignalDesc *signal = &sctx->pending_signals[sig];
- if (signal->armed) {
- signal->armed = false;
- if (sigactions[sig].sa_handler != SIG_DFL
- && sigactions[sig].sa_handler != SIG_IGN) {
- // Insure that the handler does not spoil errno.
- const int saved_errno = errno;
- errno = 0;
- if (signal->sigaction)
- sigactions[sig].sa_sigaction(sig, &signal->siginfo, &signal->ctx);
- else
- sigactions[sig].sa_handler(sig);
- if (flags()->report_bugs && errno != 0) {
- ScopedInRtl in_rtl;
- __tsan::StackTrace stack;
- uptr pc = signal->sigaction ?
- (uptr)sigactions[sig].sa_sigaction :
- (uptr)sigactions[sig].sa_handler;
- pc += 1; // return address is expected, OutputReport() will undo this
- stack.Init(&pc, 1);
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
- }
- }
- errno = saved_errno;
- }
- }
- }
- pthread_sigmask(SIG_SETMASK, &oldset, 0);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
+ Die();
}
+#if !SANITIZER_MAC
static void unreachable() {
- Printf("FATAL: ThreadSanitizer: unreachable called\n");
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
}
+#endif
void InitializeInterceptors() {
- CHECK_GT(cur_thread()->in_rtl, 0);
-
+#if !SANITIZER_MAC
// We need to setup it early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
- REAL(memcmp) = internal_memcmp;
+#endif
// Instruct libc malloc to consume less memory.
+#if SANITIZER_LINUX
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+#endif
- SANITIZER_COMMON_INTERCEPTORS_INIT;
+ InitializeCommonInterceptors();
+#if !SANITIZER_MAC
// We can not use TSAN_INTERCEPT to get setjmp addr,
// because it does &setjmp and setjmp is not present in some versions of libc.
using __interception::GetRealFunctionAddress;
@@ -2107,6 +2498,7 @@ void InitializeInterceptors() {
GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
GetRealFunctionAddress("sigsetjmp", (uptr*)&REAL(sigsetjmp), 0, 0);
GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
TSAN_INTERCEPT(longjmp);
TSAN_INTERCEPT(siglongjmp);
@@ -2118,32 +2510,35 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(free);
TSAN_INTERCEPT(cfree);
TSAN_INTERCEPT(mmap);
- TSAN_INTERCEPT(mmap64);
+ TSAN_MAYBE_INTERCEPT_MMAP64;
TSAN_INTERCEPT(munmap);
- TSAN_INTERCEPT(memalign);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
TSAN_INTERCEPT(valloc);
- TSAN_INTERCEPT(pvalloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
TSAN_INTERCEPT(posix_memalign);
TSAN_INTERCEPT(strlen);
TSAN_INTERCEPT(memset);
TSAN_INTERCEPT(memcpy);
- TSAN_INTERCEPT(memchr);
- TSAN_INTERCEPT(memrchr);
TSAN_INTERCEPT(memmove);
- TSAN_INTERCEPT(memcmp);
TSAN_INTERCEPT(strchr);
TSAN_INTERCEPT(strchrnul);
TSAN_INTERCEPT(strrchr);
TSAN_INTERCEPT(strcpy); // NOLINT
TSAN_INTERCEPT(strncpy);
- TSAN_INTERCEPT(strstr);
TSAN_INTERCEPT(strdup);
TSAN_INTERCEPT(pthread_create);
TSAN_INTERCEPT(pthread_join);
TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
TSAN_INTERCEPT(pthread_mutex_trylock);
@@ -2165,56 +2560,45 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
TSAN_INTERCEPT(pthread_rwlock_unlock);
- INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
- INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
-
TSAN_INTERCEPT(pthread_barrier_init);
TSAN_INTERCEPT(pthread_barrier_destroy);
TSAN_INTERCEPT(pthread_barrier_wait);
TSAN_INTERCEPT(pthread_once);
- TSAN_INTERCEPT(sem_init);
- TSAN_INTERCEPT(sem_destroy);
- TSAN_INTERCEPT(sem_wait);
- TSAN_INTERCEPT(sem_trywait);
- TSAN_INTERCEPT(sem_timedwait);
- TSAN_INTERCEPT(sem_post);
- TSAN_INTERCEPT(sem_getvalue);
-
TSAN_INTERCEPT(stat);
- TSAN_INTERCEPT(__xstat);
- TSAN_INTERCEPT(stat64);
- TSAN_INTERCEPT(__xstat64);
+ TSAN_MAYBE_INTERCEPT___XSTAT;
+ TSAN_MAYBE_INTERCEPT_STAT64;
+ TSAN_MAYBE_INTERCEPT___XSTAT64;
TSAN_INTERCEPT(lstat);
- TSAN_INTERCEPT(__lxstat);
- TSAN_INTERCEPT(lstat64);
- TSAN_INTERCEPT(__lxstat64);
+ TSAN_MAYBE_INTERCEPT___LXSTAT;
+ TSAN_MAYBE_INTERCEPT_LSTAT64;
+ TSAN_MAYBE_INTERCEPT___LXSTAT64;
TSAN_INTERCEPT(fstat);
- TSAN_INTERCEPT(__fxstat);
- TSAN_INTERCEPT(fstat64);
- TSAN_INTERCEPT(__fxstat64);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
TSAN_INTERCEPT(open);
- TSAN_INTERCEPT(open64);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
TSAN_INTERCEPT(creat);
- TSAN_INTERCEPT(creat64);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
TSAN_INTERCEPT(dup);
TSAN_INTERCEPT(dup2);
TSAN_INTERCEPT(dup3);
- TSAN_INTERCEPT(eventfd);
- TSAN_INTERCEPT(signalfd);
- TSAN_INTERCEPT(inotify_init);
- TSAN_INTERCEPT(inotify_init1);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
TSAN_INTERCEPT(socket);
TSAN_INTERCEPT(socketpair);
TSAN_INTERCEPT(connect);
TSAN_INTERCEPT(bind);
TSAN_INTERCEPT(listen);
- TSAN_INTERCEPT(epoll_create);
- TSAN_INTERCEPT(epoll_create1);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
+ TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
TSAN_INTERCEPT(close);
- TSAN_INTERCEPT(__close);
- TSAN_INTERCEPT(__res_iclose);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
@@ -2223,19 +2607,17 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(recv);
TSAN_INTERCEPT(unlink);
- TSAN_INTERCEPT(fopen);
- TSAN_INTERCEPT(freopen);
- TSAN_INTERCEPT(fclose);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
TSAN_INTERCEPT(fread);
TSAN_INTERCEPT(fwrite);
- TSAN_INTERCEPT(fflush);
TSAN_INTERCEPT(abort);
TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
- TSAN_INTERCEPT(opendir);
+ TSAN_INTERCEPT(closedir);
- TSAN_INTERCEPT(epoll_ctl);
- TSAN_INTERCEPT(epoll_wait);
+ TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
+ TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
@@ -2249,23 +2631,22 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(gettimeofday);
TSAN_INTERCEPT(getaddrinfo);
- TSAN_INTERCEPT(mlock);
- TSAN_INTERCEPT(munlock);
- TSAN_INTERCEPT(mlockall);
- TSAN_INTERCEPT(munlockall);
-
TSAN_INTERCEPT(fork);
- TSAN_INTERCEPT(dlopen);
- TSAN_INTERCEPT(dlclose);
+ TSAN_INTERCEPT(vfork);
+ TSAN_INTERCEPT(dl_iterate_phdr);
TSAN_INTERCEPT(on_exit);
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
+#ifdef NEED_TLS_GET_ADDR
+ TSAN_INTERCEPT(__tls_get_addr);
+#endif
+
+#if !SANITIZER_MAC
// We need to set it up, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so it can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
- atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
- AtExitContext();
+#endif
if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
Printf("ThreadSanitizer: failed to setup atexit callback\n");
@@ -2280,16 +2661,4 @@ void InitializeInterceptors() {
FdInit();
}
-void internal_start_thread(void(*func)(void *arg), void *arg) {
- // Start the thread with signals blocked, otherwise it can steal users
- // signals.
- __sanitizer_kernel_sigset_t set, old;
- internal_sigfillset(&set);
- internal_sigprocmask(SIG_SETMASK, &set, &old);
- void *th;
- REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
- REAL(pthread_detach)(th);
- internal_sigprocmask(SIG_SETMASK, &old, 0);
-}
-
} // namespace __tsan
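
The TSAN_MAYBE_INTERCEPT_* names substituted above stand in for interceptors that exist only on some platforms; their definitions live earlier in tsan_interceptors.cc and are not shown in this patch. A minimal sketch of the idiom, assuming mmap64 is Linux-only (the real guards may differ):

  #if SANITIZER_LINUX
  // The 64-bit variant exists here, so intercept it as usual.
  #define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64)
  #else
  // No mmap64 on this platform; the "maybe" macro expands to nothing.
  #define TSAN_MAYBE_INTERCEPT_MMAP64
  #endif
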
diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h
new file mode 100644
index 00000000000..ed68eb96bf8
--- /dev/null
+++ b/libsanitizer/tsan/tsan_interceptors.h
@@ -0,0 +1,47 @@
+#ifndef TSAN_INTERCEPTORS_H
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+ ~ScopedInterceptor();
+ private:
+ ThreadState *const thr_;
+ const uptr pc_;
+ bool in_ignored_lib_;
+};
+
+} // namespace __tsan
+
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ ScopedInterceptor si(thr, #func, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ if (REAL(func) == 0) { \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Die(); \
+ } \
+ if (thr->ignore_interceptors || thr->in_ignored_lib) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+
+#if SANITIZER_FREEBSD
+#define __libc_free __free
+#define __libc_malloc __malloc
+#endif
+
+extern "C" void __libc_free(void *ptr);
+extern "C" void *__libc_malloc(uptr size);
+
+#endif // TSAN_INTERCEPTORS_H
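
To show how this header is meant to be used, here is a hedged sketch of a typical interceptor body; the real puts interceptor in tsan_interceptors.cc follows roughly this shape, with MemoryAccessRange and internal_strlen being runtime helpers declared elsewhere:

  TSAN_INTERCEPTOR(int, puts, const char *s) {
    // Dies if REAL(puts) was never resolved, and falls through to the real
    // function when the thread is ignoring interceptors.
    SCOPED_TSAN_INTERCEPTOR(puts, s);
    // Report the read of the string before delegating to libc.
    MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false);
    return REAL(puts)(s);
  }
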
diff --git a/libsanitizer/tsan/tsan_interface.cc b/libsanitizer/tsan/tsan_interface.cc
index e056bd465bf..ee9a627076b 100644
--- a/libsanitizer/tsan/tsan_interface.cc
+++ b/libsanitizer/tsan/tsan_interface.cc
@@ -36,56 +36,89 @@ void __tsan_write16(void *addr) {
MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
-u16 __tsan_unaligned_read2(const uu16 *addr) {
+void __tsan_read16_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_write16_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+}
+
+// __tsan_unaligned_read/write calls are emitted by the compiler.
+
+void __tsan_unaligned_read2(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
- return *addr;
}
-u32 __tsan_unaligned_read4(const uu32 *addr) {
+void __tsan_unaligned_read4(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
- return *addr;
}
-u64 __tsan_unaligned_read8(const uu64 *addr) {
+void __tsan_unaligned_read8(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
- return *addr;
}
-void __tsan_unaligned_write2(uu16 *addr, u16 v) {
+void __tsan_unaligned_read16(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+}
+
+void __tsan_unaligned_write2(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
- *addr = v;
}
-void __tsan_unaligned_write4(uu32 *addr, u32 v) {
+void __tsan_unaligned_write4(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
- *addr = v;
}
-void __tsan_unaligned_write8(uu64 *addr, u64 v) {
+void __tsan_unaligned_write8(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
- *addr = v;
}
+void __tsan_unaligned_write16(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+}
+
+// __sanitizer_unaligned_load/store are for user instrumentation.
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-uint16_t __sanitizer_unaligned_load16(void *addr)
- ALIAS("__tsan_unaligned_read2");
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
-uint32_t __sanitizer_unaligned_load32(void *addr)
- ALIAS("__tsan_unaligned_read4");
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
-uint64_t __sanitizer_unaligned_load64(void *addr)
- ALIAS("__tsan_unaligned_read8");
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store16(void *addr, uint16_t v)
- ALIAS("__tsan_unaligned_write2");
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ __tsan_unaligned_write2(addr);
+ *addr = v;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store32(void *addr, uint32_t v)
- ALIAS("__tsan_unaligned_write4");
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ __tsan_unaligned_write4(addr);
+ *addr = v;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store64(void *addr, uint64_t v)
- ALIAS("__tsan_unaligned_write8");
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ __tsan_unaligned_write8(addr);
+ *addr = v;
}
+} // extern "C"
void __tsan_acquire(void *addr) {
Acquire(cur_thread(), CALLERPC, (uptr)addr);
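
With __sanitizer_unaligned_load/store turned into real functions, non-instrumented code can report its own unaligned accesses. A minimal sketch, assuming the public <sanitizer/common_interface_defs.h> header:

  #include <sanitizer/common_interface_defs.h>
  #include <stddef.h>
  #include <stdint.h>

  // Reads a u32 from a possibly misaligned buffer position and lets TSan
  // account for the 4-byte read.
  static uint32_t read_u32(const unsigned char *buf, size_t off) {
    return __sanitizer_unaligned_load32(buf + off);
  }
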
diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h
index 63de7b2ab5c..4ff6fa1831e 100644
--- a/libsanitizer/tsan/tsan_interface.h
+++ b/libsanitizer/tsan/tsan_interface.h
@@ -39,12 +39,27 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
-SANITIZER_INTERFACE_ATTRIBUTE u16 __tsan_unaligned_read2(const uu16 *addr);
-SANITIZER_INTERFACE_ATTRIBUTE u32 __tsan_unaligned_read4(const uu32 *addr);
-SANITIZER_INTERFACE_ATTRIBUTE u64 __tsan_unaligned_read8(const uu64 *addr);
-SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(uu16 *addr, u16 v);
-SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(uu32 *addr, u32 v);
-SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(uu64 *addr, u64 v);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16_pc(void *addr, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/libsanitizer/tsan/tsan_interface_ann.cc b/libsanitizer/tsan/tsan_interface_ann.cc
index 38224f429cd..19ff4050abc 100644
--- a/libsanitizer/tsan/tsan_interface_ann.cc
+++ b/libsanitizer/tsan/tsan_interface_ann.cc
@@ -31,22 +31,17 @@ class ScopedAnnotation {
public:
ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
uptr pc)
- : thr_(thr)
- , in_rtl_(thr->in_rtl) {
- CHECK_EQ(thr_->in_rtl, 0);
+ : thr_(thr) {
FuncEntry(thr_, pc);
- thr_->in_rtl++;
DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
}
~ScopedAnnotation() {
- thr_->in_rtl--;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
FuncExit(thr_);
+ CheckNoLocks(thr_);
}
private:
ThreadState *const thr_;
- const int in_rtl_;
};
#define SCOPED_ANNOTATION(typ) \
@@ -56,8 +51,8 @@ class ScopedAnnotation {
const uptr caller_pc = (uptr)__builtin_return_address(0); \
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
- ScopedAnnotation sa(thr, __FUNCTION__, f, l, caller_pc); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
@@ -66,8 +61,8 @@ static const int kMaxDescLen = 128;
struct ExpectRace {
ExpectRace *next;
ExpectRace *prev;
- int hitcount;
- int addcount;
+ atomic_uintptr_t hitcount;
+ atomic_uintptr_t addcount;
uptr addr;
uptr size;
char *file;
@@ -93,7 +88,8 @@ static void AddExpectRace(ExpectRace *list,
ExpectRace *race = list->next;
for (; race != list; race = race->next) {
if (race->addr == addr && race->size == size) {
- race->addcount++;
+ atomic_store_relaxed(&race->addcount,
+ atomic_load_relaxed(&race->addcount) + 1);
return;
}
}
@@ -103,8 +99,8 @@ static void AddExpectRace(ExpectRace *list,
race->file = f;
race->line = l;
race->desc[0] = 0;
- race->hitcount = 0;
- race->addcount = 1;
+ atomic_store_relaxed(&race->hitcount, 0);
+ atomic_store_relaxed(&race->addcount, 1);
if (desc) {
int i = 0;
for (; i < kMaxDescLen - 1 && desc[i]; i++)
@@ -129,13 +125,11 @@ static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
ExpectRace *race = FindRace(list, addr, size);
- if (race == 0 && AlternativeAddress(addr))
- race = FindRace(list, AlternativeAddress(addr), size);
if (race == 0)
return false;
DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
race->desc, race->addr, (int)race->size, race->file, race->line);
- race->hitcount++;
+ atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
return true;
}
@@ -151,7 +145,7 @@ void InitializeDynamicAnnotations() {
}
bool IsExpectedReport(uptr addr, uptr size) {
- Lock lock(&dyn_ann_ctx->mtx);
+ ReadLock lock(&dyn_ann_ctx->mtx);
if (CheckContains(&dyn_ann_ctx->expect, addr, size))
return true;
if (CheckContains(&dyn_ann_ctx->benign, addr, size))
@@ -160,20 +154,21 @@ bool IsExpectedReport(uptr addr, uptr size) {
}
static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
- int *unique_count, int *hit_count, int ExpectRace::*counter) {
+ int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
ExpectRace *list = &dyn_ann_ctx->benign;
for (ExpectRace *race = list->next; race != list; race = race->next) {
(*unique_count)++;
- if (race->*counter == 0)
+ const uptr cnt = atomic_load_relaxed(&(race->*counter));
+ if (cnt == 0)
continue;
- (*hit_count) += race->*counter;
+ *hit_count += cnt;
uptr i = 0;
for (; i < matched->Size(); i++) {
ExpectRace *race0 = &(*matched)[i];
if (race->line == race0->line
&& internal_strcmp(race->file, race0->file) == 0
&& internal_strcmp(race->desc, race0->desc) == 0) {
- race0->*counter += race->*counter;
+ atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
break;
}
}
@@ -198,8 +193,8 @@ void PrintMatchedBenignRaces() {
hit_count, (int)internal_getpid());
for (uptr i = 0; i < hit_matched.Size(); i++) {
Printf("%d %s:%d %s\n",
- hit_matched[i].hitcount, hit_matched[i].file,
- hit_matched[i].line, hit_matched[i].desc);
+ atomic_load_relaxed(&hit_matched[i].hitcount),
+ hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
}
}
if (hit_matched.Size()) {
@@ -208,8 +203,8 @@ void PrintMatchedBenignRaces() {
add_count, unique_count, (int)internal_getpid());
for (uptr i = 0; i < add_matched.Size(); i++) {
Printf("%d %s:%d %s\n",
- add_matched[i].addcount, add_matched[i].file,
- add_matched[i].line, add_matched[i].desc);
+ atomic_load_relaxed(&add_matched[i].addcount),
+ add_matched[i].file, add_matched[i].line, add_matched[i].desc);
}
}
}
@@ -308,8 +303,8 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
Lock lock(&dyn_ann_ctx->mtx);
while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
ExpectRace *race = dyn_ann_ctx->expect.next;
- if (race->hitcount == 0) {
- CTX()->nmissed_expected++;
+ if (atomic_load_relaxed(&race->hitcount) == 0) {
+ ctx->nmissed_expected++;
ReportMissedExpectedRace(race);
}
race->prev->next = race->next;
@@ -459,4 +454,6 @@ const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
} // extern "C"
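
The hit/add counters become atomics because IsExpectedReport now takes only a read lock, so two racing threads can bump the same entry concurrently. The same pattern in self-contained standard C++:

  #include <atomic>

  struct BenignRaceEntry {
    std::atomic<unsigned long> hitcount{0};

    // Safe to call from threads holding only a shared (read) lock: the
    // increment is atomic, and relaxed ordering suffices for a statistic.
    void Hit() { hitcount.fetch_add(1, std::memory_order_relaxed); }
  };
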
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 180d87b7993..0ded1aa1099 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
// ThreadSanitizer atomic operations are based on C++11/C1x standards.
-// For background see C++11 standard. A slightly older, publically
+// For background see C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
@@ -19,72 +19,40 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interface_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
-#define SCOPED_ATOMIC(func, ...) \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- mo = ConvertOrder(mo); \
- mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
- ThreadState *const thr = cur_thread(); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, callpc, a, mo, __FUNCTION__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
-// Some shortcuts.
-typedef __tsan_memory_order morder;
-typedef __tsan_atomic8 a8;
-typedef __tsan_atomic16 a16;
-typedef __tsan_atomic32 a32;
-typedef __tsan_atomic64 a64;
-typedef __tsan_atomic128 a128;
-const morder mo_relaxed = __tsan_memory_order_relaxed;
-const morder mo_consume = __tsan_memory_order_consume;
-const morder mo_acquire = __tsan_memory_order_acquire;
-const morder mo_release = __tsan_memory_order_release;
-const morder mo_acq_rel = __tsan_memory_order_acq_rel;
-const morder mo_seq_cst = __tsan_memory_order_seq_cst;
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
-class ScopedAtomic {
- public:
- ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
- morder mo, const char *func)
- : thr_(thr) {
- CHECK_EQ(thr_->in_rtl, 0);
- ProcessPendingSignals(thr);
- FuncEntry(thr_, pc);
- DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- thr_->in_rtl++;
- }
- ~ScopedAtomic() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
- FuncExit(thr_);
- }
- private:
- ThreadState *thr_;
-};
+#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+#endif
-static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
- StatInc(thr, StatAtomic);
- StatInc(thr, t);
- StatInc(thr, size == 1 ? StatAtomic1
- : size == 2 ? StatAtomic2
- : size == 4 ? StatAtomic4
- : size == 8 ? StatAtomic8
- : StatAtomic16);
- StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
- : mo == mo_consume ? StatAtomicConsume
- : mo == mo_acquire ? StatAtomicAcquire
- : mo == mo_release ? StatAtomicRelease
- : mo == mo_acq_rel ? StatAtomicAcq_Rel
- : StatAtomicSeq_Cst);
-}
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
@@ -108,27 +76,6 @@ static bool IsAcqRelOrder(morder mo) {
return mo == mo_acq_rel || mo == mo_seq_cst;
}
-static morder ConvertOrder(morder mo) {
- if (mo > (morder)100500) {
- mo = morder(mo - 100500);
- if (mo == morder(1 << 0))
- mo = mo_relaxed;
- else if (mo == morder(1 << 1))
- mo = mo_consume;
- else if (mo == morder(1 << 2))
- mo = mo_acquire;
- else if (mo == morder(1 << 3))
- mo = mo_release;
- else if (mo == morder(1 << 4))
- mo = mo_acq_rel;
- else if (mo == morder(1 << 5))
- mo = mo_seq_cst;
- }
- CHECK_GE(mo, mo_relaxed);
- CHECK_LE(mo, mo_seq_cst);
- return mo;
-}
-
template<typename T> T func_xchg(volatile T *v, T op) {
T res = __sync_lock_test_and_set(v, op);
// __sync_lock_test_and_set does not contain full barrier.
@@ -176,50 +123,59 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
+ && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = op;
return cmp;
}
a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp + op;
return cmp;
}
a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp - op;
return cmp;
}
a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp & op;
return cmp;
}
a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp | op;
return cmp;
}
a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = cmp ^ op;
return cmp;
}
a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
a128 cmp = *v;
*v = ~(cmp & op);
return cmp;
}
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
a128 cur = *v;
if (cur == cmp)
*v = xch;
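
The 128-bit helpers above cannot assume native 16-byte atomic instructions, so every access now takes mutex128 to keep the read-modify-write indivisible. A portable sketch of the same strategy, assuming the GCC/Clang __int128 extension:

  #include <mutex>

  __extension__ typedef __int128 i128;  // compiler extension, as in this file
  static std::mutex g_wide_lock;        // plays the role of mutex128

  static i128 emulated_fetch_add(volatile i128 *v, i128 op) {
    std::lock_guard<std::mutex> lock(g_wide_lock);  // serialize all wide accesses
    i128 old = *v;
    *v = old + op;
    return old;
  }
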
@@ -241,26 +197,80 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
+#ifndef SANITIZER_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ CHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+#endif
+
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
- if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ if (!IsAcquireOrder(mo)) {
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
- return *a; // as if atomic
+ return NoTsanAtomicLoad(a, mo);
}
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
AcquireImpl(thr, pc, &s->clock);
- T v = *a;
+ T v = NoTsanAtomicLoad(a, mo);
s->mtx.ReadUnlock();
- __sync_synchronize();
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+#endif
+
+template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
@@ -269,21 +279,18 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
// Assume the access is atomic.
// Strictly speaking, even a relaxed store cuts off a release sequence,
// so we must reset the clock.
- if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
- *a = v; // as if atomic
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
return;
}
__sync_synchronize();
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
ReleaseImpl(thr, pc, &s->clock);
- *a = v;
+ NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
- // Trainling memory barrier to provide sequential consistency
- // for Dekker-like store-load synchronization.
- __sync_synchronize();
}
template<typename T, T (*F)(volatile T *v, T op)>
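
The comment in AtomicStore above, about a relaxed store cutting off a release sequence, is subtle; a standard C++ illustration: if the acquire load in reader() observes the value written by the relaxed store in spoiler(), it does not synchronize with writer(), so reading data afterwards would be a race.

  #include <atomic>

  std::atomic<int> flag{0};
  int data = 0;

  void writer()  { data = 42; flag.store(1, std::memory_order_release); }  // heads a release sequence
  void spoiler() { flag.store(2, std::memory_order_relaxed); }             // not part of that sequence
  void reader() {
    if (flag.load(std::memory_order_acquire) == 2) {
      // No synchronizes-with edge back to writer(); touching data here races.
    }
  }
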
@@ -291,7 +298,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -309,6 +316,41 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
}
template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
@@ -351,13 +393,37 @@ static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
}
template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+#endif
+
+template<typename T>
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
+}
+
+template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
+ bool write_lock = mo != mo_acquire && mo != mo_consume;
if (mo != mo_relaxed) {
- s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -370,8 +436,12 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
}
T cc = *c;
T pr = func_cas(a, cc, v);
- if (s)
- s->mtx.Unlock();
+ if (s) {
+ if (write_lock)
+ s->mtx.Unlock();
+ else
+ s->mtx.ReadUnlock();
+ }
if (pr == cc)
return true;
*c = pr;
@@ -385,293 +455,498 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
+#ifndef SANITIZER_GO
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
// FIXME(dvyukov): not implemented.
__sync_synchronize();
}
+#endif
+
+// Interface functions follow.
+#ifndef SANITIZER_GO
+
+// C/C++
+
+#define SCOPED_ATOMIC(func, ...) \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ }
+ ~ScopedAtomic() {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
char* a = 0;
SCOPED_ATOMIC(Fence, mo);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
+} // extern "C"
+
+#else // #ifndef SANITIZER_GO
+
+// Go
+
+#define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+#define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #ifndef SANITIZER_GO
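
The morder enum near the top of this file is marked as ABI ("do not change"): its values must keep matching the C++11 memory_order enumerators so that orders coming from libc++ can be passed through unchanged, which is why the old ConvertOrder shim could be deleted. A self-contained compile-time check of that assumption:

  #include <atomic>

  enum morder { mo_relaxed, mo_consume, mo_acquire, mo_release, mo_acq_rel, mo_seq_cst };

  static_assert(mo_relaxed == (int)std::memory_order_relaxed &&
                mo_consume == (int)std::memory_order_consume &&
                mo_acquire == (int)std::memory_order_acquire &&
                mo_release == (int)std::memory_order_release &&
                mo_acq_rel == (int)std::memory_order_acq_rel &&
                mo_seq_cst == (int)std::memory_order_seq_cst,
                "morder must mirror std::memory_order");
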
diff --git a/libsanitizer/tsan/tsan_interface_inl.h b/libsanitizer/tsan/tsan_interface_inl.h
index 4d6e10bcf7b..67a91aa341a 100644
--- a/libsanitizer/tsan/tsan_interface_inl.h
+++ b/libsanitizer/tsan/tsan_interface_inl.h
@@ -48,6 +48,38 @@ void __tsan_write8(void *addr) {
MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
}
+void __tsan_read1_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+}
+
void __tsan_vptr_update(void **vptr_p, void *new_val) {
CHECK_EQ(sizeof(vptr_p), 8);
if (*vptr_p != new_val) {
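
The *_pc entry points added above let a caller attribute an access to a program counter of its own choosing instead of the native return address, which suits runtimes that generate code on the fly. A hedged sketch (the helper and its synthetic-PC scheme are hypothetical):

  #include <stdint.h>

  extern "C" void __tsan_read4_pc(void *addr, void *pc);

  // Attribute a 4-byte guest load to a PC chosen by the caller, e.g. a
  // bytecode offset, rather than the native return address.
  static void record_guest_load(void *guest_addr, uintptr_t synthetic_pc) {
    __tsan_read4_pc(guest_addr, (void *)synthetic_pc);
  }
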
diff --git a/libsanitizer/tsan/tsan_interface_java.cc b/libsanitizer/tsan/tsan_interface_java.cc
index 70b5e5fcc65..934ccc3ef9c 100644
--- a/libsanitizer/tsan/tsan_interface_java.cc
+++ b/libsanitizer/tsan/tsan_interface_java.cc
@@ -20,54 +20,17 @@
using namespace __tsan; // NOLINT
-namespace __tsan {
-
-const uptr kHeapShadow = 0x300000000000ull;
-const uptr kHeapAlignment = 8;
-
-struct BlockDesc {
- bool begin;
- Mutex mtx;
- SyncVar *head;
+const jptr kHeapAlignment = 8;
- BlockDesc()
- : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
- , head() {
- CHECK_EQ(begin, false);
- begin = true;
- }
-
- ~BlockDesc() {
- CHECK_EQ(begin, true);
- begin = false;
- ThreadState *thr = cur_thread();
- SyncVar *s = head;
- while (s) {
- SyncVar *s1 = s->next;
- StatInc(thr, StatSyncDestroyed);
- s->mtx.Lock();
- s->mtx.Unlock();
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
- s = s1;
- }
- }
-};
+namespace __tsan {
struct JavaContext {
const uptr heap_begin;
const uptr heap_size;
- BlockDesc *heap_shadow;
JavaContext(jptr heap_begin, jptr heap_size)
: heap_begin(heap_begin)
, heap_size(heap_size) {
- uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
- heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
- if ((uptr)heap_shadow != kHeapShadow) {
- Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
- Die();
- }
}
};
@@ -77,13 +40,9 @@ class ScopedJavaFunc {
: thr_(thr) {
Initialize(thr_);
FuncEntry(thr, pc);
- CHECK_EQ(thr_->in_rtl, 0);
- thr_->in_rtl++;
}
~ScopedJavaFunc() {
- thr_->in_rtl--;
- CHECK_EQ(thr_->in_rtl, 0);
FuncExit(thr_);
// FIXME(dvyukov): process pending signals.
}
@@ -95,69 +54,12 @@ class ScopedJavaFunc {
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
-static BlockDesc *getblock(uptr addr) {
- uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
- return &jctx->heap_shadow[i];
-}
-
-static uptr USED getmem(BlockDesc *b) {
- uptr i = b - jctx->heap_shadow;
- uptr p = jctx->heap_begin + i * kHeapAlignment;
- CHECK_GE(p, jctx->heap_begin);
- CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
- return p;
-}
-
-static BlockDesc *getblockbegin(uptr addr) {
- for (BlockDesc *b = getblock(addr);; b--) {
- CHECK_GE(b, jctx->heap_shadow);
- if (b->begin)
- return b;
- }
- return 0;
-}
-
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create) {
- if (jctx == 0 || addr < jctx->heap_begin
- || addr >= jctx->heap_begin + jctx->heap_size)
- return 0;
- BlockDesc *b = getblockbegin(addr);
- DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
- Lock l(&b->mtx);
- SyncVar *s = b->head;
- for (; s; s = s->next) {
- if (s->addr == addr) {
- DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
- break;
- }
- }
- if (s == 0 && create) {
- DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
- s = CTX()->synctab.Create(thr, pc, addr);
- s->next = b->head;
- b->head = s;
- }
- if (s) {
- if (write_lock)
- s->mtx.Lock();
- else
- s->mtx.ReadLock();
- }
- return s;
-}
-
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
- // We do not destroy Java mutexes other than in __tsan_java_free().
- return 0;
-}
-
} // namespace __tsan
#define SCOPED_JAVA_FUNC(func) \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = GET_CALLER_PC(); \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
ScopedJavaFunc scoped(thr, caller_pc); \
/**/
@@ -194,8 +96,7 @@ void __tsan_java_alloc(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *b = getblock(ptr);
- new(b) BlockDesc();
+ OnUserAlloc(thr, pc, ptr, size, false);
}
void __tsan_java_free(jptr ptr, jptr size) {
@@ -208,12 +109,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *beg = getblock(ptr);
- BlockDesc *end = getblock(ptr + size);
- for (BlockDesc *b = beg; b != end; b++) {
- if (b->begin)
- b->~BlockDesc();
- }
+ ctx->metamap.FreeRange(thr, pc, ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
@@ -228,42 +124,36 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
CHECK_GE(dst, jctx->heap_begin);
CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
- CHECK(dst >= src + size || src >= dst + size);
+ CHECK_NE(dst, src);
+ CHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
- { // NOLINT
- BlockDesc *s = getblock(src);
- BlockDesc *d = getblock(dst);
- BlockDesc *send = getblock(src + size);
- for (; s != send; s++, d++) {
- CHECK_EQ(d->begin, false);
- if (s->begin) {
- DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
- new(d) BlockDesc;
- d->head = s->head;
- for (SyncVar *sync = d->head; sync; sync = sync->next) {
- uptr newaddr = sync->addr - src + dst;
- DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
- sync->addr = newaddr;
- }
- s->head = 0;
- s->~BlockDesc();
- }
- }
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Move shadow.
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ uptr inc = 1;
+ if (dst > src) {
+ s = (u64*)MemToShadow(src + size) - 1;
+ d = (u64*)MemToShadow(dst + size) - 1;
+ send = (u64*)MemToShadow(src) - 1;
+ inc = -1;
}
-
- { // NOLINT
- u64 *s = (u64*)MemToShadow(src);
- u64 *d = (u64*)MemToShadow(dst);
- u64 *send = (u64*)MemToShadow(src + size);
- for (; s != send; s++, d++) {
- *d = *s;
- *s = 0;
- }
+ for (; s != send; s += inc, d += inc) {
+ *d = *s;
+ *s = 0;
}
}
+void __tsan_java_finalize() {
+ SCOPED_JAVA_FUNC(__tsan_java_finalize);
+ DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
+ AcquireGlobal(thr, 0);
+}
+
void __tsan_java_mutex_lock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
@@ -327,3 +217,33 @@ int __tsan_java_mutex_unlock_rec(jptr addr) {
return MutexUnlock(thr, pc, addr, true);
}
+
+void __tsan_java_acquire(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Acquire(thr, caller_pc, addr);
+}
+
+void __tsan_java_release(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_release);
+ DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Release(thr, caller_pc, addr);
+}
+
+void __tsan_java_release_store(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_release);
+ DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ ReleaseStore(thr, caller_pc, addr);
+}
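
__tsan_java_move now accepts overlapping ranges, so the shadow copy above chooses its direction the way memmove does: forward when the destination lies below the source, backward otherwise, clearing the source as it goes. The same idea in plain C++ (dst == src is excluded by the caller, as in __tsan_java_move):

  #include <stddef.h>

  // Moves n words from src to dst, correct even for overlapping ranges,
  // and zeroes the source words behind it (mirroring the shadow move).
  static void move_and_clear(unsigned long long *dst, unsigned long long *src,
                             size_t n) {
    if (dst < src) {
      for (size_t i = 0; i < n; i++) { dst[i] = src[i]; src[i] = 0; }
    } else {
      for (size_t i = n; i-- > 0; ) { dst[i] = src[i]; src[i] = 0; }
    }
  }
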
diff --git a/libsanitizer/tsan/tsan_interface_java.h b/libsanitizer/tsan/tsan_interface_java.h
index 885ff289751..04e52031dd3 100644
--- a/libsanitizer/tsan/tsan_interface_java.h
+++ b/libsanitizer/tsan/tsan_interface_java.h
@@ -48,8 +48,13 @@ void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
// Callback for memory move by GC.
// Can be aggregated for several objects (preferably).
-// The ranges must not overlap.
+// The ranges can overlap.
void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures the necessary synchronization between
+// Java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
@@ -72,6 +77,14 @@ void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
// the same recursion level.
int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
+// Raw acquire/release primitives.
+// Can be used to establish happens-before edges on volatile/final fields,
+// in atomic operations, etc. release_store is the same as release, but it
+// breaks the release sequence on addr (see C++ standard 1.10/7 for details).
+void __tsan_java_acquire(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release_store(jptr addr) INTERFACE_ATTRIBUTE;
+
#ifdef __cplusplus
} // extern "C"
#endif
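
A hedged sketch of how a VM might use the new raw primitives to model a Java volatile store/load pair; vm_store_field and vm_load_field are hypothetical helpers, and the field accesses themselves would still be reported to TSan through the usual read/write callbacks:

  #include "tsan_interface_java.h"

  // Hypothetical VM helpers that perform the actual field access.
  extern void vm_store_field(jptr addr, long value);
  extern long vm_load_field(jptr addr);

  static void vm_volatile_store(jptr addr, long value) {
    __tsan_java_release(addr);    // publish everything this thread did so far
    vm_store_field(addr, value);
  }

  static long vm_volatile_load(jptr addr) {
    long v = vm_load_field(addr);
    __tsan_java_acquire(addr);    // acquire what the releasing thread published
    return v;
  }
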
diff --git a/libsanitizer/tsan/tsan_libdispatch_mac.cc b/libsanitizer/tsan/tsan_libdispatch_mac.cc
new file mode 100644
index 00000000000..5b39665d5d2
--- /dev/null
+++ b/libsanitizer/tsan/tsan_libdispatch_mac.cc
@@ -0,0 +1,70 @@
+//===-- tsan_libdispatch_mac.cc -------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific libdispatch (GCD) support.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+#include <dispatch/dispatch.h>
+#include <pthread.h>
+
+namespace __tsan {
+
+// GCD's dispatch_once implementation has a fast path that contains a racy read
+// and is inlined into the user's code. Furthermore, this fast path doesn't
+// establish a proper happens-before relation between the initialization and
+// the code following the call to dispatch_once. We could deal with this in
+// instrumented code, but there's not much we can do about it in system
+// libraries. Let's disable the fast path (by never storing the value ~0 to
+// predicate), so the interceptor is always called, and let's add proper release
+// and acquire semantics. Since TSan does not see its own atomic stores, the
+// race on predicate won't be reported - the only accesses to it that TSan sees
+// are the loads on the fast path. Loads don't race. Finally, dispatch_once is
+// both a macro and a real function; we want to intercept the function, so we
+// need to undefine the macro.
+#undef dispatch_once
+TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
+ dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_once, predicate, block);
+ atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 &&
+ atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) {
+ block();
+ Release(thr, pc, (uptr)a);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ Acquire(thr, pc, (uptr)a);
+ }
+}
+
+#undef dispatch_once_f
+TSAN_INTERCEPTOR(void, dispatch_once_f, dispatch_once_t *predicate,
+ void *context, dispatch_function_t function) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_once_f, predicate, context, function);
+ WRAP(dispatch_once)(predicate, ^(void) {
+ function(context);
+ });
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
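
Stripped of the TSan-specific Release/Acquire calls, the interceptor above is the classic three-state once pattern (0 = not started, 1 = running, 2 = done). A self-contained version in standard C++ for comparison:

  #include <atomic>
  #include <thread>

  static void call_once_3state(std::atomic<unsigned> &pred, void (*init)()) {
    unsigned v = pred.load(std::memory_order_acquire);
    if (v == 0 &&
        pred.compare_exchange_strong(v, 1, std::memory_order_relaxed)) {
      init();                                        // run the initializer once
      pred.store(2, std::memory_order_release);      // publish "done"
    } else {
      while (pred.load(std::memory_order_acquire) != 2)
        std::this_thread::yield();                   // wait for the winner
    }
  }
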
diff --git a/libsanitizer/tsan/tsan_malloc_mac.cc b/libsanitizer/tsan/tsan_malloc_mac.cc
new file mode 100644
index 00000000000..97090773ca7
--- /dev/null
+++ b/libsanitizer/tsan/tsan_malloc_mac.cc
@@ -0,0 +1,67 @@
+//===-- tsan_malloc_mac.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "tsan_interceptors.h"
+#include "tsan_stack_trace.h"
+
+using namespace __tsan;
+#define COMMON_MALLOC_ZONE_NAME "tsan"
+#define COMMON_MALLOC_ENTER()
+#define COMMON_MALLOC_SANITIZER_INITIALIZED (cur_thread()->is_inited)
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ void *p = \
+ user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
+#define COMMON_MALLOC_MALLOC(size) \
+ if (cur_thread()->in_symbolizer) \
+ return REAL(malloc)(size); \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ void *p = user_alloc(thr, pc, size)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ if (cur_thread()->in_symbolizer) \
+ return REAL(realloc)(ptr, size); \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ void *p = user_realloc(thr, pc, ptr, size)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ if (cur_thread()->in_symbolizer) \
+ return REAL(calloc)(count, size); \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ void *p = user_calloc(thr, pc, size, count)
+#define COMMON_MALLOC_VALLOC(size) \
+ if (cur_thread()->in_symbolizer) \
+ return REAL(valloc)(size); \
+ SCOPED_INTERCEPTOR_RAW(valloc, size); \
+ void *p = user_alloc(thr, pc, size, GetPageSizeCached())
+#define COMMON_MALLOC_FREE(ptr) \
+ if (cur_thread()->in_symbolizer) \
+ return REAL(free)(ptr); \
+ SCOPED_INTERCEPTOR_RAW(free, ptr); \
+ user_free(thr, pc, ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = user_alloc_usable_size(ptr);
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_IGNORE_INVALID_FREE false
+#define COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("free_common(%p) -- attempting to free unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __tsan
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif
diff --git a/libsanitizer/tsan/tsan_md5.cc b/libsanitizer/tsan/tsan_md5.cc
index 883239c2e71..f299dfc59b6 100644
--- a/libsanitizer/tsan/tsan_md5.cc
+++ b/libsanitizer/tsan/tsan_md5.cc
@@ -23,7 +23,7 @@ namespace __tsan {
(a) += (b);
#define SET(n) \
- (*(MD5_u32plus *)&ptr[(n) * 4])
+ (*(const MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
@@ -37,13 +37,11 @@ typedef struct {
MD5_u32plus block[16];
} MD5_CTX;
-static void *body(MD5_CTX *ctx, void *data, ulong_t size) {
- unsigned char *ptr;
+static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) {
+ const unsigned char *ptr = (const unsigned char *)data;
MD5_u32plus a, b, c, d;
MD5_u32plus saved_a, saved_b, saved_c, saved_d;
- ptr = (unsigned char*)data;
-
a = ctx->a;
b = ctx->b;
c = ctx->c;
@@ -149,7 +147,7 @@ void MD5_Init(MD5_CTX *ctx) {
ctx->hi = 0;
}
-void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) {
+void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) {
MD5_u32plus saved_lo;
ulong_t used, free;
@@ -169,7 +167,7 @@ void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) {
}
internal_memcpy(&ctx->buffer[used], data, free);
- data = (unsigned char *)data + free;
+ data = (const unsigned char *)data + free;
size -= free;
body(ctx, ctx->buffer, 64);
}
@@ -236,7 +234,7 @@ MD5Hash md5_hash(const void *data, uptr size) {
MD5Hash res;
MD5_CTX ctx;
MD5_Init(&ctx);
- MD5_Update(&ctx, (void*)data, size);
+ MD5_Update(&ctx, data, size);
MD5_Final((unsigned char*)&res.hash[0], &ctx);
return res;
}
diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc
index 832374becf5..c8a5a6a264a 100644
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc
@@ -8,6 +8,7 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
@@ -16,49 +17,40 @@
#include "tsan_flags.h"
// May be overriden by front-end.
-extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
+extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-extern "C" void WEAK __tsan_free_hook(void *ptr) {
+extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
namespace __tsan {
-COMPILER_CHECK(sizeof(MBlock) == 16);
-
-void MBlock::Lock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- for (int iter = 0;; iter++) {
- if (v & 1) {
- if (iter < 10)
- proc_yield(20);
- else
- internal_sched_yield();
- v = atomic_load(a, memory_order_relaxed);
- continue;
- }
- if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
- break;
- }
-}
-
-void MBlock::Unlock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- DCHECK(v & 1);
- atomic_store(a, v & ~1, memory_order_relaxed);
-}
-
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
DontNeedShadowFor(p, size);
+ // Mark the corresponding meta shadow memory as not needed.
+ // Note the block does not contain any meta info at this point
+ // (this happens after free).
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ // Block came from LargeMmapAllocator, so must be large.
+ // We rely on this in the calculations below.
+ CHECK_GE(size, 2 * kPageSize);
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ p += diff;
+ size -= diff;
+ }
+ diff = p + size - RoundDown(p + size, kPageSize);
+ if (diff != 0)
+ size -= diff;
+ FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
}
};
@@ -68,7 +60,7 @@ Allocator *allocator() {
}
void InitializeAllocator() {
- allocator()->Init();
+ allocator()->Init(common_flags()->allocator_may_return_null);
}
void AllocatorThreadStart(ThreadState *thr) {
@@ -86,66 +78,67 @@ void AllocatorPrintStats() {
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
- if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
+ if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
+ !flags()->report_signal_unsafe)
+ return;
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
+ if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
return;
- Context *ctx = CTX();
- StackTrace stack;
- stack.ObtainCurrent(thr, pc);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
- if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
- }
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
- CHECK_GT(thr->in_rtl, 0);
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return AllocatorReturnNull();
+ return allocator()->ReturnNullOrDie();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
- MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
- b->Init(sz, thr->tid, CurrentStackId(thr, pc));
- if (CTX() && CTX()->initialized) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
- else
- MemoryResetRange(thr, pc, (uptr)p, sz);
- }
- DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
- SignalUnsafeCall(thr, pc);
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
return p;
}
-void user_free(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
- CHECK_NE(p, (void*)0);
- DPrintf("#%d: free(%p)\n", thr->tid, p);
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- if (b->ListHead()) {
- MBlock::ScopedLock l(b);
- for (SyncVar *s = b->ListHead(); s;) {
- SyncVar *res = s;
- s = s->next;
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- DestroyAndFree(res);
- }
- b->ListReset();
- }
- if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
- }
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (CallocShouldReturnNullDueToOverflow(size, n))
+ return allocator()->ReturnNullOrDie();
+ void *p = user_alloc(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p);
- SignalUnsafeCall(thr, pc);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- CHECK_GT(thr->in_rtl, 0);
void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
@@ -154,9 +147,8 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
if (p2 == 0)
return 0;
if (p) {
- MBlock *b = user_mblock(thr, p);
- CHECK_NE(b, 0);
- internal_memcpy(p2, p, min(b->Size(), sz));
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
}
}
if (p)
@@ -164,43 +156,29 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
return p2;
}
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
- CHECK_GT(thr->in_rtl, 0);
+uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b ? b->Size() : 0;
-}
-
-MBlock *user_mblock(ThreadState *thr, void *p) {
- CHECK_NE(p, 0);
- Allocator *a = allocator();
- void *b = a->GetBlockBegin(p);
- if (b == 0)
- return 0;
- return (MBlock*)a->GetMetaData(b);
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ return b ? b->siz : 0;
}
void invoke_malloc_hook(void *ptr, uptr size) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_malloc_hook(ptr, size);
+ __sanitizer_malloc_hook(ptr, size);
}
void invoke_free_hook(void *ptr) {
- Context *ctx = CTX();
ThreadState *thr = cur_thread();
- if (ctx == 0 || !ctx->initialized || thr->in_rtl)
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_free_hook(ptr);
+ __sanitizer_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
- CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
@@ -210,7 +188,6 @@ void *internal_alloc(MBlockType typ, uptr sz) {
void internal_free(void *p) {
ThreadState *thr = cur_thread();
- CHECK_GT(thr->in_rtl, 0);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
@@ -223,51 +200,42 @@ void internal_free(void *p) {
using namespace __tsan;
extern "C" {
-uptr __tsan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
-uptr __tsan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
-uptr __tsan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
return 1;
}
-uptr __tsan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 1;
}
-uptr __tsan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-bool __tsan_get_ownership(void *p) {
+int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
-uptr __tsan_get_allocated_size(void *p) {
- if (p == 0)
- return 0;
- p = allocator()->GetBlockBegin(p);
- if (p == 0)
- return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b->Size();
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
}
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
allocator()->SwallowCache(&thr->alloc_cache);
internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+ ctx->metamap.OnThreadIdle(thr);
}
} // extern "C"
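
With the hunk above, the weak hooks move from the tool-specific __tsan_malloc_hook/__tsan_free_hook names to the common __sanitizer_malloc_hook/__sanitizer_free_hook ones. Assuming the public <sanitizer/allocator_interface.h> header is available in the include path, a program can observe TSan-tracked allocations by providing strong definitions, roughly:

  #include <sanitizer/allocator_interface.h>   // declares the weak hook interface
  #include <cstdio>

  // Strong definitions override the weak defaults in the TSan runtime.
  // Kept trivial on purpose: real hooks must not allocate themselves.
  extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
    std::fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
  }

  extern "C" void __sanitizer_free_hook(void *ptr) {
    std::fprintf(stderr, "free  %p\n", ptr);
  }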
diff --git a/libsanitizer/tsan/tsan_mman.h b/libsanitizer/tsan/tsan_mman.h
index 90faaffa1fc..a7c91fae5e1 100644
--- a/libsanitizer/tsan/tsan_mman.h
+++ b/libsanitizer/tsan/tsan_mman.h
@@ -18,21 +18,20 @@ namespace __tsan {
const uptr kDefaultAlignment = 16;
void InitializeAllocator();
+void ReplaceSystemMalloc();
void AllocatorThreadStart(ThreadState *thr);
void AllocatorThreadFinish(ThreadState *thr);
void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment);
+ uptr align = kDefaultAlignment, bool signal = true);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
// Does not accept NULL.
-void user_free(ThreadState *thr, uptr pc, void *p);
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
-// Given the pointer p into a valid allocated block,
-// returns the descriptor of the block.
-MBlock *user_mblock(ThreadState *thr, void *p);
+uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.
void invoke_malloc_hook(void *ptr, uptr size);
@@ -60,7 +59,6 @@ enum MBlockType {
MBlockSuppression,
MBlockExpectRace,
MBlockSignal,
- MBlockFD,
MBlockJmpBuf,
// This must be the last.
diff --git a/libsanitizer/tsan/tsan_mutex.cc b/libsanitizer/tsan/tsan_mutex.cc
index e7846c53e4a..3d3f6cc82b9 100644
--- a/libsanitizer/tsan/tsan_mutex.cc
+++ b/libsanitizer/tsan/tsan_mutex.cc
@@ -23,28 +23,31 @@ namespace __tsan {
// then Report mutex can be locked while under Threads mutex.
// The leaf mutexes can be locked under any other mutexes.
// Recursive locking is not supported.
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
/*1 MutexTypeTrace*/ {MutexTypeLeaf},
/*2 MutexTypeThreads*/ {MutexTypeReport},
- /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
+ /*3 MutexTypeReport*/ {MutexTypeSyncVar,
MutexTypeMBlock, MutexTypeJavaMBlock},
- /*4 MutexTypeSyncVar*/ {},
- /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
+ /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
+ /*5 MutexTypeSyncTab*/ {}, // unused
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
/*7 MutexTypeAnnotations*/ {},
- /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
/*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
/*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+ /*11 MutexTypeDDetector*/ {},
+ /*12 MutexTypeFired*/ {MutexTypeLeaf},
+ /*13 MutexTypeRacy*/ {MutexTypeLeaf},
};
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
void InitializeMutex() {
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
// Build the "can lock" adjacency matrix.
// If [i][j]==true, then one can lock mutex j while under mutex i.
const int N = MutexTypeCount;
@@ -121,12 +124,12 @@ void InitializeMutex() {
#endif
}
-DeadlockDetector::DeadlockDetector() {
+InternalDeadlockDetector::InternalDeadlockDetector() {
// Rely on zero initialization because some mutexes can be locked before ctor.
}
-#if TSAN_DEBUG && !TSAN_GO
-void DeadlockDetector::Lock(MutexType t) {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+void InternalDeadlockDetector::Lock(MutexType t) {
// Printf("LOCK %d @%zu\n", t, seq_ + 1);
CHECK_GT(t, MutexTypeInvalid);
CHECK_LT(t, MutexTypeCount);
@@ -153,13 +156,25 @@ void DeadlockDetector::Lock(MutexType t) {
}
}
-void DeadlockDetector::Unlock(MutexType t) {
+void InternalDeadlockDetector::Unlock(MutexType t) {
// Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
CHECK(locked_[t]);
locked_[t] = 0;
}
+
+void InternalDeadlockDetector::CheckNoLocks() {
+ for (int i = 0; i != MutexTypeCount; i++) {
+ CHECK_EQ(locked_[i], 0);
+ }
+}
#endif
+void CheckNoLocks(ThreadState *thr) {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ thr->internal_deadlock_detector.CheckNoLocks();
+#endif
+}
+
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
const uptr kReadLock = 2;
@@ -193,7 +208,7 @@ class Backoff {
Mutex::Mutex(MutexType type, StatType stat_type) {
CHECK_GT(type, MutexTypeInvalid);
CHECK_LT(type, MutexTypeCount);
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
type_ = type;
#endif
#if TSAN_COLLECT_STATS
@@ -207,8 +222,8 @@ Mutex::~Mutex() {
}
void Mutex::Lock() {
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
@@ -219,7 +234,7 @@ void Mutex::Lock() {
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -232,14 +247,14 @@ void Mutex::Unlock() {
uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
(void)prev;
DCHECK_NE(prev & kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Lock(type_);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
@@ -247,7 +262,7 @@ void Mutex::ReadLock() {
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -260,8 +275,8 @@ void Mutex::ReadUnlock() {
(void)prev;
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
- cur_thread()->deadlock_detector.Unlock(type_);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
diff --git a/libsanitizer/tsan/tsan_mutex.h b/libsanitizer/tsan/tsan_mutex.h
index 6d145059330..9960eee9bc8 100644
--- a/libsanitizer/tsan/tsan_mutex.h
+++ b/libsanitizer/tsan/tsan_mutex.h
@@ -29,6 +29,9 @@ enum MutexType {
MutexTypeAtExit,
MutexTypeMBlock,
MutexTypeJavaMBlock,
+ MutexTypeDDetector,
+ MutexTypeFired,
+ MutexTypeRacy,
// This must be the last.
MutexTypeCount
@@ -49,7 +52,7 @@ class Mutex {
private:
atomic_uintptr_t state_;
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
MutexType type_;
#endif
#if TSAN_COLLECT_STATS
@@ -63,11 +66,12 @@ class Mutex {
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
-class DeadlockDetector {
+class InternalDeadlockDetector {
public:
- DeadlockDetector();
+ InternalDeadlockDetector();
void Lock(MutexType t);
void Unlock(MutexType t);
+ void CheckNoLocks();
private:
u64 seq_;
u64 locked_[MutexTypeCount];
@@ -75,6 +79,10 @@ class DeadlockDetector {
void InitializeMutex();
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
} // namespace __tsan
#endif // TSAN_MUTEX_H
diff --git a/libsanitizer/tsan/tsan_mutexset.h b/libsanitizer/tsan/tsan_mutexset.h
index df36b462c60..d5406ae9f12 100644
--- a/libsanitizer/tsan/tsan_mutexset.h
+++ b/libsanitizer/tsan/tsan_mutexset.h
@@ -36,19 +36,24 @@ class MutexSet {
uptr Size() const;
Desc Get(uptr i) const;
+ void operator=(const MutexSet &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ }
+
private:
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
void RemovePos(uptr i);
+ MutexSet(const MutexSet&);
};
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
diff --git a/libsanitizer/tsan/tsan_new_delete.cc b/libsanitizer/tsan/tsan_new_delete.cc
new file mode 100644
index 00000000000..ba8474e0714
--- /dev/null
+++ b/libsanitizer/tsan/tsan_new_delete.cc
@@ -0,0 +1,94 @@
+//===-- tsan_new_delete.cc ----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interceptors.h"
+
+using namespace __tsan; // NOLINT
+
+namespace std {
+struct nothrow_t {};
+} // namespace std
+
+DECLARE_REAL(void *, malloc, uptr size)
+DECLARE_REAL(void, free, void *ptr)
+#if SANITIZER_MAC
+#define __libc_malloc REAL(malloc)
+#define __libc_free REAL(free)
+#endif
+
+#define OPERATOR_NEW_BODY(mangled_name) \
+ if (cur_thread()->in_symbolizer) \
+ return __libc_malloc(size); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_alloc(thr, pc, size); \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size);
+void *operator new(__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znwm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size);
+void *operator new[](__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znam);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t);
+}
+
+#define OPERATOR_DELETE_BODY(mangled_name) \
+ if (ptr == 0) return; \
+ if (cur_thread()->in_symbolizer) \
+ return __libc_free(ptr); \
+ invoke_free_hook(ptr); \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
+ user_free(thr, pc, ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT;
+void operator delete(void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT;
+void operator delete[](void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&);
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&);
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t);
+}
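
The names handed to OPERATOR_NEW_BODY/OPERATOR_DELETE_BODY above (_Znwm, _Znam, _ZdlPv, _ZdaPv and their nothrow variants) are the Itanium-ABI mangled symbols of the corresponding operators, which is what SCOPED_INTERCEPTOR_RAW records. A quick self-contained way to confirm the correspondence is to demangle them with the C++ ABI helper:

  #include <cxxabi.h>
  #include <cstdio>
  #include <cstdlib>

  int main() {
    const char *names[] = { "_Znwm", "_Znam", "_ZdlPv", "_ZdaPv",
                            "_ZnwmRKSt9nothrow_t", "_ZdlPvRKSt9nothrow_t" };
    for (const char *n : names) {
      int status = 0;
      char *d = abi::__cxa_demangle(n, nullptr, nullptr, &status);
      // e.g. prints: _Znwm -> operator new(unsigned long)
      std::printf("%-22s -> %s\n", n, status == 0 ? d : "(demangle failed)");
      std::free(d);
    }
    return 0;
  }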
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index 164ee45d6c6..f34f577ec03 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -10,151 +10,338 @@
// Platform-specific code.
//===----------------------------------------------------------------------===//
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !defined(SANITIZER_GO)
+
+#if defined(__x86_64__)
/*
-C++ linux memory layout:
-0000 0000 0000 - 03c0 0000 0000: protected
-03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 6000 0000 0000: protected
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
-
-C++ COMPAT linux memory layout:
-0000 0000 0000 - 0400 0000 0000: protected
-0400 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 2900 0000 0000: protected
-2900 0000 0000 - 2c00 0000 0000: modules
-2c00 0000 0000 - 6000 0000 0000: -
+C/C++ on linux/x86_64 and freebsd/x86_64
+0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
+0100 0000 0000 - 0200 0000 0000: -
+0200 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7f00 0000 0000: -
-7f00 0000 0000 - 7fff ffff ffff: main thread stack
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x020000000000ull;
+const uptr kShadowEnd = 0x100000000000ull;
+const uptr kHeapMemBeg = 0x7d0000000000ull;
+const uptr kHeapMemEnd = 0x7e0000000000ull;
+const uptr kLoAppMemBeg = 0x000000001000ull;
+const uptr kLoAppMemEnd = 0x010000000000ull;
+const uptr kHiAppMemBeg = 0x7e8000000000ull;
+const uptr kHiAppMemEnd = 0x800000000000ull;
+const uptr kAppMemMsk = 0x7c0000000000ull;
+const uptr kAppMemXor = 0x020000000000ull;
+const uptr kVdsoBeg = 0xf000000000000000ull;
+#elif defined(__mips64)
+/*
+C/C++ on linux/mips64
+0100 0000 00 - 0200 0000 00: main binary
+0200 0000 00 - 1400 0000 00: -
+1400 0000 00 - 2400 0000 00: shadow
+2400 0000 00 - 3000 0000 00: -
+3000 0000 00 - 4000 0000 00: metainfo (memory blocks and sync objects)
+4000 0000 00 - 6000 0000 00: -
+6000 0000 00 - 6200 0000 00: traces
+6200 0000 00 - fe00 0000 00: -
+fe00 0000 00 - ff00 0000 00: heap
+ff00 0000 00 - ff80 0000 00: -
+ff80 0000 00 - ffff ffff ff: modules and main thread stack
+*/
+const uptr kMetaShadowBeg = 0x3000000000ull;
+const uptr kMetaShadowEnd = 0x4000000000ull;
+const uptr kTraceMemBeg = 0x6000000000ull;
+const uptr kTraceMemEnd = 0x6200000000ull;
+const uptr kShadowBeg = 0x1400000000ull;
+const uptr kShadowEnd = 0x2400000000ull;
+const uptr kHeapMemBeg = 0xfe00000000ull;
+const uptr kHeapMemEnd = 0xff00000000ull;
+const uptr kLoAppMemBeg = 0x0100000000ull;
+const uptr kLoAppMemEnd = 0x0200000000ull;
+const uptr kHiAppMemBeg = 0xff80000000ull;
+const uptr kHiAppMemEnd = 0xffffffffffull;
+const uptr kAppMemMsk = 0xfc00000000ull;
+const uptr kAppMemXor = 0x0400000000ull;
+const uptr kVdsoBeg = 0xfffff00000ull;
+#elif defined(__aarch64__)
+# if SANITIZER_AARCH64_VMA == 39
+/*
+C/C++ on linux/aarch64 (39-bit VMA)
+0000 4000 00 - 0200 0000 00: main binary
+2000 0000 00 - 4000 0000 00: shadow memory
+4000 0000 00 - 5000 0000 00: metainfo
+5000 0000 00 - 6000 0000 00: -
+6000 0000 00 - 6200 0000 00: traces
+6200 0000 00 - 7d00 0000 00: -
+7d00 0000 00 - 7e00 0000 00: heap
+7e00 0000 00 - 7fff ffff ff: modules and main thread stack
+*/
+const uptr kLoAppMemBeg = 0x0000400000ull;
+const uptr kLoAppMemEnd = 0x0200000000ull;
+const uptr kShadowBeg = 0x2000000000ull;
+const uptr kShadowEnd = 0x4000000000ull;
+const uptr kMetaShadowBeg = 0x4000000000ull;
+const uptr kMetaShadowEnd = 0x5000000000ull;
+const uptr kTraceMemBeg = 0x6000000000ull;
+const uptr kTraceMemEnd = 0x6200000000ull;
+const uptr kHeapMemBeg = 0x7d00000000ull;
+const uptr kHeapMemEnd = 0x7e00000000ull;
+const uptr kHiAppMemBeg = 0x7e00000000ull;
+const uptr kHiAppMemEnd = 0x7fffffffffull;
+const uptr kAppMemMsk = 0x7800000000ull;
+const uptr kAppMemXor = 0x0800000000ull;
+const uptr kVdsoBeg = 0x7f00000000ull;
+# elif SANITIZER_AARCH64_VMA == 42
+/*
+C/C++ on linux/aarch64 (42-bit VMA)
+00000 4000 00 - 01000 0000 00: main binary
+01000 0000 00 - 10000 0000 00: -
+10000 0000 00 - 20000 0000 00: shadow memory
+20000 0000 00 - 26000 0000 00: -
+26000 0000 00 - 28000 0000 00: metainfo
+28000 0000 00 - 36200 0000 00: -
+36200 0000 00 - 36240 0000 00: traces
+36240 0000 00 - 3e000 0000 00: -
+3e000 0000 00 - 3f000 0000 00: heap
+3f000 0000 00 - 3ff00 0000 00: -
+3ff00 0000 00 - 3ffff f000 00: modules and main thread stack
+*/
+const uptr kLoAppMemBeg = 0x00000400000ull;
+const uptr kLoAppMemEnd = 0x01000000000ull;
+const uptr kShadowBeg = 0x10000000000ull;
+const uptr kShadowEnd = 0x20000000000ull;
+const uptr kMetaShadowBeg = 0x26000000000ull;
+const uptr kMetaShadowEnd = 0x28000000000ull;
+const uptr kTraceMemBeg = 0x36200000000ull;
+const uptr kTraceMemEnd = 0x36400000000ull;
+const uptr kHeapMemBeg = 0x3e000000000ull;
+const uptr kHeapMemEnd = 0x3f000000000ull;
+const uptr kHiAppMemBeg = 0x3ff00000000ull;
+const uptr kHiAppMemEnd = 0x3fffff00000ull;
+const uptr kAppMemMsk = 0x3c000000000ull;
+const uptr kAppMemXor = 0x04000000000ull;
+const uptr kVdsoBeg = 0x37f00000000ull;
+# endif
+#endif
-Go linux and darwin memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
-00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7fff ffff ffff: -
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
+ (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
+ (mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
+}
-Go windows memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
+ ^ kAppMemXor) * kShadowCnt;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
+ ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
+ return (s / kShadowCnt) ^ kAppMemXor;
+ else
+ return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
+}
+
+static USED uptr UserRegions[] = {
+ kLoAppMemBeg, kLoAppMemEnd,
+ kHiAppMemBeg, kHiAppMemEnd,
+ kHeapMemBeg, kHeapMemEnd,
+};
+
+#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
+
+/* Go on linux, darwin and freebsd
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0560 0000 0000: shadow
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07ff ffff ffff: -
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
*/
-#ifndef TSAN_PLATFORM_H
-#define TSAN_PLATFORM_H
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x200000000000ull;
+const uptr kShadowEnd = 0x238000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
-#include "tsan_defs.h"
-#include "tsan_trace.h"
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
-#if defined(__LP64__) || defined(_WIN64)
-namespace __tsan {
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
-#if defined(TSAN_GO)
-static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
-# if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-# else
-static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-# endif
-// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
-// when memory addresses are of the 0x2axxxxxxxxxx form.
-// The option is enabled with 'setarch x86_64 -L'.
-#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#else
-static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#endif
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
-static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
-#if SANITIZER_WINDOWS
-const uptr kTraceMemBegin = 0x056000000000ULL;
-#else
-const uptr kTraceMemBegin = 0x600000000000ULL;
-#endif
-const uptr kTraceMemSize = 0x020000000000ULL;
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
-// This has to be a macro to allow constant initialization of constants below.
-#ifndef TSAN_GO
-#define MemToShadow(addr) \
- (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
-#else
-#define MemToShadow(addr) \
- ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
-#endif
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
-static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
-static const uptr kLinuxShadowEnd =
- MemToShadow(kLinuxAppMemEnd) | 0xff;
+#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
-static inline bool IsAppMem(uptr mem) {
- return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0500 0000 0000: shadow
+0500 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+const uptr kMetaShadowBeg = 0x076000000000ull;
+const uptr kMetaShadowEnd = 0x07d000000000ull;
+const uptr kTraceMemBeg = 0x056000000000ull;
+const uptr kTraceMemEnd = 0x076000000000ull;
+const uptr kShadowBeg = 0x010000000000ull;
+const uptr kShadowEnd = 0x050000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
}
-static inline bool IsShadowMem(uptr mem) {
- return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
}
-static inline uptr ShadowToMem(uptr shadow) {
- CHECK(IsShadowMem(shadow));
-#ifdef TSAN_GO
- return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
-#else
- return (shadow / kShadowCnt) | kLinuxAppMemMsk;
-#endif
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
}
-// For COMPAT mapping returns an alternative address
-// that mapped to the same shadow address.
-// COMPAT mapping is not quite one-to-one.
-static inline uptr AlternativeAddress(uptr addr) {
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
- return (addr & ~kLinuxAppMemMsk) | 0x280000000000ULL;
-#else
- return 0;
-#endif
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) + kShadowBeg;
}
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size);
-uptr GetRSS();
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
-const char *InitializePlatform();
-void FinalizePlatform();
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
+ return (s - kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
+
+#else
+# error "Unknown platform"
+#endif
// The additional page is to catch shadow stack overflow as paging fault.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) + 4096
- + 4095) & ~4095;
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
-void internal_start_thread(void(*func)(void*), void *arg);
+void InitializePlatform();
+void CheckAndProtect();
+void InitializeShadowMemoryPlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
// Says whether the addr relates to a global var.
// Guesses with high probability, may yield both false positives and negatives.
@@ -162,10 +349,12 @@ bool IsGlobalVar(uptr addr);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
-} // namespace __tsan
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg);
-#else // defined(__LP64__) || defined(_WIN64)
-# error "Only 64-bit is supported"
-#endif
+void DestroyThreadState();
+
+} // namespace __tsan
#endif // TSAN_PLATFORM_H
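
The non-Go x86_64 mapping above is pure bit arithmetic: mask off the app-region bits and the sub-cell bits, XOR into the shadow region, and scale by the number of shadow cells per application cell. A small self-contained check of that formula; the kShadowCell/kShadowCnt values are the usual defaults from tsan_defs.h (not shown in this hunk) and the address is made up:

  #include <cstdint>
  #include <cstdio>

  // Standalone sketch of the x86_64 MemToShadow() mapping defined above,
  // assuming kShadowCell = 8 and kShadowCnt = 4 from tsan_defs.h.
  int main() {
    const uint64_t kAppMemMsk  = 0x7c0000000000ull;
    const uint64_t kAppMemXor  = 0x020000000000ull;
    const uint64_t kShadowCell = 8, kShadowCnt = 4;
    const uint64_t x = 0x7d0000001230ull;  // hypothetical heap address
    const uint64_t s =
        ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
    // Prints: app 0x7d0000001230 -> shadow 0xc00000048c0, which lands inside
    // [kShadowBeg = 0x020000000000, kShadowEnd = 0x100000000000).
    std::printf("app 0x%llx -> shadow 0x%llx\n",
                (unsigned long long)x, (unsigned long long)s);
    return 0;
  }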
diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc
index fe69430b711..a2e89f22da6 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cc
+++ b/libsanitizer/tsan/tsan_platform_linux.cc
@@ -7,17 +7,19 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Linux-specific code.
+// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
@@ -30,7 +32,6 @@
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
-#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
@@ -41,9 +42,10 @@
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
+#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
-#include <malloc.h>
+#endif
#ifdef sa_handler
# undef sa_handler
@@ -53,99 +55,81 @@
# undef sa_sigaction
#endif
-extern "C" struct mallinfo __libc_mallinfo();
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
namespace __tsan {
-const uptr kPageSize = 4096;
-
-#ifndef TSAN_GO
-ScopedInRtl::ScopedInRtl()
- : thr_(cur_thread()) {
- in_rtl_ = thr_->in_rtl;
- thr_->in_rtl++;
- errno_ = errno;
-}
+static uptr g_data_start;
+static uptr g_data_end;
-ScopedInRtl::~ScopedInRtl() {
- thr_->in_rtl--;
- errno = errno_;
- CHECK_EQ(in_rtl_, thr_->in_rtl);
-}
+enum {
+ MemTotal = 0,
+ MemShadow = 1,
+ MemMeta = 2,
+ MemFile = 3,
+ MemMmap = 4,
+ MemTrace = 5,
+ MemHeap = 6,
+ MemOther = 7,
+ MemCount = 8,
+};
+
+void FillProfileCallback(uptr p, uptr rss, bool file,
+ uptr *mem, uptr stats_size) {
+ mem[MemTotal] += rss;
+ if (p >= kShadowBeg && p < kShadowEnd)
+ mem[MemShadow] += rss;
+ else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
+ mem[MemMeta] += rss;
+#ifndef SANITIZER_GO
+ else if (p >= kHeapMemBeg && p < kHeapMemEnd)
+ mem[MemHeap] += rss;
+ else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
#else
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
+ else if (p >= kAppMemBeg && p < kAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
#endif
-
-void FillProfileCallback(uptr start, uptr rss, bool file,
- uptr *mem, uptr stats_size) {
- CHECK_EQ(7, stats_size);
- mem[6] += rss; // total
- start >>= 40;
- if (start < 0x10) // shadow
- mem[0] += rss;
- else if (start >= 0x20 && start < 0x30) // compat modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x7e) // modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x60 && start < 0x62) // traces
- mem[3] += rss;
- else if (start >= 0x7d && start < 0x7e) // heap
- mem[4] += rss;
- else // other
- mem[5] += rss;
+ else if (p >= kTraceMemBeg && p < kTraceMemEnd)
+ mem[MemTrace] += rss;
+ else
+ mem[MemOther] += rss;
}
-void WriteMemoryProfile(char *buf, uptr buf_size) {
- uptr mem[7] = {};
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- char *buf_pos = buf;
- char *buf_end = buf + buf_size;
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
- mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
- mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
- struct mallinfo mi = __libc_mallinfo();
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
- mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20);
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+ " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+ mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+ mem[MemHeap] >> 20, mem[MemOther] >> 20,
+ stacks->allocated >> 20, stacks->n_uniq_ids,
+ nlive, nthread);
}
-uptr GetRSS() {
- uptr mem[7] = {};
- __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- return mem[6];
-}
-
-
+#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
+ FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
}
+#endif
void FlushShadowMemory() {
+#if SANITIZER_LINUX
StopTheWorld(FlushShadowMemoryCallback, 0);
-}
-
-#ifndef TSAN_GO
-static void ProtectRange(uptr beg, uptr end) {
- ScopedInRtl in_rtl;
- CHECK_LE(beg, end);
- if (beg == end)
- return;
- if (beg != (uptr)Mprotect(beg, end - beg)) {
- Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
- Printf("FATAL: Make sure you are not using unlimited stack\n");
- Die();
- }
-}
#endif
+}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -159,31 +143,32 @@ static void MapRodata() {
#endif
if (tmpdir == 0)
return;
- char filename[256];
- internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d",
+ char name[256];
+ internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
tmpdir, (int)internal_getpid());
- uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
+ uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
if (internal_iserror(openrv))
return;
+ internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
// Fill the file with kShadowRodata.
const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
InternalScopedBuffer<u64> marker(kMarkerSize);
- for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ // volatile to prevent insertion of memset
+ for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
*p = kShadowRodata;
internal_write(fd, marker.data(), marker.size());
// Map the file into memory.
- uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE,
+ uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
if (internal_iserror(page)) {
internal_close(fd);
- internal_unlink(filename);
return;
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset, prot;
- char name[128];
+ // Reusing the buffer 'name'.
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
if (name[0] != 0 && name[0] != '['
&& (prot & MemoryMappingLayout::kProtectionRead)
@@ -200,66 +185,30 @@ static void MapRodata() {
}
}
internal_close(fd);
- internal_unlink(filename);
}
-void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
- Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
- Die();
- }
- const uptr kClosedLowBeg = 0x200000;
- const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
- const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
- ProtectRange(kClosedLowBeg, kClosedLowEnd);
- ProtectRange(kClosedMidBeg, kClosedMidEnd);
- DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
- kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
- kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
- DPrintf("stack %zx\n", (uptr)&shadow);
-
+void InitializeShadowMemoryPlatform() {
MapRodata();
}
-#endif
-
-static uptr g_data_start;
-static uptr g_data_end;
-
-#ifndef TSAN_GO
-static void CheckPIE() {
- // Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps(true);
- uptr start, end;
- if (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
- if ((u64)start < kLinuxAppMemBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
- "something is mapped at 0x%zx < 0x%zx)\n",
- start, kLinuxAppMemBeg);
- Printf("FATAL: Make sure to compile with -fPIE"
- " and to link with -pie.\n");
- Die();
- }
- }
-}
static void InitDataSeg() {
MemoryMappingLayout proc_maps(true);
uptr start, end, offset;
char name[128];
+#if SANITIZER_FREEBSD
+ // On FreeBSD BSS is usually the last block allocated within the
+ // low range and heap is the last block allocated within the range
+ // 0x800000000-0x8ffffffff.
+ while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+ /*protection*/ 0)) {
+ DPrintf("%p-%p %p %s\n", start, end, offset, name);
+ if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
+ name[0] == '\0') {
+ g_data_start = start;
+ g_data_end = end;
+ }
+ }
+#else
bool prev_is_data = false;
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
/*protection*/ 0)) {
@@ -275,34 +224,17 @@ static void InitDataSeg() {
g_data_end = end;
prev_is_data = is_data;
}
+#endif
DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
CHECK_LT(g_data_start, g_data_end);
CHECK_GE((uptr)&g_data_start, g_data_start);
CHECK_LT((uptr)&g_data_start, g_data_end);
}
-#endif // #ifndef TSAN_GO
-
-static rlim_t getlim(int res) {
- rlimit rlim;
- CHECK_EQ(0, getrlimit(res, &rlim));
- return rlim.rlim_cur;
-}
-
-static void setlim(int res, rlim_t lim) {
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit rlim;
- rlim.rlim_cur = lim;
- rlim.rlim_max = lim;
- setrlimit(res, (rlimit*)&rlim);
-}
+#endif // #ifndef SANITIZER_GO
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- setlim(RLIMIT_CORE, 0);
- }
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
@@ -312,44 +244,44 @@ const char *InitializePlatform() {
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
// we re-exec the program with limited stack size as a best effort.
- if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ if (StackSizeIsUnlimited()) {
const uptr kMaxStackSize = 32 * 1024 * 1024;
- Report("WARNING: Program is run with unlimited stack size, which "
- "wouldn't work with ThreadSanitizer.\n");
- Report("Re-execing with stack size limited to %zd bytes.\n",
- kMaxStackSize);
+ VReport(1, "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
SetStackSizeLimitInBytes(kMaxStackSize);
reexec = true;
}
- if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ if (!AddressSpaceIsUnlimited()) {
Report("WARNING: Program is run with limited virtual address space,"
" which wouldn't work with ThreadSanitizer.\n");
Report("Re-execing with unlimited virtual address space.\n");
- setlim(RLIMIT_AS, -1);
+ SetAddressSpaceUnlimited();
reexec = true;
}
if (reexec)
ReExec();
}
-#ifndef TSAN_GO
- CheckPIE();
+#ifndef SANITIZER_GO
+ CheckAndProtect();
InitTlsSize();
InitDataSeg();
#endif
- return GetEnv(kTsanOptionsEnv);
}
bool IsGlobalVar(uptr addr) {
return g_data_start && addr >= g_data_start && addr < g_data_end;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX
int cnt = 0;
__res_state *statp = (__res_state*)state;
for (int i = 0; i < MAXNS && cnt < nfd; i++) {
@@ -357,6 +289,9 @@ int ExtractResolvFDs(void *state, int *fds, int nfd) {
fds[cnt++] = statp->_u._ext.nssocks[i];
}
return cnt;
+#else
+ return 0;
+#endif
}
// Extract file descriptors passed via UNIX domain sockets.
@@ -378,9 +313,26 @@ int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
}
return res;
}
+
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+ // pthread_cleanup_push/pop are hardcore macros mess.
+ // We can't intercept nor call them w/o including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
#endif
+#ifndef SANITIZER_GO
+void ReplaceSystemMalloc() { }
+#endif
} // namespace __tsan
-#endif // SANITIZER_LINUX
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
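
call_pthread_cancel_with_cleanup exists because pthread_cleanup_push and pthread_cleanup_pop are bracketing macros that must appear as a matched pair in one scope, so the runtime cannot wrap them around an arbitrary intercepted call; it passes the blocking operation and its cleanup in as callbacks instead. The underlying POSIX pattern in isolation (a hypothetical wrapper, not the TSan code):

  #include <pthread.h>

  static void unlock_cleanup(void *arg) {        // runs only if the waiter is cancelled
    pthread_mutex_unlock((pthread_mutex_t *)arg);
  }

  // Hypothetical stand-alone wrapper showing the same bracket pattern that
  // call_pthread_cancel_with_cleanup() builds around fn(c, m, abstime).
  static int wait_with_cleanup(pthread_cond_t *c, pthread_mutex_t *m) {
    int res;
    pthread_cleanup_push(unlock_cleanup, m);     // macro: opens a scope
    res = pthread_cond_wait(c, m);               // cancellation point
    pthread_cleanup_pop(0);                      // macro: closes it; 0 = do not run now
    return res;
  }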
diff --git a/libsanitizer/tsan/tsan_platform_mac.cc b/libsanitizer/tsan/tsan_platform_mac.cc
index 3dca611dc92..e1405ffeabf 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cc
+++ b/libsanitizer/tsan/tsan_platform_mac.cc
@@ -13,8 +13,10 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
+#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
@@ -38,11 +40,61 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
+#ifndef SANITIZER_GO
+static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
+ atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
+ void *val = (void *)atomic_load_relaxed(a);
+ atomic_signal_fence(memory_order_acquire); // Turns the previous load into
+ // acquire wrt signals.
+ if (UNLIKELY(val == nullptr)) {
+ val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ CHECK(val);
+ void *cmp = nullptr;
+ if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
+ memory_order_acq_rel)) {
+ internal_munmap(val, size);
+ val = cmp;
+ }
+ }
+ return val;
+}
+
+// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
+// problematic, because there are several places where interceptors are called
+// when TLVs are not accessible (early process startup, thread cleanup, ...).
+// The following provides a "poor man's TLV" implementation, where we use the
+// shadow memory of the pointer returned by pthread_self() to store a pointer to
+// the ThreadState object. The main thread's ThreadState pointer is stored
+// separately in a static variable, because we need to access it even before the
+// shadow memory is set up.
+static uptr main_thread_identity = 0;
+static ThreadState *main_thread_state = nullptr;
+
+ThreadState *cur_thread() {
+ ThreadState **fake_tls;
+ uptr thread_identity = (uptr)pthread_self();
+ if (thread_identity == main_thread_identity || main_thread_identity == 0) {
+ fake_tls = &main_thread_state;
+ } else {
+ fake_tls = (ThreadState **)MemToShadow(thread_identity);
+ }
+ ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
+ (uptr *)fake_tls, sizeof(ThreadState));
+ return thr;
}
-ScopedInRtl::~ScopedInRtl() {
+// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
+// munmap first and then clear `fake_tls`; if we receive a signal in between,
+// handler will try to access the unmapped ThreadState.
+void cur_thread_finalize() {
+ uptr thread_identity = (uptr)pthread_self();
+ CHECK_NE(thread_identity, main_thread_identity);
+ ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
+ internal_munmap(*fake_tls, sizeof(ThreadState));
+ *fake_tls = nullptr;
}
+#endif
uptr GetShadowMemoryConsumption() {
return 0;
@@ -51,41 +103,81 @@ uptr GetShadowMemoryConsumption() {
void FlushShadowMemory() {
}
-#ifndef TSAN_GO
-void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
- Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie.\n");
- Die();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+}
+
+#ifndef SANITIZER_GO
+void InitializeShadowMemoryPlatform() { }
+
+// On OS X, GCD worker threads are created without a call to pthread_create. We
+// need to properly register these threads with ThreadCreate and ThreadStart.
+// These threads don't have a parent thread, as they are created "spuriously".
+// We're using a libpthread API that notifies us about a newly created thread.
+// The `thread == pthread_self()` check indicates this is actually a worker
+// thread. If it's just a regular thread, this hook is called on the parent
+// thread.
+typedef void (*pthread_introspection_hook_t)(unsigned int event,
+ pthread_t thread, void *addr,
+ size_t size);
+extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
+ pthread_introspection_hook_t hook);
+static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
+static const uptr PTHREAD_INTROSPECTION_THREAD_DESTROY = 4;
+static pthread_introspection_hook_t prev_pthread_introspection_hook;
+static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
+ void *addr, size_t size) {
+ if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
+ if (thread == pthread_self()) {
+ // The current thread is a newly created GCD worker thread.
+ ThreadState *parent_thread_state = nullptr; // No parent.
+ int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+ CHECK_NE(tid, 0);
+ ThreadState *thr = cur_thread();
+ ThreadStart(thr, tid, GetTid());
+ }
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
+ ThreadState *thr = cur_thread();
+ if (thr->tctx && thr->tctx->parent_tid == kInvalidTid) {
+ DestroyThreadState();
+ }
}
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+
+ if (prev_pthread_introspection_hook != nullptr)
+ prev_pthread_introspection_hook(event, thread, addr, size);
}
#endif
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit lim;
- lim.rlim_cur = 0;
- lim.rlim_max = 0;
- setrlimit(RLIMIT_CORE, (rlimit*)&lim);
- }
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
+#ifndef SANITIZER_GO
+ CheckAndProtect();
+
+ CHECK_EQ(main_thread_identity, 0);
+ main_thread_identity = (uptr)pthread_self();
- return GetEnv(kTsanOptionsEnv);
+ prev_pthread_introspection_hook =
+ pthread_introspection_hook_install(&my_pthread_introspection_hook);
+#endif
+}
+
+#ifndef SANITIZER_GO
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+ // pthread_cleanup_push/pop are a hardcore mess of macros.
+ // We can't intercept or call them without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
}
+#endif
-void FinalizePlatform() {
- fflush(0);
+bool IsGlobalVar(uptr addr) {
+ return false;
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_platform_posix.cc b/libsanitizer/tsan/tsan_platform_posix.cc
new file mode 100644
index 00000000000..5e3d12e9496
--- /dev/null
+++ b/libsanitizer/tsan/tsan_platform_posix.cc
@@ -0,0 +1,122 @@
+//===-- tsan_platform_posix.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// POSIX-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_POSIX
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+#ifndef SANITIZER_GO
+void InitializeShadowMemory() {
+ // Map memory shadow.
+ uptr shadow =
+ (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow");
+ if (shadow != kShadowBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
+ Die();
+ }
+ // This memory range is used for thread stacks and large user mmaps.
+ // Frequently a thread uses only a small part of stack and similarly
+ // a program uses a small part of large mmap. On some programs
+ // we see 20% memory usage reduction without huge pages for this range.
+ // FIXME: don't use constants here.
+#if defined(__x86_64__)
+ const uptr kMadviseRangeBeg = 0x7f0000000000ull;
+ const uptr kMadviseRangeSize = 0x010000000000ull;
+#elif defined(__mips64)
+ const uptr kMadviseRangeBeg = 0xff00000000ull;
+ const uptr kMadviseRangeSize = 0x0100000000ull;
+#elif defined(__aarch64__)
+ const uptr kMadviseRangeBeg = 0x7e00000000ull;
+ const uptr kMadviseRangeSize = 0x0100000000ull;
+#endif
+ NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
+ kMadviseRangeSize * kShadowMultiplier);
+ // Meta shadow is compressing and we don't flush it,
+ // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
+ // On one program it reduces memory consumption from 5GB to 2.5GB.
+ NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
+ DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+
+ // Map meta shadow.
+ uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
+ uptr meta =
+ (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow");
+ if (meta != kMetaShadowBeg) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
+ Die();
+ }
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(meta, meta_size);
+ DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+ meta, meta + meta_size, meta_size >> 30);
+
+ InitializeShadowMemoryPlatform();
+}
+
+static void ProtectRange(uptr beg, uptr end) {
+ CHECK_LE(beg, end);
+ if (beg == end)
+ return;
+ if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
+ Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
+ Printf("FATAL: Make sure you are not using unlimited stack\n");
+ Die();
+ }
+}
+
+void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ uptr p, end, prot;
+ while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
+ if (IsAppMem(p))
+ continue;
+ if (p >= kHeapMemEnd &&
+ p < HeapEnd())
+ continue;
+ if (prot == 0) // Zero page or mprotected.
+ continue;
+ if (p >= kVdsoBeg) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+ Die();
+ }
+
+ ProtectRange(kLoAppMemEnd, kShadowBeg);
+ ProtectRange(kShadowEnd, kMetaShadowBeg);
+ ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ // Memory for traces is mapped lazily in MapThreadTrace.
+ // Protect the whole range for now, so that the user does not map something here.
+ ProtectRange(kTraceMemBeg, kTraceMemEnd);
+ ProtectRange(kTraceMemEnd, kHeapMemBeg);
+ ProtectRange(HeapEnd(), kHiAppMemBeg);
+}
+#endif
+
+} // namespace __tsan
+
+#endif // SANITIZER_POSIX
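
NoHugePagesInRegion and DontDumpShadowMemory come from sanitizer_common; as a rough illustration (an assumption about what they reduce to on Linux, not a quote of that code), they amount to madvise() hints over the shadow range. The hypothetical TuneShadowRange below shows the idea:

#include <cstddef>
#include <cstdio>
#include <sys/mman.h>

// Advise the kernel about a shadow region: no transparent huge pages (avoids
// over-allocation for sparsely used shadow) and no inclusion in core dumps.
// Both hints are best-effort, so failures are only reported, never fatal.
static void TuneShadowRange(void *beg, size_t size) {
#ifdef MADV_NOHUGEPAGE
  if (madvise(beg, size, MADV_NOHUGEPAGE) != 0)
    perror("madvise(MADV_NOHUGEPAGE)");
#endif
#ifdef MADV_DONTDUMP
  if (madvise(beg, size, MADV_DONTDUMP) != 0)
    perror("madvise(MADV_DONTDUMP)");
#endif
}
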
diff --git a/libsanitizer/tsan/tsan_platform_windows.cc b/libsanitizer/tsan/tsan_platform_windows.cc
index 6e49ef42f0c..ccea22ed66e 100644
--- a/libsanitizer/tsan/tsan_platform_windows.cc
+++ b/libsanitizer/tsan/tsan_platform_windows.cc
@@ -19,12 +19,6 @@
namespace __tsan {
-ScopedInRtl::ScopedInRtl() {
-}
-
-ScopedInRtl::~ScopedInRtl() {
-}
-
uptr GetShadowMemoryConsumption() {
return 0;
}
@@ -32,12 +26,10 @@ uptr GetShadowMemoryConsumption() {
void FlushShadowMemory() {
}
-const char *InitializePlatform() {
- return GetEnv(kTsanOptionsEnv);
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-void FinalizePlatform() {
- fflush(0);
+void InitializePlatform() {
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc
index f2484166e81..119b1ec1da9 100644
--- a/libsanitizer/tsan/tsan_report.cc
+++ b/libsanitizer/tsan/tsan_report.cc
@@ -11,13 +11,31 @@
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
namespace __tsan {
-class Decorator: private __sanitizer::AnsiColorDecorator {
+ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
+
+ReportStack *ReportStack::New() {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
+ return new(mem) ReportStack();
+}
+
+ReportLocation::ReportLocation(ReportLocationType type)
+ : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
+ fd(0), suppressable(false), stack(nullptr) {}
+
+ReportLocation *ReportLocation::New(ReportLocationType type) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
+ return new(mem) ReportLocation(type);
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Warning() { return Red(); }
const char *EndWarning() { return Default(); }
const char *Access() { return Blue(); }
@@ -38,6 +56,7 @@ ReportDesc::ReportDesc()
, locs(MBlockReportLoc)
, mutexes(MBlockReportMutex)
, threads(MBlockReportThread)
+ , unique_tids(MBlockReportThread)
, sleep()
, count() {
}
@@ -50,7 +69,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -67,30 +86,47 @@ static const char *ReportTypeString(ReportType typ) {
return "data race on vptr (ctor/dtor vs virtual call)";
if (typ == ReportTypeUseAfterFree)
return "heap-use-after-free";
+ if (typ == ReportTypeVptrUseAfterFree)
+ return "heap-use-after-free (virtual call vs free)";
if (typ == ReportTypeThreadLeak)
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
return "destroy of a locked mutex";
+ if (typ == ReportTypeMutexDoubleLock)
+ return "double lock of a mutex";
+ if (typ == ReportTypeMutexBadUnlock)
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ if (typ == ReportTypeMutexBadReadLock)
+ return "read lock of a write locked mutex";
+ if (typ == ReportTypeMutexBadReadUnlock)
+ return "read unlock of a write locked mutex";
if (typ == ReportTypeSignalUnsafe)
return "signal-unsafe call inside of a signal";
if (typ == ReportTypeErrnoInSignal)
return "signal handler spoils errno";
+ if (typ == ReportTypeDeadlock)
+ return "lock-order-inversion (potential deadlock)";
return "";
}
+#if SANITIZER_MAC
+static const char *const kInterposedFunctionPrefix = "wrap_";
+#else
+static const char *const kInterposedFunctionPrefix = "__interceptor_";
+#endif
+
void PrintStack(const ReportStack *ent) {
- if (ent == 0) {
+ if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" #%d %s %s:%d", i, ent->func, ent->file, ent->line);
- if (ent->col)
- Printf(":%d", ent->col);
- if (ent->module && ent->offset)
- Printf(" (%s+%p)\n", ent->module, (void*)ent->offset);
- else
- Printf(" (%p)\n", (void*)ent->pc);
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
+ InternalScopedString res(2 * GetPageSizeCached());
+ RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ Printf("%s\n", res.data());
}
Printf("\n");
}
@@ -132,12 +168,20 @@ static void PrintLocation(const ReportLocation *loc) {
bool print_stack = false;
Printf("%s", d.Location());
if (loc->type == ReportLocationGlobal) {
- Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
- loc->name, loc->size, loc->addr, loc->module, loc->offset);
+ const DataInfo &global = loc->global;
+ if (global.size != 0)
+ Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
+ global.name, global.size, global.start,
+ StripModuleName(global.module), global.module_offset);
+ else
+ Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name,
+ global.start, StripModuleName(global.module),
+ global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->size, loc->addr, thread_name(thrbuf, loc->tid));
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
print_stack = true;
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
@@ -153,6 +197,17 @@ static void PrintLocation(const ReportLocation *loc) {
PrintStack(loc->stack);
}
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after);
+}
+
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
if (rm->destroyed) {
@@ -161,7 +216,7 @@ static void PrintMutex(const ReportMutex *rm) {
Printf("%s", d.EndMutex());
} else {
Printf("%s", d.Mutex());
- Printf(" Mutex M%llu created at:\n", rm->id);
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
Printf("%s", d.EndMutex());
PrintStack(rm->stack);
}
@@ -206,10 +261,20 @@ static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
return 0;
}
-ReportStack *SkipTsanInternalFrames(ReportStack *ent) {
- while (FrameIsInternal(ent) && ent->next)
- ent = ent->next;
- return ent;
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ return file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_"));
+}
+
+static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ while (FrameIsInternal(frames) && frames->next)
+ frames = frames->next;
+ return frames;
}
void PrintReport(const ReportDesc *rep) {
@@ -221,10 +286,42 @@ void PrintReport(const ReportDesc *rep) {
(int)internal_getpid());
Printf("%s", d.EndWarning());
- for (uptr i = 0; i < rep->stacks.Size(); i++) {
- if (i)
- Printf(" and:\n");
- PrintStack(rep->stacks[i]);
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.EndThreadDescription());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+ "to get more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
}
for (uptr i = 0; i < rep->mops.Size(); i++)
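
The deadlock printing added above is easiest to read next to a concrete trigger. A minimal repro (editorial example, not part of the patch): two threads acquire the same two mutexes in opposite order. Build with -fsanitize=thread and, if the flags are not already on by default, run with TSAN_OPTIONS=detect_deadlocks=1:second_deadlock_stack=1.

#include <mutex>
#include <thread>

std::mutex a, b;

int main() {
  // Thread 1 takes a then b; thread 2 takes b then a. Even if the run does
  // not actually hang, the lock-order cycle a => b => a is reported.
  std::thread t1([] {
    std::lock_guard<std::mutex> la(a);
    std::lock_guard<std::mutex> lb(b);
  });
  std::thread t2([] {
    std::lock_guard<std::mutex> lb(b);
    std::lock_guard<std::mutex> la(a);
  });
  t1.join();
  t2.join();
}
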
@@ -236,8 +333,10 @@ void PrintReport(const ReportDesc *rep) {
for (uptr i = 0; i < rep->locs.Size(); i++)
PrintLocation(rep->locs[i]);
- for (uptr i = 0; i < rep->mutexes.Size(); i++)
- PrintMutex(rep->mutexes[i]);
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
@@ -245,24 +344,29 @@ void PrintReport(const ReportDesc *rep) {
if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
- if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep)))
- ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func);
+ if (ReportStack *stack = ChooseSummaryStack(rep)) {
+ if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames))
+ ReportErrorSummary(rep_typ_str, frame->info);
+ }
Printf("==================\n");
}
-#else // #ifndef TSAN_GO
+#else // #ifndef SANITIZER_GO
const int kMainThreadId = 1;
void PrintStack(const ReportStack *ent) {
- if (ent == 0) {
+ if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- Printf(" %s()\n %s:%d +0x%zx\n",
- ent->func, ent->file, ent->line, (void*)ent->offset);
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame; frame = frame->next, i++) {
+ const AddressInfo &info = frame->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function,
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, (void *)info.module_offset);
}
}
@@ -289,11 +393,26 @@ static void PrintThread(const ReportThread *rt) {
void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
- Printf("WARNING: DATA RACE");
- for (uptr i = 0; i < rep->mops.Size(); i++)
- PrintMop(rep->mops[i], i == 0);
- for (uptr i = 0; i < rep->threads.Size(); i++)
- PrintThread(rep->threads[i]);
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
+ 999, rep->mutexes[i]->id,
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %d was previously locked here:\n",
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
Printf("==================\n");
}
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index c0eef9eb023..68924edb547 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -11,6 +11,7 @@
#ifndef TSAN_REPORT_H
#define TSAN_REPORT_H
+#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_vector.h"
@@ -20,21 +21,25 @@ enum ReportType {
ReportTypeRace,
ReportTypeVptrRace,
ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
ReportTypeSignalUnsafe,
- ReportTypeErrnoInSignal
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
};
struct ReportStack {
- ReportStack *next;
- char *module;
- uptr offset;
- uptr pc;
- char *func;
- char *file;
- int line;
- int col;
+ SymbolizedStack *frames;
+ bool suppressable;
+ static ReportStack *New();
+
+ private:
+ ReportStack();
};
struct ReportMopMutex {
@@ -64,16 +69,17 @@ enum ReportLocationType {
struct ReportLocation {
ReportLocationType type;
- uptr addr;
- uptr size;
- char *module;
- uptr offset;
+ DataInfo global;
+ uptr heap_chunk_start;
+ uptr heap_chunk_size;
int tid;
int fd;
- char *name;
- char *file;
- int line;
+ bool suppressable;
ReportStack *stack;
+
+ static ReportLocation *New(ReportLocationType type);
+ private:
+ explicit ReportLocation(ReportLocationType type);
};
struct ReportThread {
@@ -87,6 +93,7 @@ struct ReportThread {
struct ReportMutex {
u64 id;
+ uptr addr;
bool destroyed;
ReportStack *stack;
};
@@ -99,6 +106,7 @@ class ReportDesc {
Vector<ReportLocation*> locs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
+ Vector<int> unique_tids;
ReportStack *sleep;
int count;
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index 573eeb8a918..4fceca6f41f 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -22,6 +22,17 @@
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
+#include "ubsan/ubsan_init.h"
+
+#ifdef __SSE3__
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+#define _MM_MALLOC_H_INCLUDED
+#define __MM_MALLOC_H
+#include <emmintrin.h>
+typedef __m128i m128;
+#endif
volatile int __tsan_resumed = 0;
@@ -31,38 +42,49 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#ifndef TSAN_GO
+#if !defined(SANITIZER_GO) && !SANITIZER_MAC
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
// Can be overriden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
+void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
return failed;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void WEAK OnInitialize() {}
#endif
-static Context *ctx;
-Context *CTX() {
- return ctx;
-}
-
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
// Map thread trace when context is created.
- MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
- MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
- new(ThreadTrace(tid)) Trace();
+ char name[50];
+ internal_snprintf(name, sizeof(name), "trace %u", tid);
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
+ const uptr hdr = GetThreadTraceHeader(tid);
+ internal_snprintf(name, sizeof(name), "trace header %u", tid);
+ MapThreadTrace(hdr, sizeof(Trace), name);
+ new((void*)hdr) Trace();
+ // We are going to use only a small part of the trace with the default
+ // value of history_size. However, the constructor writes to the whole trace.
+ // Unmap the unused part.
+ uptr hdr_end = hdr + sizeof(Trace);
+ hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
+ hdr_end = RoundUp(hdr_end, GetPageSizeCached());
+ if (hdr_end < hdr + sizeof(Trace))
+ UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
return new(mem) ThreadContext(tid);
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -74,22 +96,26 @@ Context::Context()
, nreported()
, nmissed_expected()
, thread_registry(new(thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize))
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
+ , racy_mtx(MutexTypeRacy, StatMtxRacy)
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
+ , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
, fired_suppressions(8) {
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size)
: fast_state(tid, epoch)
// Do not touch these, rely on zero initialization,
// they may be accessed before the ctor.
// , ignore_reads_and_writes()
- // , in_rtl()
-#ifndef TSAN_GO
+ // , ignore_interceptors()
+ , clock(tid, reuse_count)
+#ifndef SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -97,70 +123,78 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_addr(stk_addr)
, stk_size(stk_size)
, tls_addr(tls_addr)
- , tls_size(tls_size) {
+ , tls_size(tls_size)
+#ifndef SANITIZER_GO
+ , last_sleep_clock(tid)
+#endif
+{
}
+#ifndef SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
InternalScopedBuffer<char> buf(4096);
- internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
- i, n_threads, n_running_threads);
- internal_write(fd, buf.data(), internal_strlen(buf.data()));
- WriteMemoryProfile(buf.data(), buf.size());
- internal_write(fd, buf.data(), internal_strlen(buf.data()));
+ WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
+ WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
static void BackgroundThread(void *arg) {
- ScopedInRtl in_rtl;
- Context *ctx = CTX();
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread()->ignore_interceptors++;
const u64 kMs2Ns = 1000 * 1000;
fd_t mprof_fd = kInvalidFd;
if (flags()->profile_memory && flags()->profile_memory[0]) {
- InternalScopedBuffer<char> filename(4096);
- internal_snprintf(filename.data(), filename.size(), "%s.%d",
- flags()->profile_memory, (int)internal_getpid());
- uptr openrv = OpenFile(filename.data(), true);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- &filename[0]);
+ if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
+ mprof_fd = 1;
+ } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
+ mprof_fd = 2;
} else {
- mprof_fd = openrv;
+ InternalScopedString filename(kMaxPathLength);
+ filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
+ fd_t fd = OpenFile(filename.data(), WrOnly);
+ if (fd == kInvalidFd) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = fd;
+ }
}
}
u64 last_flush = NanoTime();
uptr last_rss = 0;
- for (int i = 0; ; i++) {
- SleepForSeconds(1);
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: periodic memory flush\n");
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
last_flush = NanoTime();
}
}
+ // GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- if (flags()->verbosity > 0) {
- Printf("ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
- (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
- }
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: flushing memory due to RSS\n");
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
}
last_rss = rss;
}
@@ -169,7 +203,6 @@ static void BackgroundThread(void *arg) {
if (mprof_fd != kInvalidFd)
MemoryProfiler(ctx, mprof_fd, i);
-#ifndef TSAN_GO
// Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) {
u64 last = atomic_load(&ctx->last_symbolize_time_ns,
@@ -181,10 +214,22 @@ static void BackgroundThread(void *arg) {
atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
}
}
-#endif
}
}
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+#ifndef __mips__
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+#endif
+#endif
+
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size);
@@ -192,14 +237,44 @@ void DontNeedShadowFor(uptr addr, uptr size) {
}
void MapShadow(uptr addr, uptr size) {
- MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
+ } else {
+    // Mapping continuous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
+ addr, addr+size, meta_begin, meta_end);
}
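
RoundDownTo/RoundUpTo above are the usual power-of-two rounding helpers; a quick editorial check of the 64K arithmetic they perform:

#include <cassert>
#include <cstdint>

// Round down clears the low bits; round up first adds alignment - 1.
static uint64_t RoundDownTo(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t RoundUpTo(uint64_t x, uint64_t a) {
  return (x + a - 1) & ~(a - 1);
}

int main() {
  const uint64_t k64K = 64 << 10;
  assert(RoundDownTo(0x12345, k64K) == 0x10000);
  assert(RoundUpTo(0x12345, k64K) == 0x20000);
  assert(RoundUpTo(0x20000, k64K) == 0x20000);  // already aligned: unchanged
}
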
-void MapThreadTrace(uptr addr, uptr size) {
+void MapThreadTrace(uptr addr, uptr size, const char *name) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, kTraceMemBegin);
- CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
- uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
+ CHECK_GE(addr, kTraceMemBeg);
+ CHECK_LE(addr + size, kTraceMemEnd);
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
if (addr1 != addr) {
Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
addr, size, addr1);
@@ -207,57 +282,84 @@ void MapThreadTrace(uptr addr, uptr size) {
}
}
+static void CheckShadowMapping() {
+ for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
+ const uptr beg = UserRegions[i];
+ const uptr end = UserRegions[i + 1];
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -1; x <= 1; x++) {
+ const uptr p = p0 + x;
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ const uptr m = (uptr)MemToMeta(p);
+ VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ CHECK(IsMetaMem(m));
+ }
+ }
+ }
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
if (is_initialized)
return;
is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
- ScopedInRtl in_rtl;
-#ifndef TSAN_GO
+ ctx = new(ctx_placeholder) Context;
+ const char *options = GetEnv(kTsanOptionsEnv);
+ CacheBinaryName();
+ InitializeFlags(&ctx->flags, options);
+ CheckVMASize();
+#ifndef SANITIZER_GO
InitializeAllocator();
+ ReplaceSystemMalloc();
#endif
InitializeInterceptors();
- const char *env = InitializePlatform();
+ CheckShadowMapping();
+ InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
- ctx = new(ctx_placeholder) Context;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeShadowMemory();
#endif
- InitializeFlags(&ctx->flags, env);
// Setup correct file descriptor for error reports.
- __sanitizer_set_report_path(flags()->log_path);
+ __sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeLibIgnore();
- // Initialize external symbolizer before internal threads are started.
- const char *external_symbolizer = flags()->external_symbolizer_path;
- bool external_symbolizer_started =
- Symbolizer::Init(external_symbolizer)->IsExternalAvailable();
- if (external_symbolizer != 0 && external_symbolizer[0] != '\0' &&
- !external_symbolizer_started) {
- Printf("Failed to start external symbolizer: '%s'\n",
- external_symbolizer);
- Die();
- }
- Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ // On MIPS, TSan initialization is run before
+ // __pthread_initialize_minimal_internal() is finished, so we can not spawn
+ // new threads.
+#ifndef __mips__
+ StartBackgroundThread();
+ SetSandboxingCallback(StopBackgroundThread);
#endif
- internal_start_thread(&BackgroundThread, 0);
+#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
- if (ctx->flags.verbosity)
- Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
- (int)internal_getpid());
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
// Initialize thread 0.
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(thr, tid, internal_getpid());
- CHECK_EQ(thr->in_rtl, 1);
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
ctx->initialized = true;
if (flags()->stop_on_start) {
@@ -266,11 +368,11 @@ void Initialize(ThreadState *thr) {
(int)internal_getpid());
while (__tsan_resumed == 0) {}
}
+
+ OnInitialize();
}
int Finalize(ThreadState *thr) {
- ScopedInRtl in_rtl;
- Context *ctx = __tsan::ctx;
bool failed = false;
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
@@ -282,16 +384,15 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef TSAN_GO
- if (ctx->flags.verbosity)
- AllocatorPrintStats();
+#ifndef SANITIZER_GO
+ if (Verbosity()) AllocatorPrintStats();
#endif
ThreadFinalize(thr);
if (ctx->nreported) {
failed = true;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -304,45 +405,99 @@ int Finalize(ThreadState *thr) {
ctx->nmissed_expected);
}
- if (flags()->print_suppressions)
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
failed = OnFinalize(failed);
+#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
StatOutput(ctx->stat);
- return failed ? flags()->exitcode : 0;
+#endif
+
+ return failed ? common_flags()->exitcode : 0;
+}
+
+#ifndef SANITIZER_GO
+void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ StartBackgroundThread();
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
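
As a hedged sketch of the prepare/parent/child pattern these hooks follow (illustrative only; it does not claim this is how the runtime registers them), the standard POSIX mechanism for running code around fork() is pthread_atfork:

#include <pthread.h>

// Take the runtime's locks before fork() so the child does not inherit them
// in a locked state, then release them on both sides afterwards.
static void prepare() { /* lock thread registry and report mutex */ }
static void parent()  { /* unlock in the parent */ }
static void child()   { /* unlock in the child; maybe enable ignores */ }

int main() {
  pthread_atfork(prepare, parent, child);
  // Any later fork() in this process now runs the three callbacks around it.
  return 0;
}
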
+#ifdef SANITIZER_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
}
+#endif
-#ifndef TSAN_GO
u32 CurrentStackId(ThreadState *thr, uptr pc) {
- if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
+ if (!thr->is_inited) // May happen during bootstrap.
return 0;
- if (pc) {
+ if (pc != 0) {
+#ifndef SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
- u32 id = StackDepotPut(thr->shadow_stack,
- thr->shadow_stack_pos - thr->shadow_stack);
- if (pc)
+ u32 id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+ if (pc != 0)
thr->shadow_stack_pos--;
return id;
}
-#endif
void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;
- ScopedInRtl in_rtl;
Trace *thr_trace = ThreadTrace(thr->tid);
Lock l(&thr_trace->mtx);
unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
TraceHeader *hdr = &thr_trace->headers[trace];
hdr->epoch0 = thr->fast_state.epoch();
- hdr->stack0.ObtainCurrent(thr, 0);
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
hdr->mset0 = thr->mset;
thr->nomalloc--;
}
@@ -365,7 +520,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -392,28 +547,25 @@ void StoreIfNotYetStored(u64 *sp, u64 *s) {
*s = 0;
}
-static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem,
Shadow cur, Shadow old) {
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
#endif
}
-static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
- return old.epoch() >= thr->fast_synch_epoch;
-}
-
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
-ALWAYS_INLINE USED
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
@@ -435,43 +587,26 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
// it's just not worth it (performance- and complexity-wise).
Shadow old(0);
- if (kShadowCnt == 1) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- } else if (kShadowCnt == 2) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
-#include "tsan_update_shadow_word_inl.h"
- } else if (kShadowCnt == 4) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
-#include "tsan_update_shadow_word_inl.h"
- idx = 2;
-#include "tsan_update_shadow_word_inl.h"
- idx = 3;
-#include "tsan_update_shadow_word_inl.h"
- } else if (kShadowCnt == 8) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
-#include "tsan_update_shadow_word_inl.h"
- idx = 2;
-#include "tsan_update_shadow_word_inl.h"
- idx = 3;
+
+  // In release mode we manually unroll the loop,
+ // because empirically gcc generates better code this way.
+ // However, we can't afford unrolling in debug mode, because the function
+ // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
+ // threads, which is not enough for the unrolled loop.
+#if SANITIZER_DEBUG
+ for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
- idx = 4;
+ }
+#else
+ int idx = 0;
#include "tsan_update_shadow_word_inl.h"
- idx = 5;
+ idx = 1;
#include "tsan_update_shadow_word_inl.h"
- idx = 6;
+ idx = 2;
#include "tsan_update_shadow_word_inl.h"
- idx = 7;
+ idx = 3;
#include "tsan_update_shadow_word_inl.h"
- } else {
- CHECK(false);
- }
+#endif
// we did not find any races and had already stored
// the current access info, so we are done
@@ -491,13 +626,13 @@ void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
while (size) {
int size1 = 1;
int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
size1 = 8;
kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
size1 = 4;
kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
size1 = 2;
kAccessSizeLog = kSizeLog2;
}
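
The hunk above also fixes the single-cell checks in UnalignedMemoryAccess: an access of size bytes starting at addr fits into one 8-byte shadow cell only if its first and last byte, addr and addr + size - 1 (not addr + size), land in the same cell. A small stand-alone check of that arithmetic (editorial illustration):

#include <cassert>
#include <cstdint>

// True iff [addr, addr + size) stays inside a single 8-byte shadow cell.
static bool FitsInOneCell(uint64_t addr, uint64_t size) {
  return (addr & ~7ull) == ((addr + size - 1) & ~7ull);
}

int main() {
  assert(FitsInOneCell(8, 8));    // aligned 8-byte access: one cell
                                  // (the old "+ 8" form wrongly split this)
  assert(!FitsInOneCell(6, 4));   // crosses the boundary at address 8
}
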
@@ -507,6 +642,93 @@ void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
}
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch &&
+ old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if defined(__SSE3__)
+#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
+ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i*)s);
+ const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+ // epoch_vect >= sync_epoch?
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if defined(__SSE3__)
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads. But it still can be useful if you modify
+ // ContainsSameAccessFast and want to ensure that it's not completely broken.
+ // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
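
The SHUF macro above packs its four indices into an _mm_shuffle_ps immediate, so SHUF(v0, v1, i0, i1, i2, i3) yields the 32-bit lanes {v0[i0], v0[i1], v1[i2], v1[i3]}. A tiny stand-alone demo (editorial, requires SSE2) of the lane selection used to gather the high halves of the shadow words:

#include <emmintrin.h>
#include <cstdint>
#include <cstdio>

#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1),                       \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))

int main() {
  // 32-bit lanes, low lane first: v0 = {0,1,2,3}, v1 = {4,5,6,7}.
  __m128i v0 = _mm_set_epi32(3, 2, 1, 0);
  __m128i v1 = _mm_set_epi32(7, 6, 5, 4);
  // Picks {v0[1], v0[3], v1[1], v1[3]} = {1, 3, 5, 7}: the high 32-bit half
  // of each 64-bit slot, which is how addr_vect is built from shadow0/shadow1.
  __m128i r = SHUF(v0, v1, 1, 3, 1, 3);
  uint32_t out[4];
  _mm_storeu_si128((__m128i *)out, r);
  printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);  // 1 3 5 7
}
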
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
@@ -517,7 +739,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
(int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
(uptr)shadow_mem[0], (uptr)shadow_mem[1],
(uptr)shadow_mem[2], (uptr)shadow_mem[3]);
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
if (!IsAppMem(addr)) {
Printf("Access to non app mem %zx\n", addr);
DCHECK(IsAppMem(addr));
@@ -528,7 +750,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (*shadow_mem == kShadowRodata) {
+ if (kCppMode && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -539,20 +761,54 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
+ if (fast_state.GetIgnoreBit()) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopIgnored);
return;
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
+ }
+
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
cur.SetAtomic(kIsAtomic);
- // We must not store to the trace if we do not store to the shadow.
- // That is, this call must be moved somewhere below.
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange in tsan_rtl_thread.cc
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
@@ -581,7 +837,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < 64*1024) {
+ if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -593,7 +849,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
}
} else {
// The region is big, reset only beginning and end.
- const uptr kPageSize = 4096;
+ const uptr kPageSize = GetPageSizeCached();
u64 *begin = (u64*)MemToShadow(addr);
u64 *end = begin + size / kShadowCell * kShadowCnt;
u64 *p = begin;
@@ -631,8 +887,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
thr->is_freeing = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
@@ -642,8 +900,10 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.SetWrite(true);
@@ -653,29 +913,21 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
- if (thr->shadow_stack_pos == thr->shadow_stack_end) {
- const int sz = thr->shadow_stack_end - thr->shadow_stack;
- const int newsz = 2 * sz;
- uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
- newsz * sizeof(uptr));
- internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
- internal_free(thr->shadow_stack);
- thr->shadow_stack = newstack;
- thr->shadow_stack_pos = newstack + sz;
- thr->shadow_stack_end = newstack + newsz;
- }
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
@@ -683,14 +935,15 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -701,8 +954,9 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef TSAN_GO
- thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+#ifndef SANITIZER_GO
+ if (!ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
@@ -712,7 +966,7 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
@@ -722,8 +976,9 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef TSAN_GO
- thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+#ifndef SANITIZER_GO
+ if (!ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
@@ -731,9 +986,9 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (thr->ignore_sync == 0)
- thr->mop_ignore_set.Reset();
+ thr->sync_ignore_set.Reset();
#endif
}
@@ -741,7 +996,7 @@ bool MD5Hash::operator==(const MD5Hash &other) const {
return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
@@ -753,19 +1008,9 @@ void build_consistency_stats() {}
void build_consistency_nostats() {}
#endif
-#if TSAN_SHADOW_COUNT == 1
-void build_consistency_shadow1() {}
-#elif TSAN_SHADOW_COUNT == 2
-void build_consistency_shadow2() {}
-#elif TSAN_SHADOW_COUNT == 4
-void build_consistency_shadow4() {}
-#else
-void build_consistency_shadow8() {}
-#endif
-
} // namespace __tsan
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index 20493ea4d3b..12587dd203e 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -28,6 +28,7 @@
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
@@ -41,6 +42,7 @@
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -48,88 +50,23 @@
namespace __tsan {
-// Descriptor of user's memory block.
-struct MBlock {
- /*
- u64 mtx : 1; // must be first
- u64 lst : 44;
- u64 stk : 31; // on word boundary
- u64 tid : kTidBits;
- u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
- */
- u64 raw[2];
-
- void Init(uptr siz, u32 tid, u32 stk) {
- raw[0] = raw[1] = 0;
- raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
- raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
- raw[0] |= (u64)stk << (1 + 44);
- raw[1] |= (u64)stk >> (64 - 44 - 1);
- DCHECK_EQ(Size(), siz);
- DCHECK_EQ(Tid(), tid);
- DCHECK_EQ(StackId(), stk);
- }
-
- u32 Tid() const {
- return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
- }
-
- uptr Size() const {
- return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
- }
-
- u32 StackId() const {
- return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
- }
-
- SyncVar *ListHead() const {
- return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
- }
-
- void ListPush(SyncVar *v) {
- SyncVar *lst = ListHead();
- v->next = lst;
- u64 x = (u64)v ^ (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), v);
- }
-
- SyncVar *ListPop() {
- SyncVar *lst = ListHead();
- SyncVar *nxt = lst->next;
- lst->next = 0;
- u64 x = (u64)lst ^ (u64)nxt;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), nxt);
- return lst;
- }
-
- void ListReset() {
- SyncVar *lst = ListHead();
- u64 x = (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), 0);
- }
-
- void Lock();
- void Unlock();
- typedef GenericScopedLock<MBlock> ScopedLock;
-};
-
-#ifndef TSAN_GO
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#else
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#endif
-const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-
+#ifndef SANITIZER_GO
struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
+#if defined(__mips64) || defined(__aarch64__)
+static const uptr kAllocatorSpace = 0;
+static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
+static const uptr kAllocatorRegionSizeLog = 20;
+static const uptr kAllocatorNumRegions =
+ kAllocatorSize >> kAllocatorRegionSizeLog;
+typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
+ MapUnmapCallback> ByteMap;
+typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
+ CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
+ MapUnmapCallback> PrimaryAllocator;
+#else
+typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
+#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -145,14 +82,14 @@ const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
// FastState (from most significant bit):
// ignore : 1
// tid : kTidBits
-// epoch : kClkBits
// unused : -
// history_size : 3
+// epoch : kClkBits
class FastState {
public:
FastState(u64 tid, u64 epoch) {
x_ = tid << kTidShift;
- x_ |= epoch << kClkShift;
+ x_ |= epoch;
DCHECK_EQ(tid, this->tid());
DCHECK_EQ(epoch, this->epoch());
DCHECK_EQ(GetIgnoreBit(), false);
@@ -177,13 +114,13 @@ class FastState {
}
u64 epoch() const {
- u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
+ u64 res = x_ & ((1ull << kClkBits) - 1);
return res;
}
void IncrementEpoch() {
u64 old_epoch = epoch();
- x_ += 1 << kClkShift;
+ x_ += 1;
DCHECK_EQ(old_epoch + 1, epoch());
(void)old_epoch;
}
@@ -195,17 +132,19 @@ class FastState {
void SetHistorySize(int hs) {
CHECK_GE(hs, 0);
CHECK_LE(hs, 7);
- x_ = (x_ & ~7) | hs;
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
}
+ ALWAYS_INLINE
int GetHistorySize() const {
- return (int)(x_ & 7);
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
}
void ClearHistorySize() {
- x_ &= ~7;
+ SetHistorySize(0);
}
+ ALWAYS_INLINE
u64 GetTracePos() const {
const int hs = GetHistorySize();
// When hs == 0, the trace consists of 2 parts.
@@ -216,20 +155,21 @@ class FastState {
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
- static const int kClkShift = kTidShift - kClkBits;
static const u64 kIgnoreBit = 1ull << 63;
static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
u64 x_;
};
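(With this change the epoch occupies the low kClkBits bits of x_, so IncrementEpoch becomes a plain increment and epoch() a simple mask, while the history size moves to the bits just above the epoch. A standalone sketch of the new packing follows; the bit widths kTidBits = 13 and kClkBits = 42 are assumptions meant to match tsan_defs.h.)

#include <cassert>
typedef unsigned long long u64;

const int kTidBits = 13;                    // assumed, see tsan_defs.h
const int kClkBits = 42;                    // assumed, see tsan_defs.h
const int kTidShift = 64 - kTidBits - 1;    // tid sits just below the ignore bit
const u64 kHistoryShift = kClkBits;         // history now sits right above the epoch
const u64 kHistoryMask = 7;

struct MiniFastState {
  u64 x;
  MiniFastState(u64 tid, u64 epoch) : x((tid << kTidShift) | epoch) {}
  u64 tid() const { return (x >> kTidShift) & ((1ull << kTidBits) - 1); }
  u64 epoch() const { return x & ((1ull << kClkBits) - 1); }
  void IncrementEpoch() { x += 1; }         // no shift needed any more
  void SetHistorySize(int hs) {
    x = (x & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }
  int GetHistorySize() const { return (int)((x >> kHistoryShift) & kHistoryMask); }
};

int main() {
  MiniFastState s(/*tid=*/5, /*epoch=*/100);
  s.SetHistorySize(3);
  s.IncrementEpoch();
  assert(s.tid() == 5 && s.epoch() == 101 && s.GetHistorySize() == 3);
  return 0;
}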
// Shadow (from most significant bit):
// freed : 1
// tid : kTidBits
-// epoch : kClkBits
// is_atomic : 1
// is_read : 1
// size_log : 2
// addr0 : 3
+// epoch : kClkBits
class Shadow : public FastState {
public:
explicit Shadow(u64 x)
@@ -242,10 +182,10 @@ class Shadow : public FastState {
}
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ(x_ & 31, 0);
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
DCHECK_LE(addr0, 7);
DCHECK_LE(kAccessSizeLog, 3);
- x_ |= (kAccessSizeLog << 3) | addr0;
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
DCHECK_EQ(kAccessSizeLog, size_log());
DCHECK_EQ(addr0, this->addr0());
}
@@ -278,47 +218,34 @@ class Shadow : public FastState {
return shifted_xor == 0;
}
- static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
- u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
return masked_xor == 0;
}
- static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
unsigned kS2AccessSize) {
bool res = false;
u64 diff = s1.addr0() - s2.addr0();
if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
// if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff) res = true;
+ if (s1.size() > -diff)
+ res = true;
} else {
// if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff) res = true;
+ if (kS2AccessSize > diff)
+ res = true;
}
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
return res;
}
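(TwoRangesIntersect relies on unsigned wrap-around: it computes the difference of the two start addresses once and branches on its sign instead of comparing both interval endpoints. A self-contained sketch of the same check validated against a naive reference, using the 3-bit addr0 / power-of-two size model of the shadow encoding above.)

#include <cassert>
typedef unsigned long long u64;
typedef long long s64;

// Fast check, mirroring the branchy version above.
static bool Intersect(u64 addr1, u64 size1, u64 addr2, u64 size2) {
  u64 diff = addr1 - addr2;
  if ((s64)diff < 0)                  // addr1 < addr2
    return size1 > (0 - diff);        // i.e. addr1 + size1 > addr2
  return size2 > diff;                // i.e. addr2 + size2 > addr1
}

// Naive reference, mirroring TwoRangesIntersectSlow.
static bool IntersectSlow(u64 a1, u64 s1, u64 a2, u64 s2) {
  if (a1 == a2) return true;
  if (a1 < a2 && a1 + s1 > a2) return true;
  if (a2 < a1 && a2 + s2 > a1) return true;
  return false;
}

int main() {
  for (u64 a1 = 0; a1 < 8; a1++)
    for (u64 a2 = 0; a2 < 8; a2++)
      for (u64 l1 = 1; l1 <= 8; l1 *= 2)
        for (u64 l2 = 1; l2 <= 8; l2 *= 2)
          assert(Intersect(a1, l1, a2, l2) == IntersectSlow(a1, l1, a2, l2));
  return 0;
}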
- // The idea behind the offset is as follows.
- // Consider that we have 8 bool's contained within a single 8-byte block
- // (mapped to a single shadow "cell"). Now consider that we write to the bools
- // from a single thread (which we consider the common case).
- // W/o offsetting each access will have to scan 4 shadow values at average
- // to find the corresponding shadow value for the bool.
- // With offsetting we start scanning shadow with the offset so that
- // each access hits necessary shadow straight off (at least in an expected
- // optimistic case).
- // This logic works seamlessly for any layout of user data. For example,
- // if user data is {int, short, char, char}, then accesses to the int are
- // offsetted to 0, short - 4, 1st char - 6, 2nd char - 7. Hopefully, accesses
- // from a single thread won't need to scan all 8 shadow values.
- unsigned ComputeSearchOffset() {
- return x_ & 7;
- }
- u64 addr0() const { return x_ & 7; }
- u64 size() const { return 1ull << size_log(); }
- bool IsWrite() const { return !IsRead(); }
- bool IsRead() const { return x_ & kReadBit; }
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
// The idea behind the freed bit is as follows.
// When the memory is freed (or otherwise inaccessible) we write to the shadow
@@ -343,15 +270,14 @@ class Shadow : public FastState {
return res;
}
- bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
- bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
- | (kIsAtomic << kAtomicShift));
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
return v;
}
- bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
<= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
@@ -359,7 +285,7 @@ class Shadow : public FastState {
return v;
}
- bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
>= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
@@ -368,14 +294,14 @@ class Shadow : public FastState {
}
private:
- static const u64 kReadShift = 5;
+ static const u64 kReadShift = 5 + kClkBits;
static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6;
+ static const u64 kAtomicShift = 6 + kClkBits;
static const u64 kAtomicBit = 1ull << kAtomicShift;
- u64 size_log() const { return (x_ >> 3) & 3; }
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
- static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
if (s1.addr0() == s2.addr0()) return true;
if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
return true;
@@ -385,11 +311,14 @@ class Shadow : public FastState {
}
};
-struct SignalContext;
+struct ThreadSignalContext;
struct JmpBuf {
uptr sp;
uptr mangled_sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
uptr *shadow_stack_pos;
};
@@ -414,7 +343,7 @@ struct ThreadState {
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -427,18 +356,21 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
#endif
+#if TSAN_COLLECT_STATS
u64 stat[StatCnt];
+#endif
const int tid;
const int unique_id;
- int in_rtl;
bool in_symbolizer;
bool in_ignored_lib;
- bool is_alive;
+ bool is_inited;
+ bool is_dead;
bool is_freeing;
bool is_vptr_access;
const uptr stk_addr;
@@ -447,12 +379,20 @@ struct ThreadState {
const uptr tls_size;
ThreadContext *tctx;
- DeadlockDetector deadlock_detector;
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ InternalDeadlockDetector internal_deadlock_detector;
+#endif
+ DDPhysicalThread *dd_pt;
+ DDLogicalThread *dd_lt;
+
+ atomic_uintptr_t in_signal_handler;
+ ThreadSignalContext *signal_ctx;
- bool in_signal_handler;
- SignalContext *signal_ctx;
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -462,29 +402,31 @@ struct ThreadState {
int nomalloc;
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
-Context *CTX();
-
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
+#if SANITIZER_MAC
+ThreadState *cur_thread();
+void cur_thread_finalize();
+#else
+__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
-#endif
+INLINE void cur_thread_finalize() { }
+#endif // SANITIZER_MAC
+#endif // SANITIZER_GO
class ThreadContext : public ThreadContextBase {
public:
explicit ThreadContext(int tid);
~ThreadContext();
ThreadState *thr;
-#ifdef TSAN_GO
- StackTrace creation_stack;
-#else
u32 creation_stack_id;
-#endif
SyncClock sync;
// Epoch at which the thread had started.
// If we see an event from the thread stamped by an older epoch,
@@ -493,12 +435,13 @@ class ThreadContext : public ThreadContextBase {
u64 epoch1;
// Override superclass callbacks.
- void OnDead();
- void OnJoined(void *arg);
- void OnFinished();
- void OnStarted(void *arg);
- void OnCreated(void *arg);
- void OnReset();
+ void OnDead() override;
+ void OnJoined(void *arg) override;
+ void OnFinished() override;
+ void OnStarted(void *arg) override;
+ void OnCreated(void *arg) override;
+ void OnReset() override;
+ void OnDetached(void *arg) override;
};
struct RacyStacks {
@@ -519,7 +462,7 @@ struct RacyAddress {
struct FiredSuppression {
ReportType type;
- uptr pc;
+ uptr pc_or_addr;
Suppression *supp;
};
@@ -527,20 +470,29 @@ struct Context {
Context();
bool initialized;
+ bool after_multithreaded_fork;
- SyncTab synctab;
+ MetaMap metamap;
Mutex report_mtx;
int nreported;
int nmissed_expected;
atomic_uint64_t last_symbolize_time_ns;
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
ThreadRegistry *thread_registry;
+ Mutex racy_mtx;
Vector<RacyStacks> racy_stacks;
Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
+ Mutex fired_suppressions_mtx;
InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
+
+ ClockAlloc clock_alloc;
Flags flags;
@@ -549,14 +501,20 @@ struct Context {
u64 int_alloc_siz[MBlockTypeCount];
};
-class ScopedInRtl {
- public:
- ScopedInRtl();
- ~ScopedInRtl();
- private:
- ThreadState*thr_;
- int in_rtl_;
- int errno_;
+extern Context *ctx; // The one and the only global runtime context.
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#ifndef SANITIZER_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#ifndef SANITIZER_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
};
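(ScopedIgnoreInterceptors replaces the old ScopedInRtl: interceptors consult a per-thread counter and become pass-through while it is non-zero, e.g. during symbolization. A generic sketch of the same RAII idea; the thread-local counter and helper names here are illustrative, not the runtime's API.)

#include <cassert>

// Hypothetical per-thread counter; the runtime keeps it in ThreadState.
static thread_local int ignore_interceptors;

struct ScopedIgnore {
  ScopedIgnore()  { ignore_interceptors++; }
  ~ScopedIgnore() { ignore_interceptors--; }
};

// An interceptor would bail out early while the counter is raised.
static bool InterceptorActive() { return ignore_interceptors == 0; }

int main() {
  assert(InterceptorActive());
  {
    ScopedIgnore ignore;        // e.g. while building or symbolizing a report
    assert(!InterceptorActive());
  }
  assert(InterceptorActive());
  return 0;
}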
class ScopedReport {
@@ -564,11 +522,14 @@ class ScopedReport {
explicit ScopedReport(ReportType typ);
~ScopedReport();
- void AddStack(const StackTrace *stack);
- void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+ void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
const MutexSet *mset);
- void AddThread(const ThreadContext *tctx);
+ void AddStack(StackTrace stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(int unique_tid, bool suppressable = false);
+ void AddUniqueTid(int unique_tid);
void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
void AddLocation(uptr addr, uptr size);
void AddSleep(u32 stack_id);
void SetCount(int count);
@@ -576,49 +537,65 @@ class ScopedReport {
const ReportDesc *GetReport() const;
private:
- Context *ctx_;
ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
- void AddMutex(u64 id);
+ void AddDeadMutex(u64 id);
ScopedReport(const ScopedReport&);
void operator = (const ScopedReport&);
};
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset);
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+}
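(ObtainCurrentStack copies the thread's shadow stack into a caller-provided trace, keeping only the most recent kStackTraceMax frames and reserving one slot for the extra top pc when it is non-zero. A worked sketch of just that truncation arithmetic; the kStackTraceMax value used below is an assumption for illustration.)

#include <cassert>
typedef unsigned long long uptr;

// Returns the range [start, start + size) of shadow-stack frames to keep.
static void Truncate(uptr stack_size, uptr toppc, uptr kStackTraceMax,
                     uptr *start, uptr *size) {
  *start = 0;
  *size = stack_size;
  if (*size + !!toppc > kStackTraceMax) {       // one slot is reserved for toppc
    *start = *size + !!toppc - kStackTraceMax;  // drop the oldest frames
    *size = kStackTraceMax - !!toppc;
  }
}

int main() {
  uptr start, size;
  Truncate(/*stack_size=*/300, /*toppc=*/0x1234, /*kStackTraceMax=*/256, &start, &size);
  assert(start == 45 && size == 255);           // 255 frames + toppc == 256 entries
  Truncate(10, 0, 256, &start, &size);
  assert(start == 0 && size == 10);             // short stacks are kept whole
  return 0;
}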
+
+
+#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
+#endif
+
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
- if (kCollectStats)
- thr->stat[typ] += n;
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] += n;
+#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
- if (kCollectStats)
- thr->stat[typ] = n;
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] = n;
+#endif
}
void MapShadow(uptr addr, uptr size);
-void MapThreadTrace(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc);
+
void ReportRace(ThreadState *thr);
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1 = 0,
- const ReportStack *suppress_stack2 = 0,
- const ReportLocation *suppress_loc = 0);
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
-bool FrameIsInternal(const ReportStack *frame);
-ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
@@ -635,14 +612,13 @@ ReportStack *SkipTsanInternalFrames(ReportStack *ent);
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
-void PrintCurrentStackSlow(); // uses libunwind
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create);
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
@@ -707,14 +683,21 @@ void ProcessPendingSignals(ThreadState *thr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
+ bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
void Acquire(ThreadState *thr, uptr pc, uptr addr);
+// AcquireGlobal synchronizes the current thread with all other threads.
+// In terms of happens-before relation, it draws a HB edge from all threads
+// (where they happen to execute right now) to the current thread. We use it to
+// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
+// right before executing finalizers. This provides a coarse, but simple
+// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
@@ -730,7 +713,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use normal call.
-#if TSAN_DEBUG == 0
+#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
@@ -754,13 +737,15 @@ Trace *ThreadTrace(int tid);
extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
DCHECK_EQ(GetLsb(addr, 61), addr);
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
@@ -772,6 +757,12 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
*evp = ev;
}
+#ifndef SANITIZER_GO
+uptr ALWAYS_INLINE HeapEnd() {
+ return kHeapMemEnd + PrimaryAllocator::AdditionalSize();
+}
+#endif
+
} // namespace __tsan
#endif // TSAN_RTL_H
diff --git a/libsanitizer/tsan/tsan_rtl_aarch64.S b/libsanitizer/tsan/tsan_rtl_aarch64.S
new file mode 100644
index 00000000000..20bf00827e9
--- /dev/null
+++ b/libsanitizer/tsan/tsan_rtl_aarch64.S
@@ -0,0 +1,204 @@
+#include "sanitizer_common/sanitizer_asm.h"
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.type setjmp, @function
+setjmp:
+ CFI_STARTPROC
+
+ // save env parameters for function call
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save jmp_buf
+ str x19, [sp, 16]
+ CFI_OFFSET (19, -16)
+ mov x19, x0
+
+ // SP pointer mangling (see glibc setjmp)
+ adrp x2, :got:__pointer_chk_guard
+ ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ add x0, x29, 32
+ ldr x2, [x2]
+ eor x1, x2, x0
+
+ // call tsan interceptor
+ bl __tsan_setjmp
+
+ // restore env parameter
+ mov x0, x19
+ ldr x19, [sp, 16]
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (30)
+ CFI_RESTORE (19)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc setjmp
+ adrp x1, :got:_ZN14__interception11real_setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
+ ldr x1, [x1]
+ br x1
+
+ CFI_ENDPROC
+.size setjmp, .-setjmp
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl _setjmp
+.type _setjmp, @function
+_setjmp:
+ CFI_STARTPROC
+
+ // save env parameters for function call
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save jmp_buf
+ str x19, [sp, 16]
+ CFI_OFFSET (19, -16)
+ mov x19, x0
+
+ // SP pointer mangling (see glibc setjmp)
+ adrp x2, :got:__pointer_chk_guard
+ ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ add x0, x29, 32
+ ldr x2, [x2]
+ eor x1, x2, x0
+
+ // call tsan interceptor
+ bl __tsan_setjmp
+
+ // Restore jmp_buf parameter
+ mov x0, x19
+ ldr x19, [sp, 16]
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (30)
+ CFI_RESTORE (19)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc setjmp
+ adrp x1, :got:_ZN14__interception12real__setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
+ ldr x1, [x1]
+ br x1
+
+ CFI_ENDPROC
+.size _setjmp, .-_setjmp
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl sigsetjmp
+.type sigsetjmp, @function
+sigsetjmp:
+ CFI_STARTPROC
+
+ // save env parameters for function call
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save jmp_buf and savesigs
+ stp x19, x20, [sp, 16]
+ CFI_OFFSET (19, -16)
+ CFI_OFFSET (20, -8)
+ mov w20, w1
+ mov x19, x0
+
+ // SP pointer mangling (see glibc setjmp)
+ adrp x2, :got:__pointer_chk_guard
+ ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ add x0, x29, 32
+ ldr x2, [x2]
+ eor x1, x2, x0
+
+ // call tsan interceptor
+ bl __tsan_setjmp
+
+ // restore env parameter
+ mov w1, w20
+ mov x0, x19
+ ldp x19, x20, [sp, 16]
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (30)
+ CFI_RESTORE (29)
+ CFI_RESTORE (19)
+ CFI_RESTORE (20)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc sigsetjmp
+ adrp x2, :got:_ZN14__interception14real_sigsetjmpE
+ ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
+ ldr x2, [x2]
+ br x2
+ CFI_ENDPROC
+.size sigsetjmp, .-sigsetjmp
+
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+ CFI_STARTPROC
+
+ // save env parameters for function call
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save jmp_buf and savesigs
+ stp x19, x20, [sp, 16]
+ CFI_OFFSET (19, -16)
+ CFI_OFFSET (20, -8)
+ mov w20, w1
+ mov x19, x0
+
+ // SP pointer mangling (see glibc setjmp)
+ adrp x2, :got:__pointer_chk_guard
+ ldr x2, [x2, #:got_lo12:__pointer_chk_guard]
+ add x0, x29, 32
+ ldr x2, [x2]
+ eor x1, x2, x0
+
+ // call tsan interceptor
+ bl __tsan_setjmp
+
+ mov w1, w20
+ mov x0, x19
+ ldp x19, x20, [sp, 16]
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (30)
+ CFI_RESTORE (29)
+ CFI_RESTORE (19)
+ CFI_RESTORE (20)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc __sigsetjmp
+ adrp x2, :got:_ZN14__interception16real___sigsetjmpE
+ ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
+ ldr x2, [x2]
+ br x2
+ CFI_ENDPROC
+.size __sigsetjmp, .-__sigsetjmp
+
+#if defined(__linux__)
+/* We do not need executable stack. */
+.section .note.GNU-stack,"",@progbits
+#endif
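(Each of the new AArch64 wrappers computes the caller's stack pointer, mangles it with glibc's __pointer_chk_guard exactly as setjmp itself will, hands the result to __tsan_setjmp, and then tail-calls the real libc routine so libc still fills in the jmp_buf. A tiny sketch of the XOR round-trip property the mangling relies on; the guard and SP values below are made up.)

#include <cassert>
#include <cstdint>

int main() {
  uint64_t guard = 0x123456789abcdef0ULL;   // stand-in for glibc's __pointer_chk_guard
  uint64_t sp    = 0x0000007ffdf01230ULL;   // caller's stack pointer
  uint64_t mangled = sp ^ guard;            // what the wrapper derives for __tsan_setjmp
  assert((mangled ^ guard) == sp);          // demangling with the same guard recovers SP
  return 0;
}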
diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S
index 71a2ecda9dd..6df36a500a7 100644
--- a/libsanitizer/tsan/tsan_rtl_amd64.S
+++ b/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -170,10 +170,15 @@ setjmp:
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
@@ -197,10 +202,15 @@ _setjmp:
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 8(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// restore env parameter
@@ -231,10 +241,15 @@ sigsetjmp:
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
@@ -272,10 +287,15 @@ __sigsetjmp:
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain %rsp
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+ mov %rdi, %rsi
+#else
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#endif
// call tsan interceptor
call __tsan_setjmp
// unalign stack frame
@@ -296,7 +316,7 @@ __sigsetjmp:
CFI_ENDPROC
.size __sigsetjmp, .-__sigsetjmp
-#ifdef __linux__
+#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif
diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc
index d9a3a3baa3f..deb7722f521 100644
--- a/libsanitizer/tsan/tsan_rtl_mutex.cc
+++ b/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -9,6 +9,9 @@
//
//===----------------------------------------------------------------------===//
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
@@ -18,10 +21,47 @@
namespace __tsan {
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ u32 Unwind() override { return CurrentStackId(thr, pc); }
+ int UniqueTid() override { return thr->unique_id; }
+};
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (kGoMode)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
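(The rewritten mutex hooks follow one pattern: gather everything a report needs, the mutex id and the is_broken flag, while s->mtx is held, drop the lock, and only then build the report, because report generation takes other locks. A generic sketch of that capture-then-release idea; the names below are illustrative, not the runtime's API.)

#include <cassert>
#include <mutex>

struct Sync {
  std::mutex mtx;
  unsigned long long id = 42;
  bool is_broken = false;
};

static unsigned long long reported_id = 0;
static void ReportMisuse(unsigned long long mid) { reported_id = mid; }  // stand-in

static void Unlock(Sync *s, bool misuse) {
  bool report = false;
  unsigned long long mid;
  {
    std::lock_guard<std::mutex> l(s->mtx);
    if (misuse && !s->is_broken) {   // only the first misuse is reported
      s->is_broken = true;
      report = true;
    }
    mid = s->id;                     // capture before releasing the lock
  }
  // In the real runtime s must not be touched after this point; only mid is used.
  if (report)
    ReportMisuse(mid);
}

int main() {
  Sync s;
  Unlock(&s, /*misuse=*/true);
  assert(reported_id == 42 && s.is_broken);
  return 0;
}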
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
if (!linker_init && IsAppMem(addr)) {
@@ -30,71 +70,92 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
+ if (kCppMode && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Global mutexes not marked as LINKER_INITIALIZED
// cause tons of not interesting reports, so just ignore it.
if (IsGlobalVar(addr))
return;
#endif
- SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
- if (s == 0)
- return;
if (IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s == 0)
+ return;
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
+ unlock_locked = true;
+ }
+ u64 mid = s->GetId();
+ u32 last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr); // must not reset it before the report is printed
+ s->mtx.Unlock();
+ if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(s);
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
- rep.AddStack(&trace);
- FastState last(s->last_lock);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace);
+ FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(&trace);
- rep.AddLocation(s->addr, 1);
- OutputReport(ctx, rep);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
}
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
+ if (unlock_locked) {
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s != 0) {
+ s->Reset(thr);
+ s->mtx.Unlock();
+ }
+ }
+ thr->mset.Remove(mid);
+ // s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ bool report_double_lock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
CHECK_EQ(s->recursion, 0);
s->owner_tid = thr->tid;
s->last_lock = thr->fast_state.raw();
} else if (s->owner_tid == thr->tid) {
CHECK_GT(s->recursion, 0);
- } else {
- Printf("ThreadSanitizer WARNING: double lock of mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
+ } else if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_double_lock = true;
}
if (s->recursion == 0) {
StatInc(thr, StatMutexLock);
@@ -105,30 +166,36 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
}
s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
+ Callback cb(thr, pc);
+ if (!try_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
- if (s->recursion == 0) {
- if (!s->is_broken) {
- s->is_broken = true;
- Printf("ThreadSanitizer WARNING: unlock of unlocked mutex %p\n", addr);
- PrintCurrentStack(thr, pc);
- }
- } else if (s->owner_tid != thr->tid) {
- if (!s->is_broken) {
+ bool report_bad_unlock = false;
+ if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlocked by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
} else {
rec = all ? s->recursion : 1;
@@ -142,56 +209,97 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
}
}
thr->mset.Del(s->GetId(), true);
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
return rec;
}
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
+void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ bool report_bad_lock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read lock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_lock = true;
+ }
}
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ if (!trylock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+ }
+ u64 mid = s->GetId();
s->mtx.ReadUnlock();
+ // Can't touch s after this point.
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ bool report_bad_unlock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- Printf("ThreadSanitizer WARNING: read unlock of a write locked mutex %p\n",
- addr);
- PrintCurrentStack(thr, pc);
+ if (flags()->report_mutex_bugs && !s->is_broken) {
+ s->is_broken = true;
+ report_bad_unlock = true;
+ }
}
ReleaseImpl(thr, pc, &s->read_clock);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
- thr->mset.Del(s->GetId(), false);
+ // Can't touch s after this point.
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
+ bool report_bad_unlock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
// Seems to be read unlock.
write = false;
@@ -214,30 +322,37 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
} else if (!s->is_broken) {
s->is_broken = true;
- Printf("ThreadSanitizer WARNING: mutex %p is unlock by wrong thread\n",
- addr);
- PrintCurrentStack(thr, pc);
+ report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ u64 mid = s->GetId();
s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
}
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->owner_tid = SyncVar::kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
}
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
@@ -255,17 +370,16 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateClockCallback, thr);
}
void Release(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -274,11 +388,10 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
}
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
- CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -286,7 +399,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
@@ -301,8 +414,8 @@ void AfterSleep(ThreadState *thr, uptr pc) {
if (thr->ignore_sync)
return;
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(CTX()->thread_registry);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
UpdateSleepClockCallback, thr);
}
#endif
@@ -310,37 +423,63 @@ void AfterSleep(ThreadState *thr, uptr pc) {
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.acquire(c);
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(c);
+ thr->clock.release(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(c);
+ thr->clock.ReleaseStore(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(c);
+ thr->clock.acq_rel(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk && stk != 0xffffffff) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc
index f2248afeab2..d0d1fbaf45a 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cc
+++ b/libsanitizer/tsan/tsan_rtl_report.cc
@@ -28,15 +28,18 @@ namespace __tsan {
using namespace __sanitizer; // NOLINT
-static ReportStack *SymbolizeStack(const StackTrace& trace);
+static ReportStack *SymbolizeStack(StackTrace trace);
void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- ScopedInRtl in_rtl;
+ // There is high probability that interceptors will check-fail as well,
+ // on the other hand there is no sense in processing interceptors
+ // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
- PrintCurrentStackSlow();
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
Die();
}
@@ -51,120 +54,107 @@ bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
}
#endif
-static void StackStripMain(ReportStack *stack) {
- ReportStack *last_frame = 0;
- ReportStack *last_frame2 = 0;
- const char *prefix = "__interceptor_";
- uptr prefix_len = internal_strlen(prefix);
- const char *path_prefix = flags()->strip_path_prefix;
- uptr path_prefix_len = internal_strlen(path_prefix);
- char *pos;
- for (ReportStack *ent = stack; ent; ent = ent->next) {
- if (ent->func && 0 == internal_strncmp(ent->func, prefix, prefix_len))
- ent->func += prefix_len;
- if (ent->file && (pos = internal_strstr(ent->file, path_prefix)))
- ent->file = pos + path_prefix_len;
- if (ent->file && ent->file[0] == '.' && ent->file[1] == '/')
- ent->file += 2;
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
last_frame2 = last_frame;
- last_frame = ent;
+ last_frame = cur;
}
if (last_frame2 == 0)
return;
- const char *last = last_frame->func;
-#ifndef TSAN_GO
- const char *last2 = last_frame2->func;
+#ifndef SANITIZER_GO
+ const char *last = last_frame->info.function;
+ const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
if (last2 && 0 == internal_strcmp(last2, "main")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// Strip our internal thread start routine.
} else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// Strip global ctors init.
} else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// If both are 0, then we probably just failed to symbolize.
} else if (last || last2) {
// Ensure that we recovered stack completely. Trimmed stack
// can actually happen if we do not instrument some code,
// so it's only a debug print. However we must try hard to not miss it
// due to our fault.
- DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
+ DPrintf("Bottom stack frame is missed\n");
}
#else
// The last frame always points into runtime (gosched0, goexit0, runtime.main).
- last_frame2->next = 0;
- (void)last;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
#endif
}
-#ifndef TSAN_GO
ReportStack *SymbolizeStackId(u32 stack_id) {
- uptr ssz = 0;
- const uptr *stack = StackDepotGet(stack_id, &ssz);
- if (stack == 0)
+ if (stack_id == 0)
return 0;
- StackTrace trace;
- trace.Init(stack, ssz);
- return SymbolizeStack(trace);
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
}
-#endif
-static ReportStack *SymbolizeStack(const StackTrace& trace) {
- if (trace.IsEmpty())
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
return 0;
- ReportStack *stack = 0;
- for (uptr si = 0; si < trace.Size(); si++) {
- const uptr pc = trace.Get(si);
-#ifndef TSAN_GO
- // We obtain the return address, that is, address of the next instruction,
- // so offset it by 1 byte.
- const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
-#else
- // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
+ SymbolizedStack *top = nullptr;
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
uptr pc1 = pc;
- if (si != trace.Size() - 1)
- pc1 -= 1;
-#endif
- ReportStack *ent = SymbolizeCode(pc1);
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
+ if ((pc & kExternalPCBit) == 0)
+ pc1 = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *ent = SymbolizeCode(pc1);
CHECK_NE(ent, 0);
- ReportStack *last = ent;
+ SymbolizedStack *last = ent;
while (last->next) {
- last->pc = pc; // restore original pc for report
+ last->info.address = pc; // restore original pc for report
last = last->next;
}
- last->pc = pc; // restore original pc for report
- last->next = stack;
- stack = ent;
+ last->info.address = pc; // restore original pc for report
+ last->next = top;
+ top = ent;
}
- StackStripMain(stack);
+ StackStripMain(top);
+
+ ReportStack *stack = ReportStack::New();
+ stack->frames = top;
return stack;
}
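(SymbolizeStack adjusts each collected pc before symbolizing: the values on the shadow stack are return addresses, so they point at the instruction after the call and would otherwise be attributed to the wrong source line. A minimal sketch of the usual adjustment; stepping back one byte is an x86-style assumption, the runtime uses StackTrace::GetPreviousInstructionPc, which knows the per-architecture offset.)

#include <cassert>
#include <cstdint>

// x86-style assumption: any address inside the call instruction is enough for
// the symbolizer, so backing up one byte from the return address works.
static uint64_t PreviousInstructionPc(uint64_t return_pc) {
  return return_pc - 1;
}

int main() {
  uint64_t call_insn = 0x400500;        // hypothetical 5-byte call at this address
  uint64_t return_pc = call_insn + 5;   // what the callee sees as its return address
  assert(PreviousInstructionPc(return_pc) >= call_insn &&
         PreviousInstructionPc(return_pc) <  return_pc);
  return 0;
}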
ScopedReport::ScopedReport(ReportType typ) {
- ctx_ = CTX();
- ctx_->thread_registry->CheckLocked();
+ ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
- ctx_->report_mtx.Lock();
+ ctx->report_mtx.Lock();
CommonSanitizerReportMutex.Lock();
}
ScopedReport::~ScopedReport() {
CommonSanitizerReportMutex.Unlock();
- ctx_->report_mtx.Unlock();
+ ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
}
-void ScopedReport::AddStack(const StackTrace *stack) {
+void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
ReportStack **rs = rep_->stacks.PushBack();
- *rs = SymbolizeStack(*stack);
+ *rs = SymbolizeStack(stack);
+ (*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
- const StackTrace *stack, const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+ const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
@@ -173,30 +163,22 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->size = s.size();
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
- mop->stack = SymbolizeStack(*stack);
+ mop->stack = SymbolizeStack(stack);
+ if (mop->stack)
+ mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 uid = 0;
- uptr addr = SyncVar::SplitId(d.id, &uid);
- SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- ReportMopMutex mtx = {s->uid, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(s);
- } else {
- ReportMopMutex mtx = {d.id, d.write};
- mop->mset.PushBack(mtx);
- AddMutex(d.id);
- }
- if (s)
- s->mtx.ReadUnlock();
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
}
}
-void ScopedReport::AddThread(const ThreadContext *tctx) {
+void ScopedReport::AddUniqueTid(int unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
+void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
@@ -207,19 +189,16 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->id = tctx->tid;
rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
- rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
+ rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
rt->stack = 0;
-#ifdef TSAN_GO
- rt->stack = SymbolizeStack(tctx->creation_stack);
-#else
rt->stack = SymbolizeStackId(tctx->creation_stack_id);
-#endif
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -232,7 +211,6 @@ static ThreadContext *FindThreadByUidLocked(int unique_id) {
}
static ThreadContext *FindThreadByTidLocked(int tid) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
return static_cast<ThreadContext*>(
ctx->thread_registry->GetThreadLocked(tid));
@@ -250,7 +228,6 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
- Context *ctx = CTX();
ctx->thread_registry->CheckLocked();
ThreadContext *tctx = static_cast<ThreadContext*>(
ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
@@ -264,6 +241,13 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
+void ScopedReport::AddThread(int unique_tid, bool suppressable) {
+#ifndef SANITIZER_GO
+ if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ AddThread(tctx, suppressable);
+#endif
+}
+
void ScopedReport::AddMutex(const SyncVar *s) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == s->uid)
@@ -273,14 +257,31 @@ void ScopedReport::AddMutex(const SyncVar *s) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = s->uid;
+ rm->addr = s->addr;
rm->destroyed = false;
- rm->stack = 0;
-#ifndef TSAN_GO
rm->stack = SymbolizeStackId(s->creation_stack_id);
-#endif
}
-void ScopedReport::AddMutex(u64 id) {
+u64 ScopedReport::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ if (s)
+ s->mtx.Unlock();
+ return mid;
+}
+
+void ScopedReport::AddDeadMutex(u64 id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
if (rep_->mutexes[i]->id == id)
return;
@@ -289,6 +290,7 @@ void ScopedReport::AddMutex(u64 id) {
ReportMutex *rm = new(mem) ReportMutex();
rep_->mutexes.PushBack(rm);
rm->id = id;
+ rm->addr = 0;
rm->destroyed = true;
rm->stack = 0;
}
@@ -296,62 +298,57 @@ void ScopedReport::AddMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
- || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = ReportLocationFD;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ ReportLocation *loc = ReportLocation::New(ReportLocationFD);
loc->fd = fd;
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
if (tctx)
AddThread(tctx);
return;
}
MBlock *b = 0;
- if (allocator()->PointerIsMine((void*)addr)
- && (b = user_mblock(0, (void*)addr))) {
- ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ void *block_begin = a->GetBlockBegin((void*)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_size = b->siz;
+ loc->tid = tctx ? tctx->tid : b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- loc->type = ReportLocationHeap;
- loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->Size();
- loc->tid = tctx ? tctx->tid : b->Tid();
- loc->name = 0;
- loc->file = 0;
- loc->line = 0;
- loc->stack = 0;
- loc->stack = SymbolizeStackId(b->StackId());
if (tctx)
AddThread(tctx);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
- void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
- ReportLocation *loc = new(mem) ReportLocation();
- rep_->locs.PushBack(loc);
- loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
+ ReportLocation *loc =
+ ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
AddThread(tctx);
}
- ReportLocation *loc = SymbolizeData(addr);
- if (loc) {
+ if (ReportLocation *loc = SymbolizeData(addr)) {
+ loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
}
#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -365,39 +362,32 @@ const ReportDesc *ScopedReport::GetReport() const {
return rep_;
}
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- Context *ctx = CTX();
- ctx->thread_registry->CheckLocked();
- ThreadContext *tctx = static_cast<ThreadContext*>(
- ctx->thread_registry->GetThreadLocked(tid));
- if (tctx == 0)
- return;
- if (tctx->status != ThreadStatusRunning
- && tctx->status != ThreadStatusFinished
- && tctx->status != ThreadStatusDead)
- return;
- Trace* trace = ThreadTrace(tctx->tid);
- Lock l(&trace->mtx);
+ Trace* trace = ThreadTrace(tid);
+ ReadLock l(&trace->mtx);
const int partidx = (epoch / kTracePartSize) % TraceParts();
TraceHeader* hdr = &trace->headers[partidx];
- if (epoch < hdr->epoch0)
+ if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
return;
+ CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
const u64 epoch0 = RoundDown(epoch, TraceSize());
const u64 eend = epoch % TraceSize();
const u64 ebegin = RoundDown(eend, kTracePartSize);
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
- InternalScopedBuffer<uptr> stack(kShadowStackSize);
- for (uptr i = 0; i < hdr->stack0.Size(); i++) {
- stack[i] = hdr->stack0.Get(i);
- DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
+ Vector<uptr> stack(MBlockReportStack);
+ stack.Resize(hdr->stack0.size + 64);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
+ DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
}
if (mset)
*mset = hdr->mset0;
- uptr pos = hdr->stack0.Size();
+ uptr pos = hdr->stack0.size;
Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i];
@@ -407,6 +397,8 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
if (typ == EventTypeMop) {
stack[pos] = pc;
} else if (typ == EventTypeFuncEnter) {
+ if (stack.Size() < pos + 2)
+ stack.Resize(pos + 2);
stack[pos++] = pc;
} else if (typ == EventTypeFuncExit) {
if (pos > 0)
@@ -429,56 +421,62 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
if (pos == 0 && stack[0] == 0)
return;
pos++;
- stk->Init(stack.data(), pos);
+ stk->Init(&stack[0], pos);
}
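(RestoreStack now verifies that the requested epoch still falls inside the trace part it maps to, since the circular trace may already have been recycled. A worked sketch of the index arithmetic with small illustrative sizes; the real kTracePartSize and part count are larger powers of two.)

#include <cassert>
typedef unsigned long long u64;

static u64 RoundDown(u64 x, u64 n) { return x & ~(n - 1); }   // n is a power of two

int main() {
  const u64 kTracePartSize = 16;     // illustrative; the real value is much larger
  const u64 kTraceParts    = 4;      // the trace is a ring of this many parts
  const u64 kTraceSize     = kTracePartSize * kTraceParts;

  u64 epoch   = 100;                                       // event we want to restore
  u64 partidx = (epoch / kTracePartSize) % kTraceParts;    // which part holds it
  u64 eend    = epoch % kTraceSize;                        // position inside the ring
  u64 ebegin  = RoundDown(eend, kTracePartSize);           // start of that part
  assert(partidx == 2 && eend == 36 && ebegin == 32);

  // The part header records its starting epoch; the event is only restorable if
  // epoch0 <= epoch < epoch0 + kTracePartSize, i.e. the part was not recycled.
  u64 epoch0 = RoundDown(epoch, kTracePartSize);           // expected header value
  assert(epoch0 == 96 && epoch >= epoch0 && epoch < epoch0 + kTracePartSize);
  return 0;
}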
-static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
bool equal_stack = false;
RacyStacks hash;
- if (flags()->suppress_equal_stacks) {
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
- for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
- if (hash == ctx->racy_stacks[i]) {
- DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
- equal_stack = true;
- break;
- }
- }
- }
bool equal_address = false;
RacyAddress ra0 = {addr_min, addr_max};
- if (flags()->suppress_equal_addresses) {
- for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
- RacyAddress ra2 = ctx->racy_addresses[i];
- uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
- uptr minend = min(ra0.addr_max, ra2.addr_max);
- if (maxbeg < minend) {
- DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
- equal_address = true;
- break;
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (flags()->suppress_equal_stacks) {
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ VPrintf(2,
+ "ThreadSanitizer: suppressing report as doubled (stack)\n");
+ equal_stack = true;
+ break;
+ }
+ }
+ }
+ if (flags()->suppress_equal_addresses) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+ equal_address = true;
+ break;
+ }
}
}
}
- if (equal_stack || equal_address) {
- if (!equal_stack)
- ctx->racy_stacks.PushBack(hash);
- if (!equal_address)
- ctx->racy_addresses.PushBack(ra0);
- return true;
+ if (!equal_stack && !equal_address)
+ return false;
+ if (!equal_stack) {
+ Lock lock(&ctx->racy_mtx);
+ ctx->racy_stacks.PushBack(hash);
}
- return false;
+ if (!equal_address) {
+ Lock lock(&ctx->racy_mtx);
+ ctx->racy_addresses.PushBack(ra0);
+ }
+ return true;
}
-static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
- uptr addr_min, uptr addr_max) {
- Context *ctx = CTX();
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
+ Lock lock(&ctx->racy_mtx);
if (flags()->suppress_equal_stacks) {
RacyStacks hash;
- hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
ctx->racy_stacks.PushBack(hash);
}
if (flags()->suppress_equal_addresses) {
@@ -487,43 +485,51 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
}
}
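
HandleRacyStacks and AddRacyStacks suppress duplicate race reports in two ways: the md5 hashes of the two involved stacks are remembered, and the [addr_min, addr_max) ranges are remembered with any overlap counting as already reported. The rewrite also narrows the locking: lookups take the new ctx->racy_mtx as a read lock, and only insertions take it as a write lock. A rough stand-alone model of the same dedup logic (std::hash instead of md5, std::shared_mutex instead of the sanitizer mutex, and HandleRacyReport/HashStacks are made-up names):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <mutex>
#include <shared_mutex>
#include <vector>

struct RacyAddress { uintptr_t addr_min, addr_max; };

static std::shared_mutex racy_mtx;
static std::vector<size_t> racy_stacks;         // hashes of already-reported stack pairs
static std::vector<RacyAddress> racy_addresses; // already-reported address ranges

static size_t HashStacks(const std::vector<uintptr_t> &s0,
                         const std::vector<uintptr_t> &s1) {
  size_t h = 0;
  for (uintptr_t pc : s0) h = h * 31 + std::hash<uintptr_t>{}(pc);
  for (uintptr_t pc : s1) h = h * 31 + std::hash<uintptr_t>{}(pc);
  return h;
}

// Returns true (suppress) if the stack pair or the address range was seen
// before; whichever half was new is recorded so later duplicates match too.
bool HandleRacyReport(const std::vector<uintptr_t> &s0,
                      const std::vector<uintptr_t> &s1,
                      uintptr_t addr_min, uintptr_t addr_max) {
  const size_t h = HashStacks(s0, s1);
  bool equal_stack = false, equal_address = false;
  {
    std::shared_lock<std::shared_mutex> lock(racy_mtx);  // readers run in parallel
    equal_stack = std::find(racy_stacks.begin(), racy_stacks.end(), h) !=
                  racy_stacks.end();
    for (const RacyAddress &ra : racy_addresses)
      if (std::max(addr_min, ra.addr_min) < std::min(addr_max, ra.addr_max))
        equal_address = true;  // the address ranges overlap
  }
  if (!equal_stack && !equal_address)
    return false;  // genuinely new: the caller prints it, then records both
  std::unique_lock<std::shared_mutex> lock(racy_mtx);  // writer lock for insertion
  if (!equal_stack) racy_stacks.push_back(h);
  if (!equal_address) racy_addresses.push_back({addr_min, addr_max});
  return true;
}

As in the original, the lookup and the insertion are two separate critical sections, so a concurrent thread may occasionally insert the same entry twice; that costs a little memory, not correctness.
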
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1,
- const ReportStack *suppress_stack2,
- const ReportLocation *suppress_loc) {
- atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+ if (!flags()->report_bugs)
+ return false;
+ atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
Suppression *supp = 0;
- uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
- if (suppress_pc != 0) {
- FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
+ uptr pc_or_addr = 0;
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
+ if (pc_or_addr != 0) {
+ Lock lock(&ctx->fired_suppressions_mtx);
+ FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
ctx->fired_suppressions.push_back(s);
}
- if (OnReport(rep, suppress_pc != 0))
- return false;
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, pc_or_addr != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed)
+ return false;
+ }
PrintReport(rep);
ctx->nreported++;
if (flags()->halt_on_error)
- internal__exit(flags()->exitcode);
+ Die();
return true;
}
-bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- const StackTrace &trace) {
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
- if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+ if (ctx->fired_suppressions[k].type != type)
continue;
- for (uptr j = 0; j < trace.Size(); j++) {
+ for (uptr j = 0; j < trace.size; j++) {
FiredSuppression *s = &ctx->fired_suppressions[k];
- if (trace.Get(j) == s->pc) {
+ if (trace.trace[j] == s->pc_or_addr) {
if (s->supp)
- s->supp->hit_count++;
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
return true;
}
}
@@ -531,64 +537,21 @@ bool IsFiredSuppression(Context *ctx,
return false;
}
-static bool IsFiredSuppression(Context *ctx,
- const ScopedReport &srep,
- uptr addr) {
+static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
- if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+ if (ctx->fired_suppressions[k].type != type)
continue;
FiredSuppression *s = &ctx->fired_suppressions[k];
- if (addr == s->pc) {
+ if (addr == s->pc_or_addr) {
if (s->supp)
- s->supp->hit_count++;
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
return true;
}
}
return false;
}
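
Both IsFiredSuppression overloads scan a cache of suppressions that have already fired, keyed by report type plus a PC or address; the lookup now takes fired_suppressions_mtx as a read lock, and hit counters become atomics so concurrent readers can bump them with a relaxed fetch-add. A compact sketch of that lookup, with standard atomics and shared_mutex in place of the sanitizer primitives:

#include <atomic>
#include <cstdint>
#include <shared_mutex>
#include <vector>

enum ReportType { kReportRace, kReportThreadLeak };

struct Suppression {
  std::atomic<int> hit_count{0};
};

struct FiredSuppression {
  ReportType type;
  uintptr_t pc_or_addr;  // PC of the suppressed frame, or the suppressed address
  Suppression *supp;     // may be null for internally generated suppressions
};

static std::shared_mutex fired_mtx;
static std::vector<FiredSuppression> fired;

bool IsFiredSuppression(ReportType type, uintptr_t pc_or_addr) {
  std::shared_lock<std::shared_mutex> lock(fired_mtx);  // read-only scan
  for (const FiredSuppression &s : fired) {
    if (s.type != type || s.pc_or_addr != pc_or_addr)
      continue;
    if (s.supp)  // relaxed is enough: the counter is only read for reporting
      s.supp->hit_count.fetch_add(1, std::memory_order_relaxed);
    return true;
  }
  return false;
}

int main() {
  Suppression supp;
  fired.push_back({kReportRace, 0x1234, &supp});
  return IsFiredSuppression(kReportRace, 0x1234) ? 0 : 1;
}
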
-bool FrameIsInternal(const ReportStack *frame) {
- return frame != 0 && frame->file != 0
- && (internal_strstr(frame->file, "tsan_interceptors.cc") ||
- internal_strstr(frame->file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(frame->file, "tsan_interface_"));
-}
-
-// On programs that use Java we see weird reports like:
-// WARNING: ThreadSanitizer: data race (pid=22512)
-// Read of size 8 at 0x7d2b00084318 by thread 100:
-// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
-// #1 <null> <null>:0 (0x7f7ad9b40193)
-// Previous write of size 8 at 0x7d2b00084318 by thread 105:
-// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
-// #1 <null> <null>:0 (0x7f7ad9b42707)
-static bool IsJavaNonsense(const ReportDesc *rep) {
-#ifndef TSAN_GO
- for (uptr i = 0; i < rep->mops.Size(); i++) {
- ReportMop *mop = rep->mops[i];
- ReportStack *frame = mop->stack;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- return true;
- }
- if (FrameIsInternal(frame)) {
- frame = frame->next;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- if (frame) {
- FiredSuppression supp = {rep->typ, frame->pc, 0};
- CTX()->fired_suppressions.push_back(supp);
- }
- return true;
- }
- }
- }
-#endif
- return false;
-}
-
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
Shadow s0(thr->racy_state[0]);
Shadow s1(thr->racy_state[1]);
@@ -603,10 +566,14 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
}
void ReportRace(ThreadState *thr) {
+ CheckNoLocks(thr);
+
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
if (!flags()->report_bugs)
return;
- ScopedInRtl in_rtl;
-
if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
return;
@@ -631,42 +598,44 @@ void ReportRace(ThreadState *thr) {
return;
}
- Context *ctx = CTX();
- ThreadRegistryLock l0(ctx->thread_registry);
-
ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access)
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
typ = ReportTypeVptrRace;
else if (freed)
typ = ReportTypeUseAfterFree;
- ScopedReport rep(typ);
- if (IsFiredSuppression(ctx, rep, addr))
+
+ if (IsFiredSuppression(ctx, typ, addr))
return;
+
const uptr kMop = 2;
- StackTrace traces[kMop];
+ VarSizeStackTrace traces[kMop];
const uptr toppc = TraceTopPC(thr);
- traces[0].ObtainCurrent(thr, toppc);
- if (IsFiredSuppression(ctx, rep, traces[0]))
+ ObtainCurrentStack(thr, toppc, &traces[0]);
+ if (IsFiredSuppression(ctx, typ, traces[0]))
return;
- InternalScopedBuffer<MutexSet> mset2(1);
- new(mset2.data()) MutexSet();
+
+ // MutexSet is too large to live on stack.
+ Vector<u64> mset_buffer(MBlockScopedBuf);
+ mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
+ MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+
Shadow s2(thr->racy_state[1]);
- RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
- if (IsFiredSuppression(ctx, rep, traces[1]))
+ RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
+ if (IsFiredSuppression(ctx, typ, traces[1]))
return;
if (HandleRacyStacks(thr, traces, addr_min, addr_max))
return;
+ ThreadRegistryLock l0(ctx->thread_registry);
+ ScopedReport rep(typ);
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, s, &traces[i],
- i == 0 ? &thr->mset : mset2.data());
+ rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
}
- if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
- return;
-
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -678,7 +647,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -686,37 +655,40 @@ void ReportRace(ThreadState *thr) {
}
#endif
- ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
- rep.GetReport()->locs[0] : 0;
- if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
- rep.GetReport()->mops[1]->stack,
- suppress_loc))
+ if (!OutputReport(thr, rep))
return;
AddRacyStacks(thr, traces, addr_min, addr_max);
}
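
Two details of the reworked ReportRace are worth calling out: the cheap checks (fired suppressions, racy-stack dedup) now run before the ThreadRegistryLock is taken and the ScopedReport is built, and MutexSet, which is too large for the runtime's per-thread stacks, is constructed with placement new inside a heap-backed Vector<u64> sized to hold it. The same placement-new trick in portable C++, with a toy LargeObject standing in for MutexSet:

#include <cstdint>
#include <new>
#include <vector>

// Stand-in for a type that is too large to put on a small per-thread stack.
struct LargeObject {
  uint64_t slots[4096];
  LargeObject() { for (uint64_t &s : slots) s = 0; }
};

int main() {
  // Reserve heap storage measured in u64 units, then construct in place,
  // mirroring the Vector<u64> + placement-new sequence in ReportRace above.
  std::vector<uint64_t> buffer(sizeof(LargeObject) / sizeof(uint64_t) + 1);
  LargeObject *obj = new (&buffer[0]) LargeObject();
  obj->slots[0] = 42;
  obj->~LargeObject();  // placement new needs an explicit destructor call
  return 0;
}
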
void PrintCurrentStack(ThreadState *thr, uptr pc) {
- StackTrace trace;
- trace.ObtainCurrent(thr, pc);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
PrintStack(SymbolizeStack(trace));
}
-void PrintCurrentStackSlow() {
-#ifndef TSAN_GO
- __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace,
- sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace;
- ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(),
- 0, 0, 0, false);
+void PrintCurrentStackSlow(uptr pc) {
+#ifndef SANITIZER_GO
+ BufferedStackTrace *ptrace =
+ new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+ BufferedStackTrace();
+ ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
- uptr tmp = ptrace->trace[i];
- ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1];
- ptrace->trace[ptrace->size - i - 1] = tmp;
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
}
- StackTrace trace;
- trace.Init(ptrace->trace, ptrace->size);
- PrintStack(SymbolizeStack(trace));
+ PrintStack(SymbolizeStack(*ptrace));
#endif
}
} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
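
PrintCurrentStackSlow unwinds into a BufferedStackTrace and then reverses the PC buffer in place with a half-length swap loop so frames print caller-first, and the new extern "C" __sanitizer_print_stack_trace simply forwards the current PC into it. A plain sketch of the reversal plus a C-linkage wrapper (generic names, no SANITIZER_INTERFACE_ATTRIBUTE):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Reverse the unwound PCs in place by swapping symmetric elements,
// the same half-length loop used in PrintCurrentStackSlow above.
static void ReverseTrace(uintptr_t *trace, size_t size) {
  for (size_t i = 0; i < size / 2; i++) {
    uintptr_t tmp = trace[i];
    trace[i] = trace[size - i - 1];
    trace[size - i - 1] = tmp;
  }
}

// C-linkage entry point, analogous in shape to __sanitizer_print_stack_trace.
extern "C" void print_stack_trace_demo() {
  uintptr_t trace[4] = {0x40, 0x30, 0x20, 0x10};  // pretend these were unwound
  ReverseTrace(trace, 4);
  for (uintptr_t pc : trace)
    std::printf("  #%llx\n", (unsigned long long)pc);
}

int main() { print_stack_trace_demo(); }
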
diff --git a/libsanitizer/tsan/tsan_rtl_thread.cc b/libsanitizer/tsan/tsan_rtl_thread.cc
index dea66983535..aed3fe87393 100644
--- a/libsanitizer/tsan/tsan_rtl_thread.cc
+++ b/libsanitizer/tsan/tsan_rtl_thread.cc
@@ -28,19 +28,19 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
void ThreadContext::OnDead() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
}
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset();
+ sync.Reset(&caller_thr->clock_cache);
}
struct OnCreatedArgs {
@@ -53,25 +53,28 @@ void ThreadContext::OnCreated(void *arg) {
if (tid == 0)
return;
OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ if (!args->thr) // GCD workers don't have a parent thread.
+ return;
args->thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
ReleaseImpl(args->thr, 0, &sync);
-#ifdef TSAN_GO
- creation_stack.ObtainCurrent(args->thr, args->pc);
-#else
creation_stack_id = CurrentStackId(args->thr, args->pc);
-#endif
if (reuse_count == 0)
StatInc(args->thr, StatThreadMaxTid);
}
void ThreadContext::OnReset() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
//!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState*>(arg);
+ sync.Reset(&thr1->clock_cache);
+}
+
struct OnStartedArgs {
ThreadState *thr;
uptr stk_addr;
@@ -87,9 +90,9 @@ void ThreadContext::OnStarted(void *arg) {
// from different threads.
epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
epoch1 = (u64)-1;
- new(thr) ThreadState(CTX(), tid, unique_id,
- epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef TSAN_GO
+ new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
+ args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#ifndef SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -101,9 +104,13 @@ void ThreadContext::OnStarted(void *arg) {
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorThreadStart(thr);
#endif
+ if (common_flags()->detect_deadlocks) {
+ thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ }
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
thr->fast_state.SetHistorySize(flags()->history_size);
@@ -111,12 +118,12 @@ void ThreadContext::OnStarted(void *arg) {
Trace *thr_trace = ThreadTrace(thr->tid);
thr_trace->headers[trace].epoch0 = epoch0;
StatInc(thr, StatSyncAcquire);
- sync.Reset();
+ sync.Reset(&thr->clock_cache);
+ thr->is_inited = true;
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
tid, (uptr)epoch0, args->stk_addr, args->stk_size,
args->tls_addr, args->tls_size);
- thr->is_alive = true;
}
void ThreadContext::OnFinished() {
@@ -128,15 +135,23 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
-#ifndef TSAN_GO
+ if (common_flags()->detect_deadlocks) {
+ ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ }
+ ctx->clock_alloc.FlushCache(&thr->clock_cache);
+ ctx->metamap.OnThreadIdle(thr);
+#ifndef SANITIZER_GO
AllocatorThreadFinish(thr);
#endif
thr->~ThreadState();
- StatAggregate(CTX()->stat, thr->stat);
+#if TSAN_COLLECT_STATS
+ StatAggregate(ctx->stat, thr->stat);
+#endif
thr = 0;
}
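
OnStarted and OnFinished now bracket per-thread resources symmetrically: deadlock-detector state (dd_pt/dd_lt) is created on start only when detect_deadlocks is set and destroyed on finish, and the per-thread clock cache is flushed back to the global allocator before the ThreadState is torn down. A toy illustration of that acquire-on-start/release-on-finish pairing (DetectorState, ClockCache, and ThreadSlot are invented for the sketch):

#include <cstdio>

// Invented stand-ins for the per-thread deadlock-detector state (dd_pt/dd_lt)
// and the clock cache that OnStarted/OnFinished pair up above.
struct DetectorState { int owner_tid; };
struct ClockCache { int cached_blocks = 0; };

struct ThreadSlot {
  DetectorState *dd = nullptr;
  ClockCache cache;

  void OnStarted(bool detect_deadlocks, int tid) {
    if (detect_deadlocks)
      dd = new DetectorState{tid};  // acquired once when the thread starts
    cache.cached_blocks = 8;        // pretend some clock blocks get cached
  }

  void OnFinished(bool detect_deadlocks) {
    if (detect_deadlocks) {
      delete dd;                    // released on the matching finish path
      dd = nullptr;
    }
    cache.cached_blocks = 0;        // "flush" the cache back to a global pool
  }
};

int main() {
  ThreadSlot slot;
  slot.OnStarted(/*detect_deadlocks=*/true, /*tid=*/1);
  slot.OnFinished(/*detect_deadlocks=*/true);
  std::puts("per-thread resources released");
}
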
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -158,7 +173,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -177,6 +192,8 @@ static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
}
static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
if (thr->ignore_reads_and_writes)
ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
if (thr->ignore_sync)
@@ -187,50 +204,46 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
#endif
void ThreadFinalize(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
- ThreadRegistryLock l(CTX()->thread_registry);
+ ThreadRegistryLock l(ctx->thread_registry);
Vector<ThreadLeak> leaks(MBlockScopedBuf);
- CTX()->thread_registry->RunCallbackForEachThreadLocked(
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
MaybeReportThreadLeak, &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(leaks[i].tctx);
+ rep.AddThread(leaks[i].tctx, true);
rep.SetCount(leaks[i].count);
- OutputReport(CTX(), rep);
+ OutputReport(thr, rep);
}
#endif
}
int ThreadCount(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
uptr result;
ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
return (int)result;
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- CHECK_GT(thr->in_rtl, 0);
StatInc(thr, StatThreadCreate);
- Context *ctx = CTX();
OnCreatedArgs args = { thr, pc };
- int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
- DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
+ u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
+ int tid =
+ ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
return tid;
}
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
- Context *ctx = CTX();
- CHECK_GT(thr->in_rtl, 0);
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
uptr tls_size = 0;
+#ifndef SANITIZER_GO
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
@@ -251,6 +264,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr_end, tls_addr + tls_size - thr_end);
}
}
+#endif
ThreadRegistry *tr = ctx->thread_registry;
OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
@@ -259,18 +273,24 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
tr->Lock();
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
+
+#ifndef SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
}
void ThreadFinish(ThreadState *thr) {
- CHECK_GT(thr->in_rtl, 0);
ThreadCheckIgnore(thr);
StatInc(thr, StatThreadFinish);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
- thr->is_alive = false;
- Context *ctx = CTX();
+ thr->is_dead = true;
ctx->thread_registry->FinishThread(thr->tid);
}
@@ -284,33 +304,26 @@ static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
}
int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
- CHECK_GT(thr->in_rtl, 0);
- Context *ctx = CTX();
int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
return res;
}
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- Context *ctx = CTX();
ctx->thread_registry->JoinThread(tid, thr);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(thr->in_rtl, 0);
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
- Context *ctx = CTX();
- ctx->thread_registry->DetachThread(tid);
+ ctx->thread_registry->DetachThread(tid, thr);
}
void ThreadSetName(ThreadState *thr, const char *name) {
- CHECK_GT(thr->in_rtl, 0);
- CTX()->thread_registry->SetThreadName(thr->tid, name);
+ ctx->thread_registry->SetThreadName(thr->tid, name);
}
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
@@ -323,7 +336,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
thr->tid, (void*)pc, (void*)addr,
(int)size, is_write);
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
if (!IsAppMem(addr)) {
Printf("Access to non app mem %zx\n", addr);
DCHECK(IsAppMem(addr));
diff --git a/libsanitizer/tsan/tsan_stack_trace.cc b/libsanitizer/tsan/tsan_stack_trace.cc
new file mode 100644
index 00000000000..3734e0e4975
--- /dev/null
+++ b/libsanitizer/tsan/tsan_stack_trace.cc
@@ -0,0 +1,44 @@
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
+
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
+}
+
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ if (trace_buffer) {
+ internal_free(trace_buffer);
+ }
+ trace_buffer =
+ (new_size > 0)
+ ? (uptr *)internal_alloc(MBlockStackTrace,
+ new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+}
+
+} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_stack_trace.h b/libsanitizer/tsan/tsan_stack_trace.h
new file mode 100644
index 00000000000..b097a9b2696
--- /dev/null
+++ b/libsanitizer/tsan/tsan_stack_trace.h
@@ -0,0 +1,37 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ private:
+ void ResizeBuffer(uptr new_size);
+
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
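
The two new files introduce VarSizeStackTrace: a StackTrace whose PC buffer lives on the internal heap and is owned by the object, with copying disabled and an optional extra PC appended after the copied frames. A stand-alone approximation using new[]/delete[] instead of internal_alloc/internal_free and without the sanitizer StackTrace base class:

#include <cstdint>
#include <cstring>

// Minimal analogue of VarSizeStackTrace: owns a heap buffer of PCs,
// is non-copyable, and can append one extra PC after the copied trace.
class OwnedStackTrace {
 public:
  OwnedStackTrace() : trace_(nullptr), size_(0) {}
  ~OwnedStackTrace() { Resize(0); }

  void Init(const uintptr_t *pcs, size_t cnt, uintptr_t extra_top_pc = 0) {
    Resize(cnt + (extra_top_pc != 0));
    std::memcpy(trace_, pcs, cnt * sizeof(trace_[0]));
    if (extra_top_pc)
      trace_[cnt] = extra_top_pc;
  }

  const uintptr_t *trace() const { return trace_; }
  size_t size() const { return size_; }

  OwnedStackTrace(const OwnedStackTrace &) = delete;
  OwnedStackTrace &operator=(const OwnedStackTrace &) = delete;

 private:
  void Resize(size_t new_size) {
    delete[] trace_;
    trace_ = new_size ? new uintptr_t[new_size] : nullptr;
    size_ = new_size;
  }

  uintptr_t *trace_;
  size_t size_;
};

int main() {
  uintptr_t pcs[2] = {0x1000, 0x2000};
  OwnedStackTrace st;
  st.Init(pcs, 2, /*extra_top_pc=*/0x3000);
  return st.size() == 3 ? 0 : 1;
}
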
diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc
index 6bc345397ad..5eccca69312 100644
--- a/libsanitizer/tsan/tsan_stat.cc
+++ b/libsanitizer/tsan/tsan_stat.cc
@@ -13,17 +13,14 @@
namespace __tsan {
+#if TSAN_COLLECT_STATS
+
void StatAggregate(u64 *dst, u64 *src) {
- if (!kCollectStats)
- return;
for (int i = 0; i < StatCnt; i++)
dst[i] += src[i];
}
void StatOutput(u64 *stat) {
- if (!kCollectStats)
- return;
-
stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
static const char *name[StatCnt] = {};
@@ -35,6 +32,7 @@ void StatOutput(u64 *stat) {
name[StatMop4] = " size 4 ";
name[StatMop8] = " size 8 ";
name[StatMopSame] = " Including same ";
+ name[StatMopIgnored] = " Including ignored ";
name[StatMopRange] = " Including range ";
name[StatMopRodata] = " Including .rodata ";
name[StatMopRangeRodata] = " Including .rodata range ";
@@ -72,6 +70,28 @@ void StatOutput(u64 *stat) {
name[StatSyncAcquire] = " acquired ";
name[StatSyncRelease] = " released ";
+ name[StatClockAcquire] = "Clock acquire ";
+ name[StatClockAcquireEmpty] = " empty clock ";
+ name[StatClockAcquireFastRelease] = " fast from release-store ";
+ name[StatClockAcquireLarge] = " contains my tid ";
+ name[StatClockAcquireRepeat] = " repeated (fast) ";
+ name[StatClockAcquireFull] = " full (slow) ";
+ name[StatClockAcquiredSomething] = " acquired something ";
+ name[StatClockRelease] = "Clock release ";
+ name[StatClockReleaseResize] = " resize ";
+ name[StatClockReleaseFast1] = " fast1 ";
+ name[StatClockReleaseFast2] = " fast2 ";
+ name[StatClockReleaseSlow] = " dirty overflow (slow) ";
+ name[StatClockReleaseFull] = " full (slow) ";
+ name[StatClockReleaseAcquired] = " was acquired ";
+ name[StatClockReleaseClearTail] = " clear tail ";
+ name[StatClockStore] = "Clock release store ";
+ name[StatClockStoreResize] = " resize ";
+ name[StatClockStoreFast] = " fast ";
+ name[StatClockStoreFull] = " slow ";
+ name[StatClockStoreTail] = " clear tail ";
+ name[StatClockAcquireRelease] = "Clock acquire-release ";
+
name[StatAtomic] = "Atomic operations ";
name[StatAtomicLoad] = " Including load ";
name[StatAtomicStore] = " store ";
@@ -96,338 +116,6 @@ void StatOutput(u64 *stat) {
name[StatAtomic8] = " size 8 ";
name[StatAtomic16] = " size 16 ";
- name[StatInterceptor] = "Interceptors ";
- name[StatInt_longjmp] = " longjmp ";
- name[StatInt_siglongjmp] = " siglongjmp ";
- name[StatInt_malloc] = " malloc ";
- name[StatInt___libc_memalign] = " __libc_memalign ";
- name[StatInt_calloc] = " calloc ";
- name[StatInt_realloc] = " realloc ";
- name[StatInt_free] = " free ";
- name[StatInt_cfree] = " cfree ";
- name[StatInt_malloc_usable_size] = " malloc_usable_size ";
- name[StatInt_mmap] = " mmap ";
- name[StatInt_mmap64] = " mmap64 ";
- name[StatInt_munmap] = " munmap ";
- name[StatInt_memalign] = " memalign ";
- name[StatInt_valloc] = " valloc ";
- name[StatInt_pvalloc] = " pvalloc ";
- name[StatInt_posix_memalign] = " posix_memalign ";
- name[StatInt__Znwm] = " _Znwm ";
- name[StatInt__ZnwmRKSt9nothrow_t] = " _ZnwmRKSt9nothrow_t ";
- name[StatInt__Znam] = " _Znam ";
- name[StatInt__ZnamRKSt9nothrow_t] = " _ZnamRKSt9nothrow_t ";
- name[StatInt__ZdlPv] = " _ZdlPv ";
- name[StatInt__ZdlPvRKSt9nothrow_t] = " _ZdlPvRKSt9nothrow_t ";
- name[StatInt__ZdaPv] = " _ZdaPv ";
- name[StatInt__ZdaPvRKSt9nothrow_t] = " _ZdaPvRKSt9nothrow_t ";
- name[StatInt_strlen] = " strlen ";
- name[StatInt_memset] = " memset ";
- name[StatInt_memcpy] = " memcpy ";
- name[StatInt_textdomain] = " textdomain ";
- name[StatInt_strcmp] = " strcmp ";
- name[StatInt_memchr] = " memchr ";
- name[StatInt_memrchr] = " memrchr ";
- name[StatInt_memmove] = " memmove ";
- name[StatInt_memcmp] = " memcmp ";
- name[StatInt_strchr] = " strchr ";
- name[StatInt_strchrnul] = " strchrnul ";
- name[StatInt_strrchr] = " strrchr ";
- name[StatInt_strncmp] = " strncmp ";
- name[StatInt_strcpy] = " strcpy ";
- name[StatInt_strncpy] = " strncpy ";
- name[StatInt_strstr] = " strstr ";
- name[StatInt_strdup] = " strdup ";
- name[StatInt_strcasecmp] = " strcasecmp ";
- name[StatInt_strncasecmp] = " strncasecmp ";
- name[StatInt_atexit] = " atexit ";
- name[StatInt__exit] = " _exit ";
- name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire ";
- name[StatInt___cxa_guard_release] = " __cxa_guard_release ";
- name[StatInt___cxa_guard_abort] = " __cxa_guard_abort ";
- name[StatInt_pthread_create] = " pthread_create ";
- name[StatInt_pthread_join] = " pthread_join ";
- name[StatInt_pthread_detach] = " pthread_detach ";
- name[StatInt_pthread_mutex_init] = " pthread_mutex_init ";
- name[StatInt_pthread_mutex_destroy] = " pthread_mutex_destroy ";
- name[StatInt_pthread_mutex_lock] = " pthread_mutex_lock ";
- name[StatInt_pthread_mutex_trylock] = " pthread_mutex_trylock ";
- name[StatInt_pthread_mutex_timedlock] = " pthread_mutex_timedlock ";
- name[StatInt_pthread_mutex_unlock] = " pthread_mutex_unlock ";
- name[StatInt_pthread_spin_init] = " pthread_spin_init ";
- name[StatInt_pthread_spin_destroy] = " pthread_spin_destroy ";
- name[StatInt_pthread_spin_lock] = " pthread_spin_lock ";
- name[StatInt_pthread_spin_trylock] = " pthread_spin_trylock ";
- name[StatInt_pthread_spin_unlock] = " pthread_spin_unlock ";
- name[StatInt_pthread_rwlock_init] = " pthread_rwlock_init ";
- name[StatInt_pthread_rwlock_destroy] = " pthread_rwlock_destroy ";
- name[StatInt_pthread_rwlock_rdlock] = " pthread_rwlock_rdlock ";
- name[StatInt_pthread_rwlock_tryrdlock] = " pthread_rwlock_tryrdlock ";
- name[StatInt_pthread_rwlock_timedrdlock]
- = " pthread_rwlock_timedrdlock ";
- name[StatInt_pthread_rwlock_wrlock] = " pthread_rwlock_wrlock ";
- name[StatInt_pthread_rwlock_trywrlock] = " pthread_rwlock_trywrlock ";
- name[StatInt_pthread_rwlock_timedwrlock]
- = " pthread_rwlock_timedwrlock ";
- name[StatInt_pthread_rwlock_unlock] = " pthread_rwlock_unlock ";
- name[StatInt_pthread_cond_init] = " pthread_cond_init ";
- name[StatInt_pthread_cond_destroy] = " pthread_cond_destroy ";
- name[StatInt_pthread_cond_signal] = " pthread_cond_signal ";
- name[StatInt_pthread_cond_broadcast] = " pthread_cond_broadcast ";
- name[StatInt_pthread_cond_wait] = " pthread_cond_wait ";
- name[StatInt_pthread_cond_timedwait] = " pthread_cond_timedwait ";
- name[StatInt_pthread_barrier_init] = " pthread_barrier_init ";
- name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy ";
- name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait ";
- name[StatInt_pthread_once] = " pthread_once ";
- name[StatInt_pthread_getschedparam] = " pthread_getschedparam ";
- name[StatInt_pthread_setname_np] = " pthread_setname_np ";
- name[StatInt_sem_init] = " sem_init ";
- name[StatInt_sem_destroy] = " sem_destroy ";
- name[StatInt_sem_wait] = " sem_wait ";
- name[StatInt_sem_trywait] = " sem_trywait ";
- name[StatInt_sem_timedwait] = " sem_timedwait ";
- name[StatInt_sem_post] = " sem_post ";
- name[StatInt_sem_getvalue] = " sem_getvalue ";
- name[StatInt_stat] = " stat ";
- name[StatInt___xstat] = " __xstat ";
- name[StatInt_stat64] = " stat64 ";
- name[StatInt___xstat64] = " __xstat64 ";
- name[StatInt_lstat] = " lstat ";
- name[StatInt___lxstat] = " __lxstat ";
- name[StatInt_lstat64] = " lstat64 ";
- name[StatInt___lxstat64] = " __lxstat64 ";
- name[StatInt_fstat] = " fstat ";
- name[StatInt___fxstat] = " __fxstat ";
- name[StatInt_fstat64] = " fstat64 ";
- name[StatInt___fxstat64] = " __fxstat64 ";
- name[StatInt_open] = " open ";
- name[StatInt_open64] = " open64 ";
- name[StatInt_creat] = " creat ";
- name[StatInt_creat64] = " creat64 ";
- name[StatInt_dup] = " dup ";
- name[StatInt_dup2] = " dup2 ";
- name[StatInt_dup3] = " dup3 ";
- name[StatInt_eventfd] = " eventfd ";
- name[StatInt_signalfd] = " signalfd ";
- name[StatInt_inotify_init] = " inotify_init ";
- name[StatInt_inotify_init1] = " inotify_init1 ";
- name[StatInt_socket] = " socket ";
- name[StatInt_socketpair] = " socketpair ";
- name[StatInt_connect] = " connect ";
- name[StatInt_bind] = " bind ";
- name[StatInt_listen] = " listen ";
- name[StatInt_accept] = " accept ";
- name[StatInt_accept4] = " accept4 ";
- name[StatInt_epoll_create] = " epoll_create ";
- name[StatInt_epoll_create1] = " epoll_create1 ";
- name[StatInt_close] = " close ";
- name[StatInt___close] = " __close ";
- name[StatInt___res_iclose] = " __res_iclose ";
- name[StatInt_pipe] = " pipe ";
- name[StatInt_pipe2] = " pipe2 ";
- name[StatInt_read] = " read ";
- name[StatInt_prctl] = " prctl ";
- name[StatInt_pread] = " pread ";
- name[StatInt_pread64] = " pread64 ";
- name[StatInt_readv] = " readv ";
- name[StatInt_preadv] = " preadv ";
- name[StatInt_preadv64] = " preadv64 ";
- name[StatInt_write] = " write ";
- name[StatInt_pwrite] = " pwrite ";
- name[StatInt_pwrite64] = " pwrite64 ";
- name[StatInt_writev] = " writev ";
- name[StatInt_pwritev] = " pwritev ";
- name[StatInt_pwritev64] = " pwritev64 ";
- name[StatInt_send] = " send ";
- name[StatInt_sendmsg] = " sendmsg ";
- name[StatInt_recv] = " recv ";
- name[StatInt_recvmsg] = " recvmsg ";
- name[StatInt_unlink] = " unlink ";
- name[StatInt_fopen] = " fopen ";
- name[StatInt_freopen] = " freopen ";
- name[StatInt_fclose] = " fclose ";
- name[StatInt_fread] = " fread ";
- name[StatInt_fwrite] = " fwrite ";
- name[StatInt_fflush] = " fflush ";
- name[StatInt_abort] = " abort ";
- name[StatInt_puts] = " puts ";
- name[StatInt_rmdir] = " rmdir ";
- name[StatInt_opendir] = " opendir ";
- name[StatInt_epoll_ctl] = " epoll_ctl ";
- name[StatInt_epoll_wait] = " epoll_wait ";
- name[StatInt_poll] = " poll ";
- name[StatInt_ppoll] = " ppoll ";
- name[StatInt_sigaction] = " sigaction ";
- name[StatInt_signal] = " signal ";
- name[StatInt_sigsuspend] = " sigsuspend ";
- name[StatInt_raise] = " raise ";
- name[StatInt_kill] = " kill ";
- name[StatInt_pthread_kill] = " pthread_kill ";
- name[StatInt_sleep] = " sleep ";
- name[StatInt_usleep] = " usleep ";
- name[StatInt_nanosleep] = " nanosleep ";
- name[StatInt_gettimeofday] = " gettimeofday ";
- name[StatInt_fork] = " fork ";
- name[StatInt_vscanf] = " vscanf ";
- name[StatInt_vsscanf] = " vsscanf ";
- name[StatInt_vfscanf] = " vfscanf ";
- name[StatInt_scanf] = " scanf ";
- name[StatInt_sscanf] = " sscanf ";
- name[StatInt_fscanf] = " fscanf ";
- name[StatInt___isoc99_vscanf] = " vscanf ";
- name[StatInt___isoc99_vsscanf] = " vsscanf ";
- name[StatInt___isoc99_vfscanf] = " vfscanf ";
- name[StatInt___isoc99_scanf] = " scanf ";
- name[StatInt___isoc99_sscanf] = " sscanf ";
- name[StatInt___isoc99_fscanf] = " fscanf ";
- name[StatInt_on_exit] = " on_exit ";
- name[StatInt___cxa_atexit] = " __cxa_atexit ";
- name[StatInt_localtime] = " localtime ";
- name[StatInt_localtime_r] = " localtime_r ";
- name[StatInt_gmtime] = " gmtime ";
- name[StatInt_gmtime_r] = " gmtime_r ";
- name[StatInt_ctime] = " ctime ";
- name[StatInt_ctime_r] = " ctime_r ";
- name[StatInt_asctime] = " asctime ";
- name[StatInt_asctime_r] = " asctime_r ";
- name[StatInt_strptime] = " strptime ";
- name[StatInt_frexp] = " frexp ";
- name[StatInt_frexpf] = " frexpf ";
- name[StatInt_frexpl] = " frexpl ";
- name[StatInt_getpwnam] = " getpwnam ";
- name[StatInt_getpwuid] = " getpwuid ";
- name[StatInt_getgrnam] = " getgrnam ";
- name[StatInt_getgrgid] = " getgrgid ";
- name[StatInt_getpwnam_r] = " getpwnam_r ";
- name[StatInt_getpwuid_r] = " getpwuid_r ";
- name[StatInt_getgrnam_r] = " getgrnam_r ";
- name[StatInt_getgrgid_r] = " getgrgid_r ";
- name[StatInt_clock_getres] = " clock_getres ";
- name[StatInt_clock_gettime] = " clock_gettime ";
- name[StatInt_clock_settime] = " clock_settime ";
- name[StatInt_getitimer] = " getitimer ";
- name[StatInt_setitimer] = " setitimer ";
- name[StatInt_time] = " time ";
- name[StatInt_glob] = " glob ";
- name[StatInt_glob64] = " glob64 ";
- name[StatInt_wait] = " wait ";
- name[StatInt_waitid] = " waitid ";
- name[StatInt_waitpid] = " waitpid ";
- name[StatInt_wait3] = " wait3 ";
- name[StatInt_wait4] = " wait4 ";
- name[StatInt_inet_ntop] = " inet_ntop ";
- name[StatInt_inet_pton] = " inet_pton ";
- name[StatInt_inet_aton] = " inet_aton ";
- name[StatInt_getaddrinfo] = " getaddrinfo ";
- name[StatInt_getnameinfo] = " getnameinfo ";
- name[StatInt_getsockname] = " getsockname ";
- name[StatInt_gethostent] = " gethostent ";
- name[StatInt_gethostbyname] = " gethostbyname ";
- name[StatInt_gethostbyname2] = " gethostbyname2 ";
- name[StatInt_gethostbyaddr] = " gethostbyaddr ";
- name[StatInt_gethostent_r] = " gethostent_r ";
- name[StatInt_gethostbyname_r] = " gethostbyname_r ";
- name[StatInt_gethostbyname2_r] = " gethostbyname2_r ";
- name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r ";
- name[StatInt_getsockopt] = " getsockopt ";
- name[StatInt_modf] = " modf ";
- name[StatInt_modff] = " modff ";
- name[StatInt_modfl] = " modfl ";
- name[StatInt_getpeername] = " getpeername ";
- name[StatInt_ioctl] = " ioctl ";
- name[StatInt_sysinfo] = " sysinfo ";
- name[StatInt_readdir] = " readdir ";
- name[StatInt_readdir64] = " readdir64 ";
- name[StatInt_readdir_r] = " readdir_r ";
- name[StatInt_readdir64_r] = " readdir64_r ";
- name[StatInt_ptrace] = " ptrace ";
- name[StatInt_setlocale] = " setlocale ";
- name[StatInt_getcwd] = " getcwd ";
- name[StatInt_get_current_dir_name] = " get_current_dir_name ";
- name[StatInt_strtoimax] = " strtoimax ";
- name[StatInt_strtoumax] = " strtoumax ";
- name[StatInt_mbstowcs] = " mbstowcs ";
- name[StatInt_mbsrtowcs] = " mbsrtowcs ";
- name[StatInt_mbsnrtowcs] = " mbsnrtowcs ";
- name[StatInt_wcstombs] = " wcstombs ";
- name[StatInt_wcsrtombs] = " wcsrtombs ";
- name[StatInt_wcsnrtombs] = " wcsnrtombs ";
- name[StatInt_tcgetattr] = " tcgetattr ";
- name[StatInt_realpath] = " realpath ";
- name[StatInt_canonicalize_file_name] = " canonicalize_file_name ";
- name[StatInt_confstr] = " confstr ";
- name[StatInt_sched_getaffinity] = " sched_getaffinity ";
- name[StatInt_strerror] = " strerror ";
- name[StatInt_strerror_r] = " strerror_r ";
- name[StatInt___xpg_strerror_r] = " __xpg_strerror_r ";
- name[StatInt_scandir] = " scandir ";
- name[StatInt_scandir64] = " scandir64 ";
- name[StatInt_getgroups] = " getgroups ";
- name[StatInt_wordexp] = " wordexp ";
- name[StatInt_sigwait] = " sigwait ";
- name[StatInt_sigwaitinfo] = " sigwaitinfo ";
- name[StatInt_sigtimedwait] = " sigtimedwait ";
- name[StatInt_sigemptyset] = " sigemptyset ";
- name[StatInt_sigfillset] = " sigfillset ";
- name[StatInt_sigpending] = " sigpending ";
- name[StatInt_sigprocmask] = " sigprocmask ";
- name[StatInt_backtrace] = " backtrace ";
- name[StatInt_backtrace_symbols] = " backtrace_symbols ";
- name[StatInt_dlopen] = " dlopen ";
- name[StatInt_dlclose] = " dlclose ";
- name[StatInt_getmntent] = " getmntent ";
- name[StatInt_getmntent_r] = " getmntent_r ";
- name[StatInt_statfs] = " statfs ";
- name[StatInt_statfs64] = " statfs64 ";
- name[StatInt_fstatfs] = " fstatfs ";
- name[StatInt_fstatfs64] = " fstatfs64 ";
- name[StatInt_statvfs] = " statvfs ";
- name[StatInt_statvfs64] = " statvfs64 ";
- name[StatInt_fstatvfs] = " fstatvfs ";
- name[StatInt_fstatvfs64] = " fstatvfs64 ";
- name[StatInt_initgroups] = " initgroups ";
- name[StatInt_ether_ntoa] = " ether_ntoa ";
- name[StatInt_ether_aton] = " ether_aton ";
- name[StatInt_ether_ntoa_r] = " ether_ntoa_r ";
- name[StatInt_ether_aton_r] = " ether_aton_r ";
- name[StatInt_ether_ntohost] = " ether_ntohost ";
- name[StatInt_ether_hostton] = " ether_hostton ";
- name[StatInt_ether_line] = " ether_line ";
- name[StatInt_shmctl] = " shmctl ";
- name[StatInt_random_r] = " random_r ";
- name[StatInt_tmpnam] = " tmpnam ";
- name[StatInt_tmpnam_r] = " tmpnam_r ";
- name[StatInt_tempnam] = " tempnam ";
- name[StatInt_sincos] = " sincos ";
- name[StatInt_sincosf] = " sincosf ";
- name[StatInt_sincosl] = " sincosl ";
- name[StatInt_remquo] = " remquo ";
- name[StatInt_remquof] = " remquof ";
- name[StatInt_remquol] = " remquol ";
- name[StatInt_lgamma] = " lgamma ";
- name[StatInt_lgammaf] = " lgammaf ";
- name[StatInt_lgammal] = " lgammal ";
- name[StatInt_lgamma_r] = " lgamma_r ";
- name[StatInt_lgammaf_r] = " lgammaf_r ";
- name[StatInt_lgammal_r] = " lgammal_r ";
- name[StatInt_drand48_r] = " drand48_r ";
- name[StatInt_lrand48_r] = " lrand48_r ";
- name[StatInt_getline] = " getline ";
- name[StatInt_getdelim] = " getdelim ";
- name[StatInt_iconv] = " iconv ";
- name[StatInt_times] = " times ";
-
- name[StatInt_pthread_attr_getdetachstate] = " pthread_addr_getdetachstate "; // NOLINT
- name[StatInt_pthread_attr_getguardsize] = " pthread_addr_getguardsize "; // NOLINT
- name[StatInt_pthread_attr_getschedparam] = " pthread_addr_getschedparam "; // NOLINT
- name[StatInt_pthread_attr_getschedpolicy] = " pthread_addr_getschedpolicy "; // NOLINT
- name[StatInt_pthread_attr_getinheritsched] = " pthread_addr_getinheritsched "; // NOLINT
- name[StatInt_pthread_attr_getscope] = " pthread_addr_getscope "; // NOLINT
- name[StatInt_pthread_attr_getstacksize] = " pthread_addr_getstacksize "; // NOLINT
- name[StatInt_pthread_attr_getstack] = " pthread_addr_getstack "; // NOLINT
- name[StatInt_pthread_attr_getaffinity_np] = " pthread_addr_getaffinity_np "; // NOLINT
-
name[StatAnnotation] = "Dynamic annotations ";
name[StatAnnotateHappensBefore] = " HappensBefore ";
name[StatAnnotateHappensAfter] = " HappensAfter ";
@@ -474,12 +162,16 @@ void StatOutput(u64 *stat) {
name[StatMtxAtExit] = " Atexit ";
name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
- name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
+ name[StatMtxFired] = " FiredSuppressions ";
+ name[StatMtxRacy] = " RacyStacks ";
name[StatMtxFD] = " FD ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
- Printf("%s: %zu\n", name[i], (uptr)stat[i]);
+ Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
}
+#endif
+
} // namespace __tsan
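
The statistics code is now compiled only when TSAN_COLLECT_STATS is set, instead of testing a runtime kCollectStats flag inside every function, and StatAggregate simply sums a finished thread's counters into the global array (printed with the new fixed-width %16zu format). A minimal version of that pattern, with COLLECT_STATS and a two-entry counter enum as stand-ins:

#include <cstdint>
#include <cstdio>

#define COLLECT_STATS 1  // analogous to TSAN_COLLECT_STATS; 0 compiles the code out

enum { kStatMop, kStatFuncEnter, kStatCnt };

#if COLLECT_STATS
// Sum a finished thread's counters into the global array.
void StatAggregate(uint64_t *dst, const uint64_t *src) {
  for (int i = 0; i < kStatCnt; i++)
    dst[i] += src[i];
}

// Print the totals with a fixed-width count, as in the %16zu change above.
void StatOutput(const uint64_t *stat) {
  static const char *const name[kStatCnt] = {"Memory accesses ",
                                             "Function entries"};
  for (int i = 0; i < kStatCnt; i++)
    std::printf("%s: %16llu\n", name[i], (unsigned long long)stat[i]);
}
#endif

int main() {
  uint64_t global[kStatCnt] = {};
  uint64_t per_thread[kStatCnt] = {10, 3};  // e.g. 10 accesses, 3 calls
#if COLLECT_STATS
  StatAggregate(global, per_thread);  // done when each thread finishes
  StatOutput(global);                 // done once at process exit
#else
  (void)global;
  (void)per_thread;
#endif
}
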
diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h
index 3e08313d1a5..002570f42f1 100644
--- a/libsanitizer/tsan/tsan_stat.h
+++ b/libsanitizer/tsan/tsan_stat.h
@@ -24,6 +24,7 @@ enum StatType {
StatMop4,
StatMop8,
StatMopSame,
+ StatMopIgnored,
StatMopRange,
StatMopRodata,
StatMopRangeRodata,
@@ -67,6 +68,32 @@ enum StatType {
StatSyncAcquire,
StatSyncRelease,
+ // Clocks - acquire.
+ StatClockAcquire,
+ StatClockAcquireEmpty,
+ StatClockAcquireFastRelease,
+ StatClockAcquireLarge,
+ StatClockAcquireRepeat,
+ StatClockAcquireFull,
+ StatClockAcquiredSomething,
+ // Clocks - release.
+ StatClockRelease,
+ StatClockReleaseResize,
+ StatClockReleaseFast1,
+ StatClockReleaseFast2,
+ StatClockReleaseSlow,
+ StatClockReleaseFull,
+ StatClockReleaseAcquired,
+ StatClockReleaseClearTail,
+ // Clocks - release store.
+ StatClockStore,
+ StatClockStoreResize,
+ StatClockStoreFast,
+ StatClockStoreFull,
+ StatClockStoreTail,
+ // Clocks - acquire-release.
+ StatClockAcquireRelease,
+
// Atomics.
StatAtomic,
StatAtomicLoad,
@@ -92,337 +119,6 @@ enum StatType {
StatAtomic8,
StatAtomic16,
- // Interceptors.
- StatInterceptor,
- StatInt_longjmp,
- StatInt_siglongjmp,
- StatInt_malloc,
- StatInt___libc_memalign,
- StatInt_calloc,
- StatInt_realloc,
- StatInt_free,
- StatInt_cfree,
- StatInt_malloc_usable_size,
- StatInt_mmap,
- StatInt_mmap64,
- StatInt_munmap,
- StatInt_memalign,
- StatInt_valloc,
- StatInt_pvalloc,
- StatInt_posix_memalign,
- StatInt__Znwm,
- StatInt__ZnwmRKSt9nothrow_t,
- StatInt__Znam,
- StatInt__ZnamRKSt9nothrow_t,
- StatInt__ZdlPv,
- StatInt__ZdlPvRKSt9nothrow_t,
- StatInt__ZdaPv,
- StatInt__ZdaPvRKSt9nothrow_t,
- StatInt_strlen,
- StatInt_memset,
- StatInt_memcpy,
- StatInt_textdomain,
- StatInt_strcmp,
- StatInt_memchr,
- StatInt_memrchr,
- StatInt_memmove,
- StatInt_memcmp,
- StatInt_strchr,
- StatInt_strchrnul,
- StatInt_strrchr,
- StatInt_strncmp,
- StatInt_strcpy,
- StatInt_strncpy,
- StatInt_strcasecmp,
- StatInt_strncasecmp,
- StatInt_strstr,
- StatInt_strdup,
- StatInt_atexit,
- StatInt__exit,
- StatInt___cxa_guard_acquire,
- StatInt___cxa_guard_release,
- StatInt___cxa_guard_abort,
- StatInt_pthread_create,
- StatInt_pthread_join,
- StatInt_pthread_detach,
- StatInt_pthread_mutex_init,
- StatInt_pthread_mutex_destroy,
- StatInt_pthread_mutex_lock,
- StatInt_pthread_mutex_trylock,
- StatInt_pthread_mutex_timedlock,
- StatInt_pthread_mutex_unlock,
- StatInt_pthread_spin_init,
- StatInt_pthread_spin_destroy,
- StatInt_pthread_spin_lock,
- StatInt_pthread_spin_trylock,
- StatInt_pthread_spin_unlock,
- StatInt_pthread_rwlock_init,
- StatInt_pthread_rwlock_destroy,
- StatInt_pthread_rwlock_rdlock,
- StatInt_pthread_rwlock_tryrdlock,
- StatInt_pthread_rwlock_timedrdlock,
- StatInt_pthread_rwlock_wrlock,
- StatInt_pthread_rwlock_trywrlock,
- StatInt_pthread_rwlock_timedwrlock,
- StatInt_pthread_rwlock_unlock,
- StatInt_pthread_cond_init,
- StatInt_pthread_cond_destroy,
- StatInt_pthread_cond_signal,
- StatInt_pthread_cond_broadcast,
- StatInt_pthread_cond_wait,
- StatInt_pthread_cond_timedwait,
- StatInt_pthread_barrier_init,
- StatInt_pthread_barrier_destroy,
- StatInt_pthread_barrier_wait,
- StatInt_pthread_once,
- StatInt_pthread_getschedparam,
- StatInt_pthread_setname_np,
- StatInt_sem_init,
- StatInt_sem_destroy,
- StatInt_sem_wait,
- StatInt_sem_trywait,
- StatInt_sem_timedwait,
- StatInt_sem_post,
- StatInt_sem_getvalue,
- StatInt_stat,
- StatInt___xstat,
- StatInt_stat64,
- StatInt___xstat64,
- StatInt_lstat,
- StatInt___lxstat,
- StatInt_lstat64,
- StatInt___lxstat64,
- StatInt_fstat,
- StatInt___fxstat,
- StatInt_fstat64,
- StatInt___fxstat64,
- StatInt_open,
- StatInt_open64,
- StatInt_creat,
- StatInt_creat64,
- StatInt_dup,
- StatInt_dup2,
- StatInt_dup3,
- StatInt_eventfd,
- StatInt_signalfd,
- StatInt_inotify_init,
- StatInt_inotify_init1,
- StatInt_socket,
- StatInt_socketpair,
- StatInt_connect,
- StatInt_bind,
- StatInt_listen,
- StatInt_accept,
- StatInt_accept4,
- StatInt_epoll_create,
- StatInt_epoll_create1,
- StatInt_close,
- StatInt___close,
- StatInt___res_iclose,
- StatInt_pipe,
- StatInt_pipe2,
- StatInt_read,
- StatInt_prctl,
- StatInt_pread,
- StatInt_pread64,
- StatInt_readv,
- StatInt_preadv,
- StatInt_preadv64,
- StatInt_write,
- StatInt_pwrite,
- StatInt_pwrite64,
- StatInt_writev,
- StatInt_pwritev,
- StatInt_pwritev64,
- StatInt_send,
- StatInt_sendmsg,
- StatInt_recv,
- StatInt_recvmsg,
- StatInt_unlink,
- StatInt_fopen,
- StatInt_freopen,
- StatInt_fclose,
- StatInt_fread,
- StatInt_fwrite,
- StatInt_fflush,
- StatInt_abort,
- StatInt_puts,
- StatInt_rmdir,
- StatInt_opendir,
- StatInt_epoll_ctl,
- StatInt_epoll_wait,
- StatInt_poll,
- StatInt_ppoll,
- StatInt_sigaction,
- StatInt_signal,
- StatInt_sigsuspend,
- StatInt_raise,
- StatInt_kill,
- StatInt_pthread_kill,
- StatInt_sleep,
- StatInt_usleep,
- StatInt_nanosleep,
- StatInt_gettimeofday,
- StatInt_fork,
- StatInt_vscanf,
- StatInt_vsscanf,
- StatInt_vfscanf,
- StatInt_scanf,
- StatInt_sscanf,
- StatInt_fscanf,
- StatInt___isoc99_vscanf,
- StatInt___isoc99_vsscanf,
- StatInt___isoc99_vfscanf,
- StatInt___isoc99_scanf,
- StatInt___isoc99_sscanf,
- StatInt___isoc99_fscanf,
- StatInt_on_exit,
- StatInt___cxa_atexit,
- StatInt_localtime,
- StatInt_localtime_r,
- StatInt_gmtime,
- StatInt_gmtime_r,
- StatInt_ctime,
- StatInt_ctime_r,
- StatInt_asctime,
- StatInt_asctime_r,
- StatInt_strptime,
- StatInt_frexp,
- StatInt_frexpf,
- StatInt_frexpl,
- StatInt_getpwnam,
- StatInt_getpwuid,
- StatInt_getgrnam,
- StatInt_getgrgid,
- StatInt_getpwnam_r,
- StatInt_getpwuid_r,
- StatInt_getgrnam_r,
- StatInt_getgrgid_r,
- StatInt_clock_getres,
- StatInt_clock_gettime,
- StatInt_clock_settime,
- StatInt_getitimer,
- StatInt_setitimer,
- StatInt_time,
- StatInt_glob,
- StatInt_glob64,
- StatInt_wait,
- StatInt_waitid,
- StatInt_waitpid,
- StatInt_wait3,
- StatInt_wait4,
- StatInt_inet_ntop,
- StatInt_inet_pton,
- StatInt_inet_aton,
- StatInt_getaddrinfo,
- StatInt_getnameinfo,
- StatInt_getsockname,
- StatInt_gethostent,
- StatInt_gethostbyname,
- StatInt_gethostbyname2,
- StatInt_gethostbyaddr,
- StatInt_gethostent_r,
- StatInt_gethostbyname_r,
- StatInt_gethostbyname2_r,
- StatInt_gethostbyaddr_r,
- StatInt_getsockopt,
- StatInt_modf,
- StatInt_modff,
- StatInt_modfl,
- StatInt_getpeername,
- StatInt_ioctl,
- StatInt_sysinfo,
- StatInt_readdir,
- StatInt_readdir64,
- StatInt_readdir_r,
- StatInt_readdir64_r,
- StatInt_ptrace,
- StatInt_setlocale,
- StatInt_getcwd,
- StatInt_get_current_dir_name,
- StatInt_strtoimax,
- StatInt_strtoumax,
- StatInt_mbstowcs,
- StatInt_mbsrtowcs,
- StatInt_mbsnrtowcs,
- StatInt_wcstombs,
- StatInt_wcsrtombs,
- StatInt_wcsnrtombs,
- StatInt_tcgetattr,
- StatInt_realpath,
- StatInt_canonicalize_file_name,
- StatInt_confstr,
- StatInt_sched_getaffinity,
- StatInt_strerror,
- StatInt_strerror_r,
- StatInt___xpg_strerror_r,
- StatInt_scandir,
- StatInt_scandir64,
- StatInt_getgroups,
- StatInt_wordexp,
- StatInt_sigwait,
- StatInt_sigwaitinfo,
- StatInt_sigtimedwait,
- StatInt_sigemptyset,
- StatInt_sigfillset,
- StatInt_sigpending,
- StatInt_sigprocmask,
- StatInt_backtrace,
- StatInt_backtrace_symbols,
- StatInt_dlopen,
- StatInt_dlclose,
- StatInt_getmntent,
- StatInt_getmntent_r,
- StatInt_statfs,
- StatInt_statfs64,
- StatInt_fstatfs,
- StatInt_fstatfs64,
- StatInt_statvfs,
- StatInt_statvfs64,
- StatInt_fstatvfs,
- StatInt_fstatvfs64,
- StatInt_initgroups,
- StatInt_ether_ntoa,
- StatInt_ether_aton,
- StatInt_ether_ntoa_r,
- StatInt_ether_aton_r,
- StatInt_ether_ntohost,
- StatInt_ether_hostton,
- StatInt_ether_line,
- StatInt_shmctl,
- StatInt_random_r,
- StatInt_tmpnam,
- StatInt_tmpnam_r,
- StatInt_tempnam,
- StatInt_sincos,
- StatInt_sincosf,
- StatInt_sincosl,
- StatInt_remquo,
- StatInt_remquof,
- StatInt_remquol,
- StatInt_lgamma,
- StatInt_lgammaf,
- StatInt_lgammal,
- StatInt_lgamma_r,
- StatInt_lgammaf_r,
- StatInt_lgammal_r,
- StatInt_drand48_r,
- StatInt_lrand48_r,
- StatInt_getline,
- StatInt_getdelim,
- StatInt_iconv,
- StatInt_times,
-
- StatInt_pthread_attr_getdetachstate,
- StatInt_pthread_attr_getguardsize,
- StatInt_pthread_attr_getschedparam,
- StatInt_pthread_attr_getschedpolicy,
- StatInt_pthread_attr_getinheritsched,
- StatInt_pthread_attr_getscope,
- StatInt_pthread_attr_getstacksize,
- StatInt_pthread_attr_getstack,
- StatInt_pthread_attr_getaffinity_np,
-
// Dynamic annotations.
StatAnnotation,
StatAnnotateHappensBefore,
@@ -471,7 +167,9 @@ enum StatType {
StatMtxAnnotations,
StatMtxAtExit,
StatMtxMBlock,
- StatMtxJavaMBlock,
+ StatMtxDeadlockDetector,
+ StatMtxFired,
+ StatMtxRacy,
StatMtxFD,
// This must be the last.
diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc
index fa0c30dc28c..5c8d03dddd2 100644
--- a/libsanitizer/tsan/tsan_suppressions.cc
+++ b/libsanitizer/tsan/tsan_suppressions.cc
@@ -19,6 +19,7 @@
#include "tsan_mman.h"
#include "tsan_platform.h"
+#ifndef SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
static const char *const std_suppressions =
// Libstdc++ 4.4 has data races in std::string.
@@ -31,7 +32,6 @@ static const char *const std_suppressions =
"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
// Can be overriden in frontend.
-#ifndef TSAN_GO
extern "C" const char *WEAK __tsan_default_suppressions() {
return 0;
}
@@ -39,130 +39,125 @@ extern "C" const char *WEAK __tsan_default_suppressions() {
namespace __tsan {
-static SuppressionContext* g_ctx;
-
-static char *ReadFile(const char *filename) {
- if (filename == 0 || filename[0] == 0)
- return 0;
- InternalScopedBuffer<char> tmp(4*1024);
- if (filename[0] == '/' || GetPwd() == 0)
- internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
- else
- internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
- uptr openrv = OpenFile(tmp.data(), false);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- fd_t fd = openrv;
- const uptr fsize = internal_filesize(fd);
- if (fsize == (uptr)-1) {
- Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1);
- if (fsize != internal_read(fd, buf, fsize)) {
- Printf("ThreadSanitizer: failed to read suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- internal_close(fd);
- buf[fsize] = 0;
- return buf;
-}
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char *kSuppressionTypes[] = {
+ kSuppressionRace, kSuppressionRaceTop, kSuppressionMutex,
+ kSuppressionThread, kSuppressionSignal, kSuppressionLib,
+ kSuppressionDeadlock};
void InitializeSuppressions() {
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- g_ctx = new(placeholder_) SuppressionContext;
- const char *supp = ReadFile(flags()->suppressions);
- g_ctx->Parse(supp);
-#ifndef TSAN_GO
- supp = __tsan_default_suppressions();
- g_ctx->Parse(supp);
- g_ctx->Parse(std_suppressions);
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+#ifndef SANITIZER_GO
+ suppression_ctx->Parse(__tsan_default_suppressions());
+ suppression_ctx->Parse(std_suppressions);
#endif
}
-SuppressionContext *GetSuppressionContext() {
- CHECK_NE(g_ctx, 0);
- return g_ctx;
+SuppressionContext *Suppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
}
-SuppressionType conv(ReportType typ) {
+static const char *conv(ReportType typ) {
if (typ == ReportTypeRace)
- return SuppressionRace;
+ return kSuppressionRace;
else if (typ == ReportTypeVptrRace)
- return SuppressionRace;
+ return kSuppressionRace;
else if (typ == ReportTypeUseAfterFree)
- return SuppressionRace;
+ return kSuppressionRace;
+ else if (typ == ReportTypeVptrUseAfterFree)
+ return kSuppressionRace;
else if (typ == ReportTypeThreadLeak)
- return SuppressionThread;
+ return kSuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
- return SuppressionMutex;
+ return kSuppressionMutex;
+ else if (typ == ReportTypeMutexDoubleLock)
+ return kSuppressionMutex;
+ else if (typ == ReportTypeMutexBadUnlock)
+ return kSuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadLock)
+ return kSuppressionMutex;
+ else if (typ == ReportTypeMutexBadReadUnlock)
+ return kSuppressionMutex;
else if (typ == ReportTypeSignalUnsafe)
- return SuppressionSignal;
+ return kSuppressionSignal;
else if (typ == ReportTypeErrnoInSignal)
- return SuppressionNone;
+ return kSuppressionNone;
+ else if (typ == ReportTypeDeadlock)
+ return kSuppressionDeadlock;
Printf("ThreadSanitizer: unknown report type %d\n", typ),
Die();
}
+static uptr IsSuppressed(const char *stype, const AddressInfo &info,
+ Suppression **sp) {
+ if (suppression_ctx->Match(info.function, stype, sp) ||
+ suppression_ctx->Match(info.file, stype, sp) ||
+ suppression_ctx->Match(info.module, stype, sp)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
+ atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed);
+ return info.address;
+ }
+ return 0;
+}
+
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || stack == 0) return 0;
- SuppressionType stype = conv(typ);
- if (stype == SuppressionNone)
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
return 0;
- Suppression *s;
- for (const ReportStack *frame = stack; frame; frame = frame->next) {
- if (g_ctx->Match(frame->func, stype, &s) ||
- g_ctx->Match(frame->file, stype, &s) ||
- g_ctx->Match(frame->module, stype, &s)) {
- DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
- s->hit_count++;
- *sp = s;
- return frame->pc;
- }
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ for (const SymbolizedStack *frame = stack->frames; frame;
+ frame = frame->next) {
+ uptr pc = IsSuppressed(stype, frame->info, sp);
+ if (pc != 0)
+ return pc;
}
+ if (0 == internal_strcmp(stype, kSuppressionRace) && stack->frames != nullptr)
+ return IsSuppressed(kSuppressionRaceTop, stack->frames->info, sp);
return 0;
}
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || loc == 0 ||
- loc->type != ReportLocationGlobal)
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
return 0;
- SuppressionType stype = conv(typ);
- if (stype == SuppressionNone)
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
return 0;
Suppression *s;
- if (g_ctx->Match(loc->name, stype, &s) ||
- g_ctx->Match(loc->file, stype, &s) ||
- g_ctx->Match(loc->module, stype, &s)) {
- DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
- s->hit_count++;
+ const DataInfo &global = loc->global;
+ if (suppression_ctx->Match(global.name, stype, &s) ||
+ suppression_ctx->Match(global.module, stype, &s)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ);
+ atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
*sp = s;
- return loc->addr;
+ return global.start;
}
return 0;
}
void PrintMatchedSuppressions() {
- CHECK(g_ctx);
InternalMmapVector<Suppression *> matched(1);
- g_ctx->GetMatched(&matched);
+ CHECK(suppression_ctx);
+ suppression_ctx->GetMatched(&matched);
if (!matched.size())
return;
int hit_count = 0;
for (uptr i = 0; i < matched.size(); i++)
- hit_count += matched[i]->hit_count;
+ hit_count += atomic_load_relaxed(&matched[i]->hit_count);
Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
(int)internal_getpid());
for (uptr i = 0; i < matched.size(); i++) {
- Printf("%d %s:%s\n", matched[i]->hit_count,
- SuppressionTypeString(matched[i]->type), matched[i]->templ);
+ Printf("%d %s:%s\n", matched[i]->hit_count, matched[i]->type,
+ matched[i]->templ);
}
}
} // namespace __tsan
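
Illustrative sketch (not part of the patch): after this change suppression kinds are plain strings (kSuppressionRace, kSuppressionRaceTop, ...) and IsSuppressed() walks the symbolized frames of a report, matching each frame's function, file and module; the new race_top kind is consulted only for the top frame. The standalone model below mirrors that control flow, with a plain substring test standing in for the real SuppressionContext matcher:

#include <string>
#include <vector>

struct Frame { std::string function, file, module; };

// Stand-in for SuppressionContext::Match(): the real matcher supports '*'
// wildcards; a substring test is enough to show the traversal order.
static bool Matches(const std::string &pattern, const Frame &f) {
  return f.function.find(pattern) != std::string::npos ||
         f.file.find(pattern) != std::string::npos ||
         f.module.find(pattern) != std::string::npos;
}

// "race" suppressions may match any frame; "race_top" is tried only against
// the top frame, and only after the ordinary "race" pass found nothing.
static bool IsRaceSuppressed(const std::vector<Frame> &stack,
                             const std::vector<std::string> &race,
                             const std::vector<std::string> &race_top) {
  for (const Frame &f : stack)
    for (const std::string &p : race)
      if (Matches(p, f)) return true;
  if (!stack.empty())
    for (const std::string &p : race_top)
      if (Matches(p, stack.front())) return true;
  return false;
}
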
diff --git a/libsanitizer/tsan/tsan_suppressions.h b/libsanitizer/tsan/tsan_suppressions.h
index 2939e9a8b9f..58951975075 100644
--- a/libsanitizer/tsan/tsan_suppressions.h
+++ b/libsanitizer/tsan/tsan_suppressions.h
@@ -16,11 +16,20 @@
namespace __tsan {
+const char kSuppressionNone[] = "none";
+const char kSuppressionRace[] = "race";
+const char kSuppressionRaceTop[] = "race_top";
+const char kSuppressionMutex[] = "mutex";
+const char kSuppressionThread[] = "thread";
+const char kSuppressionSignal[] = "signal";
+const char kSuppressionLib[] = "called_from_lib";
+const char kSuppressionDeadlock[] = "deadlock";
+
void InitializeSuppressions();
+SuppressionContext *Suppressions();
void PrintMatchedSuppressions();
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
-SuppressionContext *GetSuppressionContext();
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize.cc b/libsanitizer/tsan/tsan_symbolize.cc
index c0e794be2d2..0e54562e385 100644
--- a/libsanitizer/tsan/tsan_symbolize.cc
+++ b/libsanitizer/tsan/tsan_symbolize.cc
@@ -24,66 +24,26 @@ void EnterSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(!thr->in_symbolizer);
thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
}
void ExitSymbolizer() {
ThreadState *thr = cur_thread();
CHECK(thr->in_symbolizer);
thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
}
-ReportStack *NewReportStackEntry(uptr addr) {
- ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack,
- sizeof(ReportStack));
- internal_memset(ent, 0, sizeof(*ent));
- ent->pc = addr;
- return ent;
-}
-
-static ReportStack *NewReportStackEntry(const AddressInfo &info) {
- ReportStack *ent = NewReportStackEntry(info.address);
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.function)
- ent->func = internal_strdup(info.function);
- if (info.file)
- ent->file = internal_strdup(info.file);
- ent->line = info.line;
- ent->col = info.column;
- return ent;
-}
-
-
- ReportStack *next;
- char *module;
- uptr offset;
- uptr pc;
- char *func;
- char *file;
- int line;
- int col;
-
-
-// Denotes fake PC values that come from JIT/JAVA/etc.
-// For such PC values __tsan_symbolize_external() will be called.
-const uptr kExternalPCBit = 1ULL << 60;
-
// May be overridden by JIT/JAVA/etc,
// whatever produces PCs marked with kExternalPCBit.
-extern "C" bool __tsan_symbolize_external(uptr pc,
- char *func_buf, uptr func_siz,
- char *file_buf, uptr file_siz,
- int *line, int *col)
- SANITIZER_WEAK_ATTRIBUTE;
-
-bool __tsan_symbolize_external(uptr pc,
- char *func_buf, uptr func_siz,
- char *file_buf, uptr file_siz,
- int *line, int *col) {
+extern "C" bool WEAK __tsan_symbolize_external(uptr pc,
+ char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz,
+ int *line, int *col) {
return false;
}
-ReportStack *SymbolizeCode(uptr addr) {
+SymbolizedStack *SymbolizeCode(uptr addr) {
// Check if PC comes from non-native land.
if (addr & kExternalPCBit) {
// Declare static to not consume too much stack space.
@@ -91,66 +51,30 @@ ReportStack *SymbolizeCode(uptr addr) {
static char func_buf[1024];
static char file_buf[1024];
int line, col;
- if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
- file_buf, sizeof(file_buf), &line, &col))
- return NewReportStackEntry(addr);
- ReportStack *ent = NewReportStackEntry(addr);
- ent->module = 0;
- ent->offset = 0;
- ent->func = internal_strdup(func_buf);
- ent->file = internal_strdup(file_buf);
- ent->line = line;
- ent->col = col;
- return ent;
- }
- if (!Symbolizer::Get()->IsAvailable())
- return SymbolizeCodeAddr2Line(addr);
- static const uptr kMaxAddrFrames = 16;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++)
- new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
- addr, addr_frames.data(), kMaxAddrFrames);
- if (addr_frames_num == 0)
- return NewReportStackEntry(addr);
- ReportStack *top = 0;
- ReportStack *bottom = 0;
- for (uptr i = 0; i < addr_frames_num; i++) {
- ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]);
- CHECK(cur_entry);
- addr_frames[i].Clear();
- if (i == 0)
- top = cur_entry;
- else
- bottom->next = cur_entry;
- bottom = cur_entry;
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
+ if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf,
+ sizeof(file_buf), &line, &col)) {
+ frame->info.function = internal_strdup(func_buf);
+ frame->info.file = internal_strdup(file_buf);
+ frame->info.line = line;
+ frame->info.column = col;
+ }
+ return frame;
}
- return top;
+ return Symbolizer::GetOrInit()->SymbolizePC(addr);
}
ReportLocation *SymbolizeData(uptr addr) {
- if (!Symbolizer::Get()->IsAvailable())
- return 0;
DataInfo info;
- if (!Symbolizer::Get()->SymbolizeData(addr, &info))
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
- ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
- sizeof(ReportLocation));
- internal_memset(ent, 0, sizeof(*ent));
- ent->type = ReportLocationGlobal;
- ent->module = StripModuleName(info.module);
- ent->offset = info.module_offset;
- if (info.name)
- ent->name = internal_strdup(info.name);
- ent->addr = info.start;
- ent->size = info.size;
+ ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ ent->global = info;
return ent;
}
void SymbolizeFlush() {
- if (!Symbolizer::Get()->IsAvailable())
- return;
- Symbolizer::Get()->Flush();
+ Symbolizer::GetOrInit()->Flush();
}
} // namespace __tsan
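
Illustrative sketch (not part of the patch): __tsan_symbolize_external is declared weak above, so a JIT or other runtime that produces PCs tagged with kExternalPCBit can supply its own definition. A hypothetical override, using unsigned long as a stand-in for the sanitizer-internal uptr type:

#include <cstdio>

typedef unsigned long uptr;  // stand-in for the sanitizer-internal uptr

// A strong definition replaces the weak default that returns false.
extern "C" bool __tsan_symbolize_external(uptr pc,
                                          char *func_buf, uptr func_siz,
                                          char *file_buf, uptr file_siz,
                                          int *line, int *col) {
  // A real JIT would look up `pc` in its own symbol table; this stub only
  // shows how the output buffers are expected to be filled.
  std::snprintf(func_buf, func_siz, "jit_function_%lx", pc);
  std::snprintf(file_buf, file_siz, "<jit>");
  *line = 0;
  *col = 0;
  return true;  // false means "not mine", TSan keeps the raw address
}
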
diff --git a/libsanitizer/tsan/tsan_symbolize.h b/libsanitizer/tsan/tsan_symbolize.h
index 892c11c0667..a859f6318b0 100644
--- a/libsanitizer/tsan/tsan_symbolize.h
+++ b/libsanitizer/tsan/tsan_symbolize.h
@@ -18,12 +18,10 @@ namespace __tsan {
void EnterSymbolizer();
void ExitSymbolizer();
-ReportStack *SymbolizeCode(uptr addr);
+SymbolizedStack *SymbolizeCode(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
void SymbolizeFlush();
-ReportStack *SymbolizeCodeAddr2Line(uptr addr);
-
ReportStack *NewReportStackEntry(uptr addr);
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc b/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
deleted file mode 100644
index c278a42f317..00000000000
--- a/libsanitizer/tsan/tsan_symbolize_addr2line_linux.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-//===-- tsan_symbolize_addr2line.cc ---------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_symbolize.h"
-#include "tsan_mman.h"
-#include "tsan_rtl.h"
-#include "tsan_platform.h"
-
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <errno.h>
-#include <link.h>
-#include <linux/limits.h>
-#include <sys/types.h>
-
-namespace __tsan {
-
-struct ModuleDesc {
- const char *fullname;
- const char *name;
- uptr base;
- int inp_fd;
- int out_fd;
-};
-
-struct SectionDesc {
- SectionDesc *next;
- ModuleDesc *module;
- uptr base;
- uptr end;
-};
-
-struct DlIteratePhdrCtx {
- SectionDesc *sections;
- bool is_first;
-};
-
-static void NOINLINE InitModule(ModuleDesc *m) {
- int outfd[2] = {};
- if (pipe(&outfd[0])) {
- Printf("ThreadSanitizer: outfd pipe() failed (%d)\n", errno);
- Die();
- }
- int infd[2] = {};
- if (pipe(&infd[0])) {
- Printf("ThreadSanitizer: infd pipe() failed (%d)\n", errno);
- Die();
- }
- int pid = fork();
- if (pid == 0) {
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
- internal_close(fd);
- execl("/usr/bin/addr2line", "/usr/bin/addr2line", "-Cfe", m->fullname, 0);
- _exit(0);
- } else if (pid < 0) {
- Printf("ThreadSanitizer: failed to fork symbolizer\n");
- Die();
- }
- internal_close(outfd[0]);
- internal_close(infd[1]);
- m->inp_fd = infd[0];
- m->out_fd = outfd[1];
-}
-
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg;
- InternalScopedBuffer<char> tmp(128);
- if (ctx->is_first) {
- internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe",
- (int)internal_getpid());
- info->dlpi_name = tmp.data();
- }
- ctx->is_first = false;
- if (info->dlpi_name == 0 || info->dlpi_name[0] == 0)
- return 0;
- ModuleDesc *m = (ModuleDesc*)internal_alloc(MBlockReportStack,
- sizeof(ModuleDesc));
- m->fullname = internal_strdup(info->dlpi_name);
- m->name = internal_strrchr(m->fullname, '/');
- if (m->name)
- m->name += 1;
- else
- m->name = m->fullname;
- m->base = (uptr)info->dlpi_addr;
- m->inp_fd = -1;
- m->out_fd = -1;
- DPrintf2("Module %s %zx\n", m->name, m->base);
- for (int i = 0; i < info->dlpi_phnum; i++) {
- const Elf64_Phdr *s = &info->dlpi_phdr[i];
- DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
- " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
- (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
- (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
- (uptr)s->p_flags, (uptr)s->p_align);
- if (s->p_type != PT_LOAD)
- continue;
- SectionDesc *sec = (SectionDesc*)internal_alloc(MBlockReportStack,
- sizeof(SectionDesc));
- sec->module = m;
- sec->base = info->dlpi_addr + s->p_vaddr;
- sec->end = sec->base + s->p_memsz;
- sec->next = ctx->sections;
- ctx->sections = sec;
- DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
- }
- return 0;
-}
-
-static SectionDesc *InitSections() {
- DlIteratePhdrCtx ctx = {0, true};
- dl_iterate_phdr(dl_iterate_phdr_cb, &ctx);
- return ctx.sections;
-}
-
-static SectionDesc *GetSectionDesc(uptr addr) {
- static SectionDesc *sections = 0;
- if (sections == 0)
- sections = InitSections();
- for (SectionDesc *s = sections; s; s = s->next) {
- if (addr >= s->base && addr < s->end) {
- if (s->module->inp_fd == -1)
- InitModule(s->module);
- return s;
- }
- }
- return 0;
-}
-
-ReportStack *SymbolizeCodeAddr2Line(uptr addr) {
- SectionDesc *s = GetSectionDesc(addr);
- if (s == 0)
- return NewReportStackEntry(addr);
- ModuleDesc *m = s->module;
- uptr offset = addr - m->base;
- char addrstr[32];
- internal_snprintf(addrstr, sizeof(addrstr), "%p\n", (void*)offset);
- if (0 >= internal_write(m->out_fd, addrstr, internal_strlen(addrstr))) {
- Printf("ThreadSanitizer: can't write from symbolizer (%d, %d)\n",
- m->out_fd, errno);
- Die();
- }
- InternalScopedBuffer<char> func(1024);
- ssize_t len = internal_read(m->inp_fd, func.data(), func.size() - 1);
- if (len <= 0) {
- Printf("ThreadSanitizer: can't read from symbolizer (%d, %d)\n",
- m->inp_fd, errno);
- Die();
- }
- func.data()[len] = 0;
- ReportStack *res = NewReportStackEntry(addr);
- res->module = internal_strdup(m->name);
- res->offset = offset;
- char *pos = (char*)internal_strchr(func.data(), '\n');
- if (pos && func[0] != '?') {
- res->func = (char*)internal_alloc(MBlockReportStack, pos - func.data() + 1);
- internal_memcpy(res->func, func.data(), pos - func.data());
- res->func[pos - func.data()] = 0;
- char *pos2 = (char*)internal_strchr(pos, ':');
- if (pos2) {
- res->file = (char*)internal_alloc(MBlockReportStack, pos2 - pos - 1 + 1);
- internal_memcpy(res->file, pos + 1, pos2 - pos - 1);
- res->file[pos2 - pos - 1] = 0;
- res->line = atoi(pos2 + 1);
- }
- }
- return res;
-}
-
-ReportStack *SymbolizeDataAddr2Line(uptr addr) {
- return 0;
-}
-
-} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_sync.cc b/libsanitizer/tsan/tsan_sync.cc
index 0c5be105f67..91ad8c8b228 100644
--- a/libsanitizer/tsan/tsan_sync.cc
+++ b/libsanitizer/tsan/tsan_sync.cc
@@ -15,290 +15,271 @@
namespace __tsan {
-SyncVar::SyncVar(uptr addr, u64 uid)
- : mtx(MutexTypeSyncVar, StatMtxSyncVar)
- , addr(addr)
- , uid(uid)
- , owner_tid(kInvalidTid)
- , last_lock()
- , recursion()
- , is_rw()
- , is_recursive()
- , is_broken()
- , is_linker_init() {
-}
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncTab::Part::Part()
- : mtx(MutexTypeSyncTab, StatMtxSyncTab)
- , val() {
+SyncVar::SyncVar()
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+ Reset(0);
}
-SyncTab::SyncTab() {
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = 0;
+ if (kCppMode) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
}
-SyncTab::~SyncTab() {
- for (int i = 0; i < kPartCount; i++) {
- while (tab_[i].val) {
- SyncVar *tmp = tab_[i].val;
- tab_[i].val = tmp->next;
- DestroyAndFree(tmp);
- }
+void SyncVar::Reset(ThreadState *thr) {
+ uid = 0;
+ creation_stack_id = 0;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ is_rw = 0;
+ is_recursive = 0;
+ is_broken = 0;
+ is_linker_init = 0;
+
+ if (thr == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&thr->clock_cache);
+ read_clock.Reset(&thr->clock_cache);
}
}
-SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock) {
- return GetAndLock(thr, pc, addr, write_lock, true);
+MetaMap::MetaMap() {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
-SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
- return GetAndLock(0, 0, addr, write_lock, false);
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
}
-SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
- StatInc(thr, StatSyncCreated);
- void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- SyncVar *res = new(mem) SyncVar(addr, uid);
-#ifndef TSAN_GO
- res->creation_stack_id = CurrentStackId(thr, pc);
-#endif
- return res;
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(thr, pc, p, sz);
+ return sz;
}
-SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock, bool create) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
- if (res)
- return res;
- }
-
- // Here we ask only PrimaryAllocator, because
- // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
- // the hashmap anyway.
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- MBlock::ScopedLock l(b);
- SyncVar *res = 0;
- for (res = b->ListHead(); res; res = res->next) {
- if (res->addr == addr)
- break;
- }
- if (res == 0) {
- if (!create)
- return 0;
- res = Create(thr, pc, addr);
- b->ListPush(res);
- }
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- {
- ReadLock l(&p->mtx);
- for (SyncVar *res = p->val; res; res = res->next) {
- if (res->addr == addr) {
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
+bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ bool has_something = false;
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ if (idx == 0) {
+ // Note: don't write to meta in this case -- the block can be huge.
+ continue;
}
- }
- if (!create)
- return 0;
- {
- Lock l(&p->mtx);
- SyncVar *res = p->val;
- for (; res; res = res->next) {
- if (res->addr == addr)
+ *meta = 0;
+ has_something = true;
+ while (idx != 0) {
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
}
- if (res == 0) {
- res = Create(thr, pc, addr);
- res->next = p->val;
- p->val = res;
- }
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
}
+ return has_something;
}
-SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
- if (res)
- return res;
+// ResetRange removes all meta objects from the range.
+// It is called for large mmap-ed regions. The function is best-effort wrt
+// freeing of meta objects, because we don't want to page in the whole range
+// which can be huge. The function probes pages one-by-one until it finds a page
+// without meta objects, at this point it stops freeing meta objects. Because
+// thread stacks grow top-down, we do the same starting from end as well.
+void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ if (sz <= 4 * kPageSize) {
+ // If the range is small, just do the normal free procedure.
+ FreeRange(thr, pc, p, sz);
+ return;
}
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- SyncVar *res = 0;
- {
- MBlock::ScopedLock l(b);
- res = b->ListHead();
- if (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- b->ListPop();
- } else {
- SyncVar **prev = &res->next;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
- }
- }
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- }
- }
- return res;
+ // First, round both ends of the range to page size.
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ FreeRange(thr, pc, p, diff);
+ p += diff;
+ sz -= diff;
}
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- SyncVar *res = 0;
- {
- Lock l(&p->mtx);
- SyncVar **prev = &p->val;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
- }
+ diff = p + sz - RoundDown(p + sz, kPageSize);
+ if (diff != 0) {
+ FreeRange(thr, pc, p + sz - diff, diff);
+ sz -= diff;
+ }
+ // Now we must have a non-empty page-aligned range.
+ CHECK_GT(sz, 0);
+ CHECK_EQ(p, RoundUp(p, kPageSize));
+ CHECK_EQ(sz, RoundUp(sz, kPageSize));
+ const uptr p0 = p;
+ const uptr sz0 = sz;
+ // Probe start of the range.
+ while (sz > 0) {
+ bool has_something = FreeRange(thr, pc, p, kPageSize);
+ p += kPageSize;
+ sz -= kPageSize;
+ if (!has_something)
+ break;
}
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
+ // Probe end of the range.
+ while (sz > 0) {
+ bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
+ sz -= kPageSize;
+ if (!has_something)
+ break;
}
- return res;
+ // Finally, page out the whole range (including the parts that we've just
+ // freed). Note: we can't simply madvise, because we need to leave a zeroed
+ // range (otherwise __tsan_java_move can crash if it encounters left-over
+ // meta objects in the Java heap).
+ uptr metap = (uptr)MemToMeta(p0);
+ uptr metasz = sz0 / kMetaRatio;
+ UnmapOrDie((void*)metap, metasz);
+ MmapFixedNoReserve(metap, metasz);
}
-int SyncTab::PartIdx(uptr addr) {
- return (addr >> 3) % kPartCount;
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
}
-StackTrace::StackTrace()
- : n_()
- , s_()
- , c_() {
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
}
-StackTrace::StackTrace(uptr *buf, uptr cnt)
- : n_()
- , s_(buf)
- , c_(cnt) {
- CHECK_NE(buf, 0);
- CHECK_NE(cnt, 0);
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
+ return GetAndLock(0, 0, addr, true, false);
}
-StackTrace::~StackTrace() {
- Reset();
-}
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock, bool create) {
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = 0;
+ for (;;) {
+ u32 idx = idx0;
+ for (;;) {
+ if (idx == 0)
+ break;
+ if (idx & kFlagBlock)
+ break;
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (s->addr == addr) {
+ if (myidx != 0) {
+ mys->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, myidx);
+ }
+ if (write_lock)
+ s->mtx.Lock();
+ else
+ s->mtx.ReadLock();
+ return s;
+ }
+ idx = s->next;
+ }
+ if (!create)
+ return 0;
+ if (*meta != idx0) {
+ idx0 = *meta;
+ continue;
+ }
-void StackTrace::Reset() {
- if (s_ && !c_) {
- CHECK_NE(n_, 0);
- internal_free(s_);
- s_ = 0;
+ if (myidx == 0) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid);
+ }
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ if (write_lock)
+ mys->mtx.Lock();
+ else
+ mys->mtx.ReadLock();
+ return mys;
+ }
}
- n_ = 0;
}
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
- Reset();
- if (cnt == 0)
- return;
- if (c_) {
- CHECK_NE(s_, 0);
- CHECK_LE(cnt, c_);
- } else {
- s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap,
+ // there are no concurrent accesses to the regions (e.g. stop-the-world).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
}
- n_ = cnt;
- internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
-}
-
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
- Reset();
- n_ = thr->shadow_stack_pos - thr->shadow_stack;
- if (n_ + !!toppc == 0)
- return;
- uptr start = 0;
- if (c_) {
- CHECK_NE(s_, 0);
- if (n_ + !!toppc > c_) {
- start = n_ - c_ + !!toppc;
- n_ = c_ - !!toppc;
- }
- } else {
- // Cap potentially huge stacks.
- if (n_ + !!toppc > kTraceStackSize) {
- start = n_ - kTraceStackSize + !!toppc;
- n_ = kTraceStackSize - !!toppc;
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
}
- s_ = (uptr*)internal_alloc(MBlockStackTrace,
- (n_ + !!toppc) * sizeof(s_[0]));
}
- for (uptr i = 0; i < n_; i++)
- s_[i] = thr->shadow_stack[start + i];
- if (toppc) {
- s_[n_] = toppc;
- n_++;
- }
-}
-
-void StackTrace::CopyFrom(const StackTrace& other) {
- Reset();
- Init(other.Begin(), other.Size());
-}
-
-bool StackTrace::IsEmpty() const {
- return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
- return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
- CHECK_LT(i, n_);
- return s_[i];
}
-const uptr *StackTrace::Begin() const {
- return s_;
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+ block_alloc_.FlushCache(&thr->block_cache);
+ sync_alloc_.FlushCache(&thr->sync_cache);
}
} // namespace __tsan
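
Sketch of the new meta encoding (based on the kFlag* constants declared in tsan_sync.h below): each 32-bit meta cell holds a DenseSlabAlloc index in the low 30 bits plus a 2-bit tag, which is how FreeRange() and GetBlock() above decide what a cell refers to:

#include <cstdint>

static const uint32_t kFlagMask  = 3u << 30;
static const uint32_t kFlagBlock = 1u << 30;  // cell refers to a heap-block descriptor
static const uint32_t kFlagSync  = 2u << 30;  // cell heads a chain of SyncVar indices

enum class MetaKind { Empty, Block, SyncChain };

// Mirrors the decoding done in FreeRange()/GetBlock(): 0 means nothing is
// mapped here; otherwise the tag selects the allocator and the low bits are
// the slab index.
static MetaKind Classify(uint32_t meta, uint32_t *index_out) {
  *index_out = meta & ~kFlagMask;
  if (meta == 0) return MetaKind::Empty;
  if (meta & kFlagBlock) return MetaKind::Block;
  return MetaKind::SyncChain;
}
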
diff --git a/libsanitizer/tsan/tsan_sync.h b/libsanitizer/tsan/tsan_sync.h
index 2867a8ac79e..50bc872275d 100644
--- a/libsanitizer/tsan/tsan_sync.h
+++ b/libsanitizer/tsan/tsan_sync.h
@@ -13,50 +13,22 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "tsan_clock.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
+#include "tsan_clock.h"
#include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
-class SlabCache;
-
-class StackTrace {
- public:
- StackTrace();
- // Initialized the object in "static mode",
- // in this mode it never calls malloc/free but uses the provided buffer.
- StackTrace(uptr *buf, uptr cnt);
- ~StackTrace();
- void Reset();
-
- void Init(const uptr *pcs, uptr cnt);
- void ObtainCurrent(ThreadState *thr, uptr toppc);
- bool IsEmpty() const;
- uptr Size() const;
- uptr Get(uptr i) const;
- const uptr *Begin() const;
- void CopyFrom(const StackTrace& other);
-
- private:
- uptr n_;
- uptr *s_;
- const uptr c_;
-
- StackTrace(const StackTrace&);
- void operator = (const StackTrace&);
-};
-
struct SyncVar {
- explicit SyncVar(uptr addr, u64 uid);
+ SyncVar();
static const int kInvalidTid = -1;
+ uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- uptr addr;
- const u64 uid; // Globally unique id.
- SyncClock clock;
- SyncClock read_clock; // Used for rw mutexes only.
+ u64 uid; // Globally unique id.
u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
@@ -65,9 +37,16 @@ struct SyncVar {
bool is_recursive;
bool is_broken;
bool is_linker_init;
- SyncVar *next; // In SyncTab hashtable.
+ u32 next; // in MetaMap
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+ // The clock is placed last, so that it is situated on a different cache line
+ // with the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
+
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+ void Reset(ThreadState *thr);
- uptr GetMemoryConsumption();
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
@@ -82,42 +61,40 @@ struct SyncVar {
}
};
-class SyncTab {
+/* MetaMap allows to map arbitrary user pointers onto various descriptors.
+ Currently it maps pointers to heap block descriptors and sync var descs.
+ It uses 1/2 direct shadow, see tsan_platform.h.
+*/
+class MetaMap {
public:
- SyncTab();
- ~SyncTab();
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
+ bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
-
- // If the SyncVar does not exist, returns 0.
- SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+ SyncVar* GetIfExistsAndLock(uptr addr);
- SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+ void MoveMemory(uptr src, uptr dst, uptr sz);
- uptr GetMemoryConsumption(uptr *nsync);
+ void OnThreadIdle(ThreadState *thr);
private:
- struct Part {
- Mutex mtx;
- SyncVar *val;
- char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
- Part();
- };
-
- // FIXME: Implement something more sane.
- static const int kPartCount = 1009;
- Part tab_[kPartCount];
+ static const u32 kFlagMask = 3u << 30;
+ static const u32 kFlagBlock = 1u << 30;
+ static const u32 kFlagSync = 2u << 30;
+ typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;
- int PartIdx(uptr addr);
-
- SyncVar* GetAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock, bool create);
-
- SyncTab(const SyncTab&); // Not implemented.
- void operator = (const SyncTab&); // Not implemented.
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create);
};
} // namespace __tsan
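
Worked example (sketch, following the GetId() comment above): the sync id packs the low 47 bits of the address with the uid shifted above them and truncates the result to 61 bits, leaving exactly 14 uid bits that can later be recovered by shifting:

#include <cstdint>

// Same packing as SyncVar::GetId(): 47 low bits of addr, uid above them,
// result truncated to 61 bits (GetLsb(..., 61)).
static inline uint64_t PackSyncId(uint64_t addr, uint64_t uid) {
  return (addr | (uid << 47)) & ((1ull << 61) - 1);
}

// Recover the uid part: bits 47..60, i.e. the low 14 bits of the uid.
static inline uint64_t SyncIdUidBits(uint64_t id) { return id >> 47; }

static_assert((((1ull << 61) - 1) >> 47) == ((1ull << 14) - 1),
              "a 61-bit id leaves exactly 14 bits for the uid");
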
diff --git a/libsanitizer/tsan/tsan_trace.h b/libsanitizer/tsan/tsan_trace.h
index 93ed8d907e5..a27efa9fd7a 100644
--- a/libsanitizer/tsan/tsan_trace.h
+++ b/libsanitizer/tsan/tsan_trace.h
@@ -13,14 +13,14 @@
#include "tsan_defs.h"
#include "tsan_mutex.h"
-#include "tsan_sync.h"
+#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"
namespace __tsan {
-const int kTracePartSizeBits = 14;
+const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
-const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
+const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
// Must fit into 3 bits.
@@ -40,31 +40,27 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
- StackTrace stack0; // Start stack for the trace.
+#ifndef SANITIZER_GO
+ BufferedStackTrace stack0; // Start stack for the trace.
+#else
+ VarSizeStackTrace stack0;
+#endif
u64 epoch0; // Start epoch for the trace.
MutexSet mset0;
-#ifndef TSAN_GO
- uptr stack0buf[kTraceStackSize];
-#endif
- TraceHeader()
-#ifndef TSAN_GO
- : stack0(stack0buf, kTraceStackSize)
-#else
- : stack0()
-#endif
- , epoch0() {
- }
+ TraceHeader() : stack0(), epoch0() {}
};
struct Trace {
- TraceHeader headers[kTraceParts];
Mutex mtx;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
#endif
+ // Must be the last field, because we unmap the unused part in
+ // CreateThreadContext.
+ TraceHeader headers[kTraceParts];
Trace()
: mtx(MutexTypeTrace, StatMtxTrace) {
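
Arithmetic note (sketch): with kTracePartSizeBits reduced to 13 and the total cut in half, each trace part holds 8192 events and the per-thread trace shrinks from 4M to 2M events; the checks below just restate the new constants:

constexpr int kTracePartSizeBits = 13;
constexpr int kTracePartSize = 1 << kTracePartSizeBits;        // 8192 events per part
constexpr int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;  // 256 parts
constexpr int kTraceSize = kTracePartSize * kTraceParts;       // 2M events per thread

static_assert(kTracePartSize == 8192, "one part holds 8192 events");
static_assert(kTraceParts == 256, "256 parts per thread trace");
static_assert(kTraceSize == 2 * 1024 * 1024, "half of the previous 4M-event trace");
// Each Event is a u64, so the event buffer itself is 16 MB per thread.
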
diff --git a/libsanitizer/tsan/tsan_update_shadow_word_inl.h b/libsanitizer/tsan/tsan_update_shadow_word_inl.h
index 42caf80d349..2ea74283818 100644
--- a/libsanitizer/tsan/tsan_update_shadow_word_inl.h
+++ b/libsanitizer/tsan/tsan_update_shadow_word_inl.h
@@ -14,8 +14,7 @@
do {
StatInc(thr, StatShadowProcessed);
const unsigned kAccessSize = 1 << kAccessSizeLog;
- unsigned off = cur.ComputeSearchOffset();
- u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
+ u64 *sp = &shadow_mem[idx];
old = LoadShadow(sp);
if (old.IsZero()) {
StatInc(thr, StatShadowZero);
@@ -31,23 +30,14 @@ do {
// same thread?
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
- if (OldIsInSameSynchEpoch(old, thr)) {
- if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
- // found a slot that holds effectively the same info
- // (that is, same tid, same sync epoch and same size)
- StatInc(thr, StatMopSame);
- return;
- }
- StoreIfNotYetStored(sp, &store_word);
- break;
- }
if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
StoreIfNotYetStored(sp, &store_word);
break;
}
StatInc(thr, StatShadowAnotherThread);
if (HappensBefore(old, thr)) {
- StoreIfNotYetStored(sp, &store_word);
+ if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
+ StoreIfNotYetStored(sp, &store_word);
break;
}
if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
diff --git a/libsanitizer/tsan/tsan_vector.h b/libsanitizer/tsan/tsan_vector.h
index 4da8b83d5c3..c0485513ee2 100644
--- a/libsanitizer/tsan/tsan_vector.h
+++ b/libsanitizer/tsan/tsan_vector.h
@@ -56,10 +56,18 @@ class Vector {
return begin_[i];
}
- T *PushBack(T v = T()) {
+ T *PushBack() {
EnsureSize(Size() + 1);
- end_[-1] = v;
- return &end_[-1];
+ T *p = &end_[-1];
+ internal_memset(p, 0, sizeof(*p));
+ return p;
+ }
+
+ T *PushBack(const T& v) {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memcpy(p, &v, sizeof(*p));
+ return p;
}
void PopBack() {
@@ -68,11 +76,15 @@ class Vector {
}
void Resize(uptr size) {
+ if (size == 0) {
+ end_ = begin_;
+ return;
+ }
uptr old_size = Size();
EnsureSize(size);
if (old_size < size) {
for (uptr i = old_size; i < size; i++)
- begin_[i] = T();
+ internal_memset(&begin_[i], 0, sizeof(begin_[i]));
}
}
@@ -90,7 +102,7 @@ class Vector {
return;
}
uptr cap0 = last_ - begin_;
- uptr cap = 2 * cap0;
+ uptr cap = cap0 * 5 / 4; // 25% growth
if (cap == 0)
cap = 16;
if (cap < size)
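
Growth-factor note (sketch, not part of the patch): EnsureSize() now grows capacity by 25% instead of doubling, trading a few extra reallocations for a lower peak memory overhead. The loop below prints the first few capacities under both policies, using the same integer arithmetic as the code above:

#include <cstdio>

int main() {
  unsigned long doubling = 16, quarter = 16;   // both start at the cap = 16 floor
  for (int step = 0; step < 8; step++) {
    std::printf("step %d: 2x -> %lu, 5/4 -> %lu\n", step, doubling, quarter);
    doubling *= 2;
    quarter = quarter * 5 / 4;                 // same expression as EnsureSize()
  }
  return 0;
}
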
diff --git a/libsanitizer/ubsan/Makefile.am b/libsanitizer/ubsan/Makefile.am
index 80b79e1cb82..9e70b12d3fd 100644
--- a/libsanitizer/ubsan/Makefile.am
+++ b/libsanitizer/ubsan/Makefile.am
@@ -3,20 +3,29 @@ AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libubsan.la
-ubsan_files = \
+ubsan_plugin_files = \
ubsan_diag.cc \
+ ubsan_flags.cc \
ubsan_handlers.cc \
ubsan_handlers_cxx.cc \
+ ubsan_init.cc \
ubsan_type_hash.cc \
+ ubsan_type_hash_itanium.cc \
+ ubsan_type_hash_win.cc \
ubsan_value.cc
+ubsan_files = \
+ $(ubsan_plugin_files) \
+ ubsan_init_standalone.cc
+
libubsan_la_SOURCES = $(ubsan_files)
libubsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la
if !USING_MAC_INTERPOSE
@@ -29,7 +38,7 @@ libubsan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
libubsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libubsan)
# Use special rules for files that require RTTI support.
-ubsan_handlers_cxx.% ubsan_type_hash.% : AM_CXXFLAGS += -frtti
+ubsan_handlers_cxx.% ubsan_type_hash.% ubsan_type_hash_itanium.% : AM_CXXFLAGS += -frtti
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
diff --git a/libsanitizer/ubsan/Makefile.in b/libsanitizer/ubsan/Makefile.in
index c38740be666..c1189b0cd8d 100644
--- a/libsanitizer/ubsan/Makefile.in
+++ b/libsanitizer/ubsan/Makefile.in
@@ -84,9 +84,12 @@ am__DEPENDENCIES_1 =
libubsan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1)
-am__objects_1 = ubsan_diag.lo ubsan_handlers.lo ubsan_handlers_cxx.lo \
- ubsan_type_hash.lo ubsan_value.lo
-am_libubsan_la_OBJECTS = $(am__objects_1)
+am__objects_1 = ubsan_diag.lo ubsan_flags.lo ubsan_handlers.lo \
+ ubsan_handlers_cxx.lo ubsan_init.lo ubsan_type_hash.lo \
+ ubsan_type_hash_itanium.lo ubsan_type_hash_win.lo \
+ ubsan_value.lo
+am__objects_2 = $(am__objects_1) ubsan_init_standalone.lo
+am_libubsan_la_OBJECTS = $(am__objects_2)
libubsan_la_OBJECTS = $(am_libubsan_la_OBJECTS)
libubsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
@@ -131,7 +134,7 @@ CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
@@ -165,6 +168,7 @@ NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
+OBSTACK_DEFS = @OBSTACK_DEFS@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
@@ -176,10 +180,12 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
+RPC_DEFS = @RPC_DEFS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
+TSAN_TARGET_DEPENDENT_OBJECTS = @TSAN_TARGET_DEPENDENT_OBJECTS@
VERSION = @VERSION@
VIEW_FILE = @VIEW_FILE@
abs_builddir = @abs_builddir@
@@ -255,16 +261,25 @@ gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
- -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
+ -Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS) \
+ -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libubsan.la
-ubsan_files = \
+ubsan_plugin_files = \
ubsan_diag.cc \
+ ubsan_flags.cc \
ubsan_handlers.cc \
ubsan_handlers_cxx.cc \
+ ubsan_init.cc \
ubsan_type_hash.cc \
+ ubsan_type_hash_itanium.cc \
+ ubsan_type_hash_win.cc \
ubsan_value.cc
+ubsan_files = \
+ $(ubsan_plugin_files) \
+ ubsan_init_standalone.cc
+
libubsan_la_SOURCES = $(ubsan_files)
libubsan_la_LIBADD = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
@@ -385,9 +400,14 @@ distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_diag.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers_cxx.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init_standalone.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_itanium.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_value.Plo@am__quote@
.cc.o:
@@ -589,7 +609,7 @@ uninstall-am: uninstall-toolexeclibLTLIBRARIES
# Use special rules for files that require RTTI support.
-ubsan_handlers_cxx.% ubsan_type_hash.% : AM_CXXFLAGS += -frtti
+ubsan_handlers_cxx.% ubsan_type_hash.% ubsan_type_hash_itanium.% : AM_CXXFLAGS += -frtti
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/libsanitizer/ubsan/ubsan_checks.inc b/libsanitizer/ubsan/ubsan_checks.inc
new file mode 100644
index 00000000000..935d82c5079
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_checks.inc
@@ -0,0 +1,51 @@
+//===-- ubsan_checks.inc ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// List of checks handled by UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_CHECK
+# error "Define UBSAN_CHECK prior to including this file!"
+#endif
+
+// UBSAN_CHECK(Name, SummaryKind, FlagName)
+// SummaryKind and FlagName should be string literals.
+
+UBSAN_CHECK(GenericUB, "undefined-behavior", "-fsanitize=undefined")
+UBSAN_CHECK(NullPointerUse, "null-pointer-use", "-fsanitize=null")
+UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use",
+ "-fsanitize=alignment")
+UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size",
+ "-fsanitize=object-size")
+UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow",
+ "-fsanitize=signed-integer-overflow")
+UBSAN_CHECK(UnsignedIntegerOverflow, "unsigned-integer-overflow",
+ "-fsanitize=unsigned-integer-overflow")
+UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero",
+ "-fsanitize=integer-divide-by-zero")
+UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero",
+ "-fsanitize=float-divide-by-zero")
+UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "-fsanitize=shift-base")
+UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent",
+ "-fsanitize=shift-exponent")
+UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "-fsanitize=bounds")
+UBSAN_CHECK(UnreachableCall, "unreachable-call", "-fsanitize=unreachable")
+UBSAN_CHECK(MissingReturn, "missing-return", "-fsanitize=return")
+UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index",
+ "-fsanitize=vla-bound")
+UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow",
+ "-fsanitize=float-cast-overflow")
+UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "-fsanitize=bool")
+UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "-fsanitize=enum")
+UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch",
+ "-fsanitize=function")
+UBSAN_CHECK(InvalidNullReturn, "invalid-null-return",
+ "-fsanitize=returns-nonnull-attribute")
+UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument",
+ "-fsanitize=nonnull-attribute")
+UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "-fsanitize=vptr")
+UBSAN_CHECK(CFIBadType, "cfi-bad-type", "-fsanitize=cfi")
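
Consumption sketch (this mirrors how ubsan_diag.h and ubsan_diag.cc below use the file): each consumer defines the UBSAN_CHECK X-macro, includes ubsan_checks.inc, and undefines it again, so the enum and the string table can never drift apart. Assumes ubsan_checks.inc is on the include path:

enum class ErrorType {
#define UBSAN_CHECK(Name, SummaryKind, FlagName) Name,
#include "ubsan_checks.inc"
#undef UBSAN_CHECK
};

static const char *SummaryKindFor(ErrorType Type) {
  switch (Type) {
#define UBSAN_CHECK(Name, SummaryKind, FlagName) \
  case ErrorType::Name:                          \
    return SummaryKind;
#include "ubsan_checks.inc"
#undef UBSAN_CHECK
  }
  return "undefined-behavior";
}
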
diff --git a/libsanitizer/ubsan/ubsan_diag.cc b/libsanitizer/ubsan/ubsan_diag.cc
index 786ffa7254f..1197f837f75 100644
--- a/libsanitizer/ubsan/ubsan_diag.cc
+++ b/libsanitizer/ubsan/ubsan_diag.cc
@@ -9,40 +9,89 @@
//
//===----------------------------------------------------------------------===//
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
#include "ubsan_diag.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include <stdio.h>
using namespace __ubsan;
-Location __ubsan::getCallerLocation(uptr CallerLoc) {
- if (!CallerLoc)
- return Location();
-
- uptr Loc = StackTrace::GetPreviousInstructionPc(CallerLoc);
- return getFunctionLocation(Loc, 0);
+static void MaybePrintStackTrace(uptr pc, uptr bp) {
+ // We assume that flags are already parsed, as UBSan runtime
+ // will definitely be called when we print the first diagnostics message.
+ if (!flags()->print_stacktrace)
+ return;
+ // We can only use slow unwind, as we don't have any information about stack
+ // top/bottom.
+ // FIXME: It's better to respect "fast_unwind_on_fatal" runtime flag and
+ // fetch stack top/bottom information if we have it (e.g. if we're running
+ // under ASan).
+ if (StackTrace::WillUseFastUnwind(false))
+ return;
+ BufferedStackTrace stack;
+ stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false);
+ stack.Print();
}
-Location __ubsan::getFunctionLocation(uptr Loc, const char **FName) {
- if (!Loc)
- return Location();
-
- AddressInfo Info;
- if (!Symbolizer::GetOrInit()->SymbolizeCode(Loc, &Info, 1) ||
- !Info.module || !*Info.module)
- return Location(Loc);
+static const char *ConvertTypeToString(ErrorType Type) {
+ switch (Type) {
+#define UBSAN_CHECK(Name, SummaryKind, FlagName) \
+ case ErrorType::Name: \
+ return SummaryKind;
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+ }
+ UNREACHABLE("unknown ErrorType!");
+}
- if (FName && Info.function)
- *FName = Info.function;
+static void MaybeReportErrorSummary(Location Loc, ErrorType Type) {
+ if (!common_flags()->print_summary)
+ return;
+ if (!flags()->report_error_type)
+ Type = ErrorType::GenericUB;
+ const char *ErrorKind = ConvertTypeToString(Type);
+ if (Loc.isSourceLocation()) {
+ SourceLocation SLoc = Loc.getSourceLocation();
+ if (!SLoc.isInvalid()) {
+ AddressInfo AI;
+ AI.file = internal_strdup(SLoc.getFilename());
+ AI.line = SLoc.getLine();
+ AI.column = SLoc.getColumn();
+ AI.function = internal_strdup(""); // Avoid printing ?? as function name.
+ ReportErrorSummary(ErrorKind, AI);
+ AI.Clear();
+ return;
+ }
+ } else if (Loc.isSymbolizedStack()) {
+ const AddressInfo &AI = Loc.getSymbolizedStack()->info;
+ ReportErrorSummary(ErrorKind, AI);
+ return;
+ }
+ ReportErrorSummary(ErrorKind);
+}
- if (!Info.file)
- return ModuleLocation(Info.module, Info.module_offset);
+namespace {
+class Decorator : public SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Highlight() const { return Green(); }
+ const char *EndHighlight() const { return Default(); }
+ const char *Note() const { return Black(); }
+ const char *EndNote() const { return Default(); }
+};
+}
- return SourceLocation(Info.file, Info.line, Info.column);
+SymbolizedStack *__ubsan::getSymbolizedLocation(uptr PC) {
+ InitAsStandaloneIfNecessary();
+ return Symbolizer::GetOrInit()->SymbolizePC(PC);
}
Diag &Diag::operator<<(const TypeDescriptor &V) {
@@ -82,17 +131,28 @@ static void renderLocation(Location Loc) {
if (SLoc.isInvalid())
LocBuffer.append("<unknown>");
else
- PrintSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(),
- SLoc.getColumn());
+ RenderSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(),
+ SLoc.getColumn(), common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
break;
}
- case Location::LK_Module:
- PrintModuleAndOffset(&LocBuffer, Loc.getModuleLocation().getModuleName(),
- Loc.getModuleLocation().getOffset());
- break;
case Location::LK_Memory:
LocBuffer.append("%p", Loc.getMemoryLocation());
break;
+ case Location::LK_Symbolized: {
+ const AddressInfo &Info = Loc.getSymbolizedStack()->info;
+ if (Info.file) {
+ RenderSourceLocation(&LocBuffer, Info.file, Info.line, Info.column,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ } else if (Info.module) {
+ RenderModuleLocation(&LocBuffer, Info.module, Info.module_offset,
+ common_flags()->strip_path_prefix);
+ } else {
+ LocBuffer.append("%p", Info.address);
+ }
+ break;
+ }
case Location::LK_Null:
LocBuffer.append("<unknown>");
break;
@@ -116,8 +176,12 @@ static void renderText(const char *Message, const Diag::Arg *Args) {
case Diag::AK_String:
Printf("%s", A.String);
break;
- case Diag::AK_Mangled: {
- Printf("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
+ case Diag::AK_TypeName: {
+ if (SANITIZER_WINDOWS)
+ // The Windows implementation demangles names early.
+ Printf("'%s'", A.String);
+ else
+ Printf("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
break;
}
case Diag::AK_SInt:
@@ -137,7 +201,11 @@ static void renderText(const char *Message, const Diag::Arg *Args) {
// FIXME: Support floating-point formatting in sanitizer_common's
// printf, and stop using snprintf here.
char Buffer[32];
+#if SANITIZER_WINDOWS
+ sprintf_s(Buffer, sizeof(Buffer), "%Lg", (long double)A.Float);
+#else
snprintf(Buffer, sizeof(Buffer), "%Lg", (long double)A.Float);
+#endif
Printf("%s", Buffer);
break;
}
@@ -162,36 +230,49 @@ static Range *upperBound(MemoryLocation Loc, Range *Ranges,
return Best;
}
+static inline uptr subtractNoOverflow(uptr LHS, uptr RHS) {
+ return (LHS < RHS) ? 0 : LHS - RHS;
+}
+
+static inline uptr addNoOverflow(uptr LHS, uptr RHS) {
+ const uptr Limit = (uptr)-1;
+ return (LHS > Limit - RHS) ? Limit : LHS + RHS;
+}
+
/// Render a snippet of the address space near a location.
-static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
- MemoryLocation Loc,
+static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
Range *Ranges, unsigned NumRanges,
const Diag::Arg *Args) {
- const unsigned BytesToShow = 32;
- const unsigned MinBytesNearLoc = 4;
-
// Show at least the 8 bytes surrounding Loc.
- MemoryLocation Min = Loc - MinBytesNearLoc, Max = Loc + MinBytesNearLoc;
+ const unsigned MinBytesNearLoc = 4;
+ MemoryLocation Min = subtractNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation Max = addNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation OrigMin = Min;
for (unsigned I = 0; I < NumRanges; ++I) {
Min = __sanitizer::Min(Ranges[I].getStart().getMemoryLocation(), Min);
Max = __sanitizer::Max(Ranges[I].getEnd().getMemoryLocation(), Max);
}
// If we have too many interesting bytes, prefer to show bytes after Loc.
+ const unsigned BytesToShow = 32;
if (Max - Min > BytesToShow)
- Min = __sanitizer::Min(Max - BytesToShow, Loc - MinBytesNearLoc);
- Max = Min + BytesToShow;
+ Min = __sanitizer::Min(Max - BytesToShow, OrigMin);
+ Max = addNoOverflow(Min, BytesToShow);
+
+ if (!IsAccessibleMemoryRange(Min, Max - Min)) {
+ Printf("<memory cannot be printed>\n");
+ return;
+ }
// Emit data.
for (uptr P = Min; P != Max; ++P) {
- // FIXME: Check that the address is readable before printing it.
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
Printf("%s%02x", (P % 8 == 0) ? " " : " ", C);
}
Printf("\n");
// Emit highlights.
- Printf(Decor.Green());
+ Printf(Decor.Highlight());
Range *InRange = upperBound(Min, Ranges, NumRanges);
for (uptr P = Min; P != Max; ++P) {
char Pad = ' ', Byte = ' ';
@@ -206,7 +287,7 @@ static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
char Buffer[] = { Pad, Pad, P == Loc ? '^' : Byte, Byte, 0 };
Printf((P % 8 == 0) ? Buffer : &Buffer[1]);
}
- Printf("%s\n", Decor.Default());
+ Printf("%s\n", Decor.EndHighlight());
// Go over the line again, and print names for the ranges.
InRange = 0;
@@ -244,8 +325,9 @@ static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
}
Diag::~Diag() {
- __sanitizer::AnsiColorDecorator Decor(PrintsToTty());
- SpinMutexLock l(&CommonSanitizerReportMutex);
+ // All diagnostics should be printed under report mutex.
+ CommonSanitizerReportMutex.CheckLocked();
+ Decorator Decor;
Printf(Decor.Bold());
renderLocation(Loc);
@@ -253,11 +335,11 @@ Diag::~Diag() {
switch (Level) {
case DL_Error:
Printf("%s runtime error: %s%s",
- Decor.Red(), Decor.Default(), Decor.Bold());
+ Decor.Warning(), Decor.EndWarning(), Decor.Bold());
break;
case DL_Note:
- Printf("%s note: %s", Decor.Black(), Decor.Default());
+ Printf("%s note: %s", Decor.Note(), Decor.EndNote());
break;
}
@@ -269,3 +351,39 @@ Diag::~Diag() {
renderMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges,
NumRanges, Args);
}
+
+ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc,
+ ErrorType Type)
+ : Opts(Opts), SummaryLoc(SummaryLoc), Type(Type) {
+ InitAsStandaloneIfNecessary();
+ CommonSanitizerReportMutex.Lock();
+}
+
+ScopedReport::~ScopedReport() {
+ MaybePrintStackTrace(Opts.pc, Opts.bp);
+ MaybeReportErrorSummary(SummaryLoc, Type);
+ CommonSanitizerReportMutex.Unlock();
+ if (Opts.DieAfterReport || flags()->halt_on_error)
+ Die();
+}
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kVptrCheck[] = "vptr_check";
+static const char *kSuppressionTypes[] = { kVptrCheck };
+
+void __ubsan::InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+}
+
+bool __ubsan::IsVptrCheckSuppressed(const char *TypeName) {
+ InitAsStandaloneIfNecessary();
+ CHECK(suppression_ctx);
+ Suppression *s;
+ return suppression_ctx->Match(TypeName, kVptrCheck, &s);
+}
+
+#endif // CAN_SANITIZE_UB
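
Usage sketch (hypothetical handler, not taken from this patch; assumes it lives inside namespace __ubsan like the real handlers): the intended pattern is to capture pc/bp with GET_REPORT_OPTIONS, hold a ScopedReport for the lifetime of the diagnostics, and let its destructor print the optional stack trace, emit the summary, release the report mutex and honour halt_on_error/DieAfterReport:

static void handleExampleCheck(SourceLocation Loc) {   // hypothetical check
  GET_REPORT_OPTIONS(false);                           // captures pc/bp into Opts
  ScopedReport R(Opts, Loc, ErrorType::GenericUB);     // locks the report mutex
  Diag(Loc, DL_Error, "example diagnostic: value %0 is out of range") << 42ull;
}  // ~ScopedReport: stack trace (if print_stacktrace), summary, unlock, maybe Die()
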
diff --git a/libsanitizer/ubsan/ubsan_diag.h b/libsanitizer/ubsan/ubsan_diag.h
index 04503687568..7103d522953 100644
--- a/libsanitizer/ubsan/ubsan_diag.h
+++ b/libsanitizer/ubsan/ubsan_diag.h
@@ -12,78 +12,85 @@
#define UBSAN_DIAG_H
#include "ubsan_value.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __ubsan {
-/// \brief A location within a loaded module in the program. These are used when
-/// the location can't be resolved to a SourceLocation.
-class ModuleLocation {
- const char *ModuleName;
- uptr Offset;
+class SymbolizedStackHolder {
+ SymbolizedStack *Stack;
+
+ void clear() {
+ if (Stack)
+ Stack->ClearAll();
+ }
public:
- ModuleLocation() : ModuleName(0), Offset(0) {}
- ModuleLocation(const char *ModuleName, uptr Offset)
- : ModuleName(ModuleName), Offset(Offset) {}
- const char *getModuleName() const { return ModuleName; }
- uptr getOffset() const { return Offset; }
+ explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr)
+ : Stack(Stack) {}
+ ~SymbolizedStackHolder() { clear(); }
+ void reset(SymbolizedStack *S) {
+ if (Stack != S)
+ clear();
+ Stack = S;
+ }
+ const SymbolizedStack *get() const { return Stack; }
};
+SymbolizedStack *getSymbolizedLocation(uptr PC);
+
+inline SymbolizedStack *getCallerLocation(uptr CallerPC) {
+ CHECK(CallerPC);
+ uptr PC = StackTrace::GetPreviousInstructionPc(CallerPC);
+ return getSymbolizedLocation(PC);
+}
+
/// A location of some data within the program's address space.
typedef uptr MemoryLocation;
/// \brief Location at which a diagnostic can be emitted. Either a
-/// SourceLocation, a ModuleLocation, or a MemoryLocation.
+/// SourceLocation, a MemoryLocation, or a SymbolizedStack.
class Location {
public:
- enum LocationKind { LK_Null, LK_Source, LK_Module, LK_Memory };
+ enum LocationKind { LK_Null, LK_Source, LK_Memory, LK_Symbolized };
private:
LocationKind Kind;
// FIXME: In C++11, wrap these in an anonymous union.
SourceLocation SourceLoc;
- ModuleLocation ModuleLoc;
MemoryLocation MemoryLoc;
+ const SymbolizedStack *SymbolizedLoc; // Not owned.
public:
Location() : Kind(LK_Null) {}
Location(SourceLocation Loc) :
Kind(LK_Source), SourceLoc(Loc) {}
- Location(ModuleLocation Loc) :
- Kind(LK_Module), ModuleLoc(Loc) {}
Location(MemoryLocation Loc) :
Kind(LK_Memory), MemoryLoc(Loc) {}
+ // SymbolizedStackHolder must outlive Location object.
+ Location(const SymbolizedStackHolder &Stack) :
+ Kind(LK_Symbolized), SymbolizedLoc(Stack.get()) {}
LocationKind getKind() const { return Kind; }
bool isSourceLocation() const { return Kind == LK_Source; }
- bool isModuleLocation() const { return Kind == LK_Module; }
bool isMemoryLocation() const { return Kind == LK_Memory; }
+ bool isSymbolizedStack() const { return Kind == LK_Symbolized; }
SourceLocation getSourceLocation() const {
CHECK(isSourceLocation());
return SourceLoc;
}
- ModuleLocation getModuleLocation() const {
- CHECK(isModuleLocation());
- return ModuleLoc;
- }
MemoryLocation getMemoryLocation() const {
CHECK(isMemoryLocation());
return MemoryLoc;
}
+ const SymbolizedStack *getSymbolizedStack() const {
+ CHECK(isSymbolizedStack());
+ return SymbolizedLoc;
+ }
};
-/// Try to obtain a location for the caller. This might fail, and produce either
-/// an invalid location or a module location for the caller.
-Location getCallerLocation(uptr CallerLoc = GET_CALLER_PC());
-
-/// Try to obtain a location for the given function pointer. This might fail,
-/// and produce either an invalid location or a module location for the caller.
-/// If FName is non-null and the name of the function is known, set *FName to
-/// the function name, otherwise *FName is unchanged.
-Location getFunctionLocation(uptr Loc, const char **FName);
-
/// A diagnostic severity level.
enum DiagLevel {
DL_Error, ///< An error.
@@ -104,11 +111,11 @@ public:
const char *getText() const { return Text; }
};
-/// \brief A mangled C++ name. Really just a strong typedef for 'const char*'.
-class MangledName {
+/// \brief A C++ type name. Really just a strong typedef for 'const char*'.
+class TypeName {
const char *Name;
public:
- MangledName(const char *Name) : Name(Name) {}
+ TypeName(const char *Name) : Name(Name) {}
const char *getName() const { return Name; }
};
@@ -132,7 +139,7 @@ public:
/// Kinds of arguments, corresponding to members of \c Arg's union.
enum ArgKind {
AK_String, ///< A string argument, displayed as-is.
- AK_Mangled,///< A C++ mangled name, demangled before display.
+ AK_TypeName,///< A C++ type name, possibly demangled before display.
AK_UInt, ///< An unsigned integer argument.
AK_SInt, ///< A signed integer argument.
AK_Float, ///< A floating-point argument.
@@ -143,7 +150,7 @@ public:
struct Arg {
Arg() {}
Arg(const char *String) : Kind(AK_String), String(String) {}
- Arg(MangledName MN) : Kind(AK_Mangled), String(MN.getName()) {}
+ Arg(TypeName TN) : Kind(AK_TypeName), String(TN.getName()) {}
Arg(UIntMax UInt) : Kind(AK_UInt), UInt(UInt) {}
Arg(SIntMax SInt) : Kind(AK_SInt), SInt(SInt) {}
Arg(FloatMax Float) : Kind(AK_Float), Float(Float) {}
@@ -193,7 +200,7 @@ public:
~Diag();
Diag &operator<<(const char *Str) { return AddArg(Str); }
- Diag &operator<<(MangledName MN) { return AddArg(MN); }
+ Diag &operator<<(TypeName TN) { return AddArg(TN); }
Diag &operator<<(unsigned long long V) { return AddArg(UIntMax(V)); }
Diag &operator<<(const void *V) { return AddArg(V); }
Diag &operator<<(const TypeDescriptor &V);
@@ -201,6 +208,43 @@ public:
Diag &operator<<(const Range &R) { return AddRange(R); }
};
+struct ReportOptions {
+ /// If DieAfterReport is specified, UBSan will terminate the program after the
+ /// report is printed.
+ bool DieAfterReport;
+ /// pc/bp are used to unwind the stack trace.
+ uptr pc;
+ uptr bp;
+};
+
+enum class ErrorType {
+#define UBSAN_CHECK(Name, SummaryKind, FlagName) Name,
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+};
+
+#define GET_REPORT_OPTIONS(die_after_report) \
+ GET_CALLER_PC_BP; \
+ ReportOptions Opts = {die_after_report, pc, bp}
+
+/// \brief Instantiate this class before printing diagnostics in the error
+/// report. This class ensures that reports from different threads and from
+/// different sanitizers won't be mixed.
+class ScopedReport {
+ ReportOptions Opts;
+ Location SummaryLoc;
+ ErrorType Type;
+
+public:
+ ScopedReport(ReportOptions Opts, Location SummaryLoc,
+ ErrorType Type = ErrorType::GenericUB);
+ void setErrorType(ErrorType T) { Type = T; }
+ ~ScopedReport();
+};
+
+void InitializeSuppressions();
+bool IsVptrCheckSuppressed(const char *TypeName);
+
} // namespace __ubsan
#endif // UBSAN_DIAG_H
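For orientation, every handler reworked in ubsan_handlers.cc further down follows the same pattern against this interface. A minimal sketch, assuming a hypothetical FooData record and handler name (the real *Data structs live in ubsan_handlers.h); everything else is taken from the declarations above:

// Hypothetical check data, for illustration only.
struct FooData {
  SourceLocation Loc;
  const TypeDescriptor &Type;
};

static void handleFooImpl(FooData *Data, ReportOptions Opts) {
  SourceLocation Loc = Data->Loc.acquire();        // also marks the location as reported
  if (Loc.isDisabled() && !Opts.DieAfterReport)    // same test as ignoreReport() below
    return;
  ScopedReport R(Opts, Loc, ErrorType::GenericUB); // serializes output across threads/tools
  Diag(Loc, DL_Error, "invalid use of type %0") << Data->Type;
}

void __ubsan_handle_foo(FooData *Data) {
  GET_REPORT_OPTIONS(false);                       // capture caller pc/bp; recoverable variant
  handleFooImpl(Data, Opts);
}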
diff --git a/libsanitizer/ubsan/ubsan_flags.cc b/libsanitizer/ubsan/ubsan_flags.cc
new file mode 100644
index 00000000000..666e3697ddc
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_flags.cc
@@ -0,0 +1,92 @@
+//===-- ubsan_flags.cc ----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __ubsan {
+
+const char *MaybeCallUbsanDefaultOptions() {
+ return (&__ubsan_default_options) ? __ubsan_default_options() : "";
+}
+
+Flags ubsan_flags;
+
+void Flags::SetDefaults() {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+}
+
+void RegisterUbsanFlags(FlagParser *parser, Flags *f) {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+}
+
+void InitializeFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.print_summary = false;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterCommonFlags(&parser);
+ RegisterUbsanFlags(&parser, f);
+
+ // Override from user-specified string.
+ parser.ParseString(MaybeCallUbsanDefaultOptions());
+ // Override from predefined file
+ char *ubsanFlags;
+ uptr len = 0;
+ ReadFileToBuffer("/UBSAN_OPTIONS", &ubsanFlags, &len, GetPageSizeCached());
+ ParseFlagsFromString(f, ubsanFlags);
+ UnmapOrDie(ubsanFlags, len);
+ // Override from environment variable.
+ parser.ParseString(GetEnv("UBSAN_OPTIONS"));
+ SetVerbosity(common_flags()->verbosity);
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
+} // namespace __ubsan
+
+extern "C" {
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options() { return ""; }
+#endif
+
+#if SANITIZER_WINDOWS
+const char *__ubsan_default_default_options() { return ""; }
+# ifdef _WIN64
+# pragma comment(linker, "/alternatename:__ubsan_default_options=__ubsan_default_default_options")
+# else
+# pragma comment(linker, "/alternatename:___ubsan_default_options=___ubsan_default_default_options")
+# endif
+#endif
+
+} // extern "C"
+
+#endif // CAN_SANITIZE_UB
diff --git a/libsanitizer/ubsan/ubsan_flags.h b/libsanitizer/ubsan/ubsan_flags.h
new file mode 100644
index 00000000000..2604b6b00cc
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_flags.h
@@ -0,0 +1,47 @@
+//===-- ubsan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAGS_H
+#define UBSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+class FlagParser;
+}
+
+namespace __ubsan {
+
+struct Flags {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags ubsan_flags;
+inline Flags *flags() { return &ubsan_flags; }
+
+void InitializeFlags();
+void RegisterUbsanFlags(FlagParser *parser, Flags *f);
+
+const char *MaybeCallUbsanDefaultOptions();
+
+} // namespace __ubsan
+
+extern "C" {
+// Users may provide their own implementation of __ubsan_default_options to
+// override the default flag values.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options();
+} // extern "C"
+
+#endif // UBSAN_FLAGS_H
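The weak __ubsan_default_options() hook declared above lets an application bake in flag defaults without touching the environment. A user would define it with C linkage, for example (the flag values here are only illustrative):

// Defined in user code linked against the UBSan runtime; it overrides the weak
// default from ubsan_flags.cc and is parsed before the UBSAN_OPTIONS variable.
extern "C" const char *__ubsan_default_options() {
  return "print_stacktrace=1:halt_on_error=1";
}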
diff --git a/libsanitizer/ubsan/ubsan_flags.inc b/libsanitizer/ubsan/ubsan_flags.inc
new file mode 100644
index 00000000000..170777adf26
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_flags.inc
@@ -0,0 +1,24 @@
+//===-- ubsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// UBSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAG
+# error "Define UBSAN_FLAG prior to including this file!"
+#endif
+
+// UBSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+UBSAN_FLAG(bool, halt_on_error, false,
+ "Crash the program after printing the first error report")
+UBSAN_FLAG(bool, print_stacktrace, false,
+ "Include full stacktrace into an error report")
+UBSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+UBSAN_FLAG(bool, report_error_type, false,
+ "Print specific error type instead of 'undefined-behavior' in summary.")
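Each UBSAN_FLAG entry above is expanded three times through the X-macro includes in ubsan_flags.h and ubsan_flags.cc. For print_stacktrace, for instance, the preprocessed result is roughly:

// Inside struct Flags (ubsan_flags.h):
bool print_stacktrace;

// Inside Flags::SetDefaults() (ubsan_flags.cc):
print_stacktrace = false;

// Inside RegisterUbsanFlags() (ubsan_flags.cc):
RegisterFlag(parser, "print_stacktrace",
             "Include full stacktrace into an error report", &f->print_stacktrace);

At run time the value can then be overridden, e.g. via UBSAN_OPTIONS=print_stacktrace=1, which InitializeFlags() above parses last.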
diff --git a/libsanitizer/ubsan/ubsan_handlers.cc b/libsanitizer/ubsan/ubsan_handlers.cc
index dd2e7bbf3e5..8530fcffc88 100644
--- a/libsanitizer/ubsan/ubsan_handlers.cc
+++ b/libsanitizer/ubsan/ubsan_handlers.cc
@@ -9,6 +9,8 @@
//
//===----------------------------------------------------------------------===//
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
#include "ubsan_handlers.h"
#include "ubsan_diag.h"
@@ -17,263 +19,490 @@
using namespace __sanitizer;
using namespace __ubsan;
+static bool ignoreReport(SourceLocation SLoc, ReportOptions Opts) {
+  // If the source location has already been acquired, we don't need to print
+  // the error report a second time. However, if we're in an unrecoverable
+  // handler, it's possible that the location was acquired by a concurrently
+  // running thread. In that case, continue execution so that one of the
+  // threads grabs the report mutex and prints the report before the program
+  // crashes.
+ return SLoc.isDisabled() && !Opts.DieAfterReport;
+}
+
namespace __ubsan {
- const char *TypeCheckKinds[] = {
+const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
- "member call on", "constructor call on", "downcast of", "downcast of"
- };
+ "member call on", "constructor call on", "downcast of", "downcast of",
+ "upcast of", "cast to virtual base of"};
}
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
- Location FallbackLoc) {
+ ReportOptions Opts) {
Location Loc = Data->Loc.acquire();
-
// Use the SourceLocation from Data to track deduplication, even if 'invalid'
- if (Loc.getSourceLocation().isDisabled())
+ if (ignoreReport(Loc.getSourceLocation(), Opts))
return;
- if (Data->Loc.isInvalid())
+
+ SymbolizedStackHolder FallbackLoc;
+ if (Data->Loc.isInvalid()) {
+ FallbackLoc.reset(getCallerLocation(Opts.pc));
Loc = FallbackLoc;
+ }
- if (!Pointer)
+ ScopedReport R(Opts, Loc);
+
+ if (!Pointer) {
+ R.setErrorType(ErrorType::NullPointerUse);
Diag(Loc, DL_Error, "%0 null pointer of type %1")
<< TypeCheckKinds[Data->TypeCheckKind] << Data->Type;
- else if (Data->Alignment && (Pointer & (Data->Alignment - 1)))
+ } else if (Data->Alignment && (Pointer & (Data->Alignment - 1))) {
+ R.setErrorType(ErrorType::MisalignedPointerUse);
Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, "
"which requires %2 byte alignment")
<< TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer
<< Data->Alignment << Data->Type;
- else
+ } else {
+ R.setErrorType(ErrorType::InsufficientObjectSize);
Diag(Loc, DL_Error, "%0 address %1 with insufficient space "
"for an object of type %2")
<< TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
+ }
if (Pointer)
Diag(Pointer, DL_Note, "pointer points here");
}
+
void __ubsan::__ubsan_handle_type_mismatch(TypeMismatchData *Data,
ValueHandle Pointer) {
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation());
+ GET_REPORT_OPTIONS(false);
+ handleTypeMismatchImpl(Data, Pointer, Opts);
}
void __ubsan::__ubsan_handle_type_mismatch_abort(TypeMismatchData *Data,
ValueHandle Pointer) {
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation());
+ GET_REPORT_OPTIONS(true);
+ handleTypeMismatchImpl(Data, Pointer, Opts);
Die();
}
/// \brief Common diagnostic emission for various forms of integer overflow.
-template<typename T> static void HandleIntegerOverflow(OverflowData *Data,
- ValueHandle LHS,
- const char *Operator,
- T RHS) {
+template <typename T>
+static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ const char *Operator, T RHS,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ bool IsSigned = Data->Type.isSignedIntegerTy();
+ ScopedReport R(Opts, Loc, IsSigned ? ErrorType::SignedIntegerOverflow
+ : ErrorType::UnsignedIntegerOverflow);
+
Diag(Loc, DL_Error, "%0 integer overflow: "
"%1 %2 %3 cannot be represented in type %4")
- << (Data->Type.isSignedIntegerTy() ? "signed" : "unsigned")
+ << (IsSigned ? "signed" : "unsigned")
<< Value(Data->Type, LHS) << Operator << RHS << Data->Type;
}
-void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "+", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_add_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_add_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_sub_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "-", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_sub_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_sub_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_mul_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "*", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_mul_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_mul_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
- ValueHandle OldVal) {
+#define UBSAN_OVERFLOW_HANDLER(handler_name, op, abort) \
+ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \
+ ValueHandle RHS) { \
+ GET_REPORT_OPTIONS(abort); \
+ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \
+ if (abort) Die(); \
+ }
+
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true)
+
+static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
- if (Data->Type.isSignedIntegerTy())
+ bool IsSigned = Data->Type.isSignedIntegerTy();
+ ScopedReport R(Opts, Loc, IsSigned ? ErrorType::SignedIntegerOverflow
+ : ErrorType::UnsignedIntegerOverflow);
+
+ if (IsSigned)
Diag(Loc, DL_Error,
"negation of %0 cannot be represented in type %1; "
"cast to an unsigned type to negate this value to itself")
- << Value(Data->Type, OldVal) << Data->Type;
+ << Value(Data->Type, OldVal) << Data->Type;
else
- Diag(Loc, DL_Error,
- "negation of %0 cannot be represented in type %1")
- << Value(Data->Type, OldVal) << Data->Type;
+ Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1")
+ << Value(Data->Type, OldVal) << Data->Type;
+}
+
+void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
+ ValueHandle OldVal) {
+ GET_REPORT_OPTIONS(false);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
}
void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data,
ValueHandle OldVal) {
- __ubsan_handle_negate_overflow(Data, OldVal);
+ GET_REPORT_OPTIONS(true);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
Die();
}
-void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
+static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ ValueHandle RHS, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value LHSVal(Data->Type, LHS);
Value RHSVal(Data->Type, RHS);
- if (RHSVal.isMinusOne())
+ if (RHSVal.isMinusOne()) {
+ R.setErrorType(ErrorType::SignedIntegerOverflow);
Diag(Loc, DL_Error,
"division of %0 by -1 cannot be represented in type %1")
<< LHSVal << Data->Type;
- else
+ } else {
+ R.setErrorType(Data->Type.isIntegerTy() ? ErrorType::IntegerDivideByZero
+ : ErrorType::FloatDivideByZero);
Diag(Loc, DL_Error, "division by zero");
+ }
+}
+
+void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
+ ValueHandle LHS, ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
}
void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data,
ValueHandle LHS,
ValueHandle RHS) {
- __ubsan_handle_divrem_overflow(Data, LHS, RHS);
+ GET_REPORT_OPTIONS(true);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
Die();
}
-void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
+static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS, ValueHandle RHS,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value LHSVal(Data->LHSType, LHS);
Value RHSVal(Data->RHSType, RHS);
- if (RHSVal.isNegative())
+ if (RHSVal.isNegative()) {
+ R.setErrorType(ErrorType::InvalidShiftExponent);
Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal;
- else if (RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth())
- Diag(Loc, DL_Error,
- "shift exponent %0 is too large for %1-bit type %2")
- << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType;
- else if (LHSVal.isNegative())
+ } else if (RHSVal.getPositiveIntValue() >=
+ Data->LHSType.getIntegerBitWidth()) {
+ R.setErrorType(ErrorType::InvalidShiftExponent);
+ Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type %2")
+ << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType;
+ } else if (LHSVal.isNegative()) {
+ R.setErrorType(ErrorType::InvalidShiftBase);
Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal;
- else
+ } else {
+ R.setErrorType(ErrorType::InvalidShiftBase);
Diag(Loc, DL_Error,
"left shift of %0 by %1 places cannot be represented in type %2")
- << LHSVal << RHSVal << Data->LHSType;
+ << LHSVal << RHSVal << Data->LHSType;
+ }
+}
+
+void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS,
+ ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
}
void __ubsan::__ubsan_handle_shift_out_of_bounds_abort(
ShiftOutOfBoundsData *Data,
ValueHandle LHS,
ValueHandle RHS) {
- __ubsan_handle_shift_out_of_bounds(Data, LHS, RHS);
+ GET_REPORT_OPTIONS(true);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
Die();
}
-void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
- ValueHandle Index) {
+static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc, ErrorType::OutOfBoundsIndex);
+
Value IndexVal(Data->IndexType, Index);
Diag(Loc, DL_Error, "index %0 out of bounds for type %1")
<< IndexVal << Data->ArrayType;
}
+
+void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
+ ValueHandle Index) {
+ GET_REPORT_OPTIONS(false);
+ handleOutOfBoundsImpl(Data, Index, Opts);
+}
void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data,
ValueHandle Index) {
- __ubsan_handle_out_of_bounds(Data, Index);
+ GET_REPORT_OPTIONS(true);
+ handleOutOfBoundsImpl(Data, Index, Opts);
Die();
}
-void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+static void handleBuiltinUnreachableImpl(UnreachableData *Data,
+ ReportOptions Opts) {
+ ScopedReport R(Opts, Data->Loc, ErrorType::UnreachableCall);
Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call");
+}
+
+void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleBuiltinUnreachableImpl(Data, Opts);
Die();
}
-void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) {
+ ScopedReport R(Opts, Data->Loc, ErrorType::MissingReturn);
Diag(Data->Loc, DL_Error,
"execution reached the end of a value-returning function "
"without returning a value");
+}
+
+void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleMissingReturnImpl(Data, Opts);
Die();
}
-void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
- ValueHandle Bound) {
+static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc, ErrorType::NonPositiveVLAIndex);
+
Diag(Loc, DL_Error, "variable length array bound evaluates to "
"non-positive value %0")
<< Value(Data->Type, Bound);
}
+
+void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(false);
+ handleVLABoundNotPositive(Data, Bound, Opts);
+}
void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data,
- ValueHandle Bound) {
- __ubsan_handle_vla_bound_not_positive(Data, Bound);
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(true);
+ handleVLABoundNotPositive(Data, Bound, Opts);
Die();
}
+static bool looksLikeFloatCastOverflowDataV1(void *Data) {
+ // First field is either a pointer to filename or a pointer to a
+ // TypeDescriptor.
+ u8 *FilenameOrTypeDescriptor;
+ internal_memcpy(&FilenameOrTypeDescriptor, Data,
+ sizeof(FilenameOrTypeDescriptor));
+
+  // Heuristic: for float_cast_overflow, the TypeKind will be either TK_Integer
+  // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If the kind is known, the sum
+  // of the first two bytes is 0 or 1 regardless of BE/LE layout; if the field
+  // were a filename, the sum of two printable characters could never be that
+  // small. Otherwise, if either byte is 0xff, this is most likely a TK_Unknown
+  // type descriptor.
+ u16 MaybeFromTypeKind =
+ FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1];
+ return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff ||
+ FilenameOrTypeDescriptor[1] == 0xff;
+}
+
+static void handleFloatCastOverflow(void *DataPtr, ValueHandle From,
+ ReportOptions Opts) {
+ SymbolizedStackHolder CallerLoc;
+ Location Loc;
+ const TypeDescriptor *FromType, *ToType;
+
+ if (looksLikeFloatCastOverflowDataV1(DataPtr)) {
+ auto Data = reinterpret_cast<FloatCastOverflowData *>(DataPtr);
+ CallerLoc.reset(getCallerLocation(Opts.pc));
+ Loc = CallerLoc;
+ FromType = &Data->FromType;
+ ToType = &Data->ToType;
+ } else {
+ auto Data = reinterpret_cast<FloatCastOverflowDataV2 *>(DataPtr);
+ SourceLocation SLoc = Data->Loc.acquire();
+ if (ignoreReport(SLoc, Opts))
+ return;
+ Loc = SLoc;
+ FromType = &Data->FromType;
+ ToType = &Data->ToType;
+ }
+
+ ScopedReport R(Opts, Loc, ErrorType::FloatCastOverflow);
-void __ubsan::__ubsan_handle_float_cast_overflow(FloatCastOverflowData *Data,
- ValueHandle From) {
- // TODO: Add deduplication once a SourceLocation is generated for this check.
- Diag(getCallerLocation(), DL_Error,
+ Diag(Loc, DL_Error,
"value %0 is outside the range of representable values of type %2")
- << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+ << Value(*FromType, From) << *FromType << *ToType;
}
-void __ubsan::__ubsan_handle_float_cast_overflow_abort(
- FloatCastOverflowData *Data,
- ValueHandle From) {
- Diag(getCallerLocation(), DL_Error,
- "value %0 is outside the range of representable values of type %2")
- << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+
+void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) {
+ GET_REPORT_OPTIONS(false);
+ handleFloatCastOverflow(Data, From, Opts);
+}
+void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data,
+ ValueHandle From) {
+ GET_REPORT_OPTIONS(true);
+ handleFloatCastOverflow(Data, From, Opts);
Die();
}
-void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
- ValueHandle Val) {
+static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ // This check could be more precise if we used different handlers for
+ // -fsanitize=bool and -fsanitize=enum.
+ bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'"));
+ ScopedReport R(Opts, Loc, IsBool ? ErrorType::InvalidBoolLoad
+ : ErrorType::InvalidEnumLoad);
+
Diag(Loc, DL_Error,
"load of value %0, which is not a valid value for type %1")
<< Value(Data->Type, Val) << Data->Type;
}
+
+void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
+ ValueHandle Val) {
+ GET_REPORT_OPTIONS(false);
+ handleLoadInvalidValue(Data, Val, Opts);
+}
void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data,
ValueHandle Val) {
- __ubsan_handle_load_invalid_value(Data, Val);
+ GET_REPORT_OPTIONS(true);
+ handleLoadInvalidValue(Data, Val, Opts);
Die();
}
-void __ubsan::__ubsan_handle_function_type_mismatch(
- FunctionTypeMismatchData *Data,
- ValueHandle Function) {
- const char *FName = "(unknown)";
+static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ReportOptions Opts) {
+ SourceLocation CallLoc = Data->Loc.acquire();
+ if (ignoreReport(CallLoc, Opts))
+ return;
- Location Loc = getFunctionLocation(Function, &FName);
+ ScopedReport R(Opts, CallLoc, ErrorType::FunctionTypeMismatch);
- Diag(Data->Loc, DL_Error,
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
+
+ Diag(CallLoc, DL_Error,
"call to function %0 through pointer to incorrect function type %1")
- << FName << Data->Type;
- Diag(Loc, DL_Note, "%0 defined here") << FName;
+ << FName << Data->Type;
+ Diag(FLoc, DL_Note, "%0 defined here") << FName;
+}
+
+void
+__ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function) {
+ GET_REPORT_OPTIONS(false);
+ handleFunctionTypeMismatch(Data, Function, Opts);
}
void __ubsan::__ubsan_handle_function_type_mismatch_abort(
- FunctionTypeMismatchData *Data,
- ValueHandle Function) {
- __ubsan_handle_function_type_mismatch(Data, Function);
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(true);
+ handleFunctionTypeMismatch(Data, Function, Opts);
+ Die();
+}
+
+static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ if (ignoreReport(Loc, Opts))
+ return;
+
+ ScopedReport R(Opts, Loc, ErrorType::InvalidNullReturn);
+
+ Diag(Loc, DL_Error, "null pointer returned from function declared to never "
+ "return null");
+ if (!Data->AttrLoc.isInvalid())
+ Diag(Data->AttrLoc, DL_Note, "returns_nonnull attribute specified here");
+}
+
+void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, Opts);
+}
+
+void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, Opts);
+ Die();
+}
+
+static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ if (ignoreReport(Loc, Opts))
+ return;
+
+ ScopedReport R(Opts, Loc, ErrorType::InvalidNullArgument);
+
+ Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to "
+ "never be null") << Data->ArgIndex;
+ if (!Data->AttrLoc.isInvalid())
+ Diag(Data->AttrLoc, DL_Note, "nonnull attribute specified here");
+}
+
+void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts);
+}
+
+void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts);
+ Die();
+}
+
+static void handleCFIBadIcall(CFIBadIcallData *Data, ValueHandle Function,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ if (ignoreReport(Loc, Opts))
+ return;
+
+ ScopedReport R(Opts, Loc);
+
+ Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during "
+ "indirect function call")
+ << Data->Type;
+
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
+ Diag(FLoc, DL_Note, "%0 defined here") << FName;
+}
+
+void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *Data,
+ ValueHandle Function) {
+ GET_REPORT_OPTIONS(false);
+ handleCFIBadIcall(Data, Function, Opts);
+}
+
+void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *Data,
+ ValueHandle Function) {
+ GET_REPORT_OPTIONS(true);
+ handleCFIBadIcall(Data, Function, Opts);
Die();
}
+
+#endif // CAN_SANITIZE_UB
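As a reference for the macro-generated entry points: UBSAN_OVERFLOW_HANDLER above stamps out the six add/sub/mul handlers. Expanded for __ubsan_handle_add_overflow it is roughly:

void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data, ValueHandle LHS,
                                          ValueHandle RHS) {
  GET_REPORT_OPTIONS(false);   // the *_abort variants pass true here
  handleIntegerOverflowImpl(Data, LHS, "+", Value(Data->Type, RHS), Opts);
  if (false) Die();            // becomes if (true) Die(); in the *_abort variants
}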
diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h
index 226faadc287..668535868e9 100644
--- a/libsanitizer/ubsan/ubsan_handlers.h
+++ b/libsanitizer/ubsan/ubsan_handlers.h
@@ -22,10 +22,14 @@ struct TypeMismatchData {
unsigned char TypeCheckKind;
};
+#define UNRECOVERABLE(checkname, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
+ void __ubsan_handle_ ## checkname( __VA_ARGS__ );
+
#define RECOVERABLE(checkname, ...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
void __ubsan_handle_ ## checkname( __VA_ARGS__ ); \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
void __ubsan_handle_ ## checkname ## _abort( __VA_ARGS__ );
/// \brief Handle a runtime type check failure, caused by either a misaligned
@@ -79,11 +83,9 @@ struct UnreachableData {
};
/// \brief Handle a __builtin_unreachable which is reached.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __ubsan_handle_builtin_unreachable(UnreachableData *Data);
+UNRECOVERABLE(builtin_unreachable, UnreachableData *Data)
/// \brief Handle reaching the end of a value-returning function.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __ubsan_handle_missing_return(UnreachableData *Data);
+UNRECOVERABLE(missing_return, UnreachableData *Data)
struct VLABoundData {
SourceLocation Loc;
@@ -93,14 +95,22 @@ struct VLABoundData {
/// \brief Handle a VLA with a non-positive bound.
RECOVERABLE(vla_bound_not_positive, VLABoundData *Data, ValueHandle Bound)
+// Keeping this around for binary compatibility with (sanitized) programs
+// compiled with older compilers.
struct FloatCastOverflowData {
- // FIXME: SourceLocation Loc;
const TypeDescriptor &FromType;
const TypeDescriptor &ToType;
};
-/// \brief Handle overflow in a conversion to or from a floating-point type.
-RECOVERABLE(float_cast_overflow, FloatCastOverflowData *Data, ValueHandle From)
+struct FloatCastOverflowDataV2 {
+ SourceLocation Loc;
+ const TypeDescriptor &FromType;
+ const TypeDescriptor &ToType;
+};
+
+/// Handle overflow in a conversion to or from a floating-point type.
+/// void *Data is one of FloatCastOverflowData* or FloatCastOverflowDataV2*
+RECOVERABLE(float_cast_overflow, void *Data, ValueHandle From)
struct InvalidValueData {
SourceLocation Loc;
@@ -119,6 +129,31 @@ RECOVERABLE(function_type_mismatch,
FunctionTypeMismatchData *Data,
ValueHandle Val)
+struct NonNullReturnData {
+ SourceLocation Loc;
+ SourceLocation AttrLoc;
+};
+
+/// \brief Handle a null return from a function declared returns_nonnull.
+RECOVERABLE(nonnull_return, NonNullReturnData *Data)
+
+struct NonNullArgData {
+ SourceLocation Loc;
+ SourceLocation AttrLoc;
+ int ArgIndex;
+};
+
+/// \brief Handle passing a null pointer as an argument declared nonnull.
+RECOVERABLE(nonnull_arg, NonNullArgData *Data)
+
+struct CFIBadIcallData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+/// \brief Handle control flow integrity failure for indirect function calls.
+RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
+
}
#endif // UBSAN_HANDLERS_H
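For reference, the RECOVERABLE macro above declares both a recoverable and an aborting entry point per check, while UNRECOVERABLE declares a single NORETURN one. Expanded for vla_bound_not_positive, the declarations become:

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_vla_bound_not_positive(VLABoundData *Data, ValueHandle Bound);
extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN
void __ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data, ValueHandle Bound);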
diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.cc b/libsanitizer/ubsan/ubsan_handlers_cxx.cc
index bb43cc75cfc..b50b4d4636d 100644
--- a/libsanitizer/ubsan/ubsan_handlers_cxx.cc
+++ b/libsanitizer/ubsan/ubsan_handlers_cxx.cc
@@ -11,11 +11,14 @@
//
//===----------------------------------------------------------------------===//
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
#include "ubsan_handlers_cxx.h"
#include "ubsan_diag.h"
#include "ubsan_type_hash.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
using namespace __sanitizer;
using namespace __ubsan;
@@ -26,47 +29,92 @@ namespace __ubsan {
static void HandleDynamicTypeCacheMiss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash,
- bool Abort) {
+ ReportOptions Opts) {
if (checkDynamicType((void*)Pointer, Data->TypeInfo, Hash))
// Just a cache miss. The type matches after all.
return;
+ // Check if error report should be suppressed.
+ DynamicTypeInfo DTI = getDynamicTypeInfoFromObject((void*)Pointer);
+ if (DTI.isValid() && IsVptrCheckSuppressed(DTI.getMostDerivedTypeName()))
+ return;
+
SourceLocation Loc = Data->Loc.acquire();
if (Loc.isDisabled())
return;
+ ScopedReport R(Opts, Loc, ErrorType::DynamicTypeMismatch);
+
Diag(Loc, DL_Error,
"%0 address %1 which does not point to an object of type %2")
<< TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
// If possible, say what type it actually points to.
- DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer);
if (!DTI.isValid())
Diag(Pointer, DL_Note, "object has invalid vptr")
- << MangledName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
else if (!DTI.getOffset())
Diag(Pointer, DL_Note, "object is of type %0")
- << MangledName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
else
// FIXME: Find the type at the specified offset, and include that
// in the note.
Diag(Pointer - DTI.getOffset(), DL_Note,
"object is base class subobject at offset %0 within object of type %1")
- << DTI.getOffset() << MangledName(DTI.getMostDerivedTypeName())
- << MangledName(DTI.getSubobjectTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "vptr for %2 base class of %1");
-
- if (Abort)
- Die();
+ << DTI.getOffset() << TypeName(DTI.getMostDerivedTypeName())
+ << TypeName(DTI.getSubobjectTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr),
+ "vptr for %2 base class of %1");
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, false);
+ GET_REPORT_OPTIONS(false);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, true);
+ GET_REPORT_OPTIONS(true);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
+}
+
+static void HandleCFIBadType(CFIBadTypeData *Data, ValueHandle Vtable,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ScopedReport R(Opts, Loc, ErrorType::CFIBadType);
+ DynamicTypeInfo DTI = getDynamicTypeInfoFromVtable((void*)Vtable);
+
+ static const char *TypeCheckKinds[] = {
+ "virtual call",
+ "non-virtual call",
+ "base-to-derived cast",
+ "cast to unrelated type",
+ };
+
+ Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during "
+ "%1 (vtable address %2)")
+ << Data->Type << TypeCheckKinds[Data->TypeCheckKind] << (void *)Vtable;
+
+ // If possible, say what type it actually points to.
+ if (!DTI.isValid())
+ Diag(Vtable, DL_Note, "invalid vtable");
+ else
+ Diag(Vtable, DL_Note, "vtable is of type %0")
+ << TypeName(DTI.getMostDerivedTypeName());
}
+
+void __ubsan::__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data,
+ ValueHandle Vtable) {
+ GET_REPORT_OPTIONS(false);
+ HandleCFIBadType(Data, Vtable, Opts);
+}
+
+void __ubsan::__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data,
+ ValueHandle Vtable) {
+ GET_REPORT_OPTIONS(true);
+ HandleCFIBadType(Data, Vtable, Opts);
+}
+
+#endif // CAN_SANITIZE_UB
diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.h b/libsanitizer/ubsan/ubsan_handlers_cxx.h
index 3419744e390..b1486864298 100644
--- a/libsanitizer/ubsan/ubsan_handlers_cxx.h
+++ b/libsanitizer/ubsan/ubsan_handlers_cxx.h
@@ -23,6 +23,12 @@ struct DynamicTypeCacheMissData {
unsigned char TypeCheckKind;
};
+struct CFIBadTypeData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+ unsigned char TypeCheckKind;
+};
+
/// \brief Handle a runtime type check failure, caused by an incorrect vptr.
/// When this handler is called, all we know is that the type was not in the
/// cache; this does not necessarily imply the existence of a bug.
@@ -33,6 +39,13 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
+/// \brief Handle a control flow integrity check failure by printing a
+/// diagnostic.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data, ValueHandle Vtable);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data, ValueHandle Vtable);
+
}
#endif // UBSAN_HANDLERS_H
diff --git a/libsanitizer/ubsan/ubsan_init.cc b/libsanitizer/ubsan/ubsan_init.cc
new file mode 100644
index 00000000000..5da4122c07a
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_init.cc
@@ -0,0 +1,76 @@
+//===-- ubsan_init.cc -----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_diag.h"
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __ubsan;
+
+static enum {
+ UBSAN_MODE_UNKNOWN = 0,
+ UBSAN_MODE_STANDALONE,
+ UBSAN_MODE_PLUGIN
+} ubsan_mode;
+static StaticSpinMutex ubsan_init_mu;
+
+static void CommonInit() {
+ InitializeSuppressions();
+}
+
+static void CommonStandaloneInit() {
+ SanitizerToolName = "UndefinedBehaviorSanitizer";
+ InitializeFlags();
+ CacheBinaryName();
+ __sanitizer_set_report_path(common_flags()->log_path);
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+ CommonInit();
+ ubsan_mode = UBSAN_MODE_STANDALONE;
+}
+
+void __ubsan::InitAsStandalone() {
+ if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
+ CHECK_EQ(UBSAN_MODE_UNKNOWN, ubsan_mode);
+ CommonStandaloneInit();
+ return;
+ }
+ SpinMutexLock l(&ubsan_init_mu);
+ CHECK_NE(UBSAN_MODE_PLUGIN, ubsan_mode);
+ if (ubsan_mode == UBSAN_MODE_UNKNOWN)
+ CommonStandaloneInit();
+}
+
+void __ubsan::InitAsStandaloneIfNecessary() {
+ if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
+ CHECK_NE(UBSAN_MODE_UNKNOWN, ubsan_mode);
+ return;
+ }
+ SpinMutexLock l(&ubsan_init_mu);
+ if (ubsan_mode == UBSAN_MODE_UNKNOWN)
+ CommonStandaloneInit();
+}
+
+void __ubsan::InitAsPlugin() {
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+ SpinMutexLock l(&ubsan_init_mu);
+#endif
+ CHECK_EQ(UBSAN_MODE_UNKNOWN, ubsan_mode);
+ CommonInit();
+ ubsan_mode = UBSAN_MODE_PLUGIN;
+}
+
+#endif // CAN_SANITIZE_UB
diff --git a/libsanitizer/ubsan/ubsan_init.h b/libsanitizer/ubsan/ubsan_init.h
new file mode 100644
index 00000000000..6a8366f82bf
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_init.h
@@ -0,0 +1,29 @@
+//===-- ubsan_init.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization function for UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_INIT_H
+#define UBSAN_INIT_H
+
+namespace __ubsan {
+
+// Initialize UBSan as a standalone tool. Typically should be called early
+// during initialization.
+void InitAsStandalone();
+
+// Initialize UBSan as a standalone tool, if it hasn't been initialized before.
+void InitAsStandaloneIfNecessary();
+
+// Initializes UBSan as a plugin tool. This function should be called once
+// from "parent tool" (e.g. ASan) initialization.
+void InitAsPlugin();
+
+} // namespace __ubsan
+
+#endif // UBSAN_INIT_H
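A sketch of the intended plugin use, per the comment above; the surrounding function name and include paths are only illustrative of where a parent tool such as ASan would call this from its own initialization:

#include "ubsan_platform.h"   // for CAN_SANITIZE_UB
#include "ubsan_init.h"

static void ParentToolInitHook() {
#if CAN_SANITIZE_UB
  // Plugin mode runs only CommonInit() (suppressions), so the parent tool's
  // common flags, report path and coverage setup are reused as-is.
  __ubsan::InitAsPlugin();
#endif
}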
diff --git a/libsanitizer/ubsan/ubsan_init_standalone.cc b/libsanitizer/ubsan/ubsan_init_standalone.cc
new file mode 100644
index 00000000000..1630fd724fb
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_init_standalone.cc
@@ -0,0 +1,32 @@
+//===-- ubsan_init_standalone.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if !CAN_SANITIZE_UB
+# error "UBSan is not supported on this platform!"
+#endif
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "ubsan_init.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+__attribute__((section(".preinit_array"), used))
+void (*__local_ubsan_preinit)(void) = __ubsan::InitAsStandalone;
+#else
+// Use a dynamic initializer.
+class UbsanStandaloneInitializer {
+ public:
+ UbsanStandaloneInitializer() {
+ __ubsan::InitAsStandalone();
+ }
+};
+static UbsanStandaloneInitializer ubsan_standalone_initializer;
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h
new file mode 100644
index 00000000000..999bf815493
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_platform.h
@@ -0,0 +1,27 @@
+//===-- ubsan_platform.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the platforms on which UBSan is supported.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_PLATFORM_H
+#define UBSAN_PLATFORM_H
+
+#ifndef CAN_SANITIZE_UB
+// Other platforms should be easy to add, and probably work as-is.
+#if (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) && \
+ (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__))
+# define CAN_SANITIZE_UB 1
+#elif defined(_WIN32)
+# define CAN_SANITIZE_UB 1
+#else
+# define CAN_SANITIZE_UB 0
+#endif
+#endif //CAN_SANITIZE_UB
+
+#endif
diff --git a/libsanitizer/ubsan/ubsan_type_hash.cc b/libsanitizer/ubsan/ubsan_type_hash.cc
index d01009426db..65160aa4aba 100644
--- a/libsanitizer/ubsan/ubsan_type_hash.cc
+++ b/libsanitizer/ubsan/ubsan_type_hash.cc
@@ -9,240 +9,24 @@
// relationships. This file is only linked into C++ compilations, and is
// permitted to use language features which require a C++ ABI library.
//
+// Most of the implementation lives in an ABI-specific source file
+// (ubsan_type_hash_{itanium,win}.cc).
+//
//===----------------------------------------------------------------------===//
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
#include "ubsan_type_hash.h"
#include "sanitizer_common/sanitizer_common.h"
-// The following are intended to be binary compatible with the definitions
-// given in the Itanium ABI. We make no attempt to be ODR-compatible with
-// those definitions, since existing ABI implementations aren't.
-
-namespace std {
- class type_info {
- public:
- virtual ~type_info();
-
- const char *__type_name;
- };
-}
-
-namespace __cxxabiv1 {
-
-/// Type info for classes with no bases, and base class for type info for
-/// classes with bases.
-class __class_type_info : public std::type_info {
- virtual ~__class_type_info();
-};
-
-/// Type info for classes with simple single public inheritance.
-class __si_class_type_info : public __class_type_info {
-public:
- virtual ~__si_class_type_info();
-
- const __class_type_info *__base_type;
-};
-
-class __base_class_type_info {
-public:
- const __class_type_info *__base_type;
- long __offset_flags;
-
- enum __offset_flags_masks {
- __virtual_mask = 0x1,
- __public_mask = 0x2,
- __offset_shift = 8
- };
-};
-
-/// Type info for classes with multiple, virtual, or non-public inheritance.
-class __vmi_class_type_info : public __class_type_info {
-public:
- virtual ~__vmi_class_type_info();
-
- unsigned int flags;
- unsigned int base_count;
- __base_class_type_info base_info[1];
-};
-
-}
-
-namespace abi = __cxxabiv1;
-
-// We implement a simple two-level cache for type-checking results. For each
-// (vptr,type) pair, a hash is computed. This hash is assumed to be globally
-// unique; if it collides, we will get false negatives, but:
-// * such a collision would have to occur on the *first* bad access,
-// * the probability of such a collision is low (and for a 64-bit target, is
-// negligible), and
-// * the vptr, and thus the hash, can be affected by ASLR, so multiple runs
-// give better coverage.
-//
-// The first caching layer is a small hash table with no chaining; buckets are
-// reused as needed. The second caching layer is a large hash table with open
-// chaining. We can freely evict from either layer since this is just a cache.
-//
-// FIXME: Make these hash table accesses thread-safe. The races here are benign:
-// assuming the unsequenced loads and stores don't misbehave too badly,
-// the worst case is false negatives or poor cache behavior, not false
-// positives or crashes.
-
-/// Find a bucket to store the given hash value in.
-static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) {
- static const unsigned HashTableSize = 65537;
- static __ubsan::HashValue __ubsan_vptr_hash_set[HashTableSize];
-
- unsigned First = (V & 65535) ^ 1;
- unsigned Probe = First;
- for (int Tries = 5; Tries; --Tries) {
- if (!__ubsan_vptr_hash_set[Probe] || __ubsan_vptr_hash_set[Probe] == V)
- return &__ubsan_vptr_hash_set[Probe];
- Probe += ((V >> 16) & 65535) + 1;
- if (Probe >= HashTableSize)
- Probe -= HashTableSize;
- }
- // FIXME: Pick a random entry from the probe sequence to evict rather than
- // just taking the first.
- return &__ubsan_vptr_hash_set[First];
-}
-
/// A cache of recently-checked hashes. Mini hash table with "random" evictions.
__ubsan::HashValue
__ubsan::__ubsan_vptr_type_cache[__ubsan::VptrTypeCacheSize];
-/// \brief Determine whether \p Derived has a \p Base base class subobject at
-/// offset \p Offset.
-static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
- const abi::__class_type_info *Base,
- sptr Offset) {
- if (Derived->__type_name == Base->__type_name)
- return Offset == 0;
-
- if (const abi::__si_class_type_info *SI =
- dynamic_cast<const abi::__si_class_type_info*>(Derived))
- return isDerivedFromAtOffset(SI->__base_type, Base, Offset);
-
- const abi::__vmi_class_type_info *VTI =
- dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
- if (!VTI)
- // No base class subobjects.
- return false;
-
- // Look for a base class which is derived from \p Base at the right offset.
- for (unsigned int base = 0; base != VTI->base_count; ++base) {
- // FIXME: Curtail the recursion if this base can't possibly contain the
- // given offset.
- sptr OffsetHere = VTI->base_info[base].__offset_flags >>
- abi::__base_class_type_info::__offset_shift;
- if (VTI->base_info[base].__offset_flags &
- abi::__base_class_type_info::__virtual_mask)
- // For now, just punt on virtual bases and say 'yes'.
- // FIXME: OffsetHere is the offset in the vtable of the virtual base
- // offset. Read the vbase offset out of the vtable and use it.
- return true;
- if (isDerivedFromAtOffset(VTI->base_info[base].__base_type,
- Base, Offset - OffsetHere))
- return true;
- }
-
- return false;
-}
-
-/// \brief Find the derived-most dynamic base class of \p Derived at offset
-/// \p Offset.
-static const abi::__class_type_info *findBaseAtOffset(
- const abi::__class_type_info *Derived, sptr Offset) {
- if (!Offset)
- return Derived;
-
- if (const abi::__si_class_type_info *SI =
- dynamic_cast<const abi::__si_class_type_info*>(Derived))
- return findBaseAtOffset(SI->__base_type, Offset);
-
- const abi::__vmi_class_type_info *VTI =
- dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
- if (!VTI)
- // No base class subobjects.
- return 0;
-
- for (unsigned int base = 0; base != VTI->base_count; ++base) {
- sptr OffsetHere = VTI->base_info[base].__offset_flags >>
- abi::__base_class_type_info::__offset_shift;
- if (VTI->base_info[base].__offset_flags &
- abi::__base_class_type_info::__virtual_mask)
- // FIXME: Can't handle virtual bases yet.
- continue;
- if (const abi::__class_type_info *Base =
- findBaseAtOffset(VTI->base_info[base].__base_type,
- Offset - OffsetHere))
- return Base;
- }
-
- return 0;
-}
-
-namespace {
-
-struct VtablePrefix {
- /// The offset from the vptr to the start of the most-derived object.
- /// This should never be greater than zero, and will usually be exactly
- /// zero.
- sptr Offset;
- /// The type_info object describing the most-derived class type.
- std::type_info *TypeInfo;
-};
-VtablePrefix *getVtablePrefix(void *Object) {
- VtablePrefix **VptrPtr = reinterpret_cast<VtablePrefix**>(Object);
- if (!*VptrPtr)
- return 0;
- VtablePrefix *Prefix = *VptrPtr - 1;
- if (Prefix->Offset > 0 || !Prefix->TypeInfo)
- // This can't possibly be a valid vtable.
- return 0;
- return Prefix;
-}
-
+__ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfoFromObject(void *Object) {
+ void *VtablePtr = *reinterpret_cast<void **>(Object);
+ return getDynamicTypeInfoFromVtable(VtablePtr);
}
-bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
- // A crash anywhere within this function probably means the vptr is corrupted.
- // FIXME: Perform these checks more cautiously.
-
- // Check whether this is something we've evicted from the cache.
- HashValue *Bucket = getTypeCacheHashTableBucket(Hash);
- if (*Bucket == Hash) {
- __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash;
- return true;
- }
-
- VtablePrefix *Vtable = getVtablePrefix(Object);
- if (!Vtable)
- return false;
-
- // Check that this is actually a type_info object for a class type.
- abi::__class_type_info *Derived =
- dynamic_cast<abi::__class_type_info*>(Vtable->TypeInfo);
- if (!Derived)
- return false;
-
- abi::__class_type_info *Base = (abi::__class_type_info*)Type;
- if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset))
- return false;
-
- // Success. Cache this result.
- __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash;
- *Bucket = Hash;
- return true;
-}
-
-__ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfo(void *Object) {
- VtablePrefix *Vtable = getVtablePrefix(Object);
- if (!Vtable)
- return DynamicTypeInfo(0, 0, 0);
- const abi::__class_type_info *ObjectType = findBaseAtOffset(
- static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
- -Vtable->Offset);
- return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset,
- ObjectType ? ObjectType->__type_name : "<unknown>");
-}
+#endif // CAN_SANITIZE_UB
diff --git a/libsanitizer/ubsan/ubsan_type_hash.h b/libsanitizer/ubsan/ubsan_type_hash.h
index 138559f204d..2da070edffa 100644
--- a/libsanitizer/ubsan/ubsan_type_hash.h
+++ b/libsanitizer/ubsan/ubsan_type_hash.h
@@ -39,7 +39,10 @@ public:
};
/// \brief Get information about the dynamic type of an object.
-DynamicTypeInfo getDynamicTypeInfo(void *Object);
+DynamicTypeInfo getDynamicTypeInfoFromObject(void *Object);
+
+/// \brief Get information about the dynamic type of an object from its vtable.
+DynamicTypeInfo getDynamicTypeInfoFromVtable(void *Vtable);
/// \brief Check whether the dynamic type of \p Object has a \p Type subobject
/// at offset 0.
diff --git a/libsanitizer/ubsan/ubsan_type_hash_itanium.cc b/libsanitizer/ubsan/ubsan_type_hash_itanium.cc
new file mode 100644
index 00000000000..e4f133434d6
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_type_hash_itanium.cc
@@ -0,0 +1,249 @@
+//===-- ubsan_type_hash_itanium.cc ----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of type hashing/lookup for Itanium C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB && !SANITIZER_WINDOWS
+#include "ubsan_type_hash.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+// The following are intended to be binary compatible with the definitions
+// given in the Itanium ABI. We make no attempt to be ODR-compatible with
+// those definitions, since existing ABI implementations aren't.
+
+namespace std {
+ class type_info {
+ public:
+ virtual ~type_info();
+
+ const char *__type_name;
+ };
+}
+
+namespace __cxxabiv1 {
+
+/// Type info for classes with no bases, and base class for type info for
+/// classes with bases.
+class __class_type_info : public std::type_info {
+ ~__class_type_info() override;
+};
+
+/// Type info for classes with simple single public inheritance.
+class __si_class_type_info : public __class_type_info {
+public:
+ ~__si_class_type_info() override;
+
+ const __class_type_info *__base_type;
+};
+
+class __base_class_type_info {
+public:
+ const __class_type_info *__base_type;
+ long __offset_flags;
+
+ enum __offset_flags_masks {
+ __virtual_mask = 0x1,
+ __public_mask = 0x2,
+ __offset_shift = 8
+ };
+};
+
+/// Type info for classes with multiple, virtual, or non-public inheritance.
+class __vmi_class_type_info : public __class_type_info {
+public:
+ ~__vmi_class_type_info() override;
+
+ unsigned int flags;
+ unsigned int base_count;
+ __base_class_type_info base_info[1];
+};
+
+}
+
+namespace abi = __cxxabiv1;
+
+// We implement a simple two-level cache for type-checking results. For each
+// (vptr,type) pair, a hash is computed. This hash is assumed to be globally
+// unique; if it collides, we will get false negatives, but:
+// * such a collision would have to occur on the *first* bad access,
+// * the probability of such a collision is low (and for a 64-bit target, is
+// negligible), and
+// * the vptr, and thus the hash, can be affected by ASLR, so multiple runs
+// give better coverage.
+//
+// The first caching layer is a small hash table with no chaining; buckets are
+// reused as needed. The second caching layer is a large hash table with open
+// chaining. We can freely evict from either layer since this is just a cache.
+//
+// FIXME: Make these hash table accesses thread-safe. The races here are benign:
+// assuming the unsequenced loads and stores don't misbehave too badly,
+// the worst case is false negatives or poor cache behavior, not false
+// positives or crashes.
+
+/// Find a bucket to store the given hash value in.
+static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) {
+ static const unsigned HashTableSize = 65537;
+ static __ubsan::HashValue __ubsan_vptr_hash_set[HashTableSize];
+
+ unsigned First = (V & 65535) ^ 1;
+ unsigned Probe = First;
+ for (int Tries = 5; Tries; --Tries) {
+ if (!__ubsan_vptr_hash_set[Probe] || __ubsan_vptr_hash_set[Probe] == V)
+ return &__ubsan_vptr_hash_set[Probe];
+ Probe += ((V >> 16) & 65535) + 1;
+ if (Probe >= HashTableSize)
+ Probe -= HashTableSize;
+ }
+ // FIXME: Pick a random entry from the probe sequence to evict rather than
+ // just taking the first.
+ return &__ubsan_vptr_hash_set[First];
+}
+
+/// \brief Determine whether \p Derived has a \p Base base class subobject at
+/// offset \p Offset.
+static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
+ const abi::__class_type_info *Base,
+ sptr Offset) {
+ if (Derived->__type_name == Base->__type_name)
+ return Offset == 0;
+
+ if (const abi::__si_class_type_info *SI =
+ dynamic_cast<const abi::__si_class_type_info*>(Derived))
+ return isDerivedFromAtOffset(SI->__base_type, Base, Offset);
+
+ const abi::__vmi_class_type_info *VTI =
+ dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
+ if (!VTI)
+ // No base class subobjects.
+ return false;
+
+ // Look for a base class which is derived from \p Base at the right offset.
+ for (unsigned int base = 0; base != VTI->base_count; ++base) {
+ // FIXME: Curtail the recursion if this base can't possibly contain the
+ // given offset.
+ sptr OffsetHere = VTI->base_info[base].__offset_flags >>
+ abi::__base_class_type_info::__offset_shift;
+ if (VTI->base_info[base].__offset_flags &
+ abi::__base_class_type_info::__virtual_mask)
+ // For now, just punt on virtual bases and say 'yes'.
+ // FIXME: OffsetHere is the offset in the vtable of the virtual base
+ // offset. Read the vbase offset out of the vtable and use it.
+ return true;
+ if (isDerivedFromAtOffset(VTI->base_info[base].__base_type,
+ Base, Offset - OffsetHere))
+ return true;
+ }
+
+ return false;
+}
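
Each __base_class_type_info entry above packs the base-class offset together
with the virtual/public flags into a single __offset_flags word; the shift and
mask operations in isDerivedFromAtOffset decode it. A minimal sketch of that
decoding, using a made-up flags value rather than real ABI data:

// Decode an Itanium-ABI __offset_flags word (illustrative value; the masks and
// shift match __base_class_type_info::__offset_flags_masks above).
#include <cstdio>

int main() {
  long offset_flags = (16L << 8) | 0x2;  // base at offset 16, public, non-virtual
  long offset     = offset_flags >> 8;             // __offset_shift
  int  is_virtual = (offset_flags & 0x1) != 0;     // __virtual_mask
  int  is_public  = (offset_flags & 0x2) != 0;     // __public_mask
  std::printf("offset=%ld virtual=%d public=%d\n", offset, is_virtual, is_public);
  // For a virtual base the decoded "offset" is really the location of the
  // vbase offset inside the vtable, which is why the code above punts and
  // conservatively answers "yes" for virtual bases.
}
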
+
+/// \brief Find the derived-most dynamic base class of \p Derived at offset
+/// \p Offset.
+static const abi::__class_type_info *findBaseAtOffset(
+ const abi::__class_type_info *Derived, sptr Offset) {
+ if (!Offset)
+ return Derived;
+
+ if (const abi::__si_class_type_info *SI =
+ dynamic_cast<const abi::__si_class_type_info*>(Derived))
+ return findBaseAtOffset(SI->__base_type, Offset);
+
+ const abi::__vmi_class_type_info *VTI =
+ dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
+ if (!VTI)
+ // No base class subobjects.
+ return 0;
+
+ for (unsigned int base = 0; base != VTI->base_count; ++base) {
+ sptr OffsetHere = VTI->base_info[base].__offset_flags >>
+ abi::__base_class_type_info::__offset_shift;
+ if (VTI->base_info[base].__offset_flags &
+ abi::__base_class_type_info::__virtual_mask)
+ // FIXME: Can't handle virtual bases yet.
+ continue;
+ if (const abi::__class_type_info *Base =
+ findBaseAtOffset(VTI->base_info[base].__base_type,
+ Offset - OffsetHere))
+ return Base;
+ }
+
+ return 0;
+}
+
+namespace {
+
+struct VtablePrefix {
+ /// The offset from the vptr to the start of the most-derived object.
+ /// This will only be greater than zero in some virtual base class vtables
+ /// used during object con-/destruction, and will usually be exactly zero.
+ sptr Offset;
+ /// The type_info object describing the most-derived class type.
+ std::type_info *TypeInfo;
+};
+VtablePrefix *getVtablePrefix(void *Vtable) {
+ VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable);
+ if (!Vptr)
+ return 0;
+ VtablePrefix *Prefix = Vptr - 1;
+ if (!Prefix->TypeInfo)
+ // This can't possibly be a valid vtable.
+ return 0;
+ return Prefix;
+}
+
+}
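
getVtablePrefix relies on the Itanium C++ ABI vtable layout: the two words
immediately before the address stored in an object's vptr are the
offset-to-top and the RTTI pointer. The sketch below reads that prefix from a
live object; it assumes an Itanium-ABI target (GCC/Clang on Linux or Darwin),
is not portable C++, and uses hypothetical type names:

// Read the vtable prefix of a polymorphic object (Itanium C++ ABI only).
#include <cstdio>
#include <typeinfo>

struct Base    { virtual ~Base() {} };
struct Derived : Base { int x; };

struct Prefix {
  long Offset;                     // offset-to-top; 0 for a most-derived object
  const std::type_info *TypeInfo;  // dynamic type of the complete object
};

int main() {
  Derived d;
  Base *b = &d;
  void *vptr = *reinterpret_cast<void **>(b);  // vptr is the object's first word
  const Prefix *p = reinterpret_cast<const Prefix *>(vptr) - 1;
  std::printf("offset-to-top=%ld type=%s\n", p->Offset, p->TypeInfo->name());
  // Expected output on such targets: "offset-to-top=0 type=7Derived"
  // (name() returns the mangled name).
}
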
+
+bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
+ // A crash anywhere within this function probably means the vptr is corrupted.
+ // FIXME: Perform these checks more cautiously.
+
+ // Check whether this is something we've evicted from the cache.
+ HashValue *Bucket = getTypeCacheHashTableBucket(Hash);
+ if (*Bucket == Hash) {
+ __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash;
+ return true;
+ }
+
+ void *VtablePtr = *reinterpret_cast<void **>(Object);
+ VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
+ if (!Vtable)
+ return false;
+
+ // Check that this is actually a type_info object for a class type.
+ abi::__class_type_info *Derived =
+ dynamic_cast<abi::__class_type_info*>(Vtable->TypeInfo);
+ if (!Derived)
+ return false;
+
+ abi::__class_type_info *Base = (abi::__class_type_info*)Type;
+ if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset))
+ return false;
+
+ // Success. Cache this result.
+ __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash;
+ *Bucket = Hash;
+ return true;
+}
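
checkDynamicType is what the -fsanitize=vptr instrumentation consults: it
answers whether the object really has (a type derived from) the static type
used to access it, at the right offset. The kind of mismatch it exists to
catch can be shown with plain RTTI; the sketch below uses only typeid and does
not call the ubsan runtime:

// A dynamic-type mismatch of the sort -fsanitize=vptr reports.
#include <cstdio>
#include <typeinfo>

struct Base     { virtual ~Base() {} };
struct DerivedA : Base { int a; };
struct DerivedB : Base { int b; };

int main() {
  DerivedA a;
  Base *p = &a;
  // A static_cast<DerivedB *>(p) followed by a member access is exactly the
  // situation the vptr check flags: the dynamic type of *p is DerivedA.
  std::printf("dynamic type of *p: %s\n", typeid(*p).name());
  std::printf("is DerivedB: %d\n", (int)(typeid(*p) == typeid(DerivedB)));
}
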
+
+__ubsan::DynamicTypeInfo
+__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
+ VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
+ if (!Vtable)
+ return DynamicTypeInfo(0, 0, 0);
+ const abi::__class_type_info *ObjectType = findBaseAtOffset(
+ static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
+ -Vtable->Offset);
+ return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset,
+ ObjectType ? ObjectType->__type_name : "<unknown>");
+}
+
+#endif // CAN_SANITIZE_UB && !SANITIZER_WINDOWS
diff --git a/libsanitizer/ubsan/ubsan_type_hash_win.cc b/libsanitizer/ubsan/ubsan_type_hash_win.cc
new file mode 100644
index 00000000000..a2eb1a71cb9
--- /dev/null
+++ b/libsanitizer/ubsan/ubsan_type_hash_win.cc
@@ -0,0 +1,79 @@
+//===-- ubsan_type_hash_win.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of type hashing/lookup for Microsoft C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB && SANITIZER_WINDOWS
+#include "ubsan_type_hash.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <typeinfo>
+
+struct CompleteObjectLocator {
+ int is_image_relative;
+ int offset_to_top;
+ int vfptr_offset;
+ int rtti_addr;
+ int chd_addr;
+ int obj_locator_addr;
+};
+
+struct CompleteObjectLocatorAbs {
+ int is_image_relative;
+ int offset_to_top;
+ int vfptr_offset;
+ std::type_info *rtti_addr;
+ void *chd_addr;
+ CompleteObjectLocator *obj_locator_addr;
+};
+
+bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
+ // FIXME: Implement.
+ return false;
+}
+
+__ubsan::DynamicTypeInfo
+__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
+ // The virtual table may not have a complete object locator if the object
+ // was compiled without RTTI (i.e. we might be reading from some other global
+ // laid out before the virtual table), so we need to carefully validate each
+ // pointer dereference and perform sanity checks.
+ CompleteObjectLocator **obj_locator_ptr =
+ ((CompleteObjectLocator**)VtablePtr)-1;
+ if (!IsAccessibleMemoryRange((uptr)obj_locator_ptr, sizeof(void*)))
+ return DynamicTypeInfo(0, 0, 0);
+
+ CompleteObjectLocator *obj_locator = *obj_locator_ptr;
+ if (!IsAccessibleMemoryRange((uptr)obj_locator,
+ sizeof(CompleteObjectLocator)))
+ return DynamicTypeInfo(0, 0, 0);
+
+ std::type_info *tinfo;
+ if (obj_locator->is_image_relative == 1) {
+ char *image_base = ((char *)obj_locator) - obj_locator->obj_locator_addr;
+ tinfo = (std::type_info *)(image_base + obj_locator->rtti_addr);
+ } else if (obj_locator->is_image_relative == 0)
+ tinfo = ((CompleteObjectLocatorAbs *)obj_locator)->rtti_addr;
+ else
+ // Probably not a complete object locator.
+ return DynamicTypeInfo(0, 0, 0);
+
+ if (!IsAccessibleMemoryRange((uptr)tinfo, sizeof(std::type_info)))
+ return DynamicTypeInfo(0, 0, 0);
+
+ // Okay, this is probably a std::type_info. Request its name.
+ // FIXME: Implement a base class search like we do for Itanium.
+ return DynamicTypeInfo(tinfo->name(), obj_locator->offset_to_top,
+ "<unknown>");
+}
+
+#endif // CAN_SANITIZE_UB && SANITIZER_WINDOWS
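
Under the Microsoft ABI, the pointer stored immediately before a vftable is
the "complete object locator", which is what the structs and
getDynamicTypeInfoFromVtable above walk while validating every dereference.
The sketch below performs the same navigation for the image-relative (64-bit)
layout only; it is MSVC-specific, omits the IsAccessibleMemoryRange guards,
and its struct and field names are local stand-ins:

// Walk from an object's vftable to its RTTI locator (MSVC x64 ABI only).
#include <cstdio>
#include <typeinfo>

struct Base    { virtual ~Base() {} };
struct Derived : Base {};

// Image-relative locator layout, mirroring CompleteObjectLocator above.
struct Locator {
  int is_image_relative;  // 1 on x64, 0 on x86
  int offset_to_top;      // offset of this vftable's subobject in the object
  int vfptr_offset;
  int rtti_addr;          // image-relative offset of the std::type_info
  int chd_addr;           // image-relative offset of the class hierarchy descriptor
  int obj_locator_addr;   // image-relative offset of this locator itself
};

int main() {
  Derived d;
  void **vftable = *reinterpret_cast<void ***>(&d);
  const Locator *loc = reinterpret_cast<const Locator *>(vftable[-1]);
  if (!loc->is_image_relative)
    // 32-bit locators hold absolute pointers (CompleteObjectLocatorAbs above);
    // that case is not handled in this short sketch.
    return 0;
  const char *image_base =
      reinterpret_cast<const char *>(loc) - loc->obj_locator_addr;
  const std::type_info *ti =
      reinterpret_cast<const std::type_info *>(image_base + loc->rtti_addr);
  std::printf("offset-to-top=%d type=%s\n", loc->offset_to_top, ti->name());
}
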
diff --git a/libsanitizer/ubsan/ubsan_value.cc b/libsanitizer/ubsan/ubsan_value.cc
index 141e8b53504..e327f6ffc2e 100644
--- a/libsanitizer/ubsan/ubsan_value.cc
+++ b/libsanitizer/ubsan/ubsan_value.cc
@@ -10,6 +10,8 @@
//
//===----------------------------------------------------------------------===//
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
#include "ubsan_value.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -79,7 +81,14 @@ FloatMax Value::getFloatValue() const {
#endif
case 32: {
float Value;
- internal_memcpy(&Value, &Val, 4);
+#if defined(__BIG_ENDIAN__)
+ // For big endian the float value is in the last 4 bytes.
+ // On some targets we may only have 4 bytes so we count backwards from
+ // the end of Val to account for both the 32-bit and 64-bit cases.
+ internal_memcpy(&Value, ((const char*)(&Val + 1)) - 4, 4);
+#else
+ internal_memcpy(&Value, &Val, 4);
+#endif
return Value;
}
case 64: {
@@ -92,8 +101,11 @@ FloatMax Value::getFloatValue() const {
switch (getType().getFloatBitWidth()) {
case 64: return *reinterpret_cast<double*>(Val);
case 80: return *reinterpret_cast<long double*>(Val);
+ case 96: return *reinterpret_cast<long double*>(Val);
case 128: return *reinterpret_cast<long double*>(Val);
}
}
UNREACHABLE("unexpected floating point bit width");
}
+
+#endif // CAN_SANITIZE_UB
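
The __BIG_ENDIAN__ branch added above is needed because a 32-bit float reaches
the ubsan runtime widened into a larger (pointer-sized or 64-bit) slot, and on
a big-endian target its four bytes sit at the end of that slot rather than the
beginning. A self-contained sketch of both extraction paths (the endianness
test here is broadened to the usual compiler macros):

// Extract a float stored in the low 32 value bits of a 64-bit slot,
// working on either endianness.
#include <cstdint>
#include <cstdio>
#include <cstring>

static float extractFloat(const uint64_t &Val) {
  float F;
#if defined(__BIG_ENDIAN__) || \
    (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
  // Big endian: the float's bytes are the *last* four bytes of the slot,
  // so count backwards from the end of Val.
  std::memcpy(&F, reinterpret_cast<const char *>(&Val + 1) - 4, 4);
#else
  // Little endian: the float's bytes are the first four bytes of the slot.
  std::memcpy(&F, &Val, 4);
#endif
  return F;
}

int main() {
  float In = 3.5f;
  uint32_t Bits;
  std::memcpy(&Bits, &In, 4);          // bit pattern of the float
  uint64_t Slot = Bits;                // zero-extended, as the runtime stores it
  std::printf("%g\n", extractFloat(Slot));  // prints 3.5 on either endianness
}
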
diff --git a/libsanitizer/ubsan/ubsan_value.h b/libsanitizer/ubsan/ubsan_value.h
index 6ca0f56c99d..cca1ac85b77 100644
--- a/libsanitizer/ubsan/ubsan_value.h
+++ b/libsanitizer/ubsan/ubsan_value.h
@@ -12,12 +12,6 @@
#ifndef UBSAN_VALUE_H
#define UBSAN_VALUE_H
-// For now, only support linux and darwin. Other platforms should be easy to
-// add, and probably work as-is.
-#if !defined(__linux__) && !defined(__APPLE__)
-#error "UBSan not supported for this platform!"
-#endif
-
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -30,7 +24,6 @@ __extension__ typedef unsigned __int128 u128;
#define HAVE_INT128_T 0
#endif
-
namespace __ubsan {
/// \brief Largest integer types we support.
diff --git a/libstdc++-v3/include/bits/regex_compiler.h b/libstdc++-v3/include/bits/regex_compiler.h
index af76f55054a..cd95d6db167 100644
--- a/libstdc++-v3/include/bits/regex_compiler.h
+++ b/libstdc++-v3/include/bits/regex_compiler.h
@@ -409,8 +409,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
typedef typename is_same<_CharT, char>::type _IsChar;
struct _Dummy { };
+
typedef typename conditional<_IsChar::value,
- std::bitset<1 << (8 * sizeof(_CharT))>,
+ std::bitset<1 << (sizeof(_CharT) * __CHAR_BIT__ * int(_IsChar::value))>,
_Dummy>::type _CacheT;
typedef typename make_unsigned<_CharT>::type _UnsignedCharT;