author     H.J. Lu <hjl.tools@gmail.com>  2021-10-06 10:24:24 -0700
committer  H.J. Lu <hjl.tools@gmail.com>  2021-10-06 13:08:47 -0700
commit     2e3d50c09519d1b4899845b21843bae66ecffc2f (patch)
tree       1c5ac48bcb8a3893f929c68d5e470913d4d31f97 /libsanitizer
parent     libstdc++: Implement std::move_only_function for C++23 (P0288R9) (diff)
libsanitizer: Merge with upstream
Merged revision: fdf4c035225de52f596899931b1f6100e5e3e928
Diffstat (limited to 'libsanitizer')
-rw-r--r--  libsanitizer/MERGE  |  2
-rw-r--r--  libsanitizer/asan/asan_allocator.cpp  |  15
-rw-r--r--  libsanitizer/asan/asan_allocator.h  |  2
-rw-r--r--  libsanitizer/asan/asan_debugging.cpp  |  5
-rw-r--r--  libsanitizer/asan/asan_globals.cpp  |  19
-rw-r--r--  libsanitizer/asan/asan_interceptors.h  |  7
-rw-r--r--  libsanitizer/asan/asan_mapping.h  |  2
-rw-r--r--  libsanitizer/asan/asan_stats.cpp  |  4
-rw-r--r--  libsanitizer/hwasan/hwasan.cpp  |  4
-rw-r--r--  libsanitizer/hwasan/hwasan_setjmp_x86_64.S  |  2
-rw-r--r--  libsanitizer/lsan/lsan_common.cpp  |  56
-rw-r--r--  libsanitizer/lsan/lsan_common.h  |  3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_asm.h  |  4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp  |  11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.h  |  9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S  |  1
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S  |  1
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp  |  20
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_hash.h  |  24
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp  |  4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cpp  |  12
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.h  |  20
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h  |  4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp  |  21
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h  |  16
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp  |  5
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h  |  2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp  |  49
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepot.h  |  10
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h  |  9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp  |  17
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.h  |  6
-rw-r--r--  libsanitizer/tsan/tsan_interceptors_posix.cpp  |  13
-rw-r--r--  libsanitizer/tsan/tsan_interface.cpp  |  5
-rw-r--r--  libsanitizer/tsan/tsan_platform_linux.cpp  |  18
-rw-r--r--  libsanitizer/tsan/tsan_platform_mac.cpp  |  62
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cpp  |  8
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h  |  9
-rw-r--r--  libsanitizer/tsan/tsan_rtl_amd64.S  |  6
-rw-r--r--  libsanitizer/tsan/tsan_rtl_ppc64.S  |  1
-rw-r--r--  libsanitizer/ubsan/ubsan_flags.cpp  |  1
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers.cpp  |  15
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers.h  |  8
-rw-r--r--  libsanitizer/ubsan/ubsan_platform.h  |  2
44 files changed, 257 insertions, 257 deletions
diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index 2094a8beb3e..5ea083a693a 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-1c2e5fd66ea27d0c51360ba4e22099124a915562
+fdf4c035225de52f596899931b1f6100e5e3e928
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
diff --git a/libsanitizer/asan/asan_allocator.cpp b/libsanitizer/asan/asan_allocator.cpp
index 414fba3b427..268feac59dd 100644
--- a/libsanitizer/asan/asan_allocator.cpp
+++ b/libsanitizer/asan/asan_allocator.cpp
@@ -908,13 +908,6 @@ AllocType AsanChunkView::GetAllocType() const {
   return (AllocType)chunk_->alloc_type;
 }
 
-static StackTrace GetStackTraceFromId(u32 id) {
-  CHECK(id);
-  StackTrace res = StackDepotGet(id);
-  CHECK(res.trace);
-  return res;
-}
-
 u32 AsanChunkView::GetAllocStackId() const {
   u32 tid = 0;
   u32 stack = 0;
@@ -931,14 +924,6 @@ u32 AsanChunkView::GetFreeStackId() const {
   return stack;
 }
 
-StackTrace AsanChunkView::GetAllocStack() const {
-  return GetStackTraceFromId(GetAllocStackId());
-}
-
-StackTrace AsanChunkView::GetFreeStack() const {
-  return GetStackTraceFromId(GetFreeStackId());
-}
-
 void InitializeAllocator(const AllocatorOptions &options) {
   instance.InitLinkerInitialized(options);
 }
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 2963e979b55..27d826fb613 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -64,8 +64,6 @@ class AsanChunkView {
   bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
   u32 GetAllocStackId() const;
   u32 GetFreeStackId() const;
-  StackTrace GetAllocStack() const;
-  StackTrace GetFreeStack() const;
   AllocType GetAllocType() const;
   bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
     if (addr >= Beg() && (addr + access_size) <= End()) {
diff --git a/libsanitizer/asan/asan_debugging.cpp b/libsanitizer/asan/asan_debugging.cpp
index c01360b52fc..0b4bf52f249 100644
--- a/libsanitizer/asan/asan_debugging.cpp
+++ b/libsanitizer/asan/asan_debugging.cpp
@@ -19,6 +19,7 @@
 #include "asan_mapping.h"
 #include "asan_report.h"
 #include "asan_thread.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 
 namespace {
 using namespace __asan;
@@ -54,11 +55,11 @@ uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
   StackTrace stack(nullptr, 0);
   if (alloc_stack) {
     if (chunk.AllocTid() == kInvalidTid) return 0;
-    stack = chunk.GetAllocStack();
+    stack = StackDepotGet(chunk.GetAllocStackId());
     if (thread_id) *thread_id = chunk.AllocTid();
   } else {
     if (chunk.FreeTid() == kInvalidTid) return 0;
-    stack = chunk.GetFreeStack();
+    stack = StackDepotGet(chunk.GetFreeStackId());
     if (thread_id) *thread_id = chunk.FreeTid();
   }
 
diff --git a/libsanitizer/asan/asan_globals.cpp b/libsanitizer/asan/asan_globals.cpp
index 763d3c6d2c0..9bf378f6207 100644
--- a/libsanitizer/asan/asan_globals.cpp
+++ b/libsanitizer/asan/asan_globals.cpp
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index 105c672cc24..047b044c8bf 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-     || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #   define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 4b0037fced3..e5a7f2007ae 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -165,7 +165,7 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
 static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
 static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
 static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43;  // 0x80000000000
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
diff --git a/libsanitizer/asan/asan_stats.cpp b/libsanitizer/asan/asan_stats.cpp
index 41827610833..9a715ea76fe 100644
--- a/libsanitizer/asan/asan_stats.cpp
+++ b/libsanitizer/asan/asan_stats.cpp
@@ -124,9 +124,9 @@ static void PrintAccumulatedStats() {
   // Use lock to keep reports from mixing up.
   Lock lock(&print_lock);
   stats.Print();
-  StackDepotStats *stack_depot_stats = StackDepotGetStats();
+  StackDepotStats stack_depot_stats = StackDepotGetStats();
   Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
-         stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
+         stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
   PrintInternalAllocatorStats();
 }
 
diff --git a/libsanitizer/hwasan/hwasan.cpp b/libsanitizer/hwasan/hwasan.cpp
index 46541902212..e8ffbbd6f48 100644
--- a/libsanitizer/hwasan/hwasan.cpp
+++ b/libsanitizer/hwasan/hwasan.cpp
@@ -141,7 +141,7 @@ static void CheckUnwind() {
 static void HwasanFormatMemoryUsage(InternalScopedString &s) {
   HwasanThreadList &thread_list = hwasanThreadList();
   auto thread_stats = thread_list.GetThreadStats();
-  auto *sds = StackDepotGetStats();
+  auto sds = StackDepotGetStats();
   AllocatorStatCounters asc;
   GetAllocatorStats(asc);
   s.append(
@@ -151,7 +151,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
       internal_getpid(), GetRSS(), thread_stats.n_live_threads,
       thread_stats.total_stack_size,
       thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
-      sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]);
+      sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]);
 }
 
 #if SANITIZER_ANDROID
diff --git a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
index 84512d10b23..7566c1ea0a5 100644
--- a/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
+++ b/libsanitizer/hwasan/hwasan_setjmp_x86_64.S
@@ -35,6 +35,7 @@
 ASM_TYPE_FUNCTION(__interceptor_setjmp)
 __interceptor_setjmp:
   CFI_STARTPROC
+  _CET_ENDBR
   xorl %esi, %esi
   jmp __interceptor_sigsetjmp
   CFI_ENDPROC
@@ -44,6 +45,7 @@ ASM_SIZE(__interceptor_setjmp)
 ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
 __interceptor_sigsetjmp:
   CFI_STARTPROC
+  _CET_ENDBR
 
   // Save callee save registers.
   mov %rbx, (0*8)(%rdi)
diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp
index 96a487e037c..5f8fc5be417 100644
--- a/libsanitizer/lsan/lsan_common.cpp
+++ b/libsanitizer/lsan/lsan_common.cpp
@@ -79,7 +79,8 @@ class LeakSuppressionContext {
                          int suppression_types_num)
       : context(supprression_types, suppression_types_num) {}
 
-  Suppression *GetSuppressionForStack(u32 stack_trace_id);
+  Suppression *GetSuppressionForStack(u32 stack_trace_id,
+                                      const StackTrace &stack);
 
   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
     if (!suppressed_stacks_sorted) {
@@ -477,9 +478,7 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
   }
 }
 
-static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
-  CHECK(stack_id);
-  StackTrace stack = map->Get(stack_id);
+static uptr GetCallerPC(const StackTrace &stack) {
   // The top frame is our malloc/calloc/etc. The next frame is the caller.
   if (stack.size >= 2)
     return stack.trace[1];
@@ -488,7 +487,7 @@ static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
 
 struct InvalidPCParam {
   Frontier *frontier;
-  StackDepotReverseMap *stack_depot_reverse_map;
+  const StackDepotReverseMap *stack_depot;
   bool skip_linker_allocations;
 };
 
@@ -503,7 +502,7 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
   u32 stack_id = m.stack_trace_id();
   uptr caller_pc = 0;
   if (stack_id > 0)
-    caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+    caller_pc = GetCallerPC(param->stack_depot->Get(stack_id));
   // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
   // it as reachable, as we can't properly report its allocation stack anyway.
   if (caller_pc == 0 || (param->skip_linker_allocations &&
@@ -534,11 +533,11 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
 // which we don't care about).
 // On all other platforms, this simply checks to ensure that the caller pc is
 // valid before reporting chunks as leaked.
-void ProcessPC(Frontier *frontier) {
-  StackDepotReverseMap stack_depot_reverse_map;
+static void ProcessPC(Frontier *frontier,
+                      const StackDepotReverseMap &stack_depot) {
   InvalidPCParam arg;
   arg.frontier = frontier;
-  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+  arg.stack_depot = &stack_depot;
   arg.skip_linker_allocations =
       flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
   ForEachChunk(MarkInvalidPCCb, &arg);
@@ -546,6 +545,7 @@ void ProcessPC(Frontier *frontier) {
 
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
+                              const StackDepotReverseMap &stack_depot,
                               Frontier *frontier) {
   const InternalMmapVector<u32> &suppressed_stacks =
       GetSuppressionContext()->GetSortedSuppressedStacks();
@@ -560,7 +560,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
   FloodFillTag(frontier, kReachable);
 
   CHECK_EQ(0, frontier->size());
-  ProcessPC(frontier);
+  ProcessPC(frontier, stack_depot);
 
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
@@ -584,11 +584,6 @@ static void ResetTagsCb(uptr chunk, void *arg) {
   m.set_tag(kDirectlyLeaked);
 }
 
-static void PrintStackTraceById(u32 stack_trace_id) {
-  CHECK(stack_trace_id);
-  StackDepotGet(stack_trace_id).Print();
-}
-
 // ForEachChunk callback. Aggregates information about unreachable chunks into
 // a LeakReport.
 static void CollectLeaksCb(uptr chunk, void *arg) {
@@ -598,16 +593,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
   LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
-    u32 resolution = flags()->resolution;
-    u32 stack_trace_id = 0;
-    if (resolution > 0) {
-      StackTrace stack = StackDepotGet(m.stack_trace_id());
-      stack.size = Min(stack.size, resolution);
-      stack_trace_id = StackDepotPut(stack);
-    } else {
-      stack_trace_id = m.stack_trace_id();
-    }
-    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+    leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
                                 m.tag());
   }
 }
@@ -668,7 +654,8 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(param);
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
-  ClassifyAllChunks(suspended_threads, &param->frontier);
+  ClassifyAllChunks(suspended_threads, param->leak_report.stack_depot(),
+                    &param->frontier);
   ForEachChunk(CollectLeaksCb, &param->leak_report);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
@@ -780,9 +767,8 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
 }
 
 Suppression *LeakSuppressionContext::GetSuppressionForStack(
-    u32 stack_trace_id) {
+    u32 stack_trace_id, const StackTrace &stack) {
   LazyInit();
-  StackTrace stack = StackDepotGet(stack_trace_id);
   for (uptr i = 0; i < stack.size; i++) {
     Suppression *s = GetSuppressionForAddr(
         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
@@ -807,6 +793,13 @@ const uptr kMaxLeaksConsidered = 5000;
 void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                 uptr leaked_size, ChunkTag tag) {
   CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+
+  if (u32 resolution = flags()->resolution) {
+    StackTrace stack = stack_depot_.Get(stack_trace_id);
+    stack.size = Min(stack.size, resolution);
+    stack_trace_id = StackDepotPut(stack);
+  }
+
   bool is_directly_leaked = (tag == kDirectlyLeaked);
   uptr i;
   for (i = 0; i < leaks_.size(); i++) {
@@ -869,7 +862,8 @@ void LeakReport::PrintReportForLeak(uptr index) {
          leaks_[index].total_size, leaks_[index].hit_count);
   Printf("%s", d.Default());
 
-  PrintStackTraceById(leaks_[index].stack_trace_id);
+  CHECK(leaks_[index].stack_trace_id);
+  stack_depot_.Get(leaks_[index].stack_trace_id).Print();
 
   if (flags()->report_objects) {
     Printf("Objects leaked above:\n");
@@ -905,8 +899,8 @@ uptr LeakReport::ApplySuppressions() {
   LeakSuppressionContext *suppressions = GetSuppressionContext();
   uptr new_suppressions = false;
   for (uptr i = 0; i < leaks_.size(); i++) {
-    Suppression *s =
-        suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
+    Suppression *s = suppressions->GetSuppressionForStack(
+        leaks_[i].stack_trace_id, stack_depot_.Get(leaks_[i].stack_trace_id));
     if (s) {
       s->weight += leaks_[i].total_size;
       atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index 776ca60b1e9..c15df1bfa71 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -18,6 +18,7 @@
18#include "sanitizer_common/sanitizer_common.h" 18#include "sanitizer_common/sanitizer_common.h"
19#include "sanitizer_common/sanitizer_internal_defs.h" 19#include "sanitizer_common/sanitizer_internal_defs.h"
20#include "sanitizer_common/sanitizer_platform.h" 20#include "sanitizer_common/sanitizer_platform.h"
21#include "sanitizer_common/sanitizer_stackdepot.h"
21#include "sanitizer_common/sanitizer_stoptheworld.h" 22#include "sanitizer_common/sanitizer_stoptheworld.h"
22#include "sanitizer_common/sanitizer_symbolizer.h" 23#include "sanitizer_common/sanitizer_symbolizer.h"
23 24
@@ -107,12 +108,14 @@ class LeakReport {
   uptr ApplySuppressions();
   uptr UnsuppressedLeakCount();
   uptr IndirectUnsuppressedLeakCount();
+  const StackDepotReverseMap &stack_depot() { return stack_depot_; }
 
  private:
   void PrintReportForLeak(uptr index);
   void PrintLeakedObjectsForLeak(uptr index);
 
   u32 next_id_ = 0;
+  StackDepotReverseMap stack_depot_;
   InternalMmapVector<Leak> leaks_;
   InternalMmapVector<LeakedObject> leaked_objects_;
 };
diff --git a/libsanitizer/sanitizer_common/sanitizer_asm.h b/libsanitizer/sanitizer_common/sanitizer_asm.h
index b544542c26a..6b861203ac2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_asm.h
+++ b/libsanitizer/sanitizer_common/sanitizer_asm.h
@@ -66,3 +66,7 @@
 #else
 #define NO_EXEC_STACK_DIRECTIVE
 #endif
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <cet.h>
+#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp b/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp
index 250ac39e130..7fe9cd78d1d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -14,7 +14,7 @@
 namespace __sanitizer {
 
 bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
-    u32 hash, const args_type &args) const {
+    hash_type hash, const args_type &args) const {
   return here_id == args.here_id && prev_id == args.prev_id;
 }
 
@@ -36,7 +36,8 @@ uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
    split, or one of two reserved values (-1) or (-2). Either case can
    dominate depending on the workload.
 */
-u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
+ChainedOriginDepot::ChainedOriginDepotNode::hash_type
+ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
   const u32 m = 0x5bd1e995;
   const u32 seed = 0x9747b28c;
   const u32 r = 24;
@@ -67,7 +68,7 @@ bool ChainedOriginDepot::ChainedOriginDepotNode::is_valid(
 }
 
 void ChainedOriginDepot::ChainedOriginDepotNode::store(const args_type &args,
-                                                        u32 other_hash) {
+                                                        hash_type other_hash) {
   here_id = args.here_id;
   prev_id = args.prev_id;
 }
@@ -85,7 +86,9 @@ ChainedOriginDepot::ChainedOriginDepotNode::get_handle() {
 
 ChainedOriginDepot::ChainedOriginDepot() {}
 
-StackDepotStats *ChainedOriginDepot::GetStats() { return depot.GetStats(); }
+StackDepotStats ChainedOriginDepot::GetStats() const {
+  return depot.GetStats();
+}
 
 bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
   ChainedOriginDepotDesc desc = {here_id, prev_id};
diff --git a/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.h b/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.h
index 453cdf6b544..73a10e114f9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.h
+++ b/libsanitizer/sanitizer_common/sanitizer_chained_origin_depot.h
@@ -22,7 +22,7 @@ class ChainedOriginDepot {
   ChainedOriginDepot();
 
   // Gets the statistic of the origin chain storage.
-  StackDepotStats *GetStats();
+  StackDepotStats GetStats() const;
 
   // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
   // If successful, returns true and the new chain id new_id.
@@ -43,6 +43,7 @@ class ChainedOriginDepot {
   };
 
   struct ChainedOriginDepotNode {
+    using hash_type = u32;
     ChainedOriginDepotNode *link;
     u32 id;
     u32 here_id;
@@ -50,15 +51,15 @@ class ChainedOriginDepot {
 
     typedef ChainedOriginDepotDesc args_type;
 
-    bool eq(u32 hash, const args_type &args) const;
+    bool eq(hash_type hash, const args_type &args) const;
 
     static uptr storage_size(const args_type &args);
 
-    static u32 hash(const args_type &args);
+    static hash_type hash(const args_type &args);
 
     static bool is_valid(const args_type &args);
 
-    void store(const args_type &args, u32 other_hash);
+    void store(const args_type &args, hash_type other_hash);
 
     args_type load() const;
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
index ed693819c6d..f60b05d157b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
@@ -6,6 +6,7 @@
 .globl ASM_WRAPPER_NAME(vfork)
 ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
 ASM_WRAPPER_NAME(vfork):
+  _CET_ENDBR
   // Store return address in the spill area and tear down the stack frame.
   sub $12, %esp
   call COMMON_INTERCEPTOR_SPILL_AREA
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
index 8147cdd0924..8fd18ea67ff 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
@@ -6,6 +6,7 @@
 .globl ASM_WRAPPER_NAME(vfork)
 ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
 ASM_WRAPPER_NAME(vfork):
+  _CET_ENDBR
   // Store return address in the spill area and tear down the stack frame.
   push %rcx
   call COMMON_INTERCEPTOR_SPILL_AREA
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
index 01ccacc6f32..bc4b477e350 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -26,9 +26,7 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
 
 #if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
 // Weak default implementation for when sanitizer_stackdepot is not linked in.
-SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
-  return nullptr;
-}
+SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }
 
 void *BackgroundThread(void *arg) {
   const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
@@ -48,16 +46,12 @@ void *BackgroundThread(void *arg) {
       prev_reported_rss = current_rss_mb;
     }
     // If stack depot has grown 10% since last time, print it too.
-    StackDepotStats *stack_depot_stats = StackDepotGetStats();
-    if (stack_depot_stats) {
-      if (prev_reported_stack_depot_size * 11 / 10 <
-          stack_depot_stats->allocated) {
-        Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
-               SanitizerToolName,
-               stack_depot_stats->n_uniq_ids,
-               stack_depot_stats->allocated >> 20);
-        prev_reported_stack_depot_size = stack_depot_stats->allocated;
-      }
+    StackDepotStats stack_depot_stats = StackDepotGetStats();
+    if (prev_reported_stack_depot_size * 11 / 10 <
+        stack_depot_stats.allocated) {
+      Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName,
+             stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
+      prev_reported_stack_depot_size = stack_depot_stats.allocated;
     }
   }
   // Check RSS against the limit.
diff --git a/libsanitizer/sanitizer_common/sanitizer_hash.h b/libsanitizer/sanitizer_common/sanitizer_hash.h
index 3d97dcc5d28..f7cf9f234e6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_hash.h
+++ b/libsanitizer/sanitizer_common/sanitizer_hash.h
@@ -38,6 +38,30 @@ class MurMur2HashBuilder {
     return x;
   }
 };
+
+class MurMur2Hash64Builder {
+  static const u64 m = 0xc6a4a7935bd1e995ull;
+  static const u64 seed = 0x9747b28c9747b28cull;
+  static const u64 r = 47;
+  u64 h;
+
+ public:
+  explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }
+  void add(u64 k) {
+    k *= m;
+    k ^= k >> r;
+    k *= m;
+    h ^= k;
+    h *= m;
+  }
+  u64 get() {
+    u64 x = h;
+    x ^= x >> r;
+    x *= m;
+    x ^= x >> r;
+    return x;
+  }
+};
 }  //namespace __sanitizer
 
 #endif  // SANITIZER_HASH_H
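
Note (illustrative, not part of the patch): the new MurMur2Hash64Builder is a 64-bit
counterpart of the 32-bit MurMur2HashBuilder already in this header, so a caller seeds
it with a length, feeds it 64-bit words, and reads the finalized value with get(). A
minimal usage sketch, with hypothetical names `data` and `len`:

  // assumes u64/uptr from sanitizer_internal_defs.h and the class above
  MurMur2Hash64Builder H(len * sizeof(u64));  // seed with the byte length
  for (uptr i = 0; i < len; i++)
    H.add(data[i]);                           // mix in each 64-bit word
  u64 digest = H.get();                       // final avalanche step
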
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
index fc5619e4b37..7ce9e25da34 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -759,13 +759,9 @@ u32 GetNumberOfCPUs() {
 #elif SANITIZER_SOLARIS
   return sysconf(_SC_NPROCESSORS_ONLN);
 #else
-#if defined(CPU_COUNT)
   cpu_set_t CPUs;
   CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
   return CPU_COUNT(&CPUs);
-#else
-  return 1;
-#endif
 #endif
 }
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cpp b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
index fa077a129c2..b8839f197d2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cpp
@@ -37,7 +37,7 @@
 extern char **environ;
 #endif
 
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -70,15 +70,7 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/vm_statistics.h>
 #include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-#  include <os/log.h>
-#else
-   /* Without support for __builtin_os_log_format, fall back to the older
-      method. */
-#  define OS_LOG_DEFAULT 0
-#  define os_log_error(A,B,C) \
-     asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
+#include <os/log.h>
 #include <pthread.h>
 #include <sched.h>
 #include <signal.h>
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
index 96a5986a47a..0b6af5a3c0e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -14,26 +14,6 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
-   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
-   so there's no ambiguity). */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-# define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
-   0 (we have no support for them; they are not valid targets anyway). */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
-
 #if SANITIZER_MAC
 #include "sanitizer_posix.h"
 
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index 02c51d9fb0d..4e6efcad44d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -548,10 +548,10 @@
 #define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
 #define SANITIZER_INTERCEPT_MD4 SI_NETBSD
 #define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
-#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD5 (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_MD2 SI_NETBSD
-#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
+#define SANITIZER_INTERCEPT_SHA2 (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_CDB SI_NETBSD
 #define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
 #define SANITIZER_INTERCEPT_POPEN SI_POSIX
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index bfe3eea464d..64535805e40 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -69,6 +69,11 @@
 #include <semaphore.h>
 #include <signal.h>
 #include <stddef.h>
+#include <md5.h>
+#include <sha224.h>
+#include <sha256.h>
+#include <sha384.h>
+#include <sha512.h>
 #include <stdio.h>
 #include <stringlist.h>
 #include <term.h>
@@ -361,6 +366,22 @@ const int si_SEGV_MAPERR = SEGV_MAPERR;
 const int si_SEGV_ACCERR = SEGV_ACCERR;
 const int unvis_valid = UNVIS_VALID;
 const int unvis_validpush = UNVIS_VALIDPUSH;
+
+const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
+const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
+
+#define SHA2_CONST(LEN)                                                      \
+  const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX);                 \
+  const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
+  const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH;          \
+  const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
+
+SHA2_CONST(224);
+SHA2_CONST(256);
+SHA2_CONST(384);
+SHA2_CONST(512);
+
+#undef SHA2_CONST
 } // namespace __sanitizer
 
 using namespace __sanitizer;
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h
index 89022ca6422..649e64fd1a3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -647,6 +647,22 @@ extern unsigned IOCTL_KDSKBMODE;
 extern const int si_SEGV_MAPERR;
 extern const int si_SEGV_ACCERR;
 
+extern const unsigned MD5_CTX_sz;
+extern const unsigned MD5_return_length;
+
+#define SHA2_EXTERN(LEN)                          \
+  extern const unsigned SHA##LEN##_CTX_sz;        \
+  extern const unsigned SHA##LEN##_return_length; \
+  extern const unsigned SHA##LEN##_block_length;  \
+  extern const unsigned SHA##LEN##_digest_length
+
+SHA2_EXTERN(224);
+SHA2_EXTERN(256);
+SHA2_EXTERN(384);
+SHA2_EXTERN(512);
+
+#undef SHA2_EXTERN
+
 struct __sanitizer_cap_rights {
   u64 cr_rights[2];
 };
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
index 2b1a2f7932c..9d577570ea1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -26,10 +26,7 @@
 
 // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
 // are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
 #include <linux/posix_types.h>
 # if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
 #  include <sys/stat.h>
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index da53b5abef2..d69b344dd61 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__mips__)
 const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
                                            ? FIRST_32_SECOND_64(104, 128)
-                                           : FIRST_32_SECOND_64(144, 216);
+                                           : FIRST_32_SECOND_64(160, 216);
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
 const unsigned struct_kernel_stat_sz = 64;
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp
index 44a95214e38..fc2ea2fc768 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp
@@ -19,26 +19,24 @@
 namespace __sanitizer {
 
 struct StackDepotNode {
+  using hash_type = u32;
   StackDepotNode *link;
   u32 id;
-  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
+  hash_type stack_hash;
   u32 size;
-  u32 tag;
+  atomic_uint32_t tag_and_use_count;  // tag : 12 high bits; use_count : 20;
   uptr stack[1];  // [size]
 
   static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
-  // Lower kTabSizeLog bits are equal for all items in one bucket.
-  // We use these bits to store the per-stack use counter.
-  static const u32 kUseCountBits = kTabSizeLog;
+  static const u32 kUseCountBits = 20;
   static const u32 kMaxUseCount = 1 << kUseCountBits;
   static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
-  static const u32 kHashMask = ~kUseCountMask;
 
   typedef StackTrace args_type;
-  bool eq(u32 hash, const args_type &args) const {
-    u32 hash_bits =
-        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
-    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
+  bool eq(hash_type hash, const args_type &args) const {
+    u32 tag =
+        atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
+    if (stack_hash != hash || args.size != size || args.tag != tag)
       return false;
     uptr i = 0;
     for (; i < size; i++) {
@@ -49,7 +47,7 @@ struct StackDepotNode {
   static uptr storage_size(const args_type &args) {
     return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
   }
-  static u32 hash(const args_type &args) {
+  static hash_type hash(const args_type &args) {
     MurMur2HashBuilder H(args.size * sizeof(uptr));
     for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
     return H.get();
@@ -57,13 +55,17 @@ struct StackDepotNode {
   static bool is_valid(const args_type &args) {
     return args.size > 0 && args.trace;
   }
-  void store(const args_type &args, u32 hash) {
-    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+  void store(const args_type &args, hash_type hash) {
+    CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
+    atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
+                 memory_order_relaxed);
+    stack_hash = hash;
     size = args.size;
-    tag = args.tag;
     internal_memcpy(stack, args.trace, size * sizeof(uptr));
   }
   args_type load() const {
+    u32 tag =
+        atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
     return args_type(&stack[0], size, tag);
   }
   StackDepotHandle get_handle() { return StackDepotHandle(this); }
@@ -71,16 +73,16 @@ struct StackDepotNode {
   typedef StackDepotHandle handle_type;
 };
 
-COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
+COMPILER_CHECK(StackDepotNode::kMaxUseCount >= (u32)kStackDepotMaxUseCount);
 
 u32 StackDepotHandle::id() { return node_->id; }
 int StackDepotHandle::use_count() {
-  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+  return atomic_load(&node_->tag_and_use_count, memory_order_relaxed) &
          StackDepotNode::kUseCountMask;
 }
 void StackDepotHandle::inc_use_count_unsafe() {
   u32 prev =
-      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+      atomic_fetch_add(&node_->tag_and_use_count, 1, memory_order_relaxed) &
       StackDepotNode::kUseCountMask;
   CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
 }
@@ -90,9 +92,7 @@ typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
     StackDepot;
 static StackDepot theDepot;
 
-StackDepotStats *StackDepotGetStats() {
-  return theDepot.GetStats();
-}
+StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
 
 u32 StackDepotPut(StackTrace stack) {
   StackDepotHandle h = theDepot.Put(stack);
@@ -127,8 +127,10 @@ bool StackDepotReverseMap::IdDescPair::IdComparator(
   return a.id < b.id;
 }
 
-StackDepotReverseMap::StackDepotReverseMap() {
-  map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
+void StackDepotReverseMap::Init() const {
+  if (LIKELY(map_.capacity()))
+    return;
+  map_.reserve(StackDepotGetStats().n_uniq_ids + 100);
   for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
     atomic_uintptr_t *p = &theDepot.tab[idx];
     uptr v = atomic_load(p, memory_order_consume);
@@ -141,7 +143,8 @@ StackDepotReverseMap::StackDepotReverseMap() {
   Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
 }
 
-StackTrace StackDepotReverseMap::Get(u32 id) {
+StackTrace StackDepotReverseMap::Get(u32 id) const {
+  Init();
   if (!map_.size())
     return StackTrace();
   IdDescPair pair = {id, nullptr};
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
index 0e26c1fc37c..6f79fffeea8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
@@ -33,7 +33,7 @@ struct StackDepotHandle {
 
 const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);
 
-StackDepotStats *StackDepotGetStats();
+StackDepotStats StackDepotGetStats();
 u32 StackDepotPut(StackTrace stack);
 StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
 // Retrieves a stored stack trace by the id.
@@ -49,8 +49,8 @@ void StackDepotPrintAll();
 // which were stored before it was instantiated.
 class StackDepotReverseMap {
  public:
-  StackDepotReverseMap();
-  StackTrace Get(u32 id);
+  StackDepotReverseMap() = default;
+  StackTrace Get(u32 id) const;
 
  private:
   struct IdDescPair {
@@ -60,7 +60,9 @@ class StackDepotReverseMap {
     static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
   };
 
-  InternalMmapVector<IdDescPair> map_;
+  void Init() const;
+
+  mutable InternalMmapVector<IdDescPair> map_;
 
   // Disallow evil constructors.
   StackDepotReverseMap(const StackDepotReverseMap&);
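
Usage sketch (illustrative, not part of the patch): with the defaulted constructor,
const Get(), and the mutable map_ above, a StackDepotReverseMap can be created eagerly
but only pays for indexing the depot on first lookup. The names below are hypothetical:

  StackDepotReverseMap reverse_map;          // cheap: no depot scan happens here
  StackTrace t = reverse_map.Get(stack_id);  // first Get() calls Init(), which
                                             // builds the sorted id -> node index
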
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
index 1af2c1892ef..435f634cd11 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h
@@ -27,19 +27,20 @@ class StackDepotBase {
  public:
   typedef typename Node::args_type args_type;
   typedef typename Node::handle_type handle_type;
+  typedef typename Node::hash_type hash_type;
   // Maps stack trace to an unique id.
   handle_type Put(args_type args, bool *inserted = nullptr);
   // Retrieves a stored stack trace by the id.
   args_type Get(u32 id);
 
-  StackDepotStats *GetStats() { return &stats; }
+  StackDepotStats GetStats() const { return stats; }
 
   void LockAll();
   void UnlockAll();
   void PrintAll();
 
  private:
-  static Node *find(Node *s, args_type args, u32 hash);
+  static Node *find(Node *s, args_type args, hash_type hash);
   static Node *lock(atomic_uintptr_t *p);
   static void unlock(atomic_uintptr_t *p, Node *s);
 
@@ -62,7 +63,7 @@ class StackDepotBase {
 template <class Node, int kReservedBits, int kTabSizeLog>
 Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
                                                               args_type args,
-                                                              u32 hash) {
+                                                              hash_type hash) {
   // Searches linked list s for the stack, returns its id.
   for (; s; s = s->link) {
     if (s->eq(hash, args)) {
@@ -101,7 +102,7 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
101 bool *inserted) { 102 bool *inserted) {
102 if (inserted) *inserted = false; 103 if (inserted) *inserted = false;
103 if (!Node::is_valid(args)) return handle_type(); 104 if (!Node::is_valid(args)) return handle_type();
104 uptr h = Node::hash(args); 105 hash_type h = Node::hash(args);
105 atomic_uintptr_t *p = &tab[h % kTabSize]; 106 atomic_uintptr_t *p = &tab[h % kTabSize];
106 uptr v = atomic_load(p, memory_order_consume); 107 uptr v = atomic_load(p, memory_order_consume);
107 Node *s = (Node *)(v & ~1); 108 Node *s = (Node *)(v & ~1);
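Two interface changes are visible in the StackDepotBase hunk: the hash width is now taken from the node (Node::hash_type) instead of being fixed at u32/uptr, and GetStats() returns a StackDepotStats copy from a const method rather than a pointer into the object. A rough sketch of both ideas, using placeholder types rather than the sanitizer's own:

#include <cstdint>

struct DepotStats {
  uint64_t n_uniq_ids = 0;
  uint64_t allocated = 0;
};

template <class Node>
class DepotBase {
 public:
  using hash_type = typename Node::hash_type;  // the node decides 32 vs 64 bit

  DepotStats GetStats() const { return stats_; }  // copy out; no pointer into *this

  Node *Find(Node *chain, hash_type hash) const {
    for (; chain; chain = chain->link)
      if (chain->hash == hash) return chain;
    return nullptr;
  }

 private:
  DepotStats stats_;
};

// Example node with a 64-bit hash; DepotBase picks the width up automatically.
struct WideNode {
  using hash_type = uint64_t;
  WideNode *link = nullptr;
  hash_type hash = 0;
};

static_assert(sizeof(DepotBase<WideNode>::hash_type) == 8,
              "hash width follows the node type");

int main() {
  DepotBase<WideNode> depot;
  WideNode a{nullptr, 0x1234};
  return (depot.Find(&a, 0x1234) == &a && depot.GetStats().n_uniq_ids == 0) ? 0 : 1;
}

Returning the stats by value also explains the call-site churn later in this merge, where `StackDepotStats *stacks` becomes `StackDepotStats stacks` and `->` becomes `.`.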
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
index 15ea4954aa4..4707c6c5d00 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cpp
@@ -86,8 +86,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
86 // Nope, this does not look right either. This means the frame after next does 86 // Nope, this does not look right either. This means the frame after next does
87 // not have a valid frame pointer, but we can still extract the caller PC. 87 // not have a valid frame pointer, but we can still extract the caller PC.
88 // Unfortunately, there is no way to decide between GCC and LLVM frame 88 // Unfortunately, there is no way to decide between GCC and LLVM frame
89 // layouts. Assume GCC. 89 // layouts. Assume LLVM.
90 return bp_prev - 1; 90 return bp_prev;
91#else 91#else
92 return (uhwptr*)bp; 92 return (uhwptr*)bp;
93#endif 93#endif
@@ -110,21 +110,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
110 IsAligned((uptr)frame, sizeof(*frame)) && 110 IsAligned((uptr)frame, sizeof(*frame)) &&
111 size < max_depth) { 111 size < max_depth) {
112#ifdef __powerpc__ 112#ifdef __powerpc__
113 // PowerPC ABIs specify that the return address is saved on the 113 // PowerPC ABIs specify that the return address is saved at offset
114 // *caller's* stack frame. Thus we must dereference the back chain 114 // 16 of the *caller's* stack frame. Thus we must dereference the
115 // to find the caller frame before extracting it. 115 // back chain to find the caller frame before extracting it.
116 uhwptr *caller_frame = (uhwptr*)frame[0]; 116 uhwptr *caller_frame = (uhwptr*)frame[0];
117 if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) || 117 if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
118 !IsAligned((uptr)caller_frame, sizeof(uhwptr))) 118 !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
119 break; 119 break;
120 // For most ABIs the offset where the return address is saved is two
121 // register sizes. The exception is the SVR4 ABI, which uses an
122 // offset of only one register size.
123#ifdef _CALL_SYSV
124 uhwptr pc1 = caller_frame[1];
125#else
126 uhwptr pc1 = caller_frame[2]; 120 uhwptr pc1 = caller_frame[2];
127#endif
128#elif defined(__s390__) 121#elif defined(__s390__)
129 uhwptr pc1 = frame[14]; 122 uhwptr pc1 = frame[14];
130#elif defined(__riscv) 123#elif defined(__riscv)
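The UnwindFast hunk documents the ABI-specific part of frame-pointer unwinding: on PowerPC the return address lives at a fixed offset in the *caller's* frame, reached through the back chain, and the separate SVR4 offset is dropped, while the GetCanonicFrame comment now assumes the LLVM frame layout. For readers unfamiliar with the general technique, here is a deliberately simplified sketch of walking a {previous fp, return address} frame chain on x86-64/AArch64; it assumes code built with -fno-omit-frame-pointer and omits the stack-bounds and alignment checks the real unwinder performs.

#include <cstdint>
#include <cstdio>

__attribute__((noinline)) void UnwindFromHere(int max_depth) {
  // With frame pointers enabled, the frame address points at {old fp, return pc}.
  uintptr_t *frame = (uintptr_t *)__builtin_frame_address(0);
  for (int depth = 0; frame && depth < max_depth; ++depth) {
    uintptr_t ret_pc = frame[1];  // return address saved next to the old fp
    if (!ret_pc) break;
    std::printf("#%d pc=0x%zx\n", depth, (size_t)ret_pc);
    uintptr_t *next = (uintptr_t *)frame[0];  // follow the chain to the caller
    if ((uintptr_t)next <= (uintptr_t)frame) break;  // must move toward stack base
    frame = next;
  }
}

__attribute__((noinline)) void Leaf() { UnwindFromHere(8); }

int main() {
  Leaf();
  return 0;
}

The PowerPC branch in the hunk is the same walk, except that the current frame's slot 0 must first be dereferenced to reach the caller's frame before the saved pc (caller_frame[2]) can be read.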
diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h
index a855d1d8dea..89b2f990d03 100644
--- a/libsanitizer/tsan/tsan_interceptors.h
+++ b/libsanitizer/tsan/tsan_interceptors.h
@@ -32,16 +32,14 @@ LibIgnore *libignore();
32 32
33#if !SANITIZER_GO 33#if !SANITIZER_GO
34inline bool in_symbolizer() { 34inline bool in_symbolizer() {
35 cur_thread_init(); 35 return UNLIKELY(cur_thread_init()->in_symbolizer);
36 return UNLIKELY(cur_thread()->in_symbolizer);
37} 36}
38#endif 37#endif
39 38
40} // namespace __tsan 39} // namespace __tsan
41 40
42#define SCOPED_INTERCEPTOR_RAW(func, ...) \ 41#define SCOPED_INTERCEPTOR_RAW(func, ...) \
43 cur_thread_init(); \ 42 ThreadState *thr = cur_thread_init(); \
44 ThreadState *thr = cur_thread(); \
45 const uptr caller_pc = GET_CALLER_PC(); \ 43 const uptr caller_pc = GET_CALLER_PC(); \
46 ScopedInterceptor si(thr, #func, caller_pc); \ 44 ScopedInterceptor si(thr, #func, caller_pc); \
47 const uptr pc = GET_CURRENT_PC(); \ 45 const uptr pc = GET_CURRENT_PC(); \
diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp
index d3e4c8f0371..617eda65031 100644
--- a/libsanitizer/tsan/tsan_interceptors_posix.cpp
+++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp
@@ -153,7 +153,7 @@ const int SIG_SETMASK = 2;
153#endif 153#endif
154 154
155#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \ 155#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
156 (cur_thread_init(), !cur_thread()->is_inited) 156 (!cur_thread_init()->is_inited)
157 157
158namespace __tsan { 158namespace __tsan {
159struct SignalDesc { 159struct SignalDesc {
@@ -531,10 +531,7 @@ static void LongJmp(ThreadState *thr, uptr *env) {
531} 531}
532 532
533// FIXME: put everything below into a common extern "C" block? 533// FIXME: put everything below into a common extern "C" block?
534extern "C" void __tsan_setjmp(uptr sp) { 534extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
535 cur_thread_init();
536 SetJmp(cur_thread(), sp);
537}
538 535
539#if SANITIZER_MAC 536#if SANITIZER_MAC
540TSAN_INTERCEPTOR(int, setjmp, void *env); 537TSAN_INTERCEPTOR(int, setjmp, void *env);
@@ -973,8 +970,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
973 void* (*callback)(void *arg) = p->callback; 970 void* (*callback)(void *arg) = p->callback;
974 void *param = p->param; 971 void *param = p->param;
975 { 972 {
976 cur_thread_init(); 973 ThreadState *thr = cur_thread_init();
977 ThreadState *thr = cur_thread();
978 // Thread-local state is not initialized yet. 974 // Thread-local state is not initialized yet.
979 ScopedIgnoreInterceptors ignore; 975 ScopedIgnoreInterceptors ignore;
980#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD 976#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
@@ -2061,8 +2057,7 @@ static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
2061} 2057}
2062 2058
2063void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) { 2059void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2064 cur_thread_init(); 2060 ThreadState *thr = cur_thread_init();
2065 ThreadState *thr = cur_thread();
2066 ThreadSignalContext *sctx = SigCtx(thr); 2061 ThreadSignalContext *sctx = SigCtx(thr);
2067 if (sig < 0 || sig >= kSigCount) { 2062 if (sig < 0 || sig >= kSigCount) {
2068 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig); 2063 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
diff --git a/libsanitizer/tsan/tsan_interface.cpp b/libsanitizer/tsan/tsan_interface.cpp
index 704c06a1c78..04871518515 100644
--- a/libsanitizer/tsan/tsan_interface.cpp
+++ b/libsanitizer/tsan/tsan_interface.cpp
@@ -20,10 +20,7 @@
20 20
21using namespace __tsan; 21using namespace __tsan;
22 22
23void __tsan_init() { 23void __tsan_init() { Initialize(cur_thread_init()); }
24 cur_thread_init();
25 Initialize(cur_thread());
26}
27 24
28void __tsan_flush_memory() { 25void __tsan_flush_memory() {
29 FlushShadowMemory(); 26 FlushShadowMemory();
diff --git a/libsanitizer/tsan/tsan_platform_linux.cpp b/libsanitizer/tsan/tsan_platform_linux.cpp
index 6134a1be2bf..2fb753dd080 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cpp
+++ b/libsanitizer/tsan/tsan_platform_linux.cpp
@@ -124,13 +124,13 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
124 internal_memset(mem, 0, sizeof(mem)); 124 internal_memset(mem, 0, sizeof(mem));
125 GetMemoryProfile(FillProfileCallback, mem, MemCount); 125 GetMemoryProfile(FillProfileCallback, mem, MemCount);
126 auto meta = ctx->metamap.GetMemoryStats(); 126 auto meta = ctx->metamap.GetMemoryStats();
127 StackDepotStats *stacks = StackDepotGetStats(); 127 StackDepotStats stacks = StackDepotGetStats();
128 uptr nthread, nlive; 128 uptr nthread, nlive;
129 ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive); 129 ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
130 uptr internal_stats[AllocatorStatCount]; 130 uptr internal_stats[AllocatorStatCount];
131 internal_allocator()->GetStats(internal_stats); 131 internal_allocator()->GetStats(internal_stats);
132 // All these are allocated from the common mmap region. 132 // All these are allocated from the common mmap region.
133 mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks->allocated + 133 mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated +
134 internal_stats[AllocatorStatMapped]; 134 internal_stats[AllocatorStatMapped];
135 if (s64(mem[MemMmap]) < 0) 135 if (s64(mem[MemMmap]) < 0)
136 mem[MemMmap] = 0; 136 mem[MemMmap] = 0;
@@ -143,8 +143,8 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
143 mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20, 143 mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
144 mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20, 144 mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
145 mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20, 145 mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
146 meta.mem_block >> 20, meta.sync_obj >> 20, stacks->allocated >> 20, 146 meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20,
147 stacks->n_uniq_ids, nlive, nthread); 147 stacks.n_uniq_ids, nlive, nthread);
148} 148}
149 149
150# if SANITIZER_LINUX 150# if SANITIZER_LINUX
@@ -456,12 +456,14 @@ static void InitializeLongjmpXorKey() {
456extern "C" void __tsan_tls_initialization() {} 456extern "C" void __tsan_tls_initialization() {}
457 457
458void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) { 458void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
459 // Check that the thr object is in tls;
459 const uptr thr_beg = (uptr)thr; 460 const uptr thr_beg = (uptr)thr;
460 const uptr thr_end = (uptr)thr + sizeof(*thr); 461 const uptr thr_end = (uptr)thr + sizeof(*thr);
461 // ThreadState is normally allocated in TLS and is large, 462 CHECK_GE(thr_beg, tls_addr);
462 // so we skip it. But unit tests allocate ThreadState outside of TLS. 463 CHECK_LE(thr_beg, tls_addr + tls_size);
463 if (thr_beg < tls_addr || thr_end >= tls_addr + tls_size) 464 CHECK_GE(thr_end, tls_addr);
464 return; 465 CHECK_LE(thr_end, tls_addr + tls_size);
466 // Since the thr object is huge, skip it.
465 const uptr pc = StackTrace::GetNextInstructionPc( 467 const uptr pc = StackTrace::GetNextInstructionPc(
466 reinterpret_cast<uptr>(__tsan_tls_initialization)); 468 reinterpret_cast<uptr>(__tsan_tls_initialization));
467 MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr); 469 MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
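The Linux ImitateTlsWrite hunk replaces the early-return with CHECKs that the ThreadState object lies wholly inside the TLS block, then imitates writes over the TLS range while skipping the object itself by splitting the range at its boundaries. A small sketch of that range-splitting step, with a callback standing in for MemoryRangeImitateWrite and made-up addresses:

#include <cassert>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

template <class Visit>
void VisitRangeSkippingObject(uptr base, uptr size, uptr skip_beg,
                              uptr skip_end, Visit visit) {
  // Same invariant the CHECKs above enforce: the skipped object is inside the range.
  assert(skip_beg >= base && skip_end <= base + size && skip_beg <= skip_end);
  if (skip_beg > base) visit(base, skip_beg - base);                    // before the object
  if (base + size > skip_end) visit(skip_end, base + size - skip_end);  // after it
}

int main() {
  // Pretend TLS spans [0x1000, 0x1400) and the skipped object sits at [0x1100, 0x1180).
  VisitRangeSkippingObject(0x1000, 0x400, 0x1100, 0x1180,
                           [](uptr beg, uptr len) {
                             std::printf("imitate write: 0x%zx + %zu\n",
                                         (size_t)beg, (size_t)len);
                           });
  return 0;
}

The macOS variant later in this diff does the same split around the shadow slot that holds the ThreadState pointer, now using a real synthetic pc instead of the old /*pc=*/2 constant.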
diff --git a/libsanitizer/tsan/tsan_platform_mac.cpp b/libsanitizer/tsan/tsan_platform_mac.cpp
index f2aff7786e0..388b3836d7d 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cpp
+++ b/libsanitizer/tsan/tsan_platform_mac.cpp
@@ -159,35 +159,35 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
159 RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty); 159 RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
160#endif 160#endif
161 161
162 StackDepotStats *stacks = StackDepotGetStats(); 162 StackDepotStats stacks = StackDepotGetStats();
163 uptr nthread, nlive; 163 uptr nthread, nlive;
164 ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive); 164 ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
165 internal_snprintf(buf, buf_size, 165 internal_snprintf(
166 "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 166 buf, buf_size,
167 "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 167 "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
168 "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 168 "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
169#if !SANITIZER_GO 169 "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
170 "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 170# if !SANITIZER_GO
171 "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 171 "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
172 "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 172 "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
173#else // !SANITIZER_GO 173 "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
174# else // !SANITIZER_GO
174 "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 175 "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
175#endif 176# endif
176 "stacks: %zd unique IDs, %zd kB allocated\n" 177 "stacks: %zd unique IDs, %zd kB allocated\n"
177 "threads: %zd total, %zd live\n" 178 "threads: %zd total, %zd live\n"
178 "------------------------------\n", 179 "------------------------------\n",
179 ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024, 180 ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
180 MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024, 181 MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
181 TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024, 182 TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
182#if !SANITIZER_GO 183# if !SANITIZER_GO
183 LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024, 184 LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
184 HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024, 185 HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
185 HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024, 186 HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
186#else // !SANITIZER_GO 187# else // !SANITIZER_GO
187 LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024, 188 LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
188#endif 189# endif
189 stacks->n_uniq_ids, stacks->allocated / 1024, 190 stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
190 nthread, nlive);
191} 191}
192 192
193# if !SANITIZER_GO 193# if !SANITIZER_GO
@@ -283,13 +283,17 @@ uptr ExtractLongJmpSp(uptr *env) {
283} 283}
284 284
285#if !SANITIZER_GO 285#if !SANITIZER_GO
286extern "C" void __tsan_tls_initialization() {}
287
286void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) { 288void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
287 // The pointer to the ThreadState object is stored in the shadow memory 289 // The pointer to the ThreadState object is stored in the shadow memory
288 // of the tls. 290 // of the tls.
289 uptr tls_end = tls_addr + tls_size; 291 uptr tls_end = tls_addr + tls_size;
290 uptr thread_identity = (uptr)pthread_self(); 292 uptr thread_identity = (uptr)pthread_self();
293 const uptr pc = StackTrace::GetNextInstructionPc(
294 reinterpret_cast<uptr>(__tsan_tls_initialization));
291 if (thread_identity == main_thread_identity) { 295 if (thread_identity == main_thread_identity) {
292 MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size); 296 MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
293 } else { 297 } else {
294 uptr thr_state_start = thread_identity; 298 uptr thr_state_start = thread_identity;
295 uptr thr_state_end = thr_state_start + sizeof(uptr); 299 uptr thr_state_end = thr_state_start + sizeof(uptr);
@@ -297,10 +301,8 @@ void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
297 CHECK_LE(thr_state_start, tls_addr + tls_size); 301 CHECK_LE(thr_state_start, tls_addr + tls_size);
298 CHECK_GE(thr_state_end, tls_addr); 302 CHECK_GE(thr_state_end, tls_addr);
299 CHECK_LE(thr_state_end, tls_addr + tls_size); 303 CHECK_LE(thr_state_end, tls_addr + tls_size);
300 MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, 304 MemoryRangeImitateWrite(thr, pc, tls_addr, thr_state_start - tls_addr);
301 thr_state_start - tls_addr); 305 MemoryRangeImitateWrite(thr, pc, thr_state_end, tls_end - thr_state_end);
302 MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
303 tls_end - thr_state_end);
304 } 306 }
305} 307}
306#endif 308#endif
diff --git a/libsanitizer/tsan/tsan_rtl.cpp b/libsanitizer/tsan/tsan_rtl.cpp
index d6792822454..1c53f957bdf 100644
--- a/libsanitizer/tsan/tsan_rtl.cpp
+++ b/libsanitizer/tsan/tsan_rtl.cpp
@@ -43,9 +43,10 @@ int (*on_finalize)(int);
43 43
44#if !SANITIZER_GO && !SANITIZER_MAC 44#if !SANITIZER_GO && !SANITIZER_MAC
45__attribute__((tls_model("initial-exec"))) 45__attribute__((tls_model("initial-exec")))
46THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); 46THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
47 SANITIZER_CACHE_LINE_SIZE);
47#endif 48#endif
48static char ctx_placeholder[sizeof(Context)] ALIGNED(64); 49static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
49Context *ctx; 50Context *ctx;
50 51
51// Can be overriden by a front-end. 52// Can be overriden by a front-end.
@@ -195,8 +196,7 @@ static void *BackgroundThread(void *arg) {
195 // We don't use ScopedIgnoreInterceptors, because we want ignores to be 196 // We don't use ScopedIgnoreInterceptors, because we want ignores to be
196 // enabled even when the thread function exits (e.g. during pthread thread 197 // enabled even when the thread function exits (e.g. during pthread thread
197 // shutdown code). 198 // shutdown code).
198 cur_thread_init(); 199 cur_thread_init()->ignore_interceptors++;
199 cur_thread()->ignore_interceptors++;
200 const u64 kMs2Ns = 1000 * 1000; 200 const u64 kMs2Ns = 1000 * 1000;
201 const u64 start = NanoTime(); 201 const u64 start = NanoTime();
202 202
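The tsan_rtl.cpp hunk swaps the hard-coded ALIGNED(64) for SANITIZER_CACHE_LINE_SIZE; the point of cache-line alignment here is to keep the per-thread placeholder from sharing a line with unrelated data, which would otherwise cause false sharing between cores. A generic illustration, assuming a 64-byte line (kCacheLineSize and PaddedCounter are local to the example):

#include <atomic>
#include <cstddef>

constexpr std::size_t kCacheLineSize = 64;  // assumption; typical x86 line size

struct alignas(kCacheLineSize) PaddedCounter {
  std::atomic<long> value{0};
  // sizeof(PaddedCounter) is rounded up to the alignment, so each instance
  // occupies a full line and adjacent counters cannot share one.
};

PaddedCounter per_thread_counters[2];  // each element lands on its own line

static_assert(alignof(PaddedCounter) == kCacheLineSize, "aligned as intended");

int main() {
  per_thread_counters[0].value.fetch_add(1, std::memory_order_relaxed);
  return 0;
}

Using a named per-target constant instead of a literal 64 lets platforms with wider lines get the same isolation without touching every declaration.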
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index 4f50656a1ea..669c4ac9a54 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -230,23 +230,24 @@ struct ThreadState {
230ThreadState *cur_thread(); 230ThreadState *cur_thread();
231void set_cur_thread(ThreadState *thr); 231void set_cur_thread(ThreadState *thr);
232void cur_thread_finalize(); 232void cur_thread_finalize();
233inline void cur_thread_init() { } 233inline ThreadState *cur_thread_init() { return cur_thread(); }
234#else 234# else
235__attribute__((tls_model("initial-exec"))) 235__attribute__((tls_model("initial-exec")))
236extern THREADLOCAL char cur_thread_placeholder[]; 236extern THREADLOCAL char cur_thread_placeholder[];
237inline ThreadState *cur_thread() { 237inline ThreadState *cur_thread() {
238 return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current; 238 return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
239} 239}
240inline void cur_thread_init() { 240inline ThreadState *cur_thread_init() {
241 ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder); 241 ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
242 if (UNLIKELY(!thr->current)) 242 if (UNLIKELY(!thr->current))
243 thr->current = thr; 243 thr->current = thr;
244 return thr->current;
244} 245}
245inline void set_cur_thread(ThreadState *thr) { 246inline void set_cur_thread(ThreadState *thr) {
246 reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr; 247 reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
247} 248}
248inline void cur_thread_finalize() { } 249inline void cur_thread_finalize() { }
249#endif // SANITIZER_MAC || SANITIZER_ANDROID 250# endif // SANITIZER_MAC || SANITIZER_ANDROID
250#endif // SANITIZER_GO 251#endif // SANITIZER_GO
251 252
252class ThreadContext final : public ThreadContextBase { 253class ThreadContext final : public ThreadContextBase {
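Throughout this merge, cur_thread_init() changes from a void initializer into one that returns ThreadState*, so interceptors, signal handlers, and __tsan_init can fold the old init-then-cur_thread() pair into a single statement. The header hunk above shows the real definitions; below is a loose model of the pattern that uses an ordinary thread_local object instead of TSan's placeholder byte array, so the details deliberately differ from the runtime.

#include <cstdio>

struct ThreadState {
  ThreadState *current = nullptr;  // wired to the state actually in use
  int in_symbolizer = 0;
};

thread_local ThreadState cur_thread_placeholder;

inline ThreadState *cur_thread() { return cur_thread_placeholder.current; }

inline ThreadState *cur_thread_init() {
  ThreadState *thr = &cur_thread_placeholder;
  if (__builtin_expect(!thr->current, 0))  // UNLIKELY: only the first call per thread
    thr->current = thr;
  return thr->current;
}

int main() {
  // One statement both guarantees initialization and yields the state,
  // which is exactly what SCOPED_INTERCEPTOR_RAW and sighandler now rely on.
  ThreadState *thr = cur_thread_init();
  std::printf("initialized: %d\n", thr == cur_thread());
  return 0;
}

Returning the pointer is what makes one-liners like `cur_thread_init()->ignore_interceptors++` in BackgroundThread and `(!cur_thread_init()->is_inited)` in the interceptor macro possible.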
diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S
index 5913aa360c5..10c0122f564 100644
--- a/libsanitizer/tsan/tsan_rtl_amd64.S
+++ b/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -13,6 +13,7 @@ ASM_HIDDEN(__tsan_trace_switch)
13.globl ASM_SYMBOL(__tsan_trace_switch_thunk) 13.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
14ASM_SYMBOL(__tsan_trace_switch_thunk): 14ASM_SYMBOL(__tsan_trace_switch_thunk):
15 CFI_STARTPROC 15 CFI_STARTPROC
16 _CET_ENDBR
16 # Save scratch registers. 17 # Save scratch registers.
17 push %rax 18 push %rax
18 CFI_ADJUST_CFA_OFFSET(8) 19 CFI_ADJUST_CFA_OFFSET(8)
@@ -93,6 +94,7 @@ ASM_HIDDEN(__tsan_report_race)
93.globl ASM_SYMBOL(__tsan_report_race_thunk) 94.globl ASM_SYMBOL(__tsan_report_race_thunk)
94ASM_SYMBOL(__tsan_report_race_thunk): 95ASM_SYMBOL(__tsan_report_race_thunk):
95 CFI_STARTPROC 96 CFI_STARTPROC
97 _CET_ENDBR
96 # Save scratch registers. 98 # Save scratch registers.
97 push %rax 99 push %rax
98 CFI_ADJUST_CFA_OFFSET(8) 100 CFI_ADJUST_CFA_OFFSET(8)
@@ -185,6 +187,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
185ASM_SYMBOL_INTERCEPTOR(setjmp): 187ASM_SYMBOL_INTERCEPTOR(setjmp):
186#endif 188#endif
187 CFI_STARTPROC 189 CFI_STARTPROC
190 _CET_ENDBR
188 // save env parameter 191 // save env parameter
189 push %rdi 192 push %rdi
190 CFI_ADJUST_CFA_OFFSET(8) 193 CFI_ADJUST_CFA_OFFSET(8)
@@ -226,6 +229,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
226ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp)) 229ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
227ASM_SYMBOL_INTERCEPTOR(_setjmp): 230ASM_SYMBOL_INTERCEPTOR(_setjmp):
228 CFI_STARTPROC 231 CFI_STARTPROC
232 _CET_ENDBR
229 // save env parameter 233 // save env parameter
230 push %rdi 234 push %rdi
231 CFI_ADJUST_CFA_OFFSET(8) 235 CFI_ADJUST_CFA_OFFSET(8)
@@ -267,6 +271,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
267ASM_SYMBOL_INTERCEPTOR(sigsetjmp): 271ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
268#endif 272#endif
269 CFI_STARTPROC 273 CFI_STARTPROC
274 _CET_ENDBR
270 // save env parameter 275 // save env parameter
271 push %rdi 276 push %rdi
272 CFI_ADJUST_CFA_OFFSET(8) 277 CFI_ADJUST_CFA_OFFSET(8)
@@ -323,6 +328,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
323ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) 328ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
324ASM_SYMBOL_INTERCEPTOR(__sigsetjmp): 329ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
325 CFI_STARTPROC 330 CFI_STARTPROC
331 _CET_ENDBR
326 // save env parameter 332 // save env parameter
327 push %rdi 333 push %rdi
328 CFI_ADJUST_CFA_OFFSET(8) 334 CFI_ADJUST_CFA_OFFSET(8)
diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S
index 9e533a71a9c..8285e21aa1e 100644
--- a/libsanitizer/tsan/tsan_rtl_ppc64.S
+++ b/libsanitizer/tsan/tsan_rtl_ppc64.S
@@ -1,6 +1,5 @@
1#include "tsan_ppc_regs.h" 1#include "tsan_ppc_regs.h"
2 2
3 .machine altivec
4 .section .text 3 .section .text
5 .hidden __tsan_setjmp 4 .hidden __tsan_setjmp
6 .globl _setjmp 5 .globl _setjmp
diff --git a/libsanitizer/ubsan/ubsan_flags.cpp b/libsanitizer/ubsan/ubsan_flags.cpp
index 9a66bd37518..25cefd46ce2 100644
--- a/libsanitizer/ubsan/ubsan_flags.cpp
+++ b/libsanitizer/ubsan/ubsan_flags.cpp
@@ -50,7 +50,6 @@ void InitializeFlags() {
50 { 50 {
51 CommonFlags cf; 51 CommonFlags cf;
52 cf.CopyFrom(*common_flags()); 52 cf.CopyFrom(*common_flags());
53 cf.print_summary = false;
54 cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH"); 53 cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
55 OverrideCommonFlags(cf); 54 OverrideCommonFlags(cf);
56 } 55 }
diff --git a/libsanitizer/ubsan/ubsan_handlers.cpp b/libsanitizer/ubsan/ubsan_handlers.cpp
index 2184625aa6e..e201e6bba22 100644
--- a/libsanitizer/ubsan/ubsan_handlers.cpp
+++ b/libsanitizer/ubsan/ubsan_handlers.cpp
@@ -894,21 +894,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
894 894
895} // namespace __ubsan 895} // namespace __ubsan
896 896
897void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
898 ValueHandle Function) {
899 GET_REPORT_OPTIONS(false);
900 CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
901 handleCFIBadIcall(&Data, Function, Opts);
902}
903
904void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
905 ValueHandle Function) {
906 GET_REPORT_OPTIONS(true);
907 CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
908 handleCFIBadIcall(&Data, Function, Opts);
909 Die();
910}
911
912void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data, 897void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
913 ValueHandle Value, 898 ValueHandle Value,
914 uptr ValidVtable) { 899 uptr ValidVtable) {
diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h
index 9f412353fc0..219fb15de55 100644
--- a/libsanitizer/ubsan/ubsan_handlers.h
+++ b/libsanitizer/ubsan/ubsan_handlers.h
@@ -215,20 +215,12 @@ enum CFITypeCheckKind : unsigned char {
215 CFITCK_VMFCall, 215 CFITCK_VMFCall,
216}; 216};
217 217
218struct CFIBadIcallData {
219 SourceLocation Loc;
220 const TypeDescriptor &Type;
221};
222
223struct CFICheckFailData { 218struct CFICheckFailData {
224 CFITypeCheckKind CheckKind; 219 CFITypeCheckKind CheckKind;
225 SourceLocation Loc; 220 SourceLocation Loc;
226 const TypeDescriptor &Type; 221 const TypeDescriptor &Type;
227}; 222};
228 223
229/// \brief Handle control flow integrity failure for indirect function calls.
230RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
231
232/// \brief Handle control flow integrity failures. 224/// \brief Handle control flow integrity failures.
233RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function, 225RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
234 uptr VtableIsValid) 226 uptr VtableIsValid)
diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h
index ad3e883f0f3..d2cc2e10bd2 100644
--- a/libsanitizer/ubsan/ubsan_platform.h
+++ b/libsanitizer/ubsan/ubsan_platform.h
@@ -12,7 +12,6 @@
12#ifndef UBSAN_PLATFORM_H 12#ifndef UBSAN_PLATFORM_H
13#define UBSAN_PLATFORM_H 13#define UBSAN_PLATFORM_H
14 14
15#ifndef CAN_SANITIZE_UB
16// Other platforms should be easy to add, and probably work as-is. 15// Other platforms should be easy to add, and probably work as-is.
17#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \ 16#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
18 defined(__NetBSD__) || defined(__DragonFly__) || \ 17 defined(__NetBSD__) || defined(__DragonFly__) || \
@@ -22,6 +21,5 @@
22#else 21#else
23# define CAN_SANITIZE_UB 0 22# define CAN_SANITIZE_UB 0
24#endif 23#endif
25#endif //CAN_SANITIZE_UB
26 24
27#endif 25#endif