//=-- lsan_common.cc -----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (!disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  disable_counter--;
}
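
// Illustrative usage of the public wrappers around this counter (defined at
// the bottom of this file): allocations made between the calls are excluded
// from leak reports, e.g.
//   __lsan_disable();
//   void *intentionally_leaked = malloc(64);  // not reported
//   __lsan_enable();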

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0);

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}
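
// Example of the suppression syntax accepted here (one rule per line in the
// file named by LSAN_OPTIONS=suppressions=..., or in the string returned by
// __lsan_default_suppressions()):
//   leak:libfoo.so
//   leak:MyIntentionallyLeakyFunction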

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
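
// For example, on x86_64 only canonical user-space addresses pass the test:
// 0x00007fffffffffff has its upper 17 bits clear, so (p >> 47) == 0 holds,
// while a kernel-half address such as 0xffff800000000000 is rejected.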

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
      if (dtls) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
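
// After classification, every allocated chunk carries one of the tags used
// above: kReachable (found from a root), kIgnored (explicitly excluded by the
// user), or a leak tag (kDirectlyLeaked, refined to kIndirectlyLeaked when it
// is reachable only from other leaked chunks).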

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}
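
// Example of the resolution flag's effect: with resolution = 3, two leaks
// whose allocation stacks share the same top three frames are interned to the
// same truncated stack, and so are merged into a single leak report.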

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan

#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
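
// Illustrative client-side usage (via <sanitizer/lsan_interface.h>):
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // exempt p from all future leak reports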

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
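
// Illustrative pairing for a custom arena that may hold the only pointers to
// heap objects:
//   __lsan_register_root_region(arena_base, arena_bytes);
//   ...
//   __lsan_unregister_root_region(arena_base, arena_bytes);
// Unregistration must pass the exact (begin, size) used at registration, or
// the process dies with the error above.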

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}
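
// The two entry points differ in failure mode: __lsan_do_leak_check() runs at
// most once and dies on leaks when exitcode is set, while
//   if (__lsan_do_recoverable_leak_check()) { /* leaks found; keep running */ }
// may be called repeatedly and simply returns 1 when leaks were detected.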

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"