//===-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (!disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  disable_counter--;
}
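
// A hypothetical RAII wrapper (illustrative, not part of this file) shows how
// callers are expected to keep these calls balanced per thread:
//
//   struct ScopedLsanDisabler {
//     ScopedLsanDisabler() { DisableInThisThread(); }
//     ~ScopedLsanDisabler() { EnableInThisThread(); }
//   };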

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)
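
// For example, LOG_POINTERS("Scanning %s range %p-%p.\n", ...) below prints
// through Report() only when the log_pointers flag is set (e.g. via
// LSAN_OPTIONS=log_pointers=1); otherwise the call compiles to a dead branch.
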
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical-form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
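
// Worked examples: kMinAddress is 4 * 4096 = 0x4000, so p = 0x1000 is
// rejected outright. On x86_64, p = 0x00007fff12345678 satisfies
// (p >> 47) == 0 and is accepted, while a kernel-space address such as
// 0xffff800000000000 has nonzero high bits and is rejected.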

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
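
// Worked example of the round-up above: with begin = 0x1003 and
// alignment = 8, pp % alignment == 3, so pp becomes
// 0x1003 + 8 - 3 == 0x1008, the first aligned address >= begin.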

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on an alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that the allocator cache will be part of the static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
      if (dtls) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      }
    }
  }
}
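
// Note: for each suspended thread the code above scans, in order, the saved
// register values, the stack from SP (or the whole recorded range when SP is
// unavailable or out of range), static TLS minus the allocator cache carved
// out of it, and finally each live DTV entry of the dynamic TLS.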

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}
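
// Illustrative usage (hypothetical): the regions scanned here are registered
// through the public interface defined at the bottom of this file, e.g.:
//   static char arena[1 << 20];  // custom allocator backing store
//   __lsan_register_root_region(arena, sizeof(arena));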

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If the chunk is marked as ignored, adds its address
// to the frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
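
// Note on the tag lifecycle: every allocated, non-ignored chunk starts out as
// kDirectlyLeaked (see ResetTagsCb below). The flood fills above promote
// chunks referenced from roots to kReachable; MarkIndirectlyLeakedCb then
// re-tags leaked chunks that are referenced only by other leaked chunks as
// kIndirectlyLeaked.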

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}
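
// Note: DoLeakCheck() runs at most once per process and calls Die() when
// leaks are found and exitcode is set, while DoRecoverableLeakCheck() may be
// called repeatedly and merely returns 1 if leaks were found, 0 otherwise.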

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}
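
// Illustrative suppression file (hypothetical contents): each line pairs the
// "leak" suppression type used above with a module, function, or file name
// pattern, e.g.:
//   leak:libfoo.so
//   leak:SomeKnownLeakyFunction
//   leak:path/to/leaky_file.cc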

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}
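
// Worked example: two 16-byte chunks reported with the same stack_trace_id
// and the same tag collapse into a single Leak with hit_count == 2 and
// total_size == 32; a new (stack_trace_id, is_directly_leaked) pair appends
// a fresh entry, up to kMaxLeaksConsidered entries in total.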

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
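
// Illustrative usage (hypothetical client code): intentionally leaked
// objects can be excluded from reports right after allocation:
//   void *p = malloc(16);
//   __lsan_ignore_object(p);  // treat *p as intentionally leaked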

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}
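
// Illustrative usage (hypothetical client code): register/unregister calls
// must come in exactly matching (begin, size) pairs:
//   __lsan_register_root_region(buf, len);
//   // ... buf may now hold the only live pointers to heap objects ...
//   __lsan_unregister_root_region(buf, len);  // mismatched args abort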

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}
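
// Illustrative usage (hypothetical client code): the recoverable variant
// suits periodic in-process checkpoints:
//   if (__lsan_do_recoverable_leak_check())
//     fprintf(stderr, "leaks detected at checkpoint\n");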

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
}  // extern "C"