]> git.proxmox.com Git - rustc.git/blame - src/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
New upstream version 1.19.0+dfsg1
[rustc.git] / src / compiler-rt / lib / tsan / rtl / tsan_platform_linux.cc
CommitLineData
1a4d82fc
JJ
1//===-- tsan_platform_linux.cc --------------------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of ThreadSanitizer (TSan), a race detector.
11//
92a42be0 12// Linux- and FreeBSD-specific code.
1a4d82fc
JJ
13//===----------------------------------------------------------------------===//
14
15
16#include "sanitizer_common/sanitizer_platform.h"
92a42be0 17#if SANITIZER_LINUX || SANITIZER_FREEBSD
1a4d82fc
JJ
18
19#include "sanitizer_common/sanitizer_common.h"
20#include "sanitizer_common/sanitizer_libc.h"
5bcae85e
SL
21#include "sanitizer_common/sanitizer_linux.h"
22#include "sanitizer_common/sanitizer_platform_limits_posix.h"
92a42be0 23#include "sanitizer_common/sanitizer_posix.h"
1a4d82fc
JJ
24#include "sanitizer_common/sanitizer_procmaps.h"
25#include "sanitizer_common/sanitizer_stoptheworld.h"
92a42be0 26#include "sanitizer_common/sanitizer_stackdepot.h"
1a4d82fc
JJ
27#include "tsan_platform.h"
28#include "tsan_rtl.h"
29#include "tsan_flags.h"
30
31#include <fcntl.h>
32#include <pthread.h>
33#include <signal.h>
34#include <stdio.h>
35#include <stdlib.h>
36#include <string.h>
37#include <stdarg.h>
38#include <sys/mman.h>
5bcae85e
SL
39#if SANITIZER_LINUX
40#include <sys/personality.h>
7cac9316 41#include <setjmp.h>
5bcae85e 42#endif
1a4d82fc
JJ
43#include <sys/syscall.h>
44#include <sys/socket.h>
45#include <sys/time.h>
46#include <sys/types.h>
47#include <sys/resource.h>
48#include <sys/stat.h>
49#include <unistd.h>
50#include <errno.h>
51#include <sched.h>
52#include <dlfcn.h>
92a42be0 53#if SANITIZER_LINUX
1a4d82fc
JJ
54#define __need_res_state
55#include <resolv.h>
92a42be0 56#endif
1a4d82fc
JJ
57
58#ifdef sa_handler
59# undef sa_handler
60#endif
61
62#ifdef sa_sigaction
63# undef sa_sigaction
64#endif
65
92a42be0
SL
#if SANITIZER_FREEBSD
// FreeBSD's libc does not export glibc's __libc_stack_end, so provide a
// zero-initialized definition here (presumably consumed by common sanitizer
// code that expects the glibc symbol -- confirm against sanitizer_common).
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

#if SANITIZER_LINUX && defined(__aarch64__)
// Initializer for the guard pointer used by the {sig}{set,long}jmp machinery
// (called from InitializePlatform below); defined elsewhere in tsan.
void InitializeGuardPtr() __attribute__((visibility("hidden")));
#endif
74
1a4d82fc
JJ
75namespace __tsan {
76
3157f602
XL
#ifdef TSAN_RUNTIME_VMA
// Runtime-detected virtual address space size, in bits; computed in
// InitializePlatformEarly() from the position of the current stack frame.
uptr vmaSize;
#endif
81
92a42be0
SL
// Indices into the memory-profile accumulator array filled by
// FillProfileCallback and formatted by WriteMemoryProfile.
enum {
  MemTotal = 0,   // total RSS, every region contributes here
  MemShadow = 1,  // shadow range
  MemMeta = 2,    // metadata shadow range
  MemFile = 3,    // file-backed app mappings
  MemMmap = 4,    // anonymous app mappings
  MemTrace = 5,   // trace range
  MemHeap = 6,    // app heap range
  MemOther = 7,   // anything outside the known ranges
  MemCount = 8,   // number of buckets (array size)
};
93
94void FillProfileCallback(uptr p, uptr rss, bool file,
1a4d82fc 95 uptr *mem, uptr stats_size) {
92a42be0 96 mem[MemTotal] += rss;
3157f602 97 if (p >= ShadowBeg() && p < ShadowEnd())
92a42be0 98 mem[MemShadow] += rss;
3157f602 99 else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
92a42be0 100 mem[MemMeta] += rss;
7cac9316 101#if !SANITIZER_GO
3157f602 102 else if (p >= HeapMemBeg() && p < HeapMemEnd())
92a42be0 103 mem[MemHeap] += rss;
3157f602 104 else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
92a42be0 105 mem[file ? MemFile : MemMmap] += rss;
3157f602 106 else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
92a42be0
SL
107 mem[file ? MemFile : MemMmap] += rss;
108#else
3157f602 109 else if (p >= AppMemBeg() && p < AppMemEnd())
92a42be0
SL
110 mem[file ? MemFile : MemMmap] += rss;
111#endif
3157f602 112 else if (p >= TraceMemBeg() && p < TraceMemEnd())
92a42be0
SL
113 mem[MemTrace] += rss;
114 else
115 mem[MemOther] += rss;
1a4d82fc
JJ
116}
117
92a42be0 118void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
3157f602
XL
119 uptr mem[MemCount];
120 internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
1a4d82fc 121 __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
92a42be0
SL
122 StackDepotStats *stacks = StackDepotGetStats();
123 internal_snprintf(buf, buf_size,
124 "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
125 " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
126 mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
127 mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
128 mem[MemHeap] >> 20, mem[MemOther] >> 20,
129 stacks->allocated >> 20, stacks->n_uniq_ids,
130 nlive, nthread);
1a4d82fc
JJ
131}
132
#if SANITIZER_LINUX
// StopTheWorld callback: returns the physical pages backing the whole shadow
// range to the OS while all other threads are suspended.
// Both parameters are unused.
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif
1a4d82fc
JJ
140
// Reduces RSS by releasing shadow pages back to the OS. On Linux this stops
// the world and runs FlushShadowMemoryCallback; on other platforms (FreeBSD)
// it is a no-op.
void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}
1a4d82fc 146
7cac9316 147#if !SANITIZER_GO
1a4d82fc
JJ
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
// Strategy: build a temp file full of kShadowRodata values, then map that
// file (read-only, MAP_FIXED) over the shadow of every read-only mapping.
// Purely best-effort: every failure path just returns without marking.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  // No usable temp directory: skip the optimization entirely.
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  // O_EXCL: fail rather than reuse a file some other process created.
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalScopedBuffer<u64> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size());
  // Map the file into memory.
  // NOTE(review): MAP_ANONYMOUS combined with a real fd is contradictory
  // (the fd is ignored for anonymous mappings), and 'page' is never used or
  // unmapped afterwards; this looks like a mere "does mmap work" probe --
  // confirm the intent upstream.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end, offset, prot;
  // Reusing the buffer 'name'.
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
    // Heuristic: a named, non-special ([stack] etc.) mapping that is
    // readable+executable but not writable, and inside app memory.
    if (name[0] != 0 && name[0] != '['
        && (prot & MemoryMappingLayout::kProtectionRead)
        && (prot & MemoryMappingLayout::kProtectionExecute)
        && !(prot & MemoryMappingLayout::kProtectionWrite)
        && IsAppMem(start)) {
      // Assume it's .rodata
      char *shadow_start = (char*)MemToShadow(start);
      char *shadow_end = (char*)MemToShadow(end);
      // Tile the marker file across the entire shadow of the region.
      for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
        internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}
204
// Platform-specific tail of shadow initialization: pre-marks the shadow of
// read-only mappings with kShadowRodata (see MapRodata above).
void InitializeShadowMemoryPlatform() {
  MapRodata();
}
1a4d82fc 208
7cac9316 209#endif // #if !SANITIZER_GO
1a4d82fc 210
3157f602
XL
// Early platform init (runs before shadow is mapped): detects the virtual
// address space size from the position of the current stack frame and dies
// if the kernel's VMA layout is one tsan has no shadow mapping for.
void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    // vmaSize is an uptr; %zd is the correct conversion (%d would make the
    // varargs reader consume only an int and print garbage on LP64).
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
#elif defined(__powerpc64__)
  if (vmaSize != 44 && vmaSize != 46) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44 and 46\n", vmaSize);
    Die();
  }
#endif
#endif
}
230
92a42be0
SL
// One-time process-level platform setup. May re-exec the process (ReExec)
// when the current resource limits or ASLR settings are incompatible with
// tsan's shadow layout; fixes the limits/personality first so the re-exec'd
// process inherits them.
void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
  if (!SANITIZER_GO) {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
#if SANITIZER_LINUX && defined(__aarch64__)
    // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
    // linux kernel, the random gap between stack and mapped area is increased
    // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
    // this big range, we should disable randomized virtual space on aarch64.
    // personality(0xffffffff) queries the current personality without
    // changing it (see personality(2)).
    int old_personality = personality(0xffffffff);
    if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
      VReport(1, "WARNING: Program is run with randomized virtual address "
                 "space, which wouldn't work with ThreadSanitizer.\n"
                 "Re-execing with fixed virtual address space.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    }
    // Initialize the guard pointer used in {sig}{set,long}jump.
    InitializeGuardPtr();
#endif
    if (reexec)
      ReExec();
  }

#if !SANITIZER_GO
  CheckAndProtect();
  InitTlsSize();
#endif
}
284
7cac9316 285#if !SANITIZER_GO
1a4d82fc
JJ
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
// 'state' is a glibc struct __res_state*; up to 'nfd' socket fds are copied
// into 'fds'. Returns the number of fds written (0 on non-glibc platforms).
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  __res_state *statp = (__res_state*)state;
  // Gracefully handle a null resolver state instead of dereferencing it:
  // there is simply nothing to extract then.
  if (statp == nullptr)
    return 0;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    // nssocks[i] is only meaningful when the corresponding nameserver
    // address slot is populated.
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}
302
// Extract file descriptors passed via UNIX domain sockets.
// This is requried to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
// Walks every SCM_RIGHTS control message in 'msgp' (a msghdr*) and copies
// the carried fds into 'fds', stopping after 'nfd'. Returns the count.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int count = 0;
  msghdr *msg = (msghdr*)msgp;
  for (cmsghdr *cmsg = CMSG_FIRSTHDR(msg); cmsg != 0;
       cmsg = CMSG_NXTHDR(msg, cmsg)) {
    // Only SCM_RIGHTS messages carry file descriptors.
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    // Payload length minus the header gives the fd array size.
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    int *payload = (int*)CMSG_DATA(cmsg);
    for (int i = 0; i < n; i++) {
      fds[count++] = payload[i];
      if (count == nfd)
        return count;
    }
  }
  return count;
}
322
92a42be0
SL
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
// Invokes fn(c, m, abstime) with cleanup(arg) registered as a pthread
// cancellation cleanup handler for the duration of the call, so the handler
// runs if the thread is cancelled inside fn. pthread_cleanup_push/pop must
// stay a textually paired macro pair in the same scope.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  // Pop with execute=0: the handler only runs on cancellation, not here.
  pthread_cleanup_pop(0);
  return res;
}
336#endif
337
#if !SANITIZER_GO
// Intentionally empty on Linux/FreeBSD: malloc is presumably taken over by
// interceptors rather than patched here; kept for cross-platform interface
// parity.
void ReplaceSystemMalloc() { }
#endif
341
7cac9316 342#if !SANITIZER_GO
5bcae85e
SL
#if SANITIZER_ANDROID

// __get_tls(): reads the per-thread pointer register (tpidr_el0 on aarch64,
// %fs:0 on x86_64) and treats it as an array of bionic TLS slots.
#if defined(__aarch64__)
# define __get_tls() \
  ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
#elif defined(__x86_64__)
# define __get_tls() \
  ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
#else
#error unsupported architecture
#endif

// On Android, __thread is not supported. So we store the pointer to ThreadState
// in TLS_SLOT_TSAN, which is the tls slot allocated by Android bionic for tsan.
static const int TLS_SLOT_TSAN = 8;
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;
361
// Returns the calling thread's ThreadState, lazily allocating it on first
// access. The pointer lives in bionic TLS slot TLS_SLOT_TSAN.
ThreadState *cur_thread() {
  ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
  if (thr == nullptr) {
    // Block all signals while initializing the slot. NOTE: despite its name,
    // 'emptyset' is actually a *full* signal set (sigfillset).
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    // Re-read the slot under the blocked mask -- presumably to close a race
    // with a signal handler that may have initialized it meanwhile.
    thr = reinterpret_cast<ThreadState*>(__get_tls()[TLS_SLOT_TSAN]);
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      __get_tls()[TLS_SLOT_TSAN] = thr;
      // Also lazily create the process-wide "dead thread" placeholder state.
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        // The dead state ignores all events/interceptors and carries tid -1;
        // it is then made read-only so stray writes to it fault loudly.
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<int*>(&dead_thread_state->tid) = -1;
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}
389
// Tears down the calling thread's lazily-created ThreadState: swaps the TLS
// slot over to the shared read-only dead_thread_state (so late intercepted
// calls still find a usable state) and unmaps the real one. All signals are
// blocked for the duration ('emptyset' is, despite the name, a full set).
void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
  // Skip the unmap if this thread already runs on the dead placeholder.
  if (thr != dead_thread_state) {
    __get_tls()[TLS_SLOT_TSAN] = dead_thread_state;
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
402#endif // SANITIZER_ANDROID
7cac9316 403#endif // if !SANITIZER_GO
5bcae85e 404
1a4d82fc
JJ
405} // namespace __tsan
406
92a42be0 407#endif // SANITIZER_LINUX || SANITIZER_FREEBSD