//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
                        defined(__aarch64__) || defined(__powerpc64__) || \
                        defined(__s390__))

#include "sanitizer_stoptheworld.h"

#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"

#include <errno.h>
#include <sched.h> // for CLONE_* definitions
#include <stddef.h>
#include <sys/prctl.h> // for PR_* definitions
#include <sys/ptrace.h> // for PTRACE_* definitions
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
#if SANITIZER_ANDROID && defined(__arm__)
# include <linux/user.h>  // for pt_regs
#else
# ifdef __aarch64__
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
#  include <asm/ptrace.h>
# endif
# include <sys/user.h>  // for user_regs_struct
# if SANITIZER_ANDROID && SANITIZER_MIPS
#   include <asm/reg.h>  // for mips SP register in sys/user.h
# endif
#endif
#include <sys/wait.h> // for signal-related stuff

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"

// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
//    deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
//    because clone() does not set up thread-local storage for it. Any
//    thread-local variables used by libc will be shared between the tracer
//    task and the thread which spawned it.

namespace __sanitizer {

COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));

// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  BlockingMutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  uptr parent_pid;
};

// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
    : arg(arg)
    , pid_(pid) {
      CHECK_GE(pid, 0);
    }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsList &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsList suspended_threads_list_;
  pid_t pid_;
  bool SuspendThread(SuspendedThreadID thread_id);
};

bool ThreadSuspender::SuspendThread(SuspendedThreadID tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.Contains(tid))
    return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %d (errno %d).\n", tid, pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %d.\n", tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In that case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for stopping, because the thread is not stopped yet.
    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
    // as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got an ECHILD error. I don't think this situation is possible, but
        // it doesn't hurt to report it.
        VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
                tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void*)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}

void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
    }
  }
}

void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}

bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool added_threads;
  bool first_iteration = true;
  do {
    // Run through the directory entries once.
    added_threads = false;
    pid_t tid = thread_lister.GetNextTID();
    while (tid >= 0) {
      if (SuspendThread(tid))
        added_threads = true;
      tid = thread_lister.GetNextTID();
    }
    if (thread_lister.error() || (first_iteration && !added_threads)) {
      // Detach threads and fail.
      ResumeAllThreads();
      return false;
    }
    thread_lister.Reset();
    first_iteration = false;
  } while (added_threads);
  return true;
}

// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };

static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}

// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) {
  SignalContext ctx = SignalContext::Create(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}

// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 4096;

// This function will be run as a cloned task.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

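  // PR_SET_PDEATHSIG asks the kernel to deliver SIGKILL to this task when its
  // parent dies, so the tracer cannot outlive the process it is tracing.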
  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));

  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
  struct sigaltstack handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);

  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}

class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
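  // The guard page occupies the low end of the mapping; the usable stack sits
  // above it and grows downward, so the address handed to clone() as the
  // initial stack pointer is the high end of the allocation.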
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};

// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;

class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }

  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }

 private:
  int process_was_dumpable_;
};

// When sanitizer output is being redirected to a file (e.g. by using
// log_path), the tracer should write to the parent's log instead of trying to
// open a new file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};

void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on Linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow blocking some signals used internally in the pthread
  // implementation. We are fine with blocking them here; we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we set up a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with the user program. Double note:
  // if a user does something along the lines of 'kill -11 pid', that can kill
  // the process even if the user set up their own handler for SEGV.
  // Thing to watch out for: this code should not change the behavior of user
  // code in any observable way. In particular it should not override user
  // signal handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
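  // Note: CLONE_THREAD and CLONE_SIGHAND are deliberately omitted below. The
  // tracer must live in its own thread group so that ptrace can attach to the
  // caller's threads, and it must not share signal handlers with the caller
  // (see the signal handling story above and the comment at the top of this
  // file).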
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
#ifdef PR_SET_PTRACER
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
#endif
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. The man page for sched_yield() says "In the
    // Linux implementation, sched_yield() always succeeds", so let's hope it
    // does not spoil errno. Note that this spin loop runs only for brief
    // periods before the tracer thread has suspended us and when it starts
    // unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}

// Platform-specific methods from SuspendedThreadsList.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
#  define REG_SP regs[EF_R29]
# else
#  define REG_SP regs[EF_REG29]
# endif

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)

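// On architectures that define ARCH_IOVEC_FOR_GETREGSET the registers are read
// with PTRACE_GETREGSET and an explicit NT_PRSTATUS iovec; elsewhere the older
// PTRACE_GETREGS request is used.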
int SuspendedThreadsList::GetRegistersAndSP(uptr index,
                                            uptr *buffer,
                                            uptr *sp) const {
  pid_t tid = GetThreadID(index);
  regs_struct regs;
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
  struct iovec regset_io;
  regset_io.iov_base = &regs;
  regset_io.iov_len = sizeof(regs_struct);
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                (void*)NT_PRSTATUS, (void*)&regset_io),
                                &pterrno);
#else
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
                                &regs), &pterrno);
#endif
  if (isErr) {
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
    return -1;
  }

  *sp = regs.REG_SP;
  internal_memcpy(buffer, &regs, sizeof(regs));
  return 0;
}

uptr SuspendedThreadsList::RegisterCount() {
  return sizeof(regs_struct) / sizeof(uptr);
}
} // namespace __sanitizer

#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
       // || defined(__aarch64__) || defined(__powerpc64__)
       // || defined(__s390__)