1 //===-- sanitizer_win.cc --------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is shared between AddressSanitizer and ThreadSanitizer
11 // run-time libraries and implements windows-specific functions from
12 // sanitizer_libc.h.
13 //===----------------------------------------------------------------------===//
14
15 #include "sanitizer_platform.h"
16 #if SANITIZER_WINDOWS
17
18 #define WIN32_LEAN_AND_MEAN
19 #define NOGDI
20 #include <windows.h>
21 #include <dbghelp.h>
22 #include <io.h>
23 #include <psapi.h>
24 #include <stdlib.h>
25
26 #include "sanitizer_common.h"
27 #include "sanitizer_libc.h"
28 #include "sanitizer_mutex.h"
29 #include "sanitizer_placement_new.h"
30 #include "sanitizer_stacktrace.h"
31 #include "sanitizer_symbolizer.h"
32
33 namespace __sanitizer {
34
35 #include "sanitizer_syscall_generic.inc"
36
37 // --------------------- sanitizer_common.h
38 uptr GetPageSize() {
39 SYSTEM_INFO si;
40 GetSystemInfo(&si);
41 return si.dwPageSize;
42 }
43
44 uptr GetMmapGranularity() {
45 SYSTEM_INFO si;
46 GetSystemInfo(&si);
47 return si.dwAllocationGranularity;
48 }
49
50 uptr GetMaxVirtualAddress() {
51 SYSTEM_INFO si;
52 GetSystemInfo(&si);
53 return (uptr)si.lpMaximumApplicationAddress;
54 }
55
56 bool FileExists(const char *filename) {
57 return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
58 }
59
60 uptr internal_getpid() {
61 return GetProcessId(GetCurrentProcess());
62 }
63
64 // In contrast to POSIX, on Windows GetCurrentThreadId()
65 // returns a system-unique identifier.
66 uptr GetTid() {
67 return GetCurrentThreadId();
68 }
69
70 uptr GetThreadSelf() {
71 return GetTid();
72 }
73
74 #if !SANITIZER_GO
75 void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
76 uptr *stack_bottom) {
77 CHECK(stack_top);
78 CHECK(stack_bottom);
79 MEMORY_BASIC_INFORMATION mbi;
80 CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
81 // FIXME: is it possible for the stack to not be a single allocation?
82 // Are these values what ASan expects to get (reserved, not committed;
83 // including the stack guard page)?
84 *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
85 *stack_bottom = (uptr)mbi.AllocationBase;
86 }
87 #endif // #if !SANITIZER_GO
88
89 void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
90 void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
91 if (rv == 0)
92 ReportMmapFailureAndDie(size, mem_type, "allocate",
93 GetLastError(), raw_report);
94 return rv;
95 }
96
97 void UnmapOrDie(void *addr, uptr size) {
98 if (!size || !addr)
99 return;
100
101 MEMORY_BASIC_INFORMATION mbi;
102 CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
103
104 // MEM_RELEASE can only be used to unmap whole regions previously mapped with
105 // VirtualAlloc. So we first try MEM_RELEASE, which frees the whole region,
106 // and if that fails fall back to MEM_DECOMMIT.
107 if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
108 if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
109 Report("ERROR: %s failed to "
110 "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
111 SanitizerToolName, size, size, addr, GetLastError());
112 CHECK("unable to unmap" && 0);
113 }
114 }
115 }
116
117 // We want to map a chunk of address space aligned to 'alignment'.
118 void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
119 CHECK(IsPowerOfTwo(size));
120 CHECK(IsPowerOfTwo(alignment));
121
122 // Windows will align our allocations to at least 64K.
123 alignment = Max(alignment, GetMmapGranularity());
124
125 uptr mapped_addr =
126 (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
127 if (!mapped_addr)
128 ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
129
130 // If we got it right on the first try, return. Otherwise, unmap it and go to
131 // the slow path.
132 if (IsAligned(mapped_addr, alignment))
133 return (void*)mapped_addr;
134 if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
135 ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
136
137 // If we didn't get an aligned address, overallocate, find an aligned address,
138 // unmap, and try to allocate at that aligned address.
139 int retries = 0;
140 const int kMaxRetries = 10;
141 for (; retries < kMaxRetries &&
142 (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
143 retries++) {
144 // Overallocate size + alignment bytes.
145 mapped_addr =
146 (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
147 if (!mapped_addr)
148 ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
149 GetLastError());
150
151 // Find the aligned address.
152 uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
153
154 // Free the overallocation.
155 if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
156 ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
157
158 // Attempt to allocate exactly the number of bytes we need at the aligned
159 // address. This may fail for a number of reasons, in which case we continue
160 // the loop.
161 mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
162 MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
163 }
164
165 // Fail if we can't make this work quickly.
166 if (retries == kMaxRetries && mapped_addr == 0)
167 ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
168
169 return (void *)mapped_addr;
170 }
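
// Illustrative sketch, excluded from the build: how a caller might request a
// mapping with a larger-than-page alignment and check the guarantee described
// above. The constants and the function name are hypothetical examples, not
// values used by any sanitizer.
#if 0
static void ExampleAlignedMap() {
  const uptr kExampleSize = 1 << 20;       // 1 MB; must be a power of two.
  const uptr kExampleAlignment = 1 << 18;  // 256 KB; also a power of two.
  void *p = MmapAlignedOrDie(kExampleSize, kExampleAlignment, "example");
  // The result is aligned to at least max(alignment, 64K granularity).
  CHECK(IsAligned((uptr)p, kExampleAlignment));
  UnmapOrDie(p, kExampleSize);
}
#endif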
171
172 void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
173 // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
174 // but on Win64 it does.
175 (void)name; // unsupported
176 #if SANITIZER_WINDOWS64
177 // On Windows64, using MEM_COMMIT would result in error
178 // 1455 (ERROR_COMMITMENT_LIMIT), so we only reserve here and rely on an
179 // exception handler to commit pages on demand.
180 void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
181 #else
182 void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
183 PAGE_READWRITE);
184 #endif
185 if (p == 0)
186 Report("ERROR: %s failed to "
187 "allocate %p (%zd) bytes at %p (error code: %d)\n",
188 SanitizerToolName, size, size, fixed_addr, GetLastError());
189 return p;
190 }
191
192 // Memory space mapped by 'MmapFixedOrDie' must have been reserved by
193 // 'MmapFixedNoAccess'.
194 void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
195 void *p = VirtualAlloc((LPVOID)fixed_addr, size,
196 MEM_COMMIT, PAGE_READWRITE);
197 if (p == 0) {
198 char mem_type[30];
199 internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
200 fixed_addr);
201 ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
202 }
203 return p;
204 }
205
206 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
207 // FIXME: make this really NoReserve?
208 return MmapOrDie(size, mem_type);
209 }
210
211 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
212 (void)name; // unsupported
213 void *res = VirtualAlloc((LPVOID)fixed_addr, size,
214 MEM_RESERVE, PAGE_NOACCESS);
215 if (res == 0)
216 Report("WARNING: %s failed to "
217 "mprotect %p (%zd) bytes at %p (error code: %d)\n",
218 SanitizerToolName, size, size, fixed_addr, GetLastError());
219 return res;
220 }
221
222 void *MmapNoAccess(uptr size) {
223 // FIXME: unsupported.
224 return nullptr;
225 }
226
227 bool MprotectNoAccess(uptr addr, uptr size) {
228 DWORD old_protection;
229 return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
230 }
231
232
233 void FlushUnneededShadowMemory(uptr addr, uptr size) {
234 // This is almost useless on 32-bit targets.
235 // FIXME: add a madvise analog when we move to 64-bit.
236 }
237
238 void NoHugePagesInRegion(uptr addr, uptr size) {
239 // FIXME: probably similar to FlushUnneededShadowMemory.
240 }
241
242 void DontDumpShadowMemory(uptr addr, uptr length) {
243 // This is almost useless on 32-bit targets.
244 // FIXME: add a madvise analog when we move to 64-bit.
245 }
246
247 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
248 MEMORY_BASIC_INFORMATION mbi;
249 CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
250 return mbi.Protect == PAGE_NOACCESS &&
251 (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
252 }
253
254 void *MapFileToMemory(const char *file_name, uptr *buff_size) {
255 UNIMPLEMENTED();
256 }
257
258 void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
259 UNIMPLEMENTED();
260 }
261
262 static const int kMaxEnvNameLength = 128;
263 static const DWORD kMaxEnvValueLength = 32767;
264
265 namespace {
266
267 struct EnvVariable {
268 char name[kMaxEnvNameLength];
269 char value[kMaxEnvValueLength];
270 };
271
272 } // namespace
273
274 static const int kEnvVariables = 5;
275 static EnvVariable env_vars[kEnvVariables];
276 static int num_env_vars;
277
278 const char *GetEnv(const char *name) {
279 // Note: this implementation caches the values of the environment variables
280 // and limits their quantity.
281 for (int i = 0; i < num_env_vars; i++) {
282 if (0 == internal_strcmp(name, env_vars[i].name))
283 return env_vars[i].value;
284 }
285 CHECK_LT(num_env_vars, kEnvVariables);
286 DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
287 kMaxEnvValueLength);
288 if (rv > 0 && rv < kMaxEnvValueLength) {
289 CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
290 internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
291 num_env_vars++;
292 return env_vars[num_env_vars - 1].value;
293 }
294 return 0;
295 }
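
// Illustrative sketch, excluded from the build: the caching behavior of
// GetEnv() above. The environment variable name and function name are
// hypothetical examples.
#if 0
static void ExampleGetEnv() {
  const char *first = GetEnv("EXAMPLE_OPTIONS");   // queries the OS once
  const char *second = GetEnv("EXAMPLE_OPTIONS");  // served from the cache
  // A present variable yields the same internal buffer both times; a missing
  // one yields null both times.
  CHECK(first == second);
}
#endif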
296
297 const char *GetPwd() {
298 UNIMPLEMENTED();
299 }
300
301 u32 GetUid() {
302 UNIMPLEMENTED();
303 }
304
305 namespace {
306 struct ModuleInfo {
307 const char *filepath;
308 uptr base_address;
309 uptr end_address;
310 };
311
312 #ifndef SANITIZER_GO
313 int CompareModulesBase(const void *pl, const void *pr) {
314 const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
315 if (l->base_address < r->base_address)
316 return -1;
317 return l->base_address > r->base_address;
318 }
319 #endif
320 } // namespace
321
322 #ifndef SANITIZER_GO
323 void DumpProcessMap() {
324 Report("Dumping process modules:\n");
325 ListOfModules modules;
326 modules.init();
327 uptr num_modules = modules.size();
328
329 InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
330 for (size_t i = 0; i < num_modules; ++i) {
331 module_infos[i].filepath = modules[i].full_name();
332 module_infos[i].base_address = modules[i].base_address();
333 module_infos[i].end_address = modules[i].ranges().front()->end;
334 }
335 qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
336 CompareModulesBase);
337
338 for (size_t i = 0; i < num_modules; ++i) {
339 const ModuleInfo &mi = module_infos[i];
340 if (mi.end_address != 0) {
341 Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
342 mi.filepath[0] ? mi.filepath : "[no name]");
343 } else if (mi.filepath[0]) {
344 Printf("\t??\?-??? %s\n", mi.filepath);
345 } else {
346 Printf("\t???\n");
347 }
348 }
349 }
350 #endif
351
352 void DisableCoreDumperIfNecessary() {
353 // Do nothing.
354 }
355
356 void ReExec() {
357 UNIMPLEMENTED();
358 }
359
360 void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
361 #if !SANITIZER_GO
362 CovPrepareForSandboxing(args);
363 #endif
364 }
365
366 bool StackSizeIsUnlimited() {
367 UNIMPLEMENTED();
368 }
369
370 void SetStackSizeLimitInBytes(uptr limit) {
371 UNIMPLEMENTED();
372 }
373
374 bool AddressSpaceIsUnlimited() {
375 UNIMPLEMENTED();
376 }
377
378 void SetAddressSpaceUnlimited() {
379 UNIMPLEMENTED();
380 }
381
382 bool IsPathSeparator(const char c) {
383 return c == '\\' || c == '/';
384 }
385
386 bool IsAbsolutePath(const char *path) {
387 UNIMPLEMENTED();
388 }
389
390 void SleepForSeconds(int seconds) {
391 Sleep(seconds * 1000);
392 }
393
394 void SleepForMillis(int millis) {
395 Sleep(millis);
396 }
397
398 u64 NanoTime() {
399 return 0;
400 }
401
402 void Abort() {
403 if (::IsDebuggerPresent())
404 __debugbreak();
405 internal__exit(3);
406 }
407
408 #ifndef SANITIZER_GO
409 // Read the file to extract the ImageBase field from the PE header. If ASLR is
410 // disabled and this virtual address is available, the loader will typically
411 // load the image at this address. Therefore, we call it the preferred base. Any
412 // addresses in the DWARF typically assume that the object has been loaded at
413 // this address.
414 static uptr GetPreferredBase(const char *modname) {
415 fd_t fd = OpenFile(modname, RdOnly, nullptr);
416 if (fd == kInvalidFd)
417 return 0;
418 FileCloser closer(fd);
419
420 // Read just the DOS header.
421 IMAGE_DOS_HEADER dos_header;
422 uptr bytes_read;
423 if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
424 bytes_read != sizeof(dos_header))
425 return 0;
426
427 // The file should start with the right signature.
428 if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
429 return 0;
430
431 // The layout at e_lfanew is:
432 // "PE\0\0"
433 // IMAGE_FILE_HEADER
434 // IMAGE_OPTIONAL_HEADER
435 // Seek to e_lfanew and read all that data.
436 char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
437 if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
438 INVALID_SET_FILE_POINTER)
439 return 0;
440 if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
441 bytes_read != sizeof(buf))
442 return 0;
443
444 // Check for "PE\0\0" before the PE header.
445 char *pe_sig = &buf[0];
446 if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
447 return 0;
448
449 // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
450 IMAGE_OPTIONAL_HEADER *pe_header =
451 (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));
452
453 // Check for more magic in the PE header.
454 if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
455 return 0;
456
457 // Finally, return the ImageBase.
458 return (uptr)pe_header->ImageBase;
459 }
460
461 void ListOfModules::init() {
462 clear();
463 HANDLE cur_process = GetCurrentProcess();
464
465 // Query the list of modules. Start by assuming there are no more than 256
466 // modules and retry if that's not sufficient.
467 HMODULE *hmodules = 0;
468 uptr modules_buffer_size = sizeof(HMODULE) * 256;
469 DWORD bytes_required;
470 while (!hmodules) {
471 hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
472 CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
473 &bytes_required));
474 if (bytes_required > modules_buffer_size) {
475 // Either there turned out to be more than 256 hmodules, or new hmodules
476 // could have been loaded since the last try. Retry.
477 UnmapOrDie(hmodules, modules_buffer_size);
478 hmodules = 0;
479 modules_buffer_size = bytes_required;
480 }
481 }
482
483 // |num_modules| is the number of modules actually present.
484 size_t num_modules = bytes_required / sizeof(HMODULE);
485 for (size_t i = 0; i < num_modules; ++i) {
486 HMODULE handle = hmodules[i];
487 MODULEINFO mi;
488 if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
489 continue;
490
491 // Get the UTF-16 path and convert to UTF-8.
492 wchar_t modname_utf16[kMaxPathLength];
493 int modname_utf16_len =
494 GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
495 if (modname_utf16_len == 0)
496 modname_utf16[0] = '\0';
497 char module_name[kMaxPathLength];
498 int module_name_len =
499 ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
500 &module_name[0], kMaxPathLength, NULL, NULL);
501 module_name[module_name_len] = '\0';
502
503 uptr base_address = (uptr)mi.lpBaseOfDll;
504 uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
505
506 // Adjust the base address of the module so that we get a VA instead of an
507 // RVA when computing the module offset. This helps llvm-symbolizer find the
508 // right DWARF CU. In the common case that the image is loaded at its
509 // preferred address, we will now print normal virtual addresses.
510 uptr preferred_base = GetPreferredBase(&module_name[0]);
511 uptr adjusted_base = base_address - preferred_base;
512
513 LoadedModule cur_module;
514 cur_module.set(module_name, adjusted_base);
515 // We add the whole module as one single address range.
516 cur_module.addAddressRange(base_address, end_address, /*executable*/ true);
517 modules_.push_back(cur_module);
518 }
519 UnmapOrDie(hmodules, modules_buffer_size);
520 }
521
522 // We can't use atexit() directly at __asan_init time as the CRT is not fully
523 // initialized at this point. Place the functions into a vector and use
524 // atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
525 InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
526
527 int Atexit(void (*function)(void)) {
528 atexit_functions.push_back(function);
529 return 0;
530 }
531
532 static int RunAtexit() {
533 int ret = 0;
534 for (uptr i = 0; i < atexit_functions.size(); ++i) {
535 ret |= atexit(atexit_functions[i]);
536 }
537 return ret;
538 }
539
540 #pragma section(".CRT$XID", long, read) // NOLINT
541 __declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
542 #endif
543
544 // ------------------ sanitizer_libc.h
545 fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
546 // FIXME: Use the wide variants to handle Unicode filenames.
547 fd_t res;
548 if (mode == RdOnly) {
549 res = CreateFileA(filename, GENERIC_READ,
550 FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
551 nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
552 } else if (mode == WrOnly) {
553 res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
554 FILE_ATTRIBUTE_NORMAL, nullptr);
555 } else {
556 UNIMPLEMENTED();
557 }
558 CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
559 CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
560 if (res == kInvalidFd && last_error)
561 *last_error = GetLastError();
562 return res;
563 }
564
565 void CloseFile(fd_t fd) {
566 CloseHandle(fd);
567 }
568
569 bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
570 error_t *error_p) {
571 CHECK(fd != kInvalidFd);
572
573 // bytes_read can't be passed directly to ReadFile:
574 // uptr is unsigned long long on 64-bit Windows.
575 unsigned long num_read_long;
576
577 bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
578 if (!success && error_p)
579 *error_p = GetLastError();
580 if (bytes_read)
581 *bytes_read = num_read_long;
582 return success;
583 }
584
585 bool SupportsColoredOutput(fd_t fd) {
586 // FIXME: support colored output.
587 return false;
588 }
589
590 bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
591 error_t *error_p) {
592 CHECK(fd != kInvalidFd);
593
594 // Handle null optional parameters.
595 error_t dummy_error;
596 error_p = error_p ? error_p : &dummy_error;
597 uptr dummy_bytes_written;
598 bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;
599
600 // Initialize output parameters in case we fail.
601 *error_p = 0;
602 *bytes_written = 0;
603
604 // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
605 // closed, in which case this will fail.
606 if (fd == kStdoutFd || fd == kStderrFd) {
607 fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
608 if (fd == 0) {
609 *error_p = ERROR_INVALID_HANDLE;
610 return false;
611 }
612 }
613
614 DWORD bytes_written_32;
615 if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
616 *error_p = GetLastError();
617 return false;
618 } else {
619 *bytes_written = bytes_written_32;
620 return true;
621 }
622 }
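
// Illustrative sketch, excluded from the build: writing a line through the
// fd-to-HANDLE mapping above, roughly the way ReportFile::Write later in this
// file emits report output. The message text and function name are
// hypothetical examples.
#if 0
static void ExampleWriteToStderr() {
  const char *msg = "example diagnostic\n";
  uptr written = 0;
  error_t err = 0;
  if (!WriteToFile(kStderrFd, msg, internal_strlen(msg), &written, &err)) {
    // err holds the Windows error code, e.g. ERROR_INVALID_HANDLE when the
    // process has no standard error handle.
  }
}
#endif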
623
624 bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
625 UNIMPLEMENTED();
626 }
627
628 uptr internal_sched_yield() {
629 Sleep(0);
630 return 0;
631 }
632
633 void internal__exit(int exitcode) {
634 ExitProcess(exitcode);
635 }
636
637 uptr internal_ftruncate(fd_t fd, uptr size) {
638 UNIMPLEMENTED();
639 }
640
641 uptr GetRSS() {
642 return 0;
643 }
644
645 void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
646 void internal_join_thread(void *th) { }
647
648 // ---------------------- BlockingMutex ---------------- {{{1
649 const uptr LOCK_UNINITIALIZED = 0;
650 const uptr LOCK_READY = (uptr)-1;
651
652 BlockingMutex::BlockingMutex(LinkerInitialized li) {
653 // FIXME: see comments in BlockingMutex::Lock() for the details.
654 CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
655
656 CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
657 InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
658 owner_ = LOCK_READY;
659 }
660
661 BlockingMutex::BlockingMutex() {
662 CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
663 InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
664 owner_ = LOCK_READY;
665 }
666
667 void BlockingMutex::Lock() {
668 if (owner_ == LOCK_UNINITIALIZED) {
669 // FIXME: hm, global BlockingMutex objects are not initialized?!?
670 // This might be a side effect of the clang+cl+link Frankenbuild...
671 new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
672
673 // FIXME: If it turns out the linker doesn't invoke our
674 // constructors, we should probably manually Lock/Unlock all the global
675 // locks while we're starting in one thread to avoid double-init races.
676 }
677 EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
678 CHECK_EQ(owner_, LOCK_READY);
679 owner_ = GetThreadSelf();
680 }
681
682 void BlockingMutex::Unlock() {
683 CHECK_EQ(owner_, GetThreadSelf());
684 owner_ = LOCK_READY;
685 LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
686 }
687
688 void BlockingMutex::CheckLocked() {
689 CHECK_EQ(owner_, GetThreadSelf());
690 }
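
// Illustrative sketch, excluded from the build: the usual scoped-lock pattern
// for this mutex, assuming the BlockingMutexLock typedef from
// sanitizer_mutex.h. The mutex and counter below are hypothetical examples.
#if 0
static BlockingMutex example_mu(LINKER_INITIALIZED);
static int example_counter;

static void ExampleLockedIncrement() {
  BlockingMutexLock lock(&example_mu);  // EnterCriticalSection under the hood
  example_mu.CheckLocked();
  example_counter++;
}  // LeaveCriticalSection when |lock| goes out of scope
#endif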
691
692 uptr GetTlsSize() {
693 return 0;
694 }
695
696 void InitTlsSize() {
697 }
698
699 void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
700 uptr *tls_addr, uptr *tls_size) {
701 #ifdef SANITIZER_GO
702 *stk_addr = 0;
703 *stk_size = 0;
704 *tls_addr = 0;
705 *tls_size = 0;
706 #else
707 uptr stack_top, stack_bottom;
708 GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
709 *stk_addr = stack_bottom;
710 *stk_size = stack_top - stack_bottom;
711 *tls_addr = 0;
712 *tls_size = 0;
713 #endif
714 }
715
716 #if !SANITIZER_GO
717 void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
718 CHECK_GE(max_depth, 2);
719 // FIXME: CaptureStackBackTrace might be too slow for us.
720 // FIXME: Compare with StackWalk64.
721 // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
722 size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),
723 (void**)trace, 0);
724 if (size == 0)
725 return;
726
727 // Skip the RTL frames by searching for the PC in the stacktrace.
728 uptr pc_location = LocatePcInTrace(pc);
729 PopStackFrames(pc_location);
730 }
731
732 void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
733 u32 max_depth) {
734 CONTEXT ctx = *(CONTEXT *)context;
735 STACKFRAME64 stack_frame;
736 memset(&stack_frame, 0, sizeof(stack_frame));
737
738 size = 0;
739 #if defined(_WIN64)
740 int machine_type = IMAGE_FILE_MACHINE_AMD64;
741 stack_frame.AddrPC.Offset = ctx.Rip;
742 stack_frame.AddrFrame.Offset = ctx.Rbp;
743 stack_frame.AddrStack.Offset = ctx.Rsp;
744 #else
745 int machine_type = IMAGE_FILE_MACHINE_I386;
746 stack_frame.AddrPC.Offset = ctx.Eip;
747 stack_frame.AddrFrame.Offset = ctx.Ebp;
748 stack_frame.AddrStack.Offset = ctx.Esp;
749 #endif
750 stack_frame.AddrPC.Mode = AddrModeFlat;
751 stack_frame.AddrFrame.Mode = AddrModeFlat;
752 stack_frame.AddrStack.Mode = AddrModeFlat;
753 while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
754 &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
755 &SymGetModuleBase64, NULL) &&
756 size < Min(max_depth, kStackTraceMax)) {
757 trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
758 }
759 }
760 #endif // #if !SANITIZER_GO
761
762 void ReportFile::Write(const char *buffer, uptr length) {
763 SpinMutexLock l(mu);
764 ReopenIfNecessary();
765 if (!WriteToFile(fd, buffer, length)) {
766 // stderr may be closed, but we may be able to print to the debugger
767 // instead. This is the case when launching a program from Visual Studio,
768 // and the following routine should write to its console.
769 OutputDebugStringA(buffer);
770 }
771 }
772
773 void SetAlternateSignalStack() {
774 // FIXME: Decide what to do on Windows.
775 }
776
777 void UnsetAlternateSignalStack() {
778 // FIXME: Decide what to do on Windows.
779 }
780
781 void InstallDeadlySignalHandlers(SignalHandlerType handler) {
782 (void)handler;
783 // FIXME: Decide what to do on Windows.
784 }
785
786 bool IsHandledDeadlySignal(int signum) {
787 // FIXME: Decide what to do on Windows.
788 return false;
789 }
790
791 bool IsAccessibleMemoryRange(uptr beg, uptr size) {
792 SYSTEM_INFO si;
793 GetNativeSystemInfo(&si);
794 uptr page_size = si.dwPageSize;
795 uptr page_mask = ~(page_size - 1);
796
797 for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
798 page <= end;) {
799 MEMORY_BASIC_INFORMATION info;
800 if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
801 return false;
802
803 if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
804 info.Protect == PAGE_EXECUTE)
805 return false;
806
807 if (info.RegionSize == 0)
808 return false;
809
810 page += info.RegionSize;
811 }
812
813 return true;
814 }
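
// Illustrative sketch, excluded from the build: probing a range before reading
// it, e.g. when dumping memory near a fault address. The parameters and
// function name are hypothetical examples.
#if 0
static void ExampleProbe(uptr addr, uptr n) {
  if (!IsAccessibleMemoryRange(addr, n)) {
    // At least one page in [addr, addr + n) is unmapped or not readable.
    return;
  }
  // All pages covering [addr, addr + n) are mapped with readable protection.
}
#endif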
815
816 SignalContext SignalContext::Create(void *siginfo, void *context) {
817 EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
818 CONTEXT *context_record = (CONTEXT *)context;
819
820 uptr pc = (uptr)exception_record->ExceptionAddress;
821 #ifdef _WIN64
822 uptr bp = (uptr)context_record->Rbp;
823 uptr sp = (uptr)context_record->Rsp;
824 #else
825 uptr bp = (uptr)context_record->Ebp;
826 uptr sp = (uptr)context_record->Esp;
827 #endif
828 uptr access_addr = exception_record->ExceptionInformation[1];
829
830 // The contents of this array are documented at
831 // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
832 // The first element indicates read as 0, write as 1, or execute as 8. The
833 // second element is the faulting address.
834 WriteFlag write_flag = SignalContext::UNKNOWN;
835 switch (exception_record->ExceptionInformation[0]) {
836 case 0: write_flag = SignalContext::READ; break;
837 case 1: write_flag = SignalContext::WRITE; break;
838 case 8: write_flag = SignalContext::UNKNOWN; break;
839 }
840 bool is_memory_access = write_flag != SignalContext::UNKNOWN;
841 return SignalContext(context, access_addr, pc, sp, bp, is_memory_access,
842 write_flag);
843 }
844
845 uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
846 // FIXME: Actually implement this function.
847 CHECK_GT(buf_len, 0);
848 buf[0] = 0;
849 return 0;
850 }
851
852 uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
853 return ReadBinaryName(buf, buf_len);
854 }
855
856 void CheckVMASize() {
857 // Do nothing.
858 }
859
860 void MaybeReexec() {
861 // No need to re-exec on Windows.
862 }
863
864 char **GetArgv() {
865 // FIXME: Actually implement this function.
866 return 0;
867 }
868
869 pid_t StartSubprocess(const char *program, const char *const argv[],
870 fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
871 // FIXME: implement on this platform
872 // Should be implemented based on
873 // SymbolizerProcess::StartSymbolizerSubprocess
874 // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
875 return -1;
876 }
877
878 bool IsProcessRunning(pid_t pid) {
879 // FIXME: implement on this platform.
880 return false;
881 }
882
883 int WaitForProcess(pid_t pid) { return -1; }
884
885 } // namespace __sanitizer
886
887 #endif // SANITIZER_WINDOWS