use crate::cmp;
use crate::ffi::CStr;
use crate::io;
use crate::mem;
use crate::num::NonZeroUsize;
use crate::ptr;
use crate::sys::{os, stack_overflow};
use crate::time::Duration;

#[cfg(all(target_os = "linux", target_env = "gnu"))]
use crate::sys::weak::dlsym;
#[cfg(any(target_os = "solaris", target_os = "illumos"))]
use crate::sys::weak::weak;
#[cfg(not(any(target_os = "l4re", target_os = "vxworks", target_os = "espidf")))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
#[cfg(target_os = "l4re")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
#[cfg(target_os = "vxworks")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
#[cfg(target_os = "espidf")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF menuconfig system should be used

#[cfg(target_os = "fuchsia")]
mod zircon {
    type zx_handle_t = u32;
    type zx_status_t = i32;
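    // Zircon object property used to get or set an object's name.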
    pub const ZX_PROP_NAME: u32 = 3;

    extern "C" {
        pub fn zx_object_set_property(
            handle: zx_handle_t,
            property: u32,
            value: *const libc::c_void,
            value_size: libc::size_t,
        ) -> zx_status_t;
        pub fn zx_thread_self() -> zx_handle_t;
    }
}

pub struct Thread {
    id: libc::pthread_t,
}

// Some platforms may have pthread_t as a pointer, in which case we still want
// a thread to be Send/Sync.
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}

impl Thread {
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
        let p = Box::into_raw(box p);
        let mut native: libc::pthread_t = mem::zeroed();
        let mut attr: libc::pthread_attr_t = mem::zeroed();
        assert_eq!(libc::pthread_attr_init(&mut attr), 0);

        #[cfg(target_os = "espidf")]
        if stack > 0 {
            // Only set the stack if a non-zero value is passed
            // 0 is used as an indication that the default stack size configured in the ESP-IDF menuconfig system should be used
            assert_eq!(
                libc::pthread_attr_setstacksize(&mut attr, cmp::max(stack, min_stack_size(&attr))),
                0
            );
        }

        #[cfg(not(target_os = "espidf"))]
        {
            let stack_size = cmp::max(stack, min_stack_size(&attr));

            match libc::pthread_attr_setstacksize(&mut attr, stack_size) {
                0 => {}
                n => {
                    assert_eq!(n, libc::EINVAL);
                    // EINVAL means |stack_size| is either too small or not a
                    // multiple of the system page size. Because it's definitely
                    // >= PTHREAD_STACK_MIN, it must be an alignment issue.
                    // Round up to the nearest page and try again.
                    let page_size = os::page_size();
                    let stack_size =
                        (stack_size + page_size - 1) & !(page_size - 1);
                    assert_eq!(libc::pthread_attr_setstacksize(&mut attr, stack_size), 0);
                }
            };
        }

        let ret = libc::pthread_create(&mut native, &attr, thread_start, p as *mut _);
        // Note: if the thread creation fails and this assert fails, then p will
        // be leaked. However, an alternative design could cause double-free
        // which is clearly worse.
        assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);

        return if ret != 0 {
            // The thread failed to start and as a result p was not consumed. Therefore, it is
            // safe to reconstruct the box so that it gets deallocated.
            drop(Box::from_raw(p));
            Err(io::Error::from_raw_os_error(ret))
        } else {
            Ok(Thread { id: native })
        };

        extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
            unsafe {
                // Next, set up our stack overflow handler which may get triggered if we run
                // out of stack.
                let _handler = stack_overflow::Handler::new();
                // Finally, let's run some code.
                Box::from_raw(main as *mut Box<dyn FnOnce()>)();
            }
            ptr::null_mut()
        }
    }

    pub fn yield_now() {
        let ret = unsafe { libc::sched_yield() };
        debug_assert_eq!(ret, 0);
    }

    #[cfg(target_os = "android")]
    pub fn set_name(name: &CStr) {
        const PR_SET_NAME: libc::c_int = 15;
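        // prctl(PR_SET_NAME) silently truncates the name to TASK_COMM_LEN (16) bytes,
        // including the nul terminator.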
        unsafe {
            libc::prctl(
                PR_SET_NAME,
                name.as_ptr(),
                0 as libc::c_ulong,
                0 as libc::c_ulong,
                0 as libc::c_ulong,
            );
        }
    }

    #[cfg(target_os = "linux")]
    pub fn set_name(name: &CStr) {
        const TASK_COMM_LEN: usize = 16;

        unsafe {
            // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20.
            let name = truncate_cstr(name, TASK_COMM_LEN);
            libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
        }
    }

    #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd"))]
    pub fn set_name(name: &CStr) {
        unsafe {
            libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
        }
    }

    #[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))]
    pub fn set_name(name: &CStr) {
        unsafe {
            let name = truncate_cstr(name, libc::MAXTHREADNAMESIZE);
            libc::pthread_setname_np(name.as_ptr());
        }
    }

    #[cfg(target_os = "netbsd")]
    pub fn set_name(name: &CStr) {
        use crate::ffi::CString;
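        // NetBSD's pthread_setname_np takes a printf-like format string plus one
        // argument; pass the name through "%s" so it is never interpreted as a format.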
        let cname = CString::new(&b"%s"[..]).unwrap();
        unsafe {
            libc::pthread_setname_np(
                libc::pthread_self(),
                cname.as_ptr(),
                name.as_ptr() as *mut libc::c_void,
            );
        }
    }

    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    pub fn set_name(name: &CStr) {
        weak! {
            fn pthread_setname_np(
                libc::pthread_t, *const libc::c_char
            ) -> libc::c_int
        }

        if let Some(f) = pthread_setname_np.get() {
            unsafe {
                f(libc::pthread_self(), name.as_ptr());
            }
        }
    }

    #[cfg(target_os = "fuchsia")]
    pub fn set_name(name: &CStr) {
        use self::zircon::*;
        unsafe {
            zx_object_set_property(
                zx_thread_self(),
                ZX_PROP_NAME,
                name.as_ptr() as *const libc::c_void,
                name.to_bytes().len(),
            );
        }
    }

    #[cfg(target_os = "haiku")]
    pub fn set_name(name: &CStr) {
        unsafe {
            let thread_self = libc::find_thread(ptr::null_mut());
            libc::rename_thread(thread_self, name.as_ptr());
        }
    }

    #[cfg(any(
        target_env = "newlib",
        target_os = "l4re",
        target_os = "emscripten",
        target_os = "redox",
        target_os = "vxworks"
    ))]
    pub fn set_name(_name: &CStr) {
        // Newlib, Emscripten, and VxWorks have no way to set a thread name;
        // this is also left unimplemented for L4Re and Redox.
    }

    #[cfg(not(target_os = "espidf"))]
    pub fn sleep(dur: Duration) {
        let mut secs = dur.as_secs();
        let mut nsecs = dur.subsec_nanos() as _;

        // If we're awoken with a signal then the return value will be -1 and
        // nanosleep will fill in `ts` with the remaining time.
        unsafe {
            while secs > 0 || nsecs > 0 {
                let mut ts = libc::timespec {
                    tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t,
                    tv_nsec: nsecs,
                };
                secs -= ts.tv_sec as u64;
                let ts_ptr = &mut ts as *mut _;
                if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
                    assert_eq!(os::errno(), libc::EINTR);
                    secs += ts.tv_sec as u64;
                    nsecs = ts.tv_nsec;
                } else {
                    nsecs = 0;
                }
            }
        }
    }

    #[cfg(target_os = "espidf")]
    pub fn sleep(dur: Duration) {
        let mut micros = dur.as_micros();
        unsafe {
            while micros > 0 {
                let st = if micros > u32::MAX as u128 { u32::MAX } else { micros as u32 };
                libc::usleep(st);

                micros -= st as u128;
            }
        }
    }

    pub fn join(self) {
        unsafe {
            let ret = libc::pthread_join(self.id, ptr::null_mut());
            mem::forget(self);
            assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
        }
    }

    pub fn id(&self) -> libc::pthread_t {
        self.id
    }

    pub fn into_id(self) -> libc::pthread_t {
        let id = self.id;
        mem::forget(self);
        id
    }
}

impl Drop for Thread {
    fn drop(&mut self) {
        let ret = unsafe { libc::pthread_detach(self.id) };
        debug_assert_eq!(ret, 0);
    }
}

#[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios", target_os = "watchos"))]
fn truncate_cstr(cstr: &CStr, max_with_nul: usize) -> crate::borrow::Cow<'_, CStr> {
    use crate::{borrow::Cow, ffi::CString};

    if cstr.to_bytes_with_nul().len() > max_with_nul {
        let bytes = cstr.to_bytes()[..max_with_nul - 1].to_vec();
        // SAFETY: the non-nul bytes came straight from a CStr.
        // (CString will add the terminating nul.)
        Cow::Owned(unsafe { CString::from_vec_unchecked(bytes) })
    } else {
        Cow::Borrowed(cstr)
    }
}

pub fn available_parallelism() -> io::Result<NonZeroUsize> {
    cfg_if::cfg_if! {
        if #[cfg(any(
            target_os = "android",
            target_os = "emscripten",
            target_os = "fuchsia",
            target_os = "ios",
            target_os = "linux",
            target_os = "macos",
            target_os = "solaris",
            target_os = "illumos",
        ))] {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            {
                let quota = cgroups::quota().max(1);
                let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
                unsafe {
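                    // A pid of 0 queries the affinity mask of the calling thread.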
                    if libc::sched_getaffinity(0, mem::size_of::<libc::cpu_set_t>(), &mut set) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        let count = count.min(quota);
                        // SAFETY: affinity mask can't be empty and the quota gets clamped to a minimum of 1
                        return Ok(NonZeroUsize::new_unchecked(count));
                    }
                }
            }
            match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
                -1 => Err(io::Error::last_os_error()),
                0 => Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")),
                cpus => Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) }),
            }
        } else if #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))] {
            use crate::ptr;

            let mut cpus: libc::c_uint = 0;
            let mut cpus_size = crate::mem::size_of_val(&cpus);

            unsafe {
                cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
            }

            // Fallback approach in case of errors or no hardware threads.
            if cpus < 1 {
                let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
                let res = unsafe {
                    libc::sysctl(
                        mib.as_mut_ptr(),
                        2,
                        &mut cpus as *mut _ as *mut _,
                        &mut cpus_size as *mut _ as *mut _,
                        ptr::null_mut(),
                        0,
                    )
                };

                // Handle errors if any.
                if res == -1 {
                    return Err(io::Error::last_os_error());
                } else if cpus == 0 {
                    return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
                }
            }
            Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "openbsd")] {
            use crate::ptr;

            let mut cpus: libc::c_uint = 0;
            let mut cpus_size = crate::mem::size_of_val(&cpus);
            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];

            let res = unsafe {
                libc::sysctl(
                    mib.as_mut_ptr(),
                    2,
                    &mut cpus as *mut _ as *mut _,
                    &mut cpus_size as *mut _ as *mut _,
                    ptr::null_mut(),
                    0,
                )
            };

            // Handle errors if any.
            if res == -1 {
                return Err(io::Error::last_os_error());
            } else if cpus == 0 {
                return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
            }

            Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "haiku")] {
            // The `cpu_count` field of `system_info` is the static value set at boot via
            // `smp_set_num_cpus`; `get_system_info` reads it back through `smp_get_num_cpus`.
            unsafe {
                let mut sinfo: libc::system_info = crate::mem::zeroed();
                let res = libc::get_system_info(&mut sinfo);

                if res != libc::B_OK {
                    return Err(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform"));
                }

                Ok(NonZeroUsize::new_unchecked(sinfo.cpu_count as usize))
            }
        } else {
            // FIXME: implement on vxWorks, Redox, l4re
            Err(io::const_io_error!(io::ErrorKind::Unsupported, "Getting the number of hardware threads is not supported on the target platform"))
        }
    }
}

#[cfg(any(target_os = "android", target_os = "linux"))]
mod cgroups {
    //! Currently not covered
    //! * cgroup v2 in non-standard mountpoints
    //! * paths containing control characters or spaces, since those would be escaped in procfs
    //!   output and we don't unescape
    use crate::borrow::Cow;
    use crate::ffi::OsString;
    use crate::fs::{try_exists, File};
    use crate::io::Read;
    use crate::io::{BufRead, BufReader};
    use crate::os::unix::ffi::OsStringExt;
    use crate::path::Path;
    use crate::path::PathBuf;
    use crate::str::from_utf8;

    #[derive(PartialEq)]
    enum Cgroup {
        V1,
        V2,
    }

    /// Returns cgroup CPU quota in core-equivalents, rounded down, or `usize::MAX` if the quota
    /// cannot be determined or is not set.
    pub(super) fn quota() -> usize {
        let mut quota = usize::MAX;
        if cfg!(miri) {
            // Attempting to open a file fails under default flags due to isolation.
            // And Miri does not have parallelism anyway.
            return quota;
        }

        let _: Option<()> = try {
            let mut buf = Vec::with_capacity(128);
            // find our place in the cgroup hierarchy
            File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
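            // Each line has the form `hierarchy-ID:controller-list:cgroup-path`,
            // e.g. `0::/user.slice` for cgroup v2 or `4:cpu,cpuacct:/` for v1.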
            let (cgroup_path, version) =
                buf.split(|&c| c == b'\n').fold(None, |previous, line| {
                    let mut fields = line.splitn(3, |&c| c == b':');
                    // 2nd field is a list of controllers for v1 or empty for v2
                    let version = match fields.nth(1) {
                        Some(b"") => Cgroup::V2,
                        Some(controllers)
                            if from_utf8(controllers)
                                .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
                        {
                            Cgroup::V1
                        }
                        _ => return previous,
                    };

                    // already-found v1 trumps v2 since it explicitly specifies its controllers
                    if previous.is_some() && version == Cgroup::V2 {
                        return previous;
                    }

                    let path = fields.last()?;
                    // skip leading slash
                    Some((path[1..].to_owned(), version))
                })?;
            let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));

            quota = match version {
                Cgroup::V1 => quota_v1(cgroup_path),
                Cgroup::V2 => quota_v2(cgroup_path),
            };
        };

        quota
    }

    fn quota_v2(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;

        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // standard mount location defined in file-hierarchy(7) manpage
        let cgroup_mount = "/sys/fs/cgroup";

        path.push(cgroup_mount);
        path.push(&group_path);

        path.push("cgroup.controllers");

        // skip if we're not looking at cgroup2
        if matches!(try_exists(&path), Err(_) | Ok(false)) {
            return usize::MAX;
        };

        path.pop();

        let _: Option<()> = try {
            while path.starts_with(cgroup_mount) {
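                // Walk up the cgroup hierarchy: an ancestor group may impose a tighter
                // limit, so keep the minimum of all limits seen along the way.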
                path.push("cpu.max");

                read_buf.clear();

                if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
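                    // The first line of cpu.max is `$MAX $PERIOD`, e.g. `200000 100000`;
                    // `$MAX` is the literal `max` when no limit is set, in which case the
                    // parse below fails and the quota is left untouched.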
                    let raw_quota = read_buf.lines().next()?;
                    let mut raw_quota = raw_quota.split(' ');
                    let limit = raw_quota.next()?;
                    let period = raw_quota.next()?;
                    match (limit.parse::<usize>(), period.parse::<usize>()) {
                        (Ok(limit), Ok(period)) => {
                            quota = quota.min(limit / period);
                        }
                        _ => {}
                    }
                }

                path.pop(); // pop filename
                path.pop(); // pop dir
            }
        };

        quota
    }

    fn quota_v1(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;
        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // Hardcode commonly used locations mentioned in the cgroups(7) manpage;
        // if those don't work, scan mountinfo and adjust `group_path` for bind-mounts.
        let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
            // This can be expensive on systems with tons of mountpoints,
            // but we only get to this point when /proc/self/cgroup explicitly indicated
            // that this process belongs to a cpu-controller cgroup v1 and the defaults didn't work.
            find_mountpoint,
        ];

        for mount in mounts {
            let Some((mount, group_path)) = mount(&group_path) else { continue };

            path.clear();
            path.push(mount.as_ref());
            path.push(&group_path);

            // skip if we guessed the mount incorrectly
            if matches!(try_exists(&path), Err(_) | Ok(false)) {
                continue;
            }

            while path.starts_with(mount.as_ref()) {
                let mut parse_file = |name| {
                    path.push(name);
                    read_buf.clear();

                    let f = File::open(&path);
                    path.pop(); // restore buffer before any early returns
                    f.ok()?.read_to_string(&mut read_buf).ok()?;
                    let parsed = read_buf.trim().parse::<usize>().ok()?;

                    Some(parsed)
                };

                let limit = parse_file("cpu.cfs_quota_us");
                let period = parse_file("cpu.cfs_period_us");

                match (limit, period) {
                    (Some(limit), Some(period)) => quota = quota.min(limit / period),
                    _ => {}
                }

                path.pop();
            }

            // we passed the try_exists above so we should have traversed the correct hierarchy
            // when reaching this line
            break;
        }

        quota
    }

    /// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
    ///
    /// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
    /// over the already-included prefix
    fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
        let mut reader = BufReader::new(File::open("/proc/self/mountinfo").ok()?);
        let mut line = String::with_capacity(256);
        loop {
            line.clear();
            if reader.read_line(&mut line).ok()? == 0 {
                break;
            }

            let line = line.trim();
            let mut items = line.split(' ');

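            // mountinfo fields, per proc(5): mount ID, parent ID, major:minor, root,
            // mount point, mount options, optional fields..., `-`, fstype, source, super options.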
            let sub_path = items.nth(3)?;
            let mount_point = items.next()?;
            let mount_opts = items.next_back()?;
            let filesystem_type = items.nth_back(1)?;

            if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
                // not a cgroup / not a cpu-controller
                continue;
            }

            let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;

            if !group_path.starts_with(sub_path) {
                // this is a bind-mount and the bound subdirectory
                // does not contain the cgroup this process belongs to
                continue;
            }

            let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;

            return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
        }

        None
    }
}

#[cfg(all(
    not(target_os = "linux"),
    not(target_os = "freebsd"),
    not(target_os = "macos"),
    not(target_os = "netbsd"),
    not(target_os = "openbsd"),
    not(target_os = "solaris")
))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
    use crate::ops::Range;
    pub type Guard = Range<usize>;
    pub unsafe fn current() -> Option<Guard> {
        None
    }
    pub unsafe fn init() -> Option<Guard> {
        None
    }
}

#[cfg(any(
    target_os = "linux",
    target_os = "freebsd",
    target_os = "macos",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "solaris"
))]
#[cfg_attr(test, allow(dead_code))]
pub mod guard {
    use libc::{mmap, mprotect};
    use libc::{MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};

    use crate::io;
    use crate::ops::Range;
    use crate::sync::atomic::{AtomicUsize, Ordering};
    use crate::sys::os;

    // This is initialized in init() and is only read from afterwards.
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

    pub type Guard = Range<usize>;

    #[cfg(target_os = "solaris")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }

    #[cfg(target_os = "macos")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let th = libc::pthread_self();
        let stackptr = libc::pthread_get_stackaddr_np(th);
        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
    }

    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }

    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut ret = None;
        let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(&mut attr), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
            ret = Some(stackaddr);
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
        }
        ret
    }

    // Precondition: PAGE_SIZE is initialized.
    unsafe fn get_stack_start_aligned() -> Option<*mut libc::c_void> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);
        assert!(page_size != 0);
        let stackptr = get_stack_start()?;
        let stackaddr = stackptr.addr();

        // Ensure stackaddr is page aligned! A parent process might
        // have reset RLIMIT_STACK to be non-page aligned. The
        // pthread_attr_getstack() reports the usable stack area
        // stackaddr < stackaddr + stacksize, so if stackaddr is not
        // page-aligned, calculate the fix such that stackaddr <
        // new_page_aligned_stackaddr < stackaddr + stacksize
        let remainder = stackaddr % page_size;
        Some(if remainder == 0 {
            stackptr
        } else {
            stackptr.with_addr(stackaddr + page_size - remainder)
        })
    }

    pub unsafe fn init() -> Option<Guard> {
        let page_size = os::page_size();
        PAGE_SIZE.store(page_size, Ordering::Relaxed);

        if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
            // Linux doesn't allocate the whole stack right away, and
            // the kernel has its own stack-guard mechanism to fault
            // when growing too close to an existing mapping. If we map
            // our own guard, then the kernel starts enforcing a rather
            // large gap above that, rendering much of the possible
            // stack space useless. See #43052.
            //
            // Instead, we'll just note where we expect rlimit to start
            // faulting, so our handler can report "stack overflow", and
            // trust that the kernel's own stack guard will work.
            let stackptr = get_stack_start_aligned()?;
            let stackaddr = stackptr.addr();
            Some(stackaddr - page_size..stackaddr)
        } else if cfg!(all(target_os = "linux", target_env = "musl")) {
            // For the main thread, musl's pthread_attr_getstack returns the current
            // stack size rather than the maximum size it can eventually grow to. It
            // cannot be used to determine the position of the kernel's stack guard.
            None
        } else if cfg!(target_os = "freebsd") {
            // FreeBSD's stack autogrows, and optionally includes a guard page
            // at the bottom. If we try to remap the bottom of the stack
            // ourselves, FreeBSD's guard page moves upwards. So we'll just use
            // the builtin guard page.
            let stackptr = get_stack_start_aligned()?;
            let guardaddr = stackptr.addr();
            // Technically the number of guard pages is tunable and controlled
            // by the security.bsd.stack_guard_page sysctl, but there are
            // few reasons to change it from the default. The default value has
            // been 1 ever since FreeBSD 11.1 and 10.4.
            const GUARD_PAGES: usize = 1;
            let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
            Some(guard)
        } else {
            // Reallocate the last page of the stack.
            // This ensures SIGBUS will be raised on
            // stack overflow.
            // Systems which enforce strict PAX MPROTECT do not allow
            // to mprotect() a mapping with less restrictive permissions
            // than the initial mmap() used, so we mmap() here with
            // read/write permissions and only then mprotect() it to
            // no permissions at all. See issue #50313.
            let stackptr = get_stack_start_aligned()?;
            let result = mmap(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            );
            if result != stackptr || result == MAP_FAILED {
                panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
            }

            let result = mprotect(stackptr, page_size, PROT_NONE);
            if result != 0 {
                panic!("failed to protect the guard page: {}", io::Error::last_os_error());
            }

            let guardaddr = stackptr.addr();

            Some(guardaddr..guardaddr + page_size)
        }
    }

    #[cfg(any(target_os = "macos", target_os = "openbsd", target_os = "solaris"))]
    pub unsafe fn current() -> Option<Guard> {
        let stackptr = get_stack_start()?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
    }

    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    pub unsafe fn current() -> Option<Guard> {
        let mut ret = None;
        let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(&mut attr), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported guard
                    // size obtained from pthread_attr_get_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);

            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
                // end (below) the stack. There's no easy way for us to know which we have at
                // runtime, so we'll just match any fault in the range right above or below the
                // stack base to call that fault a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
        }
        ret
    }
}

// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
// We need that information to avoid blowing up when a small stack
// is created in an application with big thread-local storage requirements.
// See #6233 for rationale and details.
#[cfg(all(target_os = "linux", target_env = "gnu"))]
fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
    // We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
    // We shouldn't really be using such an internal symbol, but there's currently
    // no other way to account for the TLS size.
    dlsym!(fn __pthread_get_minstack(*const libc::pthread_attr_t) -> libc::size_t);

    match __pthread_get_minstack.get() {
        None => libc::PTHREAD_STACK_MIN,
        Some(f) => unsafe { f(attr) },
    }
}

// No point in looking up __pthread_get_minstack() on non-glibc platforms.
#[cfg(all(not(all(target_os = "linux", target_env = "gnu")), not(target_os = "netbsd")))]
fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    libc::PTHREAD_STACK_MIN
}

#[cfg(target_os = "netbsd")]
fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    2048 // just a guess
}