std/sys/pal/unix/thread.rs

use crate::ffi::CStr;
use crate::mem::{self, ManuallyDrop};
use crate::num::NonZero;
#[cfg(all(target_os = "linux", target_env = "gnu"))]
use crate::sys::weak::dlsym;
#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto",))]
use crate::sys::weak::weak;
use crate::sys::{os, stack_overflow};
use crate::time::Duration;
use crate::{cmp, io, ptr};
#[cfg(not(any(
    target_os = "l4re",
    target_os = "vxworks",
    target_os = "espidf",
    target_os = "nuttx"
)))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024;
#[cfg(target_os = "l4re")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 1024 * 1024;
#[cfg(target_os = "vxworks")]
pub const DEFAULT_MIN_STACK_SIZE: usize = 256 * 1024;
#[cfg(any(target_os = "espidf", target_os = "nuttx"))]
pub const DEFAULT_MIN_STACK_SIZE: usize = 0; // 0 indicates that the stack size configured in the ESP-IDF/NuttX menuconfig system should be used

pub struct Thread {
    id: libc::pthread_t,
}

// Some platforms may have pthread_t as a pointer in which case we still want
// a thread to be Send/Sync
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}

impl Thread {
    // unsafe: see thread::Builder::spawn_unchecked for safety requirements
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
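        // Double-box the closure: the outer box gives a thin raw pointer that can be
        // passed through `pthread_create`'s `void *` argument; `thread_start` below
        // re-creates that box and invokes the closure.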
        let p = Box::into_raw(Box::new(p));
        let mut native: libc::pthread_t = mem::zeroed();
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);

        #[cfg(any(target_os = "espidf", target_os = "nuttx"))]
        if stack > 0 {
            // Only set the stack if a non-zero value is passed
            // 0 is used as an indication that the default stack size configured in the ESP-IDF/NuttX menuconfig system should be used
            assert_eq!(
                libc::pthread_attr_setstacksize(
                    attr.as_mut_ptr(),
                    cmp::max(stack, min_stack_size(attr.as_ptr()))
                ),
                0
            );
        }

        #[cfg(not(any(target_os = "espidf", target_os = "nuttx")))]
        {
            let stack_size = cmp::max(stack, min_stack_size(attr.as_ptr()));

            match libc::pthread_attr_setstacksize(attr.as_mut_ptr(), stack_size) {
                0 => {}
                n => {
                    assert_eq!(n, libc::EINVAL);
                    // EINVAL means |stack_size| is either too small or not a
                    // multiple of the system page size. Because it's definitely
                    // >= PTHREAD_STACK_MIN, it must be an alignment issue.
                    // Round up to the nearest page and try again.
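                    // For example, with a 4096-byte page a request for 5000 bytes
                    // becomes (5000 + 4095) & !4095 == 8192.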
                    let page_size = os::page_size();
                    let stack_size =
                        (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1);
                    assert_eq!(libc::pthread_attr_setstacksize(attr.as_mut_ptr(), stack_size), 0);
                }
            };
        }

        let ret = libc::pthread_create(&mut native, attr.as_ptr(), thread_start, p as *mut _);
        // Note: if the thread creation fails and this assert fails, then p will
        // be leaked. However, an alternative design could cause double-free
        // which is clearly worse.
        assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);

        return if ret != 0 {
            // The thread failed to start and as a result p was not consumed. Therefore, it is
            // safe to reconstruct the box so that it gets deallocated.
            drop(Box::from_raw(p));
            Err(io::Error::from_raw_os_error(ret))
        } else {
            Ok(Thread { id: native })
        };

        extern "C" fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
            unsafe {
                // Next, set up our stack overflow handler which may get triggered if we run
                // out of stack.
                let _handler = stack_overflow::Handler::new();
                // Finally, let's run some code.
                Box::from_raw(main as *mut Box<dyn FnOnce()>)();
            }
            ptr::null_mut()
        }
    }

    pub fn yield_now() {
        let ret = unsafe { libc::sched_yield() };
        debug_assert_eq!(ret, 0);
    }

    #[cfg(target_os = "android")]
    pub fn set_name(name: &CStr) {
        const PR_SET_NAME: libc::c_int = 15;
        unsafe {
            let res = libc::prctl(
                PR_SET_NAME,
                name.as_ptr(),
                0 as libc::c_ulong,
                0 as libc::c_ulong,
                0 as libc::c_ulong,
            );
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    #[cfg(any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "dragonfly",
        target_os = "nuttx",
        target_os = "cygwin"
    ))]
    pub fn set_name(name: &CStr) {
        unsafe {
            cfg_if::cfg_if! {
                if #[cfg(any(target_os = "linux", target_os = "cygwin"))] {
                    // Linux and Cygwin limit the allowed length of the name.
                    const TASK_COMM_LEN: usize = 16;
                    let name = truncate_cstr::<{ TASK_COMM_LEN }>(name);
                } else {
                    // FreeBSD, DragonFly BSD and NuttX do not enforce length limits.
                }
            };
            // Available since glibc 2.12, musl 1.1.16, and uClibc 1.0.20 for Linux,
            // FreeBSD 12.2 and 13.0, and DragonFly BSD 6.0.
            let res = libc::pthread_setname_np(libc::pthread_self(), name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    #[cfg(target_os = "openbsd")]
    pub fn set_name(name: &CStr) {
        unsafe {
            libc::pthread_set_name_np(libc::pthread_self(), name.as_ptr());
        }
    }

    #[cfg(target_vendor = "apple")]
    pub fn set_name(name: &CStr) {
        unsafe {
            let name = truncate_cstr::<{ libc::MAXTHREADNAMESIZE }>(name);
            let res = libc::pthread_setname_np(name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, 0);
        }
    }

    #[cfg(target_os = "netbsd")]
    pub fn set_name(name: &CStr) {
        unsafe {
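            // NetBSD's pthread_setname_np takes a printf-like format string plus one
            // argument, so the name is passed through `%s`.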
            let res = libc::pthread_setname_np(
                libc::pthread_self(),
                c"%s".as_ptr(),
                name.as_ptr() as *mut libc::c_void,
            );
            debug_assert_eq!(res, 0);
        }
    }

    #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))]
    pub fn set_name(name: &CStr) {
        weak!(
            fn pthread_setname_np(
                thread: libc::pthread_t,
                name: *const libc::c_char,
            ) -> libc::c_int;
        );

        if let Some(f) = pthread_setname_np.get() {
            #[cfg(target_os = "nto")]
            const THREAD_NAME_MAX: usize = libc::_NTO_THREAD_NAME_MAX as usize;
            #[cfg(any(target_os = "solaris", target_os = "illumos"))]
            const THREAD_NAME_MAX: usize = 32;

            let name = truncate_cstr::<{ THREAD_NAME_MAX }>(name);
            let res = unsafe { f(libc::pthread_self(), name.as_ptr()) };
            debug_assert_eq!(res, 0);
        }
    }

    #[cfg(target_os = "fuchsia")]
    pub fn set_name(name: &CStr) {
        use super::fuchsia::*;
        unsafe {
            zx_object_set_property(
                zx_thread_self(),
                ZX_PROP_NAME,
                name.as_ptr() as *const libc::c_void,
                name.to_bytes().len(),
            );
        }
    }

    #[cfg(target_os = "haiku")]
    pub fn set_name(name: &CStr) {
        unsafe {
            let thread_self = libc::find_thread(ptr::null_mut());
            let res = libc::rename_thread(thread_self, name.as_ptr());
            // We have no good way of propagating errors here, but in debug-builds let's check that this actually worked.
            debug_assert_eq!(res, libc::B_OK);
        }
    }

    #[cfg(target_os = "vxworks")]
    pub fn set_name(name: &CStr) {
        // FIXME(libc): adding real STATUS, ERROR type eventually.
        unsafe extern "C" {
            fn taskNameSet(task_id: libc::TASK_ID, task_name: *mut libc::c_char) -> libc::c_int;
        }

        //  VX_TASK_NAME_LEN is 31 in VxWorks 7.
        const VX_TASK_NAME_LEN: usize = 31;

        let mut name = truncate_cstr::<{ VX_TASK_NAME_LEN }>(name);
        let res = unsafe { taskNameSet(libc::taskIdSelf(), name.as_mut_ptr()) };
        debug_assert_eq!(res, libc::OK);
    }

    #[cfg(any(
        target_env = "newlib",
        target_os = "l4re",
        target_os = "emscripten",
        target_os = "redox",
        target_os = "hurd",
        target_os = "aix",
    ))]
    pub fn set_name(_name: &CStr) {
        // Newlib and Emscripten have no way to set a thread name.
    }

    #[cfg(not(target_os = "espidf"))]
    pub fn sleep(dur: Duration) {
        let mut secs = dur.as_secs();
        let mut nsecs = dur.subsec_nanos() as _;

        // If we're awoken with a signal then the return value will be -1 and
        // nanosleep will fill in `ts` with the remaining time.
        unsafe {
            while secs > 0 || nsecs > 0 {
                let mut ts = libc::timespec {
                    tv_sec: cmp::min(libc::time_t::MAX as u64, secs) as libc::time_t,
                    tv_nsec: nsecs,
                };
                secs -= ts.tv_sec as u64;
                let ts_ptr = &raw mut ts;
                if libc::nanosleep(ts_ptr, ts_ptr) == -1 {
                    assert_eq!(os::errno(), libc::EINTR);
                    secs += ts.tv_sec as u64;
                    nsecs = ts.tv_nsec;
                } else {
                    nsecs = 0;
                }
            }
        }
    }

    #[cfg(target_os = "espidf")]
    pub fn sleep(dur: Duration) {
        // ESP-IDF does not have `nanosleep`, so we use `usleep` instead.
        // As per the documentation of `usleep`, it is expected to support
        // sleep times of up to at least one second.
        //
        // ESP-IDF does support almost up to `u32::MAX`, but due to a potential integer overflow in its
        // `usleep` implementation
        // (https://github.com/espressif/esp-idf/blob/d7ca8b94c852052e3bc33292287ef4dd62c9eeb1/components/newlib/time.c#L210),
        // we limit the sleep time to the maximum one that would not cause the underlying `usleep` implementation to overflow
        // (`portTICK_PERIOD_MS` can be anything between 1 and 1000, and is 10 by default).
        const MAX_MICROS: u32 = u32::MAX - 1_000_000 - 1;

        // Add any nanoseconds smaller than a microsecond as an extra microsecond
        // so as to comply with the `std::thread::sleep` contract which mandates
        // implementations to sleep for _at least_ the provided `dur`.
        // We can't overflow `micros` as it is a `u128`, while `Duration` is a pair of
        // (`u64` secs, `u32` nanos), where the nanos are strictly smaller than 1 second
        // (i.e. < 1_000_000_000)
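        // For example, a `dur` of 1500ns gives `as_micros() == 1` and
        // `subsec_nanos() % 1_000 == 500`, so `micros` becomes 2 and we sleep
        // slightly longer than requested rather than shorter.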
        let mut micros = dur.as_micros() + if dur.subsec_nanos() % 1_000 > 0 { 1 } else { 0 };

        while micros > 0 {
            let st = if micros > MAX_MICROS as u128 { MAX_MICROS } else { micros as u32 };
            unsafe {
                libc::usleep(st);
            }

            micros -= st as u128;
        }
    }

    pub fn join(self) {
        let id = self.into_id();
        let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
        assert!(ret == 0, "failed to join thread: {}", io::Error::from_raw_os_error(ret));
    }

    pub fn id(&self) -> libc::pthread_t {
        self.id
    }

    pub fn into_id(self) -> libc::pthread_t {
        ManuallyDrop::new(self).id
    }
}

impl Drop for Thread {
    fn drop(&mut self) {
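        // A handle dropped without being joined detaches the thread so that its
        // resources are released once it finishes.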
        let ret = unsafe { libc::pthread_detach(self.id) };
        debug_assert_eq!(ret, 0);
    }
}

#[cfg(any(
    target_os = "linux",
    target_os = "nto",
    target_os = "solaris",
    target_os = "illumos",
    target_os = "vxworks",
    target_os = "cygwin",
    target_vendor = "apple",
))]
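/// Copies `cstr` into a fixed-size, NUL-terminated buffer, truncating it to at most
/// `MAX_WITH_NUL - 1` bytes; e.g. `truncate_cstr::<4>(c"hello")` yields the bytes `hel\0`.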
fn truncate_cstr<const MAX_WITH_NUL: usize>(cstr: &CStr) -> [libc::c_char; MAX_WITH_NUL] {
    let mut result = [0; MAX_WITH_NUL];
    for (src, dst) in cstr.to_bytes().iter().zip(&mut result[..MAX_WITH_NUL - 1]) {
        *dst = *src as libc::c_char;
    }
    result
}

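/// Returns the number of hardware threads available to the current process, taking
/// CPU affinity masks and cgroup CPU quotas into account on the platforms where
/// those are supported.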
pub fn available_parallelism() -> io::Result<NonZero<usize>> {
    cfg_if::cfg_if! {
        if #[cfg(any(
            target_os = "android",
            target_os = "emscripten",
            target_os = "fuchsia",
            target_os = "hurd",
            target_os = "linux",
            target_os = "aix",
            target_vendor = "apple",
            target_os = "cygwin",
        ))] {
            #[allow(unused_assignments)]
            #[allow(unused_mut)]
            let mut quota = usize::MAX;

            #[cfg(any(target_os = "android", target_os = "linux"))]
            {
                quota = cgroups::quota().max(1);
                let mut set: libc::cpu_set_t = unsafe { mem::zeroed() };
                unsafe {
                    if libc::sched_getaffinity(0, size_of::<libc::cpu_set_t>(), &mut set) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        let count = count.min(quota);

                        // According to sched_getaffinity's API it should always be non-zero, but
                        // some old MIPS kernels were buggy and zero-initialized the mask if
                        // none was explicitly set.
                        // In that case we use the sysconf fallback.
                        if let Some(count) = NonZero::new(count) {
                            return Ok(count)
                        }
                    }
                }
            }
            match unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) } {
                -1 => Err(io::Error::last_os_error()),
                0 => Err(io::Error::UNKNOWN_THREAD_COUNT),
                cpus => {
                    let count = cpus as usize;
                    // Cover the unusual situation where we were able to get the quota but not the affinity mask
                    let count = count.min(quota);
                    Ok(unsafe { NonZero::new_unchecked(count) })
                }
            }
        } else if #[cfg(any(
                   target_os = "freebsd",
                   target_os = "dragonfly",
                   target_os = "openbsd",
                   target_os = "netbsd",
               ))] {
            use crate::ptr;

            #[cfg(target_os = "freebsd")]
            {
                let mut set: libc::cpuset_t = unsafe { mem::zeroed() };
                unsafe {
                    if libc::cpuset_getaffinity(
                        libc::CPU_LEVEL_WHICH,
                        libc::CPU_WHICH_PID,
                        -1,
                        size_of::<libc::cpuset_t>(),
                        &mut set,
                    ) == 0 {
                        let count = libc::CPU_COUNT(&set) as usize;
                        if count > 0 {
                            return Ok(NonZero::new_unchecked(count));
                        }
                    }
                }
            }

            #[cfg(target_os = "netbsd")]
            {
                unsafe {
                    let set = libc::_cpuset_create();
                    if !set.is_null() {
                        let mut count: usize = 0;
                        if libc::pthread_getaffinity_np(libc::pthread_self(), libc::_cpuset_size(set), set) == 0 {
                            for i in 0..libc::cpuid_t::MAX {
                                match libc::_cpuset_isset(i, set) {
                                    -1 => break,
                                    0 => continue,
                                    _ => count = count + 1,
                                }
                            }
                        }
                        libc::_cpuset_destroy(set);
                        if let Some(count) = NonZero::new(count) {
                            return Ok(count);
                        }
                    }
                }
            }

            let mut cpus: libc::c_uint = 0;
            let mut cpus_size = size_of_val(&cpus);

            unsafe {
                cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
            }

            // Fallback approach in case of errors or no hardware threads.
            if cpus < 1 {
                let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
                let res = unsafe {
                    libc::sysctl(
                        mib.as_mut_ptr(),
                        2,
                        (&raw mut cpus) as *mut _,
                        (&raw mut cpus_size) as *mut _,
                        ptr::null_mut(),
                        0,
                    )
                };

                // Handle errors if any.
                if res == -1 {
                    return Err(io::Error::last_os_error());
                } else if cpus == 0 {
                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
                }
            }

            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "nto")] {
            unsafe {
                use libc::_syspage_ptr;
                if _syspage_ptr.is_null() {
                    Err(io::const_error!(io::ErrorKind::NotFound, "no syspage available"))
                } else {
                    let cpus = (*_syspage_ptr).num_cpu;
                    NonZero::new(cpus as usize)
                        .ok_or(io::Error::UNKNOWN_THREAD_COUNT)
                }
            }
        } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] {
            let mut cpus = 0u32;
            if unsafe { libc::pset_info(libc::PS_MYID, core::ptr::null_mut(), &mut cpus, core::ptr::null_mut()) } != 0 {
                return Err(io::Error::UNKNOWN_THREAD_COUNT);
            }
            Ok(unsafe { NonZero::new_unchecked(cpus as usize) })
        } else if #[cfg(target_os = "haiku")] {
            // The `cpu_count` field of `system_info` holds the static value set at boot
            // time via `smp_set_num_cpus`; `get_system_info` in turn calls `smp_get_num_cpus`.
            unsafe {
                let mut sinfo: libc::system_info = crate::mem::zeroed();
                let res = libc::get_system_info(&mut sinfo);

                if res != libc::B_OK {
                    return Err(io::Error::UNKNOWN_THREAD_COUNT);
                }

                Ok(NonZero::new_unchecked(sinfo.cpu_count as usize))
            }
        } else if #[cfg(target_os = "vxworks")] {
            // Note: there is also `vxCpuConfiguredGet`, which is closer to
            // `_SC_NPROCESSORS_CONF` semantics than to the number of cores actually available.
            unsafe extern "C" {
                fn vxCpuEnabledGet() -> libc::cpuset_t;
            }

            // SAFETY: `vxCpuEnabledGet` always fetches a mask with at least one bit set
            unsafe {
                let set = vxCpuEnabledGet();
                Ok(NonZero::new_unchecked(set.count_ones() as usize))
            }
        } else {
            // FIXME: implement on Redox, l4re
            Err(io::const_error!(io::ErrorKind::Unsupported, "getting the number of hardware threads is not supported on the target platform"))
        }
    }
}

#[cfg(any(target_os = "android", target_os = "linux"))]
mod cgroups {
    //! Currently not covered
    //! * cgroup v2 in non-standard mountpoints
    //! * paths containing control characters or spaces, since those would be escaped in procfs
    //!   output and we don't unescape

    use crate::borrow::Cow;
    use crate::ffi::OsString;
    use crate::fs::{File, exists};
    use crate::io::{BufRead, Read};
    use crate::os::unix::ffi::OsStringExt;
    use crate::path::{Path, PathBuf};
    use crate::str::from_utf8;

    #[derive(PartialEq)]
    enum Cgroup {
        V1,
        V2,
    }

    /// Returns the cgroup CPU quota in core-equivalents, rounded down, or `usize::MAX`
    /// if the quota cannot be determined or is not set.
    pub(super) fn quota() -> usize {
        let mut quota = usize::MAX;
        if cfg!(miri) {
            // Attempting to open a file fails under default flags due to isolation.
            // And Miri does not have parallelism anyway.
            return quota;
        }

        let _: Option<()> = try {
            let mut buf = Vec::with_capacity(128);
            // find our place in the cgroup hierarchy
            File::open("/proc/self/cgroup").ok()?.read_to_end(&mut buf).ok()?;
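            // Each line has the form `hierarchy-ID:controller-list:cgroup-path`, e.g.
            // `4:cpu,cpuacct:/user.slice` for a v1 cpu controller or `0::/user.slice`
            // for the v2 unified hierarchy.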
            let (cgroup_path, version) =
                buf.split(|&c| c == b'\n').fold(None, |previous, line| {
                    let mut fields = line.splitn(3, |&c| c == b':');
                    // 2nd field is a list of controllers for v1 or empty for v2
                    let version = match fields.nth(1) {
                        Some(b"") => Cgroup::V2,
                        Some(controllers)
                            if from_utf8(controllers)
                                .is_ok_and(|c| c.split(',').any(|c| c == "cpu")) =>
                        {
                            Cgroup::V1
                        }
                        _ => return previous,
                    };

                    // already-found v1 trumps v2 since it explicitly specifies its controllers
                    if previous.is_some() && version == Cgroup::V2 {
                        return previous;
                    }

                    let path = fields.last()?;
                    // skip leading slash
                    Some((path[1..].to_owned(), version))
                })?;
            let cgroup_path = PathBuf::from(OsString::from_vec(cgroup_path));

            quota = match version {
                Cgroup::V1 => quota_v1(cgroup_path),
                Cgroup::V2 => quota_v2(cgroup_path),
            };
        };

        quota
    }

    fn quota_v2(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;

        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // standard mount location defined in file-hierarchy(7) manpage
        let cgroup_mount = "/sys/fs/cgroup";

        path.push(cgroup_mount);
        path.push(&group_path);

        path.push("cgroup.controllers");

        // skip if we're not looking at cgroup2
        if matches!(exists(&path), Err(_) | Ok(false)) {
            return usize::MAX;
        };

        path.pop();

        let _: Option<()> = try {
            while path.starts_with(cgroup_mount) {
                path.push("cpu.max");

                read_buf.clear();

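                // `cpu.max` holds `$MAX $PERIOD` in microseconds, e.g. `50000 100000`
                // for half a core; an unlimited group reads `max 100000`, which fails
                // the integer parse below and leaves `quota` unchanged.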
                if File::open(&path).and_then(|mut f| f.read_to_string(&mut read_buf)).is_ok() {
                    let raw_quota = read_buf.lines().next()?;
                    let mut raw_quota = raw_quota.split(' ');
                    let limit = raw_quota.next()?;
                    let period = raw_quota.next()?;
                    match (limit.parse::<usize>(), period.parse::<usize>()) {
                        (Ok(limit), Ok(period)) if period > 0 => {
                            quota = quota.min(limit / period);
                        }
                        _ => {}
                    }
                }

                path.pop(); // pop filename
                path.pop(); // pop dir
            }
        };

        quota
    }

    fn quota_v1(group_path: PathBuf) -> usize {
        let mut quota = usize::MAX;
        let mut path = PathBuf::with_capacity(128);
        let mut read_buf = String::with_capacity(20);

        // Hardcode commonly used locations mentioned in the cgroups(7) manpage;
        // if those don't work, scan mountinfo and adjust `group_path` for bind mounts.
        let mounts: &[fn(&Path) -> Option<(_, &Path)>] = &[
            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu"), p)),
            |p| Some((Cow::Borrowed("/sys/fs/cgroup/cpu,cpuacct"), p)),
            // this can be expensive on systems with tons of mountpoints,
            // but we only get to this point when /proc/self/cgroup explicitly indicated
            // that this process belongs to a cpu-controller cgroup v1 and the defaults didn't work
            find_mountpoint,
        ];

        for mount in mounts {
            let Some((mount, group_path)) = mount(&group_path) else { continue };

            path.clear();
            path.push(mount.as_ref());
            path.push(&group_path);

            // skip if we guessed the mount incorrectly
            if matches!(exists(&path), Err(_) | Ok(false)) {
                continue;
            }

            while path.starts_with(mount.as_ref()) {
                let mut parse_file = |name| {
                    path.push(name);
                    read_buf.clear();

                    let f = File::open(&path);
                    path.pop(); // restore buffer before any early returns
                    f.ok()?.read_to_string(&mut read_buf).ok()?;
                    let parsed = read_buf.trim().parse::<usize>().ok()?;

                    Some(parsed)
                };

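                // E.g. a cpu.cfs_quota_us of 200000 with a cpu.cfs_period_us of 100000
                // limits the group to the equivalent of two cores; an unlimited quota
                // of -1 fails the usize parse in `parse_file` and leaves `quota` unchanged.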
                let limit = parse_file("cpu.cfs_quota_us");
                let period = parse_file("cpu.cfs_period_us");

                match (limit, period) {
                    (Some(limit), Some(period)) if period > 0 => quota = quota.min(limit / period),
                    _ => {}
                }

                path.pop();
            }

            // we passed the exists() check above so we should have traversed the correct
            // hierarchy when reaching this line
            break;
        }

        quota
    }

    /// Scan mountinfo for cgroup v1 mountpoint with a cpu controller
    ///
    /// If the cgroupfs is a bind mount then `group_path` is adjusted to skip
    /// over the already-included prefix
    fn find_mountpoint(group_path: &Path) -> Option<(Cow<'static, str>, &Path)> {
        let mut reader = File::open_buffered("/proc/self/mountinfo").ok()?;
        let mut line = String::with_capacity(256);
        loop {
            line.clear();
            if reader.read_line(&mut line).ok()? == 0 {
                break;
            }

            let line = line.trim();
            let mut items = line.split(' ');

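            // A mountinfo line looks like
            // `34 28 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,relatime shared:15 - cgroup cgroup rw,cpu,cpuacct`:
            // the 4th field is the root of the (bind) mount, the 5th the mount point,
            // the last the super options and the 3rd-from-last the filesystem type.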
            let sub_path = items.nth(3)?;
            let mount_point = items.next()?;
            let mount_opts = items.next_back()?;
            let filesystem_type = items.nth_back(1)?;

            if filesystem_type != "cgroup" || !mount_opts.split(',').any(|opt| opt == "cpu") {
                // not a cgroup / not a cpu-controller
                continue;
            }

            let sub_path = Path::new(sub_path).strip_prefix("/").ok()?;

            if !group_path.starts_with(sub_path) {
                // this is a bind-mount and the bound subdirectory
                // does not contain the cgroup this process belongs to
                continue;
            }

            let trimmed_group_path = group_path.strip_prefix(sub_path).ok()?;

            return Some((Cow::Owned(mount_point.to_owned()), trimmed_group_path));
        }

        None
    }
}

// glibc >= 2.15 has a __pthread_get_minstack() function that returns
// PTHREAD_STACK_MIN plus bytes needed for thread-local storage.
// We need that information to avoid blowing up when a small stack
// is created in an application with big thread-local storage requirements.
// See #6233 for rationale and details.
#[cfg(all(target_os = "linux", target_env = "gnu"))]
unsafe fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
    // We use dlsym to avoid an ELF version dependency on GLIBC_PRIVATE. (#23628)
    // We shouldn't really be using such an internal symbol, but there's currently
    // no other way to account for the TLS size.
    dlsym!(
        fn __pthread_get_minstack(attr: *const libc::pthread_attr_t) -> libc::size_t;
    );

    match __pthread_get_minstack.get() {
        None => libc::PTHREAD_STACK_MIN,
        Some(f) => unsafe { f(attr) },
    }
}

// No point in looking up __pthread_get_minstack() on non-glibc platforms.
#[cfg(all(
    not(all(target_os = "linux", target_env = "gnu")),
    not(any(target_os = "netbsd", target_os = "nuttx"))
))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    libc::PTHREAD_STACK_MIN
}

#[cfg(any(target_os = "netbsd", target_os = "nuttx"))]
unsafe fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
    static STACK: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();

    *STACK.get_or_init(|| {
        let mut stack = unsafe { libc::sysconf(libc::_SC_THREAD_STACK_MIN) };
        if stack < 0 {
            stack = 2048; // just a guess
        }

        stack as usize
    })
}