extern crate libc;

use std::fs::File;
use std::mem::ManuallyDrop;
use std::os::unix::io::{FromRawFd, RawFd};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{io, ptr};

use crate::advice::Advice;

#[cfg(any(
    all(target_os = "linux", not(target_arch = "mips")),
    target_os = "freebsd",
    target_os = "android"
))]
const MAP_STACK: libc::c_int = libc::MAP_STACK;

#[cfg(not(any(
    all(target_os = "linux", not(target_arch = "mips")),
    target_os = "freebsd",
    target_os = "android"
)))]
const MAP_STACK: libc::c_int = 0;

#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_POPULATE: libc::c_int = libc::MAP_POPULATE;

#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_POPULATE: libc::c_int = 0;

#[cfg(any(
    target_os = "android",
    all(target_os = "linux", not(target_env = "musl"))
))]
use libc::{mmap64 as mmap, off64_t as off_t};

#[cfg(not(any(
    target_os = "android",
    all(target_os = "linux", not(target_env = "musl"))
)))]
use libc::{mmap, off_t};

pub struct MmapInner {
    ptr: *mut libc::c_void,
    len: usize,
}

impl MmapInner {
    /// Creates a new `MmapInner`.
    ///
    /// This is a thin wrapper around the `mmap` system call.
    fn new(
        len: usize,
        prot: libc::c_int,
        flags: libc::c_int,
        file: RawFd,
        offset: u64,
    ) -> io::Result<MmapInner> {
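        // Worked example (comment added for illustration, not from upstream):
        // with a 4096-byte page size and `offset == 5000`, `alignment` is 904
        // and `aligned_offset` is 4096. The mapping therefore starts one page
        // earlier than requested, and `adjust_mmap_params` grows the length by
        // 904 so that `ptr + map_offset` still points at byte 5000 of the file.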
        let alignment = offset % page_size() as u64;
        let aligned_offset = offset - alignment;

        let (map_len, map_offset) = Self::adjust_mmap_params(len as usize, alignment as usize)?;

        unsafe {
            let ptr = mmap(
                ptr::null_mut(),
                map_len as libc::size_t,
                prot,
                flags,
                file,
                aligned_offset as off_t,
            );

            if ptr == libc::MAP_FAILED {
                Err(io::Error::last_os_error())
            } else {
                Ok(Self::from_raw_parts(ptr, len, map_offset))
            }
        }
    }

    fn adjust_mmap_params(len: usize, alignment: usize) -> io::Result<(usize, usize)> {
        use std::isize;

        // Rust's slice cannot be larger than isize::MAX.
        // See https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html
        //
        // This is not a problem on 64-bit targets, but on 32-bit ones
        // having a file or an anonymous mapping larger than 2GB is quite normal
        // and we have to prevent it.
        //
        // The code below is essentially the same as in Rust's std:
        // https://github.com/rust-lang/rust/blob/db78ab70a88a0a5e89031d7ee4eccec835dcdbde/library/alloc/src/raw_vec.rs#L495
        if std::mem::size_of::<usize>() < 8 && len > isize::MAX as usize {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "memory map length overflows isize",
            ));
        }

        let map_len = len + alignment;
        let map_offset = alignment;

        // `libc::mmap` does not support zero-size mappings. POSIX defines:
        //
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html
        // > If `len` is zero, `mmap()` shall fail and no mapping shall be established.
        //
        // So if we would create such a mapping, create a one-byte mapping instead:
        let map_len = map_len.max(1);

        // Note that in that case `MmapInner::len` is still set to zero,
        // and `Mmap` will still dereference to an empty slice.
        //
        // If this mapping is backed by an empty file, we create a mapping larger than the file.
        // This is unusual but well-defined. On the same man page, POSIX further defines:
        //
        // > The `mmap()` function can be used to map a region of memory that is larger
        // > than the current size of the object.
        //
        // (The object here is the file.)
        //
        // > Memory access within the mapping but beyond the current end of the underlying
        // > objects may result in SIGBUS signals being sent to the process. The reason for this
        // > is that the size of the object can be manipulated by other processes and can change
        // > at any moment. The implementation should tell the application that a memory reference
        // > is outside the object where this can be detected; otherwise, written data may be lost
        // > and read data may not reflect actual data in the object.
        //
        // Because `MmapInner::len` is not incremented, this increment of `map_len`
        // will not allow accesses past the end of the file and will not cause SIGBUS.
        //
        // (SIGBUS is still possible by mapping a non-empty file and then truncating it
        // to a shorter size, but that is unrelated to this handling of empty files.)
        Ok((map_len, map_offset))
    }

    /// Get the current memory mapping as a `(ptr, map_len, offset)` tuple.
    ///
    /// Note that `map_len` is the length of the memory mapping itself and
    /// _not_ the one that would be passed to `from_raw_parts`.
    fn as_mmap_params(&self) -> (*mut libc::c_void, usize, usize) {
        let offset = self.ptr as usize % page_size();
        let len = self.len + offset;

        // There are two possible memory layouts we could have, depending on
        // the length and offset passed when constructing this instance:
        //
        // 1. The "normal" memory layout looks like this:
        //
        //            |<------------------>|<---------------------->|
        //        mmap ptr     offset     ptr      public slice
        //
        //    That is, we have
        //    - The start of the page-aligned memory mapping returned by mmap,
        //      followed by,
        //    - Some number of bytes that are memory mapped but ignored since
        //      they are before the byte offset requested by the user, followed
        //      by,
        //    - The actual memory mapped slice requested by the user.
        //
        //    This maps cleanly to a (ptr, len, offset) tuple.
        //
        // 2. Then, we have the case where the user requested a zero-length
        //    memory mapping. mmap(2) does not support zero-length mappings so
        //    this crate works around that by actually making a mapping of
        //    length one. This means that we have
        //    - A length zero slice, followed by,
        //    - A single memory mapped byte
        //
        //    Note that this only happens if the offset within the page is also
        //    zero. Otherwise, we have a memory map of offset bytes and not a
        //    zero-length memory map.
        //
        //    This doesn't fit cleanly into a (ptr, len, offset) tuple. Instead,
        //    we fudge it slightly: a zero-length memory map turns into a
        //    mapping of length one and can't be told apart outside of this
        //    method without knowing the original length.
        if len == 0 {
            (self.ptr, 1, 0)
        } else {
            (unsafe { self.ptr.offset(-(offset as isize)) }, len, offset)
        }
    }

    /// Construct this `MmapInner` from its raw components
    ///
    /// # Safety
    ///
    /// - `ptr` must point to the start of a memory mapping that can be freed
    ///   using `munmap(2)` (i.e. returned by `mmap(2)` or `mremap(2)`)
    /// - The memory mapping at `ptr` must have a length of `len + offset`.
    /// - If `len + offset == 0` then the memory mapping must be of length 1.
    /// - `offset` must be less than the current page size.
    unsafe fn from_raw_parts(ptr: *mut libc::c_void, len: usize, offset: usize) -> Self {
        debug_assert_eq!(ptr as usize % page_size(), 0, "ptr not page-aligned");
        debug_assert!(offset < page_size(), "offset larger than page size");

        Self {
            ptr: ptr.offset(offset as isize),
            len,
        }
    }

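    // The constructors below back the crate's public `Mmap`, `MmapMut` and
    // `MmapOptions` types. As a rough, hedged sketch of the call path (the
    // exact plumbing lives in the platform-independent front end, not in this
    // file):
    //
    //     // let file = std::fs::File::open("data.bin")?;
    //     // let map = unsafe { MmapOptions::new().populate().map(&file)? };
    //
    // ends up here as roughly
    // `MmapInner::map(file_len as usize, file.as_raw_fd(), 0, true)`.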
    pub fn map(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ,
            libc::MAP_SHARED | populate,
            file,
            offset,
        )
    }

    pub fn map_exec(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_EXEC,
            libc::MAP_SHARED | populate,
            file,
            offset,
        )
    }

    pub fn map_mut(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_SHARED | populate,
            file,
            offset,
        )
    }

    pub fn map_copy(len: usize, file: RawFd, offset: u64, populate: bool) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | populate,
            file,
            offset,
        )
    }

    pub fn map_copy_read_only(
        len: usize,
        file: RawFd,
        offset: u64,
        populate: bool,
    ) -> io::Result<MmapInner> {
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ,
            libc::MAP_PRIVATE | populate,
            file,
            offset,
        )
    }

    /// Open an anonymous memory map.
    pub fn map_anon(len: usize, stack: bool, populate: bool) -> io::Result<MmapInner> {
        let stack = if stack { MAP_STACK } else { 0 };
        let populate = if populate { MAP_POPULATE } else { 0 };
        MmapInner::new(
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANON | stack | populate,
            -1,
            0,
        )
    }

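    // `msync(2)` requires a page-aligned address, so `flush` and `flush_async`
    // round the requested offset down to the containing page boundary and grow
    // the length by the same amount before calling it. (Comment added for
    // clarity; e.g. with 4096-byte pages and a mapping that starts on a page
    // boundary, flushing 10 bytes at offset 5000 syncs 914 bytes starting at
    // mapping offset 4096.)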
    pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
        let alignment = (self.ptr as usize + offset) % page_size();
        let offset = offset as isize - alignment as isize;
        let len = len + alignment;
        let result =
            unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_SYNC) };
        if result == 0 {
            Ok(())
        } else {
            Err(io::Error::last_os_error())
        }
    }

    pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
        let alignment = (self.ptr as usize + offset) % page_size();
        let offset = offset as isize - alignment as isize;
        let len = len + alignment;
        let result =
            unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_ASYNC) };
        if result == 0 {
            Ok(())
        } else {
            Err(io::Error::last_os_error())
        }
    }

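    // Helper used by the `make_*` methods below: `mprotect(2)` also expects a
    // page-aligned start address, so the pointer is walked back to the page
    // boundary and the length grown accordingly. The `len.max(1)` mirrors the
    // one-byte placeholder used for zero-length mappings. (Comment added for
    // clarity.)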
    fn mprotect(&mut self, prot: libc::c_int) -> io::Result<()> {
        unsafe {
            let alignment = self.ptr as usize % page_size();
            let ptr = self.ptr.offset(-(alignment as isize));
            let len = self.len + alignment;
            let len = len.max(1);
            if libc::mprotect(ptr, len, prot) == 0 {
                Ok(())
            } else {
                Err(io::Error::last_os_error())
            }
        }
    }

    pub fn make_read_only(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ)
    }

    pub fn make_exec(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ | libc::PROT_EXEC)
    }

    pub fn make_mut(&mut self) -> io::Result<()> {
        self.mprotect(libc::PROT_READ | libc::PROT_WRITE)
    }

    #[inline]
    pub fn ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    #[inline]
    pub fn mut_ptr(&mut self) -> *mut u8 {
        self.ptr as *mut u8
    }

    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    pub fn advise(&self, advice: Advice, offset: usize, len: usize) -> io::Result<()> {
        let alignment = (self.ptr as usize + offset) % page_size();
        let offset = offset as isize - alignment as isize;
        let len = len + alignment;
        unsafe {
            if libc::madvise(self.ptr.offset(offset), len, advice as i32) != 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }

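    // A hedged usage sketch (assuming the crate's public `RemapOptions` API,
    // which forwards to this method on Linux):
    //
    //     // let opts = crate::RemapOptions::new().may_move(true);
    //     // unsafe { inner.remap(new_len, opts)? };
    //
    // `options.into_flags()` translates such options into `mremap(2)` flags
    // (e.g. `MREMAP_MAYMOVE`).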
    #[cfg(target_os = "linux")]
    pub fn remap(&mut self, new_len: usize, options: crate::RemapOptions) -> io::Result<()> {
        let (old_ptr, old_len, offset) = self.as_mmap_params();
        let (map_len, offset) = Self::adjust_mmap_params(new_len, offset)?;

        unsafe {
            let new_ptr = libc::mremap(old_ptr, old_len, map_len, options.into_flags());

            if new_ptr == libc::MAP_FAILED {
                Err(io::Error::last_os_error())
            } else {
                // We explicitly don't drop self since the pointer within is no longer valid.
                ptr::write(self, Self::from_raw_parts(new_ptr, new_len, offset));
                Ok(())
            }
        }
    }

    pub fn lock(&self) -> io::Result<()> {
        unsafe {
            if libc::mlock(self.ptr, self.len) != 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }

    pub fn unlock(&self) -> io::Result<()> {
        unsafe {
            if libc::munlock(self.ptr, self.len) != 0 {
                Err(io::Error::last_os_error())
            } else {
                Ok(())
            }
        }
    }
}

impl Drop for MmapInner {
    fn drop(&mut self) {
        let (ptr, len, _) = self.as_mmap_params();

        // Any errors during unmapping/closing are ignored as the only way
        // to report them would be through panicking which is highly discouraged
        // in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
        unsafe { libc::munmap(ptr, len as libc::size_t) };
    }
}

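// SAFETY (comment added for clarity): the raw pointer field is the only thing
// blocking the auto-derived `Send`/`Sync` impls. The mapping is exclusively
// owned by this `MmapInner`, and aliasing of its contents is governed by the
// borrow-checked public wrappers, so sharing or sending it across threads is
// sound.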
unsafe impl Sync for MmapInner {}
unsafe impl Send for MmapInner {}

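// Returns the system page size, caching it after the first `sysconf` call.
// Relaxed ordering is sufficient here: the value never changes for the
// lifetime of the process, and the worst case under a race is that two
// threads both query `sysconf` and store the same result.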
fn page_size() -> usize {
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);

    match PAGE_SIZE.load(Ordering::Relaxed) {
        0 => {
            let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };

            PAGE_SIZE.store(page_size, Ordering::Relaxed);

            page_size
        }
        page_size => page_size,
    }
}

pub fn file_len(file: RawFd) -> io::Result<u64> {
    // SAFETY: We must not close the passed-in fd by dropping the File we create,
    // we ensure this by immediately wrapping it in a ManuallyDrop.
    unsafe {
        let file = ManuallyDrop::new(File::from_raw_fd(file));
        Ok(file.metadata()?.len())
    }
}
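
// A minimal sanity-check sketch added for illustration (not part of upstream
// memmap2): it exercises the anonymous-mapping constructor and the cached
// `page_size` helper on Unix targets.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn anon_map_round_trip() {
        // 16 bytes, not a stack mapping, no MAP_POPULATE.
        let mut inner = MmapInner::map_anon(16, false, false).expect("mmap failed");
        assert_eq!(inner.len(), 16);

        // The anonymous mapping is PROT_READ | PROT_WRITE, so a write followed
        // by a read through the raw pointers must round-trip.
        unsafe {
            *inner.mut_ptr() = 0xAB;
            assert_eq!(*inner.ptr(), 0xAB);
        }

        // Page sizes are powers of two on every platform this module targets.
        assert!(page_size().is_power_of_two());
    }
}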