// proxmox-backup: src/pxar/create.rs
use std::collections::{HashSet, HashMap};
use std::ffi::{CStr, CString, OsStr};
use std::fmt;
use std::io::{self, Read, Write};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::{FileStat, Mode};
use futures::future::BoxFuture;
use futures::FutureExt;

use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
use pxar::Metadata;
use pxar::encoder::{SeqWrite, LinkOffset};

use proxmox::c_str;
use proxmox::sys::error::SysError;
use proxmox::tools::fd::RawFdNum;
use proxmox::tools::vec;

use pbs_datastore::catalog::BackupCatalogWriter;
use pbs_tools::fs;

use crate::pxar::metadata::errno_is_unsupported;
use crate::pxar::Flags;
use crate::pxar::tools::assert_single_path_component;
use crate::tools::{acl, xattr, Fd};

/// Pxar options for creating a pxar archive/stream
#[derive(Default, Clone)]
pub struct PxarCreateOptions {
    /// Device/mountpoint st_dev numbers that should be included. None for no limitation.
    pub device_set: Option<HashSet<u64>>,
    /// Exclusion patterns
    pub patterns: Vec<MatchEntry>,
    /// Maximum number of entries to hold in memory
    pub entries_max: usize,
    /// Skip lost+found directory
    pub skip_lost_and_found: bool,
    /// Verbose output
    pub verbose: bool,
}
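
// A minimal sketch (not part of the original module) of how a caller might fill in
// these options; the exclude pattern and the entry limit below are illustrative
// assumptions, not defaults mandated by this crate.
#[allow(dead_code)]
fn example_pxar_create_options() -> Result<PxarCreateOptions, Error> {
    let patterns = vec![MatchEntry::parse_pattern(
        "*.tmp",
        PatternFlag::PATH_NAME,
        MatchType::Exclude,
    )?];

    Ok(PxarCreateOptions {
        device_set: None,         // no restriction to specific devices
        patterns,                 // exclude temporary files (example pattern)
        entries_max: 1024 * 1024, // arbitrary example limit
        skip_lost_and_found: true,
        verbose: false,
    })
}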


fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
    let mut fs_stat = std::mem::MaybeUninit::uninit();
    let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
    Errno::result(res)?;
    let fs_stat = unsafe { fs_stat.assume_init() };

    Ok(fs_stat.f_type)
}

#[rustfmt::skip]
pub fn is_virtual_file_system(magic: i64) -> bool {
    use proxmox::sys::linux::magic::*;

    matches!(magic, BINFMTFS_MAGIC |
                    CGROUP2_SUPER_MAGIC |
                    CGROUP_SUPER_MAGIC |
                    CONFIGFS_MAGIC |
                    DEBUGFS_MAGIC |
                    DEVPTS_SUPER_MAGIC |
                    EFIVARFS_MAGIC |
                    FUSE_CTL_SUPER_MAGIC |
                    HUGETLBFS_MAGIC |
                    MQUEUE_MAGIC |
                    NFSD_MAGIC |
                    PROC_SUPER_MAGIC |
                    PSTOREFS_MAGIC |
                    RPCAUTH_GSSMAGIC |
                    SECURITYFS_MAGIC |
                    SELINUX_MAGIC |
                    SMACK_MAGIC |
                    SYSFS_MAGIC)
}

#[derive(Debug)]
struct ArchiveError {
    path: PathBuf,
    error: Error,
}

impl ArchiveError {
    fn new(path: PathBuf, error: Error) -> Self {
        Self { path, error }
    }
}

impl std::error::Error for ArchiveError {}

impl fmt::Display for ArchiveError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "error at {:?}: {}", self.path, self.error)
    }
}

#[derive(Eq, PartialEq, Hash)]
struct HardLinkInfo {
    st_dev: u64,
    st_ino: u64,
}

/// TODO: make a builder for the create_archive call for fewer parameters and add a method to add a
/// logger which does not write to stderr.
struct Logger;

impl std::io::Write for Logger {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        std::io::stderr().write(data)
    }

    fn flush(&mut self) -> io::Result<()> {
        std::io::stderr().flush()
    }
}

/// The error-reporting counterpart to `Logger`; it currently also writes to stderr.
struct ErrorReporter;

impl std::io::Write for ErrorReporter {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        std::io::stderr().write(data)
    }

    fn flush(&mut self) -> io::Result<()> {
        std::io::stderr().flush()
    }
}

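/// State shared across the recursive directory walk while encoding the archive.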
struct Archiver {
    feature_flags: Flags,
    fs_feature_flags: Flags,
    fs_magic: i64,
    patterns: Vec<MatchEntry>,
    callback: Box<dyn FnMut(&Path) -> Result<(), Error> + Send>,
    catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
    path: PathBuf,
    entry_counter: usize,
    entry_limit: usize,
    current_st_dev: libc::dev_t,
    device_set: Option<HashSet<u64>>,
    hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
    errors: ErrorReporter,
    logger: Logger,
    file_copy_buffer: Vec<u8>,
}

type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>;

pub async fn create_archive<T, F>(
    source_dir: Dir,
    mut writer: T,
    feature_flags: Flags,
    callback: F,
    catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
    options: PxarCreateOptions,
) -> Result<(), Error>
where
    T: SeqWrite + Send,
    F: FnMut(&Path) -> Result<(), Error> + Send + 'static,
{
    let fs_magic = detect_fs_type(source_dir.as_raw_fd())?;
    if is_virtual_file_system(fs_magic) {
        bail!("refusing to backup a virtual file system");
    }

    let mut fs_feature_flags = Flags::from_magic(fs_magic);

    let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?;
    let metadata = get_metadata(
        source_dir.as_raw_fd(),
        &stat,
        feature_flags & fs_feature_flags,
        fs_magic,
        &mut fs_feature_flags,
    )
    .map_err(|err| format_err!("failed to get metadata for source directory: {}", err))?;

    let mut device_set = options.device_set.clone();
    if let Some(ref mut set) = device_set {
        set.insert(stat.st_dev);
    }

    let mut encoder = Encoder::new(&mut writer, &metadata).await?;

    let mut patterns = options.patterns;

    if options.skip_lost_and_found {
        patterns.push(MatchEntry::parse_pattern(
            "lost+found",
            PatternFlag::PATH_NAME,
            MatchType::Exclude,
        )?);
    }

    let mut archiver = Archiver {
        feature_flags,
        fs_feature_flags,
        fs_magic,
        callback: Box::new(callback),
        patterns,
        catalog,
        path: PathBuf::new(),
        entry_counter: 0,
        entry_limit: options.entries_max,
        current_st_dev: stat.st_dev,
        device_set,
        hardlinks: HashMap::new(),
        errors: ErrorReporter,
        logger: Logger,
        file_copy_buffer: vec::undefined(4 * 1024 * 1024),
    };

    archiver.archive_dir_contents(&mut encoder, source_dir, true).await?;
    encoder.finish().await?;
    Ok(())
}
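
// A hedged usage sketch (not present in the original file): how a caller could drive
// `create_archive` once it has a directory handle, a `SeqWrite` writer and some options.
// The source path, the callback body and the use of `Flags::DEFAULT` as the requested
// feature set are assumptions for illustration only.
#[allow(dead_code)]
async fn example_create_archive<T: SeqWrite + Send>(
    writer: T,
    options: PxarCreateOptions,
) -> Result<(), Error> {
    // Open the directory that should become the archive root (placeholder path).
    let source_dir = Dir::open(
        "/path/to/source",
        OFlag::O_DIRECTORY | OFlag::O_RDONLY,
        Mode::empty(),
    )?;

    create_archive(
        source_dir,
        writer,
        Flags::DEFAULT,
        |path| {
            // Per-entry progress callback; returning an error aborts the archive.
            eprintln!("adding {:?}", path);
            Ok(())
        },
        None, // no catalog writer in this sketch
        options,
    )
    .await
}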

struct FileListEntry {
    name: CString,
    path: PathBuf,
    stat: FileStat,
}

impl Archiver {
    /// Get the currently effective feature flags. (Requested flags masked by the file system
    /// feature flags).
    fn flags(&self) -> Flags {
        self.feature_flags & self.fs_feature_flags
    }

    fn wrap_err(&self, err: Error) -> Error {
        if err.downcast_ref::<ArchiveError>().is_some() {
            err
        } else {
            ArchiveError::new(self.path.clone(), err).into()
        }
    }

    fn archive_dir_contents<'a, 'b, T: SeqWrite + Send>(
        &'a mut self,
        encoder: &'a mut Encoder<'b, T>,
        mut dir: Dir,
        is_root: bool,
    ) -> BoxFuture<'a, Result<(), Error>> {
        async move {
            let entry_counter = self.entry_counter;

            let old_patterns_count = self.patterns.len();
            self.read_pxar_excludes(dir.as_raw_fd())?;

            let mut file_list = self.generate_directory_file_list(&mut dir, is_root)?;

            if is_root && old_patterns_count > 0 {
                file_list.push(FileListEntry {
                    name: CString::new(".pxarexclude-cli").unwrap(),
                    path: PathBuf::new(),
                    stat: unsafe { std::mem::zeroed() },
                });
            }

            let dir_fd = dir.as_raw_fd();

            let old_path = std::mem::take(&mut self.path);

            for file_entry in file_list {
                let file_name = file_entry.name.to_bytes();

                if is_root && file_name == b".pxarexclude-cli" {
                    self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count).await?;
                    continue;
                }

                (self.callback)(&file_entry.path)?;
                self.path = file_entry.path;
                self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat).await
                    .map_err(|err| self.wrap_err(err))?;
            }
            self.path = old_path;
            self.entry_counter = entry_counter;
            self.patterns.truncate(old_patterns_count);

            Ok(())
        }.boxed()
    }

    /// `openat()` wrapper which tolerates (but logs) `EACCES` and turns `ENOENT` into `None`.
    ///
    /// The `existed` flag is set when iterating through a directory to note that we know the file
    /// is supposed to exist, so we should warn if it doesn't.
    fn open_file(
        &mut self,
        parent: RawFd,
        file_name: &CStr,
        oflags: OFlag,
        existed: bool,
    ) -> Result<Option<Fd>, Error> {
        // common flags we always want to use:
        let oflags = oflags | OFlag::O_CLOEXEC | OFlag::O_NOCTTY;

        let mut noatime = OFlag::O_NOATIME;
        loop {
            return match Fd::openat(
                &unsafe { RawFdNum::from_raw_fd(parent) },
                file_name,
                oflags | noatime,
                Mode::empty(),
            ) {
                Ok(fd) => Ok(Some(fd)),
                Err(nix::Error::Sys(Errno::ENOENT)) => {
                    if existed {
                        self.report_vanished_file()?;
                    }
                    Ok(None)
                }
                Err(nix::Error::Sys(Errno::EACCES)) => {
                    writeln!(self.errors, "failed to open file: {:?}: access denied", file_name)?;
                    Ok(None)
                }
                Err(nix::Error::Sys(Errno::EPERM)) if !noatime.is_empty() => {
                    // Retry without O_NOATIME:
                    noatime = OFlag::empty();
                    continue;
                }
                Err(other) => Err(Error::from(other)),
            }
        }
    }

    fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> {
        let fd = match self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)? {
            Some(fd) => fd,
            None => return Ok(()),
        };

        let old_pattern_count = self.patterns.len();

        let path_bytes = self.path.as_os_str().as_bytes();

        let file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };

        use io::BufRead;
        for line in io::BufReader::new(file).split(b'\n') {
            let line = match line {
                Ok(line) => line,
                Err(err) => {
                    let _ = writeln!(
                        self.errors,
                        "ignoring .pxarexclude after read error in {:?}: {}",
                        self.path,
                        err,
                    );
                    self.patterns.truncate(old_pattern_count);
                    return Ok(());
                }
            };

            let line = crate::tools::strip_ascii_whitespace(&line);

            if line.is_empty() || line[0] == b'#' {
                continue;
            }

            let mut buf;
            let (line, mode, anchored) = if line[0] == b'/' {
                buf = Vec::with_capacity(path_bytes.len() + 1 + line.len());
                buf.extend(path_bytes);
                buf.extend(line);
                (&buf[..], MatchType::Exclude, true)
            } else if line.starts_with(b"!/") {
                // inverted case with absolute path
                buf = Vec::with_capacity(path_bytes.len() + line.len());
                buf.extend(path_bytes);
                buf.extend(&line[1..]); // without the '!'
                (&buf[..], MatchType::Include, true)
            } else if line.starts_with(b"!") {
                (&line[1..], MatchType::Include, false)
            } else {
                (line, MatchType::Exclude, false)
            };

            match MatchEntry::parse_pattern(line, PatternFlag::PATH_NAME, mode) {
                Ok(pattern) => {
                    if anchored {
                        self.patterns.push(pattern.add_flags(MatchFlag::ANCHORED));
                    } else {
                        self.patterns.push(pattern);
                    }
                }
                Err(err) => {
                    let _ = writeln!(self.errors, "bad pattern in {:?}: {}", self.path, err);
                }
            }
        }

        Ok(())
    }
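
    // Illustrative `.pxarexclude` content (an assumption for clarity, not shipped with this
    // module), matching the parsing rules above: blank lines and `#` comments are skipped,
    // a leading `/` anchors the pattern to the directory containing the file, and a leading
    // `!` turns the pattern into an include (re-including files that would otherwise be
    // excluded).
    //
    //     # build artifacts
    //     *.tmp
    //     /cache
    //     !/cache/keep.me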

    async fn encode_pxarexclude_cli<T: SeqWrite + Send>(
        &mut self,
        encoder: &mut Encoder<'_, T>,
        file_name: &CStr,
        patterns_count: usize,
    ) -> Result<(), Error> {
        let content = generate_pxar_excludes_cli(&self.patterns[..patterns_count]);
        if let Some(ref catalog) = self.catalog {
            catalog.lock().unwrap().add_file(file_name, content.len() as u64, 0)?;
        }

        let mut metadata = Metadata::default();
        metadata.stat.mode = pxar::format::mode::IFREG | 0o600;

        let mut file = encoder.create_file(&metadata, ".pxarexclude-cli", content.len() as u64).await?;
        file.write_all(&content).await?;

        Ok(())
    }

    fn generate_directory_file_list(
        &mut self,
        dir: &mut Dir,
        is_root: bool,
    ) -> Result<Vec<FileListEntry>, Error> {
        let dir_fd = dir.as_raw_fd();

        let mut file_list = Vec::new();

        for file in dir.iter() {
            let file = file?;

            let file_name = file.file_name().to_owned();
            let file_name_bytes = file_name.to_bytes();
            if file_name_bytes == b"." || file_name_bytes == b".." {
                continue;
            }

            if is_root && file_name_bytes == b".pxarexclude-cli" {
                continue;
            }

            let os_file_name = OsStr::from_bytes(file_name_bytes);
            assert_single_path_component(os_file_name)?;
            let full_path = self.path.join(os_file_name);

            let stat = match nix::sys::stat::fstatat(
                dir_fd,
                file_name.as_c_str(),
                nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
            ) {
                Ok(stat) => stat,
                Err(ref err) if err.not_found() => continue,
                Err(err) => bail!("stat failed on {:?}: {}", full_path, err),
            };

            let match_path = PathBuf::from("/").join(full_path.clone());
            if self
                .patterns
                .matches(match_path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
                == Some(MatchType::Exclude)
            {
                continue;
            }

            self.entry_counter += 1;
            if self.entry_counter > self.entry_limit {
                bail!("exceeded allowed number of file entries (> {})", self.entry_limit);
            }

            file_list.push(FileListEntry {
                name: file_name,
                path: full_path,
                stat,
            });
        }

        file_list.sort_unstable_by(|a, b| a.name.cmp(&b.name));

        Ok(file_list)
    }

    fn report_vanished_file(&mut self) -> Result<(), Error> {
        writeln!(self.errors, "warning: file vanished while reading: {:?}", self.path)?;
        Ok(())
    }

    fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
        writeln!(
            self.errors,
            "warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
            self.path,
        )?;
        Ok(())
    }

    fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
        writeln!(
            self.errors,
            "warning: file size increased while reading: {:?}, file will be truncated!",
            self.path,
        )?;
        Ok(())
    }

    async fn add_entry<T: SeqWrite + Send>(
        &mut self,
        encoder: &mut Encoder<'_, T>,
        parent: RawFd,
        c_file_name: &CStr,
        stat: &FileStat,
    ) -> Result<(), Error> {
        use pxar::format::mode;

        let file_mode = stat.st_mode & libc::S_IFMT;
        let open_mode = if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR {
            OFlag::empty()
        } else {
            OFlag::O_PATH
        };

        let fd = self.open_file(
            parent,
            c_file_name,
            open_mode | OFlag::O_RDONLY | OFlag::O_NOFOLLOW,
            true,
        )?;

        let fd = match fd {
            Some(fd) => fd,
            None => return Ok(()),
        };

        let metadata = get_metadata(fd.as_raw_fd(), &stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags)?;

        if self
            .patterns
            .matches(self.path.as_os_str().as_bytes(), Some(stat.st_mode as u32))
            == Some(MatchType::Exclude)
        {
            return Ok(());
        }

        let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
        match metadata.file_type() {
            mode::IFREG => {
                let link_info = HardLinkInfo {
                    st_dev: stat.st_dev,
                    st_ino: stat.st_ino,
                };

                if stat.st_nlink > 1 {
                    if let Some((path, offset)) = self.hardlinks.get(&link_info) {
                        if let Some(ref catalog) = self.catalog {
                            catalog.lock().unwrap().add_hardlink(c_file_name)?;
                        }

                        encoder.add_hardlink(file_name, path, *offset).await?;

                        return Ok(());
                    }
                }

                let file_size = stat.st_size as u64;
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().add_file(c_file_name, file_size, stat.st_mtime)?;
                }

                let offset: LinkOffset =
                    self.add_regular_file(encoder, fd, file_name, &metadata, file_size).await?;

                if stat.st_nlink > 1 {
                    self.hardlinks.insert(link_info, (self.path.clone(), offset));
                }

                Ok(())
            }
            mode::IFDIR => {
                let dir = Dir::from_fd(fd.into_raw_fd())?;

                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().start_directory(c_file_name)?;
                }
                let result = self.add_directory(encoder, dir, c_file_name, &metadata, stat).await;
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().end_directory()?;
                }
                result
            }
            mode::IFSOCK => {
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().add_socket(c_file_name)?;
                }

                Ok(encoder.add_socket(&metadata, file_name).await?)
            }
            mode::IFIFO => {
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().add_fifo(c_file_name)?;
                }

                Ok(encoder.add_fifo(&metadata, file_name).await?)
            }
            mode::IFLNK => {
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().add_symlink(c_file_name)?;
                }

                self.add_symlink(encoder, fd, file_name, &metadata).await
            }
            mode::IFBLK => {
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().add_block_device(c_file_name)?;
                }

                self.add_device(encoder, file_name, &metadata, &stat).await
            }
            mode::IFCHR => {
                if let Some(ref catalog) = self.catalog {
                    catalog.lock().unwrap().add_char_device(c_file_name)?;
                }

                self.add_device(encoder, file_name, &metadata, &stat).await
            }
            other => bail!(
                "encountered unknown file type: 0x{:x} (0o{:o})",
                other,
                other
            ),
        }
    }

    async fn add_directory<T: SeqWrite + Send>(
        &mut self,
        encoder: &mut Encoder<'_, T>,
        dir: Dir,
        dir_name: &CStr,
        metadata: &Metadata,
        stat: &FileStat,
    ) -> Result<(), Error> {
        let dir_name = OsStr::from_bytes(dir_name.to_bytes());

        let mut encoder = encoder.create_directory(dir_name, &metadata).await?;

        let old_fs_magic = self.fs_magic;
        let old_fs_feature_flags = self.fs_feature_flags;
        let old_st_dev = self.current_st_dev;

        let mut skip_contents = false;
        if old_st_dev != stat.st_dev {
            self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
            self.fs_feature_flags = Flags::from_magic(self.fs_magic);
            self.current_st_dev = stat.st_dev;

            if is_virtual_file_system(self.fs_magic) {
                skip_contents = true;
            } else if let Some(set) = &self.device_set {
                skip_contents = !set.contains(&stat.st_dev);
            }
        }

        let result = if skip_contents {
            writeln!(self.logger, "skipping mount point: {:?}", self.path)?;
            Ok(())
        } else {
            self.archive_dir_contents(&mut encoder, dir, false).await
        };

        self.fs_magic = old_fs_magic;
        self.fs_feature_flags = old_fs_feature_flags;
        self.current_st_dev = old_st_dev;

        encoder.finish().await?;
        result
    }

    async fn add_regular_file<T: SeqWrite + Send>(
        &mut self,
        encoder: &mut Encoder<'_, T>,
        fd: Fd,
        file_name: &Path,
        metadata: &Metadata,
        file_size: u64,
    ) -> Result<LinkOffset, Error> {
        let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
        let mut remaining = file_size;
        let mut out = encoder.create_file(metadata, file_name, file_size).await?;
        while remaining != 0 {
            let mut got = match file.read(&mut self.file_copy_buffer[..]) {
                Ok(0) => break,
                Ok(got) => got,
                Err(err) if err.kind() == std::io::ErrorKind::Interrupted => continue,
                Err(err) => bail!(err),
            };
            if got as u64 > remaining {
                self.report_file_grew_while_reading()?;
                got = remaining as usize;
            }
            out.write_all(&self.file_copy_buffer[..got]).await?;
            remaining -= got as u64;
        }
        if remaining > 0 {
            self.report_file_shrunk_while_reading()?;
            let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
            vec::clear(&mut self.file_copy_buffer[..to_zero]);
            while remaining != 0 {
                let fill = remaining.min(self.file_copy_buffer.len() as u64) as usize;
                out.write_all(&self.file_copy_buffer[..fill]).await?;
                remaining -= fill as u64;
            }
        }

        Ok(out.file_offset())
    }

    async fn add_symlink<T: SeqWrite + Send>(
        &mut self,
        encoder: &mut Encoder<'_, T>,
        fd: Fd,
        file_name: &Path,
        metadata: &Metadata,
    ) -> Result<(), Error> {
        let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
        encoder.add_symlink(metadata, file_name, dest).await?;
        Ok(())
    }

    async fn add_device<T: SeqWrite + Send>(
        &mut self,
        encoder: &mut Encoder<'_, T>,
        file_name: &Path,
        metadata: &Metadata,
        stat: &FileStat,
    ) -> Result<(), Error> {
        Ok(encoder.add_device(
            metadata,
            file_name,
            pxar::format::Device::from_dev_t(stat.st_rdev),
        ).await?)
    }
}

fn get_metadata(fd: RawFd, stat: &FileStat, flags: Flags, fs_magic: i64, fs_feature_flags: &mut Flags) -> Result<Metadata, Error> {
    // required for some of these
    let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());

    let mut meta = Metadata {
        stat: pxar::Stat {
            mode: u64::from(stat.st_mode),
            flags: 0,
            uid: stat.st_uid,
            gid: stat.st_gid,
            mtime: pxar::format::StatxTimestamp::new(stat.st_mtime, stat.st_mtime_nsec as u32),
        },
        ..Default::default()
    };

    get_xattr_fcaps_acl(&mut meta, fd, &proc_path, flags, fs_feature_flags)?;
    get_chattr(&mut meta, fd)?;
    get_fat_attr(&mut meta, fd, fs_magic)?;
    get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
    Ok(meta)
}

fn get_fcaps(meta: &mut Metadata, fd: RawFd, flags: Flags, fs_feature_flags: &mut Flags) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_FCAPS) {
        return Ok(());
    }

    match xattr::fgetxattr(fd, xattr::xattr_name_fcaps()) {
        Ok(data) => {
            meta.fcaps = Some(pxar::format::FCaps { data });
            Ok(())
        }
        Err(Errno::ENODATA) => Ok(()),
        Err(Errno::EOPNOTSUPP) => {
            fs_feature_flags.remove(Flags::WITH_FCAPS);
            Ok(())
        }
        Err(Errno::EBADF) => Ok(()), // symlinks
        Err(err) => bail!("failed to read file capabilities: {}", err),
    }
}

fn get_xattr_fcaps_acl(
    meta: &mut Metadata,
    fd: RawFd,
    proc_path: &Path,
    flags: Flags,
    fs_feature_flags: &mut Flags,
) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_XATTRS) {
        return Ok(());
    }

    let xattrs = match xattr::flistxattr(fd) {
        Ok(names) => names,
        Err(Errno::EOPNOTSUPP) => {
            fs_feature_flags.remove(Flags::WITH_XATTRS);
            return Ok(());
        },
        Err(Errno::EBADF) => return Ok(()), // symlinks
        Err(err) => bail!("failed to read xattrs: {}", err),
    };

    for attr in &xattrs {
        if xattr::is_security_capability(&attr) {
            get_fcaps(meta, fd, flags, fs_feature_flags)?;
            continue;
        }

        if xattr::is_acl(&attr) {
            get_acl(meta, proc_path, flags, fs_feature_flags)?;
            continue;
        }

        if !xattr::is_valid_xattr_name(&attr) {
            continue;
        }

        match xattr::fgetxattr(fd, attr) {
            Ok(data) => meta
                .xattrs
                .push(pxar::format::XAttr::new(attr.to_bytes(), data)),
            Err(Errno::ENODATA) => (), // it got removed while we were iterating...
            Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
            Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either
            Err(err) => bail!("error reading extended attribute {:?}: {}", attr, err),
        }
    }

    Ok(())
}

fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
    let mut attr: libc::c_long = 0;

    match unsafe { fs::read_attr_fd(fd, &mut attr) } {
        Ok(_) => (),
        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
            return Ok(());
        }
        Err(err) => bail!("failed to read file attributes: {}", err),
    }

    metadata.stat.flags |= Flags::from_chattr(attr).bits();

    Ok(())
}

fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
    use proxmox::sys::linux::magic::*;

    if fs_magic != MSDOS_SUPER_MAGIC && fs_magic != FUSE_SUPER_MAGIC {
        return Ok(());
    }

    let mut attr: u32 = 0;

    match unsafe { fs::read_fat_attr_fd(fd, &mut attr) } {
        Ok(_) => (),
        Err(nix::Error::Sys(errno)) if errno_is_unsupported(errno) => {
            return Ok(());
        }
        Err(err) => bail!("failed to read fat attributes: {}", err),
    }

    metadata.stat.flags |= Flags::from_fat_attr(attr).bits();

    Ok(())
}

/// Read the quota project id for an inode, supported on ext4/XFS/FUSE/ZFS filesystems
fn get_quota_project_id(
    metadata: &mut Metadata,
    fd: RawFd,
    flags: Flags,
    magic: i64,
) -> Result<(), Error> {
    if !(metadata.is_dir() || metadata.is_regular_file()) {
        return Ok(());
    }

    if !flags.contains(Flags::WITH_QUOTA_PROJID) {
        return Ok(());
    }

    use proxmox::sys::linux::magic::*;

    match magic {
        EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
        _ => return Ok(()),
    }

    let mut fsxattr = fs::FSXAttr::default();
    let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };

    // On some FUSE filesystems the FS_IOC_FSGETXATTR ioctl is not supported; in that
    // case the error is ignored and no quota project id is recorded.
    if let Err(err) = res {
        let errno = err
            .as_errno()
            .ok_or_else(|| format_err!("error while reading quota project id"))?;
        if errno_is_unsupported(errno) {
            return Ok(());
        } else {
            bail!("error while reading quota project id ({})", errno);
        }
    }

    let projid = fsxattr.fsx_projid as u64;
    if projid != 0 {
        metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid });
    }
    Ok(())
}

fn get_acl(metadata: &mut Metadata, proc_path: &Path, flags: Flags, fs_feature_flags: &mut Flags) -> Result<(), Error> {
    if !flags.contains(Flags::WITH_ACL) {
        return Ok(());
    }

    if metadata.is_symlink() {
        return Ok(());
    }

    get_acl_do(metadata, proc_path, acl::ACL_TYPE_ACCESS, fs_feature_flags)?;

    if metadata.is_dir() {
        get_acl_do(metadata, proc_path, acl::ACL_TYPE_DEFAULT, fs_feature_flags)?;
    }

    Ok(())
}

fn get_acl_do(
    metadata: &mut Metadata,
    proc_path: &Path,
    acl_type: acl::ACLType,
    fs_feature_flags: &mut Flags,
) -> Result<(), Error> {
    // To read ACLs of type ACL_TYPE_DEFAULT we have to go through a path and
    // acl_get_file(), since acl_get_fd() can only return ACL_TYPE_ACCESS
    // attributes.
    let acl = match acl::ACL::get_file(&proc_path, acl_type) {
        Ok(acl) => acl,
        // Don't bail if underlying endpoint does not support acls
        Err(Errno::EOPNOTSUPP) => {
            fs_feature_flags.remove(Flags::WITH_ACL);
            return Ok(());
        }
        // Don't bail if the endpoint cannot carry acls
        Err(Errno::EBADF) => return Ok(()),
        // Don't bail if there is no data
        Err(Errno::ENODATA) => return Ok(()),
        Err(err) => bail!("error while reading ACL - {}", err),
    };

    process_acl(metadata, acl, acl_type)
}

fn process_acl(
    metadata: &mut Metadata,
    acl: acl::ACL,
    acl_type: acl::ACLType,
) -> Result<(), Error> {
    use pxar::format::acl as pxar_acl;
    use pxar::format::acl::{Group, GroupObject, Permissions, User};

    let mut acl_user = Vec::new();
    let mut acl_group = Vec::new();
    let mut acl_group_obj = None;
    let mut acl_default = None;
    let mut user_obj_permissions = None;
    let mut group_obj_permissions = None;
    let mut other_permissions = None;
    let mut mask_permissions = None;

    for entry in &mut acl.entries() {
        let tag = entry.get_tag_type()?;
        let permissions = entry.get_permissions()?;
        match tag {
            acl::ACL_USER_OBJ => user_obj_permissions = Some(Permissions(permissions)),
            acl::ACL_GROUP_OBJ => group_obj_permissions = Some(Permissions(permissions)),
            acl::ACL_OTHER => other_permissions = Some(Permissions(permissions)),
            acl::ACL_MASK => mask_permissions = Some(Permissions(permissions)),
            acl::ACL_USER => {
                acl_user.push(User {
                    uid: entry.get_qualifier()?,
                    permissions: Permissions(permissions),
                });
            }
            acl::ACL_GROUP => {
                acl_group.push(Group {
                    gid: entry.get_qualifier()?,
                    permissions: Permissions(permissions),
                });
            }
            _ => bail!("Unexpected ACL tag encountered!"),
        }
    }

    acl_user.sort();
    acl_group.sort();

    match acl_type {
        acl::ACL_TYPE_ACCESS => {
            // If a mask entry is present, the stat group bits reflect the mask rather than
            // the ACL group-object permissions, so only then do we need to store the group
            // permissions separately; otherwise they are identical to the stat group bits.
            if let (Some(gop), true) = (group_obj_permissions, mask_permissions.is_some()) {
                acl_group_obj = Some(GroupObject { permissions: gop });
            }

            metadata.acl.users = acl_user;
            metadata.acl.groups = acl_group;
            metadata.acl.group_obj = acl_group_obj;
        }
        acl::ACL_TYPE_DEFAULT => {
            if user_obj_permissions != None
                || group_obj_permissions != None
                || other_permissions != None
                || mask_permissions != None
            {
                acl_default = Some(pxar_acl::Default {
                    // The value is set to UINT64_MAX as placeholder if one
                    // of the permissions is not set
                    user_obj_permissions: user_obj_permissions.unwrap_or(Permissions::NO_MASK),
                    group_obj_permissions: group_obj_permissions.unwrap_or(Permissions::NO_MASK),
                    other_permissions: other_permissions.unwrap_or(Permissions::NO_MASK),
                    mask_permissions: mask_permissions.unwrap_or(Permissions::NO_MASK),
                });
            }

            metadata.acl.default_users = acl_user;
            metadata.acl.default_groups = acl_group;
            metadata.acl.default = acl_default;
        }
        _ => bail!("Unexpected ACL type encountered"),
    }

    Ok(())
}

/// Note that our pattern lists are "positive". `MatchType::Include` means the file is included.
/// Since we are generating an *exclude* list, we need to invert this, so includes get a `'!'`
/// prefix.
fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec<u8> {
    use pathpatterns::MatchPattern;

    let mut content = Vec::new();

    for pattern in patterns {
        match pattern.match_type() {
            MatchType::Include => content.push(b'!'),
            MatchType::Exclude => (),
        }

        match pattern.pattern() {
            MatchPattern::Literal(lit) => content.extend(lit),
            MatchPattern::Pattern(pat) => content.extend(pat.pattern().to_bytes()),
        }

        if pattern.match_flags() == MatchFlag::MATCH_DIRECTORIES && content.last() != Some(&b'/') {
            content.push(b'/');
        }

        content.push(b'\n');
    }

    content
}
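
// Small sanity-check sketches added as examples (assumptions about useful coverage, not
// tests shipped with the original module): they exercise the pure helpers above without
// touching the file system.
#[cfg(test)]
mod example_tests {
    use super::*;
    use proxmox::sys::linux::magic::{EXT4_SUPER_MAGIC, PROC_SUPER_MAGIC};

    #[test]
    fn virtual_file_system_detection() {
        // /proc is in the virtual file system list above, ext4 is not.
        assert!(is_virtual_file_system(PROC_SUPER_MAGIC));
        assert!(!is_virtual_file_system(EXT4_SUPER_MAGIC));
    }

    #[test]
    fn pxar_excludes_cli_shape() {
        // Include entries get a '!' prefix and every entry is newline terminated.
        let exclude =
            MatchEntry::parse_pattern("lost+found", PatternFlag::PATH_NAME, MatchType::Exclude)
                .expect("valid exclude pattern");
        let include =
            MatchEntry::parse_pattern("keep.me", PatternFlag::PATH_NAME, MatchType::Include)
                .expect("valid include pattern");

        let content = generate_pxar_excludes_cli(&[exclude, include]);
        let text = String::from_utf8(content).expect("exclude list should be valid utf-8");
        let lines: Vec<&str> = text.lines().collect();

        assert_eq!(lines.len(), 2);
        assert!(lines[0].starts_with("lost+found"));
        assert!(lines[1].starts_with("!keep.me"));
    }
}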