// proxmox-backup: pbs-datastore/src/datastore.rs
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, Mutex};
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;

use proxmox_schema::ApiType;

use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_sys::WorkerTaskContext;
use proxmox_sys::{task_log, task_warn};

use pbs_api_types::{
    Authid, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus, HumanByte,
    Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, BACKUP_TYPE_REGEX, UPID,
};
use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};

use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
use crate::index::IndexFile;
use crate::manifest::{
    archive_type, ArchiveType, BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
    MANIFEST_LOCK_NAME,
};
use crate::task_tracking::update_active_operations;
use crate::DataBlob;

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStoreImpl>>> =
        Mutex::new(HashMap::new());
}
/// Checks if `auth_id` is the owner or, if the owner is an API token, if
/// `auth_id` is the user that the token belongs to.
pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
    let correct_owner =
        owner == auth_id || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
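// Example (illustration only, not part of the original file): how the owner check behaves
// for an API token, assuming both Authid values parse successfully.
//
//     let owner: Authid = "alice@pbs!sync".parse()?;
//     let auth_id: Authid = "alice@pbs".parse()?;
//     check_backup_owner(&owner, &auth_id)?; // ok: the token belongs to alice@pbs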
/// Datastore Management
///
/// A datastore can store several backups and provides the
/// management interface for them.
pub struct DataStoreImpl {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<()>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
    chunk_order: ChunkOrder,
    last_generation: usize,
    last_update: i64,
}

pub struct DataStore {
    inner: Arc<DataStoreImpl>,
    operation: Option<Operation>,
}

impl Clone for DataStore {
    fn clone(&self) -> Self {
        let mut new_operation = self.operation;
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, 1) {
                log::error!("could not update active operations - {}", e);
                new_operation = None;
            }
        }

        DataStore {
            inner: self.inner.clone(),
            operation: new_operation,
        }
    }
}

impl Drop for DataStore {
    fn drop(&mut self) {
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, -1) {
                log::error!("could not update active operations - {}", e);
            }
        }
    }
}

impl DataStore {
    pub fn lookup_datastore(
        name: &str,
        operation: Option<Operation>,
    ) -> Result<Arc<DataStore>, Error> {
        let version_cache = ConfigVersionCache::new()?;
        let generation = version_cache.datastore_generation();
        let now = proxmox_time::epoch_i64();

        let (config, _digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;
        let path = PathBuf::from(&config.path);

        if let Some(maintenance_mode) = config.get_maintenance_mode() {
            if let Err(error) = maintenance_mode.check(operation) {
                bail!("datastore '{}' is in {}", name, error);
            }
        }

        if let Some(operation) = operation {
            update_active_operations(name, operation, 1)?;
        }

        let mut map = DATASTORE_MAP.lock().unwrap();
        let entry = map.get(name);

        if let Some(datastore) = &entry {
            if datastore.last_generation == generation && now < (datastore.last_update + 60) {
                return Ok(Arc::new(Self {
                    inner: Arc::clone(datastore),
                    operation,
                }));
            }
        }

        let datastore = DataStore::open_with_path(name, &path, config, generation, now)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(Arc::new(Self {
            inner: datastore,
            operation,
        }))
    }
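    // Example (sketch, assumes a datastore named "store1" exists in datastore.cfg): looking
    // up a datastore with an Operation both enforces the maintenance mode check and tracks
    // the operation via update_active_operations(); the counter is decremented again when
    // the returned handle is dropped.
    //
    //     let store = DataStore::lookup_datastore("store1", Some(Operation::Read))?;
    //     println!("base path: {:?}", store.base_path());
    //     drop(store); // releases the tracked read operation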
    /// removes all datastores that are not configured anymore
    pub fn remove_unused_datastores() -> Result<(), Error> {
        let (config, _digest) = pbs_config::datastore::config()?;

        let mut map = DATASTORE_MAP.lock().unwrap();
        // removes all elements that are not in the config
        map.retain(|key, _| config.sections.contains_key(key));
        Ok(())
    }

    fn open_with_path(
        store_name: &str,
        path: &Path,
        config: DataStoreConfig,
        last_generation: usize,
        last_update: i64,
    ) -> Result<DataStoreImpl, Error> {
        let chunk_store = ChunkStore::open(store_name, path)?;

        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    eprintln!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;
        let chunk_order = tuning.chunk_order.unwrap_or(ChunkOrder::Inode);

        Ok(DataStoreImpl {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
            chunk_order,
            last_generation,
            last_update,
        })
    }

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.inner.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<FixedIndexWriter, Error> {
        let index = FixedIndexWriter::create(
            self.inner.chunk_store.clone(),
            filename.as_ref(),
            size,
            chunk_size,
        )?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<FixedIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexWriter, Error> {
        let index = DynamicIndexWriter::create(self.inner.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> = match archive_type(filename)? {
            ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
            ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
            _ => bail!("cannot open index file of unknown type: {:?}", filename),
        };
        Ok(out)
    }
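    // Example (sketch, the archive path is hypothetical): open_index() dispatches on the
    // archive type, so fixed (.fidx) and dynamic (.didx) indexes can be used uniformly
    // through the IndexFile trait.
    //
    //     let index = store.open_index("vm/100/2022-01-01T00:00:00Z/drive-scsi0.img.fidx")?;
    //     for pos in 0..index.index_count() {
    //         let digest = index.index_digest(pos).unwrap();
    //         println!("chunk {}: {}", pos, hex::encode(digest));
    //     }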
    /// Fast index verification - only checks if chunks exist
    pub fn fast_index_verification(
        &self,
        index: &dyn IndexFile,
        checked: &mut HashSet<[u8; 32]>,
    ) -> Result<(), Error> {
        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            if checked.contains(&info.digest) {
                continue;
            }

            self.stat_chunk(&info.digest).map_err(|err| {
                format_err!(
                    "fast_index_verification error, stat_chunk {} failed - {}",
                    hex::encode(&info.digest),
                    err,
                )
            })?;

            checked.insert(info.digest);
        }

        Ok(())
    }

    pub fn name(&self) -> &str {
        self.inner.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.inner.chunk_store.base_path()
    }

    /// Clean up a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(
        &self,
        backup_dir: &BackupDir,
        manifest: &BackupManifest,
    ) -> Result<(), Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());

        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| {
            wanted_files.insert(item.filename.clone());
        });

        for item in proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &full_path)?.flatten() {
            if let Some(file_type) = item.file_type() {
                if file_type != nix::dir::Type::File {
                    continue;
                }
            }
            let file_name = item.file_name().to_bytes();
            if file_name == b"." || file_name == b".." {
                continue;
            };
            if let Ok(name) = std::str::from_utf8(file_name) {
                if wanted_files.contains(name) {
                    continue;
                }
            }
            println!("remove unused file {:?}", item.file_name());
            let dirfd = item.parent_fd();
            let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
        }

        Ok(())
    }
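    // Example (sketch): removing leftover files from a snapshot directory, keeping only what
    // the already loaded manifest references.
    //
    //     let (manifest, _raw_size) = store.load_manifest(&backup_dir)?;
    //     store.cleanup_backup_dir(&backup_dir, &manifest)?;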
    /// Returns the absolute path for a backup_group
    pub fn group_path(&self, backup_group: &BackupGroup) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path
    }

    /// Returns the absolute path for backup_dir
    pub fn snapshot_path(&self, backup_dir: &BackupDir) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());
        full_path
    }

    /// Remove a complete backup group including all snapshots; returns true
    /// if all snapshots were removed, and false if some were protected
    pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<bool, Error> {
        let full_path = self.group_path(backup_group);

        let _guard = proxmox_sys::fs::lock_dir_noblock(
            &full_path,
            "backup group",
            "possible running backup",
        )?;

        log::info!("removing backup group {:?}", full_path);

        let mut removed_all = true;

        // remove all individual backup dirs first to ensure nothing is using them
        for snap in backup_group.list_backups(&self.base_path())? {
            if snap.backup_dir.is_protected(self.base_path()) {
                removed_all = false;
                continue;
            }
            self.remove_backup_dir(&snap.backup_dir, false)?;
        }

        if removed_all {
            // no snapshots left, we can now safely remove the empty folder
            std::fs::remove_dir_all(&full_path).map_err(|err| {
                format_err!(
                    "removing backup group directory {:?} failed - {}",
                    full_path,
                    err,
                )
            })?;
        }

        Ok(removed_all)
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {
        let full_path = self.snapshot_path(backup_dir);

        let (_guard, _manifest_guard);
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
            _manifest_guard = self.lock_manifest(backup_dir)?;
        }

        if backup_dir.is_protected(self.base_path()) {
            bail!("cannot remove protected snapshot");
        }

        log::info!("removing backup snapshot {:?}", full_path);
        std::fs::remove_dir_all(&full_path).map_err(|err| {
            format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
        })?;

        // the manifest does not exist anymore, we do not need to keep the lock
        if let Ok(path) = self.manifest_lock_path(backup_dir) {
            // ignore errors
            let _ = std::fs::remove_file(path);
        }

        Ok(())
    }

    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
        let base_path = self.base_path();
        let mut group_path = base_path.clone();
        group_path.push(backup_group.group_path());

        if group_path.exists() {
            backup_group.last_successful_backup(&base_path)
        } else {
            Ok(None)
        }
    }
    /// Returns the backup owner.
    ///
    /// The backup owner is the entity who first created the backup group.
    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path.push("owner");
        let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
        owner.trim_end().parse() // remove trailing newline
    }

    pub fn owns_backup(&self, backup_group: &BackupGroup, auth_id: &Authid) -> Result<bool, Error> {
        let owner = self.get_owner(backup_group)?;

        Ok(check_backup_owner(&owner, auth_id).is_ok())
    }

    /// Set the backup owner.
    pub fn set_owner(
        &self,
        backup_group: &BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let mut path = self.base_path();
        path.push(backup_group.group_path());
        path.push("owner");

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            open_options.create_new(true);
        }

        let mut file = open_options
            .open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }

    /// Create (if it does not already exist) and lock a backup group
    ///
    /// And set the owner to `auth_id`. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_group.backup_type());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(backup_group.backup_id());

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                self.set_owner(backup_group, auth_id, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }

    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_locked_backup_dir(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        let lock = || {
            lock_dir_noblock(
                &full_path,
                "snapshot",
                "internal error - tried creating snapshot that's already in use",
            )
        };

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
                Ok((relative_path, false, lock()?))
            }
            Err(e) => Err(e.into()),
        }
    }
    /// Get a streaming iter over top-level backup groups of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_groups(&self) -> Result<ListGroups, Error> {
        ListGroups::new(self.base_path())
    }

    /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results
    ///
    /// The iterated item's result is already unwrapped; if it contained an error it will be
    /// logged. Can be useful in iterator chain commands.
    pub fn iter_backup_groups_ok(&self) -> Result<impl Iterator<Item = BackupGroup> + '_, Error> {
        Ok(
            ListGroups::new(self.base_path())?.filter_map(move |group| match group {
                Ok(group) => Some(group),
                Err(err) => {
                    log::error!("list groups error on datastore {} - {}", self.name(), err);
                    None
                }
            }),
        )
    }

    /// Get an in-memory vector of all top-level backup groups of a datastore
    ///
    /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
    pub fn list_backup_groups(&self) -> Result<Vec<BackupGroup>, Error> {
        ListGroups::new(self.base_path())?.collect()
    }
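    // Example (sketch): combining the streaming group iterator with ListSnapshots to walk
    // every snapshot of the datastore without collecting everything into memory first.
    //
    //     for group in store.iter_backup_groups_ok()? {
    //         let group_path = store.group_path(&group);
    //         for snapshot in ListSnapshots::new(group, group_path)? {
    //             println!("found snapshot {:?}", snapshot?.relative_path());
    //         }
    //     }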
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry
                .file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                if let Some(path) = err.path() {
                    if inner.kind() == io::ErrorKind::PermissionDenied {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
                        }
                    } else {
                        bail!(
                            "unexpected error on datastore traversal: {} - {:?}",
                            inner,
                            path
                        )
                    }
                } else {
                    bail!("unexpected error on datastore traversal: {}", inner)
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue;
                }
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex
                    || archive_type == ArchiveType::DynamicIndex
                {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }
    // mark chunks used by ``index`` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {}, required by {:?}",
                    hex::encode(digest),
                    file_name,
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.inner.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }

    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;

            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if BackupDir::from_str(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    i + 1,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.inner.last_gc_status.lock().unwrap().clone()
    }

    pub fn garbage_collection_running(&self) -> bool {
        !matches!(self.inner.gc_mutex.try_lock(), Ok(_))
    }
    pub fn garbage_collection(
        &self,
        worker: &dyn WorkerTaskContext,
        upid: &UPID,
    ) -> Result<(), Error> {
        if let Ok(ref mut _mutex) = self.inner.gc_mutex.try_lock() {
            // avoid running GC if an old daemon process still has a running backup
            // writer, which is not safe as we have no "oldest writer" information
            // and thus no safe atime cutoff
            let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox_time::epoch_i64();
            let oldest_writer = self
                .inner
                .chunk_store
                .oldest_writer()
                .unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(upid.to_string());

            task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.inner.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per =
                    (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r) = backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options, false);
            }

            *self.inner.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
    pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
        self.inner.chunk_store.try_shared_lock()
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.inner.chunk_store.chunk_path(digest)
    }

    pub fn cond_touch_chunk(
        &self,
        digest: &[u8; 32],
        fail_if_not_exist: bool,
    ) -> Result<bool, Error> {
        self.inner
            .chunk_store
            .cond_touch_chunk(digest, fail_if_not_exist)
    }

    pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
        self.inner.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        proxmox_lang::try_block!({
            let mut file = std::fs::File::open(&path)?;
            DataBlob::load_from_reader(&mut file)
        })
        .map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
    }

    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
        let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
        std::fs::metadata(chunk_path).map_err(Error::from)
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let (chunk_path, digest_str) = self.inner.chunk_store.chunk_path(digest);

        proxmox_lang::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        })
        .map_err(|err| {
            format_err!(
                "store '{}', unable to load chunk '{}' - {}",
                self.name(),
                digest_str,
                err,
            )
        })
    }

    /// Returns the filename to lock a manifest
    ///
    /// Also creates the basedir. The lockfile is located in
    /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
    fn manifest_lock_path(&self, backup_dir: &BackupDir) -> Result<String, Error> {
        let mut path = format!(
            "/run/proxmox-backup/locks/{}/{}/{}",
            self.name(),
            backup_dir.group().backup_type(),
            backup_dir.group().backup_id(),
        );
        std::fs::create_dir_all(&path)?;
        use std::fmt::Write;
        write!(
            path,
            "/{}{}",
            backup_dir.backup_time_string(),
            &MANIFEST_LOCK_NAME
        )?;

        Ok(path)
    }

    fn lock_manifest(&self, backup_dir: &BackupDir) -> Result<BackupLockGuard, Error> {
        let path = self.manifest_lock_path(backup_dir)?;

        // update_manifest should never take a long time, so if someone else has
        // the lock we can simply block a bit and should get it soon
        open_backup_lockfile(&path, Some(Duration::from_secs(5)), true)
            .map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
    }

    /// Load the manifest without a lock. Must not be written back.
    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let raw_size = blob.raw_size();
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, raw_size))
    }

    /// Update the manifest of the specified snapshot. Never write a manifest directly,
    /// only use this method - anything else may break locking guarantees.
    pub fn update_manifest(
        &self,
        backup_dir: &BackupDir,
        update_fn: impl FnOnce(&mut BackupManifest),
    ) -> Result<(), Error> {
        let _guard = self.lock_manifest(backup_dir)?;
        let (mut manifest, _) = self.load_manifest(backup_dir)?;

        update_fn(&mut manifest);

        let manifest = serde_json::to_value(manifest)?;
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();

        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(MANIFEST_BLOB_NAME);

        // atomic replace invalidates flock - no other writes past this point!
        replace_file(&path, raw_data, CreateOptions::new(), false)?;

        Ok(())
    }
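    // Example (sketch, the "note" key is purely illustrative): the closure-based API keeps
    // the whole read-modify-write cycle under the manifest lock, so callers never serialize
    // a manifest themselves.
    //
    //     store.update_manifest(&backup_dir, |manifest| {
    //         manifest.unprotected["note"] = serde_json::json!("verified manually");
    //     })?;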
    /// Updates the protection status of the specified snapshot.
    pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
        let full_path = self.snapshot_path(backup_dir);

        let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;

        let protected_path = backup_dir.protected_file(self.base_path());
        if protection {
            std::fs::File::create(protected_path)
                .map_err(|err| format_err!("could not create protection file: {}", err))?;
        } else if let Err(err) = std::fs::remove_file(protected_path) {
            // ignore error for non-existing file
            if err.kind() != std::io::ErrorKind::NotFound {
                bail!("could not remove protection file: {}", err);
            }
        }

        Ok(())
    }

    pub fn verify_new(&self) -> bool {
        self.inner.verify_new
    }

    /// returns a list of chunks sorted by their inode number on disk
    /// chunks that could not be stat'ed are at the end of the list
    pub fn get_chunks_in_order<F, A>(
        &self,
        index: &Box<dyn IndexFile + Send>,
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
    where
        F: Fn(&[u8; 32]) -> bool,
        A: Fn(usize) -> Result<(), Error>,
    {
        let index_count = index.index_count();
        let mut chunk_list = Vec::with_capacity(index_count);
        use std::os::unix::fs::MetadataExt;
        for pos in 0..index_count {
            check_abort(pos)?;

            let info = index.chunk_info(pos).unwrap();

            if skip_chunk(&info.digest) {
                continue;
            }

            let ino = match self.inner.chunk_order {
                ChunkOrder::Inode => {
                    match self.stat_chunk(&info.digest) {
                        Err(_) => u64::MAX, // could not stat, move to end of list
                        Ok(metadata) => metadata.ino(),
                    }
                }
                ChunkOrder::None => 0,
            };

            chunk_list.push((pos, ino));
        }

        match self.inner.chunk_order {
            // sorting by inode improves data locality, which makes it lots faster on spinners
            ChunkOrder::Inode => {
                chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b))
            }
            ChunkOrder::None => {}
        }

        Ok(chunk_list)
    }
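    // Example (sketch): a verify- or restore-style reader can use get_chunks_in_order() to
    // access chunks in inode order, which reduces seeking on spinning disks.
    //
    //     let index = store.open_index(&index_path)?;
    //     let chunk_list = store.get_chunks_in_order(&index, |_| false, |_| Ok(()))?;
    //     for (pos, _ino) in chunk_list {
    //         let info = index.chunk_info(pos).unwrap();
    //         let _chunk = store.load_chunk(&info.digest)?;
    //     }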
}

/// An iterator over all BackupDir's (snapshots) in a BackupGroup
pub struct ListSnapshots {
    group: BackupGroup,
    fd: proxmox_sys::fs::ReadDir,
}

impl ListSnapshots {
    pub fn new(group: BackupGroup, group_path: PathBuf) -> Result<Self, Error> {
        Ok(ListSnapshots {
            fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &group_path)?,
            group,
        })
    }
}

impl Iterator for ListSnapshots {
    type Item = Result<BackupDir, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let item = self.fd.next()?;
            match item {
                Ok(ref entry) => {
                    if let Ok(name) = entry.file_name().to_str() {
                        match entry.file_type() {
                            Some(nix::dir::Type::Directory) => {} // OK
                            _ => continue,
                        }
                        if BACKUP_DATE_REGEX.is_match(name) {
                            let backup_time = match proxmox_time::parse_rfc3339(&name) {
                                Ok(time) => time,
                                Err(err) => return Some(Err(err)),
                            };

                            return Some(BackupDir::with_group(self.group.clone(), backup_time));
                        }
                    }
                    continue; // file did not match regex or isn't valid utf-8
                }
                Err(err) => return Some(Err(err)),
            }
        }
    }
}

/// An iterator over a (single) level of backup groups
pub struct ListGroups {
    type_fd: proxmox_sys::fs::ReadDir,
    id_state: Option<(String, proxmox_sys::fs::ReadDir)>,
}

impl ListGroups {
    pub fn new(base_path: PathBuf) -> Result<Self, Error> {
        Ok(ListGroups {
            type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?,
            id_state: None,
        })
    }
}

impl Iterator for ListGroups {
    type Item = Result<BackupGroup, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            if let Some((ref group_type, ref mut id_fd)) = self.id_state {
                let item = match id_fd.next() {
                    Some(item) => item,
                    None => {
                        self.id_state = None;
                        continue; // exhausted all IDs for the current group type, try others
                    }
                };
                match item {
                    Ok(ref entry) => {
                        if let Ok(name) = entry.file_name().to_str() {
                            match entry.file_type() {
                                Some(nix::dir::Type::Directory) => {} // OK
                                _ => continue,
                            }
                            if BACKUP_ID_REGEX.is_match(name) {
                                return Some(Ok(BackupGroup::new(group_type, name)));
                            }
                        }
                        continue; // file did not match regex or isn't valid utf-8
                    }
                    Err(err) => return Some(Err(err)),
                }
            } else {
                let item = self.type_fd.next()?;
                match item {
                    Ok(ref entry) => {
                        if let Ok(name) = entry.file_name().to_str() {
                            match entry.file_type() {
                                Some(nix::dir::Type::Directory) => {} // OK
                                _ => continue,
                            }
                            if BACKUP_TYPE_REGEX.is_match(name) {
                                // found a backup group type, descend into it to scan all IDs in it
                                // by switching to the id-state branch
                                let base_fd = entry.parent_fd();
                                let id_dirfd = match proxmox_sys::fs::read_subdir(base_fd, name) {
                                    Ok(dirfd) => dirfd,
                                    Err(err) => return Some(Err(err.into())),
                                };
                                self.id_state = Some((name.to_owned(), id_dirfd));
                            }
                        }
                        continue; // file did not match regex or isn't valid utf-8
                    }
                    Err(err) => return Some(Err(err)),
                }
            }
        }
    }
}