]> git.proxmox.com Git - proxmox-backup.git/blame - pbs-datastore/src/datastore.rs
api-types: DataStoreConfig::new for testing
[proxmox-backup.git] / pbs-datastore / src / datastore.rs
CommitLineData
42c2b5be
TL
1use std::collections::{HashMap, HashSet};
2use std::convert::TryFrom;
54552dda 3use std::io::{self, Write};
367f002e 4use std::path::{Path, PathBuf};
cb4b721c 5use std::str::FromStr;
42c2b5be 6use std::sync::{Arc, Mutex};
1a374fcf 7use std::time::Duration;
367f002e 8
f7d4e4b5 9use anyhow::{bail, format_err, Error};
2c32fdde 10use lazy_static::lazy_static;
e4439025 11
fef61684
DC
12use proxmox_schema::ApiType;
13
42c2b5be
TL
14use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
15use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
d5790a9f 16use proxmox_sys::process_locker::ProcessLockSharedGuard;
25877d05 17use proxmox_sys::WorkerTaskContext;
d5790a9f 18use proxmox_sys::{task_log, task_warn};
529de6c7 19
fef61684 20use pbs_api_types::{
988d575d 21 Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
6b0c6492 22 HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
fef61684 23};
118deb4d 24use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};
529de6c7 25
42c2b5be 26use crate::backup_info::{BackupDir, BackupGroup};
6d5d305d
DM
27use crate::chunk_store::ChunkStore;
28use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
29use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
30use crate::index::IndexFile;
31use crate::manifest::{
42c2b5be
TL
32 archive_type, ArchiveType, BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
33 MANIFEST_LOCK_NAME,
6d5d305d 34};
4bc84a65 35use crate::task_tracking::update_active_operations;
42c2b5be 36use crate::DataBlob;
6d5d305d 37
// Process-wide cache of opened datastore instances, keyed by datastore name.
// Entries are inserted by `lookup_datastore` and pruned by
// `remove_unused_datastores`.
lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStoreImpl>>> =
        Mutex::new(HashMap::new());
}
ff3d3100 42
9751ef4b
DC
43/// checks if auth_id is owner, or, if owner is a token, if
44/// auth_id is the user of the token
42c2b5be
TL
45pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
46 let correct_owner =
47 owner == auth_id || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
9751ef4b
DC
48 if !correct_owner {
49 bail!("backup owner check failed ({} != {})", auth_id, owner);
50 }
51 Ok(())
52}
53
/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backup.
pub struct DataStoreImpl {
    // underlying chunk store holding the deduplicated data chunks
    chunk_store: Arc<ChunkStore>,
    // serializes garbage collection runs (held for the whole GC)
    gc_mutex: Mutex<()>,
    // status of the most recent garbage collection run
    last_gc_status: Mutex<GarbageCollectionStatus>,
    // from config: whether new backups should be marked for verification
    verify_new: bool,
    // from config tuning: preferred chunk iteration order
    chunk_order: ChunkOrder,
    // datastore config generation this instance was built from (cache invalidation)
    last_generation: usize,
    // epoch when this instance was created; cache entries expire after 60s
    last_update: i64,
}
67
/// Handle to a datastore, tracking the operation it was opened for.
///
/// Cloning/dropping adjusts the active-operation bookkeeping so that
/// maintenance-mode checks know whether the store is in use.
pub struct DataStore {
    inner: Arc<DataStoreImpl>,
    // operation this handle was opened for; counted in the task-tracking file
    operation: Option<Operation>,
}
72
73impl Clone for DataStore {
74 fn clone(&self) -> Self {
75 let mut new_operation = self.operation;
76 if let Some(operation) = self.operation {
77 if let Err(e) = update_active_operations(self.name(), operation, 1) {
78 log::error!("could not update active operations - {}", e);
79 new_operation = None;
80 }
81 }
82
83 DataStore {
84 inner: self.inner.clone(),
85 operation: new_operation,
86 }
87 }
88}
89
impl Drop for DataStore {
    fn drop(&mut self) {
        // Release this handle's slot in the active-operation counter;
        // failures are only logged since drop cannot propagate errors.
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, -1) {
                log::error!("could not update active operations - {}", e);
            }
        }
    }
}
99
529de6c7 100impl DataStore {
    /// Look up (and possibly open) a datastore by name.
    ///
    /// Checks the datastore config (maintenance mode) first, registers the
    /// requested `operation` in the active-operations tracking, and then
    /// returns a cached `DataStoreImpl` if its config generation still
    /// matches and it is younger than 60 seconds; otherwise the store is
    /// (re-)opened and the cache entry replaced.
    pub fn lookup_datastore(
        name: &str,
        operation: Option<Operation>,
    ) -> Result<Arc<DataStore>, Error> {
        let version_cache = ConfigVersionCache::new()?;
        let generation = version_cache.datastore_generation();
        let now = proxmox_time::epoch_i64();

        let (config, _digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;
        let path = PathBuf::from(&config.path);

        // refuse the requested operation while the store is in maintenance mode
        if let Some(maintenance_mode) = config.get_maintenance_mode() {
            if let Err(error) = maintenance_mode.check(operation) {
                bail!("datastore '{}' is in {}", name, error);
            }
        }

        // NOTE(review): the counter is incremented before the store is opened;
        // if `open_with_path` below fails the increment is not rolled back —
        // confirm whether callers/task tracking tolerate this.
        if let Some(operation) = operation {
            update_active_operations(name, operation, 1)?;
        }

        let mut map = DATASTORE_MAP.lock().unwrap();
        let entry = map.get(name);

        // reuse cached instance while config generation matches and entry is fresh (< 60s)
        if let Some(datastore) = &entry {
            if datastore.last_generation == generation && now < (datastore.last_update + 60) {
                return Ok(Arc::new(Self {
                    inner: Arc::clone(datastore),
                    operation,
                }));
            }
        }

        let datastore = DataStore::open_with_path(name, &path, config, generation, now)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(Arc::new(Self {
            inner: datastore,
            operation,
        }))
    }
145
062cf75c 146 /// removes all datastores that are not configured anymore
42c2b5be 147 pub fn remove_unused_datastores() -> Result<(), Error> {
e7d4be9d 148 let (config, _digest) = pbs_config::datastore::config()?;
062cf75c
DC
149
150 let mut map = DATASTORE_MAP.lock().unwrap();
151 // removes all elements that are not in the config
42c2b5be 152 map.retain(|key, _| config.sections.contains_key(key));
062cf75c
DC
153 Ok(())
154 }
155
    /// Open the chunk store at `path` and build a `DataStoreImpl` from the
    /// given config.
    ///
    /// Restores the persisted GC status from `.gc-status` in the store base
    /// directory (falling back to defaults on parse errors) and applies the
    /// `tuning` property string from the config.
    fn open_with_path(
        store_name: &str,
        path: &Path,
        config: DataStoreConfig,
        last_generation: usize,
        last_update: i64,
    ) -> Result<DataStoreImpl, Error> {
        let chunk_store = ChunkStore::open(store_name, path)?;

        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        // a missing or unreadable status file is not fatal — start from defaults
        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    eprintln!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        // parse the `tuning` property string against its API schema
        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;
        let chunk_order = tuning.chunk_order.unwrap_or(ChunkOrder::Inode);

        Ok(DataStoreImpl {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
            chunk_order,
            last_generation,
            last_update,
        })
    }
196
    /// Iterate over all chunks of the underlying chunk store.
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.inner.chunk_store.get_chunk_iterator()
    }

    /// Create a new fixed-size index writer for `filename`.
    pub fn create_fixed_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<FixedIndexWriter, Error> {
        let index = FixedIndexWriter::create(
            self.inner.chunk_store.clone(),
            filename.as_ref(),
            size,
            chunk_size,
        )?;

        Ok(index)
    }

    /// Open an existing fixed-size index for reading.
    pub fn open_fixed_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<FixedIndexReader, Error> {
        // resolves `filename` relative to the chunk store base directory
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    /// Create a new dynamic (variable chunk size) index writer for `filename`.
    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexWriter, Error> {
        let index = DynamicIndexWriter::create(self.inner.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    /// Open an existing dynamic index for reading.
    pub fn open_dynamic_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexReader, Error> {
        // resolves `filename` relative to the chunk store base directory
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }
252
5de2bced
WB
253 pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
254 where
255 P: AsRef<Path>,
256 {
257 let filename = filename.as_ref();
42c2b5be
TL
258 let out: Box<dyn IndexFile + Send> = match archive_type(filename)? {
259 ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
260 ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
261 _ => bail!("cannot open index file of unknown type: {:?}", filename),
262 };
5de2bced
WB
263 Ok(out)
264 }
265
    /// Fast index verification - only check if chunks exists
    ///
    /// Stats every chunk referenced by `index` without reading or hashing its
    /// data. Digests already present in `checked` are skipped, and verified
    /// digests are added to it, so the set can be shared across indexes.
    pub fn fast_index_verification(
        &self,
        index: &dyn IndexFile,
        checked: &mut HashSet<[u8; 32]>,
    ) -> Result<(), Error> {
        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            if checked.contains(&info.digest) {
                continue;
            }

            self.stat_chunk(&info.digest).map_err(|err| {
                format_err!(
                    "fast_index_verification error, stat_chunk {} failed - {}",
                    hex::encode(&info.digest),
                    err,
                )
            })?;

            checked.insert(info.digest);
        }

        Ok(())
    }
291
    /// Returns the datastore name (the chunk store's name).
    pub fn name(&self) -> &str {
        self.inner.chunk_store.name()
    }

    /// Returns the base directory of this datastore on disk.
    pub fn base_path(&self) -> PathBuf {
        self.inner.chunk_store.base_path()
    }
299
    /// Cleanup a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(
        &self,
        backup_dir: impl AsRef<pbs_api_types::BackupDir>,
        manifest: &BackupManifest,
    ) -> Result<(), Error> {
        self.cleanup_backup_dir_do(backup_dir.as_ref(), manifest)
    }

    // Monomorphized worker for `cleanup_backup_dir`.
    fn cleanup_backup_dir_do(
        &self,
        backup_dir: &pbs_api_types::BackupDir,
        manifest: &BackupManifest,
    ) -> Result<(), Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.to_string());

        // files we must keep: the manifest, the client log, and everything
        // the manifest itself lists
        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| {
            wanted_files.insert(item.filename.clone());
        });

        for item in proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &full_path)?.flatten() {
            // only regular files are candidates for removal; entries with an
            // unknown file type fall through and are matched by name below
            if let Some(file_type) = item.file_type() {
                if file_type != nix::dir::Type::File {
                    continue;
                }
            }
            let file_name = item.file_name().to_bytes();
            if file_name == b"." || file_name == b".." {
                continue;
            };
            if let Ok(name) = std::str::from_utf8(file_name) {
                if wanted_files.contains(name) {
                    continue;
                }
            }
            println!("remove unused file {:?}", item.file_name());
            let dirfd = item.parent_fd();
            // SAFETY: `dirfd` comes from the open parent directory and
            // `file_name()` is a valid NUL-terminated CStr; errors are
            // deliberately ignored (best-effort cleanup).
            let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
        }

        Ok(())
    }
4b4eba0b 348
    /// Returns the absolute path for a backup_group
    pub fn group_path(&self, backup_group: &pbs_api_types::BackupGroup) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_group.to_string());
        full_path
    }

    /// Returns the absolute path for backup_dir (a single snapshot)
    pub fn snapshot_path(&self, backup_dir: &pbs_api_types::BackupDir) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.to_string());
        full_path
    }
362
    /// Remove a complete backup group including all snapshots, returns true
    /// if all snapshots were removed, and false if some were protected
    pub fn remove_backup_group(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<bool, Error> {
        let backup_group = self.backup_group(backup_group.clone());

        let full_path = self.group_path(backup_group.as_ref());

        // hold the group lock for the whole removal so no backup can start
        let _guard = proxmox_sys::fs::lock_dir_noblock(
            &full_path,
            "backup group",
            "possible running backup",
        )?;

        log::info!("removing backup group {:?}", full_path);

        let mut removed_all = true;

        // remove all individual backup dirs first to ensure nothing is using them
        for snap in backup_group.list_backups(&self.base_path())? {
            if snap.backup_dir.is_protected(self.base_path()) {
                // protected snapshots survive; the group dir must stay too
                removed_all = false;
                continue;
            }
            self.remove_backup_dir(snap.backup_dir.as_ref(), false)?;
        }

        if removed_all {
            // no snapshots left, we can now safely remove the empty folder
            std::fs::remove_dir_all(&full_path).map_err(|err| {
                format_err!(
                    "removing backup group directory {:?} failed - {}",
                    full_path,
                    err,
                )
            })?;
        }

        Ok(removed_all)
    }
405
    /// Remove a backup directory including all content
    ///
    /// With `force == false` the snapshot directory and its manifest are
    /// locked first, so a running backup or reader blocks the removal.
    /// Protected snapshots are refused in either case.
    pub fn remove_backup_dir(
        &self,
        backup_dir: &pbs_api_types::BackupDir,
        force: bool,
    ) -> Result<(), Error> {
        let backup_dir = self.backup_dir(backup_dir.clone())?;

        let full_path = backup_dir.full_path(self.base_path());

        // guards must outlive the removal below, hence declared up front
        let (_guard, _manifest_guard);
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
            _manifest_guard = self.lock_manifest(&backup_dir)?;
        }

        if backup_dir.is_protected(self.base_path()) {
            bail!("cannot remove protected snapshot");
        }

        log::info!("removing backup snapshot {:?}", full_path);
        std::fs::remove_dir_all(&full_path).map_err(|err| {
            format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
        })?;

        // the manifest does not exists anymore, we do not need to keep the lock
        if let Ok(path) = self.manifest_lock_path(&backup_dir) {
            // ignore errors
            let _ = std::fs::remove_file(path);
        }

        Ok(())
    }
439
    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<Option<i64>, Error> {
        let backup_group = self.backup_group(backup_group.clone());

        let base_path = self.base_path();
        let mut group_path = base_path.clone();
        group_path.push(backup_group.relative_group_path());

        // a missing group directory simply means "no backups yet"
        if group_path.exists() {
            backup_group.last_successful_backup(&base_path)
        } else {
            Ok(None)
        }
    }
459
54552dda
DM
460 /// Returns the backup owner.
461 ///
e6dc35ac 462 /// The backup owner is the entity who first created the backup group.
db87d93e 463 pub fn get_owner(&self, backup_group: &pbs_api_types::BackupGroup) -> Result<Authid, Error> {
54552dda 464 let mut full_path = self.base_path();
db87d93e 465 full_path.push(backup_group.to_string());
54552dda 466 full_path.push("owner");
25877d05 467 let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
dcf5a0f6 468 owner.trim_end().parse() // remove trailing newline
54552dda
DM
469 }
470
    /// Returns whether `auth_id` owns `backup_group` (directly, or via the
    /// token-user rule of `check_backup_owner`).
    pub fn owns_backup(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<bool, Error> {
        let owner = self.get_owner(backup_group)?;

        Ok(check_backup_owner(&owner, auth_id).is_ok())
    }
480
    /// Set the backup owner.
    ///
    /// Writes `auth_id` to the group's `owner` file. With `force == false`
    /// the file must not exist yet (`create_new`); with `force == true` an
    /// existing owner file is overwritten.
    pub fn set_owner(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let mut path = self.base_path();
        path.push(backup_group.to_string());
        path.push("owner");

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            // fail if the owner file already exists
            open_options.create_new(true);
        }

        let mut file = open_options
            .open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }
511
    /// Create (if it does not already exists) and lock a backup group
    ///
    /// And set the owner to 'userid'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_group.ty.as_str());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(&backup_group.id);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                // we created the group — lock it, then persist the owner
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                self.set_owner(backup_group, auth_id, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                // group exists — lock it and report the existing owner
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }
554
    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    ///
    /// Returns the snapshot path relative to the datastore base, a flag that
    /// is `true` when the directory was newly created (`false` if it already
    /// existed), and the acquired directory lock guard.
    pub fn create_locked_backup_dir(
        &self,
        backup_dir: &pbs_api_types::BackupDir,
    ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
        let relative_path = PathBuf::from(backup_dir.to_string());
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        // lock acquisition shared by both the created and already-exists paths
        let lock = || {
            lock_dir_noblock(
                &full_path,
                "snapshot",
                "internal error - tried creating snapshot that's already in use",
            )
        };

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
                Ok((relative_path, false, lock()?))
            }
            Err(e) => Err(e.into()),
        }
    }
582
    /// Get a streaming iter over top-level backup groups of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_groups(&self) -> Result<ListGroups, Error> {
        ListGroups::new(self.base_path())
    }

    /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn iter_backup_groups_ok(&self) -> Result<impl Iterator<Item = BackupGroup> + '_, Error> {
        Ok(
            ListGroups::new(self.base_path())?.filter_map(move |group| match group {
                Ok(group) => Some(group),
                Err(err) => {
                    log::error!("list groups error on datastore {} - {}", self.name(), err);
                    None
                }
            }),
        )
    }

    /// Get a in-memory vector for all top-level backup groups of a datastore
    ///
    /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
    pub fn list_backup_groups(&self) -> Result<Vec<BackupGroup>, Error> {
        ListGroups::new(self.base_path())?.collect()
    }
613
    /// Recursively collect the paths of all index files (fixed and dynamic)
    /// below the datastore base directory.
    ///
    /// Hidden entries (e.g. the `.chunks` directory) are skipped. Permission
    /// errors abort the scan, except for a top-level `lost+found` directory,
    /// which is tolerated so an ext4 fsck leftover does not block GC.
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry
                .file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        // decide whether a traversal error is fatal
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                if let Some(path) = err.path() {
                    if inner.kind() == io::ErrorKind::PermissionDenied {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
                        }
                    } else {
                        bail!(
                            "unexpected error on datastore traversal: {} - {:?}",
                            inner,
                            path
                        )
                    }
                } else {
                    bail!("unexpected error on datastore traversal: {}", inner)
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue;
                }
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex
                    || archive_type == ArchiveType::DynamicIndex
                {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }
672
    // mark chunks used by ``index`` as used
    //
    // "Marking" means touching the chunk file (atime update) so the GC sweep
    // phase does not remove it. Missing chunks are only warned about; their
    // `.bad` copies (if any) are touched too, so they survive until either a
    // good chunk is rewritten or no index references them anymore.
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            // honor task abort/shutdown requests between chunks
            worker.check_abort()?;
            worker.fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {}, required by {:?}",
                    hex::encode(digest),
                    file_name,
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.inner.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }
710
    /// GC phase 1: walk all index files and mark every referenced chunk as
    /// in use (see `index_mark_used_chunks`).
    ///
    /// Index files whose parent path does not parse as a snapshot directory
    /// are still marked but counted and reported as "strange paths".
    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;

            // count index files that live outside the expected
            // <type>/<id>/<timestamp> directory scheme
            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if pbs_api_types::BackupDir::from_str(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            // log progress only when the integer percentage advances
            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    i + 1,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }
779
    /// Returns a clone of the status of the last garbage collection run.
    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.inner.last_gc_status.lock().unwrap().clone()
    }
3d5c11e5 783
8545480a 784 pub fn garbage_collection_running(&self) -> bool {
4bc84a65 785 !matches!(self.inner.gc_mutex.try_lock(), Ok(_))
8545480a
DM
786 }
787
    /// Run a full garbage collection on this datastore.
    ///
    /// Phase 1 marks (touches) all chunks referenced by any index file;
    /// phase 2 sweeps chunks not touched since before the oldest running
    /// writer. Fails immediately if another GC already holds the lock.
    /// The resulting status is persisted to `.gc-status` and cached.
    pub fn garbage_collection(
        &self,
        worker: &dyn WorkerTaskContext,
        upid: &UPID,
    ) -> Result<(), Error> {
        if let Ok(ref mut _mutex) = self.inner.gc_mutex.try_lock() {
            // avoids that we run GC if an old daemon process has still a
            // running backup writer, which is not save as we have no "oldest
            // writer" information and thus no safe atime cutoff
            let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox_time::epoch_i64();
            // atime cutoff for the sweep: nothing newer than the oldest
            // writer (or phase 1 start, if no writer is active) is removed
            let oldest_writer = self
                .inner
                .chunk_store
                .oldest_writer()
                .unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(upid.to_string());

            task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.inner.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per =
                    (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            // persist status so it survives restarts (see open_with_path)
            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options, false);
            }

            *self.inner.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
3b7ade9e 899
    /// Try to acquire a shared (process) lock on the chunk store.
    pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
        self.inner.chunk_store.try_shared_lock()
    }

    /// Returns the on-disk path for the chunk with `digest`, together with
    /// the digest's string form.
    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.inner.chunk_store.chunk_path(digest)
    }

    /// Touch the chunk file for `digest` if it exists; see the chunk store's
    /// `cond_touch_chunk` for the exact `fail_if_not_exist` semantics.
    pub fn cond_touch_chunk(
        &self,
        digest: &[u8; 32],
        fail_if_not_exist: bool,
    ) -> Result<bool, Error> {
        self.inner
            .chunk_store
            .cond_touch_chunk(digest, fail_if_not_exist)
    }

    /// Insert a chunk blob under `digest` into the chunk store.
    // NOTE(review): return tuple presumably is (already-existed, bytes written)
    // — confirm against ChunkStore::insert_chunk.
    pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
        self.inner.chunk_store.insert_chunk(chunk, digest)
    }
60f9a6ea 921
39f18b30 922 pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
60f9a6ea
DM
923 let mut path = self.base_path();
924 path.push(backup_dir.relative_path());
925 path.push(filename);
926
6ef1b649 927 proxmox_lang::try_block!({
39f18b30
DM
928 let mut file = std::fs::File::open(&path)?;
929 DataBlob::load_from_reader(&mut file)
42c2b5be
TL
930 })
931 .map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
39f18b30 932 }
e4439025 933
7f394c80 934 pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
4bc84a65 935 let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
7f394c80
DC
936 std::fs::metadata(chunk_path).map_err(Error::from)
937 }
938
39f18b30 939 pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
4bc84a65 940 let (chunk_path, digest_str) = self.inner.chunk_store.chunk_path(digest);
39f18b30 941
6ef1b649 942 proxmox_lang::try_block!({
39f18b30
DM
943 let mut file = std::fs::File::open(&chunk_path)?;
944 DataBlob::load_from_reader(&mut file)
42c2b5be
TL
945 })
946 .map_err(|err| {
947 format_err!(
948 "store '{}', unable to load chunk '{}' - {}",
949 self.name(),
950 digest_str,
951 err,
952 )
953 })
1a374fcf
SR
954 }
955
179145dc
DC
956 /// Returns the filename to lock a manifest
957 ///
958 /// Also creates the basedir. The lockfile is located in
959 /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
42c2b5be 960 fn manifest_lock_path(&self, backup_dir: &BackupDir) -> Result<String, Error> {
179145dc
DC
961 let mut path = format!(
962 "/run/proxmox-backup/locks/{}/{}/{}",
963 self.name(),
db87d93e
WB
964 backup_dir.backup_type(),
965 backup_dir.backup_id(),
179145dc
DC
966 );
967 std::fs::create_dir_all(&path)?;
968 use std::fmt::Write;
42c2b5be
TL
969 write!(
970 path,
971 "/{}{}",
972 backup_dir.backup_time_string(),
973 &MANIFEST_LOCK_NAME
974 )?;
179145dc
DC
975
976 Ok(path)
977 }
978
42c2b5be 979 fn lock_manifest(&self, backup_dir: &BackupDir) -> Result<BackupLockGuard, Error> {
179145dc 980 let path = self.manifest_lock_path(backup_dir)?;
1a374fcf
SR
981
982 // update_manifest should never take a long time, so if someone else has
983 // the lock we can simply block a bit and should get it soon
7526d864 984 open_backup_lockfile(&path, Some(Duration::from_secs(5)), true)
42c2b5be 985 .map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
1a374fcf 986 }
e4439025 987
1a374fcf 988 /// Load the manifest without a lock. Must not be written back.
42c2b5be 989 pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
39f18b30
DM
990 let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
991 let raw_size = blob.raw_size();
60f9a6ea 992 let manifest = BackupManifest::try_from(blob)?;
ff86ef00 993 Ok((manifest, raw_size))
60f9a6ea 994 }
e4439025 995
1a374fcf
SR
996 /// Update the manifest of the specified snapshot. Never write a manifest directly,
997 /// only use this method - anything else may break locking guarantees.
998 pub fn update_manifest(
e4439025
DM
999 &self,
1000 backup_dir: &BackupDir,
1a374fcf 1001 update_fn: impl FnOnce(&mut BackupManifest),
e4439025 1002 ) -> Result<(), Error> {
1a374fcf 1003 let _guard = self.lock_manifest(backup_dir)?;
9a37bd6c 1004 let (mut manifest, _) = self.load_manifest(backup_dir)?;
1a374fcf
SR
1005
1006 update_fn(&mut manifest);
1007
883aa6d5 1008 let manifest = serde_json::to_value(manifest)?;
e4439025
DM
1009 let manifest = serde_json::to_string_pretty(&manifest)?;
1010 let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
1011 let raw_data = blob.raw_data();
1012
1013 let mut path = self.base_path();
1014 path.push(backup_dir.relative_path());
1015 path.push(MANIFEST_BLOB_NAME);
1016
1a374fcf 1017 // atomic replace invalidates flock - no other writes past this point!
e0a19d33 1018 replace_file(&path, raw_data, CreateOptions::new(), false)?;
e4439025
DM
1019
1020 Ok(())
1021 }
0698f78d 1022
8292d3d2 1023 /// Updates the protection status of the specified snapshot.
42c2b5be 1024 pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
db87d93e 1025 let full_path = backup_dir.full_path(self.base_path());
8292d3d2
DC
1026
1027 let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
1028
1029 let protected_path = backup_dir.protected_file(self.base_path());
1030 if protection {
1031 std::fs::File::create(protected_path)
1032 .map_err(|err| format_err!("could not create protection file: {}", err))?;
1033 } else if let Err(err) = std::fs::remove_file(protected_path) {
1034 // ignore error for non-existing file
1035 if err.kind() != std::io::ErrorKind::NotFound {
1036 bail!("could not remove protection file: {}", err);
1037 }
1038 }
1039
1040 Ok(())
1041 }
1042
0698f78d 1043 pub fn verify_new(&self) -> bool {
4bc84a65 1044 self.inner.verify_new
0698f78d 1045 }
4921a411
DC
1046
1047 /// returns a list of chunks sorted by their inode number on disk
1048 /// chunks that could not be stat'ed are at the end of the list
1049 pub fn get_chunks_in_order<F, A>(
1050 &self,
1051 index: &Box<dyn IndexFile + Send>,
1052 skip_chunk: F,
1053 check_abort: A,
1054 ) -> Result<Vec<(usize, u64)>, Error>
1055 where
1056 F: Fn(&[u8; 32]) -> bool,
1057 A: Fn(usize) -> Result<(), Error>,
1058 {
1059 let index_count = index.index_count();
1060 let mut chunk_list = Vec::with_capacity(index_count);
1061 use std::os::unix::fs::MetadataExt;
1062 for pos in 0..index_count {
1063 check_abort(pos)?;
1064
1065 let info = index.chunk_info(pos).unwrap();
1066
1067 if skip_chunk(&info.digest) {
1068 continue;
1069 }
1070
4bc84a65 1071 let ino = match self.inner.chunk_order {
fef61684
DC
1072 ChunkOrder::Inode => {
1073 match self.stat_chunk(&info.digest) {
1074 Err(_) => u64::MAX, // could not stat, move to end of list
1075 Ok(metadata) => metadata.ino(),
1076 }
1077 }
1078 ChunkOrder::None => 0,
4921a411
DC
1079 };
1080
1081 chunk_list.push((pos, ino));
1082 }
1083
4bc84a65 1084 match self.inner.chunk_order {
fef61684
DC
1085 // sorting by inode improves data locality, which makes it lots faster on spinners
1086 ChunkOrder::Inode => {
1087 chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b))
1088 }
1089 ChunkOrder::None => {}
1090 }
4921a411
DC
1091
1092 Ok(chunk_list)
1093 }
db87d93e 1094
6b0c6492
WB
1095 /// Open a backup group from this datastore.
1096 pub fn backup_group(&self, group: pbs_api_types::BackupGroup) -> BackupGroup {
1097 BackupGroup::new(group)
db87d93e
WB
1098 }
1099
6b0c6492
WB
1100 /// Open a backup group from this datastore.
1101 pub fn backup_group_from_parts<T>(&self, ty: BackupType, id: T) -> BackupGroup
1102 where
1103 T: Into<String>,
1104 {
1105 self.backup_group((ty, id.into()).into())
1106 }
1107
1108 /// Open a backup group from this datastore by backup group path such as `vm/100`.
1109 ///
1110 /// Convenience method for `store.backup_group(path.parse()?)`
1111 pub fn backup_group_from_path(&self, path: &str) -> Result<BackupGroup, Error> {
1112 Ok(self.backup_group(path.parse()?))
db87d93e
WB
1113 }
1114
6b0c6492
WB
1115 /// Open a snapshot (backup directory) from this datastore.
1116 pub fn backup_dir(&self, dir: pbs_api_types::BackupDir) -> Result<BackupDir, Error> {
1117 BackupDir::with_group(self.backup_group(dir.group), dir.time)
1118 }
1119
1120 /// Open a snapshot (backup directory) from this datastore.
db87d93e
WB
1121 pub fn backup_dir_from_parts<T>(
1122 &self,
1123 ty: BackupType,
1124 id: T,
1125 time: i64,
1126 ) -> Result<BackupDir, Error>
1127 where
1128 T: Into<String>,
1129 {
6b0c6492 1130 self.backup_dir((ty, id.into(), time).into())
db87d93e
WB
1131 }
1132
6b0c6492 1133 /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
db87d93e
WB
1134 pub fn backup_dir_with_rfc3339<T: Into<String>>(
1135 &self,
1136 group: BackupGroup,
1137 time_string: T,
1138 ) -> Result<BackupDir, Error> {
1139 BackupDir::with_rfc3339(group, time_string.into())
1140 }
1141
6b0c6492 1142 /// Open a snapshot (backup directory) from this datastore by a snapshot path.
db87d93e 1143 pub fn backup_dir_from_path(&self, path: &str) -> Result<BackupDir, Error> {
6b0c6492 1144 self.backup_dir(path.parse()?)
db87d93e 1145 }
529de6c7 1146}
de015ce7 1147
33eb23d5
TL
1148/// A iterator for all BackupDir's (Snapshots) in a BackupGroup
1149pub struct ListSnapshots {
1150 group: BackupGroup,
1151 fd: proxmox_sys::fs::ReadDir,
1152}
1153
1154impl ListSnapshots {
1155 pub fn new(group: BackupGroup, group_path: PathBuf) -> Result<Self, Error> {
1156 Ok(ListSnapshots {
1157 fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &group_path)?,
1158 group,
1159 })
1160 }
1161}
1162
1163impl Iterator for ListSnapshots {
1164 type Item = Result<BackupDir, Error>;
1165
1166 fn next(&mut self) -> Option<Self::Item> {
1167 loop {
1168 let item = self.fd.next()?;
1169 match item {
1170 Ok(ref entry) => {
1171 if let Ok(name) = entry.file_name().to_str() {
1172 match entry.file_type() {
1173 Some(nix::dir::Type::Directory) => {} // OK
1174 _ => continue,
1175 }
1176 if BACKUP_DATE_REGEX.is_match(name) {
1177 let backup_time = match proxmox_time::parse_rfc3339(&name) {
1178 Ok(time) => time,
1179 Err(err) => return Some(Err(err)),
1180 };
1181
1182 return Some(BackupDir::with_group(self.group.clone(), backup_time));
1183 }
1184 }
1185 continue; // file did not match regex or isn't valid utf-8
1186 }
1187 Err(err) => return Some(Err(err)),
1188 }
1189 }
1190 }
1191}
1192
de015ce7
TL
1193/// A iterator for a (single) level of Backup Groups
1194pub struct ListGroups {
1195 type_fd: proxmox_sys::fs::ReadDir,
988d575d 1196 id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
de015ce7
TL
1197}
1198
1199impl ListGroups {
1200 pub fn new(base_path: PathBuf) -> Result<Self, Error> {
1201 Ok(ListGroups {
1202 type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?,
1203 id_state: None,
1204 })
1205 }
1206}
1207
1208impl Iterator for ListGroups {
1209 type Item = Result<BackupGroup, Error>;
1210
1211 fn next(&mut self) -> Option<Self::Item> {
1212 loop {
988d575d 1213 if let Some((group_type, ref mut id_fd)) = self.id_state {
de015ce7
TL
1214 let item = match id_fd.next() {
1215 Some(item) => item,
1216 None => {
1217 self.id_state = None;
1218 continue; // exhausted all IDs for the current group type, try others
1219 }
1220 };
1221 match item {
1222 Ok(ref entry) => {
1223 if let Ok(name) = entry.file_name().to_str() {
1224 match entry.file_type() {
1225 Some(nix::dir::Type::Directory) => {} // OK
1226 _ => continue,
1227 }
1228 if BACKUP_ID_REGEX.is_match(name) {
6b0c6492
WB
1229 return Some(Ok(BackupGroup::new(
1230 (group_type, name.to_owned()).into(),
1231 )));
de015ce7
TL
1232 }
1233 }
1234 continue; // file did not match regex or isn't valid utf-8
1235 }
1236 Err(err) => return Some(Err(err)),
1237 }
1238 } else {
1239 let item = self.type_fd.next()?;
1240 match item {
1241 Ok(ref entry) => {
1242 if let Ok(name) = entry.file_name().to_str() {
1243 match entry.file_type() {
1244 Some(nix::dir::Type::Directory) => {} // OK
1245 _ => continue,
1246 }
988d575d 1247 if let Ok(group_type) = BackupType::from_str(name) {
de015ce7
TL
1248 // found a backup group type, descend into it to scan all IDs in it
1249 // by switching to the id-state branch
1250 let base_fd = entry.parent_fd();
1251 let id_dirfd = match proxmox_sys::fs::read_subdir(base_fd, name) {
1252 Ok(dirfd) => dirfd,
1253 Err(err) => return Some(Err(err.into())),
1254 };
988d575d 1255 self.id_state = Some((group_type, id_dirfd));
de015ce7
TL
1256 }
1257 }
1258 continue; // file did not match regex or isn't valid utf-8
1259 }
1260 Err(err) => return Some(Err(err)),
1261 }
1262 }
1263 }
1264 }
1265}