]> git.proxmox.com Git - proxmox-backup.git/blame - pbs-datastore/src/datastore.rs
datastore: factor type out of ListGroups into ListGroupsType
[proxmox-backup.git] / pbs-datastore / src / datastore.rs
CommitLineData
42c2b5be 1use std::collections::{HashMap, HashSet};
54552dda 2use std::io::{self, Write};
4c7cc5b3 3use std::os::unix::io::AsRawFd;
367f002e 4use std::path::{Path, PathBuf};
cb4b721c 5use std::str::FromStr;
42c2b5be 6use std::sync::{Arc, Mutex};
367f002e 7
f7d4e4b5 8use anyhow::{bail, format_err, Error};
2c32fdde 9use lazy_static::lazy_static;
4c7cc5b3 10use nix::unistd::{unlinkat, UnlinkatFlags};
e4439025 11
fef61684
DC
12use proxmox_schema::ApiType;
13
42c2b5be
TL
14use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
15use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
d5790a9f 16use proxmox_sys::process_locker::ProcessLockSharedGuard;
25877d05 17use proxmox_sys::WorkerTaskContext;
d5790a9f 18use proxmox_sys::{task_log, task_warn};
529de6c7 19
fef61684 20use pbs_api_types::{
8c74349b 21 Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning,
118e9849 22 GarbageCollectionStatus, HumanByte, Operation, UPID,
fef61684 23};
529de6c7 24
42c2b5be 25use crate::backup_info::{BackupDir, BackupGroup};
6d5d305d
DM
26use crate::chunk_store::ChunkStore;
27use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
28use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
5444b914 29use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
6d5d305d 30use crate::index::IndexFile;
9ccf933b 31use crate::manifest::{archive_type, ArchiveType};
4bc84a65 32use crate::task_tracking::update_active_operations;
42c2b5be 33use crate::DataBlob;
6d5d305d 34
lazy_static! {
    /// Process-wide cache of opened datastores, keyed by datastore name.
    ///
    /// Reusing a cached `DataStoreImpl` keeps the underlying chunk store (and
    /// therefore its process locker instance) shared — see the safety notes on
    /// `DataStore::open_from_config` for why that matters.
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStoreImpl>>> =
        Mutex::new(HashMap::new());
}
ff3d3100 39
9751ef4b
DC
40/// checks if auth_id is owner, or, if owner is a token, if
41/// auth_id is the user of the token
42c2b5be
TL
42pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
43 let correct_owner =
44 owner == auth_id || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
9751ef4b
DC
45 if !correct_owner {
46 bail!("backup owner check failed ({} != {})", auth_id, owner);
47 }
48 Ok(())
49}
50
/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backup.
pub struct DataStoreImpl {
    // Underlying chunk store; shared so cached lookups reuse the same
    // process locker instance.
    chunk_store: Arc<ChunkStore>,
    // Serializes garbage-collection runs.
    gc_mutex: Mutex<()>,
    // Status of the most recent garbage collection (persisted in `.gc-status`).
    last_gc_status: Mutex<GarbageCollectionStatus>,
    // From the `verify-new` datastore config option (defaults to false).
    verify_new: bool,
    // Chunk ordering from the datastore "tuning" config (defaults to inode order).
    chunk_order: ChunkOrder,
    // Digest of the datastore config this instance was built from; used for
    // staleness detection in `DataStore::lookup_datastore`.
    last_digest: Option<[u8; 32]>,
}
63
impl DataStoreImpl {
    /// Construct a dummy instance for tests only.
    ///
    /// This one just panics on everything: the contained chunk store is a
    /// `panic_store`, so any chunk access will abort. Never use outside tests.
    #[doc(hidden)]
    pub(crate) unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
            verify_new: false,
            chunk_order: ChunkOrder::None,
            last_digest: None,
        })
    }
}
78
/// Handle to a datastore.
///
/// Wraps the shared [`DataStoreImpl`] together with the operation kind (if
/// any) this handle was opened for; the active-operations count for that
/// operation is maintained by `Clone`/`Drop`.
pub struct DataStore {
    inner: Arc<DataStoreImpl>,
    // Operation tracked for this handle; decremented again on Drop.
    operation: Option<Operation>,
}
83
84impl Clone for DataStore {
85 fn clone(&self) -> Self {
86 let mut new_operation = self.operation;
87 if let Some(operation) = self.operation {
88 if let Err(e) = update_active_operations(self.name(), operation, 1) {
89 log::error!("could not update active operations - {}", e);
90 new_operation = None;
91 }
92 }
93
94 DataStore {
95 inner: self.inner.clone(),
96 operation: new_operation,
97 }
98 }
99}
100
101impl Drop for DataStore {
102 fn drop(&mut self) {
103 if let Some(operation) = self.operation {
104 if let Err(e) = update_active_operations(self.name(), operation, -1) {
105 log::error!("could not update active operations - {}", e);
106 }
107 }
108 }
109}
110
529de6c7 111impl DataStore {
    /// Construct a dummy datastore handle for tests only.
    ///
    /// This one just panics on everything (see `DataStoreImpl::new_test`).
    #[doc(hidden)]
    pub(crate) unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            inner: unsafe { DataStoreImpl::new_test() },
            operation: None,
        })
    }
120
    /// Look up a datastore by name, reusing a cached instance when the
    /// configuration digest is unchanged.
    ///
    /// Checks the configured maintenance mode against the requested
    /// `operation` and, on success, increments the active-operations count for
    /// it (decremented again when the returned handle is dropped).
    pub fn lookup_datastore(
        name: &str,
        operation: Option<Operation>,
    ) -> Result<Arc<DataStore>, Error> {
        // we could use the ConfigVersionCache's generation for staleness detection, but we load
        // the config anyway -> just use digest, additional benefit: manual changes get detected
        let (config, digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;

        if let Some(maintenance_mode) = config.get_maintenance_mode() {
            if let Err(error) = maintenance_mode.check(operation) {
                bail!("datastore '{name}' is in {error}");
            }
        }

        if let Some(operation) = operation {
            update_active_operations(name, operation, 1)?;
        }

        // NOTE(review): if anything below fails (e.g. ChunkStore::open), the
        // active-operations increment above is not rolled back here — verify
        // whether the task tracking compensates for that elsewhere.
        let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
        let entry = datastore_cache.get(name);

        // reuse chunk store so that we keep using the same process locker instance!
        let chunk_store = if let Some(datastore) = &entry {
            let last_digest = datastore.last_digest.as_ref();
            if let Some(true) = last_digest.map(|last_digest| last_digest == &digest) {
                // config unchanged since the cached instance was created -> reuse as-is
                return Ok(Arc::new(Self {
                    inner: Arc::clone(datastore),
                    operation,
                }));
            }
            Arc::clone(&datastore.chunk_store)
        } else {
            Arc::new(ChunkStore::open(name, &config.path)?)
        };

        // config changed (or no cache entry): rebuild the impl, keeping the chunk store
        let datastore = DataStore::with_store_and_config(chunk_store, config, Some(digest))?;

        let datastore = Arc::new(datastore);
        datastore_cache.insert(name.to_string(), datastore.clone());

        Ok(Arc::new(Self {
            inner: datastore,
            operation,
        }))
    }
167
062cf75c 168 /// removes all datastores that are not configured anymore
42c2b5be 169 pub fn remove_unused_datastores() -> Result<(), Error> {
e7d4be9d 170 let (config, _digest) = pbs_config::datastore::config()?;
062cf75c
DC
171
172 let mut map = DATASTORE_MAP.lock().unwrap();
173 // removes all elements that are not in the config
42c2b5be 174 map.retain(|key, _| config.sections.contains_key(key));
062cf75c
DC
175 Ok(())
176 }
177
6da20161 178 /// Open a raw database given a name and a path.
c66fa32c
TL
179 ///
180 /// # Safety
181 /// See the safety section in `open_from_config`
519ca9d0 182 pub unsafe fn open_path(
6da20161
WB
183 name: &str,
184 path: impl AsRef<Path>,
185 operation: Option<Operation>,
186 ) -> Result<Arc<Self>, Error> {
187 let path = path
188 .as_ref()
189 .to_str()
190 .ok_or_else(|| format_err!("non-utf8 paths not supported"))?
191 .to_owned();
192 unsafe { Self::open_from_config(DataStoreConfig::new(name.to_owned(), path), operation) }
193 }
194
    /// Open a datastore given a raw configuration.
    ///
    /// # Safety
    /// There's no memory safety implication, but as this is opening a new ChunkStore it will
    /// create a new process locker instance, potentially on the same path as existing safely
    /// created ones. This is dangerous as dropping the reference of this and thus the underlying
    /// chunkstore's process locker will close all locks from our process on the config.path,
    /// breaking guarantees we need to uphold for safe long backup + GC interaction on newer/older
    /// process instances (from package update).
    unsafe fn open_from_config(
        config: DataStoreConfig,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let name = config.name.clone();

        // deliberately bypasses DATASTORE_MAP — see safety section above
        let chunk_store = ChunkStore::open(&name, &config.path)?;
        let inner = Arc::new(Self::with_store_and_config(
            Arc::new(chunk_store),
            config,
            None, // no config digest recorded for raw opens
        )?);

        if let Some(operation) = operation {
            update_active_operations(&name, operation, 1)?;
        }

        Ok(Arc::new(Self { inner, operation }))
    }
223
224 fn with_store_and_config(
0bd9c870 225 chunk_store: Arc<ChunkStore>,
118deb4d 226 config: DataStoreConfig,
51d900d1 227 last_digest: Option<[u8; 32]>,
4bc84a65 228 ) -> Result<DataStoreImpl, Error> {
b683fd58
DC
229 let mut gc_status_path = chunk_store.base_path();
230 gc_status_path.push(".gc-status");
231
232 let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
233 match serde_json::from_str(&state) {
234 Ok(state) => state,
235 Err(err) => {
dce4b540 236 log::error!("error reading gc-status: {}", err);
b683fd58
DC
237 GarbageCollectionStatus::default()
238 }
239 }
240 } else {
241 GarbageCollectionStatus::default()
242 };
f2b99c34 243
fef61684 244 let tuning: DatastoreTuning = serde_json::from_value(
42c2b5be
TL
245 DatastoreTuning::API_SCHEMA
246 .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
fef61684
DC
247 )?;
248 let chunk_order = tuning.chunk_order.unwrap_or(ChunkOrder::Inode);
249
4bc84a65 250 Ok(DataStoreImpl {
0bd9c870 251 chunk_store,
81b2a872 252 gc_mutex: Mutex::new(()),
f2b99c34 253 last_gc_status: Mutex::new(gc_status),
0698f78d 254 verify_new: config.verify_new.unwrap_or(false),
fef61684 255 chunk_order,
51d900d1 256 last_digest,
529de6c7
DM
257 })
258 }
259
    /// Iterate over the chunk files of the underlying chunk store.
    ///
    /// Simply delegates to `ChunkStore::get_chunk_iterator`; see there for the
    /// meaning of the `usize` and `bool` tuple elements.
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.inner.chunk_store.get_chunk_iterator()
    }
268
42c2b5be
TL
269 pub fn create_fixed_writer<P: AsRef<Path>>(
270 &self,
271 filename: P,
272 size: usize,
273 chunk_size: usize,
274 ) -> Result<FixedIndexWriter, Error> {
275 let index = FixedIndexWriter::create(
276 self.inner.chunk_store.clone(),
277 filename.as_ref(),
278 size,
279 chunk_size,
280 )?;
529de6c7
DM
281
282 Ok(index)
283 }
284
42c2b5be
TL
285 pub fn open_fixed_reader<P: AsRef<Path>>(
286 &self,
287 filename: P,
288 ) -> Result<FixedIndexReader, Error> {
289 let full_path = self.inner.chunk_store.relative_path(filename.as_ref());
a7c72ad9
DM
290
291 let index = FixedIndexReader::open(&full_path)?;
529de6c7
DM
292
293 Ok(index)
294 }
3d5c11e5 295
93d5d779 296 pub fn create_dynamic_writer<P: AsRef<Path>>(
42c2b5be
TL
297 &self,
298 filename: P,
93d5d779 299 ) -> Result<DynamicIndexWriter, Error> {
42c2b5be 300 let index = DynamicIndexWriter::create(self.inner.chunk_store.clone(), filename.as_ref())?;
0433db19
DM
301
302 Ok(index)
303 }
ff3d3100 304
42c2b5be
TL
305 pub fn open_dynamic_reader<P: AsRef<Path>>(
306 &self,
307 filename: P,
308 ) -> Result<DynamicIndexReader, Error> {
309 let full_path = self.inner.chunk_store.relative_path(filename.as_ref());
d48a9955
DM
310
311 let index = DynamicIndexReader::open(&full_path)?;
77703d95
DM
312
313 Ok(index)
314 }
315
5de2bced
WB
316 pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
317 where
318 P: AsRef<Path>,
319 {
320 let filename = filename.as_ref();
42c2b5be
TL
321 let out: Box<dyn IndexFile + Send> = match archive_type(filename)? {
322 ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
323 ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
324 _ => bail!("cannot open index file of unknown type: {:?}", filename),
325 };
5de2bced
WB
326 Ok(out)
327 }
328
1369bcdb 329 /// Fast index verification - only check if chunks exists
28570d19
DM
330 pub fn fast_index_verification(
331 &self,
332 index: &dyn IndexFile,
42c2b5be 333 checked: &mut HashSet<[u8; 32]>,
28570d19 334 ) -> Result<(), Error> {
1369bcdb
DM
335 for pos in 0..index.index_count() {
336 let info = index.chunk_info(pos).unwrap();
28570d19
DM
337 if checked.contains(&info.digest) {
338 continue;
339 }
340
42c2b5be
TL
341 self.stat_chunk(&info.digest).map_err(|err| {
342 format_err!(
343 "fast_index_verification error, stat_chunk {} failed - {}",
344 hex::encode(&info.digest),
345 err,
346 )
347 })?;
28570d19
DM
348
349 checked.insert(info.digest);
1369bcdb
DM
350 }
351
352 Ok(())
353 }
354
    /// The name of this datastore (delegates to the chunk store).
    pub fn name(&self) -> &str {
        self.inner.chunk_store.name()
    }
358
    /// The base directory of this datastore (delegates to the chunk store).
    pub fn base_path(&self) -> PathBuf {
        self.inner.chunk_store.base_path()
    }
362
133d718f 363 /// Returns the absolute path for a backup namespace on this datastore
8c74349b
WB
364 pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
365 let mut path = self.base_path();
366 path.reserve(ns.path_len());
367 for part in ns.components() {
368 path.push("ns");
369 path.push(part);
370 }
371 path
372 }
373
5444b914
WB
374 /// Returns the absolute path for a backup_type
375 pub fn type_path(&self, ns: &BackupNamespace, backup_type: BackupType) -> PathBuf {
376 let mut full_path = self.namespace_path(ns);
377 full_path.push(backup_type.to_string());
378 full_path
379 }
380
41b373ec 381 /// Returns the absolute path for a backup_group
133d718f
WB
382 pub fn group_path(
383 &self,
384 ns: &BackupNamespace,
385 backup_group: &pbs_api_types::BackupGroup,
386 ) -> PathBuf {
387 let mut full_path = self.namespace_path(ns);
db87d93e 388 full_path.push(backup_group.to_string());
41b373ec
DM
389 full_path
390 }
391
392 /// Returns the absolute path for backup_dir
133d718f
WB
393 pub fn snapshot_path(
394 &self,
395 ns: &BackupNamespace,
396 backup_dir: &pbs_api_types::BackupDir,
397 ) -> PathBuf {
398 let mut full_path = self.namespace_path(ns);
db87d93e 399 full_path.push(backup_dir.to_string());
41b373ec
DM
400 full_path
401 }
402
dc3d716b
TL
403 /// Create a backup namespace.
404 pub fn create_namespace(
405 self: &Arc<Self>,
406 parent: &BackupNamespace,
407 name: String,
408 ) -> Result<BackupNamespace, Error> {
fc99c279 409 if !self.namespace_exists(parent) {
dc3d716b
TL
410 bail!("cannot create new namespace, parent {parent} doesn't already exists");
411 }
412
413 // construct ns before mkdir to enforce max-depth and name validity
414 let ns = BackupNamespace::from_parent_ns(parent, name)?;
415
ca3f8757 416 let mut ns_full_path = self.base_path();
dc3d716b
TL
417 ns_full_path.push(ns.path());
418
419 std::fs::create_dir_all(ns_full_path)?;
420
421 Ok(ns)
422 }
423
fc99c279
DC
424 /// Returns if the given namespace exists on the datastore
425 pub fn namespace_exists(&self, ns: &BackupNamespace) -> bool {
ca3f8757 426 let mut path = self.base_path();
fc99c279
DC
427 path.push(ns.path());
428 path.exists()
429 }
430
    /// Remove all backup groups of a single namespace level but not the namespace itself.
    ///
    /// Does *not* descend into child-namespaces and doesn't remove the namespace itself either.
    ///
    /// Returns true if all the groups were removed, and false if some were protected.
    pub fn remove_namespace_groups(self: &Arc<Self>, ns: &BackupNamespace) -> Result<bool, Error> {
        // FIXME: locking? The single groups/snapshots are already protected, so may not be
        // necessary (depends on what we all allow to do with namespaces)
        log::info!("removing all groups in namespace {}:/{ns}", self.name());

        let mut removed_all_groups = true;

        for group in self.iter_backup_groups(ns.to_owned())? {
            let removed_group = group?.destroy()?;
            removed_all_groups = removed_all_groups && removed_group;
        }

        // Afterwards try to remove the now (hopefully) empty backup-type
        // directories directly below the namespace directory.
        let base_file = std::fs::File::open(self.base_path())?;
        let base_fd = base_file.as_raw_fd();
        for ty in BackupType::iter() {
            let mut ty_dir = ns.path();
            ty_dir.push(ty.to_string());
            // best effort only, but we probably should log the error
            if let Err(err) = unlinkat(Some(base_fd), &ty_dir, UnlinkatFlags::RemoveDir) {
                if err != nix::errno::Errno::ENOENT {
                    log::error!("failed to remove backup type {ty} in {ns} - {err}");
                }
            }
        }

        Ok(removed_all_groups)
    }
463
    /// Remove a complete backup namespace optionally including all its, and child namespaces',
    /// groups. If `delete_groups` is false this only prunes empty namespaces.
    ///
    /// Returns true if everything requested was removed, and false if some groups were protected
    /// or if some namespaces weren't empty even though all groups were deleted (race with new
    /// backup)
    pub fn remove_namespace_recursive(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        delete_groups: bool,
    ) -> Result<bool, Error> {
        let store = self.name();
        let mut removed_all_requested = true;
        if delete_groups {
            log::info!("removing whole namespace recursively below {store}:/{ns}",);
            for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
                let removed_ns_groups = self.remove_namespace_groups(&ns?)?;
                removed_all_requested = removed_all_requested && removed_ns_groups;
            }
        } else {
            log::info!("pruning empty namespace recursively below {store}:/{ns}");
        }

        // now try to delete the actual namespaces, bottom up so that we can use safe rmdir that
        // will choke if a new backup/group appeared in the meantime (but not on an new empty NS)
        let mut children = self
            .recursive_iter_backup_ns(ns.to_owned())?
            .collect::<Result<Vec<BackupNamespace>, Error>>()?;

        // deepest first, so children get removed before their parents
        children.sort_by_key(|b| std::cmp::Reverse(b.depth()));

        let base_file = std::fs::File::open(self.base_path())?;
        let base_fd = base_file.as_raw_fd();

        for ns in children.iter() {
            // first remove the (hopefully empty) "ns" subdirectory, best effort
            let mut ns_dir = ns.path();
            ns_dir.push("ns");
            let _ = unlinkat(Some(base_fd), &ns_dir, UnlinkatFlags::RemoveDir);

            if !ns.is_root() {
                match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
                    Ok(()) => log::debug!("removed namespace {ns}"),
                    Err(nix::errno::Errno::ENOENT) => {
                        log::debug!("namespace {ns} already removed")
                    }
                    Err(nix::errno::Errno::ENOTEMPTY) if !delete_groups => {
                        // only counts as failure when pruning empty namespaces;
                        // with delete_groups a non-empty dir means a race we report below
                        removed_all_requested = false;
                        log::debug!("skip removal of non-empty namespace {ns}")
                    }
                    Err(err) => {
                        removed_all_requested = false;
                        log::warn!("failed to remove namespace {ns} - {err}")
                    }
                }
            }
        }

        Ok(removed_all_requested)
    }
522
f03649b8
TL
523 /// Remove a complete backup group including all snapshots.
524 ///
525 /// Returns true if all snapshots were removed, and false if some were protected
db87d93e 526 pub fn remove_backup_group(
6da20161 527 self: &Arc<Self>,
133d718f 528 ns: &BackupNamespace,
db87d93e
WB
529 backup_group: &pbs_api_types::BackupGroup,
530 ) -> Result<bool, Error> {
133d718f 531 let backup_group = self.backup_group(ns.clone(), backup_group.clone());
db87d93e 532
f03649b8 533 backup_group.destroy()
4b4eba0b
DM
534 }
535
8f579717 536 /// Remove a backup directory including all content
db87d93e 537 pub fn remove_backup_dir(
6da20161 538 self: &Arc<Self>,
133d718f 539 ns: &BackupNamespace,
db87d93e
WB
540 backup_dir: &pbs_api_types::BackupDir,
541 force: bool,
542 ) -> Result<(), Error> {
133d718f 543 let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
db87d93e 544
f03649b8 545 backup_dir.destroy(force)
8f579717
DM
546 }
547
41b373ec
DM
548 /// Returns the time of the last successful backup
549 ///
550 /// Or None if there is no backup in the group (or the group dir does not exist).
db87d93e 551 pub fn last_successful_backup(
6da20161 552 self: &Arc<Self>,
133d718f 553 ns: &BackupNamespace,
db87d93e
WB
554 backup_group: &pbs_api_types::BackupGroup,
555 ) -> Result<Option<i64>, Error> {
133d718f 556 let backup_group = self.backup_group(ns.clone(), backup_group.clone());
db87d93e 557
4b77d300 558 let group_path = backup_group.full_group_path();
41b373ec
DM
559
560 if group_path.exists() {
6da20161 561 backup_group.last_successful_backup()
41b373ec
DM
562 } else {
563 Ok(None)
564 }
565 }
566
133d718f
WB
567 /// Return the path of the 'owner' file.
568 fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
569 self.group_path(ns, group).join("owner")
570 }
571
54552dda
DM
572 /// Returns the backup owner.
573 ///
e6dc35ac 574 /// The backup owner is the entity who first created the backup group.
133d718f
WB
575 pub fn get_owner(
576 &self,
577 ns: &BackupNamespace,
578 backup_group: &pbs_api_types::BackupGroup,
579 ) -> Result<Authid, Error> {
580 let full_path = self.owner_path(ns, backup_group);
25877d05 581 let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
dcf5a0f6 582 owner.trim_end().parse() // remove trailing newline
54552dda
DM
583 }
584
db87d93e
WB
585 pub fn owns_backup(
586 &self,
133d718f 587 ns: &BackupNamespace,
db87d93e
WB
588 backup_group: &pbs_api_types::BackupGroup,
589 auth_id: &Authid,
590 ) -> Result<bool, Error> {
133d718f 591 let owner = self.get_owner(ns, backup_group)?;
9751ef4b 592
8e0b852f 593 Ok(check_backup_owner(&owner, auth_id).is_ok())
9751ef4b
DC
594 }
595
54552dda 596 /// Set the backup owner.
e7cb4dc5
WB
597 pub fn set_owner(
598 &self,
133d718f 599 ns: &BackupNamespace,
db87d93e 600 backup_group: &pbs_api_types::BackupGroup,
e6dc35ac 601 auth_id: &Authid,
e7cb4dc5
WB
602 force: bool,
603 ) -> Result<(), Error> {
133d718f 604 let path = self.owner_path(ns, backup_group);
54552dda
DM
605
606 let mut open_options = std::fs::OpenOptions::new();
607 open_options.write(true);
608 open_options.truncate(true);
609
610 if force {
611 open_options.create(true);
612 } else {
613 open_options.create_new(true);
614 }
615
42c2b5be
TL
616 let mut file = open_options
617 .open(&path)
54552dda
DM
618 .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
619
e6dc35ac 620 writeln!(file, "{}", auth_id)
54552dda
DM
621 .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
622
623 Ok(())
624 }
625
1fc82c41 626 /// Create (if it does not already exists) and lock a backup group
54552dda
DM
627 ///
628 /// And set the owner to 'userid'. If the group already exists, it returns the
629 /// current owner (instead of setting the owner).
1fc82c41 630 ///
1ffe0301 631 /// This also acquires an exclusive lock on the directory and returns the lock guard.
e7cb4dc5
WB
632 pub fn create_locked_backup_group(
633 &self,
133d718f 634 ns: &BackupNamespace,
db87d93e 635 backup_group: &pbs_api_types::BackupGroup,
e6dc35ac
FG
636 auth_id: &Authid,
637 ) -> Result<(Authid, DirLockGuard), Error> {
8731e40a 638 // create intermediate path first:
44288184 639 let mut full_path = self.base_path();
133d718f 640 for ns in ns.components() {
8c74349b
WB
641 full_path.push("ns");
642 full_path.push(ns);
643 }
db87d93e 644 full_path.push(backup_group.ty.as_str());
8731e40a
WB
645 std::fs::create_dir_all(&full_path)?;
646
db87d93e 647 full_path.push(&backup_group.id);
54552dda
DM
648
649 // create the last component now
650 match std::fs::create_dir(&full_path) {
651 Ok(_) => {
42c2b5be
TL
652 let guard = lock_dir_noblock(
653 &full_path,
654 "backup group",
655 "another backup is already running",
656 )?;
133d718f
WB
657 self.set_owner(ns, backup_group, auth_id, false)?;
658 let owner = self.get_owner(ns, backup_group)?; // just to be sure
1fc82c41 659 Ok((owner, guard))
54552dda
DM
660 }
661 Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
42c2b5be
TL
662 let guard = lock_dir_noblock(
663 &full_path,
664 "backup group",
665 "another backup is already running",
666 )?;
133d718f 667 let owner = self.get_owner(ns, backup_group)?; // just to be sure
1fc82c41 668 Ok((owner, guard))
54552dda
DM
669 }
670 Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
671 }
672 }
673
674 /// Creates a new backup snapshot inside a BackupGroup
675 ///
676 /// The BackupGroup directory needs to exist.
42c2b5be
TL
677 pub fn create_locked_backup_dir(
678 &self,
133d718f 679 ns: &BackupNamespace,
db87d93e 680 backup_dir: &pbs_api_types::BackupDir,
42c2b5be 681 ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
133d718f
WB
682 let full_path = self.snapshot_path(ns, backup_dir);
683 let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
684 format_err!(
685 "failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
686 )
687 })?;
ff3d3100 688
42c2b5be
TL
689 let lock = || {
690 lock_dir_noblock(
691 &full_path,
692 "snapshot",
693 "internal error - tried creating snapshot that's already in use",
694 )
695 };
f23f7543 696
8731e40a 697 match std::fs::create_dir(&full_path) {
133d718f 698 Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
42c2b5be 699 Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
133d718f 700 Ok((relative_path.to_owned(), false, lock()?))
42c2b5be
TL
701 }
702 Err(e) => Err(e.into()),
8731e40a 703 }
ff3d3100
DM
704 }
705
    /// Get a streaming iter over single-level backup namespaces of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_ns(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListNamespaces, Error> {
        ListNamespaces::new(Arc::clone(self), ns)
    }
716
717 /// Get a streaming iter over single-level backup namespaces of a datatstore, filtered by Ok
718 ///
719 /// The iterated item's result is already unwrapped, if it contained an error it will be
720 /// logged. Can be useful in iterator chain commands
721 pub fn iter_backup_ns_ok(
722 self: &Arc<DataStore>,
723 ns: BackupNamespace,
724 ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
725 let this = Arc::clone(self);
726 Ok(
ca3f8757 727 ListNamespaces::new(Arc::clone(self), ns)?.filter_map(move |ns| match ns {
90e38696
TL
728 Ok(ns) => Some(ns),
729 Err(err) => {
730 log::error!("list groups error on datastore {} - {}", this.name(), err);
731 None
732 }
733 }),
734 )
735 }
736
    /// Get a streaming iter over all backup namespaces at and below `ns`, recursively
    ///
    /// (Doc fixed: this is the recursive variant, not single-level.)
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn recursive_iter_backup_ns(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListNamespacesRecursive, Error> {
        ListNamespacesRecursive::new(Arc::clone(self), ns)
    }
747
748 /// Get a streaming iter over single-level backup namespaces of a datatstore, filtered by Ok
749 ///
750 /// The iterated item's result is already unwrapped, if it contained an error it will be
751 /// logged. Can be useful in iterator chain commands
752 pub fn recursive_iter_backup_ns_ok(
753 self: &Arc<DataStore>,
754 ns: BackupNamespace,
15a92724 755 max_depth: Option<usize>,
90e38696
TL
756 ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
757 let this = Arc::clone(self);
15a92724 758 Ok(if let Some(depth) = max_depth {
ca3f8757 759 ListNamespacesRecursive::new_max_depth(Arc::clone(self), ns, depth)?
15a92724 760 } else {
ca3f8757 761 ListNamespacesRecursive::new(Arc::clone(self), ns)?
15a92724
TL
762 }
763 .filter_map(move |ns| match ns {
764 Ok(ns) => Some(ns),
765 Err(err) => {
766 log::error!("list groups error on datastore {} - {}", this.name(), err);
767 None
768 }
769 }))
90e38696
TL
770 }
771
    /// Get a streaming iter over top-level backup groups of a datastore of a particular type.
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_type(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        ty: BackupType,
    ) -> Result<ListGroupsType, Error> {
        ListGroupsType::new(Arc::clone(self), ns, ty)
    }
783
    /// Get a streaming iter over top-level backup groups of a datastore of a particular type,
    /// filtered by `Ok` results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn iter_backup_type_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        ty: BackupType,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        Ok(self.iter_backup_type(ns, ty)?.ok())
    }
796
    /// Get a streaming iter over top-level backup groups of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_groups(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListGroups, Error> {
        ListGroups::new(Arc::clone(self), ns)
    }
807
    /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn iter_backup_groups_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        Ok(self.iter_backup_groups(ns)?.ok())
    }
818
c90dbb5c 819 /// Get a in-memory vector for all top-level backup groups of a datatstore
7b125de3
TL
820 ///
821 /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
8c74349b
WB
822 pub fn list_backup_groups(
823 self: &Arc<DataStore>,
824 ns: BackupNamespace,
825 ) -> Result<Vec<BackupGroup>, Error> {
826 ListGroups::new(Arc::clone(self), ns)?.collect()
c90dbb5c
TL
827 }
828
    /// Collect the paths of all fixed and dynamic index files in this
    /// datastore by walking its directory tree.
    ///
    /// Hidden entries (e.g. `.chunks`) are skipped. Permission errors abort
    /// the walk, except for a top-level `lost+found` directory (ext4 fsck).
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry
                .file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        // Decide whether a walk error is tolerable (returns Ok) or fatal (bails).
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                if let Some(path) = err.path() {
                    if inner.kind() == io::ErrorKind::PermissionDenied {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
                        }
                    } else {
                        bail!(
                            "unexpected error on datastore traversal: {} - {:?}",
                            inner,
                            path
                        )
                    }
                } else {
                    bail!("unexpected error on datastore traversal: {}", inner)
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue;
                }
            };
            // only index files count as images; everything else is ignored
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex
                    || archive_type == ArchiveType::DynamicIndex
                {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }
887
    // mark chunks used by ``index`` as used
    //
    // Touches every chunk referenced by the index via the chunk store (a
    // chunk that cannot be touched because it does not exist is only warned
    // about), and updates the GC status counters.
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            // honor abort/shutdown requests between chunks
            worker.check_abort()?;
            worker.fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            // cond_touch_chunk with assert_exists=false: returns Ok(false)
            // when the chunk file does not exist
            if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                let hex = hex::encode(digest);
                task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {hex}, required by {file_name:?}"
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.inner.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }
924
f6b1d1cc
WB
    /// GC phase 1: walks all index files of the datastore and marks every
    /// referenced chunk as used.
    ///
    /// Logs progress in whole-percent steps and counts index files found
    /// outside the expected snapshot directory scheme (paths whose parent does
    /// not parse as a `pbs_api_types::BackupDir`). Index files that vanish
    /// between listing and opening are silently ignored.
    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;

            // count indexes whose parent directory does not look like a
            // regular snapshot path; they are still marked below
            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if pbs_api_types::BackupDir::from_str(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            // image_count > 0 here, since we are inside the loop
            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    i + 1,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }
993
994 pub fn last_gc_status(&self) -> GarbageCollectionStatus {
4bc84a65 995 self.inner.last_gc_status.lock().unwrap().clone()
f2b99c34 996 }
3d5c11e5 997
8545480a 998 pub fn garbage_collection_running(&self) -> bool {
4bc84a65 999 !matches!(self.inner.gc_mutex.try_lock(), Ok(_))
8545480a
DM
1000 }
1001
42c2b5be
TL
    /// Runs a full garbage collection on this datastore: phase 1 marks all
    /// chunks referenced by index files, phase 2 sweeps unreferenced chunks.
    ///
    /// Fails immediately if another GC already holds the GC mutex, or if the
    /// exclusive chunk store lock cannot be taken (e.g. an old daemon process
    /// still has a backup writer open). On success the resulting
    /// [`GarbageCollectionStatus`] is written to `.gc-status` in the datastore
    /// (best effort, errors ignored) and cached in memory.
    ///
    /// `upid` identifies the task owning this GC run and is recorded in the
    /// status.
    pub fn garbage_collection(
        &self,
        worker: &dyn WorkerTaskContext,
        upid: &UPID,
    ) -> Result<(), Error> {
        if let Ok(ref mut _mutex) = self.inner.gc_mutex.try_lock() {
            // avoids that we run GC if an old daemon process has still a
            // running backup writer, which is not save as we have no "oldest
            // writer" information and thus no safe atime cutoff
            let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox_time::epoch_i64();
            // chunks touched after this cutoff must not be swept in phase 2
            let oldest_writer = self
                .inner
                .chunk_store
                .oldest_writer()
                .unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus {
                upid: Some(upid.to_string()),
                ..Default::default()
            };

            task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.inner.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per =
                    (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            // persist the GC status next to the data (best effort)
            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options, false);
            }

            *self.inner.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
3b7ade9e 1115
ccc3896f 1116 pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
4bc84a65 1117 self.inner.chunk_store.try_shared_lock()
1cf5178a
DM
1118 }
1119
42c2b5be 1120 pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
4bc84a65 1121 self.inner.chunk_store.chunk_path(digest)
d48a9955
DM
1122 }
1123
b298e9f1 1124 pub fn cond_touch_chunk(&self, digest: &[u8; 32], assert_exists: bool) -> Result<bool, Error> {
42c2b5be
TL
1125 self.inner
1126 .chunk_store
b298e9f1 1127 .cond_touch_chunk(digest, assert_exists)
42c2b5be
TL
1128 }
1129
1130 pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
4bc84a65 1131 self.inner.chunk_store.insert_chunk(chunk, digest)
3b7ade9e 1132 }
60f9a6ea 1133
7f394c80 1134 pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
4bc84a65 1135 let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
7f394c80
DC
1136 std::fs::metadata(chunk_path).map_err(Error::from)
1137 }
1138
39f18b30 1139 pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
4bc84a65 1140 let (chunk_path, digest_str) = self.inner.chunk_store.chunk_path(digest);
39f18b30 1141
6ef1b649 1142 proxmox_lang::try_block!({
39f18b30
DM
1143 let mut file = std::fs::File::open(&chunk_path)?;
1144 DataBlob::load_from_reader(&mut file)
42c2b5be
TL
1145 })
1146 .map_err(|err| {
1147 format_err!(
1148 "store '{}', unable to load chunk '{}' - {}",
1149 self.name(),
1150 digest_str,
1151 err,
1152 )
1153 })
1a374fcf
SR
1154 }
1155
8292d3d2 1156 /// Updates the protection status of the specified snapshot.
42c2b5be 1157 pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
6da20161 1158 let full_path = backup_dir.full_path();
8292d3d2
DC
1159
1160 let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
1161
6da20161 1162 let protected_path = backup_dir.protected_file();
8292d3d2
DC
1163 if protection {
1164 std::fs::File::create(protected_path)
1165 .map_err(|err| format_err!("could not create protection file: {}", err))?;
1166 } else if let Err(err) = std::fs::remove_file(protected_path) {
1167 // ignore error for non-existing file
1168 if err.kind() != std::io::ErrorKind::NotFound {
1169 bail!("could not remove protection file: {}", err);
1170 }
1171 }
1172
1173 Ok(())
1174 }
1175
0698f78d 1176 pub fn verify_new(&self) -> bool {
4bc84a65 1177 self.inner.verify_new
0698f78d 1178 }
4921a411 1179
bc001e12
TL
    /// Returns `(index_position, inode)` pairs for all chunks of `index` that
    /// are not filtered out by `skip_chunk`.
    ///
    /// With `ChunkOrder::Inode` the list is sorted by the chunk file's inode
    /// number (which improves read locality on spinning disks); chunks that
    /// could not be stat'ed get inode `u64::MAX` and thus end up at the end of
    /// the list. With `ChunkOrder::None` the original index order is kept and
    /// the reported inode is 0.
    ///
    /// `check_abort` is called with the current position on every iteration
    /// and may cancel the listing by returning an error.
    pub fn get_chunks_in_order<F, A>(
        &self,
        index: &Box<dyn IndexFile + Send>,
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
    where
        F: Fn(&[u8; 32]) -> bool,
        A: Fn(usize) -> Result<(), Error>,
    {
        let index_count = index.index_count();
        let mut chunk_list = Vec::with_capacity(index_count);
        use std::os::unix::fs::MetadataExt;
        for pos in 0..index_count {
            check_abort(pos)?;

            let info = index.chunk_info(pos).unwrap();

            if skip_chunk(&info.digest) {
                continue;
            }

            let ino = match self.inner.chunk_order {
                ChunkOrder::Inode => {
                    match self.stat_chunk(&info.digest) {
                        Err(_) => u64::MAX, // could not stat, move to end of list
                        Ok(metadata) => metadata.ino(),
                    }
                }
                ChunkOrder::None => 0,
            };

            chunk_list.push((pos, ino));
        }

        match self.inner.chunk_order {
            // sorting by inode improves data locality, which makes it lots faster on spinners
            ChunkOrder::Inode => {
                chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b))
            }
            ChunkOrder::None => {}
        }

        Ok(chunk_list)
    }
db87d93e 1227
6b0c6492 1228 /// Open a backup group from this datastore.
133d718f
WB
1229 pub fn backup_group(
1230 self: &Arc<Self>,
1231 ns: BackupNamespace,
1232 group: pbs_api_types::BackupGroup,
1233 ) -> BackupGroup {
ca3f8757 1234 BackupGroup::new(Arc::clone(self), ns, group)
db87d93e
WB
1235 }
1236
6b0c6492 1237 /// Open a backup group from this datastore.
8c74349b
WB
1238 pub fn backup_group_from_parts<T>(
1239 self: &Arc<Self>,
1240 ns: BackupNamespace,
1241 ty: BackupType,
1242 id: T,
1243 ) -> BackupGroup
6b0c6492
WB
1244 where
1245 T: Into<String>,
1246 {
133d718f 1247 self.backup_group(ns, (ty, id.into()).into())
6b0c6492
WB
1248 }
1249
133d718f 1250 /*
6b0c6492
WB
1251 /// Open a backup group from this datastore by backup group path such as `vm/100`.
1252 ///
1253 /// Convenience method for `store.backup_group(path.parse()?)`
6da20161 1254 pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
133d718f 1255 todo!("split out the namespace");
db87d93e 1256 }
133d718f 1257 */
db87d93e 1258
6b0c6492 1259 /// Open a snapshot (backup directory) from this datastore.
133d718f
WB
1260 pub fn backup_dir(
1261 self: &Arc<Self>,
1262 ns: BackupNamespace,
1263 dir: pbs_api_types::BackupDir,
1264 ) -> Result<BackupDir, Error> {
1265 BackupDir::with_group(self.backup_group(ns, dir.group), dir.time)
6b0c6492
WB
1266 }
1267
1268 /// Open a snapshot (backup directory) from this datastore.
db87d93e 1269 pub fn backup_dir_from_parts<T>(
6da20161 1270 self: &Arc<Self>,
8c74349b 1271 ns: BackupNamespace,
db87d93e
WB
1272 ty: BackupType,
1273 id: T,
1274 time: i64,
1275 ) -> Result<BackupDir, Error>
1276 where
1277 T: Into<String>,
1278 {
133d718f 1279 self.backup_dir(ns, (ty, id.into(), time).into())
db87d93e
WB
1280 }
1281
6b0c6492 1282 /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
db87d93e 1283 pub fn backup_dir_with_rfc3339<T: Into<String>>(
6da20161 1284 self: &Arc<Self>,
db87d93e
WB
1285 group: BackupGroup,
1286 time_string: T,
1287 ) -> Result<BackupDir, Error> {
1288 BackupDir::with_rfc3339(group, time_string.into())
1289 }
1290
133d718f 1291 /*
6b0c6492 1292 /// Open a snapshot (backup directory) from this datastore by a snapshot path.
6da20161 1293 pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
133d718f 1294 todo!("split out the namespace");
db87d93e 1295 }
133d718f 1296 */
529de6c7 1297}