// pbs-datastore/src/datastore.rs
use std::collections::{HashMap, HashSet};
use std::io::{self, Write};
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use nix::unistd::{unlinkat, UnlinkatFlags};

use proxmox_human_byte::HumanByte;
use proxmox_schema::ApiType;

use proxmox_sys::error::SysError;
use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_sys::WorkerTaskContext;
use proxmox_sys::{task_log, task_warn};

use pbs_api_types::{
    Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel,
    DatastoreTuning, GarbageCollectionStatus, Operation, UPID,
};

use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
use crate::index::IndexFile;
use crate::manifest::{archive_type, ArchiveType};
use crate::task_tracking::{self, update_active_operations};
use crate::DataBlob;

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStoreImpl>>> =
        Mutex::new(HashMap::new());
}

/// Checks if `auth_id` is the owner or, if the owner is a token,
/// whether `auth_id` is the user of that token.
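///
/// # Example
///
/// A minimal sketch; the import path and the concrete `Authid` values are
/// assumptions for illustration:
///
/// ```ignore
/// use pbs_api_types::Authid;
///
/// let owner: Authid = "root@pam".parse()?;
/// let requester: Authid = "root@pam".parse()?;
/// // succeeds because the requester matches the owner
/// check_backup_owner(&owner, &requester)?;
/// ```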
pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
    let correct_owner =
        owner == auth_id || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}

/// Datastore Management
///
/// A datastore can store several backups, and provides the
/// management interface for backups.
pub struct DataStoreImpl {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<()>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
    chunk_order: ChunkOrder,
    last_digest: Option<[u8; 32]>,
    sync_level: DatastoreFSyncLevel,
}

impl DataStoreImpl {
    // This one just panics on everything
    #[doc(hidden)]
    pub(crate) unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
            verify_new: false,
            chunk_order: Default::default(),
            last_digest: None,
            sync_level: Default::default(),
        })
    }
}

pub struct DataStore {
    inner: Arc<DataStoreImpl>,
    operation: Option<Operation>,
}

impl Clone for DataStore {
    fn clone(&self) -> Self {
        let mut new_operation = self.operation;
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, 1) {
                log::error!("could not update active operations - {}", e);
                new_operation = None;
            }
        }

        DataStore {
            inner: self.inner.clone(),
            operation: new_operation,
        }
    }
}

impl Drop for DataStore {
    fn drop(&mut self) {
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, -1) {
                log::error!("could not update active operations - {}", e);
            }
        }
    }
}

impl DataStore {
    // This one just panics on everything
    #[doc(hidden)]
    pub(crate) unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            inner: unsafe { DataStoreImpl::new_test() },
            operation: None,
        })
    }

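    /// Look up a datastore by name, registering the given operation on it.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a configured datastore named "store1"
    /// (the name and the error handling are illustrative only):
    ///
    /// ```ignore
    /// use pbs_api_types::Operation;
    ///
    /// let store = DataStore::lookup_datastore("store1", Some(Operation::Read))?;
    /// println!("using datastore {}", store.name());
    /// ```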
    pub fn lookup_datastore(
        name: &str,
        operation: Option<Operation>,
    ) -> Result<Arc<DataStore>, Error> {
        // Avoid TOCTOU between checking maintenance mode and updating active operation counter, as
        // we use it to decide whether it is okay to delete the datastore.
        let config_lock = pbs_config::datastore::lock_config()?;

        // we could use the ConfigVersionCache's generation for staleness detection, but we load
        // the config anyway -> just use digest, additional benefit: manual changes get detected
        let (config, digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;

        if let Some(maintenance_mode) = config.get_maintenance_mode() {
            if let Err(error) = maintenance_mode.check(operation) {
                bail!("datastore '{name}' is in {error}");
            }
        }

        if let Some(operation) = operation {
            update_active_operations(name, operation, 1)?;
        }

        // Our operation is registered, unlock the config.
        drop(config_lock);

        let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
        let entry = datastore_cache.get(name);

        // reuse chunk store so that we keep using the same process locker instance!
        let chunk_store = if let Some(datastore) = &entry {
            let last_digest = datastore.last_digest.as_ref();
            if let Some(true) = last_digest.map(|last_digest| last_digest == &digest) {
                return Ok(Arc::new(Self {
                    inner: Arc::clone(datastore),
                    operation,
                }));
            }
            Arc::clone(&datastore.chunk_store)
        } else {
            let tuning: DatastoreTuning = serde_json::from_value(
                DatastoreTuning::API_SCHEMA
                    .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
            )?;
            Arc::new(ChunkStore::open(
                name,
                &config.path,
                tuning.sync_level.unwrap_or_default(),
            )?)
        };

        let datastore = DataStore::with_store_and_config(chunk_store, config, Some(digest))?;

        let datastore = Arc::new(datastore);
        datastore_cache.insert(name.to_string(), datastore.clone());

        Ok(Arc::new(Self {
            inner: datastore,
            operation,
        }))
    }

    /// Removes all datastores that are no longer configured.
    pub fn remove_unused_datastores() -> Result<(), Error> {
        let (config, _digest) = pbs_config::datastore::config()?;

        let mut map = DATASTORE_MAP.lock().unwrap();
        // removes all elements that are not in the config
        map.retain(|key, _| config.sections.contains_key(key));
        Ok(())
    }

    /// Open a raw datastore given a name and a path.
    ///
    /// # Safety
    /// See the safety section in `open_from_config`
    pub unsafe fn open_path(
        name: &str,
        path: impl AsRef<Path>,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let path = path
            .as_ref()
            .to_str()
            .ok_or_else(|| format_err!("non-utf8 paths not supported"))?
            .to_owned();
        unsafe { Self::open_from_config(DataStoreConfig::new(name.to_owned(), path), operation) }
    }

    /// Open a datastore given a raw configuration.
    ///
    /// # Safety
    /// There's no memory safety implication, but as this is opening a new ChunkStore it will
    /// create a new process locker instance, potentially on the same path as existing safely
    /// created ones. This is dangerous as dropping the reference of this and thus the underlying
    /// chunkstore's process locker will close all locks from our process on the config.path,
    /// breaking guarantees we need to uphold for safe long backup + GC interaction on newer/older
    /// process instances (from package update).
    unsafe fn open_from_config(
        config: DataStoreConfig,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let name = config.name.clone();

        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;
        let chunk_store =
            ChunkStore::open(&name, &config.path, tuning.sync_level.unwrap_or_default())?;
        let inner = Arc::new(Self::with_store_and_config(
            Arc::new(chunk_store),
            config,
            None,
        )?);

        if let Some(operation) = operation {
            update_active_operations(&name, operation, 1)?;
        }

        Ok(Arc::new(Self { inner, operation }))
    }

    fn with_store_and_config(
        chunk_store: Arc<ChunkStore>,
        config: DataStoreConfig,
        last_digest: Option<[u8; 32]>,
    ) -> Result<DataStoreImpl, Error> {
        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    log::error!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;

        Ok(DataStoreImpl {
            chunk_store,
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
            chunk_order: tuning.chunk_order.unwrap_or_default(),
            last_digest,
            sync_level: tuning.sync_level.unwrap_or_default(),
        })
    }

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.inner.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<FixedIndexWriter, Error> {
        let index = FixedIndexWriter::create(
            self.inner.chunk_store.clone(),
            filename.as_ref(),
            size,
            chunk_size,
        )?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<FixedIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexWriter, Error> {
        let index = DynamicIndexWriter::create(self.inner.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> = match archive_type(filename)? {
            ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
            ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
            _ => bail!("cannot open index file of unknown type: {:?}", filename),
        };
        Ok(out)
    }

    /// Fast index verification - only checks if chunks exist.
    pub fn fast_index_verification(
        &self,
        index: &dyn IndexFile,
        checked: &mut HashSet<[u8; 32]>,
    ) -> Result<(), Error> {
        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            if checked.contains(&info.digest) {
                continue;
            }

            self.stat_chunk(&info.digest).map_err(|err| {
                format_err!(
                    "fast_index_verification error, stat_chunk {} failed - {}",
                    hex::encode(info.digest),
                    err,
                )
            })?;

            checked.insert(info.digest);
        }

        Ok(())
    }

    pub fn name(&self) -> &str {
        self.inner.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.inner.chunk_store.base_path()
    }

    /// Returns the absolute path for a backup namespace on this datastore.
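    ///
    /// # Example
    ///
    /// A sketch of the resulting layout; the namespace value is illustrative:
    ///
    /// ```ignore
    /// // a namespace "a/b" maps to "<base>/ns/a/ns/b"
    /// let ns: BackupNamespace = "a/b".parse()?;
    /// let path = store.namespace_path(&ns);
    /// assert!(path.ends_with("ns/a/ns/b"));
    /// ```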
    pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
        let mut path = self.base_path();
        path.reserve(ns.path_len());
        for part in ns.components() {
            path.push("ns");
            path.push(part);
        }
        path
    }

    /// Returns the absolute path for a backup_type
    pub fn type_path(&self, ns: &BackupNamespace, backup_type: BackupType) -> PathBuf {
        let mut full_path = self.namespace_path(ns);
        full_path.push(backup_type.to_string());
        full_path
    }

    /// Returns the absolute path for a backup_group
    pub fn group_path(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> PathBuf {
        let mut full_path = self.namespace_path(ns);
        full_path.push(backup_group.to_string());
        full_path
    }

    /// Returns the absolute path for a backup_dir
    pub fn snapshot_path(
        &self,
        ns: &BackupNamespace,
        backup_dir: &pbs_api_types::BackupDir,
    ) -> PathBuf {
        let mut full_path = self.namespace_path(ns);
        full_path.push(backup_dir.to_string());
        full_path
    }

    /// Create a backup namespace.
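    ///
    /// # Example
    ///
    /// A minimal sketch; the parent namespace and name are illustrative:
    ///
    /// ```ignore
    /// let root = BackupNamespace::root();
    /// let ns = store.create_namespace(&root, "team-a".to_string())?;
    /// assert!(store.namespace_exists(&ns));
    /// ```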
    pub fn create_namespace(
        self: &Arc<Self>,
        parent: &BackupNamespace,
        name: String,
    ) -> Result<BackupNamespace, Error> {
        if !self.namespace_exists(parent) {
            bail!("cannot create new namespace, parent {parent} doesn't exist");
        }

        // construct ns before mkdir to enforce max-depth and name validity
        let ns = BackupNamespace::from_parent_ns(parent, name)?;

        let mut ns_full_path = self.base_path();
        ns_full_path.push(ns.path());

        std::fs::create_dir_all(ns_full_path)?;

        Ok(ns)
    }

    /// Returns true if the given namespace exists on the datastore.
    pub fn namespace_exists(&self, ns: &BackupNamespace) -> bool {
        let mut path = self.base_path();
        path.push(ns.path());
        path.exists()
    }

    /// Remove all backup groups of a single namespace level but not the namespace itself.
    ///
    /// Does *not* descend into child namespaces and does not remove the namespace itself either.
    ///
    /// Returns true if all the groups were removed, and false if some were protected.
    pub fn remove_namespace_groups(self: &Arc<Self>, ns: &BackupNamespace) -> Result<bool, Error> {
        // FIXME: locking? The single groups/snapshots are already protected, so may not be
        // necessary (depends on what we all allow to do with namespaces)
        log::info!("removing all groups in namespace {}:/{ns}", self.name());

        let mut removed_all_groups = true;

        for group in self.iter_backup_groups(ns.to_owned())? {
            let removed_group = group?.destroy()?;
            removed_all_groups = removed_all_groups && removed_group;
        }

        let base_file = std::fs::File::open(self.base_path())?;
        let base_fd = base_file.as_raw_fd();
        for ty in BackupType::iter() {
            let mut ty_dir = ns.path();
            ty_dir.push(ty.to_string());
            // best effort only, but we probably should log the error
            if let Err(err) = unlinkat(Some(base_fd), &ty_dir, UnlinkatFlags::RemoveDir) {
                if err != nix::errno::Errno::ENOENT {
                    log::error!("failed to remove backup type {ty} in {ns} - {err}");
                }
            }
        }

        Ok(removed_all_groups)
    }

    /// Remove a complete backup namespace, optionally including all of its (and its child
    /// namespaces') groups. If `delete_groups` is false this only prunes empty namespaces.
    ///
    /// Returns true if everything requested was removed, and false if some groups were protected
    /// or if some namespaces weren't empty even though all groups were deleted (race with a new
    /// backup).
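    ///
    /// # Example
    ///
    /// A sketch of recursively deleting everything below a namespace (the
    /// namespace value is illustrative):
    ///
    /// ```ignore
    /// let ns: BackupNamespace = "team-a".parse()?;
    /// let all_removed = store.remove_namespace_recursive(&ns, true)?;
    /// if !all_removed {
    ///     log::warn!("some groups were protected or namespaces not empty");
    /// }
    /// ```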
    pub fn remove_namespace_recursive(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        delete_groups: bool,
    ) -> Result<bool, Error> {
        let store = self.name();
        let mut removed_all_requested = true;
        if delete_groups {
            log::info!("removing whole namespace recursively below {store}:/{ns}");
            for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
                let removed_ns_groups = self.remove_namespace_groups(&ns?)?;
                removed_all_requested = removed_all_requested && removed_ns_groups;
            }
        } else {
            log::info!("pruning empty namespace recursively below {store}:/{ns}");
        }

        // now try to delete the actual namespaces, bottom up so that we can use safe rmdir that
        // will choke if a new backup/group appeared in the meantime (but not on a new empty NS)
        let mut children = self
            .recursive_iter_backup_ns(ns.to_owned())?
            .collect::<Result<Vec<BackupNamespace>, Error>>()?;

        children.sort_by_key(|b| std::cmp::Reverse(b.depth()));

        let base_file = std::fs::File::open(self.base_path())?;
        let base_fd = base_file.as_raw_fd();

        for ns in children.iter() {
            let mut ns_dir = ns.path();
            ns_dir.push("ns");
            let _ = unlinkat(Some(base_fd), &ns_dir, UnlinkatFlags::RemoveDir);

            if !ns.is_root() {
                match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
                    Ok(()) => log::debug!("removed namespace {ns}"),
                    Err(nix::errno::Errno::ENOENT) => {
                        log::debug!("namespace {ns} already removed")
                    }
                    Err(nix::errno::Errno::ENOTEMPTY) if !delete_groups => {
                        removed_all_requested = false;
                        log::debug!("skip removal of non-empty namespace {ns}")
                    }
                    Err(err) => {
                        removed_all_requested = false;
                        log::warn!("failed to remove namespace {ns} - {err}")
                    }
                }
            }
        }

        Ok(removed_all_requested)
    }

    /// Remove a complete backup group including all snapshots.
    ///
    /// Returns true if all snapshots were removed, and false if some were protected.
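    ///
    /// # Example
    ///
    /// A sketch; the namespace and group values are illustrative:
    ///
    /// ```ignore
    /// let ns = BackupNamespace::root();
    /// let group: pbs_api_types::BackupGroup = "vm/100".parse()?;
    /// let all_removed = store.remove_backup_group(&ns, &group)?;
    /// ```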
    pub fn remove_backup_group(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<bool, Error> {
        let backup_group = self.backup_group(ns.clone(), backup_group.clone());

        backup_group.destroy()
    }

    /// Remove a backup directory including all content.
    pub fn remove_backup_dir(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        backup_dir: &pbs_api_types::BackupDir,
        force: bool,
    ) -> Result<(), Error> {
        let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;

        backup_dir.destroy(force)
    }

    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<Option<i64>, Error> {
        let backup_group = self.backup_group(ns.clone(), backup_group.clone());

        let group_path = backup_group.full_group_path();

        if group_path.exists() {
            backup_group.last_successful_backup()
        } else {
            Ok(None)
        }
    }

    /// Return the path of the 'owner' file.
    fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
        self.group_path(ns, group).join("owner")
    }

    /// Returns the backup owner.
    ///
    /// The backup owner is the entity who first created the backup group.
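    ///
    /// # Example
    ///
    /// A sketch; the namespace and group values are illustrative:
    ///
    /// ```ignore
    /// let ns = BackupNamespace::root();
    /// let group: pbs_api_types::BackupGroup = "vm/100".parse()?;
    /// let owner = store.get_owner(&ns, &group)?;
    /// println!("group is owned by {owner}");
    /// ```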
    pub fn get_owner(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<Authid, Error> {
        let full_path = self.owner_path(ns, backup_group);
        let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
        owner
            .trim_end() // remove trailing newline
            .parse()
            .map_err(|err| format_err!("parsing owner for {backup_group} failed: {err}"))
    }

    pub fn owns_backup(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<bool, Error> {
        let owner = self.get_owner(ns, backup_group)?;

        Ok(check_backup_owner(&owner, auth_id).is_ok())
    }

    /// Set the backup owner.
    pub fn set_owner(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let path = self.owner_path(ns, backup_group);

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            open_options.create_new(true);
        }

        let mut file = open_options
            .open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }

    /// Create (if it does not already exist) and lock a backup group.
    ///
    /// Also sets the owner to `auth_id`. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
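    ///
    /// # Example
    ///
    /// A sketch of how a backup writer might use this (the values are illustrative):
    ///
    /// ```ignore
    /// let ns = BackupNamespace::root();
    /// let group: pbs_api_types::BackupGroup = "vm/100".parse()?;
    /// let (owner, _guard) = store.create_locked_backup_group(&ns, &group, &auth_id)?;
    /// // the group stays locked until `_guard` is dropped
    /// ```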
    pub fn create_locked_backup_group(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        for ns in ns.components() {
            full_path.push("ns");
            full_path.push(ns);
        }
        full_path.push(backup_group.ty.as_str());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(&backup_group.id);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                self.set_owner(ns, backup_group, auth_id, false)?;
                let owner = self.get_owner(ns, backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                let owner = self.get_owner(ns, backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }

    /// Creates a new backup snapshot inside a BackupGroup.
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_locked_backup_dir(
        &self,
        ns: &BackupNamespace,
        backup_dir: &pbs_api_types::BackupDir,
    ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
        let full_path = self.snapshot_path(ns, backup_dir);
        let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
            format_err!(
                "failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
            )
        })?;

        let lock = || {
            lock_dir_noblock(
                &full_path,
                "snapshot",
                "internal error - tried creating snapshot that's already in use",
            )
        };

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
                Ok((relative_path.to_owned(), false, lock()?))
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Get a streaming iter over single-level backup namespaces of a datastore.
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
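    ///
    /// # Example
    ///
    /// A sketch of listing the direct children of the root namespace:
    ///
    /// ```ignore
    /// for ns in store.iter_backup_ns(BackupNamespace::root())? {
    ///     println!("found namespace {}", ns?);
    /// }
    /// ```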
    pub fn iter_backup_ns(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListNamespaces, Error> {
        ListNamespaces::new(Arc::clone(self), ns)
    }

    /// Get a streaming iter over single-level backup namespaces of a datastore, filtered by Ok.
    ///
    /// The iterated item's result is already unwrapped; if it contained an error it will be
    /// logged. Can be useful in iterator chain commands.
    pub fn iter_backup_ns_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
        let this = Arc::clone(self);
        Ok(
            ListNamespaces::new(Arc::clone(self), ns)?.filter_map(move |ns| match ns {
                Ok(ns) => Some(ns),
                Err(err) => {
                    log::error!("list groups error on datastore {} - {}", this.name(), err);
                    None
                }
            }),
        )
    }

    /// Get a streaming iter over all backup namespaces of a datastore, recursively.
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn recursive_iter_backup_ns(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListNamespacesRecursive, Error> {
        ListNamespacesRecursive::new(Arc::clone(self), ns)
    }

    /// Get a streaming iter over all backup namespaces of a datastore, recursively, filtered by Ok.
    ///
    /// The iterated item's result is already unwrapped; if it contained an error it will be
    /// logged. Can be useful in iterator chain commands.
    pub fn recursive_iter_backup_ns_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        max_depth: Option<usize>,
    ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
        let this = Arc::clone(self);
        Ok(if let Some(depth) = max_depth {
            ListNamespacesRecursive::new_max_depth(Arc::clone(self), ns, depth)?
        } else {
            ListNamespacesRecursive::new(Arc::clone(self), ns)?
        }
        .filter_map(move |ns| match ns {
            Ok(ns) => Some(ns),
            Err(err) => {
                log::error!("list groups error on datastore {} - {}", this.name(), err);
                None
            }
        }))
    }

    /// Get a streaming iter over top-level backup groups of a datastore of a particular type.
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_type(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        ty: BackupType,
    ) -> Result<ListGroupsType, Error> {
        ListGroupsType::new(Arc::clone(self), ns, ty)
    }

    /// Get a streaming iter over top-level backup groups of a datastore of a particular type,
    /// filtered by `Ok` results.
    ///
    /// The iterated item's result is already unwrapped; if it contained an error it will be
    /// logged. Can be useful in iterator chain commands.
    pub fn iter_backup_type_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        ty: BackupType,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        Ok(self.iter_backup_type(ns, ty)?.ok())
    }

    /// Get a streaming iter over top-level backup groups of a datastore.
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_groups(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListGroups, Error> {
        ListGroups::new(Arc::clone(self), ns)
    }

    /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results.
    ///
    /// The iterated item's result is already unwrapped; if it contained an error it will be
    /// logged. Can be useful in iterator chain commands.
    pub fn iter_backup_groups_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        Ok(self.iter_backup_groups(ns)?.ok())
    }

    /// Get an in-memory vector of all top-level backup groups of a datastore.
    ///
    /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage.
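    ///
    /// # Example
    ///
    /// A sketch of collecting and printing all groups in the root namespace
    /// (the accessor method is assumed from the crate's `BackupGroup` API):
    ///
    /// ```ignore
    /// let groups = store.list_backup_groups(BackupNamespace::root())?;
    /// for group in groups {
    ///     println!("{}", group.group());
    /// }
    /// ```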
    pub fn list_backup_groups(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<Vec<BackupGroup>, Error> {
        ListGroups::new(Arc::clone(self), ns)?.collect()
    }

    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry
                .file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            // first, extract the actual IO error and the affected path
            let (inner, path) = match (err.io_error(), err.path()) {
                (None, _) => return Ok(()), // not an IO-error
                (Some(inner), Some(path)) => (inner, path),
                (Some(inner), None) => bail!("unexpected error on datastore traversal: {inner}"),
            };
            if inner.kind() == io::ErrorKind::PermissionDenied {
                if err.depth() <= 1 && path.ends_with("lost+found") {
                    // allow skipping of (root-only) ext4 fsck-directory on EPERM ..
                    return Ok(());
                }
                // .. but do not ignore EPERM in general, otherwise we might prune too many chunks.
                // E.g., if users messed up with owner/perms on a rsync
                bail!("cannot continue garbage-collection safely, permission denied on: {path:?}");
            } else if inner.kind() == io::ErrorKind::NotFound {
                log::info!("ignoring vanished file: {path:?}");
                return Ok(());
            } else {
                bail!("unexpected error on datastore traversal: {inner} - {path:?}");
            }
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue;
                }
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex
                    || archive_type == ArchiveType::DynamicIndex
                {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    // mark chunks used by `index` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                let hex = hex::encode(digest);
                task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {hex}, required by {file_name:?}"
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.inner.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }

    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;

            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if pbs_api_types::parse_ns_and_snapshot(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    i + 1,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.inner.last_gc_status.lock().unwrap().clone()
    }

    pub fn garbage_collection_running(&self) -> bool {
        self.inner.gc_mutex.try_lock().is_err()
    }

    pub fn garbage_collection(
        &self,
        worker: &dyn WorkerTaskContext,
        upid: &UPID,
    ) -> Result<(), Error> {
        if let Ok(ref mut _mutex) = self.inner.gc_mutex.try_lock() {
            // avoids that we run GC if an old daemon process still has a
            // running backup writer, which is not safe as we have no "oldest
            // writer" information and thus no safe atime cutoff
            let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox_time::epoch_i64();
            let oldest_writer = self
                .inner
                .chunk_store
                .oldest_writer()
                .unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus {
                upid: Some(upid.to_string()),
                ..Default::default()
            };

            task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.inner.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per =
                    (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options, false);
            }

            *self.inner.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }

    pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
        self.inner.chunk_store.try_shared_lock()
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.inner.chunk_store.chunk_path(digest)
    }

    pub fn cond_touch_chunk(&self, digest: &[u8; 32], assert_exists: bool) -> Result<bool, Error> {
        self.inner
            .chunk_store
            .cond_touch_chunk(digest, assert_exists)
    }

    pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
        self.inner.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
        let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
        std::fs::metadata(chunk_path).map_err(Error::from)
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let (chunk_path, digest_str) = self.inner.chunk_store.chunk_path(digest);

        proxmox_lang::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        })
        .map_err(|err| {
            format_err!(
                "store '{}', unable to load chunk '{}' - {}",
                self.name(),
                digest_str,
                err,
            )
        })
    }

    /// Updates the protection status of the specified snapshot.
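    ///
    /// # Example
    ///
    /// A sketch of protecting a snapshot; the `backup_dir` value is assumed to
    /// come from an earlier lookup:
    ///
    /// ```ignore
    /// store.update_protection(&backup_dir, true)?; // protect
    /// store.update_protection(&backup_dir, false)?; // unprotect
    /// ```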
    pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
        let full_path = backup_dir.full_path();

        if !full_path.exists() {
            bail!("snapshot {} does not exist!", backup_dir.dir());
        }

        let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;

        let protected_path = backup_dir.protected_file();
        if protection {
            std::fs::File::create(protected_path)
                .map_err(|err| format_err!("could not create protection file: {}", err))?;
        } else if let Err(err) = std::fs::remove_file(protected_path) {
            // ignore error for non-existing file
            if err.kind() != std::io::ErrorKind::NotFound {
                bail!("could not remove protection file: {}", err);
            }
        }

        Ok(())
    }

    pub fn verify_new(&self) -> bool {
        self.inner.verify_new
    }

    /// Returns a list of chunks sorted by their inode number on disk; chunks that couldn't get
    /// stat'ed are placed at the end of the list.
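    ///
    /// # Example
    ///
    /// A sketch of reading chunks in on-disk order; the `index` value and the
    /// no-op callbacks are illustrative:
    ///
    /// ```ignore
    /// let order = store.get_chunks_in_order(&index, |_digest| false, |_pos| Ok(()))?;
    /// for (pos, _inode) in order {
    ///     let info = index.chunk_info(pos).unwrap();
    ///     let chunk = store.load_chunk(&info.digest)?;
    ///     // ... process chunk ...
    /// }
    /// ```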
    pub fn get_chunks_in_order<F, A>(
        &self,
        index: &(dyn IndexFile + Send),
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
    where
        F: Fn(&[u8; 32]) -> bool,
        A: Fn(usize) -> Result<(), Error>,
    {
        let index_count = index.index_count();
        let mut chunk_list = Vec::with_capacity(index_count);
        use std::os::unix::fs::MetadataExt;
        for pos in 0..index_count {
            check_abort(pos)?;

            let info = index.chunk_info(pos).unwrap();

            if skip_chunk(&info.digest) {
                continue;
            }

            let ino = match self.inner.chunk_order {
                ChunkOrder::Inode => {
                    match self.stat_chunk(&info.digest) {
                        Err(_) => u64::MAX, // could not stat, move to end of list
                        Ok(metadata) => metadata.ino(),
                    }
                }
                ChunkOrder::None => 0,
            };

            chunk_list.push((pos, ino));
        }

        match self.inner.chunk_order {
            // sorting by inode improves data locality, which makes it lots faster on spinners
            ChunkOrder::Inode => {
                chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b))
            }
            ChunkOrder::None => {}
        }

        Ok(chunk_list)
    }

    /// Open a backup group from this datastore.
    pub fn backup_group(
        self: &Arc<Self>,
        ns: BackupNamespace,
        group: pbs_api_types::BackupGroup,
    ) -> BackupGroup {
        BackupGroup::new(Arc::clone(self), ns, group)
    }

    /// Open a backup group from this datastore.
    pub fn backup_group_from_parts<T>(
        self: &Arc<Self>,
        ns: BackupNamespace,
        ty: BackupType,
        id: T,
    ) -> BackupGroup
    where
        T: Into<String>,
    {
        self.backup_group(ns, (ty, id.into()).into())
    }

    /*
    /// Open a backup group from this datastore by backup group path such as `vm/100`.
    ///
    /// Convenience method for `store.backup_group(path.parse()?)`
    pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
        todo!("split out the namespace");
    }
    */

    /// Open a snapshot (backup directory) from this datastore.
    pub fn backup_dir(
        self: &Arc<Self>,
        ns: BackupNamespace,
        dir: pbs_api_types::BackupDir,
    ) -> Result<BackupDir, Error> {
        BackupDir::with_group(self.backup_group(ns, dir.group), dir.time)
    }

    /// Open a snapshot (backup directory) from this datastore.
    pub fn backup_dir_from_parts<T>(
        self: &Arc<Self>,
        ns: BackupNamespace,
        ty: BackupType,
        id: T,
        time: i64,
    ) -> Result<BackupDir, Error>
    where
        T: Into<String>,
    {
        self.backup_dir(ns, (ty, id.into(), time).into())
    }

    /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
    pub fn backup_dir_with_rfc3339<T: Into<String>>(
        self: &Arc<Self>,
        group: BackupGroup,
        time_string: T,
    ) -> Result<BackupDir, Error> {
        BackupDir::with_rfc3339(group, time_string.into())
    }

    /*
    /// Open a snapshot (backup directory) from this datastore by a snapshot path.
    pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
        todo!("split out the namespace");
    }
    */

    /// Syncs the filesystem of the datastore if 'sync_level' is set to
    /// [`DatastoreFSyncLevel::Filesystem`]. Uses syncfs(2).
    pub fn try_ensure_sync_level(&self) -> Result<(), Error> {
        if self.inner.sync_level != DatastoreFSyncLevel::Filesystem {
            return Ok(());
        }
        let file = std::fs::File::open(self.base_path())?;
        let fd = file.as_raw_fd();
        log::info!("syncing filesystem");
        if unsafe { libc::syncfs(fd) } < 0 {
            bail!("error during syncfs: {}", std::io::Error::last_os_error());
        }
        Ok(())
    }

    /// Destroy a datastore. This requires that there are no active operations on the datastore.
    ///
    /// This is a synchronous operation and should be run in a worker thread.
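    ///
    /// # Example
    ///
    /// A sketch of destroying a datastore including its data from inside a worker
    /// task; the name and worker handle are illustrative:
    ///
    /// ```ignore
    /// // `worker` implements WorkerTaskContext
    /// DataStore::destroy("store1", true, &worker)?;
    /// ```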
    pub fn destroy(
        name: &str,
        destroy_data: bool,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let config_lock = pbs_config::datastore::lock_config()?;

        let (mut config, _digest) = pbs_config::datastore::config()?;
        let mut datastore_config: DataStoreConfig = config.lookup("datastore", name)?;

        datastore_config.maintenance_mode = Some("type=delete".to_string());
        config.set_data(name, "datastore", &datastore_config)?;
        pbs_config::datastore::save_config(&config)?;
        drop(config_lock);

        let (operations, _lock) = task_tracking::get_active_operations_locked(name)?;

        if operations.read != 0 || operations.write != 0 {
            bail!("datastore is currently in use");
        }

        let base = PathBuf::from(&datastore_config.path);

        let mut ok = true;
        if destroy_data {
            let remove = |subdir, ok: &mut bool| {
                if let Err(err) = std::fs::remove_dir_all(base.join(subdir)) {
                    if err.kind() != io::ErrorKind::NotFound {
                        task_warn!(worker, "failed to remove {subdir:?} subdirectory: {err}");
                        *ok = false;
                    }
                }
            };

            task_log!(worker, "Deleting datastore data...");
            remove("ns", &mut ok); // ns first
            remove("ct", &mut ok);
            remove("vm", &mut ok);
            remove("host", &mut ok);

            if ok {
                if let Err(err) = std::fs::remove_file(base.join(".gc-status")) {
                    if err.kind() != io::ErrorKind::NotFound {
                        task_warn!(worker, "failed to remove .gc-status file: {err}");
                        ok = false;
                    }
                }
            }

            // chunks get removed last and only if the backups were successfully deleted
            if ok {
                remove(".chunks", &mut ok);
            }
        }

        // now the config
        if ok {
            task_log!(worker, "Removing datastore from config...");
            let _lock = pbs_config::datastore::lock_config()?;
            let _ = config.sections.remove(name);
            pbs_config::datastore::save_config(&config)?;
        }

        // finally the lock & toplevel directory
        if destroy_data {
            if ok {
                if let Err(err) = std::fs::remove_file(base.join(".lock")) {
                    if err.kind() != io::ErrorKind::NotFound {
                        task_warn!(worker, "failed to remove .lock file: {err}");
                        ok = false;
                    }
                }
            }

            if ok {
                task_log!(worker, "Finished deleting data.");

                match std::fs::remove_dir(base) {
                    Ok(()) => task_log!(worker, "Removed empty datastore directory."),
                    Err(err) if err.kind() == io::ErrorKind::NotFound => {
                        // weird, but ok
                    }
                    Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => {
                        task_warn!(
                            worker,
                            "Cannot delete datastore directory (is it a mount point?)."
                        )
                    }
                    Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => {
                        task_warn!(worker, "Datastore directory not empty, not deleting.")
                    }
                    Err(err) => {
                        task_warn!(worker, "Failed to remove datastore directory: {err}");
                    }
                }
            } else {
                task_log!(worker, "There were errors deleting data.");
            }
        }

        Ok(())
    }
}