use std::collections::{HashMap, HashSet};
use std::io::{self, Write};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, LazyLock, Mutex};

use anyhow::{bail, format_err, Error};
use nix::unistd::{unlinkat, UnlinkatFlags};
use tracing::{info, warn};

use proxmox_human_byte::HumanByte;
use proxmox_schema::ApiType;

use proxmox_sys::error::SysError;
use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use proxmox_sys::linux::procfs::MountInfo;
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_worker_task::WorkerTaskContext;

use pbs_api_types::{
    ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder,
    DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus,
    MaintenanceMode, MaintenanceType, Operation, UPID,
};

use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
use crate::index::IndexFile;
use crate::task_tracking::{self, update_active_operations};

static DATASTORE_MAP: LazyLock<Mutex<HashMap<String, Arc<DataStoreImpl>>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));

/// Checks if `auth_id` is the owner or, if the owner is a token, if
/// `auth_id` is the user of that token.
pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
    let correct_owner = owner == auth_id
        || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
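
// Illustrative sketch (hypothetical values, not taken from the surrounding code): a group
// owned by an API token may still be accessed by the token's user, which is exactly what
// `check_backup_owner` allows.
//
//     let owner: Authid = "backup@pbs!mytoken".parse()?; // group owned by an API token
//     let auth_id: Authid = "backup@pbs".parse()?;       // the token's user
//     // passes: the owner is a token and `auth_id` is that token's user
//     check_backup_owner(&owner, &auth_id)?;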

/// Check if a device with a given UUID is currently mounted at store_mount_point by
/// comparing the `st_rdev` values of `/dev/disk/by-uuid/<uuid>` and the source device in
/// /proc/self/mountinfo.
///
/// If we can't check whether it is mounted, we treat that as not mounted.
///
/// Reasons it could fail other than not being mounted where expected:
/// - could not read `/proc/self/mountinfo`
/// - could not stat `/dev/disk/by-uuid/<uuid>`
/// - `/dev/disk/by-uuid/<uuid>` is not a block device
///
/// Since these are very much out of our control, there is no real value in distinguishing
/// between them, so for this function they are all treated as 'device not mounted'.
fn is_datastore_mounted_at(store_mount_point: String, device_uuid: &str) -> bool {
    use nix::sys::stat::SFlag;

    let store_mount_point = Path::new(&store_mount_point);

    let dev_node = match nix::sys::stat::stat(format!("/dev/disk/by-uuid/{device_uuid}").as_str()) {
        Ok(stat) if SFlag::from_bits_truncate(stat.st_mode) == SFlag::S_IFBLK => stat.st_rdev,
        _ => return false,
    };

    let Ok(mount_info) = MountInfo::read() else {
        return false;
    };

    for (_, entry) in mount_info {
        let Some(source) = entry.mount_source else {
            continue;
        };

        if entry.mount_point != store_mount_point || !source.as_bytes().starts_with(b"/") {
            continue;
        }

        if let Ok(stat) = nix::sys::stat::stat(source.as_os_str()) {
            let sflag = SFlag::from_bits_truncate(stat.st_mode);

            if sflag == SFlag::S_IFBLK && stat.st_rdev == dev_node {
                return true;
            }
        }
    }

    false
}

pub fn get_datastore_mount_status(config: &DataStoreConfig) -> Option<bool> {
    let device_uuid = config.backing_device.as_ref()?;
    Some(is_datastore_mounted_at(config.absolute_path(), device_uuid))
}

pub fn ensure_datastore_is_mounted(config: &DataStoreConfig) -> Result<(), Error> {
    match get_datastore_mount_status(config) {
        Some(true) => Ok(()),
        Some(false) => Err(format_err!("Datastore '{}' is not mounted", config.name)),
        // no backing device configured, nothing to check
        None => Ok(()),
    }
}
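
// Illustrative sketch of the mount-status helpers above: `None` means the datastore has no
// `backing_device` configured (nothing to check), `Some(false)` means a removable datastore
// is currently not mounted. `store_config` is a hypothetical `DataStoreConfig` value.
//
//     match get_datastore_mount_status(&store_config) {
//         None => println!("not a removable datastore"),
//         Some(true) => println!("backing device is mounted"),
//         Some(false) => println!("backing device is NOT mounted"),
//     }
//     // or simply fail early:
//     ensure_datastore_is_mounted(&store_config)?;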

/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backups.
pub struct DataStoreImpl {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<()>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
    chunk_order: ChunkOrder,
    last_digest: Option<[u8; 32]>,
    sync_level: DatastoreFSyncLevel,
}

impl DataStoreImpl {
    // This one just panics on everything
    pub(crate) unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
            verify_new: false,
            chunk_order: Default::default(),
            last_digest: None,
            sync_level: Default::default(),
        })
    }
}

pub struct DataStore {
    inner: Arc<DataStoreImpl>,
    operation: Option<Operation>,
}

impl Clone for DataStore {
    fn clone(&self) -> Self {
        let mut new_operation = self.operation;
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, 1) {
                log::error!("could not update active operations - {}", e);
                new_operation = None;
            }
        }

        Self {
            inner: self.inner.clone(),
            operation: new_operation,
        }
    }
}

impl Drop for DataStore {
    fn drop(&mut self) {
        if let Some(operation) = self.operation {
            let mut last_task = false;
            match update_active_operations(self.name(), operation, -1) {
                Err(e) => log::error!("could not update active operations - {}", e),
                Ok(updated_operations) => {
                    last_task = updated_operations.read + updated_operations.write == 0;
                }
            }

            // remove datastore from cache iff
            //  - last task finished, and
            //  - datastore is in a maintenance mode that mandates it
            let remove_from_cache = last_task
                && pbs_config::datastore::config()
                    .and_then(|(s, _)| s.lookup::<DataStoreConfig>("datastore", self.name()))
                    .map_or(false, |c| {
                        c.get_maintenance_mode()
                            .map_or(false, |m| m.clear_from_cache())
                    });

            if remove_from_cache {
                DATASTORE_MAP.lock().unwrap().remove(self.name());
            }
        }
    }
}

impl DataStore {
    // This one just panics on everything
    pub(crate) unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            inner: unsafe { DataStoreImpl::new_test() },
            operation: None,
        })
    }

    pub fn lookup_datastore(
        name: &str,
        operation: Option<Operation>,
    ) -> Result<Arc<DataStore>, Error> {
        // Avoid TOCTOU between checking maintenance mode and updating active operation counter, as
        // we use it to decide whether it is okay to delete the datastore.
        let _config_lock = pbs_config::datastore::lock_config()?;

        // we could use the ConfigVersionCache's generation for staleness detection, but we load
        // the config anyway -> just use digest, additional benefit: manual changes get detected
        let (config, digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;

        if let Some(maintenance_mode) = config.get_maintenance_mode() {
            if let Err(error) = maintenance_mode.check(operation) {
                bail!("datastore '{name}' is unavailable: {error}");
            }
        }

        if get_datastore_mount_status(&config) == Some(false) {
            let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
            datastore_cache.remove(&config.name);
            bail!("datastore '{}' is not mounted", config.name);
        }

        let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
        let entry = datastore_cache.get(name);

        // reuse chunk store so that we keep using the same process locker instance!
        let chunk_store = if let Some(datastore) = &entry {
            let last_digest = datastore.last_digest.as_ref();
            if let Some(true) = last_digest.map(|last_digest| last_digest == &digest) {
                if let Some(operation) = operation {
                    update_active_operations(name, operation, 1)?;
                }
                return Ok(Arc::new(Self {
                    inner: Arc::clone(datastore),
                    operation,
                }));
            }
            Arc::clone(&datastore.chunk_store)
        } else {
            let tuning: DatastoreTuning = serde_json::from_value(
                DatastoreTuning::API_SCHEMA
                    .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
            )?;
            Arc::new(ChunkStore::open(
                name,
                config.absolute_path(),
                tuning.sync_level.unwrap_or_default(),
            )?)
        };

        let datastore = DataStore::with_store_and_config(chunk_store, config, Some(digest))?;

        let datastore = Arc::new(datastore);
        datastore_cache.insert(name.to_string(), datastore.clone());

        if let Some(operation) = operation {
            update_active_operations(name, operation, 1)?;
        }

        Ok(Arc::new(Self {
            inner: datastore,
            operation,
        }))
    }
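
    // Illustrative sketch: looking up a datastore for reading. Passing an `Operation`
    // increments the active-operation counter for that store; the matching decrement
    // happens in `Drop`. "store1" is a hypothetical datastore name.
    //
    //     let datastore = DataStore::lookup_datastore("store1", Some(Operation::Read))?;
    //     // ... use `datastore` ...
    //     drop(datastore); // decrements the active read-operation count again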

    /// Removes all datastores that are not configured anymore
    pub fn remove_unused_datastores() -> Result<(), Error> {
        let (config, _digest) = pbs_config::datastore::config()?;

        let mut map = DATASTORE_MAP.lock().unwrap();
        // removes all elements that are not in the config
        map.retain(|key, _| config.sections.contains_key(key));
        Ok(())
    }

    /// Trigger clearing of the cache entry based on maintenance mode. The entry will only
    /// be cleared if there is no other task running; if there is, the end of the
    /// last running task will trigger the clearing of the cache entry.
    pub fn update_datastore_cache(name: &str) -> Result<(), Error> {
        let (config, _digest) = pbs_config::datastore::config()?;
        let datastore: DataStoreConfig = config.lookup("datastore", name)?;
        if datastore
            .get_maintenance_mode()
            .map_or(false, |m| m.clear_from_cache())
        {
            // the datastore drop handler does the checking if tasks are running and clears the
            // cache entry, so we just have to trigger it here
            let _ = DataStore::lookup_datastore(name, Some(Operation::Lookup));
        }

        Ok(())
    }

    /// Open a raw database given a name and a path.
    ///
    /// # Safety
    /// See the safety section in `open_from_config`
    pub unsafe fn open_path(
        name: &str,
        path: impl AsRef<Path>,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let path = path
            .as_ref()
            .to_str()
            .ok_or_else(|| format_err!("non-utf8 paths not supported"))?
            .to_owned();
        unsafe { Self::open_from_config(DataStoreConfig::new(name.to_owned(), path), operation) }
    }

    /// Open a datastore given a raw configuration.
    ///
    /// # Safety
    /// There's no memory safety implication, but as this is opening a new ChunkStore it will
    /// create a new process locker instance, potentially on the same path as existing safely
    /// created ones. This is dangerous as dropping the reference of this and thus the underlying
    /// chunkstore's process locker will close all locks from our process on the config.path,
    /// breaking guarantees we need to uphold for safe long backup + GC interaction on newer/older
    /// process instances (from package update).
    unsafe fn open_from_config(
        config: DataStoreConfig,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let name = config.name.clone();

        ensure_datastore_is_mounted(&config)?;

        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;
        let chunk_store = ChunkStore::open(
            &name,
            config.absolute_path(),
            tuning.sync_level.unwrap_or_default(),
        )?;
        let inner = Arc::new(Self::with_store_and_config(
            Arc::new(chunk_store),
            config,
            None,
        )?);

        if let Some(operation) = operation {
            update_active_operations(&name, operation, 1)?;
        }

        Ok(Arc::new(Self { inner, operation }))
    }

    fn with_store_and_config(
        chunk_store: Arc<ChunkStore>,
        config: DataStoreConfig,
        last_digest: Option<[u8; 32]>,
    ) -> Result<DataStoreImpl, Error> {
        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    log::error!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;

        Ok(DataStoreImpl {
            chunk_store,
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
            chunk_order: tuning.chunk_order.unwrap_or_default(),
            last_digest,
            sync_level: tuning.sync_level.unwrap_or_default(),
        })
    }

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.inner.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<FixedIndexWriter, Error> {
        let index = FixedIndexWriter::create(
            self.inner.chunk_store.clone(),
            filename.as_ref(),
            size,
            chunk_size,
        )?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<FixedIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexWriter, Error> {
        let index = DynamicIndexWriter::create(self.inner.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> = match ArchiveType::from_path(filename)? {
            ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
            ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
            _ => bail!("cannot open index file of unknown type: {:?}", filename),
        };
        Ok(out)
    }
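
    // Illustrative sketch: `open_index` dispatches on the archive type derived from the file
    // name, so `.fidx` and `.didx` files can be handled uniformly through the `IndexFile`
    // trait object. The path below is hypothetical.
    //
    //     let index = datastore.open_index("vm/100/2024-01-01T00:00:00Z/drive-scsi0.img.fidx")?;
    //     println!("index references {} chunks", index.index_count());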

    /// Fast index verification - only checks if chunks exist
    pub fn fast_index_verification(
        &self,
        index: &dyn IndexFile,
        checked: &mut HashSet<[u8; 32]>,
    ) -> Result<(), Error> {
        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            if checked.contains(&info.digest) {
                continue;
            }

            self.stat_chunk(&info.digest).map_err(|err| {
                format_err!(
                    "fast_index_verification error, stat_chunk {} failed - {}",
                    hex::encode(info.digest),
                    err,
                )
            })?;

            checked.insert(info.digest);
        }

        Ok(())
    }

    pub fn name(&self) -> &str {
        self.inner.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.inner.chunk_store.base_path()
    }

    /// Returns the absolute path for a backup namespace on this datastore
    pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
        let mut path = self.base_path();
        path.reserve(ns.path_len());
        for part in ns.components() {
            path.push("ns");
            path.push(part);
        }
        path
    }

    /// Returns the absolute path for a backup_type
    pub fn type_path(&self, ns: &BackupNamespace, backup_type: BackupType) -> PathBuf {
        let mut full_path = self.namespace_path(ns);
        full_path.push(backup_type.to_string());
        full_path
    }

    /// Returns the absolute path for a backup_group
    pub fn group_path(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> PathBuf {
        let mut full_path = self.namespace_path(ns);
        full_path.push(backup_group.to_string());
        full_path
    }

    /// Returns the absolute path for a backup_dir
    pub fn snapshot_path(
        &self,
        ns: &BackupNamespace,
        backup_dir: &pbs_api_types::BackupDir,
    ) -> PathBuf {
        let mut full_path = self.namespace_path(ns);
        full_path.push(backup_dir.to_string());
        full_path
    }

    /// Create a backup namespace.
    pub fn create_namespace(
        self: &Arc<Self>,
        parent: &BackupNamespace,
        name: String,
    ) -> Result<BackupNamespace, Error> {
        if !self.namespace_exists(parent) {
            bail!("cannot create new namespace, parent {parent} doesn't exist");
        }

        // construct ns before mkdir to enforce max-depth and name validity
        let ns = BackupNamespace::from_parent_ns(parent, name)?;

        let mut ns_full_path = self.base_path();
        ns_full_path.push(ns.path());

        std::fs::create_dir_all(ns_full_path)?;

        Ok(ns)
    }

    /// Returns true if the given namespace exists on the datastore
    pub fn namespace_exists(&self, ns: &BackupNamespace) -> bool {
        let mut path = self.base_path();
        path.push(ns.path());
        path.exists()
    }

    /// Remove all backup groups of a single namespace level but not the namespace itself.
    ///
    /// Does *not* descend into child-namespaces and doesn't remove the namespace itself either.
    ///
    /// Returns a tuple with the first item being true if all the groups were removed, and false if some were protected.
    /// The second item returns the remove statistics.
    pub fn remove_namespace_groups(
        self: &Arc<Self>,
        ns: &BackupNamespace,
    ) -> Result<(bool, BackupGroupDeleteStats), Error> {
        // FIXME: locking? The single groups/snapshots are already protected, so may not be
        // necessary (depends on what we all allow to do with namespaces)
        log::info!("removing all groups in namespace {}:/{ns}", self.name());

        let mut removed_all_groups = true;
        let mut stats = BackupGroupDeleteStats::default();

        for group in self.iter_backup_groups(ns.to_owned())? {
            let delete_stats = group?.destroy()?;
            stats.add(&delete_stats);
            removed_all_groups = removed_all_groups && delete_stats.all_removed();
        }

        let base_file = std::fs::File::open(self.base_path())?;
        let base_fd = base_file.as_raw_fd();
        for ty in BackupType::iter() {
            let mut ty_dir = ns.path();
            ty_dir.push(ty.to_string());
            // best effort only, but we probably should log the error
            if let Err(err) = unlinkat(Some(base_fd), &ty_dir, UnlinkatFlags::RemoveDir) {
                if err != nix::errno::Errno::ENOENT {
                    log::error!("failed to remove backup type {ty} in {ns} - {err}");
                }
            }
        }

        Ok((removed_all_groups, stats))
    }

    /// Remove a complete backup namespace optionally including all its, and child namespaces',
    /// groups. If `delete_groups` is false this only prunes empty namespaces.
    ///
    /// Returns true if everything requested was removed, and false if some groups were protected
    /// or if some namespaces weren't empty even though all groups were deleted (race with new backup)
    pub fn remove_namespace_recursive(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        delete_groups: bool,
    ) -> Result<(bool, BackupGroupDeleteStats), Error> {
        let store = self.name();
        let mut removed_all_requested = true;
        let mut stats = BackupGroupDeleteStats::default();
        if delete_groups {
            log::info!("removing whole namespace recursively below {store}:/{ns}",);
            for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
                let (removed_ns_groups, delete_stats) = self.remove_namespace_groups(&ns?)?;
                stats.add(&delete_stats);
                removed_all_requested = removed_all_requested && removed_ns_groups;
            }
        } else {
            log::info!("pruning empty namespace recursively below {store}:/{ns}");
        }

        // now try to delete the actual namespaces, bottom up so that we can use safe rmdir that
        // will choke if a new backup/group appeared in the meantime (but not on a new empty NS)
        let mut children = self
            .recursive_iter_backup_ns(ns.to_owned())?
            .collect::<Result<Vec<BackupNamespace>, Error>>()?;

        children.sort_by_key(|b| std::cmp::Reverse(b.depth()));

        let base_file = std::fs::File::open(self.base_path())?;
        let base_fd = base_file.as_raw_fd();

        for ns in children.iter() {
            let mut ns_dir = ns.path();
            ns_dir.push("ns");
            let _ = unlinkat(Some(base_fd), &ns_dir, UnlinkatFlags::RemoveDir);

            if !ns.is_root() {
                match unlinkat(Some(base_fd), &ns.path(), UnlinkatFlags::RemoveDir) {
                    Ok(()) => log::debug!("removed namespace {ns}"),
                    Err(nix::errno::Errno::ENOENT) => {
                        log::debug!("namespace {ns} already removed")
                    }
                    Err(nix::errno::Errno::ENOTEMPTY) if !delete_groups => {
                        removed_all_requested = false;
                        log::debug!("skip removal of non-empty namespace {ns}")
                    }
                    Err(err) => {
                        removed_all_requested = false;
                        log::warn!("failed to remove namespace {ns} - {err}")
                    }
                }
            }
        }

        Ok((removed_all_requested, stats))
    }

    /// Remove a complete backup group including all snapshots.
    ///
    /// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
    /// and the number of protected snapshots, which therefore were not removed.
    pub fn remove_backup_group(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<BackupGroupDeleteStats, Error> {
        let backup_group = self.backup_group(ns.clone(), backup_group.clone());

        backup_group.destroy()
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        backup_dir: &pbs_api_types::BackupDir,
        force: bool,
    ) -> Result<(), Error> {
        let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;

        backup_dir.destroy(force)
    }

    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(
        self: &Arc<Self>,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<Option<i64>, Error> {
        let backup_group = self.backup_group(ns.clone(), backup_group.clone());

        let group_path = backup_group.full_group_path();

        if group_path.exists() {
            backup_group.last_successful_backup()
        } else {
            Ok(None)
        }
    }

    /// Return the path of the 'owner' file.
    fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
        self.group_path(ns, group).join("owner")
    }

    /// Returns the backup owner.
    ///
    /// The backup owner is the entity who first created the backup group.
    pub fn get_owner(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<Authid, Error> {
        let full_path = self.owner_path(ns, backup_group);
        let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
        owner
            .trim_end() // remove trailing newline
            .parse()
            .map_err(|err| format_err!("parsing owner for {backup_group} failed: {err}"))
    }

    /// Returns whether `auth_id` passes the owner check for the given backup group.
    pub fn owns_backup(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<bool, Error> {
        let owner = self.get_owner(ns, backup_group)?;

        Ok(check_backup_owner(&owner, auth_id).is_ok())
    }

    /// Set the backup owner.
    pub fn set_owner(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let path = self.owner_path(ns, backup_group);

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            open_options.create_new(true);
        }

        let mut file = open_options
            .open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }

    /// Create (if it does not already exist) and lock a backup group
    ///
    /// And set the owner to 'userid'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        ns: &BackupNamespace,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        for ns in ns.components() {
            full_path.push("ns");
            full_path.push(ns);
        }
        full_path.push(backup_group.ty.as_str());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(&backup_group.id);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                self.set_owner(ns, backup_group, auth_id, false)?;
                let owner = self.get_owner(ns, backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                let owner = self.get_owner(ns, backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }
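
    // Illustrative sketch: creating (or re-locking) a backup group before writing a new
    // snapshot. If the group is new, `auth_id` becomes its owner; otherwise the existing
    // owner is returned and the caller can verify it. All values below are hypothetical.
    //
    //     let (owner, _guard) =
    //         datastore.create_locked_backup_group(&ns, &backup_group, &auth_id)?;
    //     check_backup_owner(&owner, &auth_id)?;
    //     // the exclusive directory lock is held as long as `_guard` lives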

    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_locked_backup_dir(
        &self,
        ns: &BackupNamespace,
        backup_dir: &pbs_api_types::BackupDir,
    ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
        let full_path = self.snapshot_path(ns, backup_dir);
        let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
            format_err!(
                "failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
            )
        })?;

        let lock = || {
            lock_dir_noblock(
                &full_path,
                "snapshot",
                "internal error - tried creating snapshot that's already in use",
            )
        };

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
                Ok((relative_path.to_owned(), false, lock()?))
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Get a streaming iter over single-level backup namespaces of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_ns(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListNamespaces, Error> {
        ListNamespaces::new(Arc::clone(self), ns)
    }

    /// Get a streaming iter over single-level backup namespaces of a datastore, filtered by Ok
    /// results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn iter_backup_ns_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
        let this = Arc::clone(self);
        Ok(
            ListNamespaces::new(Arc::clone(self), ns)?.filter_map(move |ns| match ns {
                Ok(ns) => Some(ns),
                Err(err) => {
                    log::error!("list groups error on datastore {} - {}", this.name(), err);
                    None
                }
            }),
        )
    }

    /// Get a streaming iter over all backup namespaces of a datastore, recursively
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn recursive_iter_backup_ns(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListNamespacesRecursive, Error> {
        ListNamespacesRecursive::new(Arc::clone(self), ns)
    }

    /// Get a streaming iter over all backup namespaces of a datastore, recursively, filtered by
    /// Ok results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn recursive_iter_backup_ns_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        max_depth: Option<usize>,
    ) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
        let this = Arc::clone(self);
        Ok(if let Some(depth) = max_depth {
            ListNamespacesRecursive::new_max_depth(Arc::clone(self), ns, depth)?
        } else {
            ListNamespacesRecursive::new(Arc::clone(self), ns)?
        }
        .filter_map(move |ns| match ns {
            Ok(ns) => Some(ns),
            Err(err) => {
                log::error!("list groups error on datastore {} - {}", this.name(), err);
                None
            }
        }))
    }

    /// Get a streaming iter over top-level backup groups of a datastore of a particular type.
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_type(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        ty: BackupType,
    ) -> Result<ListGroupsType, Error> {
        ListGroupsType::new(Arc::clone(self), ns, ty)
    }

    /// Get a streaming iter over top-level backup groups of a datastore of a particular type,
    /// filtered by `Ok` results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn iter_backup_type_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
        ty: BackupType,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        Ok(self.iter_backup_type(ns, ty)?.ok())
    }

    /// Get a streaming iter over top-level backup groups of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_groups(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<ListGroups, Error> {
        ListGroups::new(Arc::clone(self), ns)
    }

    /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results
    ///
    /// The iterated item's result is already unwrapped, if it contained an error it will be
    /// logged. Can be useful in iterator chain commands
    pub fn iter_backup_groups_ok(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        Ok(self.iter_backup_groups(ns)?.ok())
    }

    /// Get an in-memory vector of all top-level backup groups of a datastore
    ///
    /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
    pub fn list_backup_groups(
        self: &Arc<DataStore>,
        ns: BackupNamespace,
    ) -> Result<Vec<BackupGroup>, Error> {
        ListGroups::new(Arc::clone(self), ns)?.collect()
    }
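
    // Illustrative sketch: streaming iteration over groups is usually preferable to collecting
    // them into a vector. `datastore` and `ns` are hypothetical values.
    //
    //     for group in datastore.iter_backup_groups_ok(ns.clone())? {
    //         // `group()` yields the `pbs_api_types::BackupGroup` (backup type + id)
    //         println!("found group {}", group.group());
    //     }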

    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry
                .file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            // first, extract the actual IO error and the affected path
            let (inner, path) = match (err.io_error(), err.path()) {
                (None, _) => return Ok(()), // not an IO-error
                (Some(inner), Some(path)) => (inner, path),
                (Some(inner), None) => bail!("unexpected error on datastore traversal: {inner}"),
            };
            if inner.kind() == io::ErrorKind::PermissionDenied {
                if err.depth() <= 1 && path.ends_with("lost+found") {
                    // allow skipping of (root-only) ext4 fsck-directory on EPERM ..
                    return Ok(());
                }
                // .. but do not ignore EPERM in general, otherwise we might prune too many chunks.
                // E.g., if users messed up with owner/perms on a rsync
                bail!("cannot continue garbage-collection safely, permission denied on: {path:?}");
            } else if inner.kind() == io::ErrorKind::NotFound {
                log::info!("ignoring vanished file: {path:?}");
                Ok(())
            } else {
                bail!("unexpected error on datastore traversal: {inner} - {path:?}");
            }
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue;
                }
            };
            if let Ok(archive_type) = ArchiveType::from_path(&path) {
                if archive_type == ArchiveType::FixedIndex
                    || archive_type == ArchiveType::DynamicIndex
                {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    // mark chunks used by ``index`` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                let hex = hex::encode(digest);
                warn!(
                    "warning: unable to access non-existent chunk {hex}, required by {file_name:?}"
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.inner.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }

    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;

            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if pbs_api_types::parse_ns_and_snapshot(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = ArchiveType::from_path(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                info!(
                    "marked {percentage}% ({} of {image_count} index files)",
                    i + 1,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            info!(
                "found (and marked) {strange_paths_count} index files outside of expected directory scheme"
            );
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.inner.last_gc_status.lock().unwrap().clone()
    }

    pub fn garbage_collection_running(&self) -> bool {
        self.inner.gc_mutex.try_lock().is_err()
    }

    pub fn garbage_collection(
        &self,
        worker: &dyn WorkerTaskContext,
        upid: &UPID,
    ) -> Result<(), Error> {
        if let Ok(ref mut _mutex) = self.inner.gc_mutex.try_lock() {
            // avoids that we run GC if an old daemon process has still a
            // running backup writer, which is not safe as we have no "oldest
            // writer" information and thus no safe atime cutoff
            let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;

            let (config, _digest) = pbs_config::datastore::config()?;
            let gc_store_config: DataStoreConfig = config.lookup("datastore", self.name())?;
            let all_stores = config.convert_to_typed_array("datastore")?;
            if let Err(err) = gc_store_config.ensure_not_nested(&all_stores) {
                info!(
                    "Current datastore path: {path}",
                    path = gc_store_config.absolute_path()
                );
                bail!("Aborting GC for safety reasons: {err}");
            }

            let phase1_start_time = proxmox_time::epoch_i64();
            let oldest_writer = self
                .inner
                .chunk_store
                .oldest_writer()
                .unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus {
                upid: Some(upid.to_string()),
                ..Default::default()
            };

            info!("Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            info!("Start GC phase2 (sweep unused chunks)");
            self.inner.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            info!(
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            info!("Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                info!(
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                info!("Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                info!("Leftover bad chunks: {}", gc_status.still_bad);
            }

            info!(
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per =
                    (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                info!(
                    "On-Disk usage: {} ({comp_per:.2}%)",
                    HumanByte::from(gc_status.disk_bytes)
                );
            }

            info!("On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            info!("Deduplication factor: {deduplication_factor:.2}");

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                info!("Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options, false);
            }

            *self.inner.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }

    pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
        self.inner.chunk_store.try_shared_lock()
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.inner.chunk_store.chunk_path(digest)
    }

    pub fn cond_touch_chunk(&self, digest: &[u8; 32], assert_exists: bool) -> Result<bool, Error> {
        self.inner
            .chunk_store
            .cond_touch_chunk(digest, assert_exists)
    }

    pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
        self.inner.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
        let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
        std::fs::metadata(chunk_path).map_err(Error::from)
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let (chunk_path, digest_str) = self.inner.chunk_store.chunk_path(digest);

        proxmox_lang::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        })
        .map_err(|err| {
            format_err!(
                "store '{}', unable to load chunk '{}' - {}",
                self.name(),
                digest_str,
                err,
            )
        })
    }

    /// Updates the protection status of the specified snapshot.
    pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
        let full_path = backup_dir.full_path();

        if !full_path.exists() {
            bail!("snapshot {} does not exist!", backup_dir.dir());
        }

        let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;

        let protected_path = backup_dir.protected_file();
        if protection {
            std::fs::File::create(protected_path)
                .map_err(|err| format_err!("could not create protection file: {}", err))?;
        } else if let Err(err) = std::fs::remove_file(protected_path) {
            // ignore error for non-existing file
            if err.kind() != std::io::ErrorKind::NotFound {
                bail!("could not remove protection file: {}", err);
            }
        }

        Ok(())
    }

    pub fn verify_new(&self) -> bool {
        self.inner.verify_new
    }

    /// Returns a list of chunks sorted by their inode number on disk. Chunks that couldn't get
    /// stat'ed are placed at the end of the list.
    pub fn get_chunks_in_order<F, A>(
        &self,
        index: &(dyn IndexFile + Send),
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
    where
        F: Fn(&[u8; 32]) -> bool,
        A: Fn(usize) -> Result<(), Error>,
    {
        let index_count = index.index_count();
        let mut chunk_list = Vec::with_capacity(index_count);
        use std::os::unix::fs::MetadataExt;
        for pos in 0..index_count {
            check_abort(pos)?;

            let info = index.chunk_info(pos).unwrap();

            if skip_chunk(&info.digest) {
                continue;
            }

            let ino = match self.inner.chunk_order {
                ChunkOrder::Inode => {
                    match self.stat_chunk(&info.digest) {
                        Err(_) => u64::MAX, // could not stat, move to end of list
                        Ok(metadata) => metadata.ino(),
                    }
                }
                ChunkOrder::None => 0,
            };

            chunk_list.push((pos, ino));
        }

        match self.inner.chunk_order {
            // sorting by inode improves data locality, which makes it lots faster on spinners
            ChunkOrder::Inode => {
                chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b))
            }
            ChunkOrder::None => {}
        }

        Ok(chunk_list)
    }
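
    // Illustrative sketch: a verify-style consumer that reads chunks in inode order to reduce
    // seeking on rotational disks. `skip_chunk` and `check_abort` are caller supplied; here
    // both are hypothetical no-ops.
    //
    //     let chunk_order = datastore.get_chunks_in_order(
    //         &*index,
    //         |_digest| false, // skip nothing
    //         |_pos| Ok(()),   // never abort
    //     )?;
    //     for (pos, _inode) in chunk_order {
    //         let info = index.chunk_info(pos).unwrap();
    //         let _blob = datastore.load_chunk(&info.digest)?;
    //     }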

    /// Open a backup group from this datastore.
    pub fn backup_group(
        self: &Arc<Self>,
        ns: BackupNamespace,
        group: pbs_api_types::BackupGroup,
    ) -> BackupGroup {
        BackupGroup::new(Arc::clone(self), ns, group)
    }

    /// Open a backup group from this datastore.
    pub fn backup_group_from_parts<T>(
        self: &Arc<Self>,
        ns: BackupNamespace,
        ty: BackupType,
        id: T,
    ) -> BackupGroup
    where
        T: Into<String>,
    {
        self.backup_group(ns, (ty, id.into()).into())
    }

    /// Open a backup group from this datastore by backup group path such as `vm/100`.
    ///
    /// Convenience method for `store.backup_group(path.parse()?)`
    pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
        todo!("split out the namespace");
    }

    /// Open a snapshot (backup directory) from this datastore.
    pub fn backup_dir(
        self: &Arc<Self>,
        ns: BackupNamespace,
        dir: pbs_api_types::BackupDir,
    ) -> Result<BackupDir, Error> {
        BackupDir::with_group(self.backup_group(ns, dir.group), dir.time)
    }

    /// Open a snapshot (backup directory) from this datastore.
    pub fn backup_dir_from_parts<T>(
        self: &Arc<Self>,
        ns: BackupNamespace,
        ty: BackupType,
        id: T,
        time: i64,
    ) -> Result<BackupDir, Error>
    where
        T: Into<String>,
    {
        self.backup_dir(ns, (ty, id.into(), time).into())
    }

    /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
    pub fn backup_dir_with_rfc3339<T: Into<String>>(
        &self,
        group: BackupGroup,
        time_string: T,
    ) -> Result<BackupDir, Error> {
        BackupDir::with_rfc3339(group, time_string.into())
    }

    /// Open a snapshot (backup directory) from this datastore by a snapshot path.
    pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
        todo!("split out the namespace");
    }

    /// Syncs the filesystem of the datastore if 'sync_level' is set to
    /// [`DatastoreFSyncLevel::Filesystem`]. Uses syncfs(2).
    pub fn try_ensure_sync_level(&self) -> Result<(), Error> {
        if self.inner.sync_level != DatastoreFSyncLevel::Filesystem {
            return Ok(());
        }
        let file = std::fs::File::open(self.base_path())?;
        let fd = file.as_raw_fd();
        log::info!("syncing filesystem");
        if unsafe { libc::syncfs(fd) } < 0 {
            bail!("error during syncfs: {}", std::io::Error::last_os_error());
        }
        Ok(())
    }

    /// Destroy a datastore. This requires that there are no active operations on the datastore.
    ///
    /// This is a synchronous operation and should be run in a worker-thread.
    pub fn destroy(name: &str, destroy_data: bool) -> Result<(), Error> {
        let config_lock = pbs_config::datastore::lock_config()?;

        let (mut config, _digest) = pbs_config::datastore::config()?;
        let mut datastore_config: DataStoreConfig = config.lookup("datastore", name)?;

        datastore_config.set_maintenance_mode(Some(MaintenanceMode {
            ty: MaintenanceType::Delete,
            message: None,
        }))?;

        config.set_data(name, "datastore", &datastore_config)?;
        pbs_config::datastore::save_config(&config)?;
        drop(config_lock);

        let (operations, _lock) = task_tracking::get_active_operations_locked(name)?;

        if operations.read != 0 || operations.write != 0 {
            bail!("datastore is currently in use");
        }

        let base = PathBuf::from(datastore_config.absolute_path());

        let mut ok = true;
        if destroy_data {
            let remove = |subdir, ok: &mut bool| {
                if let Err(err) = std::fs::remove_dir_all(base.join(subdir)) {
                    if err.kind() != io::ErrorKind::NotFound {
                        warn!("failed to remove {subdir:?} subdirectory: {err}");
                        *ok = false;
                    }
                }
            };

            info!("Deleting datastore data...");
            remove("ns", &mut ok); // ns first
            remove("ct", &mut ok);
            remove("vm", &mut ok);
            remove("host", &mut ok);

            if ok {
                if let Err(err) = std::fs::remove_file(base.join(".gc-status")) {
                    if err.kind() != io::ErrorKind::NotFound {
                        warn!("failed to remove .gc-status file: {err}");
                        ok = false;
                    }
                }
            }

            // chunks get removed last and only if the backups were successfully deleted
            if ok {
                remove(".chunks", &mut ok);
            }
        }

        if ok {
            info!("Removing datastore from config...");
            let _lock = pbs_config::datastore::lock_config()?;
            let _ = config.sections.remove(name);
            pbs_config::datastore::save_config(&config)?;
        }

        // finally the lock & toplevel directory
        if destroy_data {
            if ok {
                if let Err(err) = std::fs::remove_file(base.join(".lock")) {
                    if err.kind() != io::ErrorKind::NotFound {
                        warn!("failed to remove .lock file: {err}");
                        ok = false;
                    }
                }
            }

            if ok {
                info!("Finished deleting data.");

                match std::fs::remove_dir(base) {
                    Ok(()) => info!("Removed empty datastore directory."),
                    Err(err) if err.kind() == io::ErrorKind::NotFound => {
                        // already gone
                    }
                    Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => {
                        if datastore_config.backing_device.is_none() {
                            warn!("Cannot delete datastore directory (is it a mount point?).")
                        }
                    }
                    Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => {
                        warn!("Datastore directory not empty, not deleting.")
                    }
                    Err(err) => {
                        warn!("Failed to remove datastore directory: {err}");
                    }
                }
            } else {
                info!("There were errors deleting data.");
            }
        }

        Ok(())
    }