use std::fmt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use const_format::concatcp;
use serde::{Deserialize, Serialize};
use proxmox_schema::{
    api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
    Schema, StringSchema, Updater, UpdaterType,
};
use crate::{
    Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, MaintenanceType, Userid,
    BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA,
    GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA,
    SNAPSHOT_PATH_REGEX_STR, UPID,
};
const_regex! {
    pub BACKUP_NAMESPACE_REGEX = concatcp!(r"^", BACKUP_NS_RE, r"$");

    pub BACKUP_TYPE_REGEX = concatcp!(r"^(", BACKUP_TYPE_RE, r")$");

    pub BACKUP_ID_REGEX = concatcp!(r"^", BACKUP_ID_RE, r"$");

    pub BACKUP_DATE_REGEX = concatcp!(r"^", BACKUP_TIME_RE, r"$");

    pub GROUP_PATH_REGEX = concatcp!(
        r"^(", BACKUP_TYPE_RE, ")/",
        r"(", BACKUP_ID_RE, r")$",
    );

    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";

    pub SNAPSHOT_PATH_REGEX = concatcp!(r"^", SNAPSHOT_PATH_REGEX_STR, r"$");
    pub GROUP_OR_SNAPSHOT_PATH_REGEX = concatcp!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR, r"$");

    pub DATASTORE_MAP_REGEX = concatcp!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR, r"=)?", PROXMOX_SAFE_ID_REGEX_STR, r"$");
}
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema();
pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();
pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX);
pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
    .format(&BACKUP_ID_FORMAT)
    .schema();
pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("vm", "Virtual Machine Backup"),
        EnumEntry::new("ct", "Container Backup"),
        EnumEntry::new("host", "Host Backup"),
    ]))
    .schema();
pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch).").schema();
pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
    .format(&BACKUP_GROUP_FORMAT)
    .schema();
/// The maximal, inclusive depth for namespaces from the root ns downwards
///
/// The datastore root namespace is at depth zero (0), so there are eight (8) levels in total.
pub const MAX_NAMESPACE_DEPTH: usize = 7;
pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256

pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.")
    .format(&BACKUP_NAMESPACE_FORMAT)
    .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256
    .schema();
pub const NS_MAX_DEPTH_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)")
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .default(MAX_NAMESPACE_DEPTH as isize)
        .schema();
pub const NS_MAX_DEPTH_REDUCED_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion, empty == automatic full recursion, namespace depths reduce maximum allowed value)")
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .schema();
pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();
pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
    .format(&CHUNK_DIGEST_FORMAT)
    .schema();
pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
    .format(&DATASTORE_MAP_FORMAT)
    .type_text("(<source>=)?<target>")
    .schema();
pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema();
pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of Datastore mappings (or single datastore), comma separated. \
    For example 'a=b,e' maps the source datastore 'a' to target 'b' and \
    all other sources to the default 'e'. If no default is given, only the \
    specified sources are mapped.",
)
.format(&ApiStringFormat::PropertyString(
    &DATASTORE_MAP_ARRAY_SCHEMA,
))
.schema();
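
// Added sketch (not part of the original source): what a single mapping entry
// accepts, namely a "source=target" pair or a bare default target.
#[cfg(test)]
mod datastore_map_regex_sketch {
    use super::*;

    #[test]
    fn accepts_pair_or_bare_target() {
        assert!(DATASTORE_MAP_REGEX.is_match("a=b"));
        assert!(DATASTORE_MAP_REGEX.is_match("e"));
        assert!(!DATASTORE_MAP_REGEX.is_match("a=b=c"));
    }
}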
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema =
    IntegerSchema::new("Number of daily backups to keep.").schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.").schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema =
    IntegerSchema::new("Number of backups to keep.").schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.").schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.").schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.").schema();
#[api()]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The order to sort chunks by
pub enum ChunkOrder {
    /// Iterate chunks in the index order
    None,
    /// Iterate chunks in inode order
    #[default]
    Inode,
}
#[api()]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The level of syncing that is done when writing into a datastore.
pub enum DatastoreFSyncLevel {
    /// No special fsync or syncfs calls are triggered. The system default dirty write back
    /// mechanism ensures that data gets flushed eventually via the `dirty_writeback_centisecs`
    /// and `dirty_expire_centisecs` kernel sysctls, defaulting to ~ 30s.
    ///
    /// This mode provides generally the best performance, as all write back can happen async,
    /// which reduces IO pressure.
    /// But it may cause data loss on power loss or system crash without any uninterruptible power
    /// supply.
    None,
    /// Triggers a fsync after writing any chunk on the datastore. While this can slow down
    /// backups significantly, depending on the underlying file system and storage used, it
    /// will ensure fine-grained consistency. Depending on the exact setup, there might be no
    /// benefits over the file system level sync, so if the setup allows it, you should prefer
    /// that one. Despite the possible negative impact in performance, it's the most consistent
    /// setting.
    File,
    /// Trigger a filesystem wide sync after all backup data got written but before finishing the
    /// task. This ensures that every finished backup is fully written back to storage
    /// while reducing the impact on many file systems in contrast to the file level sync.
    /// Depending on the setup, it might have a negative impact on unrelated write operations
    /// of the underlying filesystem, but it is generally a good compromise between performance
    /// and consistency.
    #[default]
    Filesystem,
}
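
// Added sketch (not part of the original source): with `rename_all =
// "lowercase"` the wire names are "none", "file" and "filesystem". Uses
// serde_plain, which this module already relies on for plain-string forms.
#[cfg(test)]
mod fsync_level_name_sketch {
    use super::*;

    #[test]
    fn serializes_to_lowercase_name() {
        let s = serde_plain::to_string(&DatastoreFSyncLevel::Filesystem).unwrap();
        assert_eq!(s, "filesystem");
    }
}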
#[api(
    properties: {
        "chunk-order": {
            type: ChunkOrder,
            optional: true,
        },
        "sync-level": {
            type: DatastoreFSyncLevel,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Datastore tuning options
pub struct DatastoreTuning {
    /// Iterate chunks in this order
    #[serde(skip_serializing_if = "Option::is_none")]
    pub chunk_order: Option<ChunkOrder>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_level: Option<DatastoreFSyncLevel>,
}
pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options")
    .format(&ApiStringFormat::PropertyString(
        &DatastoreTuning::API_SCHEMA,
    ))
    .schema();
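
// Added sketch (not part of the original source): tuning options travel as a
// property string; this mirrors the SchemaDeserializer pattern used by
// get_maintenance_mode() below. The concrete key names are assumptions based
// on the kebab-case renaming above.
#[cfg(test)]
mod tuning_property_string_sketch {
    use super::*;

    #[test]
    fn parses_property_string() {
        let tuning = DatastoreTuning::deserialize(proxmox_schema::de::SchemaDeserializer::new(
            "sync-level=filesystem,chunk-order=inode",
            &DatastoreTuning::API_SCHEMA,
        ))
        .expect("valid tuning property string");
        assert_eq!(tuning.sync_level, Some(DatastoreFSyncLevel::Filesystem));
        assert_eq!(tuning.chunk_order, Some(ChunkOrder::Inode));
    }
}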
#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        notify: {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        keep: {
            type: crate::KeepOptions,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
        tuning: {
            optional: true,
            schema: DATASTORE_TUNING_STRING_SCHEMA,
        },
        "maintenance-mode": {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    #[updater(skip)]
    pub name: String,

    #[updater(skip)]
    pub path: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_schedule: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub prune_schedule: Option<String>,

    #[serde(flatten)]
    pub keep: crate::KeepOptions,

    /// If enabled, all backups will be verified right after completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify_new: Option<bool>,

    /// Send job email notification to this user
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,

    /// Send notification only for job errors
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify: Option<String>,

    /// Datastore tuning options
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tuning: Option<String>,

    /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in double quotes
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance_mode: Option<String>,
}
impl DataStoreConfig {
    pub fn new(name: String, path: String) -> Self {
        Self {
            name,
            path,
            comment: None,
            gc_schedule: None,
            prune_schedule: None,
            keep: Default::default(),
            verify_new: None,
            notify_user: None,
            notify: None,
            tuning: None,
            maintenance_mode: None,
        }
    }
    pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> {
        self.maintenance_mode.as_ref().and_then(|str| {
            MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new(
                str,
                &MaintenanceMode::API_SCHEMA,
            ))
            .ok()
        })
    }
    pub fn set_maintenance_mode(&mut self, new_mode: Option<MaintenanceMode>) -> Result<(), Error> {
        let current_type = self.get_maintenance_mode().map(|mode| mode.ty);
        let new_type = new_mode.as_ref().map(|mode| mode.ty);

        match current_type {
            Some(MaintenanceType::ReadOnly) => { /* always OK */ }
            Some(MaintenanceType::Offline) => { /* always OK */ }
            Some(MaintenanceType::Delete) => {
                match new_type {
                    Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ }
                    _ => {
                        bail!("datastore is being deleted")
                    }
                }
            }
            None => { /* always OK */ }
        }

        let new_mode = match new_mode {
            Some(new_mode) => Some(
                proxmox_schema::property_string::PropertyString::new(new_mode)
                    .to_property_string()?,
            ),
            None => None,
        };

        self.maintenance_mode = new_mode;

        Ok(())
    }
}
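
// Added sketch (not part of the original source): the transition rules above
// let ReadOnly/Offline be changed freely, but a datastore in Delete mode only
// accepts another Delete. Assumes MaintenanceMode's public fields are `ty`
// and `message`.
#[cfg(test)]
mod maintenance_mode_transition_sketch {
    use super::*;

    #[test]
    fn delete_mode_cannot_be_cleared() {
        let mut config = DataStoreConfig::new("store1".to_string(), "/data/store1".to_string());
        config
            .set_maintenance_mode(Some(MaintenanceMode {
                ty: MaintenanceType::Delete,
                message: None,
            }))
            .expect("entering delete mode is allowed");
        assert!(config.set_maintenance_mode(None).is_err());
    }
}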
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        maintenance: {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
    pub store: String,
    pub comment: Option<String>,
    /// If the datastore is in maintenance mode, information about it
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance: Option<String>,
}
#[api(
    properties: {
        "filename": {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    pub filename: String,
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
    /// Verification was successful
    Ok,
    /// Verification reported one or more errors
    Failed,
}
#[api(
    properties: {
        upid: {
            type: UPID,
        },
        state: {
            type: VerifyState,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification.
    pub state: VerifyState,
}
/// A namespace provides a logical separation between backup groups from different domains
/// (cluster, sites, ...) where uniqueness cannot be guaranteed anymore. It allows users to share a
/// datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and
/// allows forming a hierarchy, for easier management and to avoid clashes between backup_ids.
///
/// NOTE: Namespaces are a logical boundary only, they do not provide a full secure separation as
/// the chunk store is still shared. So, users who do not trust each other must not share a
/// datastore.
///
/// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid
/// clashes with backup group IDs and future backup_types and to have a clean separation between
/// the namespace directories and the ones from a backup snapshot.
#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)]
pub struct BackupNamespace {
    /// The namespace subdirectories without the `ns/` intermediate directories.
    inner: Vec<String>,

    /// Cache the total length for efficiency.
    len: usize,
}
impl BackupNamespace {
    /// Returns a root namespace reference.
    pub const fn root() -> Self {
        Self {
            inner: Vec::new(),
            len: 0,
        }
    }

    /// True if this represents the root namespace.
    pub fn is_root(&self) -> bool {
        self.inner.is_empty()
    }

    /// Try to parse a string into a namespace.
    pub fn new(name: &str) -> Result<Self, Error> {
        let mut this = Self::root();

        if name.is_empty() {
            return Ok(this);
        }

        for name in name.split('/') {
            this.push(name.to_string())?;
        }
        Ok(this)
    }
    /// Try to parse a file path string (where each sub-namespace is separated by an `ns`
    /// subdirectory) into a valid namespace.
    pub fn from_path(mut path: &str) -> Result<Self, Error> {
        let mut this = Self::root();
        loop {
            match path.strip_prefix("ns/") {
                Some(next) => match next.find('/') {
                    Some(pos) => {
                        this.push(next[..pos].to_string())?;
                        path = &next[(pos + 1)..];
                    }
                    None => {
                        this.push(next.to_string())?;
                        break;
                    }
                },
                None if !path.is_empty() => {
                    bail!("invalid component in namespace path at {:?}", path);
                }
                None => break,
            }
        }
        Ok(this)
    }
    /// Create a new Namespace attached to `parent`.
    ///
    /// `name` must be a single level namespace ID, that is, no '/' is allowed.
    /// This rule also avoids confusion about whether the name is a NS or a NS-path.
    pub fn from_parent_ns(parent: &Self, name: String) -> Result<Self, Error> {
        let mut child = parent.to_owned();
        child.push(name)?;
        Ok(child)
    }
    /// Pop one level off the namespace hierarchy
    pub fn pop(&mut self) -> Option<String> {
        let dropped = self.inner.pop();
        if let Some(ref dropped) = dropped {
            self.len = self.len.saturating_sub(dropped.len() + 1);
        }
        dropped
    }
    /// Get the namespace parent as owned BackupNamespace
    pub fn parent(&self) -> Self {
        let mut parent = self.clone();
        parent.pop();
        parent
    }
    /// Create a new namespace directly from a vec.
    ///
    /// # Safety
    ///
    /// Invalid contents may lead to inaccessible backups.
    pub unsafe fn from_vec_unchecked(components: Vec<String>) -> Self {
        let mut this = Self {
            inner: components,
            len: 0,
        };
        this.recalculate_len();
        this
    }
    /// Recalculate the length.
    fn recalculate_len(&mut self) {
        self.len = self.inner.len().max(1) - 1; // a slash between each component
        for part in &self.inner {
            self.len += part.len();
        }
    }
    /// The hierarchical depth of the namespace, 0 means top-level.
    pub fn depth(&self) -> usize {
        self.inner.len()
    }

    /// The logical name and ID of the namespace.
    pub fn name(&self) -> String {
        self.to_string()
    }

    /// The actual relative backing path of the namespace on the datastore.
    pub fn path(&self) -> PathBuf {
        self.display_as_path().to_string().into()
    }

    /// Get the current namespace length.
    ///
    /// This includes separating slashes, but does not include the `ns/` intermediate directories.
    /// This is not the *path* length, but rather the length that would be produced via `.name()`.
    pub fn name_len(&self) -> usize {
        self.len
    }

    /// Get the current namespace path length.
    ///
    /// This includes the `ns/` subdirectory strings.
    pub fn path_len(&self) -> usize {
        self.name_len() + 3 * self.inner.len()
    }
    /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long.
    pub fn push(&mut self, subdir: String) -> Result<(), Error> {
        if subdir.contains('/') {
            bail!("namespace component contained a slash");
        }

        self.push_do(subdir)
    }
    /// Assumes `subdir` already does not contain any slashes.
    /// Performs remaining checks and updates the length.
    fn push_do(&mut self, subdir: String) -> Result<(), Error> {
        let depth = self.depth();
        // check for greater-or-equal to account for the subdir about to be added
        if depth >= MAX_NAMESPACE_DEPTH {
            bail!("namespace too deep, {depth} >= max {MAX_NAMESPACE_DEPTH}");
        }

        if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH {
            bail!("namespace length exceeded");
        }

        if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) {
            bail!("not a valid namespace component: {subdir}");
        }

        if !self.inner.is_empty() {
            self.len += 1; // separating slash
        }
        self.len += subdir.len();
        self.inner.push(subdir);
        Ok(())
    }
    /// Return an adapter which [`fmt::Display`]s as a path with `"ns/"` prefixes in front of every
    /// component.
    pub fn display_as_path(&self) -> BackupNamespacePath {
        BackupNamespacePath(self)
    }

    /// Iterate over the subdirectories.
    pub fn components(&self) -> impl Iterator<Item = &str> + '_ {
        self.inner.iter().map(String::as_str)
    }
    /// Map NS by replacing `source_prefix` with `target_prefix`
    pub fn map_prefix(
        &self,
        source_prefix: &BackupNamespace,
        target_prefix: &BackupNamespace,
    ) -> Result<Self, Error> {
        let suffix = self
            .inner
            .strip_prefix(&source_prefix.inner[..])
            .ok_or_else(|| {
                format_err!(
                    "Failed to map namespace - {source_prefix} is not a valid prefix of {self}",
                )
            })?;

        let mut new = target_prefix.clone();
        for item in suffix {
            new.push(item.clone())?;
        }
        Ok(new)
    }
    /// Check whether adding `depth` levels of sub-namespaces exceeds the max depth limit
    pub fn check_max_depth(&self, depth: usize) -> Result<(), Error> {
        let ns_depth = self.depth();
        if ns_depth + depth > MAX_NAMESPACE_DEPTH {
            bail!(
                "namespace '{self}'s depth and recursion depth exceed limit: {ns_depth} + {depth} > {MAX_NAMESPACE_DEPTH}",
            );
        }
        Ok(())
    }
    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        let mut path: Vec<&str> = vec!["datastore", store];

        if self.is_root() {
            return path;
        }

        path.extend(self.inner.iter().map(|comp| comp.as_str()));

        path
    }
    /// Check whether this namespace contains another namespace.
    ///
    /// If so, the depth is returned.
    ///
    /// # Examples
    /// ```
    /// # use pbs_api_types::BackupNamespace;
    /// let main: BackupNamespace = "a/b".parse().unwrap();
    /// let sub: BackupNamespace = "a/b/c/d".parse().unwrap();
    /// let other: BackupNamespace = "x/y".parse().unwrap();
    /// assert_eq!(main.contains(&main), Some(0));
    /// assert_eq!(main.contains(&sub), Some(2));
    /// assert_eq!(sub.contains(&main), None);
    /// assert_eq!(main.contains(&other), None);
    /// ```
    pub fn contains(&self, other: &BackupNamespace) -> Option<usize> {
        other
            .inner
            .strip_prefix(&self.inner[..])
            .map(|suffix| suffix.len())
    }
}
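
// Added test sketch (not part of the original source), tying the pieces above
// together: plain-name parsing, the on-disk "ns/..." form, and prefix
// remapping via map_prefix().
#[cfg(test)]
mod backup_namespace_sketch {
    use super::*;

    #[test]
    fn name_and_path_forms() {
        let ns = BackupNamespace::new("a/b").expect("valid namespace");
        assert_eq!(ns.name(), "a/b");
        assert_eq!(ns.display_as_path().to_string(), "ns/a/ns/b");
        assert_eq!(BackupNamespace::from_path("ns/a/ns/b").unwrap(), ns);
    }

    #[test]
    fn prefix_remapping() {
        let ns: BackupNamespace = "a/b/c".parse().unwrap();
        let source: BackupNamespace = "a".parse().unwrap();
        let target: BackupNamespace = "x/y".parse().unwrap();
        assert_eq!(ns.map_prefix(&source, &target).unwrap().name(), "x/y/b/c");
    }
}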
impl fmt::Display for BackupNamespace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use std::fmt::Write;

        let mut parts = self.inner.iter();
        if let Some(first) = parts.next() {
            f.write_str(first)?;
        }
        for part in parts {
            f.write_char('/')?;
            f.write_str(part)?;
        }
        Ok(())
    }
}
serde_plain::derive_deserialize_from_fromstr!(BackupNamespace, "valid backup namespace");

impl std::str::FromStr for BackupNamespace {
    type Err = Error;

    fn from_str(name: &str) -> Result<Self, Self::Err> {
        Self::new(name)
    }
}
serde_plain::derive_serialize_from_display!(BackupNamespace);

impl ApiType for BackupNamespace {
    const API_SCHEMA: Schema = BACKUP_NAMESPACE_SCHEMA;
}
/// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`].
///
/// This implements [`fmt::Display`] such that it includes the `ns/` subdirectory prefix in front of
/// every component.
pub struct BackupNamespacePath<'a>(&'a BackupNamespace);

impl fmt::Display for BackupNamespacePath<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut sep = "ns/";
        for part in &self.0.inner {
            f.write_str(sep)?;
            sep = "/ns/";
            f.write_str(part)?;
        }
        Ok(())
    }
}
#[api()]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Backup type.
pub enum BackupType {
    /// Virtual machines.
    Vm,

    /// Containers.
    Ct,

    /// Host backups.
    Host,
    // NOTE: if you add new types, don't forget to adapt the iter below!
}

impl BackupType {
    pub const fn as_str(&self) -> &'static str {
        match self {
            BackupType::Vm => "vm",
            BackupType::Ct => "ct",
            BackupType::Host => "host",
        }
    }

    /// We used to have alphabetical ordering here when this was a string.
    const fn order(self) -> u8 {
        match self {
            BackupType::Ct => 0,
            BackupType::Host => 1,
            BackupType::Vm => 2,
        }
    }
    pub fn iter() -> impl Iterator<Item = BackupType> + Send + Sync + Unpin + 'static {
        [BackupType::Vm, BackupType::Ct, BackupType::Host].into_iter()
    }
}
impl fmt::Display for BackupType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.as_str(), f)
    }
}
impl std::str::FromStr for BackupType {
    type Err = Error;

    /// Parse a backup type.
    fn from_str(ty: &str) -> Result<Self, Error> {
        Ok(match ty {
            "ct" => BackupType::Ct,
            "host" => BackupType::Host,
            "vm" => BackupType::Vm,
            _ => bail!("invalid backup type {ty:?}"),
        })
    }
}
impl std::cmp::Ord for BackupType {
    #[inline]
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.order().cmp(&other.order())
    }
}
impl std::cmp::PartialOrd for BackupType {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
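
// Added sketch (not part of the original source): `order()` above keeps the
// old alphabetical string ordering, so ct < host < vm.
#[cfg(test)]
mod backup_type_order_sketch {
    use super::*;

    #[test]
    fn keeps_alphabetical_order() {
        assert!(BackupType::Ct < BackupType::Host);
        assert!(BackupType::Host < BackupType::Vm);
        assert_eq!("vm".parse::<BackupType>().unwrap(), BackupType::Vm);
    }
}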
860 "backup-type": { type: BackupType }
,
861 "backup-id": { schema: BACKUP_ID_SCHEMA }
,
864 #[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
865 #[serde(rename_all = "kebab-case")]
866 /// A backup group (without a data store).
867 pub struct BackupGroup
{
869 #[serde(rename = "backup-type")]
873 #[serde(rename = "backup-id")]
878 pub fn new
<T
: Into
<String
>>(ty
: BackupType
, id
: T
) -> Self {
879 Self { ty, id: id.into() }
    pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
        use crate::FilterType;
        match &filter.filter_type {
            FilterType::Group(backup_group) => {
                match backup_group.parse::<BackupGroup>() {
                    Ok(group) => *self == group,
                    Err(_) => false, // shouldn't happen if value is schema-checked
                }
            }
            FilterType::BackupType(ty) => self.ty == *ty,
            FilterType::Regex(regex) => regex.is_match(&self.to_string()),
        }
    }
    pub fn apply_filters(&self, filters: &[GroupFilter]) -> bool {
        // since there will only be a few filters in the list, an extra iteration to get the
        // number of include filters should not be an issue
        let is_included = if filters.iter().filter(|f| !f.is_exclude).count() == 0 {
            true
        } else {
            filters
                .iter()
                .filter(|f| !f.is_exclude)
                .any(|filter| self.matches(filter))
        };

        is_included
            && !filters
                .iter()
                .filter(|f| f.is_exclude)
                .any(|filter| self.matches(filter))
    }
}
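
// Added sketch (not part of the original source): exercising apply_filters().
// The filter string syntax ("type:<ty>", "group:<group>", optionally prefixed
// with "exclude:") is an assumption based on this crate's GroupFilter FromStr
// implementation.
#[cfg(test)]
mod group_filter_sketch {
    use super::*;

    #[test]
    fn include_then_exclude() {
        let group: BackupGroup = "vm/100".parse().unwrap();
        let include: GroupFilter = "type:vm".parse().unwrap();
        assert!(group.apply_filters(&[include]));

        let include: GroupFilter = "type:vm".parse().unwrap();
        let exclude: GroupFilter = "exclude:group:vm/100".parse().unwrap();
        assert!(!group.apply_filters(&[include, exclude]));
    }
}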
impl AsRef<BackupGroup> for BackupGroup {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl From<(BackupType, String)> for BackupGroup {
    #[inline]
    fn from(data: (BackupType, String)) -> Self {
        Self {
            ty: data.0,
            id: data.1,
        }
    }
}
impl std::cmp::Ord for BackupGroup {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let type_order = self.ty.cmp(&other.ty);
        if type_order != std::cmp::Ordering::Equal {
            return type_order;
        }
        // try to compare IDs numerically
        let id_self = self.id.parse::<u64>();
        let id_other = other.id.parse::<u64>();
        match (id_self, id_other) {
            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
            _ => self.id.cmp(&other.id),
        }
    }
}
impl std::cmp::PartialOrd for BackupGroup {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}/{}", self.ty, self.id)
    }
}
impl std::str::FromStr for BackupGroup {
    type Err = Error;

    /// Parse a backup group.
    ///
    /// This parses strings like `vm/100`.
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        Ok(Self {
            ty: cap.get(1).unwrap().as_str().parse()?,
            id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }
}
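
// Added sketch (not part of the original source): round trip between the
// "<type>/<id>" string form and BackupGroup.
#[cfg(test)]
mod backup_group_parse_sketch {
    use super::*;

    #[test]
    fn parses_and_displays_group_path() {
        let group: BackupGroup = "vm/100".parse().expect("valid group path");
        assert_eq!(group.ty, BackupType::Vm);
        assert_eq!(group.id, "100");
        assert_eq!(group.to_string(), "vm/100");
    }
}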
984 "group": { type: BackupGroup }
,
985 "backup-time": { schema: BACKUP_TIME_SCHEMA }
,
988 /// Uniquely identify a Backup (relative to data store)
990 /// We also call this a backup snaphost.
991 #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
992 #[serde(rename_all = "kebab-case")]
993 pub struct BackupDir
{
996 pub group
: BackupGroup
,
998 /// Backup timestamp unix epoch.
999 #[serde(rename = "backup-time")]
impl AsRef<BackupGroup> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &BackupGroup {
        &self.group
    }
}

impl AsRef<BackupDir> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}
impl From<(BackupGroup, i64)> for BackupDir {
    fn from(data: (BackupGroup, i64)) -> Self {
        Self {
            group: data.0,
            time: data.1,
        }
    }
}

impl From<(BackupType, String, i64)> for BackupDir {
    fn from(data: (BackupType, String, i64)) -> Self {
        Self {
            group: (data.0, data.1).into(),
            time: data.2,
        }
    }
}
impl BackupDir {
    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
    where
        T: Into<String>,
    {
        let time = proxmox_time::parse_rfc3339(backup_time_string)?;
        let group = BackupGroup::new(ty, id.into());
        Ok(Self { group, time })
    }

    #[inline]
    pub fn ty(&self) -> BackupType {
        self.group.ty
    }

    #[inline]
    pub fn id(&self) -> &str {
        &self.group.id
    }
}
impl std::str::FromStr for BackupDir {
    type Err = Error;

    /// Parse a snapshot path.
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        BackupDir::with_rfc3339(
            cap.get(1).unwrap().as_str().parse()?,
            cap.get(2).unwrap().as_str(),
            cap.get(3).unwrap().as_str(),
        )
    }
}
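
// Added sketch (not part of the original source): parsing a full snapshot
// path and reading it back through Display.
#[cfg(test)]
mod backup_dir_parse_sketch {
    use super::*;

    #[test]
    fn parses_snapshot_path() {
        let dir: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse().expect("valid path");
        assert_eq!(dir.ty(), BackupType::Host);
        assert_eq!(dir.id(), "elsa");
        assert_eq!(dir.to_string(), "host/elsa/2020-06-15T05:18:33Z");
    }
}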
impl fmt::Display for BackupDir {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // FIXME: log error?
        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
        write!(f, "{}/{}", self.group, time)
    }
}
/// Used when either a backup group or a backup directory (snapshot) can be valid.
pub enum BackupPart {
    Group(BackupGroup),
    Dir(BackupDir),
}

impl std::str::FromStr for BackupPart {
    type Err = Error;

    /// Parse a path which can be either a backup group or a snapshot dir.
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        let ty = cap.get(1).unwrap().as_str().parse()?;
        let id = cap.get(2).unwrap().as_str().to_string();

        Ok(match cap.get(3) {
            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?),
            None => BackupPart::Group((ty, id).into()),
        })
    }
}
1110 "backup": { type: BackupDir }
,
1112 schema
: SINGLE_LINE_COMMENT_SCHEMA
,
1116 type: SnapshotVerifyState
,
1125 schema
: BACKUP_ARCHIVE_NAME_SCHEMA
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup snapshot.
pub struct SnapshotListItem {
    pub backup: BackupDir,
    /// The first line from manifest "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// The result of the last run verify task
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verification: Option<SnapshotVerifyState>,
    /// Fingerprint of encryption key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<Fingerprint>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
    /// The owner of the snapshot's group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// Protection from prunes
    #[serde(default)]
    pub protected: bool,
}
1164 "backup": { type: BackupGroup }
,
1165 "last-backup": { schema: BACKUP_TIME_SCHEMA }
,
1171 schema
: BACKUP_ARCHIVE_NAME_SCHEMA
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
    pub backup: BackupGroup,
    /// Timestamp of the last backup.
    pub last_backup: i64,
    /// Number of contained snapshots
    pub backup_count: u64,
    /// List of contained archive files.
    pub files: Vec<String>,
    /// The owner of group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// The first line from group "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup namespace.
pub struct NamespaceListItem {
    /// A backup namespace
    pub ns: BackupNamespace,

    //pub group_count: u64,
    //pub ns_count: u64,
    /// The first line from the namespace's "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
1218 "backup": { type: BackupDir }
,
1221 #[derive(Serialize, Deserialize)]
1222 #[serde(rename_all = "kebab-case")]
1224 pub struct PruneListItem
{
1226 pub backup
: BackupDir
,
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
    /// The counts for CT backups
    pub ct: Option<TypeCounts>,
    /// The counts for Host backups
    pub host: Option<TypeCounts>,
    /// The counts for VM backups
    pub vm: Option<TypeCounts>,
    /// The counts for other backup types
    pub other: Option<TypeCounts>,
}
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
    /// The number of groups of the type.
    pub groups: u64,
    /// The number of snapshots of the type.
    pub snapshots: u64,
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
    pub upid: Option<String>,
    /// Number of processed index files.
    pub index_file_count: usize,
    /// Sum of bytes referred by index files.
    pub index_data_bytes: u64,
    /// Bytes used on disk.
    pub disk_bytes: u64,
    /// Chunks used on disk.
    pub disk_chunks: usize,
    /// Sum of removed bytes.
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
    /// Sum of pending bytes (pending removal - kept for safety).
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
}
#[api(
    properties: {
        "status": {
            type: GarbageCollectionStatus,
        },
    },
)]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Garbage Collection general info
pub struct GarbageCollectionJobStatus {
    /// Datastore
    pub store: String,
    #[serde(flatten)]
    pub status: GarbageCollectionStatus,
    /// Schedule of the GC job
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schedule: Option<String>,
    /// Time of the next GC run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_run: Option<i64>,
    /// End time of the last GC run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_endtime: Option<i64>,
    /// State of the last GC run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_state: Option<String>,
    /// Duration of the last GC run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration: Option<i64>,
}
#[api(
    properties: {
        "gc-status": {
            type: GarbageCollectionStatus,
            optional: true,
        },
        counts: {
            type: Counts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
    /// Group/Snapshot counts
    #[serde(skip_serializing_if = "Option::is_none")]
    pub counts: Option<Counts>,
}
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        history: {
            type: Array,
            optional: true,
            items: {
                type: Number,
                description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
            },
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of a Datastore
pub struct DataStoreStatusListItem {
    pub store: String,
    /// The size of the underlying storage in bytes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub total: Option<u64>,
    /// The used bytes of the underlying storage.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub used: Option<u64>,
    /// The available bytes of the underlying storage. (-1 on error)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub avail: Option<u64>,
    /// A list of usages of the past (last month).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history: Option<Vec<Option<f64>>>,
    /// History start time (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_start: Option<u64>,
    /// History resolution (seconds)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_delta: Option<u64>,
    /// Estimation of the UNIX epoch when the storage will be full.
    /// It's calculated via a simple Linear Regression (Least Squares) over the RRD data of the
    /// last month. Missing if not enough data points are available yet. An estimate in the past
    /// means that usage is declining or not changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub estimated_full_date: Option<i64>,
    /// An error description, for example, when the datastore could not be looked up
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
}
impl DataStoreStatusListItem {
    pub fn empty(store: &str, err: Option<String>) -> Self {
        DataStoreStatusListItem {
            store: store.to_owned(),
            total: None,
            used: None,
            avail: None,
            history: None,
            history_start: None,
            history_delta: None,
            estimated_full_date: None,
            error: err,
            gc_status: None,
        }
    }
}
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots.",
        &SnapshotListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of archive files inside a backup snapshot.",
        &BackupContent::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup groups.",
        &GroupListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup namespaces.",
        &NamespaceListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots and a flag indicating if they are kept or removed.",
        &PruneListItem::API_SCHEMA,
    )
    .schema(),
};
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// A namespace mapping
pub struct TapeRestoreNamespace {
    /// The source datastore
    pub store: String,
    /// The source namespace. Root namespace if omitted.
    pub source: Option<BackupNamespace>,
    /// The target namespace.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub target: Option<BackupNamespace>,
    /// The (optional) recursion depth
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,
}
pub const TAPE_RESTORE_NAMESPACE_SCHEMA: Schema = StringSchema::new("A namespace mapping")
    .format(&ApiStringFormat::PropertyString(
        &TapeRestoreNamespace::API_SCHEMA,
    ))
    .schema();
/// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z'
/// into a [`BackupNamespace`] and [`BackupDir`]
pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir), Error> {
    match input.rmatch_indices('/').nth(2) {
        Some((idx, _)) => {
            let ns = BackupNamespace::from_path(&input[..idx])?;
            let dir: BackupDir = input[(idx + 1)..].parse()?;
            Ok((ns, dir))
        }
        None => Ok((BackupNamespace::root(), input.parse()?)),
    }
}
/// Prints a [`BackupNamespace`] and [`BackupDir`] in the form of
/// 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z'
pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String {
    if ns.is_root() {
        dir.to_string()
    } else {
        format!("{}/{}", ns.display_as_path(), dir)
    }
}
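
// Added sketch (not part of the original source): parse_ns_and_snapshot()
// splits on the third slash from the right, so namespace and snapshot parts
// round-trip through print_ns_and_snapshot().
#[cfg(test)]
mod ns_snapshot_round_trip_sketch {
    use super::*;

    #[test]
    fn round_trip() {
        let input = "ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z";
        let (ns, dir) = parse_ns_and_snapshot(input).expect("valid input");
        assert_eq!(ns.name(), "foo/bar");
        assert_eq!(print_ns_and_snapshot(&ns, &dir), input);
    }
}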
/// Prints a Datastore name and [`BackupNamespace`] for logs/errors.
pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
    if ns.is_root() {
        format!("datastore '{}', root namespace", store)
    } else {
        format!("datastore '{}', namespace '{}'", store, ns)
    }
}