2 use std
::path
::PathBuf
;
4 use anyhow
::{bail, format_err, Error}
;
5 use serde
::{Deserialize, Serialize}
;
8 api
, const_regex
, ApiStringFormat
, ApiType
, ArraySchema
, EnumEntry
, IntegerSchema
, ReturnType
,
9 Schema
, StringSchema
, Updater
, UpdaterType
,
13 Authid
, CryptMode
, Fingerprint
, MaintenanceMode
, Userid
, DATASTORE_NOTIFY_STRING_SCHEMA
,
14 GC_SCHEDULE_SCHEMA
, PROXMOX_SAFE_ID_FORMAT
, PRUNE_SCHEDULE_SCHEMA
, SHA256_HEX_REGEX
,
15 SINGLE_LINE_COMMENT_SCHEMA
, UPID
,
19 pub BACKUP_NAMESPACE_REGEX
= concat
!(r
"^", BACKUP_NS_RE
!(), r
"$");
21 pub BACKUP_TYPE_REGEX
= concat
!(r
"^(", BACKUP_TYPE_RE
!(), r
")$");
23 pub BACKUP_ID_REGEX
= concat
!(r
"^", BACKUP_ID_RE
!(), r
"$");
25 pub BACKUP_DATE_REGEX
= concat
!(r
"^", BACKUP_TIME_RE
!() ,r
"$");
27 pub GROUP_PATH_REGEX
= concat
!(
28 r
"^(", BACKUP_TYPE_RE
!(), ")/",
29 r
"(", BACKUP_ID_RE
!(), r
")$",
32 pub BACKUP_FILE_REGEX
= r
"^.*\.([fd]idx|blob)$";
34 pub SNAPSHOT_PATH_REGEX
= concat
!(r
"^", SNAPSHOT_PATH_REGEX_STR
!(), r
"$");
35 pub GROUP_OR_SNAPSHOT_PATH_REGEX
= concat
!(r
"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR
!(), r
"$");
37 pub DATASTORE_MAP_REGEX
= concat
!(r
"(:?", PROXMOX_SAFE_ID_REGEX_STR
!(), r
"=)?", PROXMOX_SAFE_ID_REGEX_STR
!());
/// API string format for a chunk digest: validated against [`SHA256_HEX_REGEX`],
/// i.e. a SHA-256 checksum rendered as lowercase/uppercase hex.
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
42 pub const DIR_NAME_SCHEMA
: Schema
= StringSchema
::new("Directory name")
47 pub const BACKUP_ARCHIVE_NAME_SCHEMA
: Schema
= StringSchema
::new("Backup archive name.")
48 .format(&PROXMOX_SAFE_ID_FORMAT
)
/// API string format for a backup ID, validated against [`BACKUP_ID_REGEX`].
pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
/// API string format for a backup group path (`<type>/<id>`), validated
/// against [`GROUP_PATH_REGEX`].
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
/// API string format for a backup namespace, validated against
/// [`BACKUP_NAMESPACE_REGEX`].
pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX);
56 pub const BACKUP_ID_SCHEMA
: Schema
= StringSchema
::new("Backup ID.")
57 .format(&BACKUP_ID_FORMAT
)
60 pub const BACKUP_TYPE_SCHEMA
: Schema
= StringSchema
::new("Backup type.")
61 .format(&ApiStringFormat
::Enum(&[
62 EnumEntry
::new("vm", "Virtual Machine Backup"),
63 EnumEntry
::new("ct", "Container Backup"),
64 EnumEntry
::new("host", "Host Backup"),
68 pub const BACKUP_TIME_SCHEMA
: Schema
= IntegerSchema
::new("Backup time (Unix epoch.)")
72 pub const BACKUP_GROUP_SCHEMA
: Schema
= StringSchema
::new("Backup Group")
73 .format(&BACKUP_GROUP_FORMAT
)
/// The maximal, inclusive depth for namespaces from the root ns downwards
///
/// The datastore root name space is at depth zero (0), so we have in total eight (8) levels
pub const MAX_NAMESPACE_DEPTH: usize = 7;

/// Maximum length of a namespace's full name (components joined by '/').
// NOTE(review): 32 * 8 looks like "8 levels of up-to-32-byte components" — confirm.
pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256
81 pub const BACKUP_NAMESPACE_SCHEMA
: Schema
= StringSchema
::new("Namespace.")
82 .format(&BACKUP_NAMESPACE_FORMAT
)
83 .max_length(MAX_BACKUP_NAMESPACE_LENGTH
) // 256
86 pub const NS_MAX_DEPTH_SCHEMA
: Schema
=
87 IntegerSchema
::new("How many levels of namespaces should be operated on (0 == no recursion)")
89 .maximum(MAX_NAMESPACE_DEPTH
as isize)
90 .default(MAX_NAMESPACE_DEPTH
as isize)
93 pub const NS_MAX_DEPTH_REDUCED_SCHEMA
: Schema
=
94 IntegerSchema
::new("How many levels of namespaces should be operated on (0 == no recursion, empty == automatic full recursion, namespace depths reduce maximum allowed value)")
96 .maximum(MAX_NAMESPACE_DEPTH
as isize)
99 pub const DATASTORE_SCHEMA
: Schema
= StringSchema
::new("Datastore name.")
100 .format(&PROXMOX_SAFE_ID_FORMAT
)
105 pub const CHUNK_DIGEST_SCHEMA
: Schema
= StringSchema
::new("Chunk digest (SHA256).")
106 .format(&CHUNK_DIGEST_FORMAT
)
/// API string format for a single datastore mapping (`(<source>=)?<target>`),
/// validated against [`DATASTORE_MAP_REGEX`].
pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
111 pub const DATASTORE_MAP_SCHEMA
: Schema
= StringSchema
::new("Datastore mapping.")
112 .format(&DATASTORE_MAP_FORMAT
)
115 .type_text("(<source>=)?<target>")
/// Array schema for a list of datastore mappings; each element is validated
/// by [`DATASTORE_MAP_SCHEMA`].
pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema();
121 pub const DATASTORE_MAP_LIST_SCHEMA
: Schema
= StringSchema
::new(
122 "A list of Datastore mappings (or single datastore), comma separated. \
123 For example 'a=b,e' maps the source datastore 'a' to target 'b and \
124 all other sources to the default 'e'. If no default is given, only the \
125 specified sources are mapped.",
127 .format(&ApiStringFormat
::PropertyString(
128 &DATASTORE_MAP_ARRAY_SCHEMA
,
132 pub const PRUNE_SCHEMA_KEEP_DAILY
: Schema
= IntegerSchema
::new("Number of daily backups to keep.")
136 pub const PRUNE_SCHEMA_KEEP_HOURLY
: Schema
=
137 IntegerSchema
::new("Number of hourly backups to keep.")
141 pub const PRUNE_SCHEMA_KEEP_LAST
: Schema
= IntegerSchema
::new("Number of backups to keep.")
145 pub const PRUNE_SCHEMA_KEEP_MONTHLY
: Schema
=
146 IntegerSchema
::new("Number of monthly backups to keep.")
150 pub const PRUNE_SCHEMA_KEEP_WEEKLY
: Schema
=
151 IntegerSchema
::new("Number of weekly backups to keep.")
155 pub const PRUNE_SCHEMA_KEEP_YEARLY
: Schema
=
156 IntegerSchema
::new("Number of yearly backups to keep.")
161 #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
162 #[serde(rename_all = "lowercase")]
163 /// The order to sort chunks by
164 pub enum ChunkOrder
{
165 /// Iterate chunks in the index order
167 /// Iterate chunks in inode order
172 #[derive(PartialEq, Eq, Serialize, Deserialize)]
173 #[serde(rename_all = "lowercase")]
174 /// The level of syncing that is done when writing into a datastore.
175 pub enum DatastoreFSyncLevel
{
176 /// No special fsync or syncfs calls are triggered. The system default dirty write back
177 /// mechanism ensures that data gets is flushed eventually via the `dirty_writeback_centisecs`
178 /// and `dirty_expire_centisecs` kernel sysctls, defaulting to ~ 30s.
180 /// This mode provides generally the best performance, as all write back can happen async,
181 /// which reduces IO pressure.
182 /// But it may cause losing data on powerloss or system crash without any uninterruptible power
185 /// Triggers a fsync after writing any chunk on the datastore. While this can slow down
186 /// backups significantly, depending on the underlying file system and storage used, it
187 /// will ensure fine-grained consistency. Depending on the exact setup, there might be no
188 /// benefits over the file system level sync, so if the setup allows it, you should prefer
189 /// that one. Despite the possible negative impact in performance, it's the most consistent
192 /// Trigger a filesystem wide sync after all backup data got written but before finishing the
193 /// task. This allows that every finished backup is fully written back to storage
194 /// while reducing the impact on many file systems in contrast to the file level sync.
195 /// Depending on the setup, it might have a negative impact on unrelated write operations
196 /// of the underlying filesystem, but it is generally a good compromise between performance
201 impl Default
for DatastoreFSyncLevel
{
202 fn default() -> Self {
203 DatastoreFSyncLevel
::None
215 #[derive(Serialize, Deserialize, Default)]
216 #[serde(rename_all = "kebab-case")]
217 /// Datastore tuning options
218 pub struct DatastoreTuning
{
219 /// Iterate chunks in this order
220 pub chunk_order
: Option
<ChunkOrder
>,
221 pub sync_level
: Option
<DatastoreFSyncLevel
>,
224 pub const DATASTORE_TUNING_STRING_SCHEMA
: Schema
= StringSchema
::new("Datastore tuning options")
225 .format(&ApiStringFormat
::PropertyString(
226 &DatastoreTuning
::API_SCHEMA
,
233 schema
: DATASTORE_SCHEMA
,
236 schema
: DIR_NAME_SCHEMA
,
244 schema
: DATASTORE_NOTIFY_STRING_SCHEMA
,
248 schema
: SINGLE_LINE_COMMENT_SCHEMA
,
252 schema
: GC_SCHEDULE_SCHEMA
,
256 schema
: PRUNE_SCHEDULE_SCHEMA
,
259 type: crate::KeepOptions
,
262 description
: "If enabled, all new backups will be verified right after completion.",
268 schema
: DATASTORE_TUNING_STRING_SCHEMA
,
270 "maintenance-mode": {
272 format
: &ApiStringFormat
::PropertyString(&MaintenanceMode
::API_SCHEMA
),
277 #[derive(Serialize, Deserialize, Updater)]
278 #[serde(rename_all = "kebab-case")]
279 /// Datastore configuration properties.
280 pub struct DataStoreConfig
{
287 #[serde(skip_serializing_if = "Option::is_none")]
288 pub comment
: Option
<String
>,
290 #[serde(skip_serializing_if = "Option::is_none")]
291 pub gc_schedule
: Option
<String
>,
293 #[serde(skip_serializing_if = "Option::is_none")]
294 pub prune_schedule
: Option
<String
>,
297 pub keep
: crate::KeepOptions
,
299 /// If enabled, all backups will be verified right after completion.
300 #[serde(skip_serializing_if = "Option::is_none")]
301 pub verify_new
: Option
<bool
>,
303 /// Send job email notification to this user
304 #[serde(skip_serializing_if = "Option::is_none")]
305 pub notify_user
: Option
<Userid
>,
307 /// Send notification only for job errors
308 #[serde(skip_serializing_if = "Option::is_none")]
309 pub notify
: Option
<String
>,
311 /// Datastore tuning options
312 #[serde(skip_serializing_if = "Option::is_none")]
313 pub tuning
: Option
<String
>,
315 /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in "
316 #[serde(skip_serializing_if = "Option::is_none")]
317 pub maintenance_mode
: Option
<String
>,
320 impl DataStoreConfig
{
321 pub fn new(name
: String
, path
: String
) -> Self {
327 prune_schedule
: None
,
328 keep
: Default
::default(),
333 maintenance_mode
: None
,
337 pub fn get_maintenance_mode(&self) -> Option
<MaintenanceMode
> {
338 self.maintenance_mode
340 .and_then(|str| MaintenanceMode
::API_SCHEMA
.parse_property_string(str).ok())
341 .and_then(|value
| MaintenanceMode
::deserialize(value
).ok())
348 schema
: DATASTORE_SCHEMA
,
352 schema
: SINGLE_LINE_COMMENT_SCHEMA
,
356 format
: &ApiStringFormat
::PropertyString(&MaintenanceMode
::API_SCHEMA
),
361 #[derive(Serialize, Deserialize)]
362 #[serde(rename_all = "kebab-case")]
363 /// Basic information about a datastore.
364 pub struct DataStoreListItem
{
366 pub comment
: Option
<String
>,
367 /// If the datastore is in maintenance mode, information about it
368 #[serde(skip_serializing_if = "Option::is_none")]
369 pub maintenance
: Option
<String
>,
375 schema
: BACKUP_ARCHIVE_NAME_SCHEMA
,
383 #[derive(Serialize, Deserialize)]
384 #[serde(rename_all = "kebab-case")]
385 /// Basic information about archive files inside a backup snapshot.
386 pub struct BackupContent
{
387 pub filename
: String
,
388 /// Info if file is encrypted, signed, or neither.
389 #[serde(skip_serializing_if = "Option::is_none")]
390 pub crypt_mode
: Option
<CryptMode
>,
391 /// Archive size (from backup manifest).
392 #[serde(skip_serializing_if = "Option::is_none")]
393 pub size
: Option
<u64>,
397 #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
398 #[serde(rename_all = "lowercase")]
399 /// Result of a verify operation.
400 pub enum VerifyState
{
401 /// Verification was successful
403 /// Verification reported one or more errors
417 #[derive(Serialize, Deserialize)]
419 pub struct SnapshotVerifyState
{
420 /// UPID of the verify task
422 /// State of the verification. Enum.
423 pub state
: VerifyState
,
426 /// A namespace provides a logical separation between backup groups from different domains
427 /// (cluster, sites, ...) where uniqueness cannot be guaranteed anymore. It allows users to share a
428 /// datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and
429 /// allows to form a hierarchy, for easier management and avoiding clashes between backup_ids.
431 /// NOTE: Namespaces are a logical boundary only, they do not provide a full secure separation as
432 /// the chunk store is still shared. So, users whom do not trust each other must not share a
435 /// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid
436 /// clashes with backup group IDs and future backup_types and to have a clean separation between
437 /// the namespace directories and the ones from a backup snapshot.
438 #[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)]
439 pub struct BackupNamespace
{
440 /// The namespace subdirectories without the `ns/` intermediate directories.
443 /// Cache the total length for efficiency.
447 impl BackupNamespace
{
448 /// Returns a root namespace reference.
449 pub const fn root() -> Self {
456 /// True if this represents the root namespace.
457 pub fn is_root(&self) -> bool
{
458 self.inner
.is_empty()
461 /// Try to parse a string into a namespace.
462 pub fn new(name
: &str) -> Result
<Self, Error
> {
463 let mut this
= Self::root();
469 for name
in name
.split('
/'
) {
470 this
.push(name
.to_string())?
;
475 /// Try to parse a file path string (where each sub-namespace is separated by an `ns`
476 /// subdirectory) into a valid namespace.
477 pub fn from_path(mut path
: &str) -> Result
<Self, Error
> {
478 let mut this
= Self::root();
480 match path
.strip_prefix("ns/") {
481 Some(next
) => match next
.find('
/'
) {
483 this
.push(next
[..pos
].to_string())?
;
484 path
= &next
[(pos
+ 1)..];
487 this
.push(next
.to_string())?
;
491 None
if !path
.is_empty() => {
492 bail
!("invalid component in namespace path at {:?}", path
);
500 /// Create a new Namespace attached to parent
502 /// `name` must be a single level namespace ID, that is, no '/' is allowed.
503 /// This rule also avoids confusion about the name being a NS or NS-path
504 pub fn from_parent_ns(parent
: &Self, name
: String
) -> Result
<Self, Error
> {
505 let mut child
= parent
.to_owned();
510 /// Pop one level off the namespace hierarchy
511 pub fn pop(&mut self) -> Option
<String
> {
512 let dropped
= self.inner
.pop();
513 if let Some(ref dropped
) = dropped
{
514 self.len
= self.len
.saturating_sub(dropped
.len() + 1);
519 /// Get the namespace parent as owned BackupNamespace
520 pub fn parent(&self) -> Self {
525 let mut parent
= self.clone();
531 /// Create a new namespace directly from a vec.
535 /// Invalid contents may lead to inaccessible backups.
536 pub unsafe fn from_vec_unchecked(components
: Vec
<String
>) -> Self {
537 let mut this
= Self {
541 this
.recalculate_len();
545 /// Recalculate the length.
546 fn recalculate_len(&mut self) {
547 self.len
= self.inner
.len().max(1) - 1; // a slash between each component
548 for part
in &self.inner
{
549 self.len
+= part
.len();
553 /// The hierarchical depth of the namespace, 0 means top-level.
554 pub fn depth(&self) -> usize {
558 /// The logical name and ID of the namespace.
559 pub fn name(&self) -> String
{
563 /// The actual relative backing path of the namespace on the datastore.
564 pub fn path(&self) -> PathBuf
{
565 self.display_as_path().to_string().into()
568 /// Get the current namespace length.
570 /// This includes separating slashes, but does not include the `ns/` intermediate directories.
571 /// This is not the *path* length, but rather the length that would be produced via
574 pub fn name_len(&self) -> usize {
578 /// Get the current namespace path length.
580 /// This includes the `ns/` subdirectory strings.
581 pub fn path_len(&self) -> usize {
582 self.name_len() + 3 * self.inner
.len()
585 /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long.
586 pub fn push(&mut self, subdir
: String
) -> Result
<(), Error
> {
587 if subdir
.contains('
/'
) {
588 bail
!("namespace component contained a slash");
594 /// Assumes `subdir` already does not contain any slashes.
595 /// Performs remaining checks and updates the length.
596 fn push_do(&mut self, subdir
: String
) -> Result
<(), Error
> {
597 let depth
= self.depth();
598 // check for greater equal to account for the to be added subdir
599 if depth
>= MAX_NAMESPACE_DEPTH
{
600 bail
!("namespace too deep, {depth} >= max {MAX_NAMESPACE_DEPTH}");
603 if self.len
+ subdir
.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH
{
604 bail
!("namespace length exceeded");
607 if !crate::PROXMOX_SAFE_ID_REGEX
.is_match(&subdir
) {
608 bail
!("not a valid namespace component: {subdir}");
611 if !self.inner
.is_empty() {
612 self.len
+= 1; // separating slash
614 self.len
+= subdir
.len();
615 self.inner
.push(subdir
);
619 /// Return an adapter which [`fmt::Display`]s as a path with `"ns/"` prefixes in front of every
621 pub fn display_as_path(&self) -> BackupNamespacePath
{
622 BackupNamespacePath(self)
625 /// Iterate over the subdirectories.
626 pub fn components(&self) -> impl Iterator
<Item
= &str> + '_
{
627 self.inner
.iter().map(String
::as_str
)
630 /// Map NS by replacing `source_prefix` with `target_prefix`
633 source_prefix
: &BackupNamespace
,
634 target_prefix
: &BackupNamespace
,
635 ) -> Result
<Self, Error
> {
638 .strip_prefix(&source_prefix
.inner
[..])
641 "Failed to map namespace - {source_prefix} is not a valid prefix of {self}",
645 let mut new
= target_prefix
.clone();
647 new
.push(item
.clone())?
;
652 /// Check whether adding `depth` levels of sub-namespaces exceeds the max depth limit
653 pub fn check_max_depth(&self, depth
: usize) -> Result
<(), Error
> {
654 let ns_depth
= self.depth();
655 if ns_depth
+ depth
> MAX_NAMESPACE_DEPTH
{
657 "namespace '{self}'s depth and recursion depth exceed limit: {ns_depth} + {depth} > {MAX_NAMESPACE_DEPTH}",
663 pub fn acl_path
<'a
>(&'a
self, store
: &'a
str) -> Vec
<&'a
str> {
664 let mut path
: Vec
<&str> = vec
!["datastore", store
];
669 path
.extend(self.inner
.iter().map(|comp
| comp
.as_str()));
674 /// Check whether this namespace contains another namespace.
676 /// If so, the depth is returned.
680 /// # use pbs_api_types::BackupNamespace;
681 /// let main: BackupNamespace = "a/b".parse().unwrap();
682 /// let sub: BackupNamespace = "a/b/c/d".parse().unwrap();
683 /// let other: BackupNamespace = "x/y".parse().unwrap();
684 /// assert_eq!(main.contains(&main), Some(0));
685 /// assert_eq!(main.contains(&sub), Some(2));
686 /// assert_eq!(sub.contains(&main), None);
687 /// assert_eq!(main.contains(&other), None);
689 pub fn contains(&self, other
: &BackupNamespace
) -> Option
<usize> {
692 .strip_prefix(&self.inner
[..])
693 .map(|suffix
| suffix
.len())
697 impl fmt
::Display
for BackupNamespace
{
698 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
701 let mut parts
= self.inner
.iter();
702 if let Some(first
) = parts
.next() {
713 serde_plain
::derive_deserialize_from_fromstr
!(BackupNamespace
, "valid backup namespace");
715 impl std
::str::FromStr
for BackupNamespace
{
718 fn from_str(name
: &str) -> Result
<Self, Self::Err
> {
723 serde_plain
::derive_serialize_from_display
!(BackupNamespace
);
725 impl ApiType
for BackupNamespace
{
726 const API_SCHEMA
: Schema
= BACKUP_NAMESPACE_SCHEMA
;
729 /// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`].
731 /// This implements [`fmt::Display`] such that it includes the `ns/` subdirectory prefix in front of
733 pub struct BackupNamespacePath
<'a
>(&'a BackupNamespace
);
735 impl fmt
::Display
for BackupNamespacePath
<'_
> {
736 fn fmt(&self, f
: &mut fmt
::Formatter
) -> fmt
::Result
{
738 for part
in &self.0.inner
{
749 #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
750 #[serde(rename_all = "lowercase")]
751 pub enum BackupType
{
752 /// Virtual machines.
760 // NOTE: if you add new types, don't forget to adapt the iter below!
764 pub const fn as_str(&self) -> &'
static str {
766 BackupType
::Vm
=> "vm",
767 BackupType
::Ct
=> "ct",
768 BackupType
::Host
=> "host",
772 /// We used to have alphabetical ordering here when this was a string.
773 const fn order(self) -> u8 {
776 BackupType
::Host
=> 1,
782 pub fn iter() -> impl Iterator
<Item
= BackupType
> + Send
+ Sync
+ Unpin
+ '
static {
783 [BackupType
::Vm
, BackupType
::Ct
, BackupType
::Host
]
789 impl fmt
::Display
for BackupType
{
791 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
792 fmt
::Display
::fmt(self.as_str(), f
)
796 impl std
::str::FromStr
for BackupType
{
799 /// Parse a backup type.
800 fn from_str(ty
: &str) -> Result
<Self, Error
> {
802 "ct" => BackupType
::Ct
,
803 "host" => BackupType
::Host
,
804 "vm" => BackupType
::Vm
,
805 _
=> bail
!("invalid backup type {ty:?}"),
810 impl std
::cmp
::Ord
for BackupType
{
812 fn cmp(&self, other
: &Self) -> std
::cmp
::Ordering
{
813 self.order().cmp(&other
.order())
817 impl std
::cmp
::PartialOrd
for BackupType
{
818 fn partial_cmp(&self, other
: &Self) -> Option
<std
::cmp
::Ordering
> {
819 Some(self.cmp(other
))
825 "backup-type": { type: BackupType }
,
826 "backup-id": { schema: BACKUP_ID_SCHEMA }
,
829 #[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
830 #[serde(rename_all = "kebab-case")]
831 /// A backup group (without a data store).
832 pub struct BackupGroup
{
834 #[serde(rename = "backup-type")]
838 #[serde(rename = "backup-id")]
843 pub fn new
<T
: Into
<String
>>(ty
: BackupType
, id
: T
) -> Self {
844 Self { ty, id: id.into() }
847 pub fn matches(&self, filter
: &crate::GroupFilter
) -> bool
{
848 use crate::GroupFilter
;
851 GroupFilter
::Group(backup_group
) => {
852 match backup_group
.parse
::<BackupGroup
>() {
853 Ok(group
) => *self == group
,
854 Err(_
) => false, // shouldn't happen if value is schema-checked
857 GroupFilter
::BackupType(ty
) => self.ty
== *ty
,
858 GroupFilter
::Regex(regex
) => regex
.is_match(&self.to_string()),
863 impl AsRef
<BackupGroup
> for BackupGroup
{
865 fn as_ref(&self) -> &Self {
870 impl From
<(BackupType
, String
)> for BackupGroup
{
872 fn from(data
: (BackupType
, String
)) -> Self {
880 impl std
::cmp
::Ord
for BackupGroup
{
881 fn cmp(&self, other
: &Self) -> std
::cmp
::Ordering
{
882 let type_order
= self.ty
.cmp(&other
.ty
);
883 if type_order
!= std
::cmp
::Ordering
::Equal
{
887 // try to compare IDs numerically
888 let id_self
= self.id
.parse
::<u64>();
889 let id_other
= other
.id
.parse
::<u64>();
890 match (id_self
, id_other
) {
891 (Ok(id_self
), Ok(id_other
)) => id_self
.cmp(&id_other
),
892 (Ok(_
), Err(_
)) => std
::cmp
::Ordering
::Less
,
893 (Err(_
), Ok(_
)) => std
::cmp
::Ordering
::Greater
,
894 _
=> self.id
.cmp(&other
.id
),
899 impl std
::cmp
::PartialOrd
for BackupGroup
{
900 fn partial_cmp(&self, other
: &Self) -> Option
<std
::cmp
::Ordering
> {
901 Some(self.cmp(other
))
905 impl fmt
::Display
for BackupGroup
{
906 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
907 write
!(f
, "{}/{}", self.ty
, self.id
)
911 impl std
::str::FromStr
for BackupGroup
{
914 /// Parse a backup group.
916 /// This parses strings like `vm/100".
917 fn from_str(path
: &str) -> Result
<Self, Error
> {
918 let cap
= GROUP_PATH_REGEX
920 .ok_or_else(|| format_err
!("unable to parse backup group path '{}'", path
))?
;
923 ty
: cap
.get(1).unwrap().as_str().parse()?
,
924 id
: cap
.get(2).unwrap().as_str().to_owned(),
931 "group": { type: BackupGroup }
,
932 "backup-time": { schema: BACKUP_TIME_SCHEMA }
,
935 /// Uniquely identify a Backup (relative to data store)
937 /// We also call this a backup snaphost.
938 #[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
939 #[serde(rename_all = "kebab-case")]
940 pub struct BackupDir
{
943 pub group
: BackupGroup
,
945 /// Backup timestamp unix epoch.
946 #[serde(rename = "backup-time")]
950 impl AsRef
<BackupGroup
> for BackupDir
{
952 fn as_ref(&self) -> &BackupGroup
{
957 impl AsRef
<BackupDir
> for BackupDir
{
959 fn as_ref(&self) -> &Self {
964 impl From
<(BackupGroup
, i64)> for BackupDir
{
965 fn from(data
: (BackupGroup
, i64)) -> Self {
973 impl From
<(BackupType
, String
, i64)> for BackupDir
{
974 fn from(data
: (BackupType
, String
, i64)) -> Self {
976 group
: (data
.0, data
.1).into(),
983 pub fn with_rfc3339
<T
>(ty
: BackupType
, id
: T
, backup_time_string
: &str) -> Result
<Self, Error
>
987 let time
= proxmox_time
::parse_rfc3339(backup_time_string
)?
;
988 let group
= BackupGroup
::new(ty
, id
.into());
989 Ok(Self { group, time }
)
993 pub fn ty(&self) -> BackupType
{
998 pub fn id(&self) -> &str {
1003 impl std
::str::FromStr
for BackupDir
{
1006 /// Parse a snapshot path.
1008 /// This parses strings like `host/elsa/2020-06-15T05:18:33Z".
1009 fn from_str(path
: &str) -> Result
<Self, Self::Err
> {
1010 let cap
= SNAPSHOT_PATH_REGEX
1012 .ok_or_else(|| format_err
!("unable to parse backup snapshot path '{}'", path
))?
;
1014 BackupDir
::with_rfc3339(
1015 cap
.get(1).unwrap().as_str().parse()?
,
1016 cap
.get(2).unwrap().as_str(),
1017 cap
.get(3).unwrap().as_str(),
1022 impl fmt
::Display
for BackupDir
{
1023 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
1024 // FIXME: log error?
1025 let time
= proxmox_time
::epoch_to_rfc3339_utc(self.time
).map_err(|_
| fmt
::Error
)?
;
1026 write
!(f
, "{}/{}", self.group
, time
)
1030 /// Used when both a backup group or a directory can be valid.
1031 pub enum BackupPart
{
1036 impl std
::str::FromStr
for BackupPart
{
1039 /// Parse a path which can be either a backup group or a snapshot dir.
1040 fn from_str(path
: &str) -> Result
<Self, Error
> {
1041 let cap
= GROUP_OR_SNAPSHOT_PATH_REGEX
1043 .ok_or_else(|| format_err
!("unable to parse backup snapshot path '{}'", path
))?
;
1045 let ty
= cap
.get(1).unwrap().as_str().parse()?
;
1046 let id
= cap
.get(2).unwrap().as_str().to_string();
1048 Ok(match cap
.get(3) {
1049 Some(time
) => BackupPart
::Dir(BackupDir
::with_rfc3339(ty
, id
, time
.as_str())?
),
1050 None
=> BackupPart
::Group((ty
, id
).into()),
1057 "backup": { type: BackupDir }
,
1059 schema
: SINGLE_LINE_COMMENT_SCHEMA
,
1063 type: SnapshotVerifyState
,
1072 schema
: BACKUP_ARCHIVE_NAME_SCHEMA
1081 #[derive(Serialize, Deserialize)]
1082 #[serde(rename_all = "kebab-case")]
1083 /// Basic information about backup snapshot.
1084 pub struct SnapshotListItem
{
1086 pub backup
: BackupDir
,
1087 /// The first line from manifest "notes"
1088 #[serde(skip_serializing_if = "Option::is_none")]
1089 pub comment
: Option
<String
>,
1090 /// The result of the last run verify task
1091 #[serde(skip_serializing_if = "Option::is_none")]
1092 pub verification
: Option
<SnapshotVerifyState
>,
1093 /// Fingerprint of encryption key
1094 #[serde(skip_serializing_if = "Option::is_none")]
1095 pub fingerprint
: Option
<Fingerprint
>,
1096 /// List of contained archive files.
1097 pub files
: Vec
<BackupContent
>,
1098 /// Overall snapshot size (sum of all archive sizes).
1099 #[serde(skip_serializing_if = "Option::is_none")]
1100 pub size
: Option
<u64>,
1101 /// The owner of the snapshots group
1102 #[serde(skip_serializing_if = "Option::is_none")]
1103 pub owner
: Option
<Authid
>,
1104 /// Protection from prunes
1106 pub protected
: bool
,
1111 "backup": { type: BackupGroup }
,
1112 "last-backup": { schema: BACKUP_TIME_SCHEMA }
,
1118 schema
: BACKUP_ARCHIVE_NAME_SCHEMA
1127 #[derive(Serialize, Deserialize)]
1128 #[serde(rename_all = "kebab-case")]
1129 /// Basic information about a backup group.
1130 pub struct GroupListItem
{
1132 pub backup
: BackupGroup
,
1134 pub last_backup
: i64,
1135 /// Number of contained snapshots
1136 pub backup_count
: u64,
1137 /// List of contained archive files.
1138 pub files
: Vec
<String
>,
1139 /// The owner of group
1140 #[serde(skip_serializing_if = "Option::is_none")]
1141 pub owner
: Option
<Authid
>,
1142 /// The first line from group "notes"
1143 #[serde(skip_serializing_if = "Option::is_none")]
1144 pub comment
: Option
<String
>,
1148 #[derive(Serialize, Deserialize)]
1149 #[serde(rename_all = "kebab-case")]
1150 /// Basic information about a backup namespace.
1151 pub struct NamespaceListItem
{
1152 /// A backup namespace
1153 pub ns
: BackupNamespace
,
1156 //pub group_count: u64,
1157 //pub ns_count: u64,
1158 /// The first line from the namespace's "notes"
1159 #[serde(skip_serializing_if = "Option::is_none")]
1160 pub comment
: Option
<String
>,
1165 "backup": { type: BackupDir }
,
1168 #[derive(Serialize, Deserialize)]
1169 #[serde(rename_all = "kebab-case")]
1171 pub struct PruneListItem
{
1173 pub backup
: BackupDir
,
1199 #[derive(Serialize, Deserialize, Default)]
1200 /// Counts of groups/snapshots per BackupType.
1202 /// The counts for CT backups
1203 pub ct
: Option
<TypeCounts
>,
1204 /// The counts for Host backups
1205 pub host
: Option
<TypeCounts
>,
1206 /// The counts for VM backups
1207 pub vm
: Option
<TypeCounts
>,
1208 /// The counts for other backup types
1209 pub other
: Option
<TypeCounts
>,
1213 #[derive(Serialize, Deserialize, Default)]
1214 /// Backup Type group/snapshot counts.
1215 pub struct TypeCounts
{
1216 /// The number of groups of the type.
1218 /// The number of snapshots of the type.
1230 #[derive(Clone, Default, Serialize, Deserialize)]
1231 #[serde(rename_all = "kebab-case")]
1232 /// Garbage collection status.
1233 pub struct GarbageCollectionStatus
{
1234 pub upid
: Option
<String
>,
1235 /// Number of processed index files.
1236 pub index_file_count
: usize,
1237 /// Sum of bytes referred by index files.
1238 pub index_data_bytes
: u64,
1239 /// Bytes used on disk.
1240 pub disk_bytes
: u64,
1241 /// Chunks used on disk.
1242 pub disk_chunks
: usize,
1243 /// Sum of removed bytes.
1244 pub removed_bytes
: u64,
1245 /// Number of removed chunks.
1246 pub removed_chunks
: usize,
1247 /// Sum of pending bytes (pending removal - kept for safety).
1248 pub pending_bytes
: u64,
1249 /// Number of pending chunks (pending removal - kept for safety).
1250 pub pending_chunks
: usize,
1251 /// Number of chunks marked as .bad by verify that have been removed by GC.
1252 pub removed_bad
: usize,
1253 /// Number of chunks still marked as .bad after garbage collection.
1254 pub still_bad
: usize,
1260 type: GarbageCollectionStatus
,
1269 #[derive(Serialize, Deserialize)]
1270 #[serde(rename_all = "kebab-case")]
1271 /// Overall Datastore status and useful information.
1272 pub struct DataStoreStatus
{
1273 /// Total space (bytes).
1275 /// Used space (bytes).
1277 /// Available space (bytes).
1279 /// Status of last GC
1280 #[serde(skip_serializing_if = "Option::is_none")]
1281 pub gc_status
: Option
<GarbageCollectionStatus
>,
1282 /// Group/Snapshot counts
1283 #[serde(skip_serializing_if = "Option::is_none")]
1284 pub counts
: Option
<Counts
>,
1290 schema
: DATASTORE_SCHEMA
,
1297 description
: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
1302 #[derive(Serialize, Deserialize)]
1303 #[serde(rename_all = "kebab-case")]
1304 /// Status of a Datastore
1305 pub struct DataStoreStatusListItem
{
1307 /// The Size of the underlying storage in bytes. (-1 on error)
1309 /// The used bytes of the underlying storage. (-1 on error)
1311 /// The available bytes of the underlying storage. (-1 on error)
1313 /// A list of usages of the past (last Month).
1314 #[serde(skip_serializing_if = "Option::is_none")]
1315 pub history
: Option
<Vec
<Option
<f64>>>,
1316 /// History start time (epoch)
1317 #[serde(skip_serializing_if = "Option::is_none")]
1318 pub history_start
: Option
<u64>,
1319 /// History resolution (seconds)
1320 #[serde(skip_serializing_if = "Option::is_none")]
1321 pub history_delta
: Option
<u64>,
1322 /// Estimation of the UNIX epoch when the storage will be full.
1323 /// It's calculated via a simple Linear Regression (Least Squares) over the RRD data of the
1324 /// last Month. Missing if not enough data points are available yet. An estimate in the past
1325 /// means that usage is declining or not changing.
1326 #[serde(skip_serializing_if = "Option::is_none")]
1327 pub estimated_full_date
: Option
<i64>,
1328 /// An error description, for example, when the datastore could not be looked up
1329 #[serde(skip_serializing_if = "Option::is_none")]
1330 pub error
: Option
<String
>,
1331 /// Status of last GC
1332 #[serde(skip_serializing_if = "Option::is_none")]
1333 pub gc_status
: Option
<GarbageCollectionStatus
>,
1336 impl DataStoreStatusListItem
{
1337 pub fn empty(store
: &str, err
: Option
<String
>) -> Self {
1338 DataStoreStatusListItem
{
1339 store
: store
.to_owned(),
1344 history_start
: None
,
1345 history_delta
: None
,
1346 estimated_full_date
: None
,
1353 pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE
: ReturnType
= ReturnType
{
1355 schema
: &ArraySchema
::new(
1356 "Returns the list of snapshots.",
1357 &SnapshotListItem
::API_SCHEMA
,
1362 pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE
: ReturnType
= ReturnType
{
1364 schema
: &ArraySchema
::new(
1365 "Returns the list of archive files inside a backup snapshots.",
1366 &BackupContent
::API_SCHEMA
,
1371 pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE
: ReturnType
= ReturnType
{
1373 schema
: &ArraySchema
::new(
1374 "Returns the list of backup groups.",
1375 &GroupListItem
::API_SCHEMA
,
1380 pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE
: ReturnType
= ReturnType
{
1382 schema
: &ArraySchema
::new(
1383 "Returns the list of backup namespaces.",
1384 &NamespaceListItem
::API_SCHEMA
,
1389 pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE
: ReturnType
= ReturnType
{
1391 schema
: &ArraySchema
::new(
1392 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
1393 &PruneListItem
::API_SCHEMA
,
1401 schema
: DATASTORE_SCHEMA
,
1404 schema
: NS_MAX_DEPTH_SCHEMA
,
1409 #[derive(Serialize, Deserialize)]
1410 #[serde(rename_all = "kebab-case")]
1411 /// A namespace mapping
1412 pub struct TapeRestoreNamespace
{
1413 /// The source datastore
1415 /// The source namespace. Root namespace if omitted.
1416 pub source
: Option
<BackupNamespace
>,
1417 /// The target namespace,
1418 #[serde(skip_serializing_if = "Option::is_none")]
1419 pub target
: Option
<BackupNamespace
>,
1420 /// The (optional) recursion depth
1421 #[serde(skip_serializing_if = "Option::is_none")]
1422 pub max_depth
: Option
<usize>,
1425 pub const TAPE_RESTORE_NAMESPACE_SCHEMA
: Schema
= StringSchema
::new("A namespace mapping")
1426 .format(&ApiStringFormat
::PropertyString(
1427 &TapeRestoreNamespace
::API_SCHEMA
,
1431 /// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z'
1432 /// into a [`BackupNamespace`] and [`BackupDir`]
1433 pub fn parse_ns_and_snapshot(input
: &str) -> Result
<(BackupNamespace
, BackupDir
), Error
> {
1434 match input
.rmatch_indices('
/'
).nth(2) {
1436 let ns
= BackupNamespace
::from_path(&input
[..idx
])?
;
1437 let dir
: BackupDir
= input
[(idx
+ 1)..].parse()?
;
1440 None
=> Ok((BackupNamespace
::root(), input
.parse()?
)),
1444 /// Prints a [`BackupNamespace`] and [`BackupDir`] in the form of
1445 /// 'ns/foo/bar/ct/100/1970-01-01T00:00:00Z'
1446 pub fn print_ns_and_snapshot(ns
: &BackupNamespace
, dir
: &BackupDir
) -> String
{
1450 format
!("{}/{}", ns
.display_as_path(), dir
)
1454 /// Prints a Datastore name and [`BackupNamespace`] for logs/errors.
1455 pub fn print_store_and_ns(store
: &str, ns
: &BackupNamespace
) -> String
{
1457 format
!("datastore '{}', root namespace", store
)
1459 format
!("datastore '{}', namespace '{}'", store
, ns
)