// git.proxmox.com Git - proxmox-backup.git / pbs-api-types/src/datastore.rs
// (commit: clippy fixes)
1 use std::fmt;
2 use std::path::PathBuf;
3
4 use anyhow::{bail, format_err, Error};
5 use serde::{Deserialize, Serialize};
6
7 use proxmox_schema::{
8 api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
9 Schema, StringSchema, Updater, UpdaterType,
10 };
11
12 use crate::{
13 Authid, CryptMode, Fingerprint, MaintenanceMode, Userid, DATASTORE_NOTIFY_STRING_SCHEMA,
14 GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX,
15 SINGLE_LINE_COMMENT_SCHEMA, UPID,
16 };
17
18 const_regex! {
19 pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
20
21 pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
22
23 pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
24
25 pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
26
27 pub GROUP_PATH_REGEX = concat!(
28 r"^(", BACKUP_TYPE_RE!(), ")/",
29 r"(", BACKUP_ID_RE!(), r")$",
30 );
31
32 pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
33
34 pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
35 pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$");
36
37 pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
38 }
39
/// Format for SHA-256 chunk digests (64 hex characters).
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
    .min_length(1)
    .max_length(4096)
    .schema();

pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();

pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX);

pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
    .format(&BACKUP_ID_FORMAT)
    .schema();

// NOTE: keep this enum in sync with the `BackupType` enum further below.
pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("vm", "Virtual Machine Backup"),
        EnumEntry::new("ct", "Container Backup"),
        EnumEntry::new("host", "Host Backup"),
    ]))
    .schema();

pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch.)")
    .minimum(1)
    .schema();

pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
    .format(&BACKUP_GROUP_FORMAT)
    .schema();

/// The maximal, inclusive depth for namespaces from the root ns downwards
///
/// The datastore root name space is at depth zero (0), so we have in total eight (8) levels
pub const MAX_NAMESPACE_DEPTH: usize = 7;
/// Maximum length of a namespace's string form, including separating slashes.
pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256
pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.")
    .format(&BACKUP_NAMESPACE_FORMAT)
    .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256
    .schema();

pub const NS_MAX_DEPTH_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)")
        .minimum(0)
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .default(MAX_NAMESPACE_DEPTH as isize)
        .schema();

// Like NS_MAX_DEPTH_SCHEMA but without a default, so an omitted value can
// mean "automatic full recursion" at the call sites.
pub const NS_MAX_DEPTH_REDUCED_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion, empty == automatic full recursion, namespace depths reduce maximum allowed value)")
        .minimum(0)
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .schema();
98
99 pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
100 .format(&PROXMOX_SAFE_ID_FORMAT)
101 .min_length(3)
102 .max_length(32)
103 .schema();
104
105 pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
106 .format(&CHUNK_DIGEST_FORMAT)
107 .schema();
108
109 pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
110
111 pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
112 .format(&DATASTORE_MAP_FORMAT)
113 .min_length(3)
114 .max_length(65)
115 .type_text("(<source>=)?<target>")
116 .schema();
117
118 pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema =
119 ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema();
120
121 pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
122 "A list of Datastore mappings (or single datastore), comma separated. \
123 For example 'a=b,e' maps the source datastore 'a' to target 'b and \
124 all other sources to the default 'e'. If no default is given, only the \
125 specified sources are mapped.",
126 )
127 .format(&ApiStringFormat::PropertyString(
128 &DATASTORE_MAP_ARRAY_SCHEMA,
129 ))
130 .schema();
131
// Schemas for the prune "keep" options; all of them require at least one
// backup to be kept when set.
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.")
        .minimum(1)
        .schema();
159
#[api]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
// Serialized as "none" / "inode".
#[serde(rename_all = "lowercase")]
/// The order to sort chunks by
pub enum ChunkOrder {
    /// Iterate chunks in the index order
    None,
    /// Iterate chunks in inode order
    Inode,
}
170
171 #[api]
172 #[derive(PartialEq, Eq, Serialize, Deserialize)]
173 #[serde(rename_all = "lowercase")]
174 /// The level of syncing that is done when writing into a datastore.
175 pub enum DatastoreFSyncLevel {
176 /// No special fsync or syncfs calls are triggered. The system default dirty write back
177 /// mechanism ensures that data gets is flushed eventually via the `dirty_writeback_centisecs`
178 /// and `dirty_expire_centisecs` kernel sysctls, defaulting to ~ 30s.
179 ///
180 /// This mode provides generally the best performance, as all write back can happen async,
181 /// which reduces IO pressure.
182 /// But it may cause losing data on powerloss or system crash without any uninterruptible power
183 /// supply.
184 None,
185 /// Triggers a fsync after writing any chunk on the datastore. While this can slow down
186 /// backups significantly, depending on the underlying file system and storage used, it
187 /// will ensure fine-grained consistency. Depending on the exact setup, there might be no
188 /// benefits over the file system level sync, so if the setup allows it, you should prefer
189 /// that one. Despite the possible negative impact in performance, it's the most consistent
190 /// mode.
191 File,
192 /// Trigger a filesystem wide sync after all backup data got written but before finishing the
193 /// task. This allows that every finished backup is fully written back to storage
194 /// while reducing the impact on many file systems in contrast to the file level sync.
195 /// Depending on the setup, it might have a negative impact on unrelated write operations
196 /// of the underlying filesystem, but it is generally a good compromise between performance
197 /// and consitency.
198 Filesystem,
199 }
200
201 impl Default for DatastoreFSyncLevel {
202 fn default() -> Self {
203 DatastoreFSyncLevel::None
204 }
205 }
206
#[api(
    properties: {
        "chunk-order": {
            type: ChunkOrder,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Datastore tuning options
pub struct DatastoreTuning {
    /// Iterate chunks in this order
    pub chunk_order: Option<ChunkOrder>,
    // NOTE(review): `sync_level` has neither a doc comment nor a `properties`
    // entry above, unlike `chunk_order` — confirm the #[api] macro accepts a
    // field without a description here.
    pub sync_level: Option<DatastoreFSyncLevel>,
}
223
/// Property-string schema wrapping [`DatastoreTuning`]; this is the format of
/// the datastore config's `tuning` option.
pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options")
    .format(&ApiStringFormat::PropertyString(
        &DatastoreTuning::API_SCHEMA,
    ))
    .schema();
229
#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "notify": {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        keep: {
            type: crate::KeepOptions,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
        tuning: {
            optional: true,
            schema: DATASTORE_TUNING_STRING_SCHEMA,
        },
        "maintenance-mode": {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    // `name` and `path` identify the datastore; both are excluded from the
    // derived updater, so they cannot be changed via API updates.
    #[updater(skip)]
    pub name: String,

    #[updater(skip)]
    pub path: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_schedule: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub prune_schedule: Option<String>,

    // The prune keep-options are flattened directly into this struct's
    // (de)serialized representation.
    #[serde(flatten)]
    pub keep: crate::KeepOptions,

    /// If enabled, all backups will be verified right after completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify_new: Option<bool>,

    /// Send job email notification to this user
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,

    /// Send notification only for job errors
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify: Option<String>,

    /// Datastore tuning options
    // Stored as a property string; see DATASTORE_TUNING_STRING_SCHEMA.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tuning: Option<String>,

    /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in "
    // Stored as a property string; parsed on demand by get_maintenance_mode().
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance_mode: Option<String>,
}
319
320 impl DataStoreConfig {
321 pub fn new(name: String, path: String) -> Self {
322 Self {
323 name,
324 path,
325 comment: None,
326 gc_schedule: None,
327 prune_schedule: None,
328 keep: Default::default(),
329 verify_new: None,
330 notify_user: None,
331 notify: None,
332 tuning: None,
333 maintenance_mode: None,
334 }
335 }
336
337 pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> {
338 self.maintenance_mode
339 .as_ref()
340 .and_then(|str| MaintenanceMode::API_SCHEMA.parse_property_string(str).ok())
341 .and_then(|value| MaintenanceMode::deserialize(value).ok())
342 }
343 }
344
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        maintenance: {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
    pub store: String,
    // NOTE(review): unlike `maintenance`, `comment` is serialized even when
    // None — confirm whether that asymmetry is intentional.
    pub comment: Option<String>,
    /// If the datastore is in maintenance mode, information about it
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance: Option<String>,
}
371
#[api(
    properties: {
        "filename": {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    // Validated against BACKUP_ARCHIVE_NAME_SCHEMA (see properties above).
    pub filename: String,
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
}
395
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
// Serialized as "ok" / "failed".
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
    /// Verification was successful
    Ok,
    /// Verification reported one or more errors
    Failed,
}
406
#[api(
    properties: {
        upid: {
            type: UPID,
        },
        state: {
            type: VerifyState,
        },
    },
)]
#[derive(Serialize, Deserialize)]
// NOTE: no kebab-case rename here; fields serialize under their Rust names.
/// Task properties.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification. Enum.
    pub state: VerifyState,
}
425
/// A namespace provides a logical separation between backup groups from different domains
/// (cluster, sites, ...) where uniqueness cannot be guaranteed anymore. It allows users to share a
/// datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and
/// allows to form a hierarchy, for easier management and avoiding clashes between backup_ids.
///
/// NOTE: Namespaces are a logical boundary only, they do not provide a full secure separation as
/// the chunk store is still shared. So, users whom do not trust each other must not share a
/// datastore.
///
/// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid
/// clashes with backup group IDs and future backup_types and to have a clean separation between
/// the namespace directories and the ones from a backup snapshot.
#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)]
pub struct BackupNamespace {
    /// The namespace subdirectories without the `ns/` intermediate directories.
    inner: Vec<String>,

    /// Cache the total length for efficiency.
    // Kept in sync by push()/pop()/recalculate_len(); equals the length of
    // the "a/b/c" string form (components plus separating slashes).
    len: usize,
}
446
impl BackupNamespace {
    /// Returns a root namespace reference.
    pub const fn root() -> Self {
        Self {
            inner: Vec::new(),
            len: 0,
        }
    }

    /// True if this represents the root namespace.
    pub fn is_root(&self) -> bool {
        self.inner.is_empty()
    }

    /// Try to parse a string into a namespace.
    pub fn new(name: &str) -> Result<Self, Error> {
        let mut this = Self::root();

        // An empty string is the root namespace; splitting it would yield a
        // single (invalid) empty component.
        if name.is_empty() {
            return Ok(this);
        }

        for name in name.split('/') {
            this.push(name.to_string())?;
        }
        Ok(this)
    }

    /// Try to parse a file path string (where each sub-namespace is separated by an `ns`
    /// subdirectory) into a valid namespace.
    pub fn from_path(mut path: &str) -> Result<Self, Error> {
        let mut this = Self::root();
        loop {
            match path.strip_prefix("ns/") {
                // Consume one "ns/<component>" at a time.
                Some(next) => match next.find('/') {
                    Some(pos) => {
                        this.push(next[..pos].to_string())?;
                        path = &next[(pos + 1)..];
                    }
                    None => {
                        // Last component, no trailing slash.
                        this.push(next.to_string())?;
                        break;
                    }
                },
                // Leftover text that is not an `ns/` component is an error.
                None if !path.is_empty() => {
                    bail!("invalid component in namespace path at {:?}", path);
                }
                None => break,
            }
        }
        Ok(this)
    }

    /// Create a new Namespace attached to parent
    ///
    /// `name` must be a single level namespace ID, that is, no '/' is allowed.
    /// This rule also avoids confusion about the name being a NS or NS-path
    pub fn from_parent_ns(parent: &Self, name: String) -> Result<Self, Error> {
        let mut child = parent.to_owned();
        child.push(name)?;
        Ok(child)
    }

    /// Pop one level off the namespace hierarchy
    pub fn pop(&mut self) -> Option<String> {
        let dropped = self.inner.pop();
        if let Some(ref dropped) = dropped {
            // Also drop the separating slash; saturating_sub avoids underflow
            // when removing the first-level (unseparated) component.
            self.len = self.len.saturating_sub(dropped.len() + 1);
        }
        dropped
    }

    /// Get the namespace parent as owned BackupNamespace
    pub fn parent(&self) -> Self {
        if self.is_root() {
            return Self::root();
        }

        let mut parent = self.clone();
        parent.pop();

        parent
    }

    /// Create a new namespace directly from a vec.
    ///
    /// # Safety
    ///
    /// Invalid contents may lead to inaccessible backups.
    pub unsafe fn from_vec_unchecked(components: Vec<String>) -> Self {
        let mut this = Self {
            inner: components,
            len: 0,
        };
        this.recalculate_len();
        this
    }

    /// Recalculate the length.
    fn recalculate_len(&mut self) {
        self.len = self.inner.len().max(1) - 1; // a slash between each component
        for part in &self.inner {
            self.len += part.len();
        }
    }

    /// The hierarchical depth of the namespace, 0 means top-level.
    pub fn depth(&self) -> usize {
        self.inner.len()
    }

    /// The logical name and ID of the namespace.
    pub fn name(&self) -> String {
        self.to_string()
    }

    /// The actual relative backing path of the namespace on the datastore.
    pub fn path(&self) -> PathBuf {
        self.display_as_path().to_string().into()
    }

    /// Get the current namespace length.
    ///
    /// This includes separating slashes, but does not include the `ns/` intermediate directories.
    /// This is not the *path* length, but rather the length that would be produced via
    /// `.to_string()`.
    #[inline]
    pub fn name_len(&self) -> usize {
        self.len
    }

    /// Get the current namespace path length.
    ///
    /// This includes the `ns/` subdirectory strings.
    pub fn path_len(&self) -> usize {
        // Each component gains an "ns/" prefix: two chars plus one extra slash.
        self.name_len() + 3 * self.inner.len()
    }

    /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long.
    pub fn push(&mut self, subdir: String) -> Result<(), Error> {
        if subdir.contains('/') {
            bail!("namespace component contained a slash");
        }

        self.push_do(subdir)
    }

    /// Assumes `subdir` already does not contain any slashes.
    /// Performs remaining checks and updates the length.
    fn push_do(&mut self, subdir: String) -> Result<(), Error> {
        let depth = self.depth();
        // check for greater equal to account for the to be added subdir
        if depth >= MAX_NAMESPACE_DEPTH {
            bail!("namespace too deep, {depth} >= max {MAX_NAMESPACE_DEPTH}");
        }

        if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH {
            bail!("namespace length exceeded");
        }

        if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) {
            bail!("not a valid namespace component: {subdir}");
        }

        if !self.inner.is_empty() {
            self.len += 1; // separating slash
        }
        self.len += subdir.len();
        self.inner.push(subdir);
        Ok(())
    }

    /// Return an adapter which [`fmt::Display`]s as a path with `"ns/"` prefixes in front of every
    /// component.
    pub fn display_as_path(&self) -> BackupNamespacePath {
        BackupNamespacePath(self)
    }

    /// Iterate over the subdirectories.
    pub fn components(&self) -> impl Iterator<Item = &str> + '_ {
        self.inner.iter().map(String::as_str)
    }

    /// Map NS by replacing `source_prefix` with `target_prefix`
    pub fn map_prefix(
        &self,
        source_prefix: &BackupNamespace,
        target_prefix: &BackupNamespace,
    ) -> Result<Self, Error> {
        let suffix = self
            .inner
            .strip_prefix(&source_prefix.inner[..])
            .ok_or_else(|| {
                format_err!(
                    "Failed to map namespace - {source_prefix} is not a valid prefix of {self}",
                )
            })?;

        // Re-push each remaining component so depth/length limits are
        // re-checked relative to the new prefix.
        let mut new = target_prefix.clone();
        for item in suffix {
            new.push(item.clone())?;
        }
        Ok(new)
    }

    /// Check whether adding `depth` levels of sub-namespaces exceeds the max depth limit
    pub fn check_max_depth(&self, depth: usize) -> Result<(), Error> {
        let ns_depth = self.depth();
        if ns_depth + depth > MAX_NAMESPACE_DEPTH {
            bail!(
                "namespace '{self}'s depth and recursion depth exceed limit: {ns_depth} + {depth} > {MAX_NAMESPACE_DEPTH}",
            );
        }
        Ok(())
    }

    /// ACL object path for this namespace within `store`:
    /// `["datastore", <store>, <component>...]`.
    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        let mut path: Vec<&str> = vec!["datastore", store];

        if self.is_root() {
            path
        } else {
            path.extend(self.inner.iter().map(|comp| comp.as_str()));
            path
        }
    }

    /// Check whether this namespace contains another namespace.
    ///
    /// If so, the depth is returned.
    ///
    /// Example:
    /// ```
    /// # use pbs_api_types::BackupNamespace;
    /// let main: BackupNamespace = "a/b".parse().unwrap();
    /// let sub: BackupNamespace = "a/b/c/d".parse().unwrap();
    /// let other: BackupNamespace = "x/y".parse().unwrap();
    /// assert_eq!(main.contains(&main), Some(0));
    /// assert_eq!(main.contains(&sub), Some(2));
    /// assert_eq!(sub.contains(&main), None);
    /// assert_eq!(main.contains(&other), None);
    /// ```
    pub fn contains(&self, other: &BackupNamespace) -> Option<usize> {
        other
            .inner
            .strip_prefix(&self.inner[..])
            .map(|suffix| suffix.len())
    }
}
696
697 impl fmt::Display for BackupNamespace {
698 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
699 use std::fmt::Write;
700
701 let mut parts = self.inner.iter();
702 if let Some(first) = parts.next() {
703 f.write_str(first)?;
704 }
705 for part in parts {
706 f.write_char('/')?;
707 f.write_str(part)?;
708 }
709 Ok(())
710 }
711 }
712
// Deserialize via the FromStr impl below, so a namespace is represented as a
// plain "a/b/c" string in serialized form.
serde_plain::derive_deserialize_from_fromstr!(BackupNamespace, "valid backup namespace");

impl std::str::FromStr for BackupNamespace {
    type Err = Error;

    /// Parse a namespace from its "a/b/c" string form (see [`BackupNamespace::new`]).
    fn from_str(name: &str) -> Result<Self, Self::Err> {
        Self::new(name)
    }
}

// Serialize via the Display impl (the "a/b/c" form).
serde_plain::derive_serialize_from_display!(BackupNamespace);

impl ApiType for BackupNamespace {
    const API_SCHEMA: Schema = BACKUP_NAMESPACE_SCHEMA;
}
728
/// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`].
///
/// This implements [`fmt::Display`] such that it includes the `ns/` subdirectory prefix in front of
/// every component.
// Borrows the namespace; obtained via `BackupNamespace::display_as_path()`.
pub struct BackupNamespacePath<'a>(&'a BackupNamespace);
734
735 impl fmt::Display for BackupNamespacePath<'_> {
736 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
737 let mut sep = "ns/";
738 for part in &self.0.inner {
739 f.write_str(sep)?;
740 sep = "/ns/";
741 f.write_str(part)?;
742 }
743 Ok(())
744 }
745 }
746
#[api]
/// Backup types.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
// Serialized as "vm" / "ct" / "host"; keep BACKUP_TYPE_SCHEMA above in sync.
#[serde(rename_all = "lowercase")]
pub enum BackupType {
    /// Virtual machines.
    Vm,

    /// Containers.
    Ct,

    /// "Host" backups.
    Host,
    // NOTE: if you add new types, don't forget to adapt the iter below!
}
762
763 impl BackupType {
764 pub const fn as_str(&self) -> &'static str {
765 match self {
766 BackupType::Vm => "vm",
767 BackupType::Ct => "ct",
768 BackupType::Host => "host",
769 }
770 }
771
772 /// We used to have alphabetical ordering here when this was a string.
773 const fn order(self) -> u8 {
774 match self {
775 BackupType::Ct => 0,
776 BackupType::Host => 1,
777 BackupType::Vm => 2,
778 }
779 }
780
781 #[inline]
782 pub fn iter() -> impl Iterator<Item = BackupType> + Send + Sync + Unpin + 'static {
783 [BackupType::Vm, BackupType::Ct, BackupType::Host]
784 .iter()
785 .copied()
786 }
787 }
788
impl fmt::Display for BackupType {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to str's Display so formatter flags (width/alignment)
        // still apply — do not replace with f.write_str().
        fmt::Display::fmt(self.as_str(), f)
    }
}
795
796 impl std::str::FromStr for BackupType {
797 type Err = Error;
798
799 /// Parse a backup type.
800 fn from_str(ty: &str) -> Result<Self, Error> {
801 Ok(match ty {
802 "ct" => BackupType::Ct,
803 "host" => BackupType::Host,
804 "vm" => BackupType::Vm,
805 _ => bail!("invalid backup type {ty:?}"),
806 })
807 }
808 }
809
impl std::cmp::Ord for BackupType {
    #[inline]
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Compare via the explicit sort key to keep the historical
        // alphabetical (string-based) ordering: ct < host < vm.
        self.order().cmp(&other.order())
    }
}

impl std::cmp::PartialOrd for BackupType {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Consistent with `Ord` by construction.
        Some(self.cmp(other))
    }
}
822
#[api(
    properties: {
        "backup-type": { type: BackupType },
        "backup-id": { schema: BACKUP_ID_SCHEMA },
    },
)]
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// A backup group (without a data store).
pub struct BackupGroup {
    /// Backup type.
    #[serde(rename = "backup-type")]
    pub ty: BackupType,

    /// Backup id.
    // Validated against BACKUP_ID_SCHEMA (see properties above).
    #[serde(rename = "backup-id")]
    pub id: String,
}
841
842 impl BackupGroup {
843 pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
844 Self { ty, id: id.into() }
845 }
846
847 pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
848 use crate::GroupFilter;
849
850 match filter {
851 GroupFilter::Group(backup_group) => {
852 match backup_group.parse::<BackupGroup>() {
853 Ok(group) => *self == group,
854 Err(_) => false, // shouldn't happen if value is schema-checked
855 }
856 }
857 GroupFilter::BackupType(ty) => self.ty == *ty,
858 GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
859 }
860 }
861 }
862
impl AsRef<BackupGroup> for BackupGroup {
    #[inline]
    fn as_ref(&self) -> &Self {
        // Identity impl so generic code can accept `impl AsRef<BackupGroup>`.
        self
    }
}

impl From<(BackupType, String)> for BackupGroup {
    #[inline]
    fn from(data: (BackupType, String)) -> Self {
        Self {
            ty: data.0,
            id: data.1,
        }
    }
}
879
880 impl std::cmp::Ord for BackupGroup {
881 fn cmp(&self, other: &Self) -> std::cmp::Ordering {
882 let type_order = self.ty.cmp(&other.ty);
883 if type_order != std::cmp::Ordering::Equal {
884 return type_order;
885 }
886
887 // try to compare IDs numerically
888 let id_self = self.id.parse::<u64>();
889 let id_other = other.id.parse::<u64>();
890 match (id_self, id_other) {
891 (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
892 (Ok(_), Err(_)) => std::cmp::Ordering::Less,
893 (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
894 _ => self.id.cmp(&other.id),
895 }
896 }
897 }
898
impl std::cmp::PartialOrd for BackupGroup {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Consistent with `Ord` by construction.
        Some(self.cmp(other))
    }
}

impl fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // "<type>/<id>" — the form matched by GROUP_PATH_REGEX.
        write!(f, "{}/{}", self.ty, self.id)
    }
}
910
impl std::str::FromStr for BackupGroup {
    type Err = Error;

    /// Parse a backup group.
    ///
    /// This parses strings like `vm/100`.
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        // Capture 1 is the type, capture 2 the id (see GROUP_PATH_REGEX).
        Ok(Self {
            ty: cap.get(1).unwrap().as_str().parse()?,
            id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }
}
928
929 #[api(
930 properties: {
931 "group": { type: BackupGroup },
932 "backup-time": { schema: BACKUP_TIME_SCHEMA },
933 },
934 )]
935 /// Uniquely identify a Backup (relative to data store)
936 ///
937 /// We also call this a backup snaphost.
938 #[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
939 #[serde(rename_all = "kebab-case")]
940 pub struct BackupDir {
941 /// Backup group.
942 #[serde(flatten)]
943 pub group: BackupGroup,
944
945 /// Backup timestamp unix epoch.
946 #[serde(rename = "backup-time")]
947 pub time: i64,
948 }
949
impl AsRef<BackupGroup> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &BackupGroup {
        // Lets APIs taking `impl AsRef<BackupGroup>` accept a snapshot too.
        &self.group
    }
}

impl AsRef<BackupDir> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl From<(BackupGroup, i64)> for BackupDir {
    fn from(data: (BackupGroup, i64)) -> Self {
        Self {
            group: data.0,
            time: data.1,
        }
    }
}

impl From<(BackupType, String, i64)> for BackupDir {
    fn from(data: (BackupType, String, i64)) -> Self {
        Self {
            group: (data.0, data.1).into(),
            time: data.2,
        }
    }
}
981
982 impl BackupDir {
983 pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
984 where
985 T: Into<String>,
986 {
987 let time = proxmox_time::parse_rfc3339(backup_time_string)?;
988 let group = BackupGroup::new(ty, id.into());
989 Ok(Self { group, time })
990 }
991
992 #[inline]
993 pub fn ty(&self) -> BackupType {
994 self.group.ty
995 }
996
997 #[inline]
998 pub fn id(&self) -> &str {
999 &self.group.id
1000 }
1001 }
1002
impl std::str::FromStr for BackupDir {
    type Err = Error;

    /// Parse a snapshot path.
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        // Captures: 1 = type, 2 = id, 3 = RFC3339 timestamp.
        BackupDir::with_rfc3339(
            cap.get(1).unwrap().as_str().parse()?,
            cap.get(2).unwrap().as_str(),
            cap.get(3).unwrap().as_str(),
        )
    }
}
1021
impl fmt::Display for BackupDir {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // FIXME: log error?
        // Only fails for epochs that can't be rendered as RFC3339; mapped to
        // fmt::Error since Display has no richer error channel.
        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
        write!(f, "{}/{}", self.group, time)
    }
}
1029
/// Used when both a backup group or a directory can be valid.
pub enum BackupPart {
    /// A whole backup group, e.g. `vm/100`.
    Group(BackupGroup),
    /// A single snapshot, e.g. `vm/100/2020-06-15T05:18:33Z`.
    Dir(BackupDir),
}
1035
1036 impl std::str::FromStr for BackupPart {
1037 type Err = Error;
1038
1039 /// Parse a path which can be either a backup group or a snapshot dir.
1040 fn from_str(path: &str) -> Result<Self, Error> {
1041 let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
1042 .captures(path)
1043 .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
1044
1045 let ty = cap.get(1).unwrap().as_str().parse()?;
1046 let id = cap.get(2).unwrap().as_str().to_string();
1047
1048 Ok(match cap.get(3) {
1049 Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?),
1050 None => BackupPart::Group((ty, id).into()),
1051 })
1052 }
1053 }
1054
#[api(
    properties: {
        "backup": { type: BackupDir },
        comment: {
            schema: SINGLE_LINE_COMMENT_SCHEMA,
            optional: true,
        },
        verification: {
            type: SnapshotVerifyState,
            optional: true,
        },
        fingerprint: {
            type: String,
            optional: true,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about backup snapshot.
pub struct SnapshotListItem {
    // The snapshot identity (group + time) is flattened into this struct.
    #[serde(flatten)]
    pub backup: BackupDir,
    /// The first line from manifest "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// The result of the last run verify task
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verification: Option<SnapshotVerifyState>,
    /// Fingerprint of encryption key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<Fingerprint>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
    /// The owner of the snapshots group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// Protection from prunes
    // Defaults to false when missing from the serialized form.
    #[serde(default)]
    pub protected: bool,
}
1108
#[api(
    properties: {
        "backup": { type: BackupGroup },
        "last-backup": { schema: BACKUP_TIME_SCHEMA },
        "backup-count": {
            type: Integer,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
    // Flattened: the group's own fields appear inline in the serialized
    // object rather than nested under a "backup" key.
    #[serde(flatten)]
    pub backup: BackupGroup,

    // Epoch time of the last backup; schema comes from the "last-backup"
    // property declared above (BACKUP_TIME_SCHEMA).
    pub last_backup: i64,
    /// Number of contained snapshots
    pub backup_count: u64,
    /// List of contained archive files.
    pub files: Vec<String>,
    /// The owner of group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// The first line from group "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
1146
// Empty #[api()]: all schema information is derived from the field types and
// their doc comments.
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup namespace.
pub struct NamespaceListItem {
    /// A backup namespace
    pub ns: BackupNamespace,

    // TODO?
    //pub group_count: u64,
    //pub ns_count: u64,
    /// The first line from the namespace's "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
1162
#[api(
    properties: {
        "backup": { type: BackupDir },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Prune result.
pub struct PruneListItem {
    // Flattened: the snapshot's fields appear inline in the serialized object.
    #[serde(flatten)]
    pub backup: BackupDir,

    /// Keep snapshot
    // `true`: the snapshot is kept by the prune run; `false`: it is removed.
    pub keep: bool,
}
1178
#[api(
    properties: {
        ct: {
            type: TypeCounts,
            optional: true,
        },
        host: {
            type: TypeCounts,
            optional: true,
        },
        vm: {
            type: TypeCounts,
            optional: true,
        },
        other: {
            type: TypeCounts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
// Every entry is optional; the derived `Default` yields all-`None`.
pub struct Counts {
    /// The counts for CT backups
    pub ct: Option<TypeCounts>,
    /// The counts for Host backups
    pub host: Option<TypeCounts>,
    /// The counts for VM backups
    pub vm: Option<TypeCounts>,
    /// The counts for other backup types
    pub other: Option<TypeCounts>,
}
1211
#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
// Plain counters; the derived `Default` initializes both to zero.
pub struct TypeCounts {
    /// The number of groups of the type.
    pub groups: u64,
    /// The number of snapshots of the type.
    pub snapshots: u64,
}
1221
#[api(
    properties: {
        "upid": {
            optional: true,
            type: UPID,
        },
    },
)]
#[derive(Clone, Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
    // UPID of the associated GC task; schema/description come from the "upid"
    // property declared above (type UPID, optional).
    pub upid: Option<String>,
    /// Number of processed index files.
    pub index_file_count: usize,
    /// Sum of bytes referred by index files.
    pub index_data_bytes: u64,
    /// Bytes used on disk.
    pub disk_bytes: u64,
    /// Chunks used on disk.
    pub disk_chunks: usize,
    /// Sum of removed bytes.
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
    /// Sum of pending bytes (pending removal - kept for safety).
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
}
1256
#[api(
    properties: {
        "gc-status": {
            type: GarbageCollectionStatus,
            optional: true,
        },
        counts: {
            type: Counts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
    /// Status of last GC
    // Omitted from the serialized output when not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
    /// Group/Snapshot counts
    // Omitted from the serialized output when not available.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub counts: Option<Counts>,
}
1286
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        history: {
            type: Array,
            optional: true,
            items: {
                type: Number,
                description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
            }
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Status of a Datastore
pub struct DataStoreStatusListItem {
    // Datastore name; schema comes from the "store" property declared above.
    pub store: String,
    /// The Size of the underlying storage in bytes. (-1 on error)
    pub total: i64,
    /// The used bytes of the underlying storage. (-1 on error)
    pub used: i64,
    /// The available bytes of the underlying storage. (-1 on error)
    pub avail: i64,
    /// A list of usages of the past (last Month).
    // Inner `None` entries represent missing data points (null in JSON).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history: Option<Vec<Option<f64>>>,
    /// History start time (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_start: Option<u64>,
    /// History resolution (seconds)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_delta: Option<u64>,
    /// Estimation of the UNIX epoch when the storage will be full.
    /// It's calculated via a simple Linear Regression (Least Squares) over the RRD data of the
    /// last Month. Missing if not enough data points are available yet. An estimate in the past
    /// means that usage is declining or not changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub estimated_full_date: Option<i64>,
    /// An error description, for example, when the datastore could not be looked up
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
}
1335
1336 impl DataStoreStatusListItem {
1337 pub fn empty(store: &str, err: Option<String>) -> Self {
1338 DataStoreStatusListItem {
1339 store: store.to_owned(),
1340 total: -1,
1341 used: -1,
1342 avail: -1,
1343 history: None,
1344 history_start: None,
1345 history_delta: None,
1346 estimated_full_date: None,
1347 error: err,
1348 gc_status: None,
1349 }
1350 }
1351 }
1352
/// Return type of the API handler listing the snapshots of a datastore.
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots.",
        &SnapshotListItem::API_SCHEMA,
    )
    .schema(),
};
1361
1362 pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
1363 optional: false,
1364 schema: &ArraySchema::new(
1365 "Returns the list of archive files inside a backup snapshots.",
1366 &BackupContent::API_SCHEMA,
1367 )
1368 .schema(),
1369 };
1370
/// Return type of the API handler listing the backup groups of a datastore.
pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup groups.",
        &GroupListItem::API_SCHEMA,
    )
    .schema(),
};
1379
/// Return type of the API handler listing the namespaces of a datastore.
pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup namespaces.",
        &NamespaceListItem::API_SCHEMA,
    )
    .schema(),
};
1388
1389 pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
1390 optional: false,
1391 schema: &ArraySchema::new(
1392 "Returns the list of snapshots and a flag indicating if there are kept or removed.",
1393 &PruneListItem::API_SCHEMA,
1394 )
1395 .schema(),
1396 };
1397
1398 #[api(
1399 properties: {
1400 store: {
1401 schema: DATASTORE_SCHEMA,
1402 },
1403 "max-depth": {
1404 schema: NS_MAX_DEPTH_SCHEMA,
1405 optional: true,
1406 },
1407 },
1408 )]
1409 #[derive(Serialize, Deserialize)]
1410 #[serde(rename_all = "kebab-case")]
1411 /// A namespace mapping
1412 pub struct TapeRestoreNamespace {
1413 /// The source datastore
1414 pub store: String,
1415 /// The source namespace. Root namespace if omitted.
1416 pub source: Option<BackupNamespace>,
1417 /// The target namespace,
1418 #[serde(skip_serializing_if = "Option::is_none")]
1419 pub target: Option<BackupNamespace>,
1420 /// The (optional) recursion depth
1421 #[serde(skip_serializing_if = "Option::is_none")]
1422 pub max_depth: Option<usize>,
1423 }
1424
/// Schema for a single namespace mapping, encoded as a property string of
/// [`TapeRestoreNamespace`].
pub const TAPE_RESTORE_NAMESPACE_SCHEMA: Schema = StringSchema::new("A namespace mapping")
    .format(&ApiStringFormat::PropertyString(
        &TapeRestoreNamespace::API_SCHEMA,
    ))
    .schema();
1430
1431 /// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z'
1432 /// into a [`BackupNamespace`] and [`BackupDir`]
1433 pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir), Error> {
1434 match input.rmatch_indices('/').nth(2) {
1435 Some((idx, _)) => {
1436 let ns = BackupNamespace::from_path(&input[..idx])?;
1437 let dir: BackupDir = input[(idx + 1)..].parse()?;
1438 Ok((ns, dir))
1439 }
1440 None => Ok((BackupNamespace::root(), input.parse()?)),
1441 }
1442 }
1443
1444 /// Prints a [`BackupNamespace`] and [`BackupDir`] in the form of
1445 /// 'ns/foo/bar/ct/100/1970-01-01T00:00:00Z'
1446 pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String {
1447 if ns.is_root() {
1448 dir.to_string()
1449 } else {
1450 format!("{}/{}", ns.display_as_path(), dir)
1451 }
1452 }
1453
1454 /// Prints a Datastore name and [`BackupNamespace`] for logs/errors.
1455 pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
1456 if ns.is_root() {
1457 format!("datastore '{}', root namespace", store)
1458 } else {
1459 format!("datastore '{}', namespace '{}'", store, ns)
1460 }
1461 }