use std::fmt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use const_format::concatcp;
use serde::{Deserialize, Serialize};

use proxmox_schema::{
    api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
    Schema, StringSchema, Updater, UpdaterType,
};

use crate::{
    Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, MaintenanceType, Userid,
    BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA,
    GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA,
    SNAPSHOT_PATH_REGEX_STR, UPID,
};

const_regex! {
    pub BACKUP_NAMESPACE_REGEX = concatcp!(r"^", BACKUP_NS_RE, r"$");

    pub BACKUP_TYPE_REGEX = concatcp!(r"^(", BACKUP_TYPE_RE, r")$");

    pub BACKUP_ID_REGEX = concatcp!(r"^", BACKUP_ID_RE, r"$");

    pub BACKUP_DATE_REGEX = concatcp!(r"^", BACKUP_TIME_RE, r"$");

    pub GROUP_PATH_REGEX = concatcp!(
        r"^(", BACKUP_TYPE_RE, ")/",
        r"(", BACKUP_ID_RE, r")$",
    );

    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";

    pub SNAPSHOT_PATH_REGEX = concatcp!(r"^", SNAPSHOT_PATH_REGEX_STR, r"$");
    pub GROUP_OR_SNAPSHOT_PATH_REGEX = concatcp!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR, r"$");

    pub DATASTORE_MAP_REGEX = concatcp!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR, r"=)?", PROXMOX_SAFE_ID_REGEX_STR, r"$");
}

pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
    .min_length(1)
    .max_length(4096)
    .schema();

pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();

pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX);

pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
    .format(&BACKUP_ID_FORMAT)
    .schema();

pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("vm", "Virtual Machine Backup"),
        EnumEntry::new("ct", "Container Backup"),
        EnumEntry::new("host", "Host Backup"),
    ]))
    .schema();

pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch).")
    .minimum(1)
    .schema();

pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
    .format(&BACKUP_GROUP_FORMAT)
    .schema();

/// The maximal, inclusive depth for namespaces from the root namespace downwards.
///
/// The datastore root namespace is at depth zero (0), so there are eight (8) levels in total.
pub const MAX_NAMESPACE_DEPTH: usize = 7;
pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256
pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.")
    .format(&BACKUP_NAMESPACE_FORMAT)
    .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256
    .schema();

pub const NS_MAX_DEPTH_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)")
        .minimum(0)
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .default(MAX_NAMESPACE_DEPTH as isize)
        .schema();

pub const NS_MAX_DEPTH_REDUCED_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion, empty == automatic full recursion, namespace depths reduce maximum allowed value)")
        .minimum(0)
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .schema();

pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
    .format(&CHUNK_DIGEST_FORMAT)
    .schema();

pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);

pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
    .format(&DATASTORE_MAP_FORMAT)
    .min_length(3)
    .max_length(65)
    .type_text("(<source>=)?<target>")
    .schema();

pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema();

pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of datastore mappings (or a single datastore), comma separated. \
    For example 'a=b,e' maps the source datastore 'a' to target 'b' and \
    all other sources to the default 'e'. If no default is given, only the \
    specified sources are mapped.",
)
.format(&ApiStringFormat::PropertyString(
    &DATASTORE_MAP_ARRAY_SCHEMA,
))
.schema();

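#[cfg(test)]
mod datastore_map_tests {
    // Illustrative sketch (not part of the original API surface): the mapping
    // grammar is `(<source>=)?<target>`, so both a bare default target and an
    // explicit `source=target` pair must match the regex above.
    use super::DATASTORE_MAP_REGEX;

    #[test]
    fn mapping_regex_accepts_both_forms() {
        assert!(DATASTORE_MAP_REGEX.is_match("e")); // default target only
        assert!(DATASTORE_MAP_REGEX.is_match("a=b")); // explicit source=target
        assert!(!DATASTORE_MAP_REGEX.is_match("a=b=c")); // only one '=' allowed
    }
}
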
pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.")
        .minimum(1)
        .schema();

#[api]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The order to sort chunks by
pub enum ChunkOrder {
    /// Iterate chunks in the index order
    None,
    /// Iterate chunks in inode order
    #[default]
    Inode,
}

#[api]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The level of syncing that is done when writing into a datastore.
pub enum DatastoreFSyncLevel {
    /// No special fsync or syncfs calls are triggered. The system default dirty write back
    /// mechanism ensures that data gets flushed eventually via the `dirty_writeback_centisecs`
    /// and `dirty_expire_centisecs` kernel sysctls, defaulting to ~ 30s.
    ///
    /// This mode generally provides the best performance, as all write back can happen async,
    /// which reduces IO pressure.
    /// But it may cause data loss on power loss or system crash if no uninterruptible power
    /// supply is present.
    None,
    /// Triggers a fsync after writing any chunk on the datastore. While this can slow down
    /// backups significantly, depending on the underlying file system and storage used, it
    /// will ensure fine-grained consistency. Depending on the exact setup, there might be no
    /// benefits over the file system level sync, so if the setup allows it, you should prefer
    /// that one. Despite the possible negative impact on performance, it's the most consistent
    /// mode.
    File,
    /// Trigger a filesystem wide sync after all backup data got written but before finishing the
    /// task. This ensures that every finished backup is fully written back to storage
    /// while reducing the impact on many file systems in contrast to the file level sync.
    /// Depending on the setup, it might have a negative impact on unrelated write operations
    /// of the underlying filesystem, but it is generally a good compromise between performance
    /// and consistency.
    #[default]
    Filesystem,
}

#[api(
    properties: {
        "chunk-order": {
            type: ChunkOrder,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Datastore tuning options
pub struct DatastoreTuning {
    /// Iterate chunks in this order
    #[serde(skip_serializing_if = "Option::is_none")]
    pub chunk_order: Option<ChunkOrder>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_level: Option<DatastoreFSyncLevel>,
}

pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options")
    .format(&ApiStringFormat::PropertyString(
        &DatastoreTuning::API_SCHEMA,
    ))
    .schema();

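#[cfg(test)]
mod datastore_tuning_tests {
    // Hedged sketch: tuning options travel as a property string like
    // "chunk-order=inode,sync-level=filesystem"; deserializing through the
    // schema deserializer (the same mechanism `get_maintenance_mode` uses
    // further down) should yield the typed struct.
    use super::*;

    #[test]
    fn parse_tuning_property_string() {
        let tuning = DatastoreTuning::deserialize(proxmox_schema::de::SchemaDeserializer::new(
            "chunk-order=inode,sync-level=filesystem",
            &DatastoreTuning::API_SCHEMA,
        ))
        .expect("valid tuning property string");
        assert_eq!(tuning.chunk_order, Some(ChunkOrder::Inode));
        assert_eq!(tuning.sync_level, Some(DatastoreFSyncLevel::Filesystem));
    }
}
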
#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "notify": {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        keep: {
            type: crate::KeepOptions,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
        tuning: {
            optional: true,
            schema: DATASTORE_TUNING_STRING_SCHEMA,
        },
        "maintenance-mode": {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    #[updater(skip)]
    pub name: String,

    #[updater(skip)]
    pub path: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_schedule: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub prune_schedule: Option<String>,

    #[serde(flatten)]
    pub keep: crate::KeepOptions,

    /// If enabled, all new backups will be verified right after completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify_new: Option<bool>,

    /// Send job email notification to this user
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,

    /// Send notification only for job errors
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify: Option<String>,

    /// Datastore tuning options
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tuning: Option<String>,

    /// Maintenance mode, the type is either 'offline' or 'read-only'; the message should be enclosed in "
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance_mode: Option<String>,
}

impl DataStoreConfig {
    pub fn new(name: String, path: String) -> Self {
        Self {
            name,
            path,
            comment: None,
            gc_schedule: None,
            prune_schedule: None,
            keep: Default::default(),
            verify_new: None,
            notify_user: None,
            notify: None,
            tuning: None,
            maintenance_mode: None,
        }
    }

    pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> {
        self.maintenance_mode.as_ref().and_then(|str| {
            MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new(
                str,
                &MaintenanceMode::API_SCHEMA,
            ))
            .ok()
        })
    }

    pub fn set_maintenance_mode(&mut self, new_mode: Option<MaintenanceMode>) -> Result<(), Error> {
        let current_type = self.get_maintenance_mode().map(|mode| mode.ty);
        let new_type = new_mode.as_ref().map(|mode| mode.ty);

        match current_type {
            Some(MaintenanceType::ReadOnly) => { /* always OK */ }
            Some(MaintenanceType::Offline) => { /* always OK */ }
            Some(MaintenanceType::Delete) => {
                match new_type {
                    Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ }
                    _ => {
                        bail!("datastore is being deleted")
                    }
                }
            }
            None => { /* always OK */ }
        }

        let new_mode = match new_mode {
            Some(new_mode) => Some(
                proxmox_schema::property_string::PropertyString::new(new_mode)
                    .to_property_string()?,
            ),
            None => None,
        };

        self.maintenance_mode = new_mode;

        Ok(())
    }
}

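#[cfg(test)]
mod datastore_maintenance_tests {
    // Hedged sketch: assumes `MaintenanceMode`'s property-string layout from
    // this crate's maintenance module (e.g. "type=offline" for the `ty` field).
    use super::*;

    #[test]
    fn maintenance_mode_roundtrip() {
        let mut config = DataStoreConfig::new("store".to_string(), "/path".to_string());
        assert!(config.get_maintenance_mode().is_none());

        config.maintenance_mode = Some("type=offline".to_string());
        let mode = config.get_maintenance_mode().expect("parsable mode");
        assert!(matches!(mode.ty, MaintenanceType::Offline));

        // Leaving maintenance mode again is always allowed for 'offline'.
        config.set_maintenance_mode(None).expect("clearing must succeed");
        assert!(config.get_maintenance_mode().is_none());
    }
}
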
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        maintenance: {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        }
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
    pub store: String,
    pub comment: Option<String>,
    /// If the datastore is in maintenance mode, information about it
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance: Option<String>,
}

#[api(
    properties: {
        "filename": {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    pub filename: String,
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
    /// Verification was successful
    Ok,
    /// Verification reported one or more errors
    Failed,
}

#[api(
    properties: {
        upid: {
            type: UPID,
        },
        state: {
            type: VerifyState,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Verification state of a snapshot.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification
    pub state: VerifyState,
}

/// A namespace provides a logical separation between backup groups from different domains
/// (clusters, sites, ...) where uniqueness cannot be guaranteed anymore. It allows users to share
/// a datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and
/// to form a hierarchy, for easier management and to avoid clashes between backup_ids.
///
/// NOTE: Namespaces are a logical boundary only, they do not provide a fully secure separation as
/// the chunk store is still shared. So, users who do not trust each other must not share a
/// datastore.
///
/// Implementation note: The path a namespace resolves to is always prefixed with `/ns` to avoid
/// clashes with backup group IDs and future backup_types and to have a clean separation between
/// the namespace directories and the ones from a backup snapshot.
#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)]
pub struct BackupNamespace {
    /// The namespace subdirectories without the `ns/` intermediate directories.
    inner: Vec<String>,

    /// Cache the total length for efficiency.
    len: usize,
}

impl BackupNamespace {
    /// Returns a root namespace reference.
    pub const fn root() -> Self {
        Self {
            inner: Vec::new(),
            len: 0,
        }
    }

    /// True if this represents the root namespace.
    pub fn is_root(&self) -> bool {
        self.inner.is_empty()
    }

    /// Try to parse a string into a namespace.
    pub fn new(name: &str) -> Result<Self, Error> {
        let mut this = Self::root();

        if name.is_empty() {
            return Ok(this);
        }

        for name in name.split('/') {
            this.push(name.to_string())?;
        }
        Ok(this)
    }

    /// Try to parse a file path string (where each sub-namespace is separated by an `ns`
    /// subdirectory) into a valid namespace.
    pub fn from_path(mut path: &str) -> Result<Self, Error> {
        let mut this = Self::root();
        loop {
            match path.strip_prefix("ns/") {
                Some(next) => match next.find('/') {
                    Some(pos) => {
                        this.push(next[..pos].to_string())?;
                        path = &next[(pos + 1)..];
                    }
                    None => {
                        this.push(next.to_string())?;
                        break;
                    }
                },
                None if !path.is_empty() => {
                    bail!("invalid component in namespace path at {:?}", path);
                }
                None => break,
            }
        }
        Ok(this)
    }

    /// Create a new Namespace attached to parent
    ///
    /// `name` must be a single level namespace ID, that is, no '/' is allowed.
    /// This rule also avoids confusion about the name being a NS or NS-path
    pub fn from_parent_ns(parent: &Self, name: String) -> Result<Self, Error> {
        let mut child = parent.to_owned();
        child.push(name)?;
        Ok(child)
    }

    /// Pop one level off the namespace hierarchy
    pub fn pop(&mut self) -> Option<String> {
        let dropped = self.inner.pop();
        if let Some(ref dropped) = dropped {
            self.len = self.len.saturating_sub(dropped.len() + 1);
        }
        dropped
    }

    /// Get the namespace parent as owned BackupNamespace
    pub fn parent(&self) -> Self {
        if self.is_root() {
            return Self::root();
        }

        let mut parent = self.clone();
        parent.pop();

        parent
    }

    /// Create a new namespace directly from a vec.
    ///
    /// # Safety
    ///
    /// Invalid contents may lead to inaccessible backups.
    pub unsafe fn from_vec_unchecked(components: Vec<String>) -> Self {
        let mut this = Self {
            inner: components,
            len: 0,
        };
        this.recalculate_len();
        this
    }

    /// Recalculate the length.
    fn recalculate_len(&mut self) {
        self.len = self.inner.len().max(1) - 1; // a slash between each component
        for part in &self.inner {
            self.len += part.len();
        }
    }

    /// The hierarchical depth of the namespace, 0 means top-level.
    pub fn depth(&self) -> usize {
        self.inner.len()
    }

    /// The logical name and ID of the namespace.
    pub fn name(&self) -> String {
        self.to_string()
    }

    /// The actual relative backing path of the namespace on the datastore.
    pub fn path(&self) -> PathBuf {
        self.display_as_path().to_string().into()
    }

    /// Get the current namespace length.
    ///
    /// This includes separating slashes, but does not include the `ns/` intermediate directories.
    /// This is not the *path* length, but rather the length that would be produced via
    /// `.to_string()`.
    #[inline]
    pub fn name_len(&self) -> usize {
        self.len
    }

    /// Get the current namespace path length.
    ///
    /// This includes the `ns/` subdirectory strings.
    pub fn path_len(&self) -> usize {
        self.name_len() + 3 * self.inner.len()
    }

    /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long.
    pub fn push(&mut self, subdir: String) -> Result<(), Error> {
        if subdir.contains('/') {
            bail!("namespace component contained a slash");
        }

        self.push_do(subdir)
    }

    /// Assumes `subdir` already does not contain any slashes.
    /// Performs remaining checks and updates the length.
    fn push_do(&mut self, subdir: String) -> Result<(), Error> {
        let depth = self.depth();
        // check with '>=' to account for the subdir that is about to be added
        if depth >= MAX_NAMESPACE_DEPTH {
            bail!("namespace too deep, {depth} >= max {MAX_NAMESPACE_DEPTH}");
        }

        if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH {
            bail!("namespace length exceeded");
        }

        if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) {
            bail!("not a valid namespace component: {subdir}");
        }

        if !self.inner.is_empty() {
            self.len += 1; // separating slash
        }
        self.len += subdir.len();
        self.inner.push(subdir);
        Ok(())
    }

    /// Return an adapter which [`fmt::Display`]s as a path with `"ns/"` prefixes in front of every
    /// component.
    pub fn display_as_path(&self) -> BackupNamespacePath {
        BackupNamespacePath(self)
    }

    /// Iterate over the subdirectories.
    pub fn components(&self) -> impl Iterator<Item = &str> + '_ {
        self.inner.iter().map(String::as_str)
    }

    /// Map NS by replacing `source_prefix` with `target_prefix`
    pub fn map_prefix(
        &self,
        source_prefix: &BackupNamespace,
        target_prefix: &BackupNamespace,
    ) -> Result<Self, Error> {
        let suffix = self
            .inner
            .strip_prefix(&source_prefix.inner[..])
            .ok_or_else(|| {
                format_err!(
                    "Failed to map namespace - {source_prefix} is not a valid prefix of {self}",
                )
            })?;

        let mut new = target_prefix.clone();
        for item in suffix {
            new.push(item.clone())?;
        }
        Ok(new)
    }

    /// Check whether adding `depth` levels of sub-namespaces exceeds the max depth limit
    pub fn check_max_depth(&self, depth: usize) -> Result<(), Error> {
        let ns_depth = self.depth();
        if ns_depth + depth > MAX_NAMESPACE_DEPTH {
            bail!(
                "namespace '{self}'s depth and recursion depth exceed limit: {ns_depth} + {depth} > {MAX_NAMESPACE_DEPTH}",
            );
        }
        Ok(())
    }

    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        let mut path: Vec<&str> = vec!["datastore", store];

        if self.is_root() {
            path
        } else {
            path.extend(self.inner.iter().map(|comp| comp.as_str()));
            path
        }
    }

    /// Check whether this namespace contains another namespace.
    ///
    /// If so, the depth is returned.
    ///
    /// Example:
    /// ```
    /// # use pbs_api_types::BackupNamespace;
    /// let main: BackupNamespace = "a/b".parse().unwrap();
    /// let sub: BackupNamespace = "a/b/c/d".parse().unwrap();
    /// let other: BackupNamespace = "x/y".parse().unwrap();
    /// assert_eq!(main.contains(&main), Some(0));
    /// assert_eq!(main.contains(&sub), Some(2));
    /// assert_eq!(sub.contains(&main), None);
    /// assert_eq!(main.contains(&other), None);
    /// ```
    pub fn contains(&self, other: &BackupNamespace) -> Option<usize> {
        other
            .inner
            .strip_prefix(&self.inner[..])
            .map(|suffix| suffix.len())
    }
}

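#[cfg(test)]
mod backup_namespace_tests {
    // Illustrative sketch: the two parse entry points accept different
    // spellings of the same namespace, and `display_as_path` adds the `ns/`
    // intermediate directories back in.
    use super::*;

    #[test]
    fn parse_and_display() {
        let ns = BackupNamespace::new("a/b/c").expect("valid namespace");
        let from_path = BackupNamespace::from_path("ns/a/ns/b/ns/c").expect("valid path");
        assert_eq!(ns, from_path);

        assert_eq!(ns.to_string(), "a/b/c");
        assert_eq!(ns.display_as_path().to_string(), "ns/a/ns/b/ns/c");
        assert_eq!(ns.depth(), 3);
        assert_eq!(ns.name_len(), "a/b/c".len());
    }

    #[test]
    fn depth_limit() {
        let mut ns = BackupNamespace::root();
        for i in 0..MAX_NAMESPACE_DEPTH {
            ns.push(format!("l{i}")).expect("within depth limit");
        }
        assert!(ns.push("too-deep".to_string()).is_err());
    }
}
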
impl fmt::Display for BackupNamespace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use std::fmt::Write;

        let mut parts = self.inner.iter();
        if let Some(first) = parts.next() {
            f.write_str(first)?;
        }
        for part in parts {
            f.write_char('/')?;
            f.write_str(part)?;
        }
        Ok(())
    }
}

serde_plain::derive_deserialize_from_fromstr!(BackupNamespace, "valid backup namespace");

impl std::str::FromStr for BackupNamespace {
    type Err = Error;

    fn from_str(name: &str) -> Result<Self, Self::Err> {
        Self::new(name)
    }
}

serde_plain::derive_serialize_from_display!(BackupNamespace);

impl ApiType for BackupNamespace {
    const API_SCHEMA: Schema = BACKUP_NAMESPACE_SCHEMA;
}

/// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`].
///
/// This implements [`fmt::Display`] such that it includes the `ns/` subdirectory prefix in front of
/// every component.
pub struct BackupNamespacePath<'a>(&'a BackupNamespace);

impl fmt::Display for BackupNamespacePath<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut sep = "ns/";
        for part in &self.0.inner {
            f.write_str(sep)?;
            sep = "/ns/";
            f.write_str(part)?;
        }
        Ok(())
    }
}

#[api]
/// Backup types.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum BackupType {
    /// Virtual machines.
    Vm,

    /// Containers.
    Ct,

    /// "Host" backups.
    Host,
    // NOTE: if you add new types, don't forget to adapt the iter below!
}

impl BackupType {
    pub const fn as_str(&self) -> &'static str {
        match self {
            BackupType::Vm => "vm",
            BackupType::Ct => "ct",
            BackupType::Host => "host",
        }
    }

    /// We used to have alphabetical ordering here when this was a string.
    const fn order(self) -> u8 {
        match self {
            BackupType::Ct => 0,
            BackupType::Host => 1,
            BackupType::Vm => 2,
        }
    }

    #[inline]
    pub fn iter() -> impl Iterator<Item = BackupType> + Send + Sync + Unpin + 'static {
        [BackupType::Vm, BackupType::Ct, BackupType::Host]
            .iter()
            .copied()
    }
}

impl fmt::Display for BackupType {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.as_str(), f)
    }
}

impl std::str::FromStr for BackupType {
    type Err = Error;

    /// Parse a backup type.
    fn from_str(ty: &str) -> Result<Self, Error> {
        Ok(match ty {
            "ct" => BackupType::Ct,
            "host" => BackupType::Host,
            "vm" => BackupType::Vm,
            _ => bail!("invalid backup type {ty:?}"),
        })
    }
}

impl std::cmp::Ord for BackupType {
    #[inline]
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.order().cmp(&other.order())
    }
}

impl std::cmp::PartialOrd for BackupType {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

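#[cfg(test)]
mod backup_type_tests {
    // Illustrative sketch: `Ord` intentionally keeps the old alphabetical
    // string order (ct < host < vm) rather than the enum's declaration order.
    use super::BackupType;

    #[test]
    fn ordering_and_parsing() {
        assert!(BackupType::Ct < BackupType::Host);
        assert!(BackupType::Host < BackupType::Vm);
        assert_eq!("vm".parse::<BackupType>().unwrap(), BackupType::Vm);
        assert_eq!(BackupType::Vm.as_str(), "vm");
    }
}
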
#[api(
    properties: {
        "backup-type": { type: BackupType },
        "backup-id": { schema: BACKUP_ID_SCHEMA },
    },
)]
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// A backup group (without a data store).
pub struct BackupGroup {
    /// Backup type.
    #[serde(rename = "backup-type")]
    pub ty: BackupType,

    /// Backup id.
    #[serde(rename = "backup-id")]
    pub id: String,
}

impl BackupGroup {
    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
        Self { ty, id: id.into() }
    }

    pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
        use crate::FilterType;
        match &filter.filter_type {
            FilterType::Group(backup_group) => {
                match backup_group.parse::<BackupGroup>() {
                    Ok(group) => *self == group,
                    Err(_) => false, // shouldn't happen if value is schema-checked
                }
            }
            FilterType::BackupType(ty) => self.ty == *ty,
            FilterType::Regex(regex) => regex.is_match(&self.to_string()),
        }
    }

    pub fn apply_filters(&self, filters: &[GroupFilter]) -> bool {
        // Since there will only be a few filters in the list, an extra iteration to get the
        // number of include filters should not be an issue.
        let is_included = if filters.iter().filter(|f| !f.is_exclude).count() == 0 {
            true
        } else {
            filters
                .iter()
                .filter(|f| !f.is_exclude)
                .any(|filter| self.matches(filter))
        };

        is_included
            && !filters
                .iter()
                .filter(|f| f.is_exclude)
                .any(|filter| self.matches(filter))
    }
}

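#[cfg(test)]
mod group_filter_tests {
    // Hedged sketch: assumes the `GroupFilter` string syntax from this crate's
    // jobs module ("type:<ty>" with an optional "exclude:" prefix). Without
    // include filters everything is included; exclude filters always win.
    use super::*;

    #[test]
    fn apply_filters_include_exclude() {
        let group = BackupGroup::new(BackupType::Vm, "100");

        // No filters at all: everything is included.
        assert!(group.apply_filters(&[]));

        // An exclude filter removes the group even without include filters.
        let exclude_vm: GroupFilter = "exclude:type:vm".parse().expect("valid filter");
        assert!(!group.apply_filters(&[exclude_vm]));

        // An include filter for a different type does not match.
        let include_ct: GroupFilter = "type:ct".parse().expect("valid filter");
        assert!(!group.apply_filters(&[include_ct]));
    }
}
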
impl AsRef<BackupGroup> for BackupGroup {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl From<(BackupType, String)> for BackupGroup {
    #[inline]
    fn from(data: (BackupType, String)) -> Self {
        Self {
            ty: data.0,
            id: data.1,
        }
    }
}

impl std::cmp::Ord for BackupGroup {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let type_order = self.ty.cmp(&other.ty);
        if type_order != std::cmp::Ordering::Equal {
            return type_order;
        }

        // try to compare IDs numerically
        let id_self = self.id.parse::<u64>();
        let id_other = other.id.parse::<u64>();
        match (id_self, id_other) {
            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
            _ => self.id.cmp(&other.id),
        }
    }
}

impl std::cmp::PartialOrd for BackupGroup {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

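#[cfg(test)]
mod backup_group_order_tests {
    // Illustrative sketch: numeric IDs sort numerically and before
    // non-numeric IDs, so "vm/9" < "vm/10" < "vm/alpha"; types keep their
    // alphabetical order (ct < vm).
    use super::BackupGroup;

    #[test]
    fn numeric_ids_sort_numerically() {
        let a: BackupGroup = "vm/9".parse().unwrap();
        let b: BackupGroup = "vm/10".parse().unwrap();
        let c: BackupGroup = "vm/alpha".parse().unwrap();
        assert!(a < b);
        assert!(b < c);

        let ct: BackupGroup = "ct/100".parse().unwrap();
        let vm: BackupGroup = "vm/100".parse().unwrap();
        assert!(ct < vm);
    }
}
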
impl fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}/{}", self.ty, self.id)
    }
}

impl std::str::FromStr for BackupGroup {
    type Err = Error;

    /// Parse a backup group.
    ///
    /// This parses strings like `vm/100`.
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        Ok(Self {
            ty: cap.get(1).unwrap().as_str().parse()?,
            id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }
}

#[api(
    properties: {
        "group": { type: BackupGroup },
        "backup-time": { schema: BACKUP_TIME_SCHEMA },
    },
)]
/// Uniquely identify a Backup (relative to data store)
///
/// We also call this a backup snapshot.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct BackupDir {
    /// Backup group.
    #[serde(flatten)]
    pub group: BackupGroup,

    /// Backup timestamp unix epoch.
    #[serde(rename = "backup-time")]
    pub time: i64,
}

impl AsRef<BackupGroup> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &BackupGroup {
        &self.group
    }
}

impl AsRef<BackupDir> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl From<(BackupGroup, i64)> for BackupDir {
    fn from(data: (BackupGroup, i64)) -> Self {
        Self {
            group: data.0,
            time: data.1,
        }
    }
}

impl From<(BackupType, String, i64)> for BackupDir {
    fn from(data: (BackupType, String, i64)) -> Self {
        Self {
            group: (data.0, data.1).into(),
            time: data.2,
        }
    }
}

impl BackupDir {
    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
    where
        T: Into<String>,
    {
        let time = proxmox_time::parse_rfc3339(backup_time_string)?;
        let group = BackupGroup::new(ty, id.into());
        Ok(Self { group, time })
    }

    #[inline]
    pub fn ty(&self) -> BackupType {
        self.group.ty
    }

    #[inline]
    pub fn id(&self) -> &str {
        &self.group.id
    }
}

impl std::str::FromStr for BackupDir {
    type Err = Error;

    /// Parse a snapshot path.
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        BackupDir::with_rfc3339(
            cap.get(1).unwrap().as_str().parse()?,
            cap.get(2).unwrap().as_str(),
            cap.get(3).unwrap().as_str(),
        )
    }
}

impl fmt::Display for BackupDir {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // FIXME: log error?
        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
        write!(f, "{}/{}", self.group, time)
    }
}

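#[cfg(test)]
mod backup_dir_tests {
    // Illustrative sketch: a UTC snapshot path round-trips through `FromStr`
    // and `Display` unchanged.
    use super::BackupDir;

    #[test]
    fn snapshot_path_roundtrip() {
        let dir: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse().expect("valid path");
        assert_eq!(dir.ty().as_str(), "host");
        assert_eq!(dir.id(), "elsa");
        assert_eq!(dir.to_string(), "host/elsa/2020-06-15T05:18:33Z");
    }
}
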
/// Used when either a backup group or a backup snapshot directory can be valid.
pub enum BackupPart {
    Group(BackupGroup),
    Dir(BackupDir),
}

impl std::str::FromStr for BackupPart {
    type Err = Error;

    /// Parse a path which can be either a backup group or a snapshot dir.
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        let ty = cap.get(1).unwrap().as_str().parse()?;
        let id = cap.get(2).unwrap().as_str().to_string();

        Ok(match cap.get(3) {
            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?),
            None => BackupPart::Group((ty, id).into()),
        })
    }
}

#[api(
    properties: {
        "backup": { type: BackupDir },
        comment: {
            schema: SINGLE_LINE_COMMENT_SCHEMA,
            optional: true,
        },
        verification: {
            type: SnapshotVerifyState,
            optional: true,
        },
        fingerprint: {
            type: String,
            optional: true,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about backup snapshot.
pub struct SnapshotListItem {
    #[serde(flatten)]
    pub backup: BackupDir,
    /// The first line from manifest "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// The result of the last run verify task
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verification: Option<SnapshotVerifyState>,
    /// Fingerprint of encryption key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<Fingerprint>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
    /// The owner of the snapshot's group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// Protection from prunes
    #[serde(default)]
    pub protected: bool,
}

#[api(
    properties: {
        "backup": { type: BackupGroup },
        "last-backup": { schema: BACKUP_TIME_SCHEMA },
        "backup-count": {
            type: Integer,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
    #[serde(flatten)]
    pub backup: BackupGroup,

    pub last_backup: i64,
    /// Number of contained snapshots
    pub backup_count: u64,
    /// List of contained archive files.
    pub files: Vec<String>,
    /// The owner of the group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// The first line from group "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api()]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup namespace.
pub struct NamespaceListItem {
    /// A backup namespace
    pub ns: BackupNamespace,

    // TODO?
    //pub group_count: u64,
    //pub ns_count: u64,
    /// The first line from the namespace's "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api(
    properties: {
        "backup": { type: BackupDir },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Prune result.
pub struct PruneListItem {
    #[serde(flatten)]
    pub backup: BackupDir,

    /// Keep snapshot
    pub keep: bool,
}

#[api(
    properties: {
        ct: {
            type: TypeCounts,
            optional: true,
        },
        host: {
            type: TypeCounts,
            optional: true,
        },
        vm: {
            type: TypeCounts,
            optional: true,
        },
        other: {
            type: TypeCounts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
    /// The counts for CT backups
    pub ct: Option<TypeCounts>,
    /// The counts for Host backups
    pub host: Option<TypeCounts>,
    /// The counts for VM backups
    pub vm: Option<TypeCounts>,
    /// The counts for other backup types
    pub other: Option<TypeCounts>,
}

#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
    /// The number of groups of the type.
    pub groups: u64,
    /// The number of snapshots of the type.
    pub snapshots: u64,
}

#[api(
    properties: {
        "upid": {
            optional: true,
            type: UPID,
        },
    },
)]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
    pub upid: Option<String>,
    /// Number of processed index files.
    pub index_file_count: usize,
    /// Sum of bytes referred by index files.
    pub index_data_bytes: u64,
    /// Bytes used on disk.
    pub disk_bytes: u64,
    /// Chunks used on disk.
    pub disk_chunks: usize,
    /// Sum of removed bytes.
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
    /// Sum of pending bytes (pending removal - kept for safety).
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
}

#[api(
    properties: {
        "status": {
            type: GarbageCollectionStatus,
        },
    }
)]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Garbage Collection general info
pub struct GarbageCollectionJobStatus {
    /// Datastore
    pub store: String,
    #[serde(flatten)]
    pub status: GarbageCollectionStatus,
    /// Schedule of the gc job
    #[serde(skip_serializing_if = "Option::is_none")]
    pub schedule: Option<String>,
    /// Time of the next gc run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub next_run: Option<i64>,
    /// End time of the last gc run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_endtime: Option<i64>,
    /// State of the last gc run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_run_state: Option<String>,
    /// Duration of last gc run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub duration: Option<i64>,
}

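#[cfg(test)]
mod gc_job_status_tests {
    // Hedged sketch (assumes `serde_json` is available here): `status` is
    // flattened, so the GC counters appear at the top level of the serialized
    // job status instead of under a nested "status" key.
    use super::*;

    #[test]
    fn status_is_flattened() {
        let job = GarbageCollectionJobStatus {
            store: "store1".to_string(),
            ..Default::default()
        };
        let value = serde_json::to_value(&job).expect("serializable");
        // No nested "status" object; the GC counters live at the top level.
        assert!(value.get("status").is_none());
        assert_eq!(value["index-file-count"], 0);
        assert_eq!(value["store"], "store1");
    }
}
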
#[api(
    properties: {
        "gc-status": {
            type: GarbageCollectionStatus,
            optional: true,
        },
        counts: {
            type: Counts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
    /// Group/Snapshot counts
    #[serde(skip_serializing_if = "Option::is_none")]
    pub counts: Option<Counts>,
}

#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        history: {
            type: Array,
            optional: true,
            items: {
                type: Number,
                description: "The usage of a time in the past. Either null or between 0.0 and 1.0.",
            }
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of a Datastore
pub struct DataStoreStatusListItem {
    pub store: String,
    /// The size of the underlying storage in bytes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub total: Option<u64>,
    /// The used bytes of the underlying storage.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub used: Option<u64>,
    /// The available bytes of the underlying storage. (-1 on error)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub avail: Option<u64>,
    /// A list of usages of the past (last month).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history: Option<Vec<Option<f64>>>,
    /// History start time (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_start: Option<u64>,
    /// History resolution (seconds)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_delta: Option<u64>,
    /// Estimation of the UNIX epoch when the storage will be full.
    /// It's calculated via a simple Linear Regression (Least Squares) over the RRD data of the
    /// last month. Missing if not enough data points are available yet. An estimate in the past
    /// means that usage is declining or not changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub estimated_full_date: Option<i64>,
    /// An error description, for example, when the datastore could not be looked up
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
}

impl DataStoreStatusListItem {
    pub fn empty(store: &str, err: Option<String>) -> Self {
        DataStoreStatusListItem {
            store: store.to_owned(),
            total: None,
            used: None,
            avail: None,
            history: None,
            history_start: None,
            history_delta: None,
            estimated_full_date: None,
            error: err,
            gc_status: None,
        }
    }
}

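#[cfg(test)]
mod full_date_estimate_sketch {
    // Hedged sketch of the least-squares estimate described on
    // `estimated_full_date` above; the real computation happens server-side
    // over RRD data, this only illustrates the math.

    /// Fit `usage = a + b * t` over (epoch, usage) samples and solve for usage == 1.0 (100%).
    fn estimate_full_epoch(samples: &[(f64, f64)]) -> Option<f64> {
        if samples.len() < 2 {
            return None;
        }
        let n = samples.len() as f64;
        let (sum_t, sum_u, sum_tt, sum_tu) = samples.iter().fold(
            (0.0, 0.0, 0.0, 0.0),
            |(st, su, stt, stu), &(t, u)| (st + t, su + u, stt + t * t, stu + t * u),
        );
        let denom = n * sum_tt - sum_t * sum_t;
        if denom == 0.0 {
            return None;
        }
        let b = (n * sum_tu - sum_t * sum_u) / denom; // slope: usage per second
        let a = (sum_u - b * sum_t) / n; // intercept
        if b == 0.0 {
            return None; // usage not changing, no meaningful estimate
        }
        Some((1.0 - a) / b) // epoch where predicted usage reaches 100%
    }

    #[test]
    fn rising_usage_extrapolates_forward() {
        // 50% at t=0 and 60% at t=100 extrapolates to full at t=500.
        let est = estimate_full_epoch(&[(0.0, 0.5), (100.0, 0.6)]).unwrap();
        assert!((est - 500.0).abs() < 1e-6);
    }
}
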
pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots.",
        &SnapshotListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of archive files inside a backup snapshot.",
        &BackupContent::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup groups.",
        &GroupListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup namespaces.",
        &NamespaceListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots and a flag indicating whether they are kept or removed.",
        &PruneListItem::API_SCHEMA,
    )
    .schema(),
};

#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// A namespace mapping
pub struct TapeRestoreNamespace {
    /// The source datastore
    pub store: String,
    /// The source namespace. Root namespace if omitted.
    pub source: Option<BackupNamespace>,
    /// The target namespace.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub target: Option<BackupNamespace>,
    /// The (optional) recursion depth
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,
}

pub const TAPE_RESTORE_NAMESPACE_SCHEMA: Schema = StringSchema::new("A namespace mapping")
    .format(&ApiStringFormat::PropertyString(
        &TapeRestoreNamespace::API_SCHEMA,
    ))
    .schema();

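#[cfg(test)]
mod tape_restore_namespace_tests {
    // Hedged sketch: a tape-restore namespace mapping is a property string
    // such as "store=src,target=a/b,max-depth=2"; parsing goes through the
    // same schema deserializer used elsewhere in this file.
    use super::*;

    #[test]
    fn parse_mapping_property_string() {
        let mapping = TapeRestoreNamespace::deserialize(
            proxmox_schema::de::SchemaDeserializer::new(
                "store=source-store,target=a/b,max-depth=2",
                &TapeRestoreNamespace::API_SCHEMA,
            ),
        )
        .expect("valid mapping");
        assert_eq!(mapping.store, "source-store");
        assert_eq!(mapping.max_depth, Some(2));
        assert!(mapping.source.is_none());
    }
}
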
/// Parse snapshots in the form 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z'
/// into a [`BackupNamespace`] and [`BackupDir`]
pub fn parse_ns_and_snapshot(input: &str) -> Result<(BackupNamespace, BackupDir), Error> {
    match input.rmatch_indices('/').nth(2) {
        Some((idx, _)) => {
            let ns = BackupNamespace::from_path(&input[..idx])?;
            let dir: BackupDir = input[(idx + 1)..].parse()?;
            Ok((ns, dir))
        }
        None => Ok((BackupNamespace::root(), input.parse()?)),
    }
}

/// Prints a [`BackupNamespace`] and [`BackupDir`] in the form of
/// 'ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z'
pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String {
    if ns.is_root() {
        dir.to_string()
    } else {
        format!("{}/{}", ns.display_as_path(), dir)
    }
}

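#[cfg(test)]
mod ns_snapshot_path_tests {
    // Illustrative sketch: `parse_ns_and_snapshot` and `print_ns_and_snapshot`
    // are inverse operations for both nested and root namespaces.
    use super::*;

    #[test]
    fn parse_print_roundtrip() {
        let input = "ns/foo/ns/bar/ct/100/1970-01-01T00:00:00Z";
        let (ns, dir) = parse_ns_and_snapshot(input).expect("valid input");
        assert_eq!(ns.to_string(), "foo/bar");
        assert_eq!(print_ns_and_snapshot(&ns, &dir), input);

        let (root, dir) = parse_ns_and_snapshot("ct/100/1970-01-01T00:00:00Z").unwrap();
        assert!(root.is_root());
        assert_eq!(print_ns_and_snapshot(&root, &dir), "ct/100/1970-01-01T00:00:00Z");
    }
}
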
/// Prints a Datastore name and [`BackupNamespace`] for logs/errors.
pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
    if ns.is_root() {
        format!("datastore '{}', root namespace", store)
    } else {
        format!("datastore '{}', namespace '{}'", store, ns)
    }
}