use std::fmt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};

use proxmox_schema::{
    api, const_regex, ApiStringFormat, ApiType, ArraySchema, EnumEntry, IntegerSchema, ReturnType,
    Schema, StringSchema, Updater, UpdaterType,
};

use crate::{
    Authid, CryptMode, Fingerprint, MaintenanceMode, Userid, DATASTORE_NOTIFY_STRING_SCHEMA,
    GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX,
    SINGLE_LINE_COMMENT_SCHEMA, UPID,
};

const_regex! {
    pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");

    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");

    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");

    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!(), r"$");

    pub GROUP_PATH_REGEX = concat!(
        r"^(", BACKUP_TYPE_RE!(), ")/",
        r"(", BACKUP_ID_RE!(), r")$",
    );

    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";

    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
    pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$");

    pub DATASTORE_MAP_REGEX = concat!(r"(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
}

pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name")
    .min_length(1)
    .max_length(4096)
    .schema();

pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema = StringSchema::new("Backup archive name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .schema();

pub const BACKUP_ID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
pub const BACKUP_GROUP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GROUP_PATH_REGEX);
pub const BACKUP_NAMESPACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BACKUP_NAMESPACE_REGEX);

pub const BACKUP_ID_SCHEMA: Schema = StringSchema::new("Backup ID.")
    .format(&BACKUP_ID_FORMAT)
    .schema();

pub const BACKUP_TYPE_SCHEMA: Schema = StringSchema::new("Backup type.")
    .format(&ApiStringFormat::Enum(&[
        EnumEntry::new("vm", "Virtual Machine Backup"),
        EnumEntry::new("ct", "Container Backup"),
        EnumEntry::new("host", "Host Backup"),
    ]))
    .schema();

pub const BACKUP_TIME_SCHEMA: Schema = IntegerSchema::new("Backup time (Unix epoch).")
    .minimum(1)
    .schema();

pub const BACKUP_GROUP_SCHEMA: Schema = StringSchema::new("Backup Group")
    .format(&BACKUP_GROUP_FORMAT)
    .schema();

/// The maximal, inclusive depth for namespaces from the root namespace downwards.
///
/// The datastore root namespace is at depth zero (0), so there are eight (8) levels in total.
pub const MAX_NAMESPACE_DEPTH: usize = 7;
pub const MAX_BACKUP_NAMESPACE_LENGTH: usize = 32 * 8; // 256
pub const BACKUP_NAMESPACE_SCHEMA: Schema = StringSchema::new("Namespace.")
    .format(&BACKUP_NAMESPACE_FORMAT)
    .max_length(MAX_BACKUP_NAMESPACE_LENGTH) // 256
    .schema();

pub const NS_MAX_DEPTH_SCHEMA: Schema =
    IntegerSchema::new("How many levels of namespaces should be operated on (0 == no recursion)")
        .minimum(0)
        .maximum(MAX_NAMESPACE_DEPTH as isize)
        .default(0)
        .schema();

pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
    .format(&CHUNK_DIGEST_FORMAT)
    .schema();

pub const DATASTORE_MAP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);

pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
    .format(&DATASTORE_MAP_FORMAT)
    .min_length(3)
    .max_length(65)
    .type_text("(<source>=)?<target>")
    .schema();

pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Datastore mapping list.", &DATASTORE_MAP_SCHEMA).schema();

pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
    "A list of Datastore mappings (or single datastore), comma separated. \
    For example 'a=b,e' maps the source datastore 'a' to target 'b' and \
    all other sources to the default 'e'. If no default is given, only the \
    specified sources are mapped.",
)
.format(&ApiStringFormat::PropertyString(
    &DATASTORE_MAP_ARRAY_SCHEMA,
))
.schema();

pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new("Number of daily backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema =
    IntegerSchema::new("Number of hourly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new("Number of backups to keep.")
    .minimum(1)
    .schema();

pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema =
    IntegerSchema::new("Number of monthly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema =
    IntegerSchema::new("Number of weekly backups to keep.")
        .minimum(1)
        .schema();

pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema =
    IntegerSchema::new("Number of yearly backups to keep.")
        .minimum(1)
        .schema();

#[api(
    properties: {
        "keep-last": {
            schema: PRUNE_SCHEMA_KEEP_LAST,
            optional: true,
        },
        "keep-hourly": {
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
            optional: true,
        },
        "keep-daily": {
            schema: PRUNE_SCHEMA_KEEP_DAILY,
            optional: true,
        },
        "keep-weekly": {
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
            optional: true,
        },
        "keep-monthly": {
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
            optional: true,
        },
        "keep-yearly": {
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_yearly: Option<u64>,
}

#[api]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The order to sort chunks by
pub enum ChunkOrder {
    /// Iterate chunks in the index order
    None,
    /// Iterate chunks in inode order
    Inode,
}

#[api(
    properties: {
        "chunk-order": {
            type: ChunkOrder,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Datastore tuning options
pub struct DatastoreTuning {
    /// Iterate chunks in this order
    pub chunk_order: Option<ChunkOrder>,
}

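/// Property-string schema for [`DatastoreTuning`]. An illustrative value, assuming the
/// kebab-case field names above, would be `"chunk-order=inode"`.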
pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore tuning options")
    .format(&ApiStringFormat::PropertyString(
        &DatastoreTuning::API_SCHEMA,
    ))
    .schema();

#[api(
    properties: {
        name: {
            schema: DATASTORE_SCHEMA,
        },
        path: {
            schema: DIR_NAME_SCHEMA,
        },
        "notify-user": {
            optional: true,
            type: Userid,
        },
        "notify": {
            optional: true,
            schema: DATASTORE_NOTIFY_STRING_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "gc-schedule": {
            optional: true,
            schema: GC_SCHEDULE_SCHEMA,
        },
        "prune-schedule": {
            optional: true,
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        "keep-last": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_LAST,
        },
        "keep-hourly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_HOURLY,
        },
        "keep-daily": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_DAILY,
        },
        "keep-weekly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_WEEKLY,
        },
        "keep-monthly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_MONTHLY,
        },
        "keep-yearly": {
            optional: true,
            schema: PRUNE_SCHEMA_KEEP_YEARLY,
        },
        "verify-new": {
            description: "If enabled, all new backups will be verified right after completion.",
            optional: true,
            type: bool,
        },
        tuning: {
            optional: true,
            schema: DATASTORE_TUNING_STRING_SCHEMA,
        },
        "maintenance-mode": {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// Datastore configuration properties.
pub struct DataStoreConfig {
    #[updater(skip)]
    pub name: String,
    #[updater(skip)]
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prune_schedule: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_yearly: Option<u64>,
    /// If enabled, all backups will be verified right after completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify_new: Option<bool>,
    /// Send job email notification to this user
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,
    /// Send notification only for job errors
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify: Option<String>,
    /// Datastore tuning options
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tuning: Option<String>,
    /// Maintenance mode: the type is either 'offline' or 'read-only'; an optional message should
    /// be enclosed in double quotes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance_mode: Option<String>,
}

impl DataStoreConfig {
    pub fn new(name: String, path: String) -> Self {
        Self {
            name,
            path,
            comment: None,
            gc_schedule: None,
            prune_schedule: None,
            keep_last: None,
            keep_hourly: None,
            keep_daily: None,
            keep_weekly: None,
            keep_monthly: None,
            keep_yearly: None,
            verify_new: None,
            notify_user: None,
            notify: None,
            tuning: None,
            maintenance_mode: None,
        }
    }

    pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> {
        self.maintenance_mode
            .as_ref()
            .and_then(|str| MaintenanceMode::API_SCHEMA.parse_property_string(str).ok())
            .and_then(|value| MaintenanceMode::deserialize(value).ok())
    }
}

#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        maintenance: {
            optional: true,
            format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
            type: String,
        }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
    pub store: String,
    pub comment: Option<String>,
    /// If the datastore is in maintenance mode, information about it
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maintenance: Option<String>,
}

#[api(
    properties: {
        "filename": {
            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
        },
        "crypt-mode": {
            type: CryptMode,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
    pub filename: String,
    /// Info if file is encrypted, signed, or neither.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub crypt_mode: Option<CryptMode>,
    /// Archive size (from backup manifest).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
    /// Verification was successful
    Ok,
    /// Verification reported one or more errors
    Failed,
}

#[api(
    properties: {
        upid: {
            type: UPID,
        },
        state: {
            type: VerifyState,
        },
    },
)]
#[derive(Serialize, Deserialize)]
/// Task properties.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification. Enum.
    pub state: VerifyState,
}

/// A namespace provides a logical separation between backup groups from different domains
/// (clusters, sites, ...) where uniqueness can no longer be guaranteed. It allows users to share a
/// datastore (i.e., one deduplication domain (chunk store)) with multiple (trusted) sites and
/// to form a hierarchy, for easier management and to avoid clashes between backup IDs.
///
/// NOTE: Namespaces are a logical boundary only; they do not provide fully secure separation, as
/// the chunk store is still shared. So users who do not trust each other must not share a
/// datastore.
///
/// Implementation note: The path a namespace resolves to is always prefixed with `ns/` to avoid
/// clashes with backup group IDs and future backup types, and to have a clean separation between
/// the namespace directories and the ones from a backup snapshot.
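///
/// A short usage sketch (illustrative; it relies only on the public API defined in this module):
///
/// ```
/// # use pbs_api_types::BackupNamespace;
/// let ns: BackupNamespace = "prod/site-a".parse().unwrap();
/// assert_eq!(ns.depth(), 2);
/// // The on-disk representation interleaves `ns/` components:
/// assert_eq!(ns.display_as_path().to_string(), "ns/prod/ns/site-a");
/// ```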
#[derive(Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, UpdaterType)]
pub struct BackupNamespace {
    /// The namespace subdirectories without the `ns/` intermediate directories.
    inner: Vec<String>,

    /// Cache the total length for efficiency.
    len: usize,
}

impl BackupNamespace {
    /// Returns a root namespace reference.
    pub const fn root() -> Self {
        Self {
            inner: Vec::new(),
            len: 0,
        }
    }

    /// True if this represents the root namespace.
    pub fn is_root(&self) -> bool {
        self.inner.is_empty()
    }

    /// Try to parse a string into a namespace.
    pub fn new(name: &str) -> Result<Self, Error> {
        let mut this = Self::root();

        if name.is_empty() {
            return Ok(this);
        }

        for name in name.split('/') {
            this.push(name.to_string())?;
        }
        Ok(this)
    }

    /*
    /// Try to parse a file system path (where each sub-namespace is separated by an `ns`
    /// subdirectory) into a valid namespace.
    pub fn from_path<T: AsRef<Path>>(path: T) -> Result<Self, Error> {
        use std::path::Component;

        let mut this = Self::root();
        let mut next_is_ns = true;
        for component in path.as_ref().components() {
            match component {
                Component::Normal(component) if next_is_ns => {
                    if component.to_str() != Some("ns") {
                        bail!("invalid component in path: {:?}", component);
                    }
                    next_is_ns = false;
                }
                Component::Normal(component) => {
                    this.push(
                        component
                            .to_str()
                            .ok_or_else(|| {
                                format_err!("invalid component in path: {:?}", component)
                            })?
                            .to_string(),
                    )?;
                    next_is_ns = true;
                }
                Component::RootDir => {
                    next_is_ns = true;
                }
                _ => bail!("invalid component in path: {:?}", component.as_os_str()),
            }
        }

        Ok(this)
    }
    */

    /// Try to parse a file path string (where each sub-namespace is separated by an `ns`
    /// subdirectory) into a valid namespace.
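    ///
    /// Sketch of the expected behaviour (illustrative):
    ///
    /// ```
    /// # use pbs_api_types::BackupNamespace;
    /// let ns = BackupNamespace::from_path("ns/foo/ns/bar").unwrap();
    /// assert_eq!(ns.to_string(), "foo/bar");
    /// ```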
    pub fn from_path(mut path: &str) -> Result<Self, Error> {
        let mut this = Self::root();
        loop {
            match path.strip_prefix("ns/") {
                Some(next) => match next.find('/') {
                    Some(pos) => {
                        this.push(next[..pos].to_string())?;
                        path = &next[(pos + 1)..];
                    }
                    None => {
                        this.push(next.to_string())?;
                        break;
                    }
                },
                None if !path.is_empty() => {
                    bail!("invalid component in namespace path at {:?}", path);
                }
                None => break,
            }
        }
        Ok(this)
    }

    /// Create a new namespace attached to `parent`.
    ///
    /// `name` must be a single-level namespace ID, that is, no '/' is allowed.
    /// This rule also avoids confusion about whether the name is a namespace or a namespace path.
    pub fn from_parent_ns(parent: &Self, name: String) -> Result<Self, Error> {
        let mut child = parent.to_owned();
        child.push(name)?;
        Ok(child)
    }

    /// Pop one level off the namespace hierarchy.
    pub fn pop(&mut self) {
        if let Some(dropped) = self.inner.pop() {
            self.len = self.len.saturating_sub(dropped.len() + 1);
        }
    }

    /// Get the namespace parent as an owned [`BackupNamespace`].
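    ///
    /// E.g. (sketch): the parent of `a/b` is `a`; the parent of the root namespace is the root
    /// itself.
    ///
    /// ```
    /// # use pbs_api_types::BackupNamespace;
    /// let ns: BackupNamespace = "a/b".parse().unwrap();
    /// assert_eq!(ns.parent().to_string(), "a");
    /// ```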
    pub fn parent(&self) -> Self {
        if self.is_root() {
            return Self::root();
        }

        let mut parent = self.clone();
        parent.pop();

        parent
    }

    /// Create a new namespace directly from a vec.
    ///
    /// # Safety
    ///
    /// Invalid contents may lead to inaccessible backups.
    pub unsafe fn from_vec_unchecked(components: Vec<String>) -> Self {
        let mut this = Self {
            inner: components,
            len: 0,
        };
        this.recalculate_len();
        this
    }

    /// Recalculate the length.
    fn recalculate_len(&mut self) {
        self.len = self.inner.len().max(1) - 1; // a slash between each component
        for part in &self.inner {
            self.len += part.len();
        }
    }

    /// The hierarchical depth of the namespace, 0 means top-level.
    pub fn depth(&self) -> usize {
        self.inner.len()
    }

    /// The logical name and ID of the namespace.
    pub fn name(&self) -> String {
        self.to_string()
    }

    /// The actual relative backing path of the namespace on the datastore.
    pub fn path(&self) -> PathBuf {
        self.display_as_path().to_string().into()
    }

    /// Get the current namespace length.
    ///
    /// This includes separating slashes, but does not include the `ns/` intermediate directories.
    /// This is not the *path* length, but rather the length that would be produced via
    /// `.to_string()`.
    #[inline]
    pub fn name_len(&self) -> usize {
        self.len
    }

    /// Get the current namespace path length.
    ///
    /// This includes the `ns/` subdirectory strings.
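    ///
    /// E.g. (sketch): for the namespace `a/b`, [`Self::name_len`] is 3, while the path length is
    /// 9 (`ns/a/ns/b`).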
    pub fn path_len(&self) -> usize {
        self.name_len() + 3 * self.inner.len()
    }

    /// Enter a sub-namespace. Fails if nesting would become too deep or the name too long.
    pub fn push(&mut self, subdir: String) -> Result<(), Error> {
        if subdir.contains('/') {
            bail!("namespace component contained a slash");
        }

        self.push_do(subdir)
    }

    /// Assumes `subdir` already does not contain any slashes.
    /// Performs remaining checks and updates the length.
    fn push_do(&mut self, subdir: String) -> Result<(), Error> {
        // adding another component to a namespace already at the maximum depth would exceed it
        if self.depth() >= MAX_NAMESPACE_DEPTH {
            bail!(
                "namespace too deep, {} >= max allowed depth {}",
                self.inner.len(),
                MAX_NAMESPACE_DEPTH
            );
        }

        if self.len + subdir.len() + 1 > MAX_BACKUP_NAMESPACE_LENGTH {
            bail!("namespace length exceeded");
        }

        if !crate::PROXMOX_SAFE_ID_REGEX.is_match(&subdir) {
            bail!("not a valid namespace component: {subdir}");
        }

        if !self.inner.is_empty() {
            self.len += 1; // separating slash
        }
        self.len += subdir.len();
        self.inner.push(subdir);
        Ok(())
    }

    /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every
    /// component.
    pub fn display_as_path(&self) -> BackupNamespacePath {
        BackupNamespacePath(self)
    }

    /// Iterate over the subdirectories.
    pub fn components(&self) -> impl Iterator<Item = &str> + '_ {
        self.inner.iter().map(String::as_str)
    }

    /// Map this namespace by replacing `source_prefix` with `target_prefix`.
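    ///
    /// Sketch of the expected behaviour (illustrative); mapping fails if `source_prefix` is not
    /// actually a prefix of `self`:
    ///
    /// ```
    /// # use pbs_api_types::BackupNamespace;
    /// let ns: BackupNamespace = "a/b/c".parse().unwrap();
    /// let source: BackupNamespace = "a".parse().unwrap();
    /// let target: BackupNamespace = "x".parse().unwrap();
    /// assert_eq!(ns.map_prefix(&source, &target).unwrap().to_string(), "x/b/c");
    /// ```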
    pub fn map_prefix(
        &self,
        source_prefix: &BackupNamespace,
        target_prefix: &BackupNamespace,
    ) -> Result<Self, Error> {
        let mut mapped = target_prefix.clone();
        let mut source_iter = source_prefix.components();
        let mut self_iter = self.components();

        while let Some(comp) = self_iter.next() {
            if let Some(source_comp) = source_iter.next() {
                if source_comp != comp {
                    bail!(
                        "Failed to map namespace - {} is not a valid prefix of {}",
                        source_prefix,
                        self
                    );
                }
                continue;
            }
            mapped.push(comp.to_owned())?;
        }

        Ok(mapped)
    }
}

impl fmt::Display for BackupNamespace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use std::fmt::Write;

        let mut parts = self.inner.iter();
        if let Some(first) = parts.next() {
            f.write_str(first)?;
        }
        for part in parts {
            f.write_char('/')?;
            f.write_str(part)?;
        }
        Ok(())
    }
}

serde_plain::derive_deserialize_from_fromstr!(BackupNamespace, "valid backup namespace");

impl std::str::FromStr for BackupNamespace {
    type Err = Error;

    fn from_str(name: &str) -> Result<Self, Self::Err> {
        Self::new(name)
    }
}

serde_plain::derive_serialize_from_display!(BackupNamespace);

impl ApiType for BackupNamespace {
    const API_SCHEMA: Schema = BACKUP_NAMESPACE_SCHEMA;
}

/// Helper to format a [`BackupNamespace`] as a path component of a [`BackupGroup`].
///
/// This implements [`Display`] such that it includes the `ns/` subdirectory prefix in front of
/// every component.
pub struct BackupNamespacePath<'a>(&'a BackupNamespace);

impl fmt::Display for BackupNamespacePath<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut sep = "ns/";
        for part in &self.0.inner {
            f.write_str(sep)?;
            sep = "/ns/";
            f.write_str(part)?;
        }
        Ok(())
    }
}

#[api]
/// Backup types.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum BackupType {
    /// Virtual machines.
    Vm,

    /// Containers.
    Ct,

    /// "Host" backups.
    Host,
    // NOTE: if you add new types, don't forget to adapt the iter below!
}

impl BackupType {
    pub const fn as_str(&self) -> &'static str {
        match self {
            BackupType::Vm => "vm",
            BackupType::Ct => "ct",
            BackupType::Host => "host",
        }
    }

    /// We used to have alphabetical ordering here when this was a string.
    const fn order(self) -> u8 {
        match self {
            BackupType::Ct => 0,
            BackupType::Host => 1,
            BackupType::Vm => 2,
        }
    }

    pub const fn iter() -> &'static [BackupType] {
        &[Self::Vm, Self::Ct, Self::Host]
    }
}

impl fmt::Display for BackupType {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self.as_str(), f)
    }
}

impl std::str::FromStr for BackupType {
    type Err = Error;

    /// Parse a backup type.
    fn from_str(ty: &str) -> Result<Self, Error> {
        Ok(match ty {
            "ct" => BackupType::Ct,
            "host" => BackupType::Host,
            "vm" => BackupType::Vm,
            _ => bail!("invalid backup type {ty:?}"),
        })
    }
}

impl std::cmp::Ord for BackupType {
    #[inline]
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.order().cmp(&other.order())
    }
}

impl std::cmp::PartialOrd for BackupType {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

#[api(
    properties: {
        "backup-type": { type: BackupType },
        "backup-id": { schema: BACKUP_ID_SCHEMA },
    },
)]
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// A backup group (without a data store).
pub struct BackupGroup {
    /// Backup type.
    #[serde(rename = "backup-type")]
    pub ty: BackupType,

    /// Backup id.
    #[serde(rename = "backup-id")]
    pub id: String,
}

impl BackupGroup {
    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
        Self { ty, id: id.into() }
    }

    pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
        use crate::GroupFilter;

        match filter {
            GroupFilter::Group(backup_group) => {
                match backup_group.parse::<BackupGroup>() {
                    Ok(group) => *self == group,
                    Err(_) => false, // shouldn't happen if value is schema-checked
                }
            }
            GroupFilter::BackupType(ty) => self.ty == *ty,
            GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
        }
    }
}

impl AsRef<BackupGroup> for BackupGroup {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl From<(BackupType, String)> for BackupGroup {
    #[inline]
    fn from(data: (BackupType, String)) -> Self {
        Self {
            ty: data.0,
            id: data.1,
        }
    }
}

impl std::cmp::Ord for BackupGroup {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let type_order = self.ty.cmp(&other.ty);
        if type_order != std::cmp::Ordering::Equal {
            return type_order;
        }

        // try to compare IDs numerically
        let id_self = self.id.parse::<u64>();
        let id_other = other.id.parse::<u64>();
        match (id_self, id_other) {
            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
            _ => self.id.cmp(&other.id),
        }
    }
}

impl std::cmp::PartialOrd for BackupGroup {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}/{}", self.ty, self.id)
    }
}

impl std::str::FromStr for BackupGroup {
    type Err = Error;

    /// Parse a backup group.
    ///
    /// This parses strings like `vm/100`.
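    ///
    /// A short usage sketch (illustrative):
    ///
    /// ```
    /// # use pbs_api_types::{BackupGroup, BackupType};
    /// let group: BackupGroup = "vm/100".parse().unwrap();
    /// assert_eq!(group.ty, BackupType::Vm);
    /// assert_eq!(group.id, "100");
    /// ```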
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        Ok(Self {
            ty: cap.get(1).unwrap().as_str().parse()?,
            id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }
}

#[api(
    properties: {
        "group": { type: BackupGroup },
        "backup-time": { schema: BACKUP_TIME_SCHEMA },
    },
)]
/// Uniquely identify a backup (relative to a datastore).
///
/// We also call this a backup snapshot.
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct BackupDir {
    /// Backup group.
    #[serde(flatten)]
    pub group: BackupGroup,

    /// Backup timestamp unix epoch.
    #[serde(rename = "backup-time")]
    pub time: i64,
}

impl AsRef<BackupGroup> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &BackupGroup {
        &self.group
    }
}

impl AsRef<BackupDir> for BackupDir {
    #[inline]
    fn as_ref(&self) -> &Self {
        self
    }
}

impl From<(BackupGroup, i64)> for BackupDir {
    fn from(data: (BackupGroup, i64)) -> Self {
        Self {
            group: data.0,
            time: data.1,
        }
    }
}

impl From<(BackupType, String, i64)> for BackupDir {
    fn from(data: (BackupType, String, i64)) -> Self {
        Self {
            group: (data.0, data.1).into(),
            time: data.2,
        }
    }
}

impl BackupDir {
    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
    where
        T: Into<String>,
    {
        let time = proxmox_time::parse_rfc3339(backup_time_string)?;
        let group = BackupGroup::new(ty, id.into());
        Ok(Self { group, time })
    }

    #[inline]
    pub fn ty(&self) -> BackupType {
        self.group.ty
    }

    #[inline]
    pub fn id(&self) -> &str {
        &self.group.id
    }
}

impl std::str::FromStr for BackupDir {
    type Err = Error;

    /// Parse a snapshot path.
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
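    ///
    /// A short usage sketch (illustrative):
    ///
    /// ```
    /// # use pbs_api_types::BackupDir;
    /// let dir: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse().unwrap();
    /// assert_eq!(dir.group.to_string(), "host/elsa");
    /// assert_eq!(dir.to_string(), "host/elsa/2020-06-15T05:18:33Z");
    /// ```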
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        BackupDir::with_rfc3339(
            cap.get(1).unwrap().as_str().parse()?,
            cap.get(2).unwrap().as_str(),
            cap.get(3).unwrap().as_str(),
        )
    }
}

impl fmt::Display for BackupDir {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // FIXME: log error?
        let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
        write!(f, "{}/{}", self.group, time)
    }
}

/// Used where either a backup group or a backup directory (snapshot) is a valid input.
pub enum BackupPart {
    Group(BackupGroup),
    Dir(BackupDir),
}

impl std::str::FromStr for BackupPart {
    type Err = Error;

    /// Parse a path which can be either a backup group or a snapshot dir.
    fn from_str(path: &str) -> Result<Self, Error> {
        let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        let ty = cap.get(1).unwrap().as_str().parse()?;
        let id = cap.get(2).unwrap().as_str().to_string();

        Ok(match cap.get(3) {
            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?),
            None => BackupPart::Group((ty, id).into()),
        })
    }
}

#[api(
    properties: {
        "backup": { type: BackupDir },
        comment: {
            schema: SINGLE_LINE_COMMENT_SCHEMA,
            optional: true,
        },
        verification: {
            type: SnapshotVerifyState,
            optional: true,
        },
        fingerprint: {
            type: String,
            optional: true,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup snapshot.
pub struct SnapshotListItem {
    #[serde(flatten)]
    pub backup: BackupDir,
    /// The first line from the manifest "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// The result of the last run verify task
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verification: Option<SnapshotVerifyState>,
    /// Fingerprint of encryption key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<Fingerprint>,
    /// List of contained archive files.
    pub files: Vec<BackupContent>,
    /// Overall snapshot size (sum of all archive sizes).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub size: Option<u64>,
    /// The owner of the snapshot's group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// Protection from prunes
    #[serde(default)]
    pub protected: bool,
}

#[api(
    properties: {
        "backup": { type: BackupGroup },
        "last-backup": { schema: BACKUP_TIME_SCHEMA },
        "backup-count": {
            type: Integer,
        },
        files: {
            items: {
                schema: BACKUP_ARCHIVE_NAME_SCHEMA
            },
        },
        owner: {
            type: Authid,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
    #[serde(flatten)]
    pub backup: BackupGroup,

    pub last_backup: i64,
    /// Number of contained snapshots
    pub backup_count: u64,
    /// List of contained archive files.
    pub files: Vec<String>,
    /// The owner of the group
    #[serde(skip_serializing_if = "Option::is_none")]
    pub owner: Option<Authid>,
    /// The first line from the group's "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup namespace.
pub struct NamespaceListItem {
    /// A backup namespace
    pub ns: BackupNamespace,

    // TODO?
    //pub group_count: u64,
    //pub ns_count: u64,
    /// The first line from the namespace's "notes"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api(
    properties: {
        "backup": { type: BackupDir },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Prune result.
pub struct PruneListItem {
    #[serde(flatten)]
    pub backup: BackupDir,

    /// Keep snapshot
    pub keep: bool,
}

#[api(
    properties: {
        ct: {
            type: TypeCounts,
            optional: true,
        },
        host: {
            type: TypeCounts,
            optional: true,
        },
        vm: {
            type: TypeCounts,
            optional: true,
        },
        other: {
            type: TypeCounts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default)]
/// Counts of groups/snapshots per BackupType.
pub struct Counts {
    /// The counts for CT backups
    pub ct: Option<TypeCounts>,
    /// The counts for Host backups
    pub host: Option<TypeCounts>,
    /// The counts for VM backups
    pub vm: Option<TypeCounts>,
    /// The counts for other backup types
    pub other: Option<TypeCounts>,
}

#[api()]
#[derive(Serialize, Deserialize, Default)]
/// Backup Type group/snapshot counts.
pub struct TypeCounts {
    /// The number of groups of the type.
    pub groups: u64,
    /// The number of snapshots of the type.
    pub snapshots: u64,
}

#[api(
    properties: {
        "upid": {
            optional: true,
            type: UPID,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
    pub upid: Option<String>,
    /// Number of processed index files.
    pub index_file_count: usize,
    /// Sum of bytes referred by index files.
    pub index_data_bytes: u64,
    /// Bytes used on disk.
    pub disk_bytes: u64,
    /// Chunks used on disk.
    pub disk_chunks: usize,
    /// Sum of removed bytes.
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
    /// Sum of pending bytes (pending removal - kept for safety).
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
}

impl Default for GarbageCollectionStatus {
    fn default() -> Self {
        GarbageCollectionStatus {
            upid: None,
            index_file_count: 0,
            index_data_bytes: 0,
            disk_bytes: 0,
            disk_chunks: 0,
            removed_bytes: 0,
            removed_chunks: 0,
            pending_bytes: 0,
            pending_chunks: 0,
            removed_bad: 0,
            still_bad: 0,
        }
    }
}

#[api(
    properties: {
        "gc-status": {
            type: GarbageCollectionStatus,
            optional: true,
        },
        counts: {
            type: Counts,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Overall Datastore status and useful information.
pub struct DataStoreStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
    /// Group/Snapshot counts
    #[serde(skip_serializing_if = "Option::is_none")]
    pub counts: Option<Counts>,
}

#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        history: {
            type: Array,
            optional: true,
            items: {
                type: Number,
                description: "The usage at a point in the past. Either null or between 0.0 and 1.0.",
            }
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Status of a Datastore
pub struct DataStoreStatusListItem {
    pub store: String,
    /// The size of the underlying storage in bytes. (-1 on error)
    pub total: i64,
    /// The used bytes of the underlying storage. (-1 on error)
    pub used: i64,
    /// The available bytes of the underlying storage. (-1 on error)
    pub avail: i64,
    /// A list of usages of the past (last month).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history: Option<Vec<Option<f64>>>,
    /// History start time (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_start: Option<u64>,
    /// History resolution (seconds)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub history_delta: Option<u64>,
    /// Estimation of the UNIX epoch when the storage will be full.
    /// It's calculated via a simple linear regression (least squares) over the RRD data of the
    /// last month. Missing if not enough data points are available yet. An estimate in the past
    /// means that usage is declining or not changing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub estimated_full_date: Option<i64>,
    /// An error description, for example, when the datastore could not be looked up
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Status of last GC
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gc_status: Option<GarbageCollectionStatus>,
}

pub const ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots.",
        &SnapshotListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of archive files inside a backup snapshot.",
        &BackupContent::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup groups.",
        &GroupListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of backup namespaces.",
        &NamespaceListItem::API_SCHEMA,
    )
    .schema(),
};

pub const ADMIN_DATASTORE_PRUNE_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new(
        "Returns the list of snapshots and a flag indicating if they are kept or removed.",
        &PruneListItem::API_SCHEMA,
    )
    .schema(),
};
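
#[cfg(test)]
mod tests {
    // A few usage sketches for the types above. They only exercise the public API defined in
    // this file (the documented depth limit, numeric group ordering, and path parsing) and make
    // no assumptions beyond what the code and doc comments state.
    use super::*;

    #[test]
    fn namespace_depth_is_limited() {
        let mut ns = BackupNamespace::root();
        for i in 0..MAX_NAMESPACE_DEPTH {
            ns.push(format!("level{i}"))
                .expect("push within the depth limit should work");
        }
        assert_eq!(ns.depth(), MAX_NAMESPACE_DEPTH);
        // one more level would exceed the documented maximum depth
        assert!(ns.push("too-deep".to_string()).is_err());
    }

    #[test]
    fn backup_group_ids_sort_numerically() {
        let a: BackupGroup = "vm/9".parse().unwrap();
        let b: BackupGroup = "vm/10".parse().unwrap();
        // numeric, not lexical, comparison of the IDs
        assert!(a < b);
    }

    #[test]
    fn backup_part_parses_group_or_snapshot() {
        assert!(matches!(
            "host/elsa".parse::<BackupPart>().unwrap(),
            BackupPart::Group(_)
        ));
        assert!(matches!(
            "host/elsa/2020-06-15T05:18:33Z".parse::<BackupPart>().unwrap(),
            BackupPart::Dir(_)
        ));
    }
}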