]> git.proxmox.com Git - proxmox-backup.git/blobdiff - src/api2/types/mod.rs
move remaining client tools to pbs-tools/datastore
[proxmox-backup.git] / src / api2 / types / mod.rs
index 718753248b55cbe42776b441c16b76040be45ae6..bd3c7ac5e583748bd177a655d6481ffd94884265 100644 (file)
@@ -1,28 +1,29 @@
+//! API Type Definitions
+
 use anyhow::bail;
 use serde::{Deserialize, Serialize};
 
 use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
-use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
-
-use crate::backup::{CryptMode, Fingerprint, BACKUP_ID_REGEX};
-use crate::server::UPID;
 
-#[macro_use]
-mod macros;
+use pbs_datastore::catalog::CatalogEntryType;
 
-#[macro_use]
-mod userid;
-pub use userid::{Realm, RealmRef};
-pub use userid::{Tokenname, TokennameRef};
-pub use userid::{Username, UsernameRef};
-pub use userid::Userid;
-pub use userid::Authid;
-pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GROUP_ID_SCHEMA};
+use crate::{
+    backup::DirEntryAttribute,
+    config::acl::Role,
+};
 
 mod tape;
 pub use tape::*;
 
+mod file_restore;
+pub use file_restore::*;
+
+mod acme;
+pub use acme::*;
+
+pub use pbs_api_types::*;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@@ -34,51 +35,14 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     Ok(())
 });
 
-macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
-macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
-
-macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
-macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
-
 const_regex!{
-    pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
-    pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
-    pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
-    pub CIDR_V4_REGEX =  concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
-    pub CIDR_V6_REGEX =  concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
-    pub CIDR_REGEX =  concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|",  CIDR_V6_REGEX_STR!(), r")$");
-
-    pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
     pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; //  fixme: define in common_regex ?
 
-    pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters
-
-    /// Regex for safe identifiers.
-    ///
-    /// This
-    /// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html)
-    /// contains further information why it is reasonable to restict
-    /// names this way. This is not only useful for filenames, but for
-    /// any identifier command line tools work with.
-    pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
-
     /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
     pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
     /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
     pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
 
-    pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
-
-    pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
-
-    pub DNS_NAME_REGEX =  concat!(r"^", DNS_NAME!(), r"$");
-
-    pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|",  IPRE!(), r")$");
-
-    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|",  IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
-
-    pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
-
     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
 
     pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
@@ -86,68 +50,45 @@ const_regex!{
     pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
 
     pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
+
+    pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
+
+    pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
 }
 
 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
 
-pub const IP_V4_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&IP_V4_REGEX);
-
-pub const IP_V6_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&IP_V6_REGEX);
-
-pub const IP_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&IP_REGEX);
-
-pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
-
-pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);
-
-pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
-
-pub const BACKUP_ID_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
-
-pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
-
 pub const HOSTNAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&HOSTNAME_REGEX);
 
 pub const DNS_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_REGEX);
 
+pub const DNS_ALIAS_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
+
 pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
 
-pub const PASSWORD_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&PASSWORD_REGEX);
-
 pub const ACL_PATH_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&ACL_PATH_REGEX);
 
 pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
 
-pub const CIDR_V4_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&CIDR_V4_REGEX);
-
-pub const CIDR_V6_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&CIDR_V6_REGEX);
-
-pub const CIDR_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&CIDR_REGEX);
-
 pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
 
 pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
 
+pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
+
+pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)
     .min_length(1)
@@ -160,29 +101,12 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
     .max_length(64)
     .schema();
 
-pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
-    "X509 certificate fingerprint (sha256)."
-)
-    .format(&FINGERPRINT_SHA256_FORMAT)
-    .schema();
-
 pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
     "Tape encryption key fingerprint (sha256)."
 )
     .format(&FINGERPRINT_SHA256_FORMAT)
     .schema();
 
-pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
-    "Prevent changes if current configuration file has different \
-    SHA256 digest. This can be used to prevent concurrent \
-    modifications."
-)
-    .format(&PVE_CONFIG_DIGEST_FORMAT) .schema();
-
-
-pub const CHUNK_DIGEST_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
-
 pub const CHUNK_DIGEST_SCHEMA: Schema = StringSchema::new("Chunk digest (SHA256).")
     .format(&CHUNK_DIGEST_FORMAT)
     .schema();
@@ -277,57 +201,97 @@ pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
         EnumEntry::new("group", "Group")]))
     .schema();
 
-pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
-    StringSchema::new("Backup archive name.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
+#[api(
+    properties: {
+        propagate: {
+            schema: ACL_PROPAGATE_SCHEMA,
+        },
+       path: {
+            schema: ACL_PATH_SCHEMA,
+        },
+        ugid_type: {
+            schema: ACL_UGID_TYPE_SCHEMA,
+        },
+       ugid: {
+            type: String,
+            description: "User or Group ID.",
+        },
+       roleid: {
+            type: Role,
+        }
+    }
+)]
+#[derive(Serialize, Deserialize)]
+/// ACL list entry.
+pub struct AclListItem {
+    pub path: String,
+    pub ugid: String,
+    pub ugid_type: String,
+    pub propagate: bool,
+    pub roleid: String,
+}
+
+pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.")
+    .max_length(256)
     .schema();
 
-pub const BACKUP_TYPE_SCHEMA: Schema =
-    StringSchema::new("Backup type.")
-    .format(&ApiStringFormat::Enum(&[
-        EnumEntry::new("vm", "Virtual Machine Backup"),
-        EnumEntry::new("ct", "Container Backup"),
-        EnumEntry::new("host", "Host Backup")]))
+pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
+    .format(&DATASTORE_MAP_FORMAT)
+    .min_length(3)
+    .max_length(65)
+    .type_text("(<source>=)?<target>")
     .schema();
 
-pub const BACKUP_ID_SCHEMA: Schema =
-    StringSchema::new("Backup ID.")
-    .format(&BACKUP_ID_FORMAT)
+pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
     .schema();
 
-pub const BACKUP_TIME_SCHEMA: Schema =
-    IntegerSchema::new("Backup time (Unix epoch.)")
-    .minimum(1_547_797_308)
+pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of Datastore mappings (or single datastore), comma separated. \
+    For example 'a=b,e' maps the source datastore 'a' to target 'b' and \
+    all other sources to the default 'e'. If no default is given, only the \
+    specified sources are mapped.")
+    .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
     .schema();
 
-pub const UPID_SCHEMA: Schema = StringSchema::new("Unique Process/Task ID.")
-    .max_length(256)
+pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
+    "A snapshot in the format: 'store:type/id/time'")
+    .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
+    .type_text("store:type/id/time")
     .schema();
 
-pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
-    .min_length(3)
-    .max_length(32)
+pub const MEDIA_SET_UUID_SCHEMA: Schema =
+    StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).")
+    .format(&UUID_FORMAT)
+    .schema();
+
+pub const MEDIA_UUID_SCHEMA: Schema =
+    StringSchema::new("Media Uuid.")
+    .format(&UUID_FORMAT)
     .schema();
 
 pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run sync job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run garbage collection job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run prune job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run verify job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
@@ -357,10 +321,6 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
     .minimum(1)
     .schema();
 
-pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
-    .format(&SINGLE_LINE_COMMENT_FORMAT)
-    .schema();
-
 pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).")
     .format(&HOSTNAME_FORMAT)
     .schema();
@@ -381,344 +341,13 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
     .max_length(64)
     .schema();
 
-// Complex type definitions
-
-#[api(
-    properties: {
-        store: {
-            schema: DATASTORE_SCHEMA,
-        },
-        comment: {
-            optional: true,
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Basic information about a datastore.
-pub struct DataStoreListItem {
-    pub store: String,
-    pub comment: Option<String>,
-}
-
-#[api(
-    properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "last-backup": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
-        "backup-count": {
-            type: Integer,
-        },
-        files: {
-            items: {
-                schema: BACKUP_ARCHIVE_NAME_SCHEMA
-            },
-        },
-        owner: {
-            type: Authid,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Basic information about a backup group.
-pub struct GroupListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub last_backup: i64,
-    /// Number of contained snapshots
-    pub backup_count: u64,
-    /// List of contained archive files.
-    pub files: Vec<String>,
-    /// The owner of group
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<Authid>,
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// Result of a verify operation.
-pub enum VerifyState {
-    /// Verification was successful
-    Ok,
-    /// Verification reported one or more errors
-    Failed,
-}
-
-#[api(
-    properties: {
-        upid: {
-            schema: UPID_SCHEMA
-        },
-        state: {
-            type: VerifyState
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-/// Task properties.
-pub struct SnapshotVerifyState {
-    /// UPID of the verify task
-    pub upid: UPID,
-    /// State of the verification. Enum.
-    pub state: VerifyState,
-}
-
-#[api(
-    properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "backup-time": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
-        comment: {
-            schema: SINGLE_LINE_COMMENT_SCHEMA,
-            optional: true,
-        },
-        verification: {
-            type: SnapshotVerifyState,
-            optional: true,
-        },
-        fingerprint: {
-            type: String,
-            optional: true,
-        },
-        files: {
-            items: {
-                schema: BACKUP_ARCHIVE_NAME_SCHEMA
-            },
-        },
-        owner: {
-            type: Authid,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Basic information about backup snapshot.
-pub struct SnapshotListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub backup_time: i64,
-    /// The first line from manifest "notes"
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub comment: Option<String>,
-    /// The result of the last run verify task
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub verification: Option<SnapshotVerifyState>,
-    /// Fingerprint of encryption key
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub fingerprint: Option<Fingerprint>,
-    /// List of contained archive files.
-    pub files: Vec<BackupContent>,
-    /// Overall snapshot size (sum of all archive sizes).
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub size: Option<u64>,
-    /// The owner of the snapshots group
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<Authid>,
-}
-
-#[api(
-    properties: {
-        "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
-        },
-        "backup-id": {
-            schema: BACKUP_ID_SCHEMA,
-        },
-        "backup-time": {
-            schema: BACKUP_TIME_SCHEMA,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Prune result.
-pub struct PruneListItem {
-    pub backup_type: String, // enum
-    pub backup_id: String,
-    pub backup_time: i64,
-    /// Keep snapshot
-    pub keep: bool,
-}
-
-pub const PRUNE_SCHEMA_KEEP_DAILY: Schema = IntegerSchema::new(
-    "Number of daily backups to keep.")
-    .minimum(1)
-    .schema();
-
-pub const PRUNE_SCHEMA_KEEP_HOURLY: Schema = IntegerSchema::new(
-    "Number of hourly backups to keep.")
-    .minimum(1)
-    .schema();
-
-pub const PRUNE_SCHEMA_KEEP_LAST: Schema = IntegerSchema::new(
-    "Number of backups to keep.")
-    .minimum(1)
-    .schema();
-
-pub const PRUNE_SCHEMA_KEEP_MONTHLY: Schema = IntegerSchema::new(
-    "Number of monthly backups to keep.")
-    .minimum(1)
-    .schema();
-
-pub const PRUNE_SCHEMA_KEEP_WEEKLY: Schema = IntegerSchema::new(
-    "Number of weekly backups to keep.")
-    .minimum(1)
-    .schema();
-
-pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = IntegerSchema::new(
-    "Number of yearly backups to keep.")
-    .minimum(1)
+pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
+    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .min_length(2)
+    .max_length(32)
     .schema();
 
-#[api(
-    properties: {
-        "filename": {
-            schema: BACKUP_ARCHIVE_NAME_SCHEMA,
-        },
-        "crypt-mode": {
-            type: CryptMode,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Basic information about archive files inside a backup snapshot.
-pub struct BackupContent {
-    pub filename: String,
-    /// Info if file is encrypted, signed, or neither.
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub crypt_mode: Option<CryptMode>,
-    /// Archive size (from backup manifest).
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub size: Option<u64>,
-}
-
-#[api(
-    properties: {
-        "upid": {
-            optional: true,
-            schema: UPID_SCHEMA,
-        },
-    },
-)]
-#[derive(Clone, Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
-/// Garbage collection status.
-pub struct GarbageCollectionStatus {
-    pub upid: Option<String>,
-    /// Number of processed index files.
-    pub index_file_count: usize,
-    /// Sum of bytes referred by index files.
-    pub index_data_bytes: u64,
-    /// Bytes used on disk.
-    pub disk_bytes: u64,
-    /// Chunks used on disk.
-    pub disk_chunks: usize,
-    /// Sum of removed bytes.
-    pub removed_bytes: u64,
-    /// Number of removed chunks.
-    pub removed_chunks: usize,
-    /// Sum of pending bytes (pending removal - kept for safety).
-    pub pending_bytes: u64,
-    /// Number of pending chunks (pending removal - kept for safety).
-    pub pending_chunks: usize,
-    /// Number of chunks marked as .bad by verify that have been removed by GC.
-    pub removed_bad: usize,
-    /// Number of chunks still marked as .bad after garbage collection.
-    pub still_bad: usize,
-}
-
-impl Default for GarbageCollectionStatus {
-    fn default() -> Self {
-        GarbageCollectionStatus {
-            upid: None,
-            index_file_count: 0,
-            index_data_bytes: 0,
-            disk_bytes: 0,
-            disk_chunks: 0,
-            removed_bytes: 0,
-            removed_chunks: 0,
-            pending_bytes: 0,
-            pending_chunks: 0,
-            removed_bad: 0,
-            still_bad: 0,
-        }
-    }
-}
-
-
-#[api()]
-#[derive(Serialize, Deserialize)]
-/// Storage space usage information.
-pub struct StorageStatus {
-    /// Total space (bytes).
-    pub total: u64,
-    /// Used space (bytes).
-    pub used: u64,
-    /// Available space (bytes).
-    pub avail: u64,
-}
-
-#[api()]
-#[derive(Serialize, Deserialize, Default)]
-/// Backup Type group/snapshot counts.
-pub struct TypeCounts {
-    /// The number of groups of the type.
-    pub groups: u64,
-    /// The number of snapshots of the type.
-    pub snapshots: u64,
-}
-
-#[api(
-    properties: {
-        ct: {
-            type: TypeCounts,
-            optional: true,
-        },
-        host: {
-            type: TypeCounts,
-            optional: true,
-        },
-        vm: {
-            type: TypeCounts,
-            optional: true,
-        },
-        other: {
-            type: TypeCounts,
-            optional: true,
-        },
-    },
-)]
-#[derive(Serialize, Deserialize, Default)]
-/// Counts of groups/snapshots per BackupType.
-pub struct Counts {
-    /// The counts for CT backups
-    pub ct: Option<TypeCounts>,
-    /// The counts for Host backups
-    pub host: Option<TypeCounts>,
-    /// The counts for VM backups
-    pub vm: Option<TypeCounts>,
-    /// The counts for other backup types
-    pub other: Option<TypeCounts>,
-}
+// Complex type definitions
 
 #[api(
     properties: {
@@ -1077,7 +706,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
     ];
 
     for fingerprint in invalid_fingerprints.iter() {
-        if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+        if parse_simple_value(fingerprint, &schema).is_ok() {
             bail!("test fingerprint '{}' failed -  got Ok() while exception an error.", fingerprint);
         }
     }
@@ -1118,7 +747,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];
 
     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+        if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
             bail!("test userid '{}' failed -  got Ok() while exception an error.", name);
         }
     }
@@ -1212,7 +841,7 @@ pub struct APTUpdateInfo {
 pub enum Notify {
     /// Never send notification
     Never,
-    /// Send notifications for failed and sucessful jobs
+    /// Send notifications for failed and successful jobs
     Always,
     /// Send notifications for failed jobs only
     Error,
@@ -1245,7 +874,205 @@ pub struct DatastoreNotify {
     pub sync: Option<Notify>,
 }
 
+/// An entry in a hierarchy of files for restore and listing.
+#[api()]
+#[derive(Serialize, Deserialize)]
+pub struct ArchiveEntry {
+    /// Base64-encoded full path to the file, including the filename
+    pub filepath: String,
+    /// Displayable filename text for UIs
+    pub text: String,
+    /// File or directory type of this entry
+    #[serde(rename = "type")]
+    pub entry_type: String,
+    /// Is this entry a leaf node, or does it have children (i.e. a directory)?
+    pub leaf: bool,
+    /// The file size, if entry_type is 'f' (file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub size: Option<u64>,
+    /// The file "last modified" time stamp, if entry_type is 'f' (file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub mtime: Option<i64>,
+}
+
+impl ArchiveEntry {
+    pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
+        let size = match entry_type {
+            Some(DirEntryAttribute::File { size, .. }) => Some(*size),
+            _ => None,
+        };
+        Self::new_with_size(filepath, entry_type, size)
+    }
+
+    pub fn new_with_size(
+        filepath: &[u8],
+        entry_type: Option<&DirEntryAttribute>,
+        size: Option<u64>,
+    ) -> Self {
+        Self {
+            filepath: base64::encode(filepath),
+            text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
+                .to_string(),
+            entry_type: match entry_type {
+                Some(entry_type) => CatalogEntryType::from(entry_type).to_string(),
+                None => "v".to_owned(),
+            },
+            leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
+            size,
+            mtime: match entry_type {
+                Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
+                _ => None,
+            },
+        }
+    }
+}
+
 pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
     "Datastore notification setting")
     .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
     .schema();
+
+
+#[api(
+    properties: {
+        "next-run": {
+            description: "Estimated time of the next run (UNIX epoch).",
+            optional: true,
+            type: Integer,
+        },
+        "last-run-state": {
+            description: "Result of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-upid": {
+            description: "Task UPID of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-endtime": {
+            description: "Endtime of the last run.",
+            optional: true,
+            type: Integer,
+        },
+    }
+)]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all="kebab-case")]
+/// Job Scheduling Status
+pub struct JobScheduleStatus {
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub next_run: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_state: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_upid: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_endtime: Option<i64>,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node memory usage counters
+pub struct NodeMemoryCounters {
+    /// Total memory
+    pub total: u64,
+    /// Used memory
+    pub used: u64,
+    /// Free memory
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node swap usage counters
+pub struct NodeSwapCounters {
+    /// Total swap
+    pub total: u64,
+    /// Used swap
+    pub used: u64,
+    /// Free swap
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all = "kebab-case")]
+/// Contains general node information such as the fingerprint
+pub struct NodeInformation {
+    /// The SSL Fingerprint
+    pub fingerprint: String,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Information about the CPU
+pub struct NodeCpuInformation {
+    /// The CPU model
+    pub model: String,
+    /// The number of CPU sockets
+    pub sockets: usize,
+    /// The number of CPU cores (incl. threads)
+    pub cpus: usize,
+}
+
+#[api(
+    properties: {
+        memory: {
+            type: NodeMemoryCounters,
+        },
+        root: {
+            type: StorageStatus,
+        },
+        swap: {
+            type: NodeSwapCounters,
+        },
+        loadavg: {
+            type: Array,
+            items: {
+                type: Number,
+                description: "the load",
+            }
+        },
+        cpuinfo: {
+            type: NodeCpuInformation,
+        },
+        info: {
+            type: NodeInformation,
+        }
+    },
+)]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// The Node status
+pub struct NodeStatus {
+    pub memory: NodeMemoryCounters,
+    pub root: StorageStatus,
+    pub swap: NodeSwapCounters,
+    /// The current uptime of the server.
+    pub uptime: u64,
+    /// Load for 1, 5 and 15 minutes.
+    pub loadavg: [f64; 3],
+    /// The current kernel version.
+    pub kversion: String,
+    /// Total CPU usage since last query.
+    pub cpu: f64,
+    /// Total IO wait since last query.
+    pub wait: f64,
+    pub cpuinfo: NodeCpuInformation,
+    pub info: NodeInformation,
+}
+
+pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
+    "HTTP proxy configuration [http://]<host>[:port]")
+    .format(&ApiStringFormat::VerifyFn(|s| {
+        proxmox_http::ProxyConfig::parse_proxy_url(s)?;
+        Ok(())
+    }))
+    .min_length(1)
+    .max_length(128)
+    .type_text("[http://]<host>[:port]")
+    .schema();