git.proxmox.com Git - proxmox-backup.git/blobdiff - src/api2/types/mod.rs
typo fixes all over the place
index fb386d461990b5540c416427272d2f2dbddd9adf..3e720dad5009e31d5568a17cd5cab54a087e3355 100644 (file)
@@ -1,11 +1,23 @@
+//! API Type Definitions
+
 use anyhow::bail;
 use serde::{Deserialize, Serialize};
 
 use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
-use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
-
-use crate::backup::CryptMode;
+use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
+
+use crate::{
+    backup::{
+        CryptMode,
+        Fingerprint,
+        BACKUP_ID_REGEX,
+        DirEntryAttribute,
+        CatalogEntryType,
+    },
+    server::UPID,
+    config::acl::Role,
+};
 
 #[macro_use]
 mod macros;
@@ -13,9 +25,14 @@ mod macros;
 #[macro_use]
 mod userid;
 pub use userid::{Realm, RealmRef};
+pub use userid::{Tokenname, TokennameRef};
 pub use userid::{Username, UsernameRef};
 pub use userid::Userid;
-pub use userid::PROXMOX_GROUP_ID_SCHEMA;
+pub use userid::Authid;
+pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GROUP_ID_SCHEMA};
+
+mod tape;
+pub use tape::*;
 
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
@@ -29,7 +46,7 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
 });
 
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
-macro_rules! DNS_NAME { () => (concat!(r"(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!())) }
+macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
 
 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
@@ -56,23 +73,32 @@ const_regex!{
     /// any identifier command line tools work with.
     pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
 
+    /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
+    pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
+    /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
+    pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
+
     pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
 
     pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
 
     pub DNS_NAME_REGEX =  concat!(r"^", DNS_NAME!(), r"$");
 
-    pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|",  IPRE!(), r"$");
+    pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|",  IPRE!(), r")$");
 
-    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|",  IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
+    pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|",  IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
 
-    pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
+    pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
 
     pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
 
+    pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
+
     pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
 
     pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
+
+    pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
 }
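// Illustrative sketch, not part of this commit: what a few of the new patterns
// accept. It uses the `regex` crate directly with the same pattern strings, and
// simplifies PROXMOX_SAFE_ID_REGEX_STR to a plain character class so the
// example stays self-contained.
#[test]
fn example_new_patterns() {
    use regex::Regex;

    let uuid = Regex::new(r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$").unwrap();
    assert!(uuid.is_match("550e8400-e29b-41d4-a716-446655440000"));
    // upper-case hex digits are rejected on purpose
    assert!(!uuid.is_match("550E8400-E29B-41D4-A716-446655440000"));

    let key = Regex::new(r"^pbs(?:[cbsp])-[0-9a-f]{10}$").unwrap();
    assert!(key.is_match("pbsb-0123456789"));
    assert!(!key.is_match("pbsx-0123456789"));

    // Same structure as SYNC_JOB_WORKER_ID_REGEX ('REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:...'),
    // with the safe-id pattern simplified for the example.
    let safe_id = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";
    let sync_id = Regex::new(&format!(r"^({0}):({0}):({0}):", safe_id)).unwrap();
    let caps = sync_id.captures("my-remote:remote-store:local-store:sync-job-1").unwrap();
    assert_eq!(&caps[1], "my-remote");
    assert_eq!(&caps[3], "local-store");
}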
 
 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -90,12 +116,18 @@ pub const IP_FORMAT: ApiStringFormat =
 pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
 
-pub const CERT_FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
-    ApiStringFormat::Pattern(&CERT_FINGERPRINT_SHA256_REGEX);
+pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);
 
 pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
 
+pub const BACKUP_ID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
+
+pub const UUID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&UUID_REGEX);
+
 pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
 
@@ -126,6 +158,9 @@ pub const CIDR_V6_FORMAT: ApiStringFormat =
 pub const CIDR_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&CIDR_REGEX);
 
+pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
+
 pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
 
@@ -144,17 +179,22 @@ pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
 pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
     "X509 certificate fingerprint (sha256)."
 )
-    .format(&CERT_FINGERPRINT_SHA256_FORMAT)
+    .format(&FINGERPRINT_SHA256_FORMAT)
     .schema();
 
-pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(r#"\
-Prevent changes if current configuration file has different SHA256 digest.
-This can be used to prevent concurrent modifications.
-"#
+pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
+    "Tape encryption key fingerprint (sha256)."
 )
-    .format(&PVE_CONFIG_DIGEST_FORMAT)
+    .format(&FINGERPRINT_SHA256_FORMAT)
     .schema();
 
+pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
+    "Prevent changes if current configuration file has different \
+    SHA256 digest. This can be used to prevent concurrent \
+    modifications."
+)
+    .format(&PVE_CONFIG_DIGEST_FORMAT)
+    .schema();
+
 
 pub const CHUNK_DIGEST_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
@@ -253,6 +293,36 @@ pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
         EnumEntry::new("group", "Group")]))
     .schema();
 
+#[api(
+    properties: {
+        propagate: {
+            schema: ACL_PROPAGATE_SCHEMA,
+        },
+        path: {
+            schema: ACL_PATH_SCHEMA,
+        },
+        ugid_type: {
+            schema: ACL_UGID_TYPE_SCHEMA,
+        },
+        ugid: {
+            type: String,
+            description: "User or Group ID.",
+        },
+        roleid: {
+            type: Role,
+        }
+    }
+)]
+#[derive(Serialize, Deserialize)]
+/// ACL list entry.
+pub struct AclListItem {
+    pub path: String,
+    pub ugid: String,
+    pub ugid_type: String,
+    pub propagate: bool,
+    pub roleid: String,
+}
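// Illustrative sketch, not part of this commit: the JSON shape an AclListItem
// takes on the wire (field values are made up; assumes serde_json, which the
// crate already depends on).
#[test]
fn example_acl_list_item_json() {
    let item = AclListItem {
        path: "/datastore/store1".to_string(),
        ugid: "backup@pbs".to_string(),
        ugid_type: "user".to_string(),
        propagate: true,
        roleid: "DatastoreBackup".to_string(),
    };
    let json = serde_json::to_value(&item).unwrap();
    // no rename_all attribute, so field names stay snake_case
    assert_eq!(json["ugid_type"], "user");
    assert_eq!(json["propagate"], true);
}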
+
 pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
     StringSchema::new("Backup archive name.")
     .format(&PROXMOX_SAFE_ID_FORMAT)
@@ -268,7 +338,7 @@ pub const BACKUP_TYPE_SCHEMA: Schema =
 
 pub const BACKUP_ID_SCHEMA: Schema =
     StringSchema::new("Backup ID.")
-    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .format(&BACKUP_ID_FORMAT)
     .schema();
 
 pub const BACKUP_TIME_SCHEMA: Schema =
@@ -286,19 +356,38 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();
 
+pub const MEDIA_SET_UUID_SCHEMA: Schema =
+    StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
+    .format(&UUID_FORMAT)
+    .schema();
+
+pub const MEDIA_UUID_SCHEMA: Schema =
+    StringSchema::new("Media Uuid.")
+    .format(&UUID_FORMAT)
+    .schema();
+
 pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run sync job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run garbage collection job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run prune job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
+    .schema();
+
+pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
+    "Run verify job at specified schedule.")
+    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
@@ -318,6 +407,16 @@ pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
     .default(true)
     .schema();
 
+pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
+    "Do not verify backups that are already verified if their verification is not outdated.")
+    .default(true)
+    .schema();
+
+pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
+    "Days after that a verification becomes outdated")
+    .minimum(1)
+    .schema();
+
 pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();
@@ -330,6 +429,12 @@ pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP addr
     .format(&DNS_NAME_OR_IP_FORMAT)
     .schema();
 
+pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
+    .format(&SUBSCRIPTION_KEY_FORMAT)
+    .min_length(15)
+    .max_length(16)
+    .schema();
+
 pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
     .format(&BLOCKDEVICE_NAME_FORMAT)
     .min_length(3)
@@ -338,6 +443,25 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
 
 // Complex type definitions
 
+#[api(
+    properties: {
+        store: {
+            schema: DATASTORE_SCHEMA,
+        },
+        comment: {
+            optional: true,
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Basic information about a datastore.
+pub struct DataStoreListItem {
+    pub store: String,
+    pub comment: Option<String>,
+}
+
 #[api(
     properties: {
         "backup-type": {
@@ -358,7 +482,7 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
             },
         },
         owner: {
-            type: Userid,
+            type: Authid,
             optional: true,
         },
     },
@@ -376,7 +500,37 @@ pub struct GroupListItem {
     pub files: Vec<String>,
     /// The owner of group
     #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<Userid>,
+    pub owner: Option<Authid>,
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Result of a verify operation.
+pub enum VerifyState {
+    /// Verification was successful
+    Ok,
+    /// Verification reported one or more errors
+    Failed,
+}
+
+#[api(
+    properties: {
+        upid: {
+            schema: UPID_SCHEMA
+        },
+        state: {
+            type: VerifyState
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+/// Verification state of a snapshot.
+pub struct SnapshotVerifyState {
+    /// UPID of the verify task
+    pub upid: UPID,
+    /// State of the verification.
+    pub state: VerifyState,
 }
 
 #[api(
@@ -390,13 +544,25 @@ pub struct GroupListItem {
         "backup-time": {
             schema: BACKUP_TIME_SCHEMA,
         },
+        comment: {
+            schema: SINGLE_LINE_COMMENT_SCHEMA,
+            optional: true,
+        },
+        verification: {
+            type: SnapshotVerifyState,
+            optional: true,
+        },
+        fingerprint: {
+            type: String,
+            optional: true,
+        },
         files: {
             items: {
                 schema: BACKUP_ARCHIVE_NAME_SCHEMA
             },
         },
         owner: {
-            type: Userid,
+            type: Authid,
             optional: true,
         },
     },
@@ -411,6 +577,12 @@ pub struct SnapshotListItem {
     /// The first line from manifest "notes"
     #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
+    /// The result of the last run verify task
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub verification: Option<SnapshotVerifyState>,
+    /// Fingerprint of encryption key
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub fingerprint: Option<Fingerprint>,
     /// List of contained archive files.
     pub files: Vec<BackupContent>,
     /// Overall snapshot size (sum of all archive sizes).
@@ -418,7 +590,7 @@ pub struct SnapshotListItem {
     pub size: Option<u64>,
     /// The owner of the snapshots group
     #[serde(skip_serializing_if="Option::is_none")]
-    pub owner: Option<Userid>,
+    pub owner: Option<Authid>,
 }
 
 #[api(
@@ -528,6 +700,10 @@ pub struct GarbageCollectionStatus {
     pub pending_bytes: u64,
     /// Number of pending chunks (pending removal - kept for safety).
     pub pending_chunks: usize,
+    /// Number of chunks marked as .bad by verify that have been removed by GC.
+    pub removed_bad: usize,
+    /// Number of chunks still marked as .bad after garbage collection.
+    pub still_bad: usize,
 }
 
 impl Default for GarbageCollectionStatus {
@@ -542,6 +718,8 @@ impl Default for GarbageCollectionStatus {
             removed_chunks: 0,
             pending_bytes: 0,
             pending_chunks: 0,
+            removed_bad: 0,
+            still_bad: 0,
         }
     }
 }
@@ -559,10 +737,83 @@ pub struct StorageStatus {
     pub avail: u64,
 }
 
+#[api()]
+#[derive(Serialize, Deserialize, Default)]
+/// Backup Type group/snapshot counts.
+pub struct TypeCounts {
+    /// The number of groups of the type.
+    pub groups: u64,
+    /// The number of snapshots of the type.
+    pub snapshots: u64,
+}
+
+#[api(
+    properties: {
+        ct: {
+            type: TypeCounts,
+            optional: true,
+        },
+        host: {
+            type: TypeCounts,
+            optional: true,
+        },
+        vm: {
+            type: TypeCounts,
+            optional: true,
+        },
+        other: {
+            type: TypeCounts,
+            optional: true,
+        },
+    },
+)]
+#[derive(Serialize, Deserialize, Default)]
+/// Counts of groups/snapshots per BackupType.
+pub struct Counts {
+    /// The counts for CT backups
+    pub ct: Option<TypeCounts>,
+    /// The counts for Host backups
+    pub host: Option<TypeCounts>,
+    /// The counts for VM backups
+    pub vm: Option<TypeCounts>,
+    /// The counts for other backup types
+    pub other: Option<TypeCounts>,
+}
+
+#[api(
+    properties: {
+        "gc-status": {
+            type: GarbageCollectionStatus,
+            optional: true,
+        },
+        counts: {
+            type: Counts,
+            optional: true,
+        },
+    },
+)]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Overall Datastore status and useful information.
+pub struct DataStoreStatus {
+    /// Total space (bytes).
+    pub total: u64,
+    /// Used space (bytes).
+    pub used: u64,
+    /// Available space (bytes).
+    pub avail: u64,
+    /// Status of last GC
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub gc_status: Option<GarbageCollectionStatus>,
+    /// Group/Snapshot counts
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub counts: Option<Counts>,
+}
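// Illustrative sketch, not part of this commit: with rename_all="kebab-case"
// and skip_serializing_if, a DataStoreStatus without GC status or counts
// serializes to just the three space fields (assumes serde_json).
#[test]
fn example_datastore_status_json() {
    let status = DataStoreStatus {
        total: 100,
        used: 25,
        avail: 75,
        gc_status: None,
        counts: None,
    };
    let json = serde_json::to_value(&status).unwrap();
    assert_eq!(json, serde_json::json!({ "total": 100, "used": 25, "avail": 75 }));
}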
+
 #[api(
     properties: {
         upid: { schema: UPID_SCHEMA },
-        user: { type: Userid },
+        user: { type: Authid },
     },
 )]
 #[derive(Serialize, Deserialize)]
@@ -581,8 +832,8 @@ pub struct TaskListItem {
     pub worker_type: String,
     /// Worker ID (arbitrary ASCII string)
     pub worker_id: Option<String>,
-    /// The user who started the task
-    pub user: Userid,
+    /// The authenticated entity who started the task
+    pub user: Authid,
     /// The task end time (Epoch)
     #[serde(skip_serializing_if="Option::is_none")]
     pub endtime: Option<i64>,
@@ -595,7 +846,7 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
     fn from(info: crate::server::TaskListInfo) -> Self {
         let (endtime, status) = info
             .state
-            .map_or_else(|| (None, None), |(a,b)| (Some(a), Some(b)));
+            .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));
 
         TaskListItem {
             upid: info.upid_str,
@@ -605,13 +856,27 @@ impl From<crate::server::TaskListInfo> for TaskListItem {
             starttime: info.upid.starttime,
             worker_type: info.upid.worker_type,
             worker_id: info.upid.worker_id,
-            user: info.upid.userid,
+            user: info.upid.auth_id,
             endtime,
             status,
         }
     }
 }
 
+#[api()]
+#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Task state classification.
+pub enum TaskStateType {
+    /// Ok
+    OK,
+    /// Warning
+    Warning,
+    /// Error
+    Error,
+    /// Unknown
+    Unknown,
+}
+
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -654,7 +919,7 @@ pub enum LinuxBondMode {
     /// Broadcast policy
     broadcast = 3,
     /// IEEE 802.3ad Dynamic link aggregation
-    //#[serde(rename = "802.3ad")]
+    #[serde(rename = "802.3ad")]
     ieee802_3ad = 4,
     /// Adaptive transmit load balancing
     balance_tlb = 5,
@@ -662,6 +927,23 @@ pub enum LinuxBondMode {
     balance_alb = 6,
 }
 
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+#[allow(non_camel_case_types)]
+#[repr(u8)]
+/// Bond Transmit Hash Policy for LACP (802.3ad)
+pub enum BondXmitHashPolicy {
+    /// Layer 2
+    layer2 = 0,
+    /// Layer 2+3
+    #[serde(rename = "layer2+3")]
+    layer2_3 = 1,
+    /// Layer 3+4
+    #[serde(rename = "layer3+4")]
+    layer3_4 = 2,
+}
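// Illustrative sketch, not part of this commit: the explicit serde renames map
// the Rust identifiers onto the kernel's bonding policy names (assumes
// serde_json).
#[test]
fn example_bond_xmit_hash_policy_names() {
    assert_eq!(serde_json::to_value(BondXmitHashPolicy::layer2).unwrap(), "layer2");
    assert_eq!(serde_json::to_value(BondXmitHashPolicy::layer2_3).unwrap(), "layer2+3");
    assert_eq!(serde_json::to_value(BondXmitHashPolicy::layer3_4).unwrap(), "layer3+4");
}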
+
 #[api()]
 #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -767,7 +1049,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = StringSchema::new(
         bond_mode: {
             type: LinuxBondMode,
             optional: true,
-        }
+        },
+        "bond-primary": {
+            schema: NETWORK_INTERFACE_NAME_SCHEMA,
+            optional: true,
+        },
+        bond_xmit_hash_policy: {
+            type: BondXmitHashPolicy,
+            optional: true,
+        },
     }
 )]
 #[derive(Debug, Serialize, Deserialize)]
@@ -824,6 +1114,10 @@ pub struct Interface {
     pub slaves: Option<Vec<String>>,
     #[serde(skip_serializing_if="Option::is_none")]
     pub bond_mode: Option<LinuxBondMode>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(rename = "bond-primary")]
+    pub bond_primary: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
 }
 
 // Regression tests
@@ -843,7 +1137,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
     ];
 
     for fingerprint in invalid_fingerprints.iter() {
-        if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+        if parse_simple_value(fingerprint, &schema).is_ok() {
             bail!("test fingerprint '{}' failed -  got Ok() while exception an error.", fingerprint);
         }
     }
@@ -884,7 +1178,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
     ];
 
     for name in invalid_user_ids.iter() {
-        if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+        if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
             bail!("test userid '{}' failed -  got Ok() while exception an error.", name);
         }
     }
@@ -942,7 +1236,7 @@ pub enum RRDTimeFrameResolution {
 }
 
 #[api()]
-#[derive(Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "PascalCase")]
 /// Describes a package for which an update is available.
 pub struct APTUpdateInfo {
@@ -966,4 +1260,220 @@ pub struct APTUpdateInfo {
     pub section: String,
     /// URL under which the package's changelog can be retrieved
     pub change_log_url: String,
+    /// Custom extra field for additional package information
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub extra_info: Option<String>,
+}
+
+#[api()]
+#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// When to send notifications.
+pub enum Notify {
+    /// Never send notification
+    Never,
+    /// Send notifications for failed and successful jobs
+    Always,
+    /// Send notifications for failed jobs only
+    Error,
+}
+
+#[api(
+    properties: {
+        gc: {
+            type: Notify,
+            optional: true,
+        },
+        verify: {
+            type: Notify,
+            optional: true,
+        },
+        sync: {
+            type: Notify,
+            optional: true,
+        },
+    },
+)]
+#[derive(Debug, Serialize, Deserialize)]
+/// Datastore notify settings
+pub struct DatastoreNotify {
+    /// Garbage collection settings
+    pub gc: Option<Notify>,
+    /// Verify job setting
+    pub verify: Option<Notify>,
+    /// Sync job setting
+    pub sync: Option<Notify>,
+}
+
+/// An entry in a hierarchy of files for restore and listing.
+#[api()]
+#[derive(Serialize, Deserialize)]
+pub struct ArchiveEntry {
+    /// Base64-encoded full path to the file, including the filename
+    pub filepath: String,
+    /// Displayable filename text for UIs
+    pub text: String,
+    /// File or directory type of this entry
+    #[serde(rename = "type")]
+    pub entry_type: String,
+    /// Is this entry a leaf node, or does it have children (i.e. a directory)?
+    pub leaf: bool,
+    /// The file size, if entry_type is 'f' (file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub size: Option<u64>,
+    /// The file "last modified" time stamp, if entry_type is 'f' (file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub mtime: Option<i64>,
+}
+
+impl ArchiveEntry {
+    pub fn new(filepath: &[u8], entry_type: &DirEntryAttribute) -> Self {
+        Self {
+            filepath: base64::encode(filepath),
+            text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
+                .to_string(),
+            entry_type: CatalogEntryType::from(entry_type).to_string(),
+            leaf: !matches!(entry_type, DirEntryAttribute::Directory { .. }),
+            size: match entry_type {
+                DirEntryAttribute::File { size, .. } => Some(*size),
+                _ => None
+            },
+            mtime: match entry_type {
+                DirEntryAttribute::File { mtime, .. } => Some(*mtime),
+                _ => None
+            },
+        }
+    }
+}
+
+pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
+    "Datastore notification setting")
+    .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
+    .schema();
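// Note (illustrative, not part of this commit): as a PropertyString, the
// datastore notify setting is passed as a single comma-separated value,
// e.g. "gc=never,verify=always,sync=error".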
+
+
+pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(1)
+    .max_length(64)
+    .schema();
+
+#[api(default: "scrypt")]
+#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+/// Key derivation function for password protected encryption keys.
+pub enum Kdf {
+    /// Do not encrypt the key.
+    None,
+    /// Encrypt the key with a password using SCrypt.
+    Scrypt,
+    /// Encrypt the key with a password using PBKDF2.
+    PBKDF2,
+}
+
+impl Default for Kdf {
+    #[inline]
+    fn default() -> Self {
+        Kdf::Scrypt
+    }
+}
+
+#[api(
+    properties: {
+        kdf: {
+            type: Kdf,
+        },
+        fingerprint: {
+            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
+            optional: true,
+        },
+    },
+)]
+#[derive(Deserialize, Serialize)]
+/// Encryption Key Information
+pub struct KeyInfo {
+    /// Path to key (if stored in a file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub path: Option<String>,
+    pub kdf: Kdf,
+    /// Key creation time
+    pub created: i64,
+    /// Key modification time
+    pub modified: i64,
+    /// Key fingerprint
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub fingerprint: Option<String>,
+    /// Password hint
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub hint: Option<String>,
+}
+
+#[api]
+#[derive(Deserialize, Serialize)]
+/// RSA public key information
+pub struct RsaPubKeyInfo {
+    /// Path to key (if stored in a file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub path: Option<String>,
+    /// RSA exponent
+    pub exponent: String,
+    /// Hex-encoded RSA modulus
+    pub modulus: String,
+    /// Key (modulus) length in bits
+    pub length: usize,
+}
+
+impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
+    type Error = anyhow::Error;
+
+    fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
+        let modulus = value.n().to_hex_str()?.to_string();
+        let exponent = value.e().to_dec_str()?.to_string();
+        let length = value.size() as usize * 8;
+
+        Ok(Self {
+            path: None,
+            exponent,
+            modulus,
+            length,
+        })
+    }
+}
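// Illustrative sketch, not part of this commit: building an RsaPubKeyInfo from
// a freshly generated throwaway key (assumes the `openssl` crate, which the
// crate already depends on).
#[test]
fn example_rsa_pub_key_info() -> Result<(), anyhow::Error> {
    use std::convert::TryFrom;

    let private = openssl::rsa::Rsa::generate(2048)?;
    let public = openssl::rsa::Rsa::from_public_components(
        private.n().to_owned()?,
        private.e().to_owned()?,
    )?;
    let info = RsaPubKeyInfo::try_from(public)?;
    assert_eq!(info.length, 2048);
    assert_eq!(info.exponent, "65537"); // openssl's default public exponent
    Ok(())
}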
+
+#[api(
+    properties: {
+        "next-run": {
+            description: "Estimated time of the next run (UNIX epoch).",
+            optional: true,
+            type: Integer,
+        },
+        "last-run-state": {
+            description: "Result of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-upid": {
+            description: "Task UPID of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-endtime": {
+            description: "Endtime of the last run.",
+            optional: true,
+            type: Integer,
+        },
+    }
+)]
+#[serde(rename_all="kebab-case")]
+#[derive(Serialize,Deserialize,Default)]
+/// Job Scheduling Status
+pub struct JobScheduleStatus {
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub next_run: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_state: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_upid: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_endtime: Option<i64>,
 }