+//! API Type Definitions
+
use anyhow::bail;
use serde::{Deserialize, Serialize};
use proxmox::api::{api, schema::*};
use proxmox::const_regex;
-use proxmox::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
+use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
+
+use pbs_datastore::catalog::CatalogEntryType;
-use crate::backup::CryptMode;
+use crate::{
+ backup::{
+ CryptMode,
+ Fingerprint,
+ DirEntryAttribute,
+ },
+ server::UPID,
+ config::acl::Role,
+};
#[macro_use]
mod macros;
#[macro_use]
mod userid;
pub use userid::{Realm, RealmRef};
+pub use userid::{Tokenname, TokennameRef};
pub use userid::{Username, UsernameRef};
pub use userid::Userid;
-pub use userid::PROXMOX_GROUP_ID_SCHEMA;
+pub use userid::Authid;
+pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GROUP_ID_SCHEMA};
+
+mod tape;
+pub use tape::*;
+
+mod file_restore;
+pub use file_restore::*;
+
+mod acme;
+pub use acme::*;
// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
Ok(())
});
// Regex building blocks for backup snapshot paths. Declared as macros (not
// `const`s) so they can be spliced into larger patterns via `concat!`.

// Backup ID: leading alphanumeric or '_', then alphanumerics, '.', '_', '-'.
macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
// The fixed set of backup types.
macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
// Backup timestamp: "YYYY-MM-DDThh:mm:ssZ" (UTC, RFC 3339 style).
macro_rules! BACKUP_TIME_RE {
    () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z")
}
// Full snapshot path "<type>/<id>/<time>" with each component captured.
macro_rules! SNAPSHOT_PATH_REGEX_STR {
    () => (
        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
    );
}
+
// DNS label (RFC 1123 style): alphanumeric, optionally with inner hyphens.
macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
// Fully qualified DNS name: zero or more dotted labels plus a final label.
macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }

// Alias labels additionally allow a leading '_' — presumably for ACME DNS
// challenge aliases like "_acme-challenge"; TODO confirm with the acme module.
macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
macro_rules! DNS_ALIAS_NAME {
    () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
}
// IPv4/IPv6 networks in CIDR notation (address plus prefix length).
macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
/// any identifier command line tools work with.
pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
+ /// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
+ pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
+ /// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:ACTUAL_JOB_ID'
+ pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
+
pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
- pub DNS_NAME_OR_IP_REGEX = concat!(r"^", DNS_NAME!(), "|", IPRE!(), r"$");
+ pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
+
+ pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
- pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE!() ,"):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
+ pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|", IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
- pub CERT_FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
+ pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
+ pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
+
pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
+
+ pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
+
+ pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
+
+ pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
+
+ pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
+
+ pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+
+ pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+
+ pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
+
+ pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
+
+ pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
}
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
// `ApiStringFormat` bindings: pair the regexes defined above with API schemas.

pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

// Shared by certificate and tape-encryption-key fingerprints (colon-separated
// SHA256 byte pairs).
pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);

pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

// Backup IDs use their own pattern (see BACKUP_ID_RE), not PROXMOX_SAFE_ID.
pub const BACKUP_ID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BACKUP_ID_REGEX);

pub const UUID_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&UUID_REGEX);

pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);

pub const DNS_NAME_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_NAME_REGEX);

pub const DNS_ALIAS_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);

pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);

pub const CIDR_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&CIDR_REGEX);

pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);

pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);

// "(source=)?target" datastore mapping entries.
pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);

pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
+
pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
.format(&PASSWORD_FORMAT)
.min_length(1)
pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema = StringSchema::new(
    "X509 certificate fingerprint (sha256)."
)
    .format(&FINGERPRINT_SHA256_FORMAT)
    .schema();

pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema = StringSchema::new(
    "Tape encryption key fingerprint (sha256)."
)
    .format(&FINGERPRINT_SHA256_FORMAT)
    .schema();

// Used as optimistic-locking token: the client echoes back the digest it read
// and the server rejects the update on mismatch.
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
    "Prevent changes if current configuration file has different \
    SHA256 digest. This can be used to prevent concurrent \
    modifications."
)
    .format(&PVE_CONFIG_DIGEST_FORMAT)
    .schema();

pub const CHUNK_DIGEST_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
EnumEntry::new("group", "Group")]))
.schema();
#[api(
    properties: {
        propagate: {
            schema: ACL_PROPAGATE_SCHEMA,
        },
        path: {
            schema: ACL_PATH_SCHEMA,
        },
        ugid_type: {
            schema: ACL_UGID_TYPE_SCHEMA,
        },
        ugid: {
            type: String,
            description: "User or Group ID.",
        },
        roleid: {
            type: Role,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// ACL list entry.
pub struct AclListItem {
    // Access path the rule applies to (validated by ACL_PATH_SCHEMA).
    pub path: String,
    // User or group identifier; interpretation depends on `ugid_type`.
    pub ugid: String,
    // Discriminates whether `ugid` names a user or a group.
    pub ugid_type: String,
    // Whether the role propagates to sub-paths.
    pub propagate: bool,
    // Role name (serialized form of `Role`).
    pub roleid: String,
}
+
pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
StringSchema::new("Backup archive name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
pub const BACKUP_ID_SCHEMA: Schema =
    StringSchema::new("Backup ID.")
        // Uses the dedicated backup-ID pattern (BACKUP_ID_RE), not
        // PROXMOX_SAFE_ID_FORMAT.
        .format(&BACKUP_ID_FORMAT)
        .schema();
pub const BACKUP_TIME_SCHEMA: Schema =
.max_length(32)
.schema();
+pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
+ .format(&DATASTORE_MAP_FORMAT)
+ .min_length(3)
+ .max_length(65)
+ .type_text("(<source>=)?<target>")
+ .schema();
+
+pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
+ "Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
+ .schema();
+
+pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
+ "A list of Datastore mappings (or single datastore), comma separated. \
+ For example 'a=b,e' maps the source datastore 'a' to target 'b and \
+ all other sources to the default 'e'. If no default is given, only the \
+ specified sources are mapped.")
+ .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
+ .schema();
+
+pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
+ "A snapshot in the format: 'store:type/id/time")
+ .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
+ .type_text("store:type/id/time")
+ .schema();
+
+pub const MEDIA_SET_UUID_SCHEMA: Schema =
+ StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).")
+ .format(&UUID_FORMAT)
+ .schema();
+
+pub const MEDIA_UUID_SCHEMA: Schema =
+ StringSchema::new("Media Uuid.")
+ .format(&UUID_FORMAT)
+ .schema();
+
// Job schedules are systemd-style calendar events, validated by
// crate::tools::systemd::time::verify_calendar_event.

pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run sync job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();

pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run garbage collection job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();

pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run prune job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();

pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
    "Run verify job at specified schedule.")
    .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
    .type_text("<calendar-event>")
    .schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.default(true)
.schema();
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
    "Do not verify backups that are already verified if their verification is not outdated.")
    .default(true)
    .schema();

// Unit: days; the schema enforces a minimum of 1.
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema = IntegerSchema::new(
    "Days after that a verification becomes outdated")
    .minimum(1)
    .schema();

pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .schema();
.format(&DNS_NAME_OR_IP_FORMAT)
.schema();
// Keys match "pbs[cbsp]-" + 10 hex digits (see SUBSCRIPTION_KEY_REGEX).
pub const SUBSCRIPTION_KEY_SCHEMA: Schema = StringSchema::new("Proxmox Backup Server subscription key.")
    .format(&SUBSCRIPTION_KEY_FORMAT)
    .min_length(15)
    .max_length(16)
    .schema();

pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name (/sys/block/<name>).")
    .format(&BLOCKDEVICE_NAME_FORMAT)
    .min_length(3)
    .max_length(64)
    .schema();

pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();
+
// Complex type definitions
#[api(
    properties: {
        store: {
            schema: DATASTORE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
/// Basic information about a datastore.
pub struct DataStoreListItem {
    // Datastore name (DATASTORE_SCHEMA).
    pub store: String,
    // Optional single-line comment for this datastore.
    pub comment: Option<String>,
}
+
#[api(
properties: {
"backup-type": {
},
},
owner: {
- type: Userid,
+ type: Authid,
optional: true,
},
},
pub files: Vec<String>,
/// The owner of group
#[serde(skip_serializing_if="Option::is_none")]
- pub owner: Option<Userid>,
+ pub owner: Option<Authid>,
+}
+
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Result of a verify operation.
pub enum VerifyState {
    /// Verification was successful
    Ok,
    /// Verification reported one or more errors
    Failed,
}

#[api(
    properties: {
        upid: {
            schema: UPID_SCHEMA
        },
        state: {
            type: VerifyState
        },
    },
)]
#[derive(Serialize, Deserialize)]
/// State of the last verify task run on a snapshot.
pub struct SnapshotVerifyState {
    /// UPID of the verify task
    pub upid: UPID,
    /// State of the verification. Enum.
    pub state: VerifyState,
}
#[api(
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
},
+ verification: {
+ type: SnapshotVerifyState,
+ optional: true,
+ },
+ fingerprint: {
+ type: String,
+ optional: true,
+ },
files: {
items: {
schema: BACKUP_ARCHIVE_NAME_SCHEMA
},
},
owner: {
- type: Userid,
+ type: Authid,
optional: true,
},
},
/// The first line from manifest "notes"
#[serde(skip_serializing_if="Option::is_none")]
pub comment: Option<String>,
+ /// The result of the last run verify task
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub verification: Option<SnapshotVerifyState>,
+ /// Fingerprint of encryption key
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub fingerprint: Option<Fingerprint>,
/// List of contained archive files.
pub files: Vec<BackupContent>,
/// Overall snapshot size (sum of all archive sizes).
pub size: Option<u64>,
/// The owner of the snapshots group
#[serde(skip_serializing_if="Option::is_none")]
- pub owner: Option<Userid>,
+ pub owner: Option<Authid>,
}
#[api(
pub pending_bytes: u64,
/// Number of pending chunks (pending removal - kept for safety).
pub pending_chunks: usize,
+ /// Number of chunks marked as .bad by verify that have been removed by GC.
+ pub removed_bad: usize,
+ /// Number of chunks still marked as .bad after garbage collection.
+ pub still_bad: usize,
}
impl Default for GarbageCollectionStatus {
removed_chunks: 0,
pending_bytes: 0,
pending_chunks: 0,
+ removed_bad: 0,
+ still_bad: 0,
}
}
}
-
#[api()]
-#[derive(Serialize, Deserialize)]
+#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
/// Total space (bytes).
pub avail: u64,
}
+#[api()]
+#[derive(Serialize, Deserialize, Default)]
+/// Backup Type group/snapshot counts.
+pub struct TypeCounts {
+ /// The number of groups of the type.
+ pub groups: u64,
+ /// The number of snapshots of the type.
+ pub snapshots: u64,
+}
+
+#[api(
+ properties: {
+ ct: {
+ type: TypeCounts,
+ optional: true,
+ },
+ host: {
+ type: TypeCounts,
+ optional: true,
+ },
+ vm: {
+ type: TypeCounts,
+ optional: true,
+ },
+ other: {
+ type: TypeCounts,
+ optional: true,
+ },
+ },
+)]
+#[derive(Serialize, Deserialize, Default)]
+/// Counts of groups/snapshots per BackupType.
+pub struct Counts {
+ /// The counts for CT backups
+ pub ct: Option<TypeCounts>,
+ /// The counts for Host backups
+ pub host: Option<TypeCounts>,
+ /// The counts for VM backups
+ pub vm: Option<TypeCounts>,
+ /// The counts for other backup types
+ pub other: Option<TypeCounts>,
+}
+
+#[api(
+ properties: {
+ "gc-status": {
+ type: GarbageCollectionStatus,
+ optional: true,
+ },
+ counts: {
+ type: Counts,
+ optional: true,
+ },
+ },
+)]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all="kebab-case")]
+/// Overall Datastore status and useful information.
+pub struct DataStoreStatus {
+ /// Total space (bytes).
+ pub total: u64,
+ /// Used space (bytes).
+ pub used: u64,
+ /// Available space (bytes).
+ pub avail: u64,
+ /// Status of last GC
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub gc_status: Option<GarbageCollectionStatus>,
+ /// Group/Snapshot counts
+ #[serde(skip_serializing_if="Option::is_none")]
+ pub counts: Option<Counts>,
+}
+
#[api(
properties: {
upid: { schema: UPID_SCHEMA },
- user: { type: Userid },
+ user: { type: Authid },
},
)]
#[derive(Serialize, Deserialize)]
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
- /// The user who started the task
- pub user: Userid,
+ /// The authenticated entity who started the task
+ pub user: Authid,
/// The task end time (Epoch)
#[serde(skip_serializing_if="Option::is_none")]
pub endtime: Option<i64>,
starttime: info.upid.starttime,
worker_type: info.upid.worker_type,
worker_id: info.upid.worker_id,
- user: info.upid.userid,
+ user: info.upid.auth_id,
endtime,
status,
}
}
}
#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Coarse classification of a task's final (or unknown) state.
// Doc comment added: all sibling api enums carry a type-level description.
pub enum TaskStateType {
    /// Ok
    OK,
    /// Warning
    Warning,
    /// Error
    Error,
    /// Unknown
    Unknown,
}
+
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Broadcast policy
broadcast = 3,
/// IEEE 802.3ad Dynamic link aggregation
- //#[serde(rename = "802.3ad")]
+ #[serde(rename = "802.3ad")]
ieee802_3ad = 4,
/// Adaptive transmit load balancing
balance_tlb = 5,
balance_alb = 6,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
    /// Layer 2
    layer2 = 0,
    /// Layer 2+3
    // '+' is not valid in a Rust identifier, so the wire name is set explicitly.
    #[serde(rename = "layer2+3")]
    layer2_3 = 1,
    /// Layer 3+4
    #[serde(rename = "layer3+4")]
    layer3_4 = 2,
}
+
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
bond_mode: {
type: LinuxBondMode,
optional: true,
- }
+ },
+ "bond-primary": {
+ schema: NETWORK_INTERFACE_NAME_SCHEMA,
+ optional: true,
+ },
+ bond_xmit_hash_policy: {
+ type: BondXmitHashPolicy,
+ optional: true,
+ },
}
)]
#[derive(Debug, Serialize, Deserialize)]
pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if="Option::is_none")]
pub bond_mode: Option<LinuxBondMode>,
+ #[serde(skip_serializing_if="Option::is_none")]
+ #[serde(rename = "bond-primary")]
+ pub bond_primary: Option<String>,
+ pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}
// Regression tests
];
for fingerprint in invalid_fingerprints.iter() {
- if let Ok(_) = parse_simple_value(fingerprint, &schema) {
+ if parse_simple_value(fingerprint, &schema).is_ok() {
bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
}
}
];
for name in invalid_user_ids.iter() {
- if let Ok(_) = parse_simple_value(name, &Userid::API_SCHEMA) {
+ if parse_simple_value(name, &Userid::API_SCHEMA).is_ok() {
bail!("test userid '{}' failed - got Ok() while exception an error.", name);
}
}
}
#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
    // Package section; presumably mirrors APT's "Section" field — confirm.
    pub section: String,
    /// URL under which the package's changelog can be retrieved
    pub change_log_url: String,
    /// Custom extra field for additional package information
    #[serde(skip_serializing_if="Option::is_none")]
    pub extra_info: Option<String>,
}
+
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
// Used per job type in `DatastoreNotify` below.
pub enum Notify {
    /// Never send notification
    Never,
    /// Send notifications for failed and successful jobs
    Always,
    /// Send notifications for failed jobs only
    Error,
}
+
#[api(
    properties: {
        gc: {
            type: Notify,
            optional: true,
        },
        verify: {
            type: Notify,
            optional: true,
        },
        sync: {
            type: Notify,
            optional: true,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
// Serialized as a property string via DATASTORE_NOTIFY_STRING_SCHEMA.
pub struct DatastoreNotify {
    /// Garbage collection settings
    pub gc: Option<Notify>,
    /// Verify job setting
    pub verify: Option<Notify>,
    /// Sync job setting
    pub sync: Option<Notify>,
}
+
/// An entry in a hierarchy of files for restore and listing.
#[api()]
#[derive(Serialize, Deserialize)]
pub struct ArchiveEntry {
    /// Base64-encoded full path to the file, including the filename
    pub filepath: String,
    /// Displayable filename text for UIs
    pub text: String,
    /// File or directory type of this entry
    // Serialized as "type" ("type" is a Rust keyword, hence the rename).
    #[serde(rename = "type")]
    pub entry_type: String,
    /// Is this entry a leaf node, or does it have children (i.e. a directory)?
    pub leaf: bool,
    /// The file size, if entry_type is 'f' (file)
    #[serde(skip_serializing_if="Option::is_none")]
    pub size: Option<u64>,
    /// The file "last modified" time stamp, if entry_type is 'f' (file)
    #[serde(skip_serializing_if="Option::is_none")]
    pub mtime: Option<i64>,
}
+
+impl ArchiveEntry {
+ pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
+ let size = match entry_type {
+ Some(DirEntryAttribute::File { size, .. }) => Some(*size),
+ _ => None,
+ };
+ Self::new_with_size(filepath, entry_type, size)
+ }
+
+ pub fn new_with_size(
+ filepath: &[u8],
+ entry_type: Option<&DirEntryAttribute>,
+ size: Option<u64>,
+ ) -> Self {
+ Self {
+ filepath: base64::encode(filepath),
+ text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
+ .to_string(),
+ entry_type: match entry_type {
+ Some(entry_type) => CatalogEntryType::from(entry_type).to_string(),
+ None => "v".to_owned(),
+ },
+ leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
+ size,
+ mtime: match entry_type {
+ Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
+ _ => None,
+ },
+ }
+ }
+}
+
// Property-string form of `DatastoreNotify`, e.g. "gc=always,verify=error".
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
    "Datastore notification setting")
    .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
    .schema();


pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(1)
    .max_length(64)
    .schema();
+
#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,
    /// Encrypt the key with a password using SCrypt.
    Scrypt,
    /// Encrypt the key with a password using PBKDF2.
    PBKDF2,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        // Matches the `default: "scrypt"` declared on the api macro above.
        Kdf::Scrypt
    }
}
+
#[api(
    properties: {
        kdf: {
            type: Kdf,
        },
        fingerprint: {
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
    /// Path to key (if stored in a file)
    #[serde(skip_serializing_if="Option::is_none")]
    pub path: Option<String>,
    // KDF used to protect the key (see `Kdf`).
    pub kdf: Kdf,
    /// Key creation time
    pub created: i64,
    /// Key modification time
    pub modified: i64,
    /// Key fingerprint
    #[serde(skip_serializing_if="Option::is_none")]
    pub fingerprint: Option<String>,
    /// Password hint
    #[serde(skip_serializing_if="Option::is_none")]
    pub hint: Option<String>,
}
+
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
    /// Path to key (if stored in a file)
    #[serde(skip_serializing_if="Option::is_none")]
    pub path: Option<String>,
    /// RSA exponent
    pub exponent: String,
    /// Hex-encoded RSA modulus
    pub modulus: String,
    /// Key (modulus) length in bits
    pub length: usize,
}
+
+impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
+ type Error = anyhow::Error;
+
+ fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
+ let modulus = value.n().to_hex_str()?.to_string();
+ let exponent = value.e().to_dec_str()?.to_string();
+ let length = value.size() as usize * 8;
+
+ Ok(Self {
+ path: None,
+ exponent,
+ modulus,
+ length,
+ })
+ }
+}
+
#[api(
    properties: {
        "next-run": {
            description: "Estimated time of the next run (UNIX epoch).",
            optional: true,
            type: Integer,
        },
        "last-run-state": {
            description: "Result of the last run.",
            optional: true,
            type: String,
        },
        "last-run-upid": {
            description: "Task UPID of the last run.",
            optional: true,
            type: String,
        },
        "last-run-endtime": {
            description: "Endtime of the last run.",
            optional: true,
            type: Integer,
        },
    }
)]
#[derive(Serialize,Deserialize,Default)]
#[serde(rename_all="kebab-case")]
/// Job Scheduling Status
pub struct JobScheduleStatus {
    // Next scheduled run, UNIX epoch (see property description above).
    #[serde(skip_serializing_if="Option::is_none")]
    pub next_run: Option<i64>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_state: Option<String>,
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_upid: Option<String>,
    // End time of the last run, UNIX epoch.
    #[serde(skip_serializing_if="Option::is_none")]
    pub last_run_endtime: Option<i64>,
}
+
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
// NOTE(review): counters are presumably in bytes — confirm with the producer.
pub struct NodeMemoryCounters {
    /// Total memory
    pub total: u64,
    /// Used memory
    pub used: u64,
    /// Free memory
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
    /// Total swap
    pub total: u64,
    /// Used swap
    pub used: u64,
    /// Free swap
    pub free: u64,
}

#[api]
#[derive(Serialize,Deserialize,Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
    /// The SSL Fingerprint
    pub fingerprint: String,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
    /// The CPU model
    pub model: String,
    /// The number of CPU sockets
    pub sockets: usize,
    /// The number of CPU cores (incl. threads)
    pub cpus: usize,
}
+
#[api(
    properties: {
        memory: {
            type: NodeMemoryCounters,
        },
        root: {
            type: StorageStatus,
        },
        swap: {
            type: NodeSwapCounters,
        },
        loadavg: {
            type: Array,
            items: {
                type: Number,
                description: "the load",
            }
        },
        cpuinfo: {
            type: NodeCpuInformation,
        },
        info: {
            type: NodeInformation,
        }
    },
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// The Node status
pub struct NodeStatus {
    pub memory: NodeMemoryCounters,
    // Usage of the root filesystem.
    pub root: StorageStatus,
    pub swap: NodeSwapCounters,
    /// The current uptime of the server.
    pub uptime: u64,
    /// Load for 1, 5 and 15 minutes.
    pub loadavg: [f64; 3],
    /// The current kernel version.
    pub kversion: String,
    /// Total CPU usage since last query.
    // NOTE(review): presumably a fraction in [0, 1] — confirm with the caller.
    pub cpu: f64,
    /// Total IO wait since last query.
    pub wait: f64,
    pub cpuinfo: NodeCpuInformation,
    pub info: NodeInformation,
}
+
pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
    "HTTP proxy configuration [http://]<host>[:port]")
    .format(&ApiStringFormat::VerifyFn(|s| {
        // Validation only — the parsed ProxyConfig is discarded here.
        proxmox_http::ProxyConfig::parse_proxy_url(s)?;
        Ok(())
    }))
    .min_length(1)
    .max_length(128)
    .type_text("[http://]<host>[:port]")
    .schema();