]> git.proxmox.com Git - proxmox-backup.git/blobdiff - src/api2/types/mod.rs
add pbs-datastore module
[proxmox-backup.git] / src / api2 / types / mod.rs
index 7cb1cdcef5e5dadc32bfe237082e78e553ba69cf..c9ba6db6a46f6fdc734405a65a9727107cfbdea5 100644 (file)
@@ -1,3 +1,5 @@
+//! API Type Definitions
+
 use anyhow::bail;
 use serde::{Deserialize, Serialize};
 
@@ -5,8 +7,17 @@ use proxmox::api::{api, schema::*};
 use proxmox::const_regex;
 use proxmox::{IPRE, IPRE_BRACKET, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
 
-use crate::backup::{CryptMode, Fingerprint, BACKUP_ID_REGEX};
-use crate::server::UPID;
+use pbs_datastore::catalog::CatalogEntryType;
+
+use crate::{
+    backup::{
+        CryptMode,
+        Fingerprint,
+        DirEntryAttribute,
+    },
+    server::UPID,
+    config::acl::Role,
+};
 
 #[macro_use]
 mod macros;
@@ -23,6 +34,12 @@ pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GRO
 mod tape;
 pub use tape::*;
 
+mod file_restore;
+pub use file_restore::*;
+
+mod acme;
+pub use acme::*;
+
 // File names: may not contain slashes, may not start with "."
 pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     if name.starts_with('.') {
@@ -34,9 +51,25 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     Ok(())
 });
 
+macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
+macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
+macro_rules! BACKUP_TIME_RE {
+    () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z")
+}
+macro_rules! SNAPSHOT_PATH_REGEX_STR {
+    () => (
+        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+    );
+}
+
 macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
 macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
 
+macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
+macro_rules! DNS_ALIAS_NAME {
+    () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
+}
+
 macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
 macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
 
@@ -73,6 +106,8 @@ const_regex!{
 
     pub DNS_NAME_REGEX =  concat!(r"^", DNS_NAME!(), r"$");
 
+    pub DNS_ALIAS_REGEX =  concat!(r"^", DNS_ALIAS_NAME!(), r"$");
+
     pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|",  IPRE!(), r")$");
 
     pub BACKUP_REPO_URL_REGEX = concat!(r"^^(?:(?:(", USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(), ")@)?(", DNS_NAME!(), "|",  IPRE_BRACKET!() ,"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$");
@@ -86,6 +121,24 @@ const_regex!{
     pub BLOCKDEVICE_NAME_REGEX = r"^(:?(:?h|s|x?v)d[a-z]+)|(:?nvme\d+n\d+)$";
 
     pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
+
+    pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
+
+    pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
+
+    pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
+
+    pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
+
+    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+
+    pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+
+    pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
+
+    pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
+
+    pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
 }
 
 pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@@ -112,6 +165,9 @@ pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
 pub const BACKUP_ID_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BACKUP_ID_REGEX);
 
+pub const UUID_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&UUID_REGEX);
+
 pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
 
@@ -121,6 +177,9 @@ pub const HOSTNAME_FORMAT: ApiStringFormat =
 pub const DNS_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_REGEX);
 
+pub const DNS_ALIAS_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
+
 pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
 
@@ -148,6 +207,12 @@ pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
 pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
 
+pub const DATASTORE_MAP_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&DATASTORE_MAP_REGEX);
+
+pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
+    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
+
 pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
     .format(&PASSWORD_FORMAT)
     .min_length(1)
@@ -277,6 +342,36 @@ pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new(
         EnumEntry::new("group", "Group")]))
     .schema();
 
+#[api(
+    properties: {
+        propagate: {
+            schema: ACL_PROPAGATE_SCHEMA,
+        },
+       path: {
+            schema: ACL_PATH_SCHEMA,
+        },
+        ugid_type: {
+            schema: ACL_UGID_TYPE_SCHEMA,
+        },
+       ugid: {
+            type: String,
+            description: "User or Group ID.",
+        },
+       roleid: {
+            type: Role,
+        }
+    }
+)]
+#[derive(Serialize, Deserialize)]
+/// ACL list entry.
+pub struct AclListItem {
+    pub path: String,
+    pub ugid: String,
+    pub ugid_type: String,
+    pub propagate: bool,
+    pub roleid: String,
+}
+
 pub const BACKUP_ARCHIVE_NAME_SCHEMA: Schema =
     StringSchema::new("Backup archive name.")
     .format(&PROXMOX_SAFE_ID_FORMAT)
@@ -310,24 +405,63 @@ pub const DATASTORE_SCHEMA: Schema = StringSchema::new("Datastore name.")
     .max_length(32)
     .schema();
 
+pub const DATASTORE_MAP_SCHEMA: Schema = StringSchema::new("Datastore mapping.")
+    .format(&DATASTORE_MAP_FORMAT)
+    .min_length(3)
+    .max_length(65)
+    .type_text("(<source>=)?<target>")
+    .schema();
+
+pub const DATASTORE_MAP_ARRAY_SCHEMA: Schema = ArraySchema::new(
+    "Datastore mapping list.", &DATASTORE_MAP_SCHEMA)
+    .schema();
+
+pub const DATASTORE_MAP_LIST_SCHEMA: Schema = StringSchema::new(
+    "A list of Datastore mappings (or single datastore), comma separated. \
+    For example 'a=b,e' maps the source datastore 'a' to target 'b' and \
+    all other sources to the default 'e'. If no default is given, only the \
+    specified sources are mapped.")
+    .format(&ApiStringFormat::PropertyString(&DATASTORE_MAP_ARRAY_SCHEMA))
+    .schema();
+
+pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema = StringSchema::new(
+    "A snapshot in the format: 'store:type/id/time'")
+    .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
+    .type_text("store:type/id/time")
+    .schema();
+
+pub const MEDIA_SET_UUID_SCHEMA: Schema =
+    StringSchema::new("MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).")
+    .format(&UUID_FORMAT)
+    .schema();
+
+pub const MEDIA_UUID_SCHEMA: Schema =
+    StringSchema::new("Media Uuid.")
+    .format(&UUID_FORMAT)
+    .schema();
+
 pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run sync job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const GC_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run garbage collection job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run prune job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const VERIFICATION_SCHEDULE_SCHEMA: Schema = StringSchema::new(
     "Run verify job at specified schedule.")
     .format(&ApiStringFormat::VerifyFn(crate::tools::systemd::time::verify_calendar_event))
+    .type_text("<calendar-event>")
     .schema();
 
 pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
@@ -381,6 +515,12 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema = StringSchema::new("Block device name
     .max_length(64)
     .schema();
 
+pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
+    .format(&PROXMOX_SAFE_ID_FORMAT)
+    .min_length(2)
+    .max_length(32)
+    .schema();
+
 // Complex type definitions
 
 #[api(
@@ -664,9 +804,8 @@ impl Default for GarbageCollectionStatus {
     }
 }
 
-
 #[api()]
-#[derive(Serialize, Deserialize)]
+#[derive(Default, Serialize, Deserialize)]
 /// Storage space usage information.
 pub struct StorageStatus {
     /// Total space (bytes).
@@ -1212,7 +1351,7 @@ pub struct APTUpdateInfo {
 pub enum Notify {
     /// Never send notification
     Never,
-    /// Send notifications for failed and sucessful jobs
+    /// Send notifications for failed and successful jobs
     Always,
     /// Send notifications for failed jobs only
     Error,
@@ -1245,7 +1384,293 @@ pub struct DatastoreNotify {
     pub sync: Option<Notify>,
 }
 
+/// An entry in a hierarchy of files for restore and listing.
+#[api()]
+#[derive(Serialize, Deserialize)]
+pub struct ArchiveEntry {
+    /// Base64-encoded full path to the file, including the filename
+    pub filepath: String,
+    /// Displayable filename text for UIs
+    pub text: String,
+    /// File or directory type of this entry
+    #[serde(rename = "type")]
+    pub entry_type: String,
+    /// Is this entry a leaf node, or does it have children (i.e. a directory)?
+    pub leaf: bool,
+    /// The file size, if entry_type is 'f' (file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub size: Option<u64>,
+    /// The file "last modified" time stamp, if entry_type is 'f' (file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub mtime: Option<i64>,
+}
+
+impl ArchiveEntry {
+    pub fn new(filepath: &[u8], entry_type: Option<&DirEntryAttribute>) -> Self {
+        let size = match entry_type {
+            Some(DirEntryAttribute::File { size, .. }) => Some(*size),
+            _ => None,
+        };
+        Self::new_with_size(filepath, entry_type, size)
+    }
+
+    pub fn new_with_size(
+        filepath: &[u8],
+        entry_type: Option<&DirEntryAttribute>,
+        size: Option<u64>,
+    ) -> Self {
+        Self {
+            filepath: base64::encode(filepath),
+            text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
+                .to_string(),
+            entry_type: match entry_type {
+                Some(entry_type) => CatalogEntryType::from(entry_type).to_string(),
+                None => "v".to_owned(),
+            },
+            leaf: !matches!(entry_type, None | Some(DirEntryAttribute::Directory { .. })),
+            size,
+            mtime: match entry_type {
+                Some(DirEntryAttribute::File { mtime, .. }) => Some(*mtime),
+                _ => None,
+            },
+        }
+    }
+}
+
 pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
     "Datastore notification setting")
     .format(&ApiStringFormat::PropertyString(&DatastoreNotify::API_SCHEMA))
     .schema();
+
+
+pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
+    .format(&SINGLE_LINE_COMMENT_FORMAT)
+    .min_length(1)
+    .max_length(64)
+    .schema();
+
+#[api(default: "scrypt")]
+#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+/// Key derivation function for password protected encryption keys.
+pub enum Kdf {
+    /// Do not encrypt the key.
+    None,
+    /// Encrypt the key with a password using SCrypt.
+    Scrypt,
+    /// Encrypt the key with a password using PBKDF2.
+    PBKDF2,
+}
+
+impl Default for Kdf {
+    #[inline]
+    fn default() -> Self {
+        Kdf::Scrypt
+    }
+}
+
+#[api(
+    properties: {
+        kdf: {
+            type: Kdf,
+        },
+        fingerprint: {
+            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
+            optional: true,
+        },
+    },
+)]
+#[derive(Deserialize, Serialize)]
+/// Encryption Key Information
+pub struct KeyInfo {
+    /// Path to key (if stored in a file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub path: Option<String>,
+    pub kdf: Kdf,
+    /// Key creation time
+    pub created: i64,
+    /// Key modification time
+    pub modified: i64,
+    /// Key fingerprint
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub fingerprint: Option<String>,
+    /// Password hint
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub hint: Option<String>,
+}
+
+#[api]
+#[derive(Deserialize, Serialize)]
+/// RSA public key information
+pub struct RsaPubKeyInfo {
+    /// Path to key (if stored in a file)
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub path: Option<String>,
+    /// RSA exponent
+    pub exponent: String,
+    /// Hex-encoded RSA modulus
+    pub modulus: String,
+    /// Key (modulus) length in bits
+    pub length: usize,
+}
+
+impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
+    type Error = anyhow::Error;
+
+    fn try_from(value: openssl::rsa::Rsa<openssl::pkey::Public>) -> Result<Self, Self::Error> {
+        let modulus = value.n().to_hex_str()?.to_string();
+        let exponent = value.e().to_dec_str()?.to_string();
+        let length = value.size() as usize * 8;
+
+        Ok(Self {
+            path: None,
+            exponent,
+            modulus,
+            length,
+        })
+    }
+}
+
+#[api(
+    properties: {
+        "next-run": {
+            description: "Estimated time of the next run (UNIX epoch).",
+            optional: true,
+            type: Integer,
+        },
+        "last-run-state": {
+            description: "Result of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-upid": {
+            description: "Task UPID of the last run.",
+            optional: true,
+            type: String,
+        },
+        "last-run-endtime": {
+            description: "Endtime of the last run.",
+            optional: true,
+            type: Integer,
+        },
+    }
+)]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all="kebab-case")]
+/// Job Scheduling Status
+pub struct JobScheduleStatus {
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub next_run: Option<i64>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_state: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_upid: Option<String>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub last_run_endtime: Option<i64>,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node memory usage counters
+pub struct NodeMemoryCounters {
+    /// Total memory
+    pub total: u64,
+    /// Used memory
+    pub used: u64,
+    /// Free memory
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Node swap usage counters
+pub struct NodeSwapCounters {
+    /// Total swap
+    pub total: u64,
+    /// Used swap
+    pub used: u64,
+    /// Free swap
+    pub free: u64,
+}
+
+#[api]
+#[derive(Serialize,Deserialize,Default)]
+#[serde(rename_all = "kebab-case")]
+/// Contains general node information such as the fingerprint
+pub struct NodeInformation {
+    /// The SSL Fingerprint
+    pub fingerprint: String,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Information about the CPU
+pub struct NodeCpuInformation {
+    /// The CPU model
+    pub model: String,
+    /// The number of CPU sockets
+    pub sockets: usize,
+    /// The number of CPU cores (incl. threads)
+    pub cpus: usize,
+}
+
+#[api(
+    properties: {
+        memory: {
+            type: NodeMemoryCounters,
+        },
+        root: {
+            type: StorageStatus,
+        },
+        swap: {
+            type: NodeSwapCounters,
+        },
+        loadavg: {
+            type: Array,
+            items: {
+                type: Number,
+                description: "the load",
+            }
+        },
+        cpuinfo: {
+            type: NodeCpuInformation,
+        },
+        info: {
+            type: NodeInformation,
+        }
+    },
+)]
+#[derive(Serialize, Deserialize, Default)]
+#[serde(rename_all = "kebab-case")]
+/// The Node status
+pub struct NodeStatus {
+    pub memory: NodeMemoryCounters,
+    pub root: StorageStatus,
+    pub swap: NodeSwapCounters,
+    /// The current uptime of the server.
+    pub uptime: u64,
+    /// Load for 1, 5 and 15 minutes.
+    pub loadavg: [f64; 3],
+    /// The current kernel version.
+    pub kversion: String,
+    /// Total CPU usage since last query.
+    pub cpu: f64,
+    /// Total IO wait since last query.
+    pub wait: f64,
+    pub cpuinfo: NodeCpuInformation,
+    pub info: NodeInformation,
+}
+
+pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
+    "HTTP proxy configuration [http://]<host>[:port]")
+    .format(&ApiStringFormat::VerifyFn(|s| {
+        proxmox_http::ProxyConfig::parse_proxy_url(s)?;
+        Ok(())
+    }))
+    .min_length(1)
+    .max_length(128)
+    .type_text("[http://]<host>[:port]")
+    .schema();