BackupType is now a real enum, and BackupGroup and BackupDir are
proper API types. All of them implement Display and FromStr, and
their ordering matches the one used in pbs-datastore.
They are also flattened into the structs that previously copied
their fields manually (SnapshotListItem, GroupListItem,
PruneListItem).
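
For illustration, a minimal sketch of how the new types behave
(uses only what this patch adds; not itself part of the change):

    use pbs_api_types::{BackupDir, BackupGroup, BackupType};

    // Display/FromStr round-trip for the enum:
    let ty: BackupType = "vm".parse().unwrap();
    assert_eq!(ty, BackupType::Vm);
    assert_eq!(ty.to_string(), "vm");

    // Group and snapshot paths parse the same way:
    let group: BackupGroup = "vm/100".parse().unwrap();
    assert_eq!(group.to_string(), "vm/100");
    let snap: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse().unwrap();
    assert_eq!(snap.group.to_string(), "host/elsa");

    // Ordering matches pbs-datastore: ct < host < vm, and group IDs
    // compare numerically when both parse as integers.
    assert!(BackupType::Ct < BackupType::Host);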
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
use anyhow::Error;
-use pbs_api_types::Authid;
+use pbs_api_types::{Authid, BackupType};
use pbs_client::{BackupReader, HttpClient, HttpClientOptions};
pub struct DummyWriter {
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
- let client =
- BackupReader::start(client, None, "store2", "host", "elsa", backup_time, true).await?;
+ let client = BackupReader::start(
+ client,
+ None,
+ "store2",
+ BackupType::Host,
+ "elsa",
+ backup_time,
+ true,
+ )
+ .await?;
let start = std::time::SystemTime::now();
use anyhow::Error;
-use pbs_api_types::Authid;
+use pbs_api_types::{Authid, BackupType};
use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
async fn upload_speed() -> Result<f64, Error> {
client,
None,
datastore,
- "host",
+ BackupType::Host,
"speedtest",
backup_time,
false,
+use std::fmt;
+
+use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use proxmox_schema::{
pub state: VerifyState,
}
+#[api]
+/// Backup types.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "lowercase")]
+pub enum BackupType {
+ /// Virtual machines.
+ Vm,
+
+ /// Containers.
+ Ct,
+
+ /// "Host" backups.
+ Host,
+}
+
+impl BackupType {
+ pub const fn as_str(&self) -> &'static str {
+ match self {
+ BackupType::Vm => "vm",
+ BackupType::Ct => "ct",
+ BackupType::Host => "host",
+ }
+ }
+
+ /// We used to have alphabetical ordering here when this was a string.
+ const fn order(self) -> u8 {
+ match self {
+ BackupType::Ct => 0,
+ BackupType::Host => 1,
+ BackupType::Vm => 2,
+ }
+ }
+}
+
+impl fmt::Display for BackupType {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.as_str(), f)
+ }
+}
+
+impl std::str::FromStr for BackupType {
+ type Err = Error;
+
+ /// Parse a backup type.
+ fn from_str(ty: &str) -> Result<Self, Error> {
+ Ok(match ty {
+ "ct" => BackupType::Ct,
+ "host" => BackupType::Host,
+ "vm" => BackupType::Vm,
+ _ => bail!("invalid backup type {ty:?}"),
+ })
+ }
+}
+
+impl std::cmp::Ord for BackupType {
+ #[inline]
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ self.order().cmp(&other.order())
+ }
+}
+
+impl std::cmp::PartialOrd for BackupType {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
#[api(
properties: {
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ },
+)]
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// A backup group (without a data store).
+pub struct BackupGroup {
+ /// Backup type.
+ #[serde(rename = "backup-type")]
+ pub ty: BackupType,
+
+ /// Backup id.
+ #[serde(rename = "backup-id")]
+ pub id: String,
+}
+
+impl BackupGroup {
+ pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
+ Self { ty, id: id.into() }
+ }
+}
+
+impl From<(BackupType, String)> for BackupGroup {
+ fn from(data: (BackupType, String)) -> Self {
+ Self {
+ ty: data.0,
+ id: data.1,
+ }
+ }
+}
+
+impl std::cmp::Ord for BackupGroup {
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ let type_order = self.ty.cmp(&other.ty);
+ if type_order != std::cmp::Ordering::Equal {
+ return type_order;
+ }
+ // try to compare IDs numerically
+ let id_self = self.id.parse::<u64>();
+ let id_other = other.id.parse::<u64>();
+ match (id_self, id_other) {
+ (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
+ (Ok(_), Err(_)) => std::cmp::Ordering::Less,
+ (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
+ _ => self.id.cmp(&other.id),
+ }
+ }
+}
+
+impl std::cmp::PartialOrd for BackupGroup {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl fmt::Display for BackupGroup {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}/{}", self.ty, self.id)
+ }
+}
+
+impl std::str::FromStr for BackupGroup {
+ type Err = Error;
+
+ /// Parse a backup group.
+ ///
+    /// This parses strings like `vm/100`.
+ fn from_str(path: &str) -> Result<Self, Error> {
+ let cap = GROUP_PATH_REGEX
+ .captures(path)
+ .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+ Ok(Self {
+ ty: cap.get(1).unwrap().as_str().parse()?,
+ id: cap.get(2).unwrap().as_str().to_owned(),
+ })
+ }
+}
+
+#[api(
+ properties: {
+ "group": { type: BackupGroup },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ },
+)]
+/// Uniquely identify a backup (relative to a data store).
+///
+/// We also call this a backup snapshot.
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct BackupDir {
+ /// Backup group.
+ #[serde(flatten)]
+ pub group: BackupGroup,
+
+ /// Backup timestamp unix epoch.
+ #[serde(rename = "backup-time")]
+ pub time: i64,
+}
+
+impl From<(BackupGroup, i64)> for BackupDir {
+ fn from(data: (BackupGroup, i64)) -> Self {
+ Self {
+ group: data.0,
+ time: data.1,
+ }
+ }
+}
+
+impl From<(BackupType, String, i64)> for BackupDir {
+ fn from(data: (BackupType, String, i64)) -> Self {
+ Self {
+ group: (data.0, data.1).into(),
+ time: data.2,
+ }
+ }
+}
+
+impl BackupDir {
+ pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
+ where
+ T: Into<String>,
+ {
+ let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
+ let group = BackupGroup::new(ty, id.into());
+ Ok(Self { group, time })
+ }
+
+ pub fn ty(&self) -> BackupType {
+ self.group.ty
+ }
+
+ pub fn id(&self) -> &str {
+ &self.group.id
+ }
+}
+
+impl std::str::FromStr for BackupDir {
+ type Err = Error;
+
+ /// Parse a snapshot path.
+ ///
+    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
+ fn from_str(path: &str) -> Result<Self, Self::Err> {
+ let cap = SNAPSHOT_PATH_REGEX
+ .captures(path)
+ .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+ BackupDir::with_rfc3339(
+ cap.get(1).unwrap().as_str().parse()?,
+ cap.get(2).unwrap().as_str(),
+ cap.get(3).unwrap().as_str(),
+ )
+ }
+}
+
+impl std::fmt::Display for BackupDir {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // FIXME: log error?
+ let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
+ write!(f, "{}/{}", self.group, time)
+ }
+}
+
+#[api(
+ properties: {
+ "backup": { type: BackupDir },
comment: {
schema: SINGLE_LINE_COMMENT_SCHEMA,
optional: true,
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup snapshot.
pub struct SnapshotListItem {
- pub backup_type: String, // enum
- pub backup_id: String,
- pub backup_time: i64,
+ #[serde(flatten)]
+ pub backup: BackupDir,
/// The first line from manifest "notes"
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[api(
properties: {
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "last-backup": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ "backup": { type: BackupGroup },
+ "last-backup": { schema: BACKUP_TIME_SCHEMA },
"backup-count": {
type: Integer,
},
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {
- pub backup_type: String, // enum
- pub backup_id: String,
+ #[serde(flatten)]
+ pub backup: BackupGroup,
+
pub last_backup: i64,
/// Number of contained snapshots
pub backup_count: u64,
#[api(
properties: {
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ "backup": { type: BackupDir },
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Prune result.
pub struct PruneListItem {
- pub backup_type: String, // enum
- pub backup_id: String,
- pub backup_time: i64,
+ #[serde(flatten)]
+ pub backup: BackupDir,
+
/// Keep snapshot
pub keep: bool,
}
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;
-use crate::{BACKUP_ID_SCHEMA, BACKUP_TYPE_SCHEMA, FINGERPRINT_SHA256_FORMAT};
+use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT};
const_regex! {
pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":", SNAPSHOT_PATH_REGEX_STR!(), r"$");
optional: true,
},
"backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
+ type: BackupType,
optional: true,
},
"backup-id": {
pub label_text: Option<String>,
pub media: Option<Uuid>,
pub media_set: Option<Uuid>,
- pub backup_type: Option<String>,
+ pub backup_type: Option<BackupType>,
pub backup_id: Option<String>,
}
use futures::future::AbortHandle;
use serde_json::{json, Value};
+use pbs_api_types::BackupType;
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
- backup_type: &str,
+ backup_type: BackupType,
backup_id: &str,
backup_time: i64,
debug: bool,
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
-use pbs_api_types::HumanByte;
+use pbs_api_types::{BackupType, HumanByte};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
- backup_type: &str,
+ backup_type: BackupType,
backup_id: &str,
backup_time: i64,
debug: bool,
item["backup-type"].as_str(),
item["backup-time"].as_i64(),
) {
+ let backup_type = match backup_type.parse() {
+ Ok(ty) => ty,
+ Err(_) => {
+ // FIXME: print error in completion?
+ continue;
+ }
+ };
if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
result.push(snapshot.relative_path().to_str().unwrap().to_owned());
}
use anyhow::{bail, format_err, Error};
use pbs_api_types::{
- GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX, SNAPSHOT_PATH_REGEX,
+ BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX,
+ SNAPSHOT_PATH_REGEX,
};
use super::manifest::MANIFEST_BLOB_NAME;
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub struct BackupGroup {
/// Type of backup
- backup_type: String,
+ backup_type: BackupType,
/// Unique (for this type) ID
backup_id: String,
}
}
impl BackupGroup {
- pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
+ pub fn new<T: Into<String>>(backup_type: BackupType, backup_id: T) -> Self {
Self {
- backup_type: backup_type.into(),
+ backup_type,
backup_id: backup_id.into(),
}
}
- pub fn backup_type(&self) -> &str {
- &self.backup_type
+ pub fn backup_type(&self) -> BackupType {
+ self.backup_type
}
pub fn backup_id(&self) -> &str {
pub fn group_path(&self) -> PathBuf {
let mut relative_path = PathBuf::new();
- relative_path.push(&self.backup_type);
+ relative_path.push(self.backup_type.as_str());
relative_path.push(&self.backup_id);
}
let backup_dir =
- BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
+ BackupDir::with_rfc3339(self.backup_type, &self.backup_id, backup_time)?;
let files = list_backup_files(l2_fd, backup_time)?;
let protected = backup_dir.is_protected(base_path.to_owned());
Ok(group) => &group == self,
Err(_) => false, // shouldn't happen if value is schema-checked
},
- GroupFilter::BackupType(backup_type) => self.backup_type() == backup_type,
+ GroupFilter::BackupType(backup_type) => self.backup_type().as_str() == backup_type,
GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
}
}
}
+impl From<&BackupGroup> for pbs_api_types::BackupGroup {
+ fn from(group: &BackupGroup) -> pbs_api_types::BackupGroup {
+ (group.backup_type, group.backup_id.clone()).into()
+ }
+}
+
+impl From<BackupGroup> for pbs_api_types::BackupGroup {
+ fn from(group: BackupGroup) -> pbs_api_types::BackupGroup {
+ (group.backup_type, group.backup_id).into()
+ }
+}
+
impl std::fmt::Display for BackupGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let backup_type = self.backup_type();
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
Ok(Self {
- backup_type: cap.get(1).unwrap().as_str().to_owned(),
+ backup_type: cap.get(1).unwrap().as_str().parse()?,
backup_id: cap.get(2).unwrap().as_str().to_owned(),
})
}
}
impl BackupDir {
- pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
+ pub fn new<T>(backup_type: BackupType, backup_id: T, backup_time: i64) -> Result<Self, Error>
where
T: Into<String>,
- U: Into<String>,
{
- let group = BackupGroup::new(backup_type.into(), backup_id.into());
+ let group = BackupGroup::new(backup_type, backup_id.into());
BackupDir::with_group(group, backup_time)
}
- pub fn with_rfc3339<T, U, V>(
- backup_type: T,
- backup_id: U,
- backup_time_string: V,
+ pub fn with_rfc3339<T, U>(
+ backup_type: BackupType,
+ backup_id: T,
+ backup_time_string: U,
) -> Result<Self, Error>
where
T: Into<String>,
U: Into<String>,
- V: Into<String>,
{
let backup_time_string = backup_time_string.into();
let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
- let group = BackupGroup::new(backup_type.into(), backup_id.into());
+ let group = BackupGroup::new(backup_type, backup_id.into());
Ok(Self {
group,
backup_time,
}
}
+impl From<&BackupDir> for pbs_api_types::BackupDir {
+ fn from(dir: &BackupDir) -> pbs_api_types::BackupDir {
+ (
+ pbs_api_types::BackupGroup::from(dir.group.clone()),
+ dir.backup_time,
+ )
+ .into()
+ }
+}
+
+impl From<BackupDir> for pbs_api_types::BackupDir {
+ fn from(dir: BackupDir) -> pbs_api_types::BackupDir {
+ (pbs_api_types::BackupGroup::from(dir.group), dir.backup_time).into()
+ }
+}
+
impl std::str::FromStr for BackupDir {
type Err = Error;
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
BackupDir::with_rfc3339(
- cap.get(1).unwrap().as_str(),
+ cap.get(1).unwrap().as_str().parse()?,
cap.get(2).unwrap().as_str(),
cap.get(3).unwrap().as_str(),
)
use proxmox_sys::{task_log, task_warn};
use pbs_api_types::{
- Authid, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus, HumanByte,
- Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, BACKUP_TYPE_REGEX, UPID,
+ Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
+ HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
};
use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};
) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let mut full_path = self.base_path();
- full_path.push(backup_group.backup_type());
+ full_path.push(backup_group.backup_type().as_str());
std::fs::create_dir_all(&full_path)?;
full_path.push(backup_group.backup_id());
/// An iterator for a (single) level of Backup Groups
pub struct ListGroups {
type_fd: proxmox_sys::fs::ReadDir,
- id_state: Option<(String, proxmox_sys::fs::ReadDir)>,
+ id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
}
impl ListGroups {
fn next(&mut self) -> Option<Self::Item> {
loop {
- if let Some((ref group_type, ref mut id_fd)) = self.id_state {
+ if let Some((group_type, ref mut id_fd)) = self.id_state {
let item = match id_fd.next() {
Some(item) => item,
None => {
Some(nix::dir::Type::Directory) => {} // OK
_ => continue,
}
- if BACKUP_TYPE_REGEX.is_match(name) {
+ if let Ok(group_type) = BackupType::from_str(name) {
// found a backup group type, descend into it to scan all IDs in it
// by switching to the id-state branch
let base_fd = entry.parent_fd();
Ok(dirfd) => dirfd,
Err(err) => return Some(Err(err.into())),
};
- self.id_state = Some((name.to_owned(), id_dirfd));
+ self.id_state = Some((group_type, id_dirfd));
}
}
continue; // file did not match regex or isn't valid utf-8
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
-use pbs_api_types::{CryptMode, Fingerprint};
+use pbs_api_types::{BackupType, CryptMode, Fingerprint};
use pbs_tools::crypt_config::CryptConfig;
use crate::BackupDir;
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct BackupManifest {
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
files: Vec<FileInfo>,
impl BackupManifest {
pub fn new(snapshot: BackupDir) -> Self {
Self {
- backup_type: snapshot.group().backup_type().into(),
+ backup_type: snapshot.group().backup_type(),
backup_id: snapshot.group().backup_id().into(),
backup_time: snapshot.backup_time(),
files: Vec::new(),
};
use proxmox_schema::{api, ApiType, ReturnType};
+use pbs_api_types::BackupType;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupRepository, BackupWriter};
use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
client,
crypt_config.clone(),
repo.store(),
- "host",
+ BackupType::Host,
"benchmark",
backup_time,
false,
client,
crypt_config.clone(),
repo.store(),
- &backup_type,
+ backup_type,
&backup_id,
backup_time,
true,
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
- Authid, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions,
- RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
- BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
+ Authid, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem,
+ PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA,
+ BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
+ TRAFFIC_CONTROL_RATE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
let mut args = json!({});
if let Some(group) = group {
- args["backup-type"] = group.backup_type().into();
+ args["backup-type"] = group.backup_type().to_string().into();
args["backup-id"] = group.backup_id().into();
}
client: &HttpClient,
store: &str,
group: BackupGroup,
-) -> Result<(String, String, i64), Error> {
+) -> Result<(BackupType, String, i64), Error> {
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
);
}
- list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
+ list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
- let backup_time = list[0].backup_time;
+ let backup_time = list[0].backup.time;
Ok((
group.backup_type().to_owned(),
let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- let group = BackupGroup::new(item.backup_type, item.backup_id);
+ let group = BackupGroup::new(item.backup.ty, item.backup.id);
Ok(group.group_path().to_str().unwrap().to_owned())
};
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
+ let snapshot = BackupDir::new(item.backup.ty, item.backup.id, item.last_backup)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
let group: BackupGroup = group.parse()?;
- param["backup-type"] = group.backup_type().into();
+ param["backup-type"] = group.backup_type().to_string().into();
param["backup-id"] = group.backup_id().into();
let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
.as_str()
.unwrap_or(proxmox_sys::nodename());
- let backup_type = param["backup-type"].as_str().unwrap_or("host");
+ let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;
let include_dev = param["include-dev"].as_array();
client,
crypt_config.clone(),
repo.store(),
- &backup_type,
+ backup_type,
&backup_id,
backup_time,
true,
if let Some(dry_run) = dry_run {
api_param["dry-run"] = dry_run.into();
}
- api_param["backup-type"] = group.backup_type().into();
+ api_param["backup-type"] = group.backup_type().to_string().into();
api_param["backup-id"] = group.backup_id().into();
let mut result = client.post(&path, Some(api_param)).await?;
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+ let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
client,
crypt_config.clone(),
repo.store(),
- &backup_type,
+ backup_type,
&backup_id,
backup_time,
true,
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+ let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
use pxar::EntryKind;
use pbs_api_types::{
- Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
+ Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
let comment = file_read_firstline(&note_path).ok();
group_info.push(GroupListItem {
- backup_type: group.backup_type().to_string(),
- backup_id: group.backup_id().to_string(),
+ backup: group.into(),
last_backup: last_backup.backup_dir.backup_time(),
owner: Some(owner),
backup_count,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
},
},
access: {
/// Delete backup group including all snapshots.
pub fn delete_group(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
},
},
returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
/// List snapshot files.
pub fn list_snapshot_files(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
_info: &ApiMethod,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
},
},
access: {
/// Delete backup snapshot.
pub fn delete_snapshot(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
_info: &ApiMethod,
streaming: true,
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
"backup-type": {
optional: true,
- schema: BACKUP_TYPE_SCHEMA,
+ type: BackupType,
},
"backup-id": {
optional: true,
/// List backup snapshots.
pub fn list_snapshots(
store: String,
- backup_type: Option<String>,
+ backup_type: Option<BackupType>,
backup_id: Option<String>,
_param: Value,
_info: &ApiMethod,
};
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
- let backup_type = group.backup_type().to_string();
- let backup_id = group.backup_id().to_string();
- let backup_time = info.backup_dir.backup_time();
+ let backup = pbs_api_types::BackupDir {
+ group: group.into(),
+ time: info.backup_dir.backup_time(),
+ };
let protected = info.backup_dir.is_protected(datastore.base_path());
match get_all_snapshot_files(&datastore, &info) {
let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
SnapshotListItem {
- backup_type,
- backup_id,
- backup_time,
+ backup,
comment,
verification,
fingerprint,
.collect();
SnapshotListItem {
- backup_type,
- backup_id,
- backup_time,
+ backup,
comment: None,
verification: None,
fingerprint: None,
// only include groups with snapshots, counting/displaying empty groups can confuse
if snapshot_count > 0 {
let type_count = match group.backup_type() {
- "ct" => counts.ct.get_or_insert(Default::default()),
- "vm" => counts.vm.get_or_insert(Default::default()),
- "host" => counts.host.get_or_insert(Default::default()),
- _ => counts.other.get_or_insert(Default::default()),
+ BackupType::Ct => counts.ct.get_or_insert(Default::default()),
+ BackupType::Vm => counts.vm.get_or_insert(Default::default()),
+ BackupType::Host => counts.host.get_or_insert(Default::default()),
};
type_count.groups += 1;
schema: DATASTORE_SCHEMA,
},
"backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
+ type: BackupType,
optional: true,
},
"backup-id": {
/// or all backups in the datastore.
pub fn verify(
store: String,
- backup_type: Option<String>,
+ backup_type: Option<BackupType>,
backup_id: Option<String>,
backup_time: Option<i64>,
ignore_verified: Option<bool>,
#[api(
input: {
properties: {
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-type": { type: BackupType },
"dry-run": {
optional: true,
type: bool,
/// Prune a group on the datastore
pub fn prune(
backup_id: String,
- backup_type: String,
+ backup_type: BackupType,
dry_run: bool,
prune_options: PruneOptions,
store: String,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let group = BackupGroup::new(&backup_type, &backup_id);
+ let group = BackupGroup::new(backup_type, &backup_id);
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
- let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);
+ let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
let mut prune_result = Vec::new();
let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
let file_name = CLIENT_LOG_BLOB_NAME;
- let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
"filepath": {
description: "Base64 encoded path.",
type: String,
/// Get the entries of the given path of the catalog
pub fn catalog(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
filepath: String,
let filepath = required_string_param(&param, "filepath")?.to_owned();
- let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
},
},
access: {
/// Get "notes" for a backup group
pub fn get_group_notes(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
notes: {
description: "A multiline text.",
},
/// Set "notes" for a backup group
pub fn set_group_notes(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
notes: String,
rpcenv: &mut dyn RpcEnvironment,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
},
},
access: {
/// Get "notes" for a specific backup
pub fn get_notes(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
rpcenv: &mut dyn RpcEnvironment,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
notes: {
description: "A multiline text.",
},
/// Set "notes" for a specific backup
pub fn set_notes(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
notes: String,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
},
},
access: {
/// Query protection for a specific backup
pub fn get_protection(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
rpcenv: &mut dyn RpcEnvironment,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
- "backup-time": {
- schema: BACKUP_TIME_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
+ "backup-time": { schema: BACKUP_TIME_SCHEMA },
protected: {
description: "Enable/disable protection.",
},
/// En- or disable protection for a specific backup
pub fn set_protection(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
backup_time: i64,
protected: bool,
#[api(
input: {
properties: {
- store: {
- schema: DATASTORE_SCHEMA,
- },
- "backup-type": {
- schema: BACKUP_TYPE_SCHEMA,
- },
- "backup-id": {
- schema: BACKUP_ID_SCHEMA,
- },
+ store: { schema: DATASTORE_SCHEMA },
+ "backup-type": { type: BackupType },
+ "backup-id": { schema: BACKUP_ID_SCHEMA },
"new-owner": {
type: Authid,
},
/// Change owner of a backup group
pub fn set_backup_owner(
store: String,
- backup_type: String,
+ backup_type: BackupType,
backup_id: String,
new_owner: Authid,
rpcenv: &mut dyn RpcEnvironment,
use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+ Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_group = BackupGroup::new(backup_type, backup_id);
- let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+ let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
if !benchmark {
bail!("unable to run benchmark without --benchmark flags");
}
use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
- BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_READ,
+ Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
+ BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
+ PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupDir;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
- let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
for (store, snapshot) in media_catalog_snapshot_list(status_path, &media_id)? {
let backup_dir: BackupDir = snapshot.parse()?;
- if let Some(ref backup_type) = filter.backup_type {
+ if let Some(backup_type) = filter.backup_type {
if backup_dir.group().backup_type() != backup_type {
continue;
}
use proxmox_sys::{task_log, WorkerTaskContext};
-use pbs_api_types::{Authid, CryptMode, SnapshotVerifyState, VerifyState, UPID};
+use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
Ok(list) => list
- .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+ .filter(|group| {
+ !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
+ })
.filter(filter_by_owner)
.collect::<Vec<BackupGroup>>(),
Err(err) => {
.await
}) {
for item in data {
- list.push(format!("{}/{}", item.backup_type, item.backup_id));
+ list.push(format!("{}/{}", item.backup.ty, item.backup.id));
}
}
}
list.extend(
groups
.iter()
- .map(|group| format!("group:{}/{}", group.backup_type, group.backup_id)),
+ .map(|group| format!("group:{}/{}", group.backup.ty, group.backup.id)),
);
}
}
let mut result = client.get(&path, Some(args)).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;
- list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
+ list.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time));
client.login().await?; // make sure auth is complete
};
for (pos, item) in list.into_iter().enumerate() {
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+ let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
// in-progress backups can't be synced
if item.size.is_none() {
let total_count = list.len();
list.sort_unstable_by(|a, b| {
- let type_order = a.backup_type.cmp(&b.backup_type);
+ let type_order = a.backup.ty.cmp(&b.backup.ty);
if type_order == std::cmp::Ordering::Equal {
- a.backup_id.cmp(&b.backup_id)
+ a.backup.id.cmp(&b.backup.id)
} else {
type_order
}
let list: Vec<BackupGroup> = list
.into_iter()
- .map(|item| BackupGroup::new(item.backup_type, item.backup_id))
+ .map(|item| BackupGroup::new(item.backup.ty, item.backup.id))
.collect();
let list = if let Some(ref group_filter) = ¶ms.group_filter {