use anyhow::Error;
-use pbs_api_types::{Authid, BackupType};
+use pbs_api_types::{Authid, BackupNamespace, BackupType};
use pbs_client::{BackupReader, HttpClient, HttpClientOptions};
pub struct DummyWriter {
client,
None,
"store2",
- BackupType::Host,
- "elsa",
- backup_time,
+ &(
+ BackupNamespace::root(),
+ BackupType::Host,
+ "elsa".to_string(),
+ backup_time,
+ )
+ .into(),
true,
)
.await?;
use anyhow::Error;
-use pbs_api_types::{Authid, BackupType};
+use pbs_api_types::{Authid, BackupNamespace, BackupType};
use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
async fn upload_speed() -> Result<f64, Error> {
client,
None,
datastore,
- BackupType::Host,
- "speedtest",
- backup_time,
+ &(
+ BackupNamespace::root(),
+ BackupType::Host,
+ "speedtest".to_string(),
+ backup_time,
+ )
+ .into(),
false,
true,
)
use std::fmt;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
};
const_regex! {
+ pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
+
pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
- pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+ pub GROUP_PATH_REGEX = concat!(
+ r"^(", BACKUP_NS_PATH_RE!(), r")?",
+ r"(", BACKUP_TYPE_RE!(), ")/",
+ r"(", BACKUP_ID_RE!(), r")$",
+ );
pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
-
- pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
+ pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$");
pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
}
/// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every
/// component.
- fn display_as_path(&self) -> BackupNamespacePath {
+ pub fn display_as_path(&self) -> BackupNamespacePath {
BackupNamespacePath(self)
}
#[api(
properties: {
+ "backup-ns": { type: BackupNamespace },
"backup-type": { type: BackupType },
"backup-id": { schema: BACKUP_ID_SCHEMA },
},
#[serde(rename_all = "kebab-case")]
/// A backup group (without a data store).
pub struct BackupGroup {
+ /// An optional namespace this backup belongs to.
+ #[serde(
+ rename = "backup-ns",
+ skip_serializing_if = "BackupNamespace::is_root",
+ default
+ )]
+ pub ns: BackupNamespace,
+
/// Backup type.
#[serde(rename = "backup-type")]
pub ty: BackupType,
}
impl BackupGroup {
- pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
- Self { ty, id: id.into() }
+ pub fn new<T: Into<String>>(ns: BackupNamespace, ty: BackupType, id: T) -> Self {
+ Self {
+ ns,
+ ty,
+ id: id.into(),
+ }
}
pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
}
}
+/// Construct a [`BackupGroup`] from its `(namespace, type, id)` parts.
-impl From<(BackupType, String)> for BackupGroup {
- fn from(data: (BackupType, String)) -> Self {
+impl From<(BackupNamespace, BackupType, String)> for BackupGroup {
+ #[inline]
+ fn from(data: (BackupNamespace, BackupType, String)) -> Self {
Self {
- ty: data.0,
- id: data.1,
+ ns: data.0,
+ ty: data.1,
+ id: data.2,
}
}
}
impl std::cmp::Ord for BackupGroup {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ let ns_order = self.ns.cmp(&other.ns);
+ if ns_order != std::cmp::Ordering::Equal {
+ return ns_order;
+ }
+
let type_order = self.ty.cmp(&other.ty);
if type_order != std::cmp::Ordering::Equal {
return type_order;
}
+
// try to compare IDs numerically
let id_self = self.id.parse::<u64>();
let id_other = other.id.parse::<u64>();
impl fmt::Display for BackupGroup {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{}/{}", self.ty, self.id)
+ // The root namespace is omitted so pre-existing `type/id` paths keep
+ // round-tripping unchanged; non-root groups get the `ns/...` prefix
+ // rendered via display_as_path().
+ if self.ns.is_root() {
+ write!(f, "{}/{}", self.ty, self.id)
+ } else {
+ write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id)
+ }
}
}
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
Ok(Self {
- ty: cap.get(1).unwrap().as_str().parse()?,
- id: cap.get(2).unwrap().as_str().to_owned(),
+ // NOTE(review): capture group 1 is the *optional* namespace prefix in
+ // GROUP_PATH_REGEX, so `cap.get(1)` is `None` for plain `type/id`
+ // paths and this `.unwrap()` will panic — handle `None` as the root
+ // namespace, like BackupDir::from_str does. TODO confirm.
+ ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?,
+ ty: cap.get(2).unwrap().as_str().parse()?,
+ id: cap.get(3).unwrap().as_str().to_owned(),
})
}
}
}
}
+/// Construct a [`BackupDir`] from `(namespace, type, id, backup_time)` parts.
-impl From<(BackupType, String, i64)> for BackupDir {
- fn from(data: (BackupType, String, i64)) -> Self {
+impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir {
+ fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self {
Self {
- group: (data.0, data.1).into(),
- time: data.2,
+ // Reuse the BackupGroup conversion for the (ns, ty, id) triple.
+ group: (data.0, data.1, data.2).into(),
+ time: data.3,
}
}
}
impl BackupDir {
- pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
+ pub fn with_rfc3339<T>(
+ ns: BackupNamespace,
+ ty: BackupType,
+ id: T,
+ backup_time_string: &str,
+ ) -> Result<Self, Error>
where
T: Into<String>,
{
let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
- let group = BackupGroup::new(ty, id.into());
+ let group = BackupGroup::new(ns, ty, id.into());
Ok(Self { group, time })
}
+ #[inline]
pub fn ty(&self) -> BackupType {
self.group.ty
}
+ #[inline]
pub fn id(&self) -> &str {
&self.group.id
}
+
+ #[inline]
+ pub fn ns(&self) -> &BackupNamespace {
+ &self.group.ns
+ }
}
impl std::str::FromStr for BackupDir {
.captures(path)
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+ let ns = match cap.get(1) {
+ Some(cap) => BackupNamespace::from_path(cap.as_str())?,
+ None => BackupNamespace::root(),
+ };
BackupDir::with_rfc3339(
- cap.get(1).unwrap().as_str().parse()?,
- cap.get(2).unwrap().as_str(),
+ ns,
+ cap.get(2).unwrap().as_str().parse()?,
cap.get(3).unwrap().as_str(),
+ cap.get(4).unwrap().as_str(),
)
}
}
-impl std::fmt::Display for BackupDir {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for BackupDir {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// FIXME: log error?
let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
write!(f, "{}/{}", self.group, time)
}
}
+/// Used when both a backup group or a directory can be valid.
+///
+/// Returned by its [`FromStr`] impl, which accepts either a group path
+/// (`[ns/…/]type/id`) or a snapshot path (`[ns/…/]type/id/time`).
+pub enum BackupPart {
+ Group(BackupGroup),
+ Dir(BackupDir),
+}
+
+impl std::str::FromStr for BackupPart {
+ type Err = Error;
+
+ /// Parse a path which can be either a backup group or a snapshot dir.
+ fn from_str(path: &str) -> Result<Self, Error> {
+ let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
+ .captures(path)
+ .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+ // Capture group 1 is the optional namespace prefix; absence means
+ // the root namespace.
+ let ns = match cap.get(1) {
+ Some(cap) => BackupNamespace::from_path(cap.as_str())?,
+ None => BackupNamespace::root(),
+ };
+ let ty = cap.get(2).unwrap().as_str().parse()?;
+ let id = cap.get(3).unwrap().as_str().to_string();
+
+ // Group 4 (the rfc3339 time) is only present for snapshot paths.
+ Ok(match cap.get(4) {
+ Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?),
+ None => BackupPart::Group((ns, ty, id).into()),
+ })
+ }
+}
+
#[api(
properties: {
"backup": { type: BackupDir },
);
}
+#[rustfmt::skip]
+#[macro_export]
+/// Regex fragment for a namespace path prefix (`ns/foo/ns/bar/`, up to 8
+/// levels deep).
+///
+/// Two fixes vs. the draft: `(:?` was a typo for the non-capturing group
+/// `(?:` — as a *capturing* group it shifts the numbering of every later
+/// capture group in GROUP_PATH_REGEX / SNAPSHOT_PATH_REGEX_STR (breaking
+/// the `cap.get(2)`/`cap.get(3)` parsers) and also accepts a stray leading
+/// `:`. The fragment now also ends in `/`, because callers concatenate the
+/// backup type directly after it.
+/// NOTE(review): assumes BackupNamespace::from_path tolerates the captured
+/// trailing slash — confirm against its implementation.
+macro_rules! BACKUP_NS_PATH_RE {
+    () => (
+        concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
+    );
+}
+
#[rustfmt::skip]
#[macro_export]
macro_rules! SNAPSHOT_PATH_REGEX_STR {
() => (
- concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+ concat!(
+ r"(", BACKUP_NS_PATH_RE!(), ")?",
+ r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
+ )
);
}
+#[rustfmt::skip]
+#[macro_export]
+/// Matches either a group path (`[ns-prefix]type/id`) or a full snapshot
+/// path (`[ns-prefix]type/id/time`).
+///
+/// The previous `concat!(SNAPSHOT_PATH_REGEX_STR!(), "?")` only made the
+/// final *time capture group* optional while the `/` before it stayed
+/// mandatory, so a plain group path like `host/elsa` could never match.
+/// Here the whole `/time` tail is optional via a non-capturing group, and
+/// the capture numbering (1 = ns prefix, 2 = type, 3 = id, 4 = time) stays
+/// exactly what BackupPart::from_str expects.
+macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
+    () => {
+        concat!(
+            r"(", BACKUP_NS_PATH_RE!(), r")?",
+            r"(", BACKUP_TYPE_RE!(), r")/(", BACKUP_ID_RE!(), r")(?:/(", BACKUP_TIME_RE!(), r"))?",
+        )
+    };
+}
+
mod acl;
pub use acl::*;
use futures::future::AbortHandle;
use serde_json::{json, Value};
-use pbs_api_types::BackupType;
+use pbs_api_types::BackupDir;
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
- backup_type: BackupType,
- backup_id: &str,
- backup_time: i64,
+ backup: &BackupDir,
debug: bool,
) -> Result<Arc<BackupReader>, Error> {
let param = json!({
- "backup-type": backup_type,
- "backup-id": backup_id,
- "backup-time": backup_time,
+ "backup-ns": backup.ns(),
+ "backup-type": backup.ty(),
+ "backup-id": backup.id(),
+ "backup-time": backup.time,
"store": datastore,
"debug": debug,
});
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
-use pbs_api_types::{BackupType, HumanByte};
+use pbs_api_types::{BackupDir, HumanByte};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
- backup_type: BackupType,
- backup_id: &str,
- backup_time: i64,
+ backup: &BackupDir,
debug: bool,
benchmark: bool,
) -> Result<Arc<BackupWriter>, Error> {
let param = json!({
- "backup-type": backup_type,
- "backup-id": backup_id,
- "backup-time": backup_time,
+ "backup-ns": backup.ns(),
+ "backup-type": backup.ty(),
+ "backup-id": backup.id(),
+ "backup-time": backup.time,
"store": datastore,
"debug": debug,
"benchmark": benchmark
};
let query = json_object_to_query(json!({
+ "backup-ns": snapshot.group.ns,
"backup-type": snapshot.group.ty,
"backup-id": snapshot.group.id,
"backup-time": snapshot.time,
let store = unsafe { DataStore::open_path("", &base, None)? };
- for group in store.iter_backup_groups()? {
+ for group in store.iter_backup_groups(Default::default())? {
let group = group?;
println!("found group {}", group);
}
}
-impl std::fmt::Display for BackupGroup {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- let backup_type = self.backup_type();
- let id = self.backup_id();
- write!(f, "{}/{}", backup_type, id)
+impl fmt::Display for BackupGroup {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.group, f)
}
}
}
}
-impl std::fmt::Display for BackupDir {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for BackupDir {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}/{}", self.dir.group, self.backup_time_string)
}
}
use proxmox_sys::{task_log, task_warn};
use pbs_api_types::{
- Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
- HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
+ Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning,
+ GarbageCollectionStatus, HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
};
use pbs_config::ConfigVersionCache;
self.inner.chunk_store.base_path()
}
+ /// Absolute filesystem path of namespace `ns` below this datastore's
+ /// base path: each namespace component becomes an `ns/<name>` pair of
+ /// directory levels.
+ pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
+ let mut path = self.base_path();
+ // Pre-size the buffer so the pushes below don't reallocate.
+ path.reserve(ns.path_len());
+ for part in ns.components() {
+ path.push("ns");
+ path.push(part);
+ }
+ path
+ }
+
/// Cleanup a backup directory
///
/// Removes all files not mentioned in the manifest.
) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let mut full_path = self.base_path();
+ for ns in backup_group.ns.components() {
+ full_path.push("ns");
+ full_path.push(ns);
+ }
full_path.push(backup_group.ty.as_str());
std::fs::create_dir_all(&full_path)?;
///
/// The iterated item is still a Result that can contain errors from rather unexptected FS or
/// parsing errors.
- pub fn iter_backup_groups(self: &Arc<DataStore>) -> Result<ListGroups, Error> {
- ListGroups::new(Arc::clone(self))
+ pub fn iter_backup_groups(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<ListGroups, Error> {
+ ListGroups::new(Arc::clone(self), ns)
}
/// Get a streaming iter over top-level backup groups of a datatstore, filtered by Ok results
/// logged. Can be useful in iterator chain commands
pub fn iter_backup_groups_ok(
self: &Arc<DataStore>,
+ ns: BackupNamespace,
) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
let this = Arc::clone(self);
Ok(
- ListGroups::new(Arc::clone(&self))?.filter_map(move |group| match group {
+ ListGroups::new(Arc::clone(&self), ns)?.filter_map(move |group| match group {
Ok(group) => Some(group),
Err(err) => {
log::error!("list groups error on datastore {} - {}", this.name(), err);
/// Get a in-memory vector for all top-level backup groups of a datatstore
///
/// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
- pub fn list_backup_groups(self: &Arc<DataStore>) -> Result<Vec<BackupGroup>, Error> {
- ListGroups::new(Arc::clone(self))?.collect()
+ pub fn list_backup_groups(
+ self: &Arc<DataStore>,
+ ns: BackupNamespace,
+ ) -> Result<Vec<BackupGroup>, Error> {
+ ListGroups::new(Arc::clone(self), ns)?.collect()
}
pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
}
/// Open a backup group from this datastore.
- pub fn backup_group_from_parts<T>(self: &Arc<Self>, ty: BackupType, id: T) -> BackupGroup
+ pub fn backup_group_from_parts<T>(
+ self: &Arc<Self>,
+ ns: BackupNamespace,
+ ty: BackupType,
+ id: T,
+ ) -> BackupGroup
where
T: Into<String>,
{
- self.backup_group((ty, id.into()).into())
+ self.backup_group((ns, ty, id.into()).into())
}
/// Open a backup group from this datastore by backup group path such as `vm/100`.
/// Open a snapshot (backup directory) from this datastore.
pub fn backup_dir_from_parts<T>(
self: &Arc<Self>,
+ ns: BackupNamespace,
ty: BackupType,
id: T,
time: i64,
where
T: Into<String>,
{
- self.backup_dir((ty, id.into(), time).into())
+ self.backup_dir((ns, ty, id.into(), time).into())
}
/// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
/// A iterator for a (single) level of Backup Groups
pub struct ListGroups {
store: Arc<DataStore>,
+ ns: BackupNamespace,
type_fd: proxmox_sys::fs::ReadDir,
id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
}
impl ListGroups {
- pub fn new(store: Arc<DataStore>) -> Result<Self, Error> {
+ /// Create a group iterator over the (single) namespace level `ns` of
+ /// `store`; the directory scan starts below the namespace's on-disk
+ /// path instead of the datastore base.
+ pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+ let mut base_path = store.base_path().to_owned();
+ base_path.push(ns.path());
Ok(ListGroups {
- type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?,
+ type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?,
store,
+ ns,
id_state: None,
})
}
if BACKUP_ID_REGEX.is_match(name) {
return Some(Ok(BackupGroup::new(
Arc::clone(&self.store),
- (group_type, name.to_owned()).into(),
+ (self.ns.clone(), group_type, name.to_owned()).into(),
)));
}
}
};
use proxmox_schema::{api, ApiType, ReturnType};
-use pbs_api_types::BackupType;
+use pbs_api_types::{BackupNamespace, BackupType};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupRepository, BackupWriter};
use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
client,
crypt_config.clone(),
repo.store(),
- BackupType::Host,
- "benchmark",
- backup_time,
+ &(
+ BackupNamespace::root(),
+ BackupType::Host,
+ "benchmark".to_string(),
+ backup_time,
+ )
+ .into(),
false,
true,
)
use pbs_tools::json::required_string_param;
use crate::{
- api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot,
- complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
- extract_repository_from_value, format_key_source, record_repository, BackupDir, BackupGroup,
+ complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
+ complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
+ extract_repository_from_value, format_key_source, record_repository, BackupDir,
BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
};
let client = connect(&repo)?;
- let client = BackupReader::start(
- client,
- crypt_config.clone(),
- repo.store(),
- snapshot.group.ty,
- &snapshot.group.id,
- snapshot.time,
- true,
- )
- .await?;
+ let client =
+ BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let path = required_string_param(¶m, "snapshot")?;
let archive_name = required_string_param(¶m, "archive-name")?;
- let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
- let group: BackupGroup = path.parse()?;
- api_datastore_latest_snapshot(&client, repo.store(), group).await?
- } else {
- let snapshot: BackupDir = path.parse()?;
- (snapshot.group.ty, snapshot.group.id, snapshot.time)
- };
+ let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let crypto = crypto_parameters(¶m)?;
client,
crypt_config.clone(),
repo.store(),
- backup_type,
- &backup_id,
- backup_time,
+ &backup_dir,
true,
)
.await?;
use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
+use serde::Deserialize;
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
- Authid, BackupDir, BackupGroup, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte,
- PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus,
- BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
- TRAFFIC_CONTROL_RATE_SCHEMA,
+ Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
+ Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions, RateLimitConfig,
+ SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+ BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
client: &HttpClient,
store: &str,
group: BackupGroup,
-) -> Result<(BackupType, String, i64), Error> {
+) -> Result<BackupDir, Error> {
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
- Ok((group.ty, group.id, list[0].backup.time))
+ Ok((group, list[0].backup.time).into())
+}
+
+/// Resolve a CLI `path` argument to a concrete snapshot: a snapshot path is
+/// returned as-is, while a group path is resolved to the group's most
+/// recent snapshot via `api_datastore_latest_snapshot`.
+pub async fn dir_or_last_from_group(
+ client: &HttpClient,
+ repo: &BackupRepository,
+ path: &str,
+) -> Result<BackupDir, Error> {
+ match path.parse::<BackupPart>()? {
+ BackupPart::Dir(dir) => Ok(dir),
+ BackupPart::Group(group) => {
+ api_datastore_latest_snapshot(&client, repo.store(), group).await
+ }
+ }
+}
async fn backup_directory<P: AsRef<Path>>(
record_repository(&repo);
let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
- let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- let group = BackupGroup::new(item.backup.ty, item.backup.id);
- Ok(group.to_string())
+ let item = GroupListItem::deserialize(record)?;
+ Ok(item.backup.to_string())
};
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
- let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+ let item = GroupListItem::deserialize(record)?;
let snapshot = BackupDir {
group: item.backup,
time: item.last_backup,
};
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
- let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+ let item = GroupListItem::deserialize(record)?;
Ok(pbs_tools::format::render_backup_file_list(&item.files))
};
optional: true,
default: false,
},
+ "backup-ns": {
+ schema: BACKUP_NAMESPACE_SCHEMA,
+ optional: true,
+ },
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
optional: true,
.as_str()
.unwrap_or(proxmox_sys::nodename());
+ let backup_namespace: BackupNamespace = match param.get("backup-ns") {
+ Some(ns) => ns
+ .as_str()
+ .ok_or_else(|| format_err!("bad namespace {:?}", ns))?
+ .parse()?,
+ None => BackupNamespace::root(),
+ };
+
let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;
let include_dev = param["include-dev"].as_array();
let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo);
- println!(
- "Starting backup: {}/{}/{}",
+ let snapshot = BackupDir::from((
+ backup_namespace,
backup_type,
- backup_id,
- pbs_datastore::BackupDir::backup_time_to_string(backup_time)?
- );
+ backup_id.to_owned(),
+ backup_time,
+ ));
+ println!("Starting backup: {snapshot}");
println!("Client name: {}", proxmox_sys::nodename());
client,
crypt_config.clone(),
repo.store(),
- backup_type,
- backup_id,
- backup_time,
+ &snapshot,
verbose,
false,
)
None
};
- let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
let mut manifest = BackupManifest::new(snapshot);
let mut catalog = None;
let path = json::required_string_param(¶m, "snapshot")?;
- let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
- let group: BackupGroup = path.parse()?;
- api_datastore_latest_snapshot(&client, repo.store(), group).await?
- } else {
- let snapshot: BackupDir = path.parse()?;
- (snapshot.group.ty, snapshot.group.id, snapshot.time)
- };
+ let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let target = json::required_string_param(¶m, "target")?;
let target = if target == "-" { None } else { Some(target) };
client,
crypt_config.clone(),
repo.store(),
- backup_type,
- &backup_id,
- backup_time,
+ &backup_dir,
true,
)
.await?;
use proxmox_sys::fd::Fd;
use proxmox_sys::sortable;
-use pbs_api_types::{BackupDir, BackupGroup};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_config::key_config::load_and_decrypt_key;
use pbs_tools::json::required_string_param;
use crate::{
- api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name,
- complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value,
+ complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
+ complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
};
record_repository(&repo);
let path = required_string_param(¶m, "snapshot")?;
- let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
- let group: BackupGroup = path.parse()?;
- api_datastore_latest_snapshot(&client, repo.store(), group).await?
- } else {
- let snapshot: BackupDir = path.parse()?;
- (snapshot.group.ty, snapshot.group.id, snapshot.time)
- };
+ let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile {
client,
crypt_config.clone(),
repo.store(),
- backup_type,
- &backup_id,
- backup_time,
+ &backup_dir,
true,
)
.await?;
driver: Option<BlockDriverType>,
) -> Result<Vec<ArchiveEntry>, Error> {
let client = connect(&repo)?;
- let client = BackupReader::start(
- client,
- crypt_config.clone(),
- repo.store(),
- snapshot.group.ty,
- &snapshot.group.id,
- snapshot.time,
- true,
- )
- .await?;
+ let client =
+ BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
};
let client = connect(&repo)?;
- let client = BackupReader::start(
- client,
- crypt_config.clone(),
- repo.store(),
- snapshot.group.ty,
- &snapshot.group.id,
- snapshot.time,
- true,
- )
- .await?;
+ let client =
+ BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
let (manifest, _) = client.download_manifest().await?;
match path {
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
+use serde::Deserialize;
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;
use pxar::EntryKind;
use pbs_api_types::{
- Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
- GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
- SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
- BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
- PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
- PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+ Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
+ DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
+ RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+ BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
+ IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+ PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
+ UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
StoreProgress, CATALOG_NAME,
};
-use pbs_tools::json::{required_integer_param, required_string_param};
+use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask};
use crate::api2::node::rrd::create_value_from_rrd;
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
datastore
- .iter_backup_groups()?
+ .iter_backup_groups(Default::default())? // FIXME: Namespaces and recursion parameters!
.try_fold(Vec::new(), |mut group_info, group| {
let group = group?;
let owner = match datastore.get_owner(group.as_ref()) {
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
+ group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
},
},
access: {
/// Delete backup group including all snapshots.
pub fn delete_group(
store: String,
- backup_type: BackupType,
- backup_id: String,
+ group: pbs_api_types::BackupGroup,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
},
},
returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
/// List snapshot files.
pub fn list_snapshot_files(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
- let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let snapshot = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
},
},
access: {
/// Delete backup snapshot.
pub fn delete_snapshot(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let snapshot = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
+ "backup-ns": {
+ type: BackupNamespace,
+ optional: true,
+ },
"backup-type": {
optional: true,
type: BackupType,
/// List backup snapshots.
pub fn list_snapshots(
store: String,
+ backup_ns: Option<BackupNamespace>,
backup_type: Option<BackupType>,
backup_id: Option<String>,
_param: Value,
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
+ let backup_ns = backup_ns.unwrap_or_default();
+
// FIXME: filter also owner before collecting, for doing that nicely the owner should move into
// backup group and provide an error free (Err -> None) accessor
let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => {
- vec![datastore.backup_group_from_parts(backup_type, backup_id)]
+ vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)]
}
+ // FIXME: Recursion
(Some(backup_type), None) => datastore
- .iter_backup_groups_ok()?
+ .iter_backup_groups_ok(backup_ns)?
.filter(|group| group.backup_type() == backup_type)
.collect(),
+ // FIXME: Recursion
(None, Some(backup_id)) => datastore
- .iter_backup_groups_ok()?
+ .iter_backup_groups_ok(backup_ns)?
.filter(|group| group.backup_id() == backup_id)
.collect(),
- _ => datastore.list_backup_groups()?,
+ // FIXME: Recursion
+ (None, None) => datastore.list_backup_groups(backup_ns)?,
};
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
filter_owner: Option<&Authid>,
) -> Result<Counts, Error> {
store
- .iter_backup_groups_ok()?
+ .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
.filter(|group| {
let owner = match store.get_owner(group.as_ref()) {
Ok(owner) => owner,
store: {
schema: DATASTORE_SCHEMA,
},
+ "backup-ns": {
+ type: BackupNamespace,
+ optional: true,
+ },
"backup-type": {
type: BackupType,
optional: true,
/// or all backups in the datastore.
pub fn verify(
store: String,
+ backup_ns: Option<BackupNamespace>,
backup_type: Option<BackupType>,
backup_id: Option<String>,
backup_time: Option<i64>,
let mut backup_group = None;
let mut worker_type = "verify";
+ // FIXME: Recursion
+ // FIXME: Namespaces and worker ID, could this be an issue?
+ let backup_ns = backup_ns.unwrap_or_default();
+
match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!(
- "{}:{}/{}/{:08X}",
- store, backup_type, backup_id, backup_time
+ "{}:{}/{}/{}/{:08X}",
+ store,
+ backup_ns.display_as_path(),
+ backup_type,
+ backup_id,
+ backup_time
);
- let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let dir =
+ datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
worker_type = "verify_snapshot";
}
(Some(backup_type), Some(backup_id), None) => {
- worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
- let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
+ worker_id = format!(
+ "{}:{}/{}/{}",
+ store,
+ backup_ns.display_as_path(),
+ backup_type,
+ backup_id
+ );
+ let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
#[api(
input: {
properties: {
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-type": { type: BackupType },
+ group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
"dry-run": {
optional: true,
type: bool,
)]
/// Prune a group on the datastore
pub fn prune(
- backup_id: String,
- backup_type: BackupType,
+ group: pbs_api_types::BackupGroup,
dry_run: bool,
prune_options: PruneOptions,
store: String,
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let group = datastore.backup_group_from_parts(backup_type, &backup_id);
+ let group = datastore.backup_group(group);
check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
- let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
+ let worker_id = format!("{}:{}", store, group);
let mut prune_result = Vec::new();
);
task_log!(
worker,
- "Starting prune on store \"{}\" group \"{}/{}\"",
+ "Starting prune on store \"{}\" group \"{}\"",
store,
- backup_type,
- backup_id
+ group,
);
}
let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
- let backup_id = required_string_param(&param, "backup-id")?.to_owned();
- let backup_time = required_integer_param(&param, "backup-time")?;
-
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
check_priv_or_backup_owner(
&datastore,
let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
- let backup_id = required_string_param(&param, "backup-id")?.to_owned();
- let backup_time = required_integer_param(&param, "backup-time")?;
-
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
check_priv_or_backup_owner(
&datastore,
let file_name = CLIENT_LOG_BLOB_NAME;
- let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
- let backup_id = required_string_param(&param, "backup-id")?;
- let backup_time = required_integer_param(&param, "backup-time")?;
-
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let owner = datastore.get_owner(backup_dir.as_ref())?;
bail!("backup already contains a log.");
}
- println!(
- "Upload backup log to {}/{}/{}/{}/{}",
- store,
- backup_type,
- backup_id,
- backup_dir.backup_time_string(),
- file_name
- );
+ println!("Upload backup log to {store}/{backup_dir}/{file_name}");
let data = req_body
.map_err(Error::from)
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
"filepath": {
description: "Base64 encoded path.",
type: String,
/// Get the entries of the given path of the catalog
pub fn catalog(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
filepath: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
let filepath = required_string_param(&param, "filepath")?.to_owned();
- let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
- let backup_id = required_string_param(&param, "backup-id")?;
- let backup_time = required_integer_param(&param, "backup-time")?;
-
let tar = param["tar"].as_bool().unwrap_or(false);
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
+ backup_group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
},
},
access: {
/// Get "notes" for a backup group
pub fn get_group_notes(
store: String,
- backup_type: BackupType,
- backup_id: String,
+ backup_group: pbs_api_types::BackupGroup,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
+ backup_group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
notes: {
description: "A multiline text.",
},
/// Set "notes" for a backup group
pub fn set_group_notes(
store: String,
- backup_type: BackupType,
- backup_id: String,
+ backup_group: pbs_api_types::BackupGroup,
notes: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
},
},
access: {
/// Get "notes" for a specific backup
pub fn get_notes(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
notes: {
description: "A multiline text.",
},
/// Set "notes" for a specific backup
pub fn set_notes(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
notes: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
},
},
access: {
/// Query protection for a specific backup
pub fn get_protection(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
- "backup-time": { schema: BACKUP_TIME_SCHEMA },
+ backup_dir: {
+ type: pbs_api_types::BackupDir,
+ flatten: true,
+ },
protected: {
description: "Enable/disable protection.",
},
/// En- or disable protection for a specific backup
pub fn set_protection(
store: String,
- backup_type: BackupType,
- backup_id: String,
- backup_time: i64,
+ backup_dir: pbs_api_types::BackupDir,
protected: bool,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner(
&datastore,
input: {
properties: {
store: { schema: DATASTORE_SCHEMA },
- "backup-type": { type: BackupType },
- "backup-id": { schema: BACKUP_ID_SCHEMA },
+ backup_group: {
+ type: pbs_api_types::BackupGroup,
+ flatten: true,
+ },
"new-owner": {
type: Authid,
},
/// Change owner of a backup group
pub fn set_backup_owner(
store: String,
- backup_type: BackupType,
- backup_id: String,
+ backup_group: pbs_api_types::BackupGroup,
new_owner: Authid,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
+ let backup_group = datastore.backup_group(backup_group);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
+use serde::Deserialize;
use serde_json::{json, Value};
use proxmox_router::list_subdirs_api_method;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
- let backup_id = required_string_param(&param, "backup-id")?;
- let backup_time = required_integer_param(&param, "backup-time")?;
+ let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;
let protocols = parts
.headers
);
}
- let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
+ let worker_id = format!("{}:{}/{}", store, backup_dir_arg.ty(), backup_dir_arg.id());
let env_type = rpcenv.env_type();
- let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
+ let backup_group = datastore.backup_group(backup_dir_arg.group.clone());
- let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
+ let worker_type = if backup_group.backup_type() == BackupType::Host
+ && backup_group.backup_id() == "benchmark"
+ {
if !benchmark {
bail!("unable to run benchmark without --benchmark flags");
}
}
};
- let backup_dir = backup_group.backup_dir(backup_time)?;
+ let backup_dir = backup_group.backup_dir(backup_dir_arg.time)?;
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
use hyper::header::{self, HeaderValue, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
+use serde::Deserialize;
use serde_json::Value;
use proxmox_router::{
use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
- BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
- PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+ Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
+ BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
+ PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
-use pbs_tools::json::{required_integer_param, required_string_param};
+use pbs_tools::json::required_string_param;
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
- let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
- let backup_id = required_string_param(&param, "backup-id")?;
- let backup_time = required_integer_param(&param, "backup-time")?;
+ let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;
let protocols = parts
.headers
let env_type = rpcenv.env_type();
- let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+ let backup_dir = datastore.backup_dir(backup_dir)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.as_ref())?;
let correct_owner = owner == auth_id
let worker_id = format!(
"{}:{}/{}/{:08X}",
store,
- backup_type,
- backup_id,
- backup_dir.backup_time()
+ backup_dir.backup_type(),
+ backup_dir.backup_id(),
+ backup_dir.backup_time(),
);
WorkerTask::spawn(
let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?;
- let mut group_list = datastore.list_backup_groups()?;
+ // FIXME: Namespaces! Probably just recurse for now? Not sure about the usage here...
+ let mut group_list = datastore.list_backup_groups(Default::default())?;
group_list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
}
};
- let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
+ // FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
+ let mut list = match verify_worker
+ .datastore
+ .iter_backup_groups_ok(Default::default())
+ {
Ok(list) => list
.filter(|group| {
!(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
- for group in datastore.iter_backup_groups()? {
+ // FIXME: Namespaces and recursion!
+ for group in datastore.iter_backup_groups(Default::default())? {
let group = group?;
let list = group.list_backups()?;
continue;
}
- let backup_time = snapshot.time;
-
- remote_snapshots.insert(backup_time);
+ remote_snapshots.insert(snapshot.time);
if let Some(last_sync_time) = last_sync {
- if last_sync_time > backup_time {
- skip_info.update(backup_time);
+ if last_sync_time > snapshot.time {
+ skip_info.update(snapshot.time);
continue;
}
}
options,
)?;
- let reader = BackupReader::start(
- new_client,
- None,
- params.source.store(),
- snapshot.group.ty,
- &snapshot.group.id,
- backup_time,
- true,
- )
- .await?;
+ let reader =
+ BackupReader::start(new_client, None, params.source.store(), &snapshot, true).await?;
let result = pull_snapshot_from(
worker,
// explicit create shared lock to prevent GC on newly created chunks
let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;
+ // FIXME: Namespaces! AND: If we make this API call recurse down namespaces we need to do the
+ // same down in the `remove_vanished` case!
let path = format!("api2/json/admin/datastore/{}/groups", params.source.store());
let mut result = client
if params.remove_vanished {
let result: Result<(), Error> = proxmox_lang::try_block!({
- for local_group in params.store.iter_backup_groups()? {
+ // FIXME: See above comment about namespaces & recursion
+ for local_group in params.store.iter_backup_groups(Default::default())? {
let local_group = local_group?;
if new_groups.contains(local_group.as_ref()) {
continue;