+//! Datastore Management
+
use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
-use std::sync::{Arc, Mutex};
-use std::path::{Path, PathBuf};
-use std::pin::Pin;
+use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
+use tokio_stream::wrappers::ReceiverStream;
-use proxmox::api::{
- api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
- RpcEnvironment, RpcEnvironmentType, Permission
+use proxmox_sys::sortable;
+use proxmox_sys::fs::{
+ file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
+};
+use proxmox_router::{
+ list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
+ RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
};
-use proxmox::api::router::SubdirMap;
-use proxmox::api::schema::*;
-use proxmox::tools::fs::{replace_file, CreateOptions};
-use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
+use proxmox_schema::*;
+use proxmox_sys::{task_log, task_warn};
+use proxmox_async::blocking::WrappedReaderStream;
+use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
-use pxar::accessor::aio::{Accessor, FileContents, FileEntry};
+use pxar::accessor::aio::Accessor;
use pxar::EntryKind;
-use crate::api2::types::*;
-use crate::api2::node::rrd::create_value_from_rrd;
-use crate::backup::*;
-use crate::config::datastore;
-use crate::config::cached_user_info::CachedUserInfo;
-
-use crate::server::{jobstate::Job, WorkerTask};
-use crate::tools::{
- self,
- zip::{ZipEncoder, ZipEntry},
- AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream,
+use pbs_api_types::{
+ Authid, BackupContent, Counts, CryptMode,
+ DataStoreListItem, GarbageCollectionStatus, GroupListItem,
+ SnapshotListItem, SnapshotVerifyState, PruneOptions,
+ DataStoreStatus, RRDMode, RRDTimeFrame,
+ BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
+ BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
+ IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
+ VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
+ PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
+ PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,
+};
+use pbs_client::pxar::create_zip;
+use pbs_datastore::{
+ check_backup_owner, DataStore, BackupDir, BackupGroup, StoreProgress, LocalChunkReader,
+ CATALOG_NAME,
};
+use pbs_datastore::backup_info::BackupInfo;
+use pbs_datastore::cached_chunk_reader::CachedChunkReader;
+use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
+use pbs_datastore::data_blob::DataBlob;
+use pbs_datastore::data_blob_reader::DataBlobReader;
+use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
+use pbs_datastore::fixed_index::FixedIndexReader;
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::prune::compute_prune_info;
+use pbs_tools::json::{required_integer_param, required_string_param};
+use pbs_config::CachedUserInfo;
+use proxmox_rest_server::{WorkerTask, formatter};
-use crate::config::acl::{
- PRIV_DATASTORE_AUDIT,
- PRIV_DATASTORE_MODIFY,
- PRIV_DATASTORE_READ,
- PRIV_DATASTORE_PRUNE,
- PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_VERIFY,
+use crate::api2::node::rrd::create_value_from_rrd;
+use crate::backup::{
+ verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
};
+use crate::server::jobstate::Job;
+
+
+const GROUP_NOTES_FILE_NAME: &str = "notes";
+
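+// Group-level notes are kept in a plain "notes" file inside the group
+// directory, alongside the snapshot directories.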
+fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
+ let mut note_path = store.base_path();
+ note_path.push(group.group_path());
+ note_path.push(GROUP_NOTES_FILE_NAME);
+ note_path
+}
+
fn check_priv_or_backup_owner(
store: &DataStore,
group: &BackupGroup,
auth_id: &Authid,
required_privs: u64,
) -> Result<(), Error> {
let user_info = CachedUserInfo::new()?;
- let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]);
+ let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);
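+ // callers without the required privilege may still pass the check
+ // below if they own the backup group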
if privs & required_privs == 0 {
let owner = store.get_owner(group)?;
check_backup_owner(&owner, auth_id)?;
}
Ok(())
}
-fn check_backup_owner(
- owner: &Authid,
- auth_id: &Authid,
-) -> Result<(), Error> {
- let correct_owner = owner == auth_id
- || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
- if !correct_owner {
- bail!("backup owner check failed ({} != {})", auth_id, owner);
- }
- Ok(())
-}
-
fn get_all_snapshot_files(
store: &DataStore,
info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
- let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+ let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
acc.insert(item.filename.clone());
acc
});
},
},
},
- returns: {
- type: Array,
- description: "Returns the list of backup groups.",
- items: {
- type: GroupListItem,
- }
- },
+ returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
},
)]
/// List backup groups.
-fn list_groups(
+pub fn list_groups(
store: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
})
.to_owned();
+ let note_path = get_group_note_path(&datastore, &group);
+ let comment = file_read_firstline(&note_path).ok();
+
group_info.push(GroupListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
owner: Some(owner),
backup_count,
files: last_backup.files,
+ comment,
});
group_info
});

Ok(group_info)
}
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
+ true),
+ },
+)]
+/// Delete backup group including all snapshots.
+pub fn delete_group(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ _info: &ApiMethod,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+ let group = BackupGroup::new(backup_type, backup_id);
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
+
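+ // remove_backup_group returns false if it skipped any snapshot,
+ // e.g. because it is protected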
+ if !datastore.remove_backup_group(&group)? {
+ bail!("did not delete whole group because of protected snapthots");
+ }
+
+ Ok(Value::Null)
+}
+
#[api(
input: {
properties: {
},
},
},
- returns: {
- type: Array,
- description: "Returns the list of archive files inside a backup snapshots.",
- items: {
- type: BackupContent,
- }
- },
+ returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
},
)]
/// Delete backup snapshot.
-fn delete_snapshot(
+pub fn delete_snapshot(
store: String,
backup_type: String,
backup_id: String,
},
},
},
- returns: {
- type: Array,
- description: "Returns the list of snapshots.",
- items: {
- type: SnapshotListItem,
- }
- },
+ returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"],
let backup_type = group.backup_type().to_string();
let backup_id = group.backup_id().to_string();
let backup_time = info.backup_dir.backup_time();
+ let protected = info.backup_dir.is_protected(base_path.clone());
match get_all_snapshot_files(&datastore, &info) {
Ok((manifest, files)) => {
files,
size,
owner,
+ protected,
}
},
Err(err) => {
let files = info
.files
.into_iter()
- .map(|x| BackupContent {
- filename: x.to_string(),
+ .map(|filename| BackupContent {
+ filename,
size: None,
crypt_mode: None,
})
files,
size: None,
owner,
+ protected,
}
},
}
snapshots.extend(
group_backups
.into_iter()
- .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
+ .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
);
Ok(snapshots)
groups.iter()
.filter(|group| {
- let owner = match store.get_owner(&group) {
+ let owner = match store.get_owner(group) {
Ok(owner) => owner,
Err(err) => {
eprintln!("Failed to get owner of group '{}/{}' - {}",
.try_fold(Counts::default(), |mut counts, group| {
let snapshot_count = group.list_backups(&base_path)?.len() as u64;
- let type_count = match group.backup_type() {
- "ct" => counts.ct.get_or_insert(Default::default()),
- "vm" => counts.vm.get_or_insert(Default::default()),
- "host" => counts.host.get_or_insert(Default::default()),
- _ => counts.other.get_or_insert(Default::default()),
- };
+ // only include groups with snapshots (avoid confusing users
+ // by counting/displaying empty groups)
+ if snapshot_count > 0 {
+ let type_count = match group.backup_type() {
+ "ct" => counts.ct.get_or_insert(Default::default()),
+ "vm" => counts.vm.get_or_insert(Default::default()),
+ "host" => counts.host.get_or_insert(Default::default()),
+ _ => counts.other.get_or_insert(Default::default()),
+ };
- type_count.groups += 1;
- type_count.snapshots += snapshot_count;
+ type_count.groups += 1;
+ type_count.snapshots += snapshot_count;
+ }
Ok(counts)
})
schema: BACKUP_ID_SCHEMA,
optional: true,
},
+ "ignore-verified": {
+ schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
+ optional: true,
+ },
+ "outdated-after": {
+ schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
+ optional: true,
+ },
"backup-time": {
schema: BACKUP_TIME_SCHEMA,
optional: true,
backup_type: Option<String>,
backup_id: Option<String>,
backup_time: Option<i64>,
+ ignore_verified: Option<bool>,
+ outdated_after: Option<i64>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
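+ // by default, skip snapshots that already have a verification result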
+ let ignore_verified = ignore_verified.unwrap_or(true);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let worker_id;
_ => bail!("parameters do not specify a backup group or snapshot"),
}
- let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+ let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = WorkerTask::new_thread(
worker_type,
- Some(worker_id.clone()),
- auth_id.clone(),
+ Some(worker_id),
+ auth_id.to_string(),
to_stdout,
move |worker| {
- let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
- let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
-
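+ // the VerifyWorker now owns the verified/corrupt chunk caches that
+ // were previously threaded through every verify call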
+ let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
let failed_dirs = if let Some(backup_dir) = backup_dir {
let mut res = Vec::new();
if !verify_backup_dir(
- datastore,
+ &verify_worker,
&backup_dir,
- verified_chunks,
- corrupt_chunks,
- worker.clone(),
worker.upid().clone(),
- None,
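+ // re-verify only snapshots that are unverified, failed, or outdated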
+ Some(&move |manifest| {
+ verify_filter(ignore_verified, outdated_after, manifest)
+ }),
)? {
res.push(backup_dir.to_string());
}
res
} else if let Some(backup_group) = backup_group {
let failed_dirs = verify_backup_group(
- datastore,
+ &verify_worker,
&backup_group,
- verified_chunks,
- corrupt_chunks,
&mut StoreProgress::new(1),
- worker.clone(),
worker.upid(),
- None,
+ Some(&move |manifest| {
+ verify_filter(ignore_verified, outdated_after, manifest)
+ }),
)?;
failed_dirs
} else {
None
};
- verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
+ verify_all_backups(
+ &verify_worker,
+ worker.upid(),
+ owner,
+ Some(&move |manifest| {
+ verify_filter(ignore_verified, outdated_after, manifest)
+ }),
+ )?
};
- if failed_dirs.len() > 0 {
- worker.log("Failed to verify the following snapshots/groups:");
+ if !failed_dirs.is_empty() {
+ task_log!(worker, "Failed to verify the following snapshots/groups:");
for dir in failed_dirs {
- worker.log(format!("\t{}", dir));
+ task_log!(worker, "\t{}", dir);
}
bail!("verification failed - please check the log for details");
}
Ok(json!(upid_str))
}
-#[macro_export]
-macro_rules! add_common_prune_prameters {
- ( [ $( $list1:tt )* ] ) => {
- add_common_prune_prameters!([$( $list1 )* ] , [])
- };
- ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
- [
- $( $list1 )*
- (
- "keep-daily",
- true,
- &PRUNE_SCHEMA_KEEP_DAILY,
- ),
- (
- "keep-hourly",
- true,
- &PRUNE_SCHEMA_KEEP_HOURLY,
- ),
- (
- "keep-last",
- true,
- &PRUNE_SCHEMA_KEEP_LAST,
- ),
- (
- "keep-monthly",
- true,
- &PRUNE_SCHEMA_KEEP_MONTHLY,
- ),
- (
- "keep-weekly",
- true,
- &PRUNE_SCHEMA_KEEP_WEEKLY,
- ),
- (
- "keep-yearly",
- true,
- &PRUNE_SCHEMA_KEEP_YEARLY,
- ),
- $( $list2 )*
- ]
- }
-}
-
-pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
- "Returns the list of snapshots and a flag indicating if there are kept or removed.",
- &PruneListItem::API_SCHEMA
-).schema();
-
-const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&prune),
- &ObjectSchema::new(
- "Prune the datastore.",
- &add_common_prune_prameters!([
- ("backup-id", false, &BACKUP_ID_SCHEMA),
- ("backup-type", false, &BACKUP_TYPE_SCHEMA),
- ("dry-run", true, &BooleanSchema::new(
- "Just show what prune would do, but do not delete anything.")
- .schema()
- ),
- ],[
- ("store", false, &DATASTORE_SCHEMA),
- ])
- ))
- .returns(&API_RETURN_SCHEMA_PRUNE)
- .access(None, &Permission::Privilege(
- &["datastore", "{store}"],
- PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
- true)
-);
-
-fn prune(
- param: Value,
- _info: &ApiMethod,
+#[api(
+ input: {
+ properties: {
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "dry-run": {
+ optional: true,
+ type: bool,
+ default: false,
+ description: "Just show what prune would do, but do not delete anything.",
+ },
+ "prune-options": {
+ type: PruneOptions,
+ flatten: true,
+ },
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
+ },
+)]
+/// Prune a group on the datastore
+pub fn prune(
+ backup_id: String,
+ backup_type: String,
+ dry_run: bool,
+ prune_options: PruneOptions,
+ store: String,
+ _param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let store = tools::required_string_param(&param, "store")?;
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let dry_run = param["dry-run"].as_bool().unwrap_or(false);
-
- let group = BackupGroup::new(backup_type, backup_id);
+ let group = BackupGroup::new(&backup_type, &backup_id);
let datastore = DataStore::lookup_datastore(&store)?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
- let prune_options = PruneOptions {
- keep_last: param["keep-last"].as_u64(),
- keep_hourly: param["keep-hourly"].as_u64(),
- keep_daily: param["keep-daily"].as_u64(),
- keep_weekly: param["keep-weekly"].as_u64(),
- keep_monthly: param["keep-monthly"].as_u64(),
- keep_yearly: param["keep-yearly"].as_u64(),
- };
-
- let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
+ let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);
let mut prune_result = Vec::new();
prune_info.reverse(); // delete older snapshots first
- let keep_all = !prune_options.keeps_something();
+ let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
if dry_run {
- for (info, mut keep) in prune_info {
- if keep_all { keep = true; }
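+ // compute_prune_info now yields a PruneMark per snapshot instead of
+ // a plain bool, so protected snapshots can be reported as such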
+ for (info, mark) in prune_info {
+ let keep = keep_all || mark.keep();
let backup_time = info.backup_dir.backup_time();
let group = info.backup_dir.group();
"backup-id": group.backup_id(),
"backup-time": backup_time,
"keep": keep,
+ "protected": mark.protected(),
}));
}
return Ok(json!(prune_result));
// We use a WorkerTask just to have a task log, but run synchronously
- let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;
+ let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
if keep_all {
- worker.log("No prune selection - keeping all files.");
+ task_log!(worker, "No prune selection - keeping all files.");
} else {
- worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
- store, backup_type, backup_id));
+ task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
+ task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
+ store, backup_type, backup_id);
}
- for (info, mut keep) in prune_info {
- if keep_all { keep = true; }
+ for (info, mark) in prune_info {
+ let keep = keep_all || mark.keep();
let backup_time = info.backup_dir.backup_time();
let timestamp = info.backup_dir.backup_time_string();
group.backup_type(),
group.backup_id(),
timestamp,
- if keep { "keep" } else { "remove" },
+ mark,
);
- worker.log(msg);
+ task_log!(worker, "{}", msg);
prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": backup_time,
"keep": keep,
+ "protected": mark.protected(),
}));
if !(dry_run || keep) {
if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
- worker.warn(
- format!(
- "failed to remove dir {:?}: {}",
- info.backup_dir.relative_path(), err
- )
+ task_warn!(
+ worker,
+ "failed to remove dir {:?}: {}",
+ info.backup_dir.relative_path(),
+ err,
);
}
}
Ok(json!(prune_result))
}
+#[api(
+ input: {
+ properties: {
+ "dry-run": {
+ optional: true,
+ type: bool,
+ default: false,
+ description: "Just show what prune would do, but do not delete anything.",
+ },
+ "prune-options": {
+ type: PruneOptions,
+ flatten: true,
+ },
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ schema: UPID_SCHEMA,
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
+ },
+)]
+/// Prune the datastore
+pub fn prune_datastore(
+ dry_run: bool,
+ prune_options: PruneOptions,
+ store: String,
+ _param: Value,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
+
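+ // pruning a whole datastore can take a while, so run it as a worker
+ // task and hand the UPID back to the caller right away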
+ let upid_str = WorkerTask::new_thread(
+ "prune",
+ Some(store.clone()),
+ auth_id.to_string(),
+ to_stdout,
+ move |worker| crate::server::prune_datastore(
+ worker,
+ auth_id,
+ prune_options,
+ &store,
+ datastore,
+ dry_run
+ ),
+ )?;
+
+ Ok(upid_str)
+}
+
#[api(
input: {
properties: {
},
)]
/// Start garbage collection.
-fn start_garbage_collection(
+pub fn start_garbage_collection(
store: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
let job = Job::new("garbage_collection", &store)
.map_err(|_| format_err!("garbage collection already running"))?;
- let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+ let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
.map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
returns: {
description: "List the accessible datastores.",
type: Array,
- items: {
- description: "Datastore name and description.",
- type: DataStoreListItem,
- },
+ items: { type: DataStoreListItem },
},
access: {
permission: &Permission::Anybody,
},
)]
/// Datastore list
-fn get_datastore_list(
+pub fn get_datastore_list(
_param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
- let (config, _digest) = datastore::config()?;
+ let (config, _digest) = pbs_config::datastore::config()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let mut list = Vec::new();
for (store, (_, data)) in &config.sections {
- let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
+ let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
list.push(
}
}
- Ok(list.into())
+ Ok(list)
}
#[sortable]
true)
);
-fn download_file(
+pub fn download_file(
_parts: Parts,
_req_body: Body,
param: Value,
) -> ApiResponseFuture {
async move {
- let store = tools::required_string_param(&param, "store")?;
+ let store = required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+ let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_id = required_string_param(&param, "backup-id")?;
+ let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
- .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
+ .map_ok(|bytes| bytes.freeze())
.map_err(move |err| {
eprintln!("error during streaming of '{:?}' - {}", &path, err);
err
true)
);
-fn download_file_decoded(
+pub fn download_file_decoded(
_parts: Parts,
_req_body: Body,
param: Value,
) -> ApiResponseFuture {
async move {
- let store = tools::required_string_param(&param, "store")?;
+ let store = required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+ let file_name = required_string_param(&param, "file-name")?.to_owned();
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_id = required_string_param(&param, "backup-id")?;
+ let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
- let reader = AsyncIndexReader::new(index, chunk_reader);
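+ // the third argument is the cache size (in chunks); a single cached
+ // chunk is enough for this sequential read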
+ let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
Body::wrap_stream(AsyncReaderStream::new(reader)
.map_err(move |err| {
eprintln!("error during streaming of '{:?}' - {}", path, err);
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
- let reader = AsyncIndexReader::new(index, chunk_reader);
+ let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
.map_err(move |err| {
eprintln!("error during streaming of '{:?}' - {}", path, err);
&Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
-fn upload_backup_log(
+pub fn upload_backup_log(
_parts: Parts,
req_body: Body,
param: Value,
) -> ApiResponseFuture {
async move {
- let store = tools::required_string_param(&param, "store")?;
+ let store = required_string_param(&param, "store")?;
let datastore = DataStore::lookup_datastore(store)?;
let file_name = CLIENT_LOG_BLOB_NAME;
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_id = required_string_param(&param, "backup-id")?;
+ let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
// always verify blob/CRC at server side
let blob = DataBlob::load_from_reader(&mut &data[..])?;
- replace_file(&path, blob.raw_data(), CreateOptions::new())?;
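+ // the new trailing bool toggles fsync; skipped here for the log blob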
+ replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
// fixme: use correct formatter
- Ok(crate::server::formatter::json_response(Ok(Value::Null)))
+ Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
}.boxed()
}
},
)]
/// Get the entries of the given path of the catalog
-fn catalog(
+pub fn catalog(
store: String,
backup_type: String,
backup_id: String,
backup_time: i64,
filepath: String,
- _param: Value,
- _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+) -> Result<Vec<ArchiveEntry>, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let (csum, size) = index.compute_csum();
- manifest.verify_file(&file_name, &csum, size)?;
+ manifest.verify_file(file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let mut catalog_reader = CatalogReader::new(reader);
- let mut current = catalog_reader.root()?;
- let mut components = vec![];
-
-
- if filepath != "root" {
- components = base64::decode(filepath)?;
- if components.len() > 0 && components[0] == '/' as u8 {
- components.remove(0);
- }
- for component in components.split(|c| *c == '/' as u8) {
- if let Some(entry) = catalog_reader.lookup(¤t, component)? {
- current = entry;
- } else {
- bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
- }
- }
- }
-
- let mut res = Vec::new();
-
- for direntry in catalog_reader.read_dir(¤t)? {
- let mut components = components.clone();
- components.push('/' as u8);
- components.extend(&direntry.name);
- let path = base64::encode(components);
- let text = String::from_utf8_lossy(&direntry.name);
- let mut entry = json!({
- "filepath": path,
- "text": text,
- "type": CatalogEntryType::from(&direntry.attr).to_string(),
- "leaf": true,
- });
- match direntry.attr {
- DirEntryAttribute::Directory { start: _ } => {
- entry["leaf"] = false.into();
- },
- DirEntryAttribute::File { size, mtime } => {
- entry["size"] = size.into();
- entry["mtime"] = mtime.into();
- },
- _ => {},
- }
- res.push(entry);
- }
- Ok(res.into())
-}
-
-fn recurse_files<'a, T, W>(
- zip: &'a mut ZipEncoder<W>,
- decoder: &'a mut Accessor<T>,
- prefix: &'a Path,
- file: FileEntry<T>,
-) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
-where
- T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
- W: tokio::io::AsyncWrite + Unpin + Send + 'static,
-{
- Box::pin(async move {
- let metadata = file.entry().metadata();
- let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();
-
- match file.kind() {
- EntryKind::File { .. } => {
- let entry = ZipEntry::new(
- path,
- metadata.stat.mtime.secs,
- metadata.stat.mode as u16,
- true,
- );
- zip.add_entry(entry, Some(file.contents().await?))
- .await
- .map_err(|err| format_err!("could not send file entry: {}", err))?;
- }
- EntryKind::Hardlink(_) => {
- let realfile = decoder.follow_hardlink(&file).await?;
- let entry = ZipEntry::new(
- path,
- metadata.stat.mtime.secs,
- metadata.stat.mode as u16,
- true,
- );
- zip.add_entry(entry, Some(realfile.contents().await?))
- .await
- .map_err(|err| format_err!("could not send file entry: {}", err))?;
- }
- EntryKind::Directory => {
- let dir = file.enter_directory().await?;
- let mut readdir = dir.read_dir();
- let entry = ZipEntry::new(
- path,
- metadata.stat.mtime.secs,
- metadata.stat.mode as u16,
- false,
- );
- zip.add_entry::<FileContents<T>>(entry, None).await?;
- while let Some(entry) = readdir.next().await {
- let entry = entry?.decode_entry().await?;
- recurse_files(zip, decoder, prefix, entry).await?;
- }
- }
- _ => {} // ignore all else
- };
+ let path = if filepath != "root" && filepath != "/" {
+ base64::decode(filepath)?
+ } else {
+ vec![b'/']
+ };
- Ok(())
- })
+ catalog_reader.list_dir_contents(&path)
}
#[sortable]
true)
);
-fn pxar_file_download(
+pub fn pxar_file_download(
_parts: Parts,
_req_body: Body,
param: Value,
) -> ApiResponseFuture {
async move {
- let store = tools::required_string_param(&param, "store")?;
- let datastore = DataStore::lookup_datastore(&store)?;
+ let store = required_string_param(&param, "store")?;
+ let datastore = DataStore::lookup_datastore(store)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
+ let filepath = required_string_param(&param, "filepath")?.to_owned();
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let backup_type = required_string_param(&param, "backup-type")?;
+ let backup_id = required_string_param(&param, "backup-id")?;
+ let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
let mut components = base64::decode(&filepath)?;
- if components.len() > 0 && components[0] == '/' as u8 {
+ if !components.is_empty() && components[0] == b'/' {
components.remove(0);
}
- let mut split = components.splitn(2, |c| *c == '/' as u8);
+ let mut split = components.splitn(2, |c| *c == b'/');
let pxar_name = std::str::from_utf8(split.next().unwrap())?;
- let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
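+ // no path after the archive name means the archive root was requested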
+ let file_path = split.next().unwrap_or(b"/");
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
.map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
let (csum, size) = index.compute_csum();
- manifest.verify_file(&pxar_name, &csum, size)?;
+ manifest.verify_file(pxar_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let decoder = Accessor::new(reader, archive_size).await?;
let root = decoder.open_root().await?;
+ let path = OsStr::from_bytes(file_path).to_os_string();
let file = root
- .lookup(OsStr::from_bytes(file_path)).await?
- .ok_or(format_err!("error opening '{:?}'", file_path))?;
+ .lookup(&path).await?
+ .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
let body = match file.kind() {
EntryKind::File { .. } => Body::wrap_stream(
.map_err(move |err| {
eprintln!(
"error during streaming of hardlink '{:?}' - {}",
- filepath, err
+ path, err
);
err
}),
),
EntryKind::Directory => {
let (sender, receiver) = tokio::sync::mpsc::channel(100);
- let mut prefix = PathBuf::new();
- let mut components = file.entry().path().components();
- components.next_back(); // discard last
- for comp in components {
- prefix.push(comp);
- }
-
let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
-
- crate::server::spawn_internal_task(async move {
- let mut zipencoder = ZipEncoder::new(channelwriter);
- let mut decoder = decoder;
- recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
- .await
- .map_err(|err| eprintln!("error during creating of zip: {}", err))?;
-
- zipencoder
- .finish()
- .await
- .map_err(|err| eprintln!("error during finishing of zip: {}", err))
- });
-
- Body::wrap_stream(receiver.map_err(move |err| {
- eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
+ proxmox_rest_server::spawn_internal_task(
+ create_zip(channelwriter, decoder, path.clone(), false)
+ );
+ Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
+ eprintln!("error during streaming of zip '{:?}' - {}", path, err);
err
}))
}
schema: DATASTORE_SCHEMA,
},
timeframe: {
- type: RRDTimeFrameResolution,
+ type: RRDTimeFrame,
},
cf: {
type: RRDMode,
},
)]
/// Read datastore stats
-fn get_rrd_stats(
+pub fn get_rrd_stats(
store: String,
- timeframe: RRDTimeFrameResolution,
+ timeframe: RRDTimeFrame,
cf: RRDMode,
_param: Value,
) -> Result<Value, Error> {
)
}
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
+ },
+)]
+/// Get "notes" for a backup group
+pub fn get_group_notes(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let backup_group = BackupGroup::new(backup_type, backup_id);
+
+ check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
+
+ let note_path = get_group_note_path(&datastore, &backup_group);
+ Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
+}
+
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ notes: {
+ description: "A multiline text.",
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"],
+ PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
+ true),
+ },
+)]
+/// Set "notes" for a backup group
+pub fn set_group_notes(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ notes: String,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let backup_group = BackupGroup::new(backup_type, backup_id);
+
+ check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
+
+ let note_path = get_group_note_path(&datastore, &backup_group);
+ replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
+
+ Ok(())
+}
+
#[api(
input: {
properties: {
},
)]
/// Get "notes" for a specific backup
-fn get_notes(
+pub fn get_notes(
store: String,
backup_type: String,
backup_id: String,
},
)]
/// Set "notes" for a specific backup
-fn set_notes(
+pub fn set_notes(
store: String,
backup_type: String,
backup_id: String,
Ok(())
}
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ "backup-time": {
+ schema: BACKUP_TIME_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
+ },
+)]
+/// Query protection for a specific backup
+pub fn get_protection(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ backup_time: i64,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<bool, Error> {
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+ check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
+
+ Ok(backup_dir.is_protected(datastore.base_path()))
+}
+
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ "backup-time": {
+ schema: BACKUP_TIME_SCHEMA,
+ },
+ protected: {
+ description: "Enable/disable protection.",
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"],
+ PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
+ true),
+ },
+)]
+/// En- or disable protection for a specific backup
+pub fn set_protection(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ backup_time: i64,
+ protected: bool,
+ rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+ check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+
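+ // protection is persisted on disk (cf. is_protected() taking the base
+ // path above), so prune and group removal can honor it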
+ datastore.update_protection(&backup_dir, protected)
+}
+
#[api(
input: {
properties: {
},
)]
/// Change owner of a backup group
-fn set_backup_owner(
+pub fn set_backup_owner(
store: String,
backup_type: String,
backup_id: String,
.get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
.post(&API_METHOD_START_GARBAGE_COLLECTION)
),
+ (
+ "group-notes",
+ &Router::new()
+ .get(&API_METHOD_GET_GROUP_NOTES)
+ .put(&API_METHOD_SET_GROUP_NOTES)
+ ),
(
"groups",
&Router::new()
.get(&API_METHOD_LIST_GROUPS)
+ .delete(&API_METHOD_DELETE_GROUP)
),
(
"notes",
.get(&API_METHOD_GET_NOTES)
.put(&API_METHOD_SET_NOTES)
),
+ (
+ "protected",
+ &Router::new()
+ .get(&API_METHOD_GET_PROTECTION)
+ .put(&API_METHOD_SET_PROTECTION)
+ ),
(
"prune",
&Router::new()
.post(&API_METHOD_PRUNE)
),
+ (
+ "prune-datastore",
+ &Router::new()
+ .post(&API_METHOD_PRUNE_DATASTORE)
+ ),
(
"pxar-file-download",
&Router::new()