use std::convert::TryFrom;
use chrono::{TimeZone, Local};
-use failure::*;
+use anyhow::{bail, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
-use proxmox::api::api;
-use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
+use proxmox::api::{
+ api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
+ RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
+use crate::config::cached_user_info::CachedUserInfo;
+
use crate::server::WorkerTask;
use crate::tools;
+use crate::config::acl::{
+ PRIV_DATASTORE_AUDIT,
+ PRIV_DATASTORE_MODIFY,
+ PRIV_DATASTORE_READ,
+ PRIV_DATASTORE_PRUNE,
+ PRIV_DATASTORE_BACKUP,
+};
+
+/// Verify that `userid` is the owner of backup `group` in `store`.
+///
+/// Looks up the group's owner via `store.get_owner()` and bails with a
+/// descriptive error on mismatch; a `get_owner()` failure is propagated
+/// as-is. Callers in this file use it to restrict snapshot operations
+/// (list/delete/prune/download/upload) to the backup owner when the
+/// user lacks the broader datastore privilege.
+fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
+ let owner = store.get_owner(group)?;
+ if &owner != userid {
+ bail!("backup owner check failed ({} != {})", userid, owner);
+ }
+ Ok(())
+}
fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
type: GroupListItem,
}
},
+ access: {
+ permission: &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
+ true),
+ },
)]
/// List backup groups.
fn list_groups(
store: String,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
let datastore = DataStore::lookup_datastore(&store)?;
let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
BackupInfo::sort_list(&mut list, false);
let info = &list[0];
+
let group = info.backup_dir.group();
+ let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+ if !list_all {
+ let owner = datastore.get_owner(group)?;
+ if owner != username { continue; }
+ }
+
let result_item = GroupListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
type: BackupContent,
}
},
+ access: {
+ permission: &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+ true),
+ },
)]
/// List snapshot files.
-fn list_snapshot_files(
+pub fn list_snapshot_files(
store: String,
backup_type: String,
backup_id: String,
backup_time: i64,
_info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
let datastore = DataStore::lookup_datastore(&store)?;
+
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
+ let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
+ if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+
let mut files = read_backup_index(&datastore, &snapshot)?;
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
},
},
},
+ access: {
+ permission: &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
+ true),
+ },
)]
/// Delete backup snapshot.
fn delete_snapshot(
backup_id: String,
backup_time: i64,
_info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let datastore = DataStore::lookup_datastore(&store)?;
+ let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
+ if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+
datastore.remove_backup_dir(&snapshot)?;
Ok(Value::Null)
type: SnapshotListItem,
}
},
+ access: {
+ permission: &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
+ true),
+ },
)]
/// List backup snapshots.
-fn list_snapshots (
- param: Value,
+pub fn list_snapshots (
+ store: String,
+ backup_type: Option<String>,
+ backup_id: Option<String>,
+ _param: Value,
_info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
- let store = tools::required_string_param(&param, "store")?;
- let backup_type = param["backup-type"].as_str();
- let backup_id = param["backup-id"].as_str();
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
- let datastore = DataStore::lookup_datastore(store)?;
+ let datastore = DataStore::lookup_datastore(&store)?;
let base_path = datastore.base_path();
for info in backup_list {
let group = info.backup_dir.group();
- if let Some(backup_type) = backup_type {
+ if let Some(ref backup_type) = backup_type {
if backup_type != group.backup_type() { continue; }
}
- if let Some(backup_id) = backup_id {
+ if let Some(ref backup_id) = backup_id {
if backup_id != group.backup_id() { continue; }
}
+ let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
+ if !list_all {
+ let owner = datastore.get_owner(group)?;
+ if owner != username { continue; }
+ }
+
let mut result_item = SnapshotListItem {
backup_type: group.backup_type().to_string(),
backup_id: group.backup_id().to_string(),
returns: {
type: StorageStatus,
},
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
+ },
)]
/// Get datastore status.
-fn status(
+pub fn status(
store: String,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
}
}
+pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
+ "Returns the list of snapshots and a flag indicating if there are kept or removed.",
+ PruneListItem::API_SCHEMA
+).schema();
+
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&prune),
&ObjectSchema::new(
],[
("store", false, &DATASTORE_SCHEMA),
])
- )
+ ))
+ .returns(&API_RETURN_SCHEMA_PRUNE)
+ .access(None, &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
+ true)
);
fn prune(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let store = param["store"].as_str().unwrap();
-
+ let store = tools::required_string_param(&param, "store")?;
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
let dry_run = param["dry-run"].as_bool().unwrap_or(false);
let group = BackupGroup::new(backup_type, backup_id);
- let datastore = DataStore::lookup_datastore(store)?;
+ let datastore = DataStore::lookup_datastore(&store)?;
+
+ let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
+ if !allowed { check_backup_owner(&datastore, &group, &username)?; }
let prune_options = PruneOptions {
keep_last: param["keep-last"].as_u64(),
let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+ let mut prune_result = Vec::new();
+
+ let list = group.list_backups(&datastore.base_path())?;
+
+ let mut prune_info = compute_prune_info(list, &prune_options)?;
+
+ prune_info.reverse(); // delete older snapshots first
+
+ let keep_all = !prune_options.keeps_something();
+
+ if dry_run {
+ for (info, mut keep) in prune_info {
+ if keep_all { keep = true; }
+
+ let backup_time = info.backup_dir.backup_time();
+ let group = info.backup_dir.group();
+
+ prune_result.push(json!({
+ "backup-type": group.backup_type(),
+ "backup-id": group.backup_id(),
+ "backup-time": backup_time.timestamp(),
+ "keep": keep,
+ }));
+ }
+ return Ok(json!(prune_result));
+ }
+
+
// We use a WorkerTask just to have a task log, but run synchrounously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
+
let result = try_block! {
- if !prune_options.keeps_something() {
+ if keep_all {
worker.log("No prune selection - keeping all files.");
- return Ok(());
} else {
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- if dry_run {
- worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
- store, backup_type, backup_id));
- } else {
- worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
- store, backup_type, backup_id));
- }
+ worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+ store, backup_type, backup_id));
}
- let list = group.list_backups(&datastore.base_path())?;
-
- let mut prune_info = compute_prune_info(list, &prune_options)?;
+ for (info, mut keep) in prune_info {
+ if keep_all { keep = true; }
- prune_info.reverse(); // delete older snapshots first
-
- for (info, keep) in prune_info {
let backup_time = info.backup_dir.backup_time();
let timestamp = BackupDir::backup_time_to_string(backup_time);
let group = info.backup_dir.group();
+
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
worker.log(msg);
+ prune_result.push(json!({
+ "backup-type": group.backup_type(),
+ "backup-id": group.backup_id(),
+ "backup-time": backup_time.timestamp(),
+ "keep": keep,
+ }));
+
if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir)?;
}
if let Err(err) = result {
bail!("prune failed - {}", err);
- }
+ };
- Ok(json!(worker.to_string())) // return the UPID
+ Ok(json!(prune_result))
}
#[api(
returns: {
schema: UPID_SCHEMA,
},
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
+ },
)]
/// Start garbage collection.
fn start_garbage_collection(
"garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
{
worker.log(format!("starting garbage collection on store {}", store));
- datastore.garbage_collection(worker)
+ datastore.garbage_collection(&worker)
})?;
Ok(json!(upid_str))
}
-#[sortable]
-pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&garbage_collection_status),
- &ObjectSchema::new(
- "Garbage collection status.",
- &sorted!([
- ("store", false, &DATASTORE_SCHEMA),
- ])
- )
-);
-
-fn garbage_collection_status(
- param: Value,
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ type: GarbageCollectionStatus,
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+ },
+)]
+/// Garbage collection status.
+pub fn garbage_collection_status(
+ store: String,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let store = param["store"].as_str().unwrap();
+) -> Result<GarbageCollectionStatus, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- println!("Garbage collection status on store {}", store);
-
let status = datastore.last_gc_status();
- Ok(serde_json::to_value(&status)?)
+ Ok(status)
}
-
+#[api(
+ returns: {
+ description: "List the accessible datastores.",
+ type: Array,
+ items: {
+ description: "Datastore name and description.",
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ comment: {
+ optional: true,
+ schema: SINGLE_LINE_COMMENT_SCHEMA,
+ },
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Anybody,
+ },
+)]
+/// Datastore list
fn get_datastore_list(
_param: Value,
_info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let (config, _digest) = datastore::config()?;
- Ok(config.convert_to_array("store", None))
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+
+ let mut list = Vec::new();
+
+ for (store, (_, data)) in &config.sections {
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+ let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
+ if allowed {
+ let mut entry = json!({ "store": store });
+ if let Some(comment) = data["comment"].as_str() {
+ entry["comment"] = comment.into();
+ }
+ list.push(entry);
+ }
+ }
+
+ Ok(list.into())
}
#[sortable]
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
- ("file-name", false, &StringSchema::new("Raw file name.")
- .format(&FILENAME_FORMAT)
- .schema()
- ),
+ ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
]),
)
+).access(None, &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+ true)
);
fn download_file(
_req_body: Body,
param: Value,
_info: &ApiMethod,
- _rpcenv: Box<dyn RpcEnvironment>,
+ rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let store = tools::required_string_param(&param, "store")?;
-
let datastore = DataStore::lookup_datastore(store)?;
+ let username = rpcenv.get_user().unwrap();
+ let user_info = CachedUserInfo::new()?;
+ let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+
let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
let backup_type = tools::required_string_param(&param, "backup-type")?;
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+ let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+ if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+
println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
-
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(&file_name);
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&upload_backup_log),
&ObjectSchema::new(
- "Download single raw file from backup snapshot.",
+ "Upload the client backup log file into a backup snapshot ('client.log.blob').",
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
]),
)
+).access(
+ Some("Only the backup creator/owner is allowed to do this."),
+ &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
fn upload_backup_log(
req_body: Body,
param: Value,
_info: &ApiMethod,
- _rpcenv: Box<dyn RpcEnvironment>,
+ rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
async move {
let store = tools::required_string_param(&param, "store")?;
-
let datastore = DataStore::lookup_datastore(store)?;
let file_name = "client.log.blob";
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+ let username = rpcenv.get_user().unwrap();
+ check_backup_owner(&datastore, backup_dir.group(), &username)?;
+
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
path.push(&file_name);
pub const ROUTER: Router = Router::new()
- .get(
- &ApiMethod::new(
- &ApiHandler::Sync(&get_datastore_list),
- &ObjectSchema::new("Directory index.", &[])
- )
- )
+ .get(&API_METHOD_GET_DATASTORE_LIST)
.match_all("store", &DATASTORE_INFO_ROUTER);