use std::collections::{HashSet, HashMap};
+use std::convert::TryFrom;
use chrono::{TimeZone, Local};
-use failure::*;
+use anyhow::{bail, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
-use proxmox::{sortable, identity};
-use proxmox::api::api;
-use proxmox::api::{http_err, list_subdirs_api_method};
-use proxmox::api::{ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
+use proxmox::api::{
+ api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
+ RpcEnvironment, RpcEnvironmentType, Permission};
use proxmox::api::router::SubdirMap;
use proxmox::api::schema::*;
-use proxmox::tools::try_block;
use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox::try_block;
+use proxmox::{http_err, identity, list_subdirs_api_method, sortable};
use crate::api2::types::*;
use crate::backup::*;
use crate::config::datastore;
use crate::server::WorkerTask;
use crate::tools;
+use crate::config::acl::{PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_ALLOCATE_SPACE};
-fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Value, Error> {
+fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
let mut path = store.base_path();
path.push(backup_dir.relative_path());
path.push("index.json.blob");
let raw_data = file_get_contents(&path)?;
- let data = DataBlob::from_raw(raw_data)?.decode(None)?;
- let index_size = data.len();
- let mut result: Value = serde_json::from_reader(&mut &data[..])?;
+ let index_size = raw_data.len() as u64;
+ let blob = DataBlob::from_raw(raw_data)?;
- let mut result = result["files"].take();
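+ // decode the blob into a typed BackupManifest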
+ let manifest = BackupManifest::try_from(blob)?;
- if result == Value::Null {
- bail!("missing 'files' property in backup index {:?}", path);
+ let mut result = Vec::new();
+ for item in manifest.files() {
+ result.push(BackupContent {
+ filename: item.filename.clone(),
+ size: Some(item.size),
+ });
}
- result.as_array_mut().unwrap().push(json!({
- "filename": "index.json.blob",
- "size": index_size,
- }));
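+ // the index blob itself is not listed in the manifest, so append it explicitly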
+ result.push(BackupContent {
+ filename: "index.json.blob".to_string(),
+ size: Some(index_size),
+ });
Ok(result)
}
group_hash
}
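+// the #[api] macro generates the API_METHOD_LIST_GROUPS descriptor that the router below registers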
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ type: Array,
+ description: "Returns the list of backup groups.",
+ items: {
+ type: GroupListItem,
+ }
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+ },
+)]
+/// List backup groups.
fn list_groups(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+ store: String,
+) -> Result<Vec<GroupListItem>, Error> {
- let store = param["store"].as_str().unwrap();
-
- let datastore = DataStore::lookup_datastore(store)?;
+ let datastore = DataStore::lookup_datastore(&store)?;
let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
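+ // group the flat snapshot list by backup group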
let group_hash = group_backups(backup_list);
- let mut groups = vec![];
+ let mut groups = Vec::new();
for (_group_id, mut list) in group_hash {
let info = &list[0];
let group = info.backup_dir.group();
- groups.push(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- "last-backup": info.backup_dir.backup_time().timestamp(),
- "backup-count": list.len() as u64,
- "files": info.files,
- }));
+ let result_item = GroupListItem {
+ backup_type: group.backup_type().to_string(),
+ backup_id: group.backup_id().to_string(),
+ last_backup: info.backup_dir.backup_time().timestamp(),
+ backup_count: list.len() as u64,
+ files: info.files.clone(),
+ };
+ groups.push(result_item);
}
- Ok(json!(groups))
+ Ok(groups)
}
-fn list_snapshot_files (
- param: Value,
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ "backup-time": {
+ schema: BACKUP_TIME_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ type: Array,
+ description: "Returns the list of archive files inside a backup snapshots.",
+ items: {
+ type: BackupContent,
+ }
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+ },
+)]
+/// List snapshot files.
+pub fn list_snapshot_files(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ backup_time: i64,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let store = tools::required_string_param(&param, "store")?;
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
+) -> Result<Vec<BackupContent>, Error> {
- let datastore = DataStore::lookup_datastore(store)?;
+ let datastore = DataStore::lookup_datastore(&store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
let mut files = read_backup_index(&datastore, &snapshot)?;
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
- let file_set = files.as_array().unwrap().iter().fold(HashSet::new(), |mut acc, item| {
- acc.insert(item["filename"].as_str().unwrap().to_owned());
+ let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
+ acc.insert(item.filename.clone());
acc
});
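+ // add files found on disk but missing from the index (size unknown)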
for file in info.files {
if file_set.contains(&file) { continue; }
- files.as_array_mut().unwrap().push(json!({ "filename": file }));
+ files.push(BackupContent { filename: file, size: None });
}
Ok(files)
}
-fn delete_snapshots (
- param: Value,
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ "backup-type": {
+ schema: BACKUP_TYPE_SCHEMA,
+ },
+ "backup-id": {
+ schema: BACKUP_ID_SCHEMA,
+ },
+ "backup-time": {
+ schema: BACKUP_TIME_SCHEMA,
+ },
+ },
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false),
+ },
+)]
+/// Delete backup snapshot.
+fn delete_snapshot(
+ store: String,
+ backup_type: String,
+ backup_id: String,
+ backup_time: i64,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let store = tools::required_string_param(&param, "store")?;
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
-
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
- let datastore = DataStore::lookup_datastore(store)?;
+ let datastore = DataStore::lookup_datastore(&store)?;
datastore.remove_backup_dir(&snapshot)?;
type: SnapshotListItem,
}
},
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+ },
)]
/// List backup snapshots.
-fn list_snapshots (
+pub fn list_snapshots(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
let mut backup_size = 0;
- for item in index.as_array().unwrap().iter() {
- if let Some(item_size) = item["size"].as_u64() {
+ for item in index.iter() {
+ if let Some(item_size) = item.size {
backup_size += item_size;
}
}
Ok(snapshots)
}
-#[sortable]
-const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&status),
- &ObjectSchema::new(
- "Get datastore status.",
- &sorted!([
- ("store", false, &DATASTORE_SCHEMA),
- ]),
- )
-);
-
-fn status(
- param: Value,
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ type: StorageStatus,
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+ },
+)]
+/// Get datastore status.
+pub fn status(
+ store: String,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+) -> Result<StorageStatus, Error> {
- let store = param["store"].as_str().unwrap();
-
- let datastore = DataStore::lookup_datastore(store)?;
+ let datastore = DataStore::lookup_datastore(&store)?;
let base_path = datastore.base_path();
nix::errno::Errno::result(res)?;
let bsize = stat.f_bsize as u64;
- Ok(json!({
- "total": stat.f_blocks*bsize,
- "used": (stat.f_blocks-stat.f_bfree)*bsize,
- "avail": stat.f_bavail*bsize,
- }))
+
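+ // f_bavail counts only the blocks available to unprivileged users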
+ Ok(StorageStatus {
+ total: stat.f_blocks * bsize,
+ used: (stat.f_blocks - stat.f_bfree) * bsize,
+ avail: stat.f_bavail * bsize,
+ })
}
#[macro_export]
("store", false, &DATASTORE_SCHEMA),
])
)
-);
+).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false));
fn prune(
param: Value,
let worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+ let mut prune_result = Vec::new();
+
+ let list = group.list_backups(&datastore.base_path())?;
+
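+ // compute_prune_info marks each snapshot with a keep/remove decision based on the options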
+ let mut prune_info = compute_prune_info(list, &prune_options)?;
+
+ prune_info.reverse(); // delete older snapshots first
+
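+ // an empty prune selection means every snapshot is kept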
+ let keep_all = !prune_options.keeps_something();
+
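+ // in dry-run mode just report the selection, without a worker task or any removal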
+ if dry_run {
+ for (info, mut keep) in prune_info {
+ if keep_all { keep = true; }
+
+ let backup_time = info.backup_dir.backup_time();
+ let group = info.backup_dir.group();
+
+ prune_result.push(json!({
+ "backup-type": group.backup_type(),
+ "backup-id": group.backup_id(),
+ "backup-time": backup_time.timestamp(),
+ "keep": keep,
+ }));
+ }
+ return Ok(json!(prune_result));
+ }
+
// We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
+
let result = try_block! {
- if !prune_options.keeps_something() {
+ if keep_all {
worker.log("No prune selection - keeping all files.");
- return Ok(());
} else {
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
- if dry_run {
- worker.log(format!("Testing prune on store \"{}\" group \"{}/{}\"",
- store, backup_type, backup_id));
- } else {
- worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
- store, backup_type, backup_id));
- }
+ worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+ store, backup_type, backup_id));
}
- let list = group.list_backups(&datastore.base_path())?;
-
- let mut prune_info = compute_prune_info(list, &prune_options)?;
-
- prune_info.reverse(); // delete older snapshots first
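+ // now apply the selection: log each decision and remove snapshots not kept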
+ for (info, mut keep) in prune_info {
+ if keep_all { keep = true; }
- for (info, keep) in prune_info {
let backup_time = info.backup_dir.backup_time();
let timestamp = BackupDir::backup_time_to_string(backup_time);
let group = info.backup_dir.group();
+
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
worker.log(msg);
+ prune_result.push(json!({
+ "backup-type": group.backup_type(),
+ "backup-id": group.backup_id(),
+ "backup-time": backup_time.timestamp(),
+ "keep": keep,
+ }));
+
if !(dry_run || keep) {
datastore.remove_backup_dir(&info.backup_dir)?;
}
if let Err(err) = result {
bail!("prune failed - {}", err);
- }
+ };
- Ok(json!(worker.to_string())) // return the UPID
+ Ok(json!(prune_result))
}
-#[sortable]
-pub const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&start_garbage_collection),
- &ObjectSchema::new(
- "Start garbage collection.",
- &sorted!([
- ("store", false, &DATASTORE_SCHEMA),
- ])
- )
-);
-
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ schema: UPID_SCHEMA,
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false),
+ },
+)]
+/// Start garbage collection.
fn start_garbage_collection(
- param: Value,
+ store: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let store = param["store"].as_str().unwrap().to_string();
-
let datastore = DataStore::lookup_datastore(&store)?;
println!("Starting garbage collection on store {}", store);
Ok(json!(upid_str))
}
-#[sortable]
-pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&garbage_collection_status),
- &ObjectSchema::new(
- "Garbage collection status.",
- &sorted!([
- ("store", false, &DATASTORE_SCHEMA),
- ])
- )
-);
-
-fn garbage_collection_status(
- param: Value,
+#[api(
+ input: {
+ properties: {
+ store: {
+ schema: DATASTORE_SCHEMA,
+ },
+ },
+ },
+ returns: {
+ type: GarbageCollectionStatus,
+ },
+ access: {
+ permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+ },
+)]
+/// Garbage collection status.
+pub fn garbage_collection_status(
+ store: String,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let store = param["store"].as_str().unwrap();
+) -> Result<GarbageCollectionStatus, Error> {
let datastore = DataStore::lookup_datastore(&store)?;
- println!("Garbage collection status on store {}", store);
-
let status = datastore.last_gc_status();
- Ok(serde_json::to_value(&status)?)
+ Ok(status)
}
-
+#[api(
+ access: {
+ permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_AUDIT, false),
+ },
+)]
+/// Datastore list.
fn get_datastore_list(
_param: Value,
_info: &ApiMethod,
let (config, _digest) = datastore::config()?;
- Ok(config.convert_to_array("store", None))
+ Ok(config.convert_to_array("store", None, &[]))
}
#[sortable]
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
- ("file-name", false, &StringSchema::new("Raw file name.")
- .format(&FILENAME_FORMAT)
- .schema()
- ),
+ ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
]),
)
-);
+).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false));
fn download_file(
_parts: Parts,
("backup-time", false, &BACKUP_TIME_SCHEMA),
]),
)
-);
+).access(None, &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_ALLOCATE_SPACE, false));
fn upload_backup_log(
_parts: Parts,
(
"files",
&Router::new()
- .get(
- &ApiMethod::new(
- &ApiHandler::Sync(&list_snapshot_files),
- &ObjectSchema::new(
- "List snapshot files.",
- &sorted!([
- ("store", false, &DATASTORE_SCHEMA),
- ("backup-type", false, &BACKUP_TYPE_SCHEMA),
- ("backup-id", false, &BACKUP_ID_SCHEMA),
- ("backup-time", false, &BACKUP_TIME_SCHEMA),
- ]),
- )
- )
- )
+ .get(&API_METHOD_LIST_SNAPSHOT_FILES)
),
(
"gc",
(
"groups",
&Router::new()
- .get(
- &ApiMethod::new(
- &ApiHandler::Sync(&list_groups),
- &ObjectSchema::new(
- "List backup groups.",
- &sorted!([ ("store", false, &DATASTORE_SCHEMA) ]),
- )
- )
- )
+ .get(&API_METHOD_LIST_GROUPS)
),
(
"prune",
"snapshots",
&Router::new()
.get(&API_METHOD_LIST_SNAPSHOTS)
- .delete(
- &ApiMethod::new(
- &ApiHandler::Sync(&delete_snapshots),
- &ObjectSchema::new(
- "Delete backup snapshot.",
- &sorted!([
- ("store", false, &DATASTORE_SCHEMA),
- ("backup-type", false, &BACKUP_TYPE_SCHEMA),
- ("backup-id", false, &BACKUP_ID_SCHEMA),
- ("backup-time", false, &BACKUP_TIME_SCHEMA),
- ]),
- )
- )
- )
+ .delete(&API_METHOD_DELETE_SNAPSHOT)
),
(
"status",
pub const ROUTER: Router = Router::new()
- .get(
- &ApiMethod::new(
- &ApiHandler::Sync(&get_datastore_list),
- &ObjectSchema::new("Directory index.", &[])
- )
- )
+ .get(&API_METHOD_GET_DATASTORE_LIST)
.match_all("store", &DATASTORE_INFO_ROUTER);