+use std::collections::{HashSet, HashMap};
+
+use chrono::{TimeZone, Local};
use failure::*;
use futures::*;
-
-use crate::tools;
-use crate::api_schema::*;
-use crate::api_schema::router::*;
-//use crate::server::rest::*;
+use hyper::http::request::Parts;
+use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
-use std::collections::{HashSet, HashMap};
-use chrono::{DateTime, Datelike, TimeZone, Local};
-use std::path::PathBuf;
-use std::sync::Arc;
-use crate::config::datastore;
+use proxmox::{sortable, identity};
+use proxmox::api::{http_err, list_subdirs_api_method};
+use proxmox::api::{ApiFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType};
+use proxmox::api::router::SubdirMap;
+use proxmox::api::schema::*;
+use proxmox::tools::{try_block, fs::file_get_contents, fs::file_set_contents};
+use crate::api2::types::*;
use crate::backup::*;
+use crate::config::datastore;
use crate::server::WorkerTask;
+use crate::tools;
-use hyper::{header, Body, Response, StatusCode};
-use hyper::http::request::Parts;
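+/// Read a snapshot's index.json.blob and return its "files" array,
+/// with an entry for the index blob itself appended.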
+fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Value, Error> {
+
+ let mut path = store.base_path();
+ path.push(backup_dir.relative_path());
+ path.push("index.json.blob");
+
+ let raw_data = file_get_contents(&path)?;
+ let data = DataBlob::from_raw(raw_data)?.decode(None)?;
+ let index_size = data.len();
+ let mut index: Value = serde_json::from_reader(&mut &data[..])?;
+
+ let mut result = index["files"].take();
+
+ if result == Value::Null {
+ bail!("missing 'files' property in backup index {:?}", path);
+ }
+
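+ // the index is not listed in its own "files" array, so append an entry for it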
+ result.as_array_mut().unwrap().push(json!({
+ "filename": "index.json.blob",
+ "size": index_size,
+ }));
+
+ Ok(result)
+}
fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
group_hash
}
-fn mark_selections<F: Fn(DateTime<Local>, &BackupInfo) -> String> (
- mark: &mut HashSet<PathBuf>,
- list: &Vec<BackupInfo>,
- keep: usize,
- select_id: F,
-){
- let mut hash = HashSet::new();
- for info in list {
- let local_time = info.backup_dir.backup_time().with_timezone(&Local);
- if hash.len() >= keep as usize { break; }
- let backup_id = info.backup_dir.relative_path();
- let sel_id: String = select_id(local_time, &info);
- if !hash.contains(&sel_id) {
- hash.insert(sel_id);
- //println!(" KEEP ID {} {}", backup_id, local_time.format("%c"));
- mark.insert(backup_id);
- }
- }
-}
-
fn list_groups(
param: Value,
_info: &ApiMethod,
let backup_id = tools::required_string_param(&param, "backup-id")?;
let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let datastore = DataStore::lookup_datastore(store)?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
- let datastore = DataStore::lookup_datastore(store)?;
+ let mut files = read_backup_index(&datastore, &snapshot)?;
+
+ let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
- let path = datastore.base_path();
- let files = BackupInfo::list_files(&path, &snapshot)?;
+ let file_set = files.as_array().unwrap().iter().fold(HashSet::new(), |mut acc, item| {
+ acc.insert(item["filename"].as_str().unwrap().to_owned());
+ acc
+ });
+
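+ // add files present on disk but missing from the index (e.g. an uploaded client log)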
+ for file in info.files {
+ if file_set.contains(&file) { continue; }
+ files.as_array_mut().unwrap().push(json!({ "filename": file }));
+ }
- Ok(json!(files))
+ Ok(files)
}
fn delete_snapshots (
) -> Result<Value, Error> {
let store = tools::required_string_param(&param, "store")?;
- let backup_type = tools::required_string_param(¶m, "backup-type")?;
- let backup_id = tools::required_string_param(¶m, "backup-id")?;
-
- let group = BackupGroup::new(backup_type, backup_id);
+ let backup_type = param["backup-type"].as_str();
+ let backup_id = param["backup-id"].as_str();
let datastore = DataStore::lookup_datastore(store)?;
let base_path = datastore.base_path();
- let backup_list = group.list_backups(&base_path)?;
+ let backup_list = BackupInfo::list_backups(&base_path)?;
let mut snapshots = vec![];
for info in backup_list {
- snapshots.push(json!({
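+ // apply the optional backup-type / backup-id filters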
+ let group = info.backup_dir.group();
+ if let Some(backup_type) = backup_type {
+ if backup_type != group.backup_type() { continue; }
+ }
+ if let Some(backup_id) = backup_id {
+ if backup_id != group.backup_id() { continue; }
+ }
+
+ let mut result_item = json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": info.backup_dir.backup_time().timestamp(),
"files": info.files,
- }));
+ });
+
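+ // if the index is readable, report the summed size of all files in the snapshot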
+ if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
+ let mut backup_size = 0;
+ for item in index.as_array().unwrap().iter() {
+ if let Some(item_size) = item["size"].as_u64() {
+ backup_size += item_size;
+ }
+ }
+ result_item["size"] = backup_size.into();
+ }
+
+ snapshots.push(result_item);
}
Ok(json!(snapshots))
}
-fn prune(
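+/// Get total, used and available bytes for the datastore, via statfs64 on its base path.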
+fn status(
param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
let datastore = DataStore::lookup_datastore(store)?;
- let mut keep_all = true;
+ let base_path = datastore.base_path();
- for opt in &["keep-last", "keep-daily", "keep-weekly", "keep-weekly", "keep-yearly"] {
- if !param[opt].is_null() {
- keep_all = false;
- break;
- }
- }
+ let mut stat: libc::statfs64 = unsafe { std::mem::zeroed() };
- let worker = WorkerTask::new("prune", Some(store.to_owned()), "root@pam", true)?;
- let result = try_block! {
- if keep_all {
- worker.log("No selection - keeping all files.");
- return Ok(());
- } else {
- worker.log(format!("Starting prune on store {}", store));
- }
+ use nix::NixPath;
- let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
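+ // with_nix_path hands the base path to libc as a NUL-terminated C string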
+ let res = base_path.with_nix_path(|cstr| unsafe { libc::statfs64(cstr.as_ptr(), &mut stat) })?;
+ nix::errno::Errno::result(res)?;
- let group_hash = group_backups(backup_list);
+ let bsize = stat.f_bsize as u64;
+ Ok(json!({
+ "total": stat.f_blocks*bsize,
+ "used": (stat.f_blocks-stat.f_bfree)*bsize,
+ "avail": stat.f_bavail*bsize,
+ }))
+}
- for (_group_id, mut list) in group_hash {
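+/// Splice the common optional 'keep-*' prune parameters into a schema
+/// parameter list, between two caller-supplied lists, so the combined
+/// list stays sorted by parameter name.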
+#[macro_export]
+macro_rules! add_common_prune_parameters {
+ ( [ $( $list1:tt )* ] ) => {
+ add_common_prune_parameters!([$( $list1 )* ] , [])
+ };
+ ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
+ [
+ $( $list1 )*
+ (
+ "keep-daily",
+ true,
+ &IntegerSchema::new("Number of daily backups to keep.")
+ .minimum(1)
+ .schema()
+ ),
+ (
+ "keep-last",
+ true,
+ &IntegerSchema::new("Number of backups to keep.")
+ .minimum(1)
+ .schema()
+ ),
+ (
+ "keep-monthly",
+ true,
+ &IntegerSchema::new("Number of monthly backups to keep.")
+ .minimum(1)
+ .schema()
+ ),
+ (
+ "keep-weekly",
+ true,
+ &IntegerSchema::new("Number of weekly backups to keep.")
+ .minimum(1)
+ .schema()
+ ),
+ (
+ "keep-yearly",
+ true,
+ &IntegerSchema::new("Number of yearly backups to keep.")
+ .minimum(1)
+ .schema()
+ ),
+ $( $list2 )*
+ ]
+ }
+}
- let mut mark = HashSet::new();
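+// note: reusing the prune parameter macro means the optional keep-* parameters
+// are part of the schema here as well; status() ignores them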
+const API_METHOD_STATUS: ApiMethod = ApiMethod::new(
+ &ApiHandler::Sync(&status),
+ &ObjectSchema::new(
+ "Get datastore status.",
+ &add_common_prune_parameters!([],[
+ ("store", false, &StringSchema::new("Datastore name.").schema()),
+ ]),
+ )
+);
- BackupInfo::sort_list(&mut list, false);
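+/// Prune a backup group: compute_prune_info() decides which snapshots to keep;
+/// the rest are removed, oldest first, inside a worker task.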
+fn prune(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
- if let Some(keep_last) = param["keep-last"].as_u64() {
- list.iter().take(keep_last as usize).for_each(|info| {
- mark.insert(info.backup_dir.relative_path());
- });
- }
+ let store = param["store"].as_str().unwrap();
- if let Some(keep_daily) = param["keep-daily"].as_u64() {
- mark_selections(&mut mark, &list, keep_daily as usize, |local_time, _info| {
- format!("{}/{}/{}", local_time.year(), local_time.month(), local_time.day())
- });
- }
+ let backup_type = tools::required_string_param(&param, "backup-type")?;
+ let backup_id = tools::required_string_param(&param, "backup-id")?;
- if let Some(keep_weekly) = param["keep-weekly"].as_u64() {
- mark_selections(&mut mark, &list, keep_weekly as usize, |local_time, _info| {
- format!("{}/{}", local_time.year(), local_time.iso_week().week())
- });
- }
+ let group = BackupGroup::new(backup_type, backup_id);
- if let Some(keep_monthly) = param["keep-monthly"].as_u64() {
- mark_selections(&mut mark, &list, keep_monthly as usize, |local_time, _info| {
- format!("{}/{}", local_time.year(), local_time.month())
- });
- }
+ let datastore = DataStore::lookup_datastore(store)?;
- if let Some(keep_yearly) = param["keep-yearly"].as_u64() {
- mark_selections(&mut mark, &list, keep_yearly as usize, |local_time, _info| {
- format!("{}/{}", local_time.year(), local_time.year())
- });
- }
+ let prune_options = PruneOptions {
+ keep_last: param["keep-last"].as_u64(),
+ keep_daily: param["keep-daily"].as_u64(),
+ keep_weekly: param["keep-weekly"].as_u64(),
+ keep_monthly: param["keep-monthly"].as_u64(),
+ keep_yearly: param["keep-yearly"].as_u64(),
+ };
- let mut remove_list: Vec<BackupInfo> = list.into_iter()
- .filter(|info| !mark.contains(&info.backup_dir.relative_path())).collect();
+ let worker = WorkerTask::new("prune", Some(store.to_owned()), "root@pam", true)?;
+ let result = try_block! {
+ if !prune_options.keeps_something() {
+ worker.log("No prune selection - keeping all files.");
+ return Ok(());
+ } else {
+ worker.log(format!("Starting prune on store {}", store));
+ }
+
+ let list = group.list_backups(&datastore.base_path())?;
- BackupInfo::sort_list(&mut remove_list, true);
+ let mut prune_info = compute_prune_info(list, &prune_options)?;
- for info in remove_list {
- worker.log(format!("remove {:?}", info.backup_dir));
+ prune_info.reverse(); // delete older snapshots first
+
+ for (info, keep) in prune_info {
+ if keep {
+ worker.log(format!("keep {:?}", info.backup_dir.relative_path()));
+ } else {
+ worker.log(format!("remove {:?}", info.backup_dir.relative_path()));
datastore.remove_backup_dir(&info.backup_dir)?;
}
}
Ok(json!(null))
}
-pub fn add_common_prune_prameters(schema: ObjectSchema) -> ObjectSchema {
-
- schema
- .optional(
- "keep-last",
- IntegerSchema::new("Number of backups to keep.")
- .minimum(1)
- )
- .optional(
- "keep-daily",
- IntegerSchema::new("Number of daily backups to keep.")
- .minimum(1)
- )
- .optional(
- "keep-weekly",
- IntegerSchema::new("Number of weekly backups to keep.")
- .minimum(1)
- )
- .optional(
- "keep-monthly",
- IntegerSchema::new("Number of monthly backups to keep.")
- .minimum(1)
- )
- .optional(
- "keep-yearly",
- IntegerSchema::new("Number of yearly backups to keep.")
- .minimum(1)
- )
-}
-
-fn api_method_prune() -> ApiMethod {
- ApiMethod::new(
- prune,
- add_common_prune_prameters(
- ObjectSchema::new("Prune the datastore.")
- .required(
- "store",
- StringSchema::new("Datastore name.")
- )
- )
+const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
+ &ApiHandler::Sync(&prune),
+ &ObjectSchema::new(
+ "Prune the datastore.",
+ &add_common_prune_parameters!([
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+ ],[
+ ("store", false, &StringSchema::new("Datastore name.").schema()),
+ ])
)
-}
+);
fn start_garbage_collection(
param: Value,
Ok(json!(upid_str))
}
-pub fn api_method_start_garbage_collection() -> ApiMethod {
- ApiMethod::new(
- start_garbage_collection,
- ObjectSchema::new("Start garbage collection.")
- .required("store", StringSchema::new("Datastore name."))
+#[sortable]
+pub const API_METHOD_START_GARBAGE_COLLECTION: ApiMethod = ApiMethod::new(
+ &ApiHandler::Sync(&start_garbage_collection),
+ &ObjectSchema::new(
+ "Start garbage collection.",
+ &sorted!([
+ ("store", false, &StringSchema::new("Datastore name.").schema()),
+ ])
)
-}
+);
fn garbage_collection_status(
param: Value,
Ok(serde_json::to_value(&status)?)
}
-pub fn api_method_garbage_collection_status() -> ApiMethod {
- ApiMethod::new(
- garbage_collection_status,
- ObjectSchema::new("Garbage collection status.")
- .required("store", StringSchema::new("Datastore name."))
+#[sortable]
+pub const API_METHOD_GARBAGE_COLLECTION_STATUS: ApiMethod = ApiMethod::new(
+ &ApiHandler::Sync(&garbage_collection_status),
+ &ObjectSchema::new(
+ "Garbage collection status.",
+ &sorted!([
+ ("store", false, &StringSchema::new("Datastore name.").schema()),
+ ])
)
-}
+);
-fn get_backup_list(
- param: Value,
+fn get_datastore_list(
+ _param: Value,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- //let config = datastore::config()?;
+ let config = datastore::config()?;
- let store = param["store"].as_str().unwrap();
+ Ok(config.convert_to_array("store"))
+}
- let datastore = DataStore::lookup_datastore(store)?;
- let mut list = vec![];
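+/// Stream a single raw file from a backup snapshot as an application/octet-stream response.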
+fn download_file(
+ _parts: Parts,
+ _req_body: Body,
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiFuture {
- let backup_list = BackupInfo::list_backups(&datastore.base_path())?;
+ async move {
+ let store = tools::required_string_param(&param, "store")?;
- for info in backup_list {
- list.push(json!({
- "backup-type": info.backup_dir.group().backup_type(),
- "backup-id": info.backup_dir.group().backup_id(),
- "backup-time": info.backup_dir.backup_time().timestamp(),
- "files": info.files,
- }));
- }
+ let datastore = DataStore::lookup_datastore(store)?;
- let result = json!(list);
+ let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
- Ok(result)
-}
+ let backup_type = tools::required_string_param(&param, "backup-type")?;
+ let backup_id = tools::required_string_param(&param, "backup-id")?;
+ let backup_time = tools::required_integer_param(&param, "backup-time")?;
-fn get_datastore_list(
- _param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+ println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
+ backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
- let config = datastore::config()?;
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
- Ok(config.convert_to_array("store"))
+ let mut path = datastore.base_path();
+ path.push(backup_dir.relative_path());
+ path.push(&file_name);
+
+ let file = tokio::fs::File::open(path)
+ .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
+ .await?;
+
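+ // stream the file in chunks instead of buffering it in memory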
+ let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
+ .map_ok(|bytes| hyper::Chunk::from(bytes.freeze()));
+ let body = Body::wrap_stream(payload);
+
+ // fixme: set other headers ?
+ Ok(Response::builder()
+ .status(StatusCode::OK)
+ .header(header::CONTENT_TYPE, "application/octet-stream")
+ .body(body)
+ .unwrap())
+ }.boxed()
}
+#[sortable]
+pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
+ &ApiHandler::AsyncHttp(&download_file),
+ &ObjectSchema::new(
+ "Download single raw file from backup snapshot.",
+ &sorted!([
+ ("store", false, &StringSchema::new("Datastore name.").schema()),
+ ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-time", false, &BACKUP_TIME_SCHEMA),
+ ("file-name", false, &StringSchema::new("Raw file name.")
+ .format(&FILENAME_FORMAT)
+ .schema()
+ ),
+ ]),
+ )
+);
-fn download_file(
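+/// Store an uploaded client log blob as "client.log.blob" inside the snapshot
+/// directory, refusing to overwrite an existing log.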
+fn upload_backup_log(
_parts: Parts,
- _req_body: Body,
+ req_body: Body,
param: Value,
- _info: &ApiAsyncMethod,
+ _info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
-) -> Result<BoxFut, Error> {
+) -> ApiFuture {
- let store = tools::required_string_param(&param, "store")?;
- let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+ async move {
+ let store = tools::required_string_param(&param, "store")?;
- let backup_type = tools::required_string_param(&param, "backup-type")?;
- let backup_id = tools::required_string_param(&param, "backup-id")?;
- let backup_time = tools::required_integer_param(&param, "backup-time")?;
+ let datastore = DataStore::lookup_datastore(store)?;
- println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
- backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
-
- let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
-
- let mut path = backup_dir.relative_path();
- path.push(&file_name);
-
- let response_future = tokio::fs::File::open(file_name)
- .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
- .and_then(move |file| {
- let payload = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new()).
- map(|bytes| {
- //sigh - howto avoid copy here? or the whole map() ??
- hyper::Chunk::from(bytes.to_vec())
- });
- let body = Body::wrap_stream(payload);
-
- // fixme: set other headers ?
- Ok(Response::builder()
- .status(StatusCode::OK)
- .header(header::CONTENT_TYPE, "application/octet-stream")
- .body(body)
- .unwrap())
- });
+ let file_name = "client.log.blob";
- Ok(Box::new(response_future))
-}
+ let backup_type = tools::required_string_param(&param, "backup-type")?;
+ let backup_id = tools::required_string_param(&param, "backup-id")?;
+ let backup_time = tools::required_integer_param(&param, "backup-time")?;
-pub fn api_method_download_file() -> ApiAsyncMethod {
- ApiAsyncMethod::new(
- download_file,
- ObjectSchema::new("Download single raw file from backup snapshot.")
- .required("store", StringSchema::new("Datastore name."))
- .required("backup-type", StringSchema::new("Backup type.")
- .format(Arc::new(ApiStringFormat::Enum(&["ct", "host"]))))
- .required("backup-id", StringSchema::new("Backup ID."))
- .required("backup-time", IntegerSchema::new("Backup time (Unix epoch.)")
- .minimum(1547797308))
- .required("file-name", StringSchema::new("Raw file name."))
- )
+ let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+ let mut path = datastore.base_path();
+ path.push(backup_dir.relative_path());
+ path.push(&file_name);
+
+ if path.exists() {
+ bail!("backup already contains a log.");
+ }
+
+ println!("Upload backup log to {}/{}/{}/{}/{}", store,
+ backup_type, backup_id, BackupDir::backup_time_to_string(backup_dir.backup_time()), file_name);
+
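+ // collect the complete request body into one buffer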
+ let data = req_body
+ .map_err(Error::from)
+ .try_fold(Vec::new(), |mut acc, chunk| {
+ acc.extend_from_slice(&*chunk);
+ future::ok::<_, Error>(acc)
+ })
+ .await?;
+
+ let blob = DataBlob::from_raw(data)?;
+ // always verify CRC at server side
+ blob.verify_crc()?;
+ let raw_data = blob.raw_data();
+ file_set_contents(&path, raw_data, None)?;
+
+ // fixme: use correct formatter
+ Ok(crate::server::formatter::json_response(Ok(Value::Null)))
+ }.boxed()
}
-pub fn router() -> Router {
-
- let store_schema: Arc<Schema> = Arc::new(
- StringSchema::new("Datastore name.").into()
- );
-
- let datastore_info = Router::new()
- .subdir(
- "backups",
- Router::new()
- .get(ApiMethod::new(
- get_backup_list,
- ObjectSchema::new("List backups.")
- .required("store", store_schema.clone()))))
- .subdir(
- "download",
- Router::new()
- .download(api_method_download_file())
- )
- .subdir(
- "gc",
- Router::new()
- .get(api_method_garbage_collection_status())
- .post(api_method_start_garbage_collection()))
- .subdir(
- "files",
- Router::new()
- .get(
- ApiMethod::new(
- list_snapshot_files,
- ObjectSchema::new("List snapshot files.")
- .required("store", store_schema.clone())
- .required("backup-type", StringSchema::new("Backup type."))
- .required("backup-id", StringSchema::new("Backup ID."))
- .required("backup-time", IntegerSchema::new("Backup time (Unix epoch.)")
- .minimum(1547797308))
+#[sortable]
+pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
+ &ApiHandler::AsyncHttp(&upload_backup_log),
+ &ObjectSchema::new(
+ "Download single raw file from backup snapshot.",
+ &sorted!([
+ ("store", false, &StringSchema::new("Datastore name.").schema()),
+ ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-time", false, &BACKUP_TIME_SCHEMA),
+ ]),
+ )
+);
+
+const STORE_SCHEMA: Schema = StringSchema::new("Datastore name.").schema();
+
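+// per-datastore sub-directories, matched below via match_all("store"); entries sorted by name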
+#[sortable]
+const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
+ (
+ "download",
+ &Router::new()
+ .download(&API_METHOD_DOWNLOAD_FILE)
+ ),
+ (
+ "files",
+ &Router::new()
+ .get(
+ &ApiMethod::new(
+ &ApiHandler::Sync(&list_snapshot_files),
+ &ObjectSchema::new(
+ "List snapshot files.",
+ &sorted!([
+ ("store", false, &STORE_SCHEMA),
+ ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-time", false, &BACKUP_TIME_SCHEMA),
+ ]),
)
)
- )
- .subdir(
- "groups",
- Router::new()
- .get(ApiMethod::new(
- list_groups,
- ObjectSchema::new("List backup groups.")
- .required("store", store_schema.clone()))))
- .subdir(
- "snapshots",
- Router::new()
- .get(
- ApiMethod::new(
- list_snapshots,
- ObjectSchema::new("List backup groups.")
- .required("store", store_schema.clone())
- .required("backup-type", StringSchema::new("Backup type."))
- .required("backup-id", StringSchema::new("Backup ID."))
+ )
+ ),
+ (
+ "gc",
+ &Router::new()
+ .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
+ .post(&API_METHOD_START_GARBAGE_COLLECTION)
+ ),
+ (
+ "groups",
+ &Router::new()
+ .get(
+ &ApiMethod::new(
+ &ApiHandler::Sync(&list_groups),
+ &ObjectSchema::new(
+ "List backup groups.",
+ &sorted!([ ("store", false, &STORE_SCHEMA) ]),
)
)
- .delete(
- ApiMethod::new(
- delete_snapshots,
- ObjectSchema::new("Delete backup snapshot.")
- .required("store", store_schema.clone())
- .required("backup-type", StringSchema::new("Backup type."))
- .required("backup-id", StringSchema::new("Backup ID."))
- .required("backup-time", IntegerSchema::new("Backup time (Unix epoch.)")
- .minimum(1547797308))
+ )
+ ),
+ (
+ "prune",
+ &Router::new()
+ .post(&API_METHOD_PRUNE)
+ ),
+ (
+ "snapshots",
+ &Router::new()
+ .get(
+ &ApiMethod::new(
+ &ApiHandler::Sync(&list_snapshots),
+ &ObjectSchema::new(
+ "List backup groups.",
+ &sorted!([
+ ("store", false, &STORE_SCHEMA),
+ ("backup-type", true, &BACKUP_TYPE_SCHEMA),
+ ("backup-id", true, &BACKUP_ID_SCHEMA),
+ ]),
)
)
+ )
+ .delete(
+ &ApiMethod::new(
+ &ApiHandler::Sync(&delete_snapshots),
+ &ObjectSchema::new(
+ "Delete backup snapshot.",
+ &sorted!([
+ ("store", false, &STORE_SCHEMA),
+ ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-time", false, &BACKUP_TIME_SCHEMA),
+ ]),
+ )
+ )
+ )
+ ),
+ (
+ "status",
+ &Router::new()
+ .get(&API_METHOD_STATUS)
+ ),
+ (
+ "upload-backup-log",
+ &Router::new()
+ .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
+ ),
+];
+
+const DATASTORE_INFO_ROUTER: Router = Router::new()
+ .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
+ .subdirs(DATASTORE_INFO_SUBDIRS);
+
+
+pub const ROUTER: Router = Router::new()
+ .get(
+ &ApiMethod::new(
+ &ApiHandler::Sync(&get_datastore_list),
+ &ObjectSchema::new("Directory index.", &[])
)
- .subdir(
- "prune",
- Router::new()
- .post(api_method_prune())
- )
- .list_subdirs();
-
-
-
- let route = Router::new()
- .get(ApiMethod::new(
- get_datastore_list,
- ObjectSchema::new("Directory index.")))
- .match_all("store", datastore_info);
-
-
-
- route
-}
+ )
+ .match_all("store", &DATASTORE_INFO_ROUTER);