+//#[macro_use]
extern crate proxmox_backup;
use failure::*;
//use std::os::unix::io::AsRawFd;
-use chrono::{DateTime, Local, TimeZone};
+use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
-use std::collections::HashMap;
+use std::collections::{HashSet, HashMap};
+use std::io::Write;
+use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
use proxmox_backup::tools;
use proxmox_backup::cli::*;
+use proxmox_backup::api2::types::*;
use proxmox_backup::api_schema::*;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
+use proxmox_backup::pxar;
+
//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
//use proxmox_backup::backup::datastore::*;
use serde_json::{json, Value};
-use hyper::Body;
+//use hyper::Body;
use std::sync::Arc;
use regex::Regex;
use xdg::BaseDirectories;
use lazy_static::lazy_static;
+use futures::*;
+use tokio::sync::mpsc;
lazy_static! {
- static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|raw)):(.+)$").unwrap();
+ static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$").unwrap();
+
+ static ref REPO_URL_SCHEMA: Arc<Schema> = Arc::new(
+ StringSchema::new("Repository URL.")
+ .format(BACKUP_REPO_URL.clone())
+ .max_length(256)
+ .into()
+ );
+}
+
+
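+// A repository can be given explicitly via the "repository" parameter;
+// otherwise the PBS_REPOSITORY environment variable is used.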
+fn get_default_repository() -> Option<String> {
+ std::env::var("PBS_REPOSITORY").ok()
+}
+
+fn extract_repository_from_value(
+ param: &Value,
+) -> Result<BackupRepository, Error> {
+
+ let repo_url = param["repository"]
+ .as_str()
+ .map(String::from)
+ .or_else(get_default_repository)
+ .ok_or_else(|| format_err!("unable to get (default) repository"))?;
+
+ let repo: BackupRepository = repo_url.parse()?;
+
+ Ok(repo)
}
+fn extract_repository_from_map(
+ param: &HashMap<String, String>,
+) -> Option<BackupRepository> {
+
+ param.get("repository")
+ .map(String::from)
+ .or_else(get_default_repository)
+ .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
+}
fn record_repository(repo: &BackupRepository) {
_ => return,
};
- let mut data = tools::file_get_json(&path).unwrap_or(json!({}));
+ let mut data = file_get_json(&path, None).unwrap_or(json!({}));
let repo = repo.to_string();
let new_data = json!(map);
- let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
+ let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
_ => return result,
};
- let data = tools::file_get_json(&path).unwrap_or(json!({}));
+ let data = file_get_json(&path, None).unwrap_or(json!({}));
if let Some(map) = data.as_object() {
for (repo, _count) in map {
}
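+/// Stream a directory as a pxar archive, cut the stream into
+/// dynamically sized chunks and upload them to the backup server.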
fn backup_directory<P: AsRef<Path>>(
- client: &mut HttpClient,
- repo: &BackupRepository,
+ client: &BackupClient,
dir_path: P,
archive_name: &str,
- backup_id: &str,
- backup_time: DateTime<Local>,
- chunk_size: Option<u64>,
- all_file_systems: bool,
+ chunk_size: Option<usize>,
+ device_set: Option<HashSet<u64>>,
verbose: bool,
-) -> Result<(), Error> {
+ skip_lost_and_found: bool,
+ crypt_config: Option<Arc<CryptConfig>>,
+) -> Result<BackupStats, Error> {
- let mut param = json!({
- "archive-name": archive_name,
- "backup-type": "host",
- "backup-id": backup_id,
- "backup-time": backup_time.timestamp(),
- });
-
- if let Some(size) = chunk_size {
- param["chunk-size"] = size.into();
- }
+ let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found)?;
+ let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
- let query = tools::json_object_to_query(param)?;
+ let (tx, rx) = mpsc::channel(10); // buffer up to 10 chunks in flight
- let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);
+ let stream = rx
+ .map_err(Error::from)
+ .and_then(|x| x); // flatten
- let stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;
-
- let body = Body::wrap_stream(stream);
+ // spawn the chunker in a separate task so that it runs in parallel
+ tokio::spawn(
+ tx.send_all(chunk_stream.then(|r| Ok(r)))
+ .map_err(|_| {}).map(|_| ())
+ );
- client.upload("application/x-proxmox-backup-pxar", body, &path)?;
+ let stats = client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;
- Ok(())
+ Ok(stats)
}
-/****
-fn backup_image(datastore: &DataStore, file: &std::fs::File, size: usize, target: &str, chunk_size: usize) -> Result<(), Error> {
-
- let mut target = PathBuf::from(target);
-
- if let Some(ext) = target.extension() {
- if ext != "fidx" {
- bail!("got wrong file extension - expected '.fidx'");
- }
- } else {
- target.set_extension("fidx");
- }
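+/// Read a file or block device as a byte stream, cut it into
+/// fixed-size chunks and upload them to the backup server.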
+fn backup_image<P: AsRef<Path>>(
+ client: &BackupClient,
+ image_path: P,
+ archive_name: &str,
+ image_size: u64,
+ chunk_size: Option<usize>,
+ _verbose: bool,
+ crypt_config: Option<Arc<CryptConfig>>,
+) -> Result<BackupStats, Error> {
- let mut index = datastore.create_image_writer(&target, size, chunk_size)?;
+ let path = image_path.as_ref().to_owned();
- tools::file_chunker(file, chunk_size, |pos, chunk| {
- index.add_chunk(pos, chunk)?;
- Ok(true)
- })?;
+ let file = tokio::fs::File::open(path).wait()?;
- index.close()?; // commit changes
+ let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
+ .map_err(Error::from);
- Ok(())
-}
-*/
+ let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
-fn strip_chunked_file_expenstions(list: Vec<String>) -> Vec<String> {
+ let stats = client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;
- let mut result = vec![];
-
- for file in list.into_iter() {
- if file.ends_with(".didx") {
- result.push(file[..file.len()-5].to_owned());
- } else if file.ends_with(".fidx") {
- result.push(file[..file.len()-5].to_owned());
- } else {
- result.push(file); // should not happen
- }
- }
-
- result
+ Ok(stats)
}
-/* not used:
-fn list_backups(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
-) -> Result<Value, Error> {
-
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
-
- let mut client = HttpClient::new(repo.host(), repo.user());
-
- let path = format!("api2/json/admin/datastore/{}/backups", repo.store());
-
- let result = client.get(&path)?;
-
- record_repository(&repo);
-
- // fixme: implement and use output formatter instead ..
- let list = result["data"].as_array().unwrap();
-
- for item in list {
-
- let id = item["backup-id"].as_str().unwrap();
- let btype = item["backup-type"].as_str().unwrap();
- let epoch = item["backup-time"].as_i64().unwrap();
-
- let backup_dir = BackupDir::new(btype, id, epoch);
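+/// Strip the server-side index/blob extension (.didx, .fidx or .blob)
+/// to recover the client-side archive name.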
+fn strip_server_file_extension(name: &str) -> String {
- let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_chunked_file_expenstions(files);
-
- for filename in files {
- let path = backup_dir.relative_path().to_str().unwrap().to_owned();
- println!("{} | {}/{}", backup_dir.backup_time().format("%c"), path, filename);
- }
+ if name.ends_with(".didx") {
+ return name[..name.len()-5].to_owned();
+ } else if name.ends_with(".fidx") {
+ return name[..name.len()-5].to_owned();
+ } else if name.ends_with(".blob") {
+ return name[..name.len()-5].to_owned();
+ } else {
+ return name.to_owned(); // should not happen
}
-
- //Ok(result)
- Ok(Value::Null)
}
- */
fn list_backup_groups(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let client = HttpClient::new(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
- let mut result = client.get(&path)?;
+ let mut result = client.get(&path, None).wait()?;
record_repository(&repo);
}
});
+ let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+
+ let mut result = vec![];
+
for item in list {
let id = item["backup-id"].as_str().unwrap();
let btype = item["backup-type"].as_str().unwrap();
let epoch = item["last-backup"].as_i64().unwrap();
- let last_backup = Local.timestamp(epoch, 0);
+ let last_backup = Utc.timestamp(epoch, 0);
let backup_count = item["backup-count"].as_u64().unwrap();
let group = BackupGroup::new(btype, id);
let path = group.group_path().to_str().unwrap().to_owned();
- let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_chunked_file_expenstions(files);
-
- println!("{:20} | {} | {:5} | {}", path, last_backup.format("%c"),
- backup_count, tools::join(&files, ' '));
+ let files = item["files"].as_array().unwrap().iter()
+ .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();
+
+ if output_format == "text" {
+ println!(
+ "{:20} | {} | {:5} | {}",
+ path,
+ BackupDir::backup_time_to_string(last_backup),
+ backup_count,
+ tools::join(&files, ' '),
+ );
+ } else {
+ result.push(json!({
+ "backup-type": btype,
+ "backup-id": id,
+ "last-backup": epoch,
+ "backup-count": backup_count,
+ "files": files,
+ }));
+ }
}
- //Ok(result)
+ if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
+
Ok(Value::Null)
}
fn list_snapshots(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
- let path = tools::required_string_param(&param, "group")?;
- let group = BackupGroup::parse(path)?;
+ let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
- let query = tools::json_object_to_query(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- }))?;
+ let client = HttpClient::new(repo.host(), repo.user())?;
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
- let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
+ let mut args = json!({});
+ if let Some(path) = param["group"].as_str() {
+ let group = BackupGroup::parse(path)?;
+ args["backup-type"] = group.backup_type().into();
+ args["backup-id"] = group.backup_id().into();
+ }
- // fixme: params
- let result = client.get(&path)?;
+ let result = client.get(&path, Some(args)).wait()?;
record_repository(&repo);
- // fixme: implement and use output formatter instead ..
let list = result["data"].as_array().unwrap();
+ let mut result = vec![];
+
for item in list {
let id = item["backup-id"].as_str().unwrap();
let path = snapshot.relative_path().to_str().unwrap().to_owned();
- let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_chunked_file_expenstions(files);
+ let files = item["files"].as_array().unwrap().iter()
+ .map(|v| strip_server_file_extension(v.as_str().unwrap())).collect();
- println!("{} | {} | {}", path, snapshot.backup_time().format("%c"), tools::join(&files, ' '));
+ if output_format == "text" {
+ let size_str = if let Some(size) = item["size"].as_u64() {
+ size.to_string()
+ } else {
+ String::from("-")
+ };
+ println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
+ } else {
+ let mut data = json!({
+ "backup-type": btype,
+ "backup-id": id,
+ "backup-time": epoch,
+ "files": files,
+ });
+ if let Some(size) = item["size"].as_u64() {
+ data["size"] = size.into();
+ }
+ result.push(data);
+ }
}
+ if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
+
Ok(Value::Null)
}
fn forget_snapshots(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot = BackupDir::parse(path)?;
- let query = tools::json_object_to_query(json!({
+ let mut client = HttpClient::new(repo.host(), repo.user())?;
+
+ let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
+
+ let result = client.delete(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time().timestamp(),
- }))?;
+ }))).wait()?;
- let mut client = HttpClient::new(repo.host(), repo.user());
+ record_repository(&repo);
- let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
+ Ok(result)
+}
- let result = client.delete(&path)?;
+fn list_snapshot_files(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let repo = extract_repository_from_value(&param)?;
+
+ let path = tools::required_string_param(&param, "snapshot")?;
+ let snapshot = BackupDir::parse(path)?;
+
+ let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+
+ let client = HttpClient::new(repo.host(), repo.user())?;
+
+ let path = format!("api2/json/admin/datastore/{}/files", repo.store());
+
+ let mut result = client.get(&path, Some(json!({
+ "backup-type": snapshot.group().backup_type(),
+ "backup-id": snapshot.group().backup_id(),
+ "backup-time": snapshot.backup_time().timestamp(),
+ }))).wait()?;
record_repository(&repo);
- Ok(result)
+ let list: Value = result["data"].take();
+
+ if output_format == "text" {
+ for item in list.as_array().unwrap().iter() {
+ println!(
+ "{} {}",
+ strip_server_file_extension(item["filename"].as_str().unwrap()),
+ item["size"].as_u64().unwrap_or(0),
+ );
+ }
+ } else {
+ format_and_print_result(&list, &output_format);
+ }
+
+ Ok(Value::Null)
}
fn start_garbage_collection(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let mut client = HttpClient::new(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
- let result = client.post(&path)?;
+ let result = client.post(&path, None).wait()?;
record_repository(&repo);
fn create_backup(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
+ let repo = extract_repository_from_value(&param)?;
let backupspec_list = tools::required_array_param(&param, "backupspec")?;
- let repo: BackupRepository = repo_url.parse()?;
-
let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
+ let skip_lost_and_found = param["skip-lost-and-found"].as_bool().unwrap_or(false);
+
let verbose = param["verbose"].as_bool().unwrap_or(false);
- let chunk_size_opt = param["chunk-size"].as_u64().map(|v| v*1024);
+ let backup_time_opt = param["backup-time"].as_i64();
+
+ let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
if let Some(size) = chunk_size_opt {
verify_chunk_size(size)?;
}
- let backup_id = param["host-id"].as_str().unwrap_or(&tools::nodename());
+ let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
+
+ let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
+
+ let backup_type = param["backup-type"].as_str().unwrap_or("host");
+
+ let include_dev = param["include-dev"].as_array();
+
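+ // device set: None means no restriction (cross all file systems),
+ // Some(set) limits the archive to the given st_dev numbers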
+ let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
+
+ if let Some(include_dev) = include_dev {
+ if all_file_systems {
+ bail!("option 'all-file-systems' conflicts with option 'include-dev'");
+ }
+
+ let mut set = HashSet::new();
+ for path in include_dev {
+ let path = path.as_str().unwrap();
+ let stat = nix::sys::stat::stat(path)
+ .map_err(|err| format_err!("fstat {:?} failed - {}", path, err))?;
+ set.insert(stat.st_dev);
+ }
+ devices = Some(set);
+ }
let mut upload_list = vec![];
+ enum BackupType { Pxar, Image, Config, Logfile }
+
for backupspec in backupspec_list {
let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
- let stat = match nix::sys::stat::stat(filename) {
- Ok(s) => s,
+ use std::os::unix::fs::FileTypeExt;
+
+ let metadata = match std::fs::metadata(filename) {
+ Ok(m) => m,
Err(err) => bail!("unable to access '{}' - {}", filename, err),
};
+ let file_type = metadata.file_type();
- if (stat.st_mode & libc::S_IFDIR) != 0 {
-
- upload_list.push((filename.to_owned(), target.to_owned()));
+ let extension = target.rsplit('.').next()
+ .ok_or_else(|| format_err!("missing target file extension '{}'", target))?;
- } else if (stat.st_mode & (libc::S_IFREG|libc::S_IFBLK)) != 0 {
- if stat.st_size <= 0 { bail!("got strange file size '{}'", stat.st_size); }
- let _size = stat.st_size as usize;
+ match extension {
+ "pxar" => {
+ if !file_type.is_dir() {
+ bail!("got unexpected file type (expected directory)");
+ }
+ upload_list.push((BackupType::Pxar, filename.to_owned(), format!("{}.didx", target), 0));
+ }
+ "img" => {
- panic!("implement me");
+ if !(file_type.is_file() || file_type.is_block_device()) {
+ bail!("got unexpected file type (expected file or block device)");
+ }
- //backup_image(&datastore, &file, size, &target, chunk_size)?;
+ let size = image_size(&PathBuf::from(filename))?;
- // let idx = datastore.open_image_reader(target)?;
- // idx.print_info();
+ if size == 0 { bail!("got zero-sized file '{}'", filename); }
- } else {
- bail!("unsupported file type (expected a directory, file or block device)");
+ upload_list.push((BackupType::Image, filename.to_owned(), format!("{}.fidx", target), size));
+ }
+ "conf" => {
+ if !file_type.is_file() {
+ bail!("got unexpected file type (expected regular file)");
+ }
+ upload_list.push((BackupType::Config, filename.to_owned(), format!("{}.blob", target), metadata.len()));
+ }
+ "log" => {
+ if !file_type.is_file() {
+ bail!("got unexpected file type (expected regular file)");
+ }
+ upload_list.push((BackupType::Logfile, filename.to_owned(), format!("{}.blob", target), metadata.len()));
+ }
+ _ => {
+ bail!("got unknown archive extension '{}'", extension);
+ }
}
}
- let backup_time = Local.timestamp(Local::now().timestamp(), 0);
+ let backup_time = Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0);
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let client = HttpClient::new(repo.host(), repo.user())?;
+ record_repository(&repo);
- client.login()?; // login before starting backup
+ println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
- record_repository(&repo);
+ println!("Client name: {}", proxmox::tools::nodename());
+
+ let start_time = Local::now();
- println!("Starting backup");
- println!("Client name: {}", tools::nodename());
- println!("Start Time: {}", backup_time.to_rfc3339());
+ println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+
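+ // load the optional encryption key; if a master public key is
+ // configured, also keep an RSA-encrypted copy of the key so it can
+ // be recovered with the master private key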
+ let (crypt_config, rsa_encrypted_key) = match keyfile {
+ None => (None, None),
+ Some(path) => {
+ let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+
+ let crypt_config = CryptConfig::new(key)?;
+
+ let path = master_pubkey_path()?;
+ if path.exists() {
+ let pem_data = file_get_contents(&path)?;
+ let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
+ let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
+ (Some(Arc::new(crypt_config)), Some(enc_key))
+ } else {
+ (Some(Arc::new(crypt_config)), None)
+ }
+ }
+ };
+
+ let client = client.start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose).wait()?;
+
+ let mut file_list = vec![];
+
+ for (backup_type, filename, target, size) in upload_list {
+ match backup_type {
+ BackupType::Config => {
+ println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
+ file_list.push((target, stats));
+ }
+ BackupType::Logfile => { // fixme: remove - not needed anymore ?
+ println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
+ file_list.push((target, stats));
+ }
+ BackupType::Pxar => {
+ println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = backup_directory(
+ &client,
+ &filename,
+ &target,
+ chunk_size_opt,
+ devices.clone(),
+ verbose,
+ skip_lost_and_found,
+ crypt_config.clone(),
+ )?;
+ file_list.push((target, stats));
+ }
+ BackupType::Image => {
+ println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = backup_image(
+ &client,
+ &filename,
+ &target,
+ size,
+ chunk_size_opt,
+ verbose,
+ crypt_config.clone(),
+ )?;
+ file_list.push((target, stats));
+ }
+ }
+ }
- for (filename, target) in upload_list {
- println!("Upload '{}' to '{:?}' as {}", filename, repo, target);
- backup_directory(&mut client, &repo, &filename, &target, backup_id, backup_time,
- chunk_size_opt, all_file_systems, verbose)?;
+ if let Some(rsa_encrypted_key) = rsa_encrypted_key {
+ let target = "rsa-encrypted.key";
+ println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
+ let stats = client.upload_blob_from_data(rsa_encrypted_key, target, None, false, false).wait()?;
+ file_list.push((format!("{}.blob", target), stats));
+
+ // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
+ /*
+ let mut buffer2 = vec![0u8; rsa.size() as usize];
+ let pem_data = file_get_contents("master-private.pem")?;
+ let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
+ let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
+ println!("TEST {} {:?}", len, buffer2);
+ */
}
- let end_time = Local.timestamp(Local::now().timestamp(), 0);
- let elapsed = end_time.signed_duration_since(backup_time);
+ // create index.json
+ let file_list: Vec<Value> = file_list.iter()
+ .map(|(filename, stats)| json!({
+ "filename": filename,
+ "size": stats.size,
+ "csum": proxmox::tools::digest_to_hex(&stats.csum),
+ }))
+ .collect();
+
+ let index = json!({
+ "backup-type": backup_type,
+ "backup-id": backup_id,
+ "backup-time": backup_time.timestamp(),
+ "files": file_list,
+ });
+
+ println!("Upload index.json to '{:?}'", repo);
+ let index_data = serde_json::to_string_pretty(&index)?.into();
+ client.upload_blob_from_data(index_data, "index.json.blob", crypt_config.clone(), true, true).wait()?;
+
+ client.finish().wait()?;
+
+ let end_time = Local::now();
+ let elapsed = end_time.signed_duration_since(start_time);
println!("Duration: {}", elapsed);
- println!("End Time: {}", end_time.to_rfc3339());
+ println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
Ok(Value::Null)
}
fn restore(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
- let archive_name = tools::required_string_param(&param, "archive-name")?;
+ let verbose = param["verbose"].as_bool().unwrap_or(false);
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let allow_existing_dirs = param["allow-existing-dirs"].as_bool().unwrap_or(false);
- client.login()?; // login before starting
+ let archive_name = tools::required_string_param(&param, "archive-name")?;
+
+ let client = HttpClient::new(repo.host(), repo.user())?;
record_repository(&repo);
let path = tools::required_string_param(&param, "snapshot")?;
- let query;
-
- if path.matches('/').count() == 1 {
+ let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group = BackupGroup::parse(path)?;
- let subquery = tools::json_object_to_query(json!({
+ let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
+ let result = client.get(&path, Some(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
- }))?;
-
- let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), subquery);
- let result = client.get(&path)?;
+ }))).wait()?;
let list = result["data"].as_array().unwrap();
if list.len() == 0 {
bail!("backup group '{}' does not contain any snapshots:", path);
}
- query = tools::json_object_to_query(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- "backup-time": list[0]["backup-time"].as_i64().unwrap(),
- "archive-name": archive_name,
- }))?;
+ let epoch = list[0]["backup-time"].as_i64().unwrap();
+ let backup_time = Utc.timestamp(epoch, 0);
+ (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
} else {
let snapshot = BackupDir::parse(path)?;
-
- query = tools::json_object_to_query(json!({
- "backup-type": snapshot.group().backup_type(),
- "backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
- "archive-name": archive_name,
- }))?;
- }
+ (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
+ };
let target = tools::required_string_param(&param, "target")?;
+ let target = if target == "-" { None } else { Some(target) };
+
+ let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
+
+ let crypt_config = match keyfile {
+ None => None,
+ Some(path) => {
+ let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+ Some(Arc::new(CryptConfig::new(key)?))
+ }
+ };
+
+ let server_archive_name = if archive_name.ends_with(".pxar") {
+ format!("{}.didx", archive_name)
+ } else if archive_name.ends_with(".img") {
+ format!("{}.fidx", archive_name)
+ } else {
+ format!("{}.blob", archive_name)
+ };
+
+ let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;
+
+ use std::os::unix::fs::OpenOptionsExt;
+
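+ // create an anonymous temporary file for downloaded indexes
+ // (O_TMPFILE keeps it unlinked, so it vanishes when dropped)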
+ let tmpfile = std::fs::OpenOptions::new()
+ .write(true)
+ .read(true)
+ .custom_flags(libc::O_TMPFILE)
+ .open("/tmp")?;
+
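+ // blobs are downloaded as a whole and decoded in memory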
+ if server_archive_name.ends_with(".blob") {
+
+ let writer = Vec::with_capacity(1024*1024);
+ let blob_data = client.download(&server_archive_name, writer).wait()?;
+ let blob = DataBlob::from_raw(blob_data)?;
+ blob.verify_crc()?;
+
+ let raw_data = match crypt_config {
+ Some(ref crypt_config) => blob.decode(Some(crypt_config))?,
+ None => blob.decode(None)?,
+ };
+
+ if let Some(target) = target {
+ file_set_contents(target, &raw_data, None)?;
+ } else {
+ let stdout = std::io::stdout();
+ let mut writer = stdout.lock();
+ writer.write_all(&raw_data)
+ .map_err(|err| format_err!("unable to pipe data - {}", err))?;
+ }
+
+ } else if server_archive_name.ends_with(".didx") {
+ let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+
+ let index = DynamicIndexReader::new(tmpfile)
+ .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;
+
+ let most_used = index.find_most_used_chunks(8);
+
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+
+ let mut reader = BufferedDynamicReader::new(index, chunk_reader);
+
+ if let Some(target) = target {
+
+ let feature_flags = pxar::flags::DEFAULT;
+ let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
+ if verbose {
+ println!("{:?}", path);
+ }
+ Ok(())
+ });
+ decoder.set_allow_existing_dirs(allow_existing_dirs);
+
+ decoder.restore(Path::new(target), &Vec::new())?;
+ } else {
+ let stdout = std::io::stdout();
+ let mut writer = stdout.lock();
+
+ std::io::copy(&mut reader, &mut writer)
+ .map_err(|err| format_err!("unable to pipe data - {}", err))?;
+ }
+ } else if server_archive_name.ends_with(".fidx") {
+ let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+
+ let index = FixedIndexReader::new(tmpfile)
+ .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
+
+ let most_used = index.find_most_used_chunks(8);
- if archive_name.ends_with(".pxar") {
- let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
- println!("DOWNLOAD FILE {} to {}", path, target);
+ let mut reader = BufferedFixedReader::new(index, chunk_reader);
- let target = PathBuf::from(target);
- let writer = PxarBackupWriter::new(&target, true)?;
- client.download(&path, Box::new(writer))?;
+ if let Some(target) = target {
+ let mut writer = std::fs::OpenOptions::new()
+ .write(true)
+ .create(true)
+ .create_new(true)
+ .open(target)
+ .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
+
+ std::io::copy(&mut reader, &mut writer)
+ .map_err(|err| format_err!("unable to store data - {}", err))?;
+ } else {
+ let stdout = std::io::stdout();
+ let mut writer = stdout.lock();
+
+ std::io::copy(&mut reader, &mut writer)
+ .map_err(|err| format_err!("unable to pipe data - {}", err))?;
+ }
} else {
- bail!("unknown file extensions - unable to download '{}'", archive_name);
+ bail!("unknown archive file extension (expected .pxar of .img)");
}
Ok(Value::Null)
}
+fn upload_log(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let logfile = tools::required_string_param(&param, "logfile")?;
+ let repo = extract_repository_from_value(&param)?;
+
+ let snapshot = tools::required_string_param(&param, "snapshot")?;
+ let snapshot = BackupDir::parse(snapshot)?;
+
+ let mut client = HttpClient::new(repo.host(), repo.user())?;
+
+ let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
+
+ let crypt_config = match keyfile {
+ None => None,
+ Some(path) => {
+ let (key, _created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+ let crypt_config = CryptConfig::new(key)?;
+ Some(crypt_config)
+ }
+ };
+
+ let data = file_get_contents(logfile)?;
+
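+ // wrap the log in a data blob, encrypted if a key was given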
+ let blob = if let Some(ref crypt_config) = crypt_config {
+ DataBlob::encode(&data, Some(crypt_config), true)?
+ } else {
+ DataBlob::encode(&data, None, true)?
+ };
+
+ let raw_data = blob.into_inner();
+
+ let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
+
+ let args = json!({
+ "backup-type": snapshot.group().backup_type(),
+ "backup-id": snapshot.group().backup_id(),
+ "backup-time": snapshot.backup_time().timestamp(),
+ });
+
+ let body = hyper::Body::from(raw_data);
+
+ let result = client.upload("application/octet-stream", body, &path, Some(args)).wait()?;
+
+ Ok(result)
+}
+
fn prune(
mut param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let mut client = HttpClient::new(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
+ let group = tools::required_string_param(&param, "group")?;
+ let group = BackupGroup::parse(group)?;
+
param.as_object_mut().unwrap().remove("repository");
+ param.as_object_mut().unwrap().remove("group");
- let result = client.post_json(&path, param)?;
+ param["backup-type"] = group.backup_type().into();
+ param["backup-id"] = group.backup_id().into();
+
+ let _result = client.post(&path, Some(param)).wait()?;
record_repository(&repo);
- Ok(result)
+ Ok(Value::Null)
+}
+
+fn status(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let repo = extract_repository_from_value(&param)?;
+
+ let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+
+ let client = HttpClient::new(repo.host(), repo.user())?;
+
+ let path = format!("api2/json/admin/datastore/{}/status", repo.store());
+
+ let result = client.get(&path, None).wait()?;
+ let data = &result["data"];
+
+ record_repository(&repo);
+
+ if output_format == "text" {
+ let total = data["total"].as_u64().unwrap();
+ let used = data["used"].as_u64().unwrap();
+ let avail = data["avail"].as_u64().unwrap();
+ let roundup = total/200;
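+ // total/200 is 0.5%, added so the usage percentage is rounded
+ // to the nearest integer instead of truncated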
+
+ println!(
+ "total: {} used: {} ({} %) available: {}",
+ total,
+ used,
+ ((used+roundup)*100)/total,
+ avail,
+ );
+ } else {
+ format_and_print_result(data, &output_format);
+ }
+
+ Ok(Value::Null)
}
+// like get, but simply ignore errors and return Null instead
fn try_get(repo: &BackupRepository, url: &str) -> Value {
- let mut client = HttpClient::new(repo.host(), repo.user());
+ let client = match HttpClient::new(repo.host(), repo.user()) {
+ Ok(v) => v,
+ _ => return Value::Null,
+ };
- let mut resp = match client.try_get(url) {
+ let mut resp = match client.get(url, None).wait() {
Ok(v) => v,
_ => return Value::Null,
};
Value::Null
}
-fn extract_repo(param: &HashMap<String, String>) -> Option<BackupRepository> {
-
- let repo_url = match param.get("repository") {
- Some(v) => v,
- _ => return None,
- };
-
- let repo: BackupRepository = match repo_url.parse() {
- Ok(v) => v,
- _ => return None,
- };
-
- Some(repo)
-}
-
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
- let repo = match extract_repo(param) {
+ let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let mut result = vec![];
- let repo = match extract_repo(param) {
+ let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
result
}
-fn complete_archive_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
- let repo = match extract_repo(param) {
+ let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
if let Some(list) = data.as_array() {
for item in list {
- if let Some(filename) = item.as_str() {
+ if let Some(filename) = item["filename"].as_str() {
result.push(filename.to_owned());
}
}
}
- strip_chunked_file_expenstions(result)
+ result
+}
+
+fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+
+ complete_server_file_name(arg, param)
+ .iter().map(|v| strip_server_file_extension(&v)).collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
result
}
-fn main() {
+fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
- let repo_url_schema: Arc<Schema> = Arc::new(
- StringSchema::new("Repository URL.")
- .format(BACKUP_REPO_URL.clone())
- .max_length(256)
+ // fixme: implement other input methods
+
+ use std::env::VarError::*;
+ match std::env::var("PBS_ENCRYPTION_PASSWORD") {
+ Ok(p) => return Ok(p.as_bytes().to_vec()),
+ Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
+ Err(NotPresent) => {
+ // Try another method
+ }
+ }
+
+ // If we're on a TTY, query the user for a password
+ if crate::tools::tty::stdin_isatty() {
+ return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
+ }
+
+ bail!("no password input mechanism available");
+}
+
+fn key_create(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let path = tools::required_string_param(&param, "path")?;
+ let path = PathBuf::from(path);
+
+ let kdf = param["kdf"].as_str().unwrap_or("scrypt");
+
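+ // generate a new random 32 byte (256 bit) encryption key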
+ let key = proxmox::sys::linux::random_data(32)?;
+
+ if kdf == "scrypt" {
+ // always read passphrase from tty
+ if !crate::tools::tty::stdin_isatty() {
+ bail!("unable to read passphrase - no tty");
+ }
+
+ let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
+
+ let key_config = encrypt_key_with_passphrase(&key, &password)?;
+
+ store_key_config(&path, false, key_config)?;
+
+ Ok(Value::Null)
+ } else if kdf == "none" {
+ let created = Local.timestamp(Local::now().timestamp(), 0);
+
+ store_key_config(&path, false, KeyConfig {
+ kdf: None,
+ created,
+ modified: created,
+ data: key,
+ })?;
+
+ Ok(Value::Null)
+ } else {
+ unreachable!();
+ }
+}
+
+fn master_pubkey_path() -> Result<PathBuf, Error> {
+ let base = BaseDirectories::with_prefix("proxmox-backup")?;
+
+ // usually $HOME/.config/proxmox-backup/master-public.pem
+ let path = base.place_config_file("master-public.pem")?;
+
+ Ok(path)
+}
+
+fn key_import_master_pubkey(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let path = tools::required_string_param(&param, "path")?;
+ let path = PathBuf::from(path);
+
+ let pem_data = file_get_contents(&path)?;
+
+ if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
+ bail!("Unable to decode PEM data - {}", err);
+ }
+
+ let target_path = master_pubkey_path()?;
+
+ file_set_contents(&target_path, &pem_data, None)?;
+
+ println!("Imported public master key to {:?}", target_path);
+
+ Ok(Value::Null)
+}
+
+fn key_create_master_key(
+ _param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ // we need a TTY to query the new password
+ if !crate::tools::tty::stdin_isatty() {
+ bail!("unable to create master key - no tty");
+ }
+
+ let rsa = openssl::rsa::Rsa::generate(4096)?;
+ let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
+
+ let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
+ let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
+
+ if new_pw != verify_pw {
+ bail!("Password verification fail!");
+ }
+
+ if new_pw.len() < 5 {
+ bail!("Password is too short!");
+ }
+
+ let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
+ let filename_pub = "master-public.pem";
+ println!("Writing public master key to {}", filename_pub);
+ file_set_contents(filename_pub, pub_key.as_slice(), None)?;
+
+ let cipher = openssl::symm::Cipher::aes_256_cbc();
+ let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;
+
+ let filename_priv = "master-private.pem";
+ println!("Writing private master key to {}", filename_priv);
+ file_set_contents(filename_priv, priv_key.as_slice(), None)?;
+
+ Ok(Value::Null)
+}
+
+fn key_change_passphrase(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let path = tools::required_string_param(&param, "path")?;
+ let path = PathBuf::from(path);
+
+ let kdf = param["kdf"].as_str().unwrap_or("scrypt");
+
+ // we need a TTY to query the new password
+ if !crate::tools::tty::stdin_isatty() {
+ bail!("unable to change passphrase - no tty");
+ }
+
+ let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+
+ if kdf == "scrypt" {
+
+ let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
+ let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
+
+ if new_pw != verify_pw {
+ bail!("Password verification fail!");
+ }
+
+ if new_pw.len() < 5 {
+ bail!("Password is too short!");
+ }
+
+ let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
+ new_key_config.created = created; // keep original value
+
+ store_key_config(&path, true, new_key_config)?;
+
+ Ok(Value::Null)
+ } else if kdf == "none" {
+ let modified = Local.timestamp(Local::now().timestamp(), 0);
+
+ store_key_config(&path, true, KeyConfig {
+ kdf: None,
+ created, // keep original value
+ modified,
+ data: key.to_vec(),
+ })?;
+
+ Ok(Value::Null)
+ } else {
+ unreachable!();
+ }
+}
+
+fn key_mgmt_cli() -> CliCommandMap {
+
+ let kdf_schema: Arc<Schema> = Arc::new(
+ StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
+ .format(Arc::new(ApiStringFormat::Enum(&["scrypt", "none"])))
+ .default("scrypt")
.into()
);
+ let key_create_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ key_create,
+ ObjectSchema::new("Create a new encryption key.")
+ .required("path", StringSchema::new("File system path."))
+ .optional("kdf", kdf_schema.clone())
+ ))
+ .arg_param(vec!["path"])
+ .completion_cb("path", tools::complete_file_name);
+
+ let key_change_passphrase_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ key_change_passphrase,
+ ObjectSchema::new("Change the passphrase required to decrypt the key.")
+ .required("path", StringSchema::new("File system path."))
+ .optional("kdf", kdf_schema.clone())
+ ))
+ .arg_param(vec!["path"])
+ .completion_cb("path", tools::complete_file_name);
+
+ let key_create_master_key_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ key_create_master_key,
+ ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.")
+ ));
+
+ let key_import_master_pubkey_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ key_import_master_pubkey,
+ ObjectSchema::new("Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.")
+ .required("path", StringSchema::new("File system path."))
+ ))
+ .arg_param(vec!["path"])
+ .completion_cb("path", tools::complete_file_name);
+
+ let cmd_def = CliCommandMap::new()
+ .insert("create".to_owned(), key_create_cmd_def.into())
+ .insert("create-master-key".to_owned(), key_create_master_key_cmd_def.into())
+ .insert("import-master-pubkey".to_owned(), key_import_master_pubkey_cmd_def.into())
+ .insert("change-passphrase".to_owned(), key_change_passphrase_cmd_def.into());
+
+ cmd_def
+}
+
+fn main() {
+
let backup_source_schema: Arc<Schema> = Arc::new(
StringSchema::new("Backup source specification ([<label>:<path>]).")
.format(Arc::new(ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)))
ApiMethod::new(
create_backup,
ObjectSchema::new("Create (host) backup.")
- .required("repository", repo_url_schema.clone())
.required(
"backupspec",
ArraySchema::new(
backup_source_schema,
).min_length(1)
)
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional(
+ "include-dev",
+ ArraySchema::new(
+ "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
+ StringSchema::new("Path to file.").into()
+ )
+ )
+ .optional(
+ "keyfile",
+ StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
.optional(
"verbose",
BooleanSchema::new("Verbose output.").default(false))
.optional(
- "host-id",
- StringSchema::new("Use specified ID for the backup group name ('host/<id>'). The default is the system hostname."))
+ "skip-lost-and-found",
+ BooleanSchema::new("Skip lost+found directory").default(false))
+ .optional(
+ "backup-type",
+ BACKUP_TYPE_SCHEMA.clone()
+ )
+ .optional(
+ "backup-id",
+ BACKUP_ID_SCHEMA.clone()
+ )
+ .optional(
+ "backup-time",
+ BACKUP_TIME_SCHEMA.clone()
+ )
.optional(
"chunk-size",
IntegerSchema::new("Chunk size in KB. Must be a power of 2.")
.default(4096)
)
))
- .arg_param(vec!["repository", "backupspec"])
+ .arg_param(vec!["backupspec"])
.completion_cb("repository", complete_repository)
.completion_cb("backupspec", complete_backup_source)
+ .completion_cb("keyfile", tools::complete_file_name)
.completion_cb("chunk-size", complete_chunk_size);
+ let upload_log_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ upload_log,
+ ObjectSchema::new("Upload backup log file.")
+ .required("snapshot", StringSchema::new("Snapshot path."))
+ .required("logfile", StringSchema::new("The path to the log file you want to upload."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional(
+ "keyfile",
+ StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
+ ))
+ .arg_param(vec!["snapshot", "logfile"])
+ .completion_cb("snapshot", complete_group_or_snapshot)
+ .completion_cb("logfile", tools::complete_file_name)
+ .completion_cb("keyfile", tools::complete_file_name)
+ .completion_cb("repository", complete_repository);
+
let list_cmd_def = CliCommand::new(
ApiMethod::new(
list_backup_groups,
ObjectSchema::new("List backup groups.")
- .required("repository", repo_url_schema.clone())
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("output-format", OUTPUT_FORMAT.clone())
))
- .arg_param(vec!["repository"])
.completion_cb("repository", complete_repository);
let snapshots_cmd_def = CliCommand::new(
ApiMethod::new(
list_snapshots,
ObjectSchema::new("List backup snapshots.")
- .required("repository", repo_url_schema.clone())
- .required("group", StringSchema::new("Backup group."))
+ .optional("group", StringSchema::new("Backup group."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("output-format", OUTPUT_FORMAT.clone())
))
- .arg_param(vec!["repository", "group"])
+ .arg_param(vec!["group"])
.completion_cb("group", complete_backup_group)
.completion_cb("repository", complete_repository);
ApiMethod::new(
forget_snapshots,
ObjectSchema::new("Forget (remove) backup snapshots.")
- .required("repository", repo_url_schema.clone())
.required("snapshot", StringSchema::new("Snapshot path."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
))
- .arg_param(vec!["repository", "snapshot"])
+ .arg_param(vec!["snapshot"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot);
ApiMethod::new(
start_garbage_collection,
ObjectSchema::new("Start garbage collection for a specific repository.")
- .required("repository", repo_url_schema.clone())
+ .optional("repository", REPO_URL_SCHEMA.clone())
))
- .arg_param(vec!["repository"])
.completion_cb("repository", complete_repository);
let restore_cmd_def = CliCommand::new(
ApiMethod::new(
restore,
ObjectSchema::new("Restore backup repository.")
- .required("repository", repo_url_schema.clone())
.required("snapshot", StringSchema::new("Group/Snapshot path."))
.required("archive-name", StringSchema::new("Backup archive name."))
- .required("target", StringSchema::new("Target directory path."))
+ .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to stdandard output.
+
+We do not extract '.pxar' archives when writing to standard output.
+
+"###
+ ))
+ .optional(
+ "allow-existing-dirs",
+ BooleanSchema::new("Do not fail if directories already exists.").default(false))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("keyfile", StringSchema::new("Path to encryption key."))
+ .optional(
+ "verbose",
+ BooleanSchema::new("Verbose output.").default(false)
+ )
))
- .arg_param(vec!["repository", "snapshot", "archive-name", "target"])
+ .arg_param(vec!["snapshot", "archive-name", "target"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_archive_name)
.completion_cb("target", tools::complete_file_name);
+ let files_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ list_snapshot_files,
+ ObjectSchema::new("List snapshot files.")
+ .required("snapshot", StringSchema::new("Snapshot path."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("output-format", OUTPUT_FORMAT.clone())
+ ))
+ .arg_param(vec!["snapshot"])
+ .completion_cb("repository", complete_repository)
+ .completion_cb("snapshot", complete_group_or_snapshot);
+
let prune_cmd_def = CliCommand::new(
ApiMethod::new(
prune,
proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
ObjectSchema::new("Prune backup repository.")
- .required("repository", repo_url_schema.clone())
+ .required("group", StringSchema::new("Backup group."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
)
))
- .arg_param(vec!["repository"])
+ .arg_param(vec!["group"])
+ .completion_cb("group", complete_backup_group)
+ .completion_cb("repository", complete_repository);
+
+ let status_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ status,
+ ObjectSchema::new("Get repository status.")
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("output-format", OUTPUT_FORMAT.clone())
+ ))
.completion_cb("repository", complete_repository);
let cmd_def = CliCommandMap::new()
.insert("backup".to_owned(), backup_cmd_def.into())
+ .insert("upload-log".to_owned(), upload_log_cmd_def.into())
.insert("forget".to_owned(), forget_cmd_def.into())
.insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
.insert("list".to_owned(), list_cmd_def.into())
.insert("prune".to_owned(), prune_cmd_def.into())
.insert("restore".to_owned(), restore_cmd_def.into())
- .insert("snapshots".to_owned(), snapshots_cmd_def.into());
-
- run_cli_command(cmd_def.into());
+ .insert("snapshots".to_owned(), snapshots_cmd_def.into())
+ .insert("files".to_owned(), files_cmd_def.into())
+ .insert("status".to_owned(), status_cmd_def.into())
+ .insert("key".to_owned(), key_mgmt_cli().into());
+
+ hyper::rt::run(futures::future::lazy(move || {
+ run_cli_command(cmd_def.into());
+ Ok(())
+ }));
}