-use failure::*;
-use nix::unistd::{fork, ForkResult, pipe};
-use std::os::unix::io::RawFd;
-use chrono::{Local, DateTime, Utc, TimeZone};
-use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
-use std::ffi::OsStr;
-use std::io::{Write, Seek, SeekFrom};
-use std::os::unix::fs::OpenOptionsExt;
-
-use proxmox::{sortable, identity};
-use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
-use proxmox::sys::linux::tty;
-use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
-use proxmox::api::schema::*;
-use proxmox::api::cli::*;
-use proxmox::api::api;
-
-use proxmox_backup::tools;
-use proxmox_backup::api2::types::*;
-use proxmox_backup::client::*;
-use proxmox_backup::backup::*;
-use proxmox_backup::pxar::{ self, catalog::* };
-
-//use proxmox_backup::backup::image_index::*;
-//use proxmox_backup::config::datastore;
-//use proxmox_backup::pxar::encoder::*;
-//use proxmox_backup::backup::datastore::*;
+use std::convert::TryFrom;
+use std::io::{self, Read, Write, Seek, SeekFrom};
+use std::os::unix::io::{FromRawFd, RawFd};
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::Context;
+use anyhow::{bail, format_err, Error};
+use futures::future::FutureExt;
+use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
-//use hyper::Body;
-use std::sync::{Arc, Mutex};
-//use regex::Regex;
+use tokio::sync::mpsc;
use xdg::BaseDirectories;
-use futures::*;
-use tokio::sync::mpsc;
+use pathpatterns::{MatchEntry, MatchType, PatternFlag};
+use proxmox::{
+ tools::{
+ time::{strftime_local, epoch_i64},
+ fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
+ },
+ api::{
+ api,
+ ApiHandler,
+ ApiMethod,
+ RpcEnvironment,
+ schema::*,
+ cli::*,
+ },
+};
+use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
+
+use proxmox_backup::tools;
+use proxmox_backup::api2::types::*;
+use proxmox_backup::api2::version;
+use proxmox_backup::client::*;
+use proxmox_backup::pxar::catalog::*;
+use proxmox_backup::config::user::complete_user_name;
+use proxmox_backup::backup::{
+ archive_type,
+ decrypt_key,
+ verify_chunk_size,
+ ArchiveType,
+ AsyncReadChunk,
+ BackupDir,
+ BackupGroup,
+ BackupManifest,
+ BufferedDynamicReader,
+ CATALOG_NAME,
+ CatalogReader,
+ CatalogWriter,
+ ChunkStream,
+ CryptConfig,
+ CryptMode,
+ DataBlob,
+ DynamicIndexReader,
+ FixedChunkStream,
+ FixedIndexReader,
+ IndexFile,
+ MANIFEST_BLOB_NAME,
+ Shell,
+};
+
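+// Key management, mount and catalog-shell subcommands were split out into this module.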
+mod proxmox_backup_client;
+use proxmox_backup_client::*;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
-proxmox::const_regex! {
- BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
-}
-const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
+pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.format(&BACKUP_REPO_URL)
.max_length(256)
.schema();
-const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
- "Backup source specification ([<label>:<path>]).")
- .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
+pub const KEYFILE_SCHEMA: Schema = StringSchema::new(
+ "Path to encryption key. All data will be encrypted using this key.")
.schema();
-const KEYFILE_SCHEMA: Schema = StringSchema::new(
- "Path to encryption key. All data will be encrypted using this key.")
+pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
+ "Pass an encryption key via an already opened file descriptor.")
+ .minimum(0)
.schema();
const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
std::env::var("PBS_REPOSITORY").ok()
}
-fn extract_repository_from_value(
+pub fn extract_repository_from_value(
param: &Value,
) -> Result<BackupRepository, Error> {
let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
-fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
result
}
-fn render_backup_file_list(files: &[String]) -> String {
- let mut files: Vec<String> = files.iter()
- .map(|v| strip_server_file_expenstion(&v))
- .collect();
-
- files.sort();
-
- tools::join(&files, ' ')
-}
-
-fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
+fn connect(server: &str, port: u16, userid: &Userid) -> Result<HttpClient, Error> {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
.fingerprint_cache(true)
.ticket_cache(true);
- HttpClient::new(server, userid, options)
+ HttpClient::new(server, port, userid, options)
}
async fn view_task_result(
Ok(result["data"].take())
}
-async fn api_datastore_latest_snapshot(
+pub async fn api_datastore_latest_snapshot(
client: &HttpClient,
store: &str,
group: BackupGroup,
-) -> Result<(String, String, DateTime<Utc>), Error> {
+) -> Result<(String, String, i64), Error> {
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
- let backup_time = Utc.timestamp(list[0].backup_time, 0);
+ let backup_time = list[0].backup_time;
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
-
async fn backup_directory<P: AsRef<Path>>(
client: &BackupWriter,
+ previous_manifest: Option<Arc<BackupManifest>>,
dir_path: P,
archive_name: &str,
chunk_size: Option<usize>,
device_set: Option<HashSet<u64>>,
verbose: bool,
skip_lost_and_found: bool,
- crypt_config: Option<Arc<CryptConfig>>,
catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
+ exclude_pattern: Vec<MatchEntry>,
entries_max: usize,
+ compress: bool,
+ encrypt: bool,
) -> Result<BackupStats, Error> {
let pxar_stream = PxarBackupStream::open(
verbose,
skip_lost_and_found,
catalog,
+ exclude_pattern,
entries_max,
)?;
let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
});
let stats = client
- .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
+ .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
.await?;
Ok(stats)
async fn backup_image<P: AsRef<Path>>(
client: &BackupWriter,
+ previous_manifest: Option<Arc<BackupManifest>>,
image_path: P,
archive_name: &str,
image_size: u64,
chunk_size: Option<usize>,
+ compress: bool,
+ encrypt: bool,
_verbose: bool,
- crypt_config: Option<Arc<CryptConfig>>,
) -> Result<BackupStats, Error> {
let path = image_path.as_ref().to_owned();
let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
let stats = client
- .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
+ .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
.await?;
Ok(stats)
}
-fn strip_server_file_expenstion(name: &str) -> String {
-
- if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
- name[..name.len()-5].to_owned()
- } else {
- name.to_owned() // should not happen
- }
-}
-
#[api(
input: {
properties: {
let repo = extract_repository_from_value(¶m)?;
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
Ok(group.group_path().to_str().unwrap().to_owned())
};
- let render_backup_timestamp = |v: &Value, _record: &Value| -> Result<String, Error> {
- let epoch = v.as_i64().unwrap();
- let last_backup = Utc.timestamp(epoch, 0);
- Ok(BackupDir::backup_time_to_string(last_backup))
+ let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
+ let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
+ Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- Ok(render_backup_file_list(&item.files))
+ Ok(tools::format::render_backup_file_list(&item.files))
};
let options = default_table_format_options()
.sortby("backup-type", false)
.sortby("backup-id", false)
.column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
- .column(ColumnConfig::new("last-backup").renderer(render_backup_timestamp))
+ .column(
+ ColumnConfig::new("last-backup")
+ .renderer(render_last_backup)
+ .header("last snapshot")
+ .right_align(false)
+ )
.column(ColumnConfig::new("backup-count"))
.column(ColumnConfig::new("files").renderer(render_files));
Ok(Value::Null)
}
+#[api(
+ input: {
+ properties: {
+ repository: {
+ schema: REPO_URL_SCHEMA,
+ optional: true,
+ },
+ group: {
+ type: String,
+ description: "Backup group.",
+ },
+ "new-owner": {
+ type: Userid,
+ },
+ }
+ }
+)]
+/// Change owner of a backup group
+async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {
+
+ let repo = extract_repository_from_value(¶m)?;
+
+ let mut client = connect(repo.host(), repo.port(), repo.user())?;
+
+ param.as_object_mut().unwrap().remove("repository");
+
+ let group: BackupGroup = group.parse()?;
+
+ param["backup-type"] = group.backup_type().into();
+ param["backup-id"] = group.backup_id().into();
+
+ let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
+ client.post(&path, Some(param)).await?;
+
+ record_repository(&repo);
+
+ Ok(())
+}
+
#[api(
input: {
properties: {
let output_format = get_output_format(¶m);
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
- let group = if let Some(path) = param["group"].as_str() {
- Some(BackupGroup::parse(path)?)
+ let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
+ Some(path.parse()?)
} else {
None
};
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
- Ok(render_backup_file_list(&item.files))
+ let mut filenames = Vec::new();
+ for file in &item.files {
+ filenames.push(file.filename.to_string());
+ }
+ Ok(tools::format::render_backup_file_list(&filenames[..]))
};
let options = default_table_format_options()
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
- .column(ColumnConfig::new("size"))
+ .column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable))
.column(ColumnConfig::new("files").renderer(render_files))
;
let repo = extract_repository_from_value(¶m)?;
let path = tools::required_string_param(¶m, "snapshot")?;
- let snapshot = BackupDir::parse(path)?;
+ let snapshot: BackupDir = path.parse()?;
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let result = client.delete(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
}))).await?;
record_repository(&repo);
let repo = extract_repository_from_value(¶m)?;
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
client.login().await?;
record_repository(&repo);
schema: REPO_URL_SCHEMA,
optional: true,
},
- snapshot: {
- type: String,
- description: "Snapshot path.",
- },
+ "output-format": {
+ schema: OUTPUT_FORMAT,
+ optional: true,
+ },
}
}
)]
-/// Dump catalog.
-async fn dump_catalog(param: Value) -> Result<Value, Error> {
-
- let repo = extract_repository_from_value(¶m)?;
-
- let path = tools::required_string_param(¶m, "snapshot")?;
- let snapshot = BackupDir::parse(path)?;
+/// Show client and optional server version
+async fn api_version(param: Value) -> Result<(), Error> {
- let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+ let output_format = get_output_format(¶m);
- let crypt_config = match keyfile {
- None => None,
- Some(path) => {
- let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
- Some(Arc::new(CryptConfig::new(key)?))
+ let mut version_info = json!({
+ "client": {
+ "version": version::PROXMOX_PKG_VERSION,
+ "release": version::PROXMOX_PKG_RELEASE,
+ "repoid": version::PROXMOX_PKG_REPOID,
}
- };
-
- let client = connect(repo.host(), repo.user())?;
-
- let client = BackupReader::start(
- client,
- crypt_config.clone(),
- repo.store(),
- &snapshot.group().backup_type(),
- &snapshot.group().backup_id(),
- snapshot.backup_time(),
- true,
- ).await?;
-
- let manifest = client.download_manifest().await?;
-
- let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
-
- let most_used = index.find_most_used_chunks(8);
-
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
-
- let mut reader = BufferedDynamicReader::new(index, chunk_reader);
-
- let mut catalogfile = std::fs::OpenOptions::new()
- .write(true)
- .read(true)
- .custom_flags(libc::O_TMPFILE)
- .open("/tmp")?;
-
- std::io::copy(&mut reader, &mut catalogfile)
- .map_err(|err| format_err!("unable to download catalog - {}", err))?;
-
- catalogfile.seek(SeekFrom::Start(0))?;
-
- let mut catalog_reader = CatalogReader::new(catalogfile);
+ });
- catalog_reader.dump()?;
+ let repo = extract_repository_from_value(¶m);
+ if let Ok(repo) = repo {
+ let client = connect(repo.host(), repo.port(), repo.user())?;
- record_repository(&repo);
+ match client.get("api2/json/version", None).await {
+ Ok(mut result) => version_info["server"] = result["data"].take(),
+ Err(e) => eprintln!("could not connect to server - {}", e),
+ }
+ }
+ if output_format == "text" {
+ println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
+ if let Some(server) = version_info["server"].as_object() {
+ let server_version = server["version"].as_str().unwrap();
+ let server_release = server["release"].as_str().unwrap();
+ println!("server version: {}.{}", server_version, server_release);
+ }
+ } else {
+ format_and_print_result(&version_info, &output_format);
+ }
- Ok(Value::Null)
+ Ok(())
}
+
#[api(
input: {
properties: {
let repo = extract_repository_from_value(¶m)?;
let path = tools::required_string_param(¶m, "snapshot")?;
- let snapshot = BackupDir::parse(path)?;
+ let snapshot: BackupDir = path.parse()?;
let output_format = get_output_format(¶m);
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
let mut result = client.get(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
}))).await?;
record_repository(&repo);
let output_format = get_output_format(¶m);
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
Ok(Value::Null)
}
-fn parse_backupspec(value: &str) -> Result<(&str, &str), Error> {
-
- if let Some(caps) = (BACKUPSPEC_REGEX.regex_obj)().captures(value) {
- return Ok((caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()));
- }
- bail!("unable to parse directory specification '{}'", value);
-}
-
fn spawn_catalog_upload(
client: Arc<BackupWriter>,
- crypt_config: Option<Arc<CryptConfig>>,
+ encrypt: bool,
) -> Result<
(
Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
tokio::spawn(async move {
let catalog_upload_result = client
- .upload_stream(CATALOG_NAME, catalog_chunk_stream, "dynamic", None, crypt_config)
+ .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)
.await;
if let Err(ref err) = catalog_upload_result {
Ok((catalog, catalog_result_rx))
}
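+/// Resolve the encryption key and crypt mode from the --keyfile/--keyfd/--crypt-mode
+/// parameters. --keyfile and --keyfd are mutually exclusive; without either, the
+/// default key file is used if present. A key without an explicit mode implies
+/// CryptMode::Encrypt, while combining a key with --crypt-mode=none is an error.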
+fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
+ let keyfile = match param.get("keyfile") {
+ Some(Value::String(keyfile)) => Some(keyfile),
+ Some(_) => bail!("bad --keyfile parameter type"),
+ None => None,
+ };
+
+ let key_fd = match param.get("keyfd") {
+ Some(Value::Number(key_fd)) => Some(
+ RawFd::try_from(key_fd
+ .as_i64()
+ .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
+ )
+ .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
+ ),
+ Some(_) => bail!("bad --keyfd parameter type"),
+ None => None,
+ };
+
+ let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
+ Some(mode) => Some(serde_json::from_value(mode.clone())?),
+ None => None,
+ };
+
+ let keydata = match (keyfile, key_fd) {
+ (None, None) => None,
+ (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
+ (Some(keyfile), None) => Some(file_get_contents(keyfile)?),
+ (None, Some(fd)) => {
+ let input = unsafe { std::fs::File::from_raw_fd(fd) };
+ let mut data = Vec::new();
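+            // `{ input }` moves the file into a temporary that is dropped (and the
+            // fd closed) once the key data has been read.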
+ let _len: usize = { input }.read_to_end(&mut data)
+ .map_err(|err| {
+ format_err!("error reading encryption key from fd {}: {}", fd, err)
+ })?;
+ Some(data)
+ }
+ };
+
+ Ok(match (keydata, crypt_mode) {
+ // no parameters:
+ (None, None) => match key::read_optional_default_encryption_key()? {
+ Some(key) => (Some(key), CryptMode::Encrypt),
+ None => (None, CryptMode::None),
+ },
+
+ // just --crypt-mode=none
+ (None, Some(CryptMode::None)) => (None, CryptMode::None),
+
+ // just --crypt-mode other than none
+ (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
+ None => bail!("--crypt-mode without --keyfile and no default key file available"),
+ Some(key) => (Some(key), crypt_mode),
+ }
+
+ // just --keyfile
+ (Some(key), None) => (Some(key), CryptMode::Encrypt),
+
+ // --keyfile and --crypt-mode=none
+ (Some(_), Some(CryptMode::None)) => {
+ bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
+ }
+
+ // --keyfile and --crypt-mode other than none
+ (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
+ })
+}
+
#[api(
input: {
properties: {
schema: KEYFILE_SCHEMA,
optional: true,
},
+ "keyfd": {
+ schema: KEYFD_SCHEMA,
+ optional: true,
+ },
+ "crypt-mode": {
+ type: CryptMode,
+ optional: true,
+ },
"skip-lost-and-found": {
type: Boolean,
description: "Skip lost+found directory.",
schema: CHUNK_SIZE_SCHEMA,
optional: true,
},
+ "exclude": {
+ type: Array,
+ description: "List of paths or patterns for matching files to exclude.",
+ optional: true,
+ items: {
+ type: String,
+ description: "Path or match pattern.",
+ }
+ },
"entries-max": {
type: Integer,
description: "Max number of entries to hold in memory.",
optional: true,
- default: pxar::ENCODER_MAX_ENTRIES as isize,
+ default: proxmox_backup::pxar::ENCODER_MAX_ENTRIES as isize,
},
"verbose": {
type: Boolean,
verify_chunk_size(size)?;
}
- let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+ let (keydata, crypt_mode) = keyfile_parameters(¶m)?;
let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
let include_dev = param["include-dev"].as_array();
- let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
+ let entries_max = param["entries-max"].as_u64()
+ .unwrap_or(proxmox_backup::pxar::ENCODER_MAX_ENTRIES as u64);
+
+ let empty = Vec::new();
+ let exclude_args = param["exclude"].as_array().unwrap_or(&empty);
+
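+    // Parse all --exclude arguments up front so an invalid pattern aborts the
+    // backup before any data is transferred.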
+ let mut pattern_list = Vec::with_capacity(exclude_args.len());
+ for entry in exclude_args {
+ let entry = entry.as_str().ok_or_else(|| format_err!("Invalid pattern string slice"))?;
+ pattern_list.push(
+ MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Exclude)
+ .map_err(|err| format_err!("invalid exclude pattern entry: {}", err))?
+ );
+ }
let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
}
let mut upload_list = vec![];
-
- enum BackupType { PXAR, IMAGE, CONFIG, LOGFILE };
-
- let mut upload_catalog = false;
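+    // Track archive targets to reject duplicate names within a single backup run.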
+ let mut target_set = HashSet::new();
for backupspec in backupspec_list {
- let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
+ let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
+ let filename = &spec.config_string;
+ let target = &spec.archive_name;
+
+ if target_set.contains(target) {
+ bail!("got target twice: '{}'", target);
+ }
+ target_set.insert(target.to_string());
use std::os::unix::fs::FileTypeExt;
.map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
let file_type = metadata.file_type();
- let extension = target.rsplit('.').next()
- .ok_or_else(|| format_err!("missing target file extenion '{}'", target))?;
-
- match extension {
- "pxar" => {
+ match spec.spec_type {
+ BackupSpecificationType::PXAR => {
if !file_type.is_dir() {
bail!("got unexpected file type (expected directory)");
}
- upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
- upload_catalog = true;
+ upload_list.push((BackupSpecificationType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
}
- "img" => {
-
+ BackupSpecificationType::IMAGE => {
if !(file_type.is_file() || file_type.is_block_device()) {
bail!("got unexpected file type (expected file or block device)");
}
if size == 0 { bail!("got zero-sized file '{}'", filename); }
- upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
+ upload_list.push((BackupSpecificationType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
}
- "conf" => {
+ BackupSpecificationType::CONFIG => {
if !file_type.is_file() {
bail!("got unexpected file type (expected regular file)");
}
- upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
+ upload_list.push((BackupSpecificationType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
- "log" => {
+ BackupSpecificationType::LOGFILE => {
if !file_type.is_file() {
bail!("got unexpected file type (expected regular file)");
}
- upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
- }
- _ => {
- bail!("got unknown archive extension '{}'", extension);
+ upload_list.push((BackupSpecificationType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
}
}
- let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
+ let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
record_repository(&repo);
- println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
+ println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
println!("Client name: {}", proxmox::tools::nodename());
- let start_time = Local::now();
+ let start_time = std::time::Instant::now();
- println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+ println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
- let (crypt_config, rsa_encrypted_key) = match keyfile {
+ let (crypt_config, rsa_encrypted_key) = match keydata {
None => (None, None),
- Some(path) => {
- let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+ Some(key) => {
+ let (key, created) = decrypt_key(&key, &key::get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
- let path = master_pubkey_path()?;
- if path.exists() {
- let pem_data = file_get_contents(&path)?;
- let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
- let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
- (Some(Arc::new(crypt_config)), Some(enc_key))
- } else {
- (Some(Arc::new(crypt_config)), None)
+ match key::find_master_pubkey()? {
+ Some(ref path) if path.exists() => {
+ let pem_data = file_get_contents(path)?;
+ let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
+ let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
+ (Some(Arc::new(crypt_config)), Some(enc_key))
+ }
+ _ => (Some(Arc::new(crypt_config)), None),
}
}
};
let client = BackupWriter::start(
client,
+ crypt_config.clone(),
repo.store(),
backup_type,
&backup_id,
backup_time,
verbose,
+ false
).await?;
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
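+    // Try to download the manifest of the previous snapshot, so chunks already
+    // known to the server can be re-used instead of being uploaded again.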
+ let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
+ Some(Arc::new(previous_manifest))
+ } else {
+ None
+ };
+
+ let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let mut manifest = BackupManifest::new(snapshot);
- let (catalog, catalog_result_rx) = spawn_catalog_upload(client.clone(), crypt_config.clone())?;
+ let mut catalog = None;
+    let mut catalog_result_rx = None;
for (backup_type, filename, target, size) in upload_list {
match backup_type {
- BackupType::CONFIG => {
- println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
+ BackupSpecificationType::CONFIG => {
+ println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
let stats = client
- .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+ .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
.await?;
- manifest.add_file(target, stats.size, stats.csum)?;
+ manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
}
- BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
- println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
+ BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
+ println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
let stats = client
- .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+ .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
.await?;
- manifest.add_file(target, stats.size, stats.csum)?;
+ manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
}
- BackupType::PXAR => {
- println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+ BackupSpecificationType::PXAR => {
+ // start catalog upload on first use
+ if catalog.is_none() {
+ let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
+ catalog = Some(cat);
+                catalog_result_rx = Some(res);
+ }
+ let catalog = catalog.as_ref().unwrap();
+
+ println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
let stats = backup_directory(
&client,
+ previous_manifest.clone(),
&filename,
&target,
chunk_size_opt,
devices.clone(),
verbose,
skip_lost_and_found,
- crypt_config.clone(),
catalog.clone(),
+ pattern_list.clone(),
entries_max as usize,
+ true,
+ crypt_mode == CryptMode::Encrypt,
).await?;
- manifest.add_file(target, stats.size, stats.csum)?;
+ manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
catalog.lock().unwrap().end_directory()?;
}
- BackupType::IMAGE => {
+ BackupSpecificationType::IMAGE => {
println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
let stats = backup_image(
&client,
- &filename,
+ previous_manifest.clone(),
+ &filename,
&target,
size,
chunk_size_opt,
+ true,
+ crypt_mode == CryptMode::Encrypt,
verbose,
- crypt_config.clone(),
).await?;
- manifest.add_file(target, stats.size, stats.csum)?;
+ manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
}
}
}
// finalize and upload catalog
- if upload_catalog {
+ if let Some(catalog) = catalog {
let mutex = Arc::try_unwrap(catalog)
.map_err(|_| format_err!("unable to get catalog (still used)"))?;
let mut catalog = mutex.into_inner().unwrap();
drop(catalog); // close upload stream
- let stats = catalog_result_rx.await??;
-
- manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
+    if let Some(catalog_result_rx) = catalog_result_rx {
+ let stats = catalog_result_rx.await??;
+ manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypt_mode)?;
+ }
}
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
- let target = "rsa-encrypted.key";
+ let target = "rsa-encrypted.key.blob";
println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
let stats = client
- .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
+ .upload_blob_from_data(rsa_encrypted_key, target, false, false)
.await?;
- manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
+ manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
/*
println!("TEST {} {:?}", len, buffer2);
*/
}
-
// create manifest (index.json)
- let manifest = manifest.into_json();
+ // manifests are never encrypted, but include a signature
+ let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
+ .map_err(|err| format_err!("unable to format manifest - {}", err))?;
- println!("Upload index.json to '{:?}'", repo);
- let manifest = serde_json::to_string_pretty(&manifest)?.into();
+
+    if verbose { println!("Upload index.json to '{}'", repo); }
client
- .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, crypt_config.clone(), true, true)
+ .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
.await?;
client.finish().await?;
- let end_time = Local::now();
- let elapsed = end_time.signed_duration_since(start_time);
- println!("Duration: {}", elapsed);
+ let end_time = std::time::Instant::now();
+ let elapsed = end_time.duration_since(start_time);
+ println!("Duration: {:.2}s", elapsed.as_secs_f64());
- println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+ println!("End Time: {}", strftime_local("%c", epoch_i64())?);
Ok(Value::Null)
}
result
}
-fn dump_image<W: Write>(
+async fn dump_image<W: Write>(
client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>,
+ crypt_mode: CryptMode,
index: FixedIndexReader,
mut writer: W,
verbose: bool,
let most_used = index.find_most_used_chunks(8);
- let mut chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);
    // Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
// and thus slows down reading. Instead, directly use RemoteChunkReader
for pos in 0..index.index_count() {
let digest = index.index_digest(pos).unwrap();
- let raw_data = chunk_reader.read_chunk(&digest)?;
+ let raw_data = chunk_reader.read_chunk(&digest).await?;
writer.write_all(&raw_data)?;
bytes += raw_data.len();
if verbose {
Ok(())
}
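+/// Map a user-supplied archive name to the name stored on the server plus its
+/// archive type, e.g. "etc.pxar" becomes ("etc.pxar.didx", DynamicIndex). Names
+/// already carrying a server extension (.didx/.fidx/.blob) are passed through.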
+fn parse_archive_type(name: &str) -> (String, ArchiveType) {
+ if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
+ (name.into(), archive_type(name).unwrap())
+ } else if name.ends_with(".pxar") {
+ (format!("{}.didx", name), ArchiveType::DynamicIndex)
+ } else if name.ends_with(".img") {
+ (format!("{}.fidx", name), ArchiveType::FixedIndex)
+ } else {
+ (format!("{}.blob", name), ArchiveType::Blob)
+ }
+}
+
#[api(
input: {
properties: {
schema: KEYFILE_SCHEMA,
optional: true,
},
+ "keyfd": {
+ schema: KEYFD_SCHEMA,
+ optional: true,
+ },
+ "crypt-mode": {
+ type: CryptMode,
+ optional: true,
+ },
}
}
)]
let archive_name = tools::required_string_param(¶m, "archive-name")?;
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
record_repository(&repo);
let path = tools::required_string_param(¶m, "snapshot")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
- let group = BackupGroup::parse(path)?;
+ let group: BackupGroup = path.parse()?;
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
- let snapshot = BackupDir::parse(path)?;
+ let snapshot: BackupDir = path.parse()?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
};
let target = tools::required_string_param(¶m, "target")?;
let target = if target == "-" { None } else { Some(target) };
- let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+ let (keydata, _crypt_mode) = keyfile_parameters(¶m)?;
- let crypt_config = match keyfile {
+ let crypt_config = match keydata {
None => None,
- Some(path) => {
- let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+ Some(key) => {
+ let (key, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
- let server_archive_name = if archive_name.ends_with(".pxar") {
- format!("{}.didx", archive_name)
- } else if archive_name.ends_with(".img") {
- format!("{}.fidx", archive_name)
- } else {
- format!("{}.blob", archive_name)
- };
-
let client = BackupReader::start(
client,
crypt_config.clone(),
true,
).await?;
- let manifest = client.download_manifest().await?;
+ let (manifest, backup_index_data) = client.download_manifest().await?;
- if server_archive_name == MANIFEST_BLOB_NAME {
- let backup_index_data = manifest.into_json().to_string();
+ let (archive_name, archive_type) = parse_archive_type(archive_name);
+
+ if archive_name == MANIFEST_BLOB_NAME {
if let Some(target) = target {
- replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
+ replace_file(target, &backup_index_data, CreateOptions::new())?;
} else {
let stdout = std::io::stdout();
let mut writer = stdout.lock();
- writer.write_all(backup_index_data.as_bytes())
+ writer.write_all(&backup_index_data)
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
- } else if server_archive_name.ends_with(".blob") {
+ return Ok(Value::Null);
+ }
+
+ let file_info = manifest.lookup_file_info(&archive_name)?;
+
+ if archive_type == ArchiveType::Blob {
- let mut reader = client.download_blob(&manifest, &server_archive_name).await?;
+ let mut reader = client.download_blob(&manifest, &archive_name).await?;
if let Some(target) = target {
let mut writer = std::fs::OpenOptions::new()
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
- } else if server_archive_name.ends_with(".didx") {
+ } else if archive_type == ArchiveType::DynamicIndex {
- let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+ let index = client.download_dynamic_index(&manifest, &archive_name).await?;
let most_used = index.find_most_used_chunks(8);
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
if let Some(target) = target {
-
- let feature_flags = pxar::flags::DEFAULT;
- let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags);
- decoder.set_callback(move |path| {
- if verbose {
- eprintln!("{:?}", path);
- }
- Ok(())
- });
- decoder.set_allow_existing_dirs(allow_existing_dirs);
-
- decoder.restore(Path::new(target), &Vec::new())?;
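+            // Restore using the new streaming pxar extractor, honoring
+            // --allow-existing-dirs and logging each path when verbose.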
+ proxmox_backup::pxar::extract_archive(
+ pxar::decoder::Decoder::from_std(reader)?,
+ Path::new(target),
+ &[],
+ true,
+ proxmox_backup::pxar::Flags::DEFAULT,
+ allow_existing_dirs,
+ |path| {
+ if verbose {
+ println!("{:?}", path);
+ }
+ },
+ None,
+ )
+ .map_err(|err| format_err!("error extracting archive - {}", err))?;
} else {
let mut writer = std::fs::OpenOptions::new()
.write(true)
std::io::copy(&mut reader, &mut writer)
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
- } else if server_archive_name.ends_with(".fidx") {
+ } else if archive_type == ArchiveType::FixedIndex {
- let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
+ let index = client.download_fixed_index(&manifest, &archive_name).await?;
let mut writer = if let Some(target) = target {
std::fs::OpenOptions::new()
.map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
};
- dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose)?;
-
- } else {
- bail!("unknown archive file extension (expected .pxar of .img)");
+ dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
}
Ok(Value::Null)
schema: KEYFILE_SCHEMA,
optional: true,
},
+ "keyfd": {
+ schema: KEYFD_SCHEMA,
+ optional: true,
+ },
+ "crypt-mode": {
+ type: CryptMode,
+ optional: true,
+ },
}
}
)]
let repo = extract_repository_from_value(¶m)?;
let snapshot = tools::required_string_param(¶m, "snapshot")?;
- let snapshot = BackupDir::parse(snapshot)?;
+ let snapshot: BackupDir = snapshot.parse()?;
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(repo.host(), repo.port(), repo.user())?;
- let keyfile = param["keyfile"].as_str().map(PathBuf::from);
+ let (keydata, crypt_mode) = keyfile_parameters(¶m)?;
- let crypt_config = match keyfile {
+ let crypt_config = match keydata {
None => None,
- Some(path) => {
- let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
+ Some(key) => {
+ let (key, _created) = decrypt_key(&key, &key::get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
let data = file_get_contents(logfile)?;
- let blob = DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?;
+    // fixme: how to sign the log?
+ let blob = match crypt_mode {
+ CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
+ CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
+ };
let raw_data = blob.into_inner();
let args = json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
});
let body = hyper::Body::from(raw_data);
("group", false, &StringSchema::new("Backup group.").schema()),
], [
("output-format", true, &OUTPUT_FORMAT),
+ (
+ "quiet",
+ true,
+ &BooleanSchema::new("Minimal output - only show removals.")
+ .schema()
+ ),
("repository", true, &REPO_URL_SCHEMA),
])
)
async fn prune_async(mut param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(¶m)?;
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
let group = tools::required_string_param(¶m, "group")?;
- let group = BackupGroup::parse(group)?;
+ let group: BackupGroup = group.parse()?;
let output_format = get_output_format(¶m);
+ let quiet = param["quiet"].as_bool().unwrap_or(false);
+
param.as_object_mut().unwrap().remove("repository");
param.as_object_mut().unwrap().remove("group");
param.as_object_mut().unwrap().remove("output-format");
+ param.as_object_mut().unwrap().remove("quiet");
param["backup-type"] = group.backup_type().into();
param["backup-id"] = group.backup_id().into();
- let result = client.post(&path, Some(param)).await?;
+ let mut result = client.post(&path, Some(param)).await?;
record_repository(&repo);
- view_task_result(client, result, &output_format).await?;
+ let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
+ let item: PruneListItem = serde_json::from_value(record.to_owned())?;
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+ Ok(snapshot.relative_path().to_str().unwrap().to_owned())
+ };
+
+ let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
+ Ok(match v.as_bool() {
+ Some(true) => "keep",
+ Some(false) => "remove",
+ None => "unknown",
+ }.to_string())
+ };
+
+ let options = default_table_format_options()
+ .sortby("backup-type", false)
+ .sortby("backup-id", false)
+ .sortby("backup-time", false)
+ .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
+ .column(ColumnConfig::new("backup-time").renderer(tools::format::render_epoch).header("date"))
+ .column(ColumnConfig::new("keep").renderer(render_prune_action).header("action"))
+ ;
+
+ let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_PRUNE;
+
+ let mut data = result["data"].take();
+
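+    // With --quiet, only show the snapshots that prune marked for removal.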
+ if quiet {
+        let list: Vec<Value> = data.as_array().unwrap().iter().filter(|item| {
+            item["keep"].as_bool() == Some(false)
+        }).cloned().collect();
+ data = list.into();
+ }
+
+ format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
optional: true,
},
}
- }
+ },
+ returns: {
+ type: StorageStatus,
+ },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {
let output_format = get_output_format(¶m);
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(repo.host(), repo.port(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/status", repo.store());
let mut result = client.get(&path, None).await?;
- let mut data = result["data"].take();
+ let mut data = result["data"]["storage"].take();
record_repository(&repo);
.column(ColumnConfig::new("used").renderer(render_total_percentage))
.column(ColumnConfig::new("avail").renderer(render_total_percentage));
- let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
+ let schema = &API_RETURN_SCHEMA_STATUS;
format_and_print_result_full(&mut data, schema, &output_format, &options);
.fingerprint_cache(true)
.ticket_cache(true);
- let client = match HttpClient::new(repo.host(), repo.user(), options) {
+ let client = match HttpClient::new(repo.host(), repo.port(), repo.user(), options) {
Ok(v) => v,
_ => return Value::Null,
};
result
}
-fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
- result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+ if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
+ result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+ }
}
}
}
_ => return result,
};
- let snapshot = match param.get("snapshot") {
+ let snapshot: BackupDir = match param.get("snapshot") {
Some(path) => {
- match BackupDir::parse(path) {
+ match path.parse() {
Ok(v) => v,
_ => return result,
}
let query = tools::json_object_to_query(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
})).unwrap();
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
- .map(|v| strip_server_file_expenstion(&v))
+ .map(|v| tools::format::strip_server_file_extension(&v))
+ .collect()
+}
+
+pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ complete_server_file_name(arg, param)
+ .iter()
+ .filter_map(|name| {
+ if name.ends_with(".pxar.didx") {
+ Some(tools::format::strip_server_file_extension(name))
+ } else {
+ None
+ }
+ })
.collect()
}
-fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
- .filter_map(|v| {
- let name = strip_server_file_expenstion(&v);
- if name.ends_with(".pxar") {
- Some(name)
+ .filter_map(|name| {
+ if name.ends_with(".img.fidx") {
+ Some(tools::format::strip_server_file_extension(name))
} else {
None
}
result
}
-fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
-
- // fixme: implement other input methods
+use proxmox_backup::client::RemoteChunkReader;
+/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better
+/// async use!
+///
+/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture<Chunk>`,
+/// so that we can properly access it from multiple threads simultaneously while not issuing
+/// duplicate simultaneous reads over http.
+pub struct BufferedDynamicReadAt {
+ inner: Mutex<BufferedDynamicReader<RemoteChunkReader>>,
+}
- use std::env::VarError::*;
- match std::env::var("PBS_ENCRYPTION_PASSWORD") {
- Ok(p) => return Ok(p.as_bytes().to_vec()),
- Err(NotUnicode(_)) => bail!("PBS_ENCRYPTION_PASSWORD contains bad characters"),
- Err(NotPresent) => {
- // Try another method
+impl BufferedDynamicReadAt {
+ fn new(inner: BufferedDynamicReader<RemoteChunkReader>) -> Self {
+ Self {
+ inner: Mutex::new(inner),
}
}
+}
- // If we're on a TTY, query the user for a password
- if tty::stdin_isatty() {
- return Ok(tty::read_password("Encryption Key Password: ")?);
+impl ReadAt for BufferedDynamicReadAt {
+ fn start_read_at<'a>(
+ self: Pin<&'a Self>,
+ _cx: &mut Context,
+ buf: &'a mut [u8],
+ offset: u64,
+ ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
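+        // The blocking seek + read runs inline via block_in_place, so the result
+        // is always returned as Ready and poll_complete() is never reached.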
+ MaybeReady::Ready(tokio::task::block_in_place(move || {
+ let mut reader = self.inner.lock().unwrap();
+ reader.seek(SeekFrom::Start(offset))?;
+ Ok(reader.read(buf)?)
+ }))
}
- bail!("no password input mechanism available");
+ fn poll_complete<'a>(
+ self: Pin<&'a Self>,
+ _op: ReadAtOperation<'a>,
+ ) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
+ panic!("LocalDynamicReadAt::start_read_at returned Pending");
+ }
}
-fn key_create(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
+fn main() {
- let path = tools::required_string_param(¶m, "path")?;
- let path = PathBuf::from(path);
+ let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
+ .arg_param(&["backupspec"])
+ .completion_cb("repository", complete_repository)
+ .completion_cb("backupspec", complete_backup_source)
+ .completion_cb("keyfile", tools::complete_file_name)
+ .completion_cb("chunk-size", complete_chunk_size);
- let kdf = param["kdf"].as_str().unwrap_or("scrypt");
-
- let key = proxmox::sys::linux::random_data(32)?;
-
- if kdf == "scrypt" {
- // always read passphrase from tty
- if !tty::stdin_isatty() {
- bail!("unable to read passphrase - no tty");
- }
-
- let password = tty::read_and_verify_password("Encryption Key Password: ")?;
-
- let key_config = encrypt_key_with_passphrase(&key, &password)?;
-
- store_key_config(&path, false, key_config)?;
-
- Ok(Value::Null)
- } else if kdf == "none" {
- let created = Local.timestamp(Local::now().timestamp(), 0);
-
- store_key_config(&path, false, KeyConfig {
- kdf: None,
- created,
- modified: created,
- data: key,
- })?;
-
- Ok(Value::Null)
- } else {
- unreachable!();
- }
-}
-
-fn master_pubkey_path() -> Result<PathBuf, Error> {
- let base = BaseDirectories::with_prefix("proxmox-backup")?;
-
- // usually $HOME/.config/proxmox-backup/master-public.pem
- let path = base.place_config_file("master-public.pem")?;
-
- Ok(path)
-}
-
-fn key_import_master_pubkey(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let path = tools::required_string_param(¶m, "path")?;
- let path = PathBuf::from(path);
-
- let pem_data = file_get_contents(&path)?;
-
- if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
- bail!("Unable to decode PEM data - {}", err);
- }
-
- let target_path = master_pubkey_path()?;
-
- replace_file(&target_path, &pem_data, CreateOptions::new())?;
-
- println!("Imported public master key to {:?}", target_path);
-
- Ok(Value::Null)
-}
-
-fn key_create_master_key(
- _param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- // we need a TTY to query the new password
- if !tty::stdin_isatty() {
- bail!("unable to create master key - no tty");
- }
-
- let rsa = openssl::rsa::Rsa::generate(4096)?;
- let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
-
-
- let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;
-
- let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
- let filename_pub = "master-public.pem";
- println!("Writing public master key to {}", filename_pub);
- replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
-
- let cipher = openssl::symm::Cipher::aes_256_cbc();
- let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;
-
- let filename_priv = "master-private.pem";
- println!("Writing private master key to {}", filename_priv);
- replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
-
- Ok(Value::Null)
-}
-
-fn key_change_passphrase(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
- let path = tools::required_string_param(¶m, "path")?;
- let path = PathBuf::from(path);
-
- let kdf = param["kdf"].as_str().unwrap_or("scrypt");
-
- // we need a TTY to query the new password
- if !tty::stdin_isatty() {
- bail!("unable to change passphrase - no tty");
- }
-
- let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
-
- if kdf == "scrypt" {
-
- let password = tty::read_and_verify_password("New Password: ")?;
-
- let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
- new_key_config.created = created; // keep original value
-
- store_key_config(&path, true, new_key_config)?;
-
- Ok(Value::Null)
- } else if kdf == "none" {
- let modified = Local.timestamp(Local::now().timestamp(), 0);
-
- store_key_config(&path, true, KeyConfig {
- kdf: None,
- created, // keep original value
- modified,
- data: key.to_vec(),
- })?;
-
- Ok(Value::Null)
- } else {
- unreachable!();
- }
-}
-
-fn key_mgmt_cli() -> CliCommandMap {
-
- const KDF_SCHEMA: Schema =
- StringSchema::new("Key derivation function. Choose 'none' to store the key unecrypted.")
- .format(&ApiStringFormat::Enum(&["scrypt", "none"]))
- .default("scrypt")
- .schema();
-
- #[sortable]
- const API_METHOD_KEY_CREATE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&key_create),
- &ObjectSchema::new(
- "Create a new encryption key.",
- &sorted!([
- ("path", false, &StringSchema::new("File system path.").schema()),
- ("kdf", true, &KDF_SCHEMA),
- ]),
- )
- );
-
- let key_create_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE)
- .arg_param(&["path"])
- .completion_cb("path", tools::complete_file_name);
-
- #[sortable]
- const API_METHOD_KEY_CHANGE_PASSPHRASE: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&key_change_passphrase),
- &ObjectSchema::new(
- "Change the passphrase required to decrypt the key.",
- &sorted!([
- ("path", false, &StringSchema::new("File system path.").schema()),
- ("kdf", true, &KDF_SCHEMA),
- ]),
- )
- );
-
- let key_change_passphrase_cmd_def = CliCommand::new(&API_METHOD_KEY_CHANGE_PASSPHRASE)
- .arg_param(&["path"])
- .completion_cb("path", tools::complete_file_name);
-
- const API_METHOD_KEY_CREATE_MASTER_KEY: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&key_create_master_key),
- &ObjectSchema::new("Create a new 4096 bit RSA master pub/priv key pair.", &[])
- );
-
- let key_create_master_key_cmd_def = CliCommand::new(&API_METHOD_KEY_CREATE_MASTER_KEY);
-
- #[sortable]
- const API_METHOD_KEY_IMPORT_MASTER_PUBKEY: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&key_import_master_pubkey),
- &ObjectSchema::new(
- "Import a new RSA public key and use it as master key. The key is expected to be in '.pem' format.",
- &sorted!([ ("path", false, &StringSchema::new("File system path.").schema()) ]),
- )
- );
-
- let key_import_master_pubkey_cmd_def = CliCommand::new(&API_METHOD_KEY_IMPORT_MASTER_PUBKEY)
- .arg_param(&["path"])
- .completion_cb("path", tools::complete_file_name);
-
- CliCommandMap::new()
- .insert("create", key_create_cmd_def)
- .insert("create-master-key", key_create_master_key_cmd_def)
- .insert("import-master-pubkey", key_import_master_pubkey_cmd_def)
- .insert("change-passphrase", key_change_passphrase_cmd_def)
-}
-
-fn mount(
- param: Value,
- _info: &ApiMethod,
- _rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
- let verbose = param["verbose"].as_bool().unwrap_or(false);
- if verbose {
- // This will stay in foreground with debug output enabled as None is
- // passed for the RawFd.
- return proxmox_backup::tools::runtime::main(mount_do(param, None));
- }
-
- // Process should be deamonized.
- // Make sure to fork before the async runtime is instantiated to avoid troubles.
- let pipe = pipe()?;
- match fork() {
- Ok(ForkResult::Parent { .. }) => {
- nix::unistd::close(pipe.1).unwrap();
- // Blocks the parent process until we are ready to go in the child
- let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
- Ok(Value::Null)
- }
- Ok(ForkResult::Child) => {
- nix::unistd::close(pipe.0).unwrap();
- nix::unistd::setsid().unwrap();
- proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
- }
- Err(_) => bail!("failed to daemonize process"),
- }
-}
-
-async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
- let repo = extract_repository_from_value(¶m)?;
- let archive_name = tools::required_string_param(¶m, "archive-name")?;
- let target = tools::required_string_param(¶m, "target")?;
- let client = connect(repo.host(), repo.user())?;
-
- record_repository(&repo);
-
- let path = tools::required_string_param(¶m, "snapshot")?;
- let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
- let group = BackupGroup::parse(path)?;
- api_datastore_latest_snapshot(&client, repo.store(), group).await?
- } else {
- let snapshot = BackupDir::parse(path)?;
- (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
- };
-
- let keyfile = param["keyfile"].as_str().map(PathBuf::from);
- let crypt_config = match keyfile {
- None => None,
- Some(path) => {
- let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
- Some(Arc::new(CryptConfig::new(key)?))
- }
- };
-
- let server_archive_name = if archive_name.ends_with(".pxar") {
- format!("{}.didx", archive_name)
- } else {
- bail!("Can only mount pxar archives.");
- };
-
- let client = BackupReader::start(
- client,
- crypt_config.clone(),
- repo.store(),
- &backup_type,
- &backup_id,
- backup_time,
- true,
- ).await?;
-
- let manifest = client.download_manifest().await?;
-
- if server_archive_name.ends_with(".didx") {
- let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
- let most_used = index.find_most_used_chunks(8);
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
- let reader = BufferedDynamicReader::new(index, chunk_reader);
- let decoder = pxar::Decoder::new(reader)?;
- let options = OsStr::new("ro,default_permissions");
- let mut session = pxar::fuse::Session::new(decoder, &options, pipe.is_none())
- .map_err(|err| format_err!("pxar mount failed: {}", err))?;
-
- // Mount the session but not call fuse deamonize as this will cause
- // issues with the runtime after the fork
- let deamonize = false;
- session.mount(&Path::new(target), deamonize)?;
-
- if let Some(pipe) = pipe {
- nix::unistd::chdir(Path::new("/")).unwrap();
- // Finish creation of deamon by redirecting filedescriptors.
- let nullfd = nix::fcntl::open(
- "/dev/null",
- nix::fcntl::OFlag::O_RDWR,
- nix::sys::stat::Mode::empty(),
- ).unwrap();
- nix::unistd::dup2(nullfd, 0).unwrap();
- nix::unistd::dup2(nullfd, 1).unwrap();
- nix::unistd::dup2(nullfd, 2).unwrap();
- if nullfd > 2 {
- nix::unistd::close(nullfd).unwrap();
- }
- // Signal the parent process that we are done with the setup and it can
- // terminate.
- nix::unistd::write(pipe, &[0u8])?;
- nix::unistd::close(pipe).unwrap();
- }
-
- let multithreaded = true;
- session.run_loop(multithreaded)?;
- } else {
- bail!("unknown archive file extension (expected .pxar)");
- }
-
- Ok(Value::Null)
-}
-
-#[api(
- input: {
- properties: {
- "snapshot": {
- type: String,
- description: "Group/Snapshot path.",
- },
- "archive-name": {
- type: String,
- description: "Backup archive name.",
- },
- "repository": {
- optional: true,
- schema: REPO_URL_SCHEMA,
- },
- "keyfile": {
- optional: true,
- type: String,
- description: "Path to encryption key.",
- },
- },
- },
-)]
-/// Shell to interactively inspect and restore snapshots.
-async fn catalog_shell(param: Value) -> Result<(), Error> {
- let repo = extract_repository_from_value(&param)?;
- let client = connect(repo.host(), repo.user())?;
- let path = tools::required_string_param(&param, "snapshot")?;
- let archive_name = tools::required_string_param(&param, "archive-name")?;
-
- let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
- let group = BackupGroup::parse(path)?;
- api_datastore_latest_snapshot(&client, repo.store(), group).await?
- } else {
- let snapshot = BackupDir::parse(path)?;
- (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
- };
-
- let keyfile = param["keyfile"].as_str().map(PathBuf::from);
- let crypt_config = match keyfile {
- None => None,
- Some(path) => {
- let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
- Some(Arc::new(CryptConfig::new(key)?))
- }
- };
-
- let server_archive_name = if archive_name.ends_with(".pxar") {
- format!("{}.didx", archive_name)
- } else {
- bail!("Can only mount pxar archives.");
- };
-
- let client = BackupReader::start(
- client,
- crypt_config.clone(),
- repo.store(),
- &backup_type,
- &backup_id,
- backup_time,
- true,
- ).await?;
-
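- // O_TMPFILE creates an anonymous file in /tmp which is removed automatically
- // once the last handle to it is closed.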
- let tmpfile = std::fs::OpenOptions::new()
- .write(true)
- .read(true)
- .custom_flags(libc::O_TMPFILE)
- .open("/tmp")?;
-
- let manifest = client.download_manifest().await?;
-
- let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
- let most_used = index.find_most_used_chunks(8);
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), most_used);
- let reader = BufferedDynamicReader::new(index, chunk_reader);
- let mut decoder = pxar::Decoder::new(reader)?;
- decoder.set_callback(|path| {
- println!("{:?}", path);
- Ok(())
- });
-
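- // Download the catalog into the anonymous temp file and verify it against
- // the manifest before using it.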
- let tmpfile = client.download(CATALOG_NAME, tmpfile).await?;
- let index = DynamicIndexReader::new(tmpfile)
- .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
-
- // Note: do not use the values stored in the index (not trusted) - instead, compute them again
- let (csum, size) = index.compute_csum();
- manifest.verify_file(CATALOG_NAME, &csum, size)?;
-
- let most_used = index.find_most_used_chunks(8);
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
- let mut reader = BufferedDynamicReader::new(index, chunk_reader);
- let mut catalogfile = std::fs::OpenOptions::new()
- .write(true)
- .read(true)
- .custom_flags(libc::O_TMPFILE)
- .open("/tmp")?;
-
- std::io::copy(&mut reader, &mut catalogfile)
- .map_err(|err| format_err!("unable to download catalog - {}", err))?;
-
- catalogfile.seek(SeekFrom::Start(0))?;
- let catalog_reader = CatalogReader::new(catalogfile);
- let state = Shell::new(
- catalog_reader,
- &server_archive_name,
- decoder,
- )?;
-
- println!("Starting interactive shell");
- state.shell()?;
-
- record_repository(&repo);
-
- Ok(())
-}
-
-fn catalog_mgmt_cli() -> CliCommandMap {
- let catalog_shell_cmd_def = CliCommand::new(&API_METHOD_CATALOG_SHELL)
- .arg_param(&["snapshot", "archive-name"])
- .completion_cb("repository", complete_repository)
- .completion_cb("archive-name", complete_pxar_archive_name)
- .completion_cb("snapshot", complete_group_or_snapshot);
-
- let catalog_dump_cmd_def = CliCommand::new(&API_METHOD_DUMP_CATALOG)
- .arg_param(&["snapshot"])
- .completion_cb("repository", complete_repository)
- .completion_cb("snapshot", complete_backup_snapshot);
-
- CliCommandMap::new()
- .insert("dump", catalog_dump_cmd_def)
- .insert("shell", catalog_shell_cmd_def)
-}
-
-#[api(
- input: {
- properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- limit: {
- description: "The maximal number of tasks to list.",
- type: Integer,
- optional: true,
- minimum: 1,
- maximum: 1000,
- default: 50,
- },
- "output-format": {
- schema: OUTPUT_FORMAT,
- optional: true,
- },
- }
- }
-)]
-/// List running server tasks for this repository user.
-async fn task_list(param: Value) -> Result<Value, Error> {
-
- let output_format = get_output_format(&param);
-
- let repo = extract_repository_from_value(&param)?;
- let client = connect(repo.host(), repo.user())?;
-
- let limit = param["limit"].as_u64().unwrap_or(50) as usize;
-
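- // Ask the server only for running tasks that match this user and datastore.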
- let args = json!({
- "running": true,
- "start": 0,
- "limit": limit,
- "userfilter": repo.user(),
- "store": repo.store(),
- });
- let result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
-
- let data = &result["data"];
-
- if output_format == "text" {
- for item in data.as_array().unwrap() {
- println!(
- "{} {}",
- item["upid"].as_str().unwrap(),
- item["status"].as_str().unwrap_or("running"),
- );
- }
- } else {
- format_and_print_result(data, &output_format);
- }
-
- Ok(Value::Null)
-}
-
-#[api(
- input: {
- properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- upid: {
- schema: UPID_SCHEMA,
- },
- }
- }
-)]
-/// Display the task log.
-async fn task_log(param: Value) -> Result<Value, Error> {
-
- let repo = extract_repository_from_value(&param)?;
- let upid = tools::required_string_param(&param, "upid")?;
-
- let client = connect(repo.host(), repo.user())?;
-
- display_task_log(client, upid, true).await?;
-
- Ok(Value::Null)
-}
-
-#[api(
- input: {
- properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- upid: {
- schema: UPID_SCHEMA,
- },
- }
- }
-)]
-/// Try to stop a specific task.
-async fn task_stop(param: Value) -> Result<Value, Error> {
-
- let repo = extract_repository_from_value(&param)?;
- let upid_str = tools::required_string_param(&param, "upid")?;
-
- let mut client = connect(repo.host(), repo.user())?;
-
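- // Issuing a DELETE on the task's UPID endpoint requests the server to stop it.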
- let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
- let _ = client.delete(&path, None).await?;
-
- Ok(Value::Null)
-}
-
-fn task_mgmt_cli() -> CliCommandMap {
-
- let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
- .completion_cb("repository", complete_repository);
-
- let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
- .arg_param(&["upid"]);
-
- let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
- .arg_param(&["upid"]);
-
- CliCommandMap::new()
- .insert("log", task_log_cmd_def)
- .insert("list", task_list_cmd_def)
- .insert("stop", task_stop_cmd_def)
-}
-
-fn main() {
-
- let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
- .arg_param(&["backupspec"])
+ let benchmark_cmd_def = CliCommand::new(&API_METHOD_BENCHMARK)
.completion_cb("repository", complete_repository)
- .completion_cb("backupspec", complete_backup_source)
- .completion_cb("keyfile", tools::complete_file_name)
- .completion_cb("chunk-size", complete_chunk_size);
+ .completion_cb("keyfile", tools::complete_file_name);
let upload_log_cmd_def = CliCommand::new(&API_METHOD_UPLOAD_LOG)
.arg_param(&["snapshot", "logfile"])
let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
.completion_cb("repository", complete_repository);
- #[sortable]
- const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
- &ApiHandler::Sync(&mount),
- &ObjectSchema::new(
- "Mount pxar archive.",
- &sorted!([
- ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
- ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
- ("target", false, &StringSchema::new("Target directory path.").schema()),
- ("repository", true, &REPO_URL_SCHEMA),
- ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
- ("verbose", true, &BooleanSchema::new("Verbose output.").default(false).schema()),
- ]),
- )
- );
-
- let mount_cmd_def = CliCommand::new(&API_METHOD_MOUNT)
- .arg_param(&["snapshot", "archive-name", "target"])
- .completion_cb("repository", complete_repository)
- .completion_cb("snapshot", complete_group_or_snapshot)
- .completion_cb("archive-name", complete_pxar_archive_name)
- .completion_cb("target", tools::complete_file_name);
+ let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
+ .completion_cb("repository", complete_repository);
+ let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
+ .arg_param(&["group", "new-owner"])
+ .completion_cb("group", complete_backup_group)
+ .completion_cb("new-owner", complete_user_name)
+ .completion_cb("repository", complete_repository);
let cmd_def = CliCommandMap::new()
.insert("backup", backup_cmd_def)
.insert("snapshots", snapshots_cmd_def)
.insert("files", files_cmd_def)
.insert("status", status_cmd_def)
- .insert("key", key_mgmt_cli())
- .insert("mount", mount_cmd_def)
+ .insert("key", key::cli())
+ .insert("mount", mount_cmd_def())
+ .insert("map", map_cmd_def())
+ .insert("unmap", unmap_cmd_def())
.insert("catalog", catalog_mgmt_cli())
- .insert("task", task_mgmt_cli());
+ .insert("task", task_mgmt_cli())
+ .insert("version", version_cmd_def)
+ .insert("benchmark", benchmark_cmd_def)
+ .insert("change-owner", change_owner_cmd_def);
- run_cli_command(cmd_def, Some(|future| {
+ let rpcenv = CliEnvironment::new();
+ run_cli_command(cmd_def, rpcenv, Some(|future| {
proxmox_backup::tools::runtime::main(future)
}));
}