use chrono::{Local, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::HashMap;
+use std::io::Write;
use proxmox_backup::tools;
use proxmox_backup::cli::*;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
+use proxmox_backup::pxar;
+
//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
//use proxmox_backup::pxar::encoder::*;
}
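+/// Default repository, read from the PBS_REPOSITORY environment variable (if set).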
+fn get_default_repository() -> Option<String> {
+ std::env::var("PBS_REPOSITORY").ok()
+}
+
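+/// Get the backup repository from the "repository" parameter, falling back to PBS_REPOSITORY.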
+fn extract_repository_from_value(
+ param: &Value,
+) -> Result<BackupRepository, Error> {
+
+ let repo_url = param["repository"]
+ .as_str()
+ .map(String::from)
+ .or_else(get_default_repository)
+ .ok_or_else(|| format_err!("unable to get (default) repository"))?;
+
+ let repo: BackupRepository = repo_url.parse()?;
+
+ Ok(repo)
+}
+
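+/// Variant of extract_repository_from_value() for the string maps used by completion callbacks; returns None instead of failing.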
+fn extract_repository_from_map(
+ param: &HashMap<String, String>,
+) -> Option<BackupRepository> {
+
+ param.get("repository")
+ .map(String::from)
+ .or_else(get_default_repository)
+ .and_then(|repo_url| repo_url.parse::<BackupRepository>().ok())
+}
+
fn record_repository(repo: &BackupRepository) {
let base = match BaseDirectories::with_prefix("proxmox-backup") {
Ok(())
}
-fn strip_chunked_file_expenstions(list: Vec<String>) -> Vec<String> {
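+/// Strip the server-side extensions (.didx, .fidx, .blob) from file names for display.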
+fn strip_server_file_extensions(list: Vec<String>) -> Vec<String> {
let mut result = vec![];
result.push(file[..file.len()-5].to_owned());
} else if file.ends_with(".fidx") {
result.push(file[..file.len()-5].to_owned());
+ } else if file.ends_with(".blob") {
+ result.push(file[..file.len()-5].to_owned());
} else {
result.push(file); // should not happen
}
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let mut client = HttpClient::new(repo.host(), repo.user())?;
let backup_dir = BackupDir::new(btype, id, epoch);
let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_chunked_file_expenstions(files);
+ let files = strip_server_file_extensions(files);
for filename in files {
let path = backup_dir.relative_path().to_str().unwrap().to_owned();
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let client = HttpClient::new(repo.host(), repo.user())?;
let path = group.group_path().to_str().unwrap().to_owned();
let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_chunked_file_expenstions(files);
+ let files = strip_server_file_extensions(files);
println!("{:20} | {} | {:5} | {}", path, last_backup.format("%c"),
backup_count, tools::join(&files, ' '));
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let path = tools::required_string_param(&param, "group")?;
let group = BackupGroup::parse(path)?;
let path = snapshot.relative_path().to_str().unwrap().to_owned();
let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_chunked_file_expenstions(files);
+ let files = strip_server_file_extensions(files);
println!("{} | {} | {}", path, snapshot.backup_time().format("%c"), tools::join(&files, ' '));
}
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot = BackupDir::parse(path)?;
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let mut client = HttpClient::new(repo.host(), repo.user())?;
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
+ let repo = extract_repository_from_value(&param)?;
let backupspec_list = tools::required_array_param(&param, "backupspec")?;
- let repo: BackupRepository = repo_url.parse()?;
-
let all_file_systems = param["all-file-systems"].as_bool().unwrap_or(false);
let verbose = param["verbose"].as_bool().unwrap_or(false);
println!("Client name: {}", tools::nodename());
println!("Start Time: {}", backup_time.to_rfc3339());
- let crypt_config = match keyfile {
- None => None,
+ let (crypt_config, rsa_encrypted_key) = match keyfile {
+ None => (None, None),
Some(path) => {
- let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
- Some(Arc::new(CryptConfig::new(key)?))
+ let (key, created) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+
+ let crypt_config = CryptConfig::new(key)?;
+
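+ // If a master public key exists, RSA-encrypt the backup encryption key so it can later be recovered with the master private key.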
+ let path = master_pubkey_path()?;
+ if path.exists() {
+ let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
+ let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
+ let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
+ (Some(Arc::new(crypt_config)), Some(enc_key))
+ } else {
+ (Some(Arc::new(crypt_config)), None)
+ }
}
};
match backup_type {
BackupType::CONFIG => {
println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
- client.upload_config(&filename, &target).wait()?;
+ client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
}
BackupType::PXAR => {
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
}
}
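+ // Upload the RSA-encrypted copy of the encryption key as a separate blob alongside the backup.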
+ if let Some(rsa_encrypted_key) = rsa_encrypted_key {
+ let target = "rsa-encrypted.key";
+ println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
+ client.upload_blob_from_data(rsa_encrypted_key, target, None, false).wait()?;
+
+ // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
+ /*
+ let mut buffer2 = vec![0u8; rsa.size() as usize];
+ let pem_data = proxmox_backup::tools::file_get_contents("master-private.pem")?;
+ let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
+ let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
+ println!("TEST {} {:?}", len, buffer2);
+ */
+ }
+
client.finish().wait()?;
let end_time = Local.timestamp(Local::now().timestamp(), 0);
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
+
+ let verbose = param["verbose"].as_bool().unwrap_or(false);
let archive_name = tools::required_string_param(&param, "archive-name")?;
- let mut client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user())?;
record_repository(&repo);
let path = tools::required_string_param(&param, "snapshot")?;
- let query;
-
- if path.matches('/').count() == 1 {
+ let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group = BackupGroup::parse(path)?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
bail!("backup group '{}' does not contain any snapshots:", path);
}
- query = tools::json_object_to_query(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- "backup-time": list[0]["backup-time"].as_i64().unwrap(),
- "archive-name": archive_name,
- }))?;
+ let epoch = list[0]["backup-time"].as_i64().unwrap();
+ let backup_time = Local.timestamp(epoch, 0);
+ (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
} else {
let snapshot = BackupDir::parse(path)?;
-
- query = tools::json_object_to_query(json!({
- "backup-type": snapshot.group().backup_type(),
- "backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
- "archive-name": archive_name,
- }))?;
- }
+ (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
+ };
let target = tools::required_string_param(&param, "target")?;
+ let target = if target == "-" { None } else { Some(target) };
- if archive_name.ends_with(".pxar") {
- let path = format!("api2/json/admin/datastore/{}/pxar?{}", repo.store(), query);
+ let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
- println!("DOWNLOAD FILE {} to {}", path, target);
+ let crypt_config = match keyfile {
+ None => None,
+ Some(path) => {
+ let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+ Some(Arc::new(CryptConfig::new(key)?))
+ }
+ };
- let target = PathBuf::from(target);
- let writer = PxarDecodeWriter::new(&target, true)?;
- client.download(&path, Box::new(writer)).wait()?;
+ let server_archive_name = if archive_name.ends_with(".pxar") {
+ format!("{}.didx", archive_name)
+ } else if archive_name.ends_with(".img") {
+ format!("{}.fidx", archive_name)
} else {
- bail!("unknown file extensions - unable to download '{}'", archive_name);
+ format!("{}.blob", archive_name)
+ };
+
+ let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;
+
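+ // Download index files into an anonymous temporary file (O_TMPFILE) under /tmp.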
+ use std::os::unix::fs::OpenOptionsExt;
+
+ let tmpfile = std::fs::OpenOptions::new()
+ .write(true)
+ .read(true)
+ .custom_flags(libc::O_TMPFILE)
+ .open("/tmp")?;
+
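+ // Blobs (.blob) are downloaded in one piece; verify the CRC and decrypt if a key was supplied.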
+ if server_archive_name.ends_with(".blob") {
+
+ let writer = Vec::with_capacity(1024*1024);
+ let blob_data = client.download(&server_archive_name, writer).wait()?;
+ let blob = DataBlob::from_raw(blob_data)?;
+ blob.verify_crc()?;
+
+ let raw_data = match crypt_config {
+ Some(ref crypt_config) => blob.decode(Some(crypt_config))?,
+ None => blob.decode(None)?,
+ };
+
+ if let Some(target) = target {
+ proxmox_backup::tools::file_set_contents(target, &raw_data, None)?;
+ } else {
+ let stdout = std::io::stdout();
+ let mut writer = stdout.lock();
+ writer.write_all(&raw_data)
+ .map_err(|err| format_err!("unable to pipe data - {}", err))?;
+ }
+
+ } else if server_archive_name.ends_with(".didx") {
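+ // Dynamic index (.didx): fetch chunks on demand and either restore the pxar archive to the target directory or pipe the raw archive to stdout.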
+ let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+
+ let index = DynamicIndexReader::new(tmpfile)
+ .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;
+
+ let most_used = index.find_most_used_chunks(8);
+
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+
+ let mut reader = BufferedDynamicReader::new(index, chunk_reader);
+
+ if let Some(target) = target {
+
+ let feature_flags = pxar::CA_FORMAT_DEFAULT;
+ let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
+ if verbose {
+ println!("{:?}", path);
+ }
+ Ok(())
+ });
+
+ decoder.restore(Path::new(target))?;
+ } else {
+ let stdout = std::io::stdout();
+ let mut writer = stdout.lock();
+
+ std::io::copy(&mut reader, &mut writer)
+ .map_err(|err| format_err!("unable to pipe data - {}", err))?;
+ }
+ } else if server_archive_name.ends_with(".fidx") {
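+ // Fixed index (.fidx): reassemble the image from its chunks and write it to the target file (or stdout).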
+ let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+
+ let index = FixedIndexReader::new(tmpfile)
+ .map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
+
+ let most_used = index.find_most_used_chunks(8);
+
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+
+ let mut reader = BufferedFixedReader::new(index, chunk_reader);
+
+ if let Some(target) = target {
+ let mut writer = std::fs::OpenOptions::new()
+ .write(true)
+ .create(true)
+ .create_new(true)
+ .open(target)
+ .map_err(|err| format_err!("unable to create target file {:?} - {}", target, err))?;
+
+ std::io::copy(&mut reader, &mut writer)
+ .map_err(|err| format_err!("unable to store data - {}", err))?;
+ } else {
+ let stdout = std::io::stdout();
+ let mut writer = stdout.lock();
+
+ std::io::copy(&mut reader, &mut writer)
+ .map_err(|err| format_err!("unable to pipe data - {}", err))?;
+ }
+ } else {
+ bail!("unknown archive file extension (expected .pxar of .img)");
}
Ok(Value::Null)
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
- let repo_url = tools::required_string_param(&param, "repository")?;
- let repo: BackupRepository = repo_url.parse()?;
+ let repo = extract_repository_from_value(&param)?;
let mut client = HttpClient::new(repo.host(), repo.user())?;
Value::Null
}
-fn extract_repo(param: &HashMap<String, String>) -> Option<BackupRepository> {
-
- let repo_url = match param.get("repository") {
- Some(v) => v,
- _ => return None,
- };
-
- let repo: BackupRepository = match repo_url.parse() {
- Ok(v) => v,
- _ => return None,
- };
-
- Some(repo)
-}
-
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
- let repo = match extract_repo(param) {
+ let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
let mut result = vec![];
- let repo = match extract_repo(param) {
+ let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
result
}
-fn complete_archive_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
- let repo = match extract_repo(param) {
+ let repo = match extract_repository_from_map(param) {
Some(v) => v,
_ => return result,
};
}
}
- strip_chunked_file_expenstions(result)
+ result
+}
+
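+/// Complete archive names by listing the server files and stripping the server-side extensions.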
+fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+
+ let result = complete_server_file_name(arg, param);
+
+ strip_server_file_extensions(result)
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
}
}
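+/// Returns the path where the optional RSA master public key is stored.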
+fn master_pubkey_path() -> Result<PathBuf, Error> {
+ let base = BaseDirectories::with_prefix("proxmox-backup")?;
+
+ // usually $HOME/.config/proxmox-backup/master-public.pem
+ let path = base.place_config_file("master-public.pem")?;
+
+ Ok(path)
+}
+
fn key_import_master_pubkey(
param: Value,
_info: &ApiMethod,
bail!("Unable to decode PEM data - {}", err);
}
- let base = BaseDirectories::with_prefix("proxmox-backup")?;
-
- // usually $HOME/.config/proxmox-backup/master-public.pem
- let target_path = base.place_config_file("master-public.pem")?;
+ let target_path = master_pubkey_path()?;
proxmox_backup::tools::file_set_contents(&target_path, &pem_data, None)?;
cmd_def
}
-
fn main() {
let backup_source_schema: Arc<Schema> = Arc::new(
ApiMethod::new(
create_backup,
ObjectSchema::new("Create (host) backup.")
- .required("repository", REPO_URL_SCHEMA.clone())
.required(
"backupspec",
ArraySchema::new(
backup_source_schema,
).min_length(1)
)
+ .optional("repository", REPO_URL_SCHEMA.clone())
.optional(
"keyfile",
StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
.default(4096)
)
))
- .arg_param(vec!["repository", "backupspec"])
+ .arg_param(vec!["backupspec"])
.completion_cb("repository", complete_repository)
.completion_cb("backupspec", complete_backup_source)
.completion_cb("keyfile", tools::complete_file_name)
ApiMethod::new(
list_backup_groups,
ObjectSchema::new("List backup groups.")
- .required("repository", REPO_URL_SCHEMA.clone())
+ .optional("repository", REPO_URL_SCHEMA.clone())
))
- .arg_param(vec!["repository"])
.completion_cb("repository", complete_repository);
let snapshots_cmd_def = CliCommand::new(
ApiMethod::new(
list_snapshots,
ObjectSchema::new("List backup snapshots.")
- .required("repository", REPO_URL_SCHEMA.clone())
.required("group", StringSchema::new("Backup group."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
))
- .arg_param(vec!["repository", "group"])
+ .arg_param(vec!["group"])
.completion_cb("group", complete_backup_group)
.completion_cb("repository", complete_repository);
ApiMethod::new(
forget_snapshots,
ObjectSchema::new("Forget (remove) backup snapshots.")
- .required("repository", REPO_URL_SCHEMA.clone())
.required("snapshot", StringSchema::new("Snapshot path."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
))
- .arg_param(vec!["repository", "snapshot"])
+ .arg_param(vec!["snapshot"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot);
ApiMethod::new(
start_garbage_collection,
ObjectSchema::new("Start garbage collection for a specific repository.")
- .required("repository", REPO_URL_SCHEMA.clone())
+ .optional("repository", REPO_URL_SCHEMA.clone())
))
- .arg_param(vec!["repository"])
.completion_cb("repository", complete_repository);
let restore_cmd_def = CliCommand::new(
ApiMethod::new(
restore,
ObjectSchema::new("Restore backup repository.")
- .required("repository", REPO_URL_SCHEMA.clone())
.required("snapshot", StringSchema::new("Group/Snapshot path."))
.required("archive-name", StringSchema::new("Backup archive name."))
- .required("target", StringSchema::new("Target directory path."))
+ .required("target", StringSchema::new(r###"Target directory path. Use '-' to write to stdandard output.
+
+We do not extraxt '.pxar' archives when writing to stdandard output.
+
+"###
+ ))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("keyfile", StringSchema::new("Path to encryption key."))
+ .optional(
+ "verbose",
+ BooleanSchema::new("Verbose output.").default(false)
+ )
))
- .arg_param(vec!["repository", "snapshot", "archive-name", "target"])
+ .arg_param(vec!["snapshot", "archive-name", "target"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_group_or_snapshot)
.completion_cb("archive-name", complete_archive_name)
prune,
proxmox_backup::api2::admin::datastore::add_common_prune_prameters(
ObjectSchema::new("Prune backup repository.")
- .required("repository", REPO_URL_SCHEMA.clone())
+ .optional("repository", REPO_URL_SCHEMA.clone())
)
))
- .arg_param(vec!["repository"])
.completion_cb("repository", complete_repository);
let cmd_def = CliCommandMap::new()