use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::Write;
+use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
use proxmox_backup::tools;
use proxmox_backup::cli::*;
_ => return,
};
- let mut data = tools::file_get_json(&path, None).unwrap_or(json!({}));
+ let mut data = file_get_json(&path, None).unwrap_or(json!({}));
let repo = repo.to_string();
let new_data = json!(map);
- let _ = tools::file_set_contents(path, new_data.to_string().as_bytes(), None);
+ let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
}
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
_ => return result,
};
- let data = tools::file_get_json(&path, None).unwrap_or(json!({}));
+ let data = file_get_json(&path, None).unwrap_or(json!({}));
if let Some(map) = data.as_object() {
for (repo, _count) in map {
verbose: bool,
skip_lost_and_found: bool,
crypt_config: Option<Arc<CryptConfig>>,
-) -> Result<(), Error> {
+) -> Result<BackupStats, Error> {
let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found)?;
let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
.map_err(|_| {}).map(|_| ())
);
- client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;
+ let stats = client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;
- Ok(())
+ Ok(stats)
}
fn backup_image<P: AsRef<Path>>(
chunk_size: Option<usize>,
_verbose: bool,
crypt_config: Option<Arc<CryptConfig>>,
-) -> Result<(), Error> {
+) -> Result<BackupStats, Error> {
let path = image_path.as_ref().to_owned();
let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
- client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;
+ let stats = client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;
- Ok(())
+ Ok(stats)
}
-fn strip_server_file_expenstions(list: Vec<String>) -> Vec<String> {
+fn strip_server_file_expenstion(name: &str) -> String {
- let mut result = vec![];
-
- for file in list.into_iter() {
- if file.ends_with(".didx") {
- result.push(file[..file.len()-5].to_owned());
- } else if file.ends_with(".fidx") {
- result.push(file[..file.len()-5].to_owned());
- } else if file.ends_with(".blob") {
- result.push(file[..file.len()-5].to_owned());
- } else {
- result.push(file); // should not happen
- }
+ if name.ends_with(".didx") {
+ return name[..name.len()-5].to_owned();
+ } else if name.ends_with(".fidx") {
+ return name[..name.len()-5].to_owned();
+ } else if name.ends_with(".blob") {
+ return name[..name.len()-5].to_owned();
+ } else {
+ return name.to_owned(); // should not happen
}
-
- result
}
fn list_backup_groups(
let path = group.group_path().to_str().unwrap().to_owned();
- let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_server_file_expenstions(files);
+ let files = item["files"].as_array().unwrap().iter()
+ .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
if output_format == "text" {
println!(
let path = snapshot.relative_path().to_str().unwrap().to_owned();
- let files = item["files"].as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_owned()).collect();
- let files = strip_server_file_expenstions(files);
+ let files = item["files"].as_array().unwrap().iter()
+ .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
if output_format == "text" {
println!("{} | {}", path, tools::join(&files, ' '));
Ok(result)
}
+fn list_snapshot_files(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+ let repo = extract_repository_from_value(&param)?;
+
+ let path = tools::required_string_param(&param, "snapshot")?;
+ let snapshot = BackupDir::parse(path)?;
+
+ let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+
+ let client = HttpClient::new(repo.host(), repo.user())?;
+
+ let path = format!("api2/json/admin/datastore/{}/files", repo.store());
+
+ let result = client.get(&path, Some(json!({
+ "backup-type": snapshot.group().backup_type(),
+ "backup-id": snapshot.group().backup_id(),
+ "backup-time": snapshot.backup_time().timestamp(),
+ }))).wait()?;
+
+ record_repository(&repo);
+
+ let list: Vec<String> = result["data"].as_array().unwrap().iter()
+ .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
+
+ if output_format == "text" {
+ for file in list {
+ println!("{}", file);
+ }
+ } else {
+ format_and_print_result(&list.into(), &output_format);
+ }
+
+ Ok(Value::Null)
+}
+
fn start_garbage_collection(
param: Value,
_info: &ApiMethod,
bail!("got unexpected file type (expected file or block device)");
}
- let size = tools::image_size(&PathBuf::from(filename))?;
+ let size = image_size(&PathBuf::from(filename))?;
if size == 0 { bail!("got zero-sized file '{}'", filename); }
let path = master_pubkey_path()?;
if path.exists() {
- let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
+ let pem_data = file_get_contents(&path)?;
let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
(Some(Arc::new(crypt_config)), Some(enc_key))
let client = client.start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose).wait()?;
+ let mut file_list = vec![];
+
for (backup_type, filename, target, size) in upload_list {
match backup_type {
BackupType::CONFIG => {
println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
- client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
+ let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
+ file_list.push((target, stats));
}
BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
- client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
+ let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
+ file_list.push((target, stats));
}
BackupType::PXAR => {
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
- backup_directory(
+ let stats = backup_directory(
&client,
&filename,
&target,
skip_lost_and_found,
crypt_config.clone(),
)?;
+ file_list.push((target, stats));
}
BackupType::IMAGE => {
println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
- backup_image(
+ let stats = backup_image(
&client,
&filename,
&target,
verbose,
crypt_config.clone(),
)?;
+ file_list.push((target, stats));
}
}
}
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
let target = "rsa-encrypted.key";
println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
- client.upload_blob_from_data(rsa_encrypted_key, target, None, false).wait()?;
+ let stats = client.upload_blob_from_data(rsa_encrypted_key, target, None, false, false).wait()?;
+ file_list.push((target.to_owned(), stats));
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
/*
let mut buffer2 = vec![0u8; rsa.size() as usize];
- let pem_data = proxmox_backup::tools::file_get_contents("master-private.pem")?;
+ let pem_data = file_get_contents("master-private.pem")?;
let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
println!("TEST {} {:?}", len, buffer2);
*/
}
+ // create index.json
+ let file_list = file_list.iter()
+ .fold(json!({}), |mut acc, (filename, stats)| {
+ acc[filename] = json!({
+ "size": stats.size,
+ });
+ acc
+ });
+
+ let index = json!({
+ "backup-type": backup_type,
+ "backup-id": backup_id,
+ "backup-time": backup_time.timestamp(),
+ "files": file_list,
+ });
+
+ println!("Upload index.json to '{:?}'", repo);
+ let index_data = serde_json::to_string_pretty(&index)?.into();
+ client.upload_blob_from_data(index_data, "index.json", crypt_config.clone(), true, true).wait()?;
+
client.finish().wait()?;
let end_time = Local::now();
};
if let Some(target) = target {
- crate::tools::file_set_contents(target, &raw_data, None)?;
+ file_set_contents(target, &raw_data, None)?;
} else {
let stdout = std::io::stdout();
let mut writer = stdout.lock();
if let Some(target) = target {
- let feature_flags = pxar::CA_FORMAT_DEFAULT;
+ let feature_flags = pxar::flags::DEFAULT;
let mut decoder = pxar::SequentialDecoder::new(&mut reader, feature_flags, |path| {
if verbose {
println!("{:?}", path);
}
};
- let data = crate::tools::file_get_contents(logfile)?;
+ let data = file_get_contents(logfile)?;
let blob = if let Some(ref crypt_config) = crypt_config {
DataBlob::encode(&data, Some(crypt_config), true)?
param["backup-type"] = group.backup_type().into();
param["backup-id"] = group.backup_id().into();
- let result = client.post(&path, Some(param)).wait()?;
+ let _result = client.post(&path, Some(param)).wait()?;
record_repository(&repo);
- Ok(result)
+ Ok(Value::Null)
}
fn status(
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
- let result = complete_server_file_name(arg, param);
-
- strip_server_file_expenstions(result)
+ complete_server_file_name(arg, param)
+ .iter().map(|v| strip_server_file_expenstion(&v)).collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let path = tools::required_string_param(&param, "path")?;
let path = PathBuf::from(path);
- let pem_data = proxmox_backup::tools::file_get_contents(&path)?;
+ let pem_data = file_get_contents(&path)?;
if let Err(err) = openssl::pkey::PKey::public_key_from_pem(&pem_data) {
bail!("Unable to decode PEM data - {}", err);
let target_path = master_pubkey_path()?;
- proxmox_backup::tools::file_set_contents(&target_path, &pem_data, None)?;
+ file_set_contents(&target_path, &pem_data, None)?;
println!("Imported public master key to {:?}", target_path);
let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
let filename_pub = "master-public.pem";
println!("Writing public master key to {}", filename_pub);
- proxmox_backup::tools::file_set_contents(filename_pub, pub_key.as_slice(), None)?;
+ file_set_contents(filename_pub, pub_key.as_slice(), None)?;
let cipher = openssl::symm::Cipher::aes_256_cbc();
let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;
let filename_priv = "master-private.pem";
println!("Writing private master key to {}", filename_priv);
- proxmox_backup::tools::file_set_contents(filename_priv, priv_key.as_slice(), None)?;
+ file_set_contents(filename_priv, priv_key.as_slice(), None)?;
Ok(Value::Null)
}
.completion_cb("archive-name", complete_archive_name)
.completion_cb("target", tools::complete_file_name);
+ let files_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ list_snapshot_files,
+ ObjectSchema::new("List snapshot files.")
+ .required("snapshot", StringSchema::new("Snapshot path."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("output-format", OUTPUT_FORMAT.clone())
+ ))
+ .arg_param(vec!["snapshot"])
+ .completion_cb("repository", complete_repository)
+ .completion_cb("snapshot", complete_group_or_snapshot);
+
let prune_cmd_def = CliCommand::new(
ApiMethod::new(
prune,
.insert("prune".to_owned(), prune_cmd_def.into())
.insert("restore".to_owned(), restore_cmd_def.into())
.insert("snapshots".to_owned(), snapshots_cmd_def.into())
+ .insert("files".to_owned(), files_cmd_def.into())
.insert("status".to_owned(), status_cmd_def.into())
.insert("key".to_owned(), key_mgmt_cli().into());