use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::io::Write;
+use std::os::unix::fs::OpenOptionsExt;
+
use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
use proxmox_backup::tools;
use proxmox_backup::api_schema::router::*;
use proxmox_backup::client::*;
use proxmox_backup::backup::*;
-use proxmox_backup::pxar;
+use proxmox_backup::pxar::{ self, catalog::* };
//use proxmox_backup::backup::image_index::*;
//use proxmox_backup::config::datastore;
use serde_json::{json, Value};
//use hyper::Body;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
use regex::Regex;
use xdg::BaseDirectories;
verbose: bool,
skip_lost_and_found: bool,
crypt_config: Option<Arc<CryptConfig>>,
+ catalog: Arc<Mutex<SimpleCatalog>>,
) -> Result<BackupStats, Error> {
- let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found)?;
+ let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
Ok(result)
}
+fn dump_catalog(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+
+    let repo = extract_repository_from_value(&param)?;
+
+    let path = tools::required_string_param(&param, "snapshot")?;
+ let snapshot = BackupDir::parse(path)?;
+
+ let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
+
+ let crypt_config = match keyfile {
+ None => None,
+ Some(path) => {
+ let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+ Some(Arc::new(CryptConfig::new(key)?))
+ }
+ };
+
+ let client = HttpClient::new(repo.host(), repo.user())?;
+
+ let client = client.start_backup_reader(
+ repo.store(),
+ &snapshot.group().backup_type(),
+ &snapshot.group().backup_id(),
+ snapshot.backup_time(), true).wait()?;
+
+ let writer = Vec::with_capacity(1024*1024);
+ let blob_data = client.download("catalog.blob", writer).wait()?;
+ let blob = DataBlob::from_raw(blob_data)?;
+ blob.verify_crc()?;
+
+ let raw_data = match crypt_config {
+ Some(ref crypt_config) => blob.decode(Some(crypt_config))?,
+ None => blob.decode(None)?,
+ };
+
+ let slice = &raw_data[..];
+ let mut catalog_reader = pxar::catalog::SimpleCatalogReader::new(slice);
+
+ catalog_reader.dump()?;
+
+ record_repository(&repo);
+
+ Ok(Value::Null)
+}
+
fn list_snapshot_files(
param: Value,
_info: &ApiMethod,
};
let file_type = metadata.file_type();
- let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();
+ let extension = target.rsplit('.').next()
+        .ok_or(format_err!("missing target file extension '{}'", target))?;
match extension {
"pxar" => {
if !file_type.is_dir() {
bail!("got unexpected file type (expected directory)");
}
- upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
+ upload_list.push((BackupType::PXAR, filename.to_owned(), format!("{}.didx", target), 0));
}
"img" => {
if size == 0 { bail!("got zero-sized file '{}'", filename); }
- upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
+ upload_list.push((BackupType::IMAGE, filename.to_owned(), format!("{}.fidx", target), size));
}
"conf" => {
if !file_type.is_file() {
bail!("got unexpected file type (expected regular file)");
}
- upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
+ upload_list.push((BackupType::CONFIG, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
"log" => {
if !file_type.is_file() {
bail!("got unexpected file type (expected regular file)");
}
- upload_list.push((BackupType::LOGFILE, filename.to_owned(), target.to_owned(), metadata.len()));
+ upload_list.push((BackupType::LOGFILE, filename.to_owned(), format!("{}.blob", target), metadata.len()));
}
_ => {
bail!("got unknown archive extension '{}'", extension);
let mut file_list = vec![];
+ let catalog_filename = format!("/tmp/pbs-catalog-{}.cat", std::process::id());
+ let catalog = Arc::new(Mutex::new(SimpleCatalog::new(&catalog_filename)?));
+ let mut upload_catalog = false;
+
for (backup_type, filename, target, size) in upload_list {
match backup_type {
BackupType::CONFIG => {
file_list.push((target, stats));
}
BackupType::PXAR => {
+ upload_catalog = true;
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+ catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
let stats = backup_directory(
&client,
&filename,
verbose,
skip_lost_and_found,
crypt_config.clone(),
+ catalog.clone(),
)?;
file_list.push((target, stats));
+ catalog.lock().unwrap().end_directory()?;
}
BackupType::IMAGE => {
println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
}
}
+ // finalize and upload catalog
+ if upload_catalog {
+ let mutex = Arc::try_unwrap(catalog)
+ .map_err(|_| format_err!("unable to get catalog (still used)"))?;
+ drop(mutex); // close catalog
+
+ let target = "catalog.blob";
+ let stats = client.upload_blob_from_file(&catalog_filename, target, crypt_config.clone(), true).wait()?;
+ file_list.push((target.to_owned(), stats));
+
+ let _ = std::fs::remove_file(&catalog_filename);
+ }
+
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
let target = "rsa-encrypted.key";
println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
let stats = client.upload_blob_from_data(rsa_encrypted_key, target, None, false, false).wait()?;
- file_list.push((target.to_owned(), stats));
+ file_list.push((format!("{}.blob", target), stats));
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
/*
acc.push(json!({
"filename": filename,
"size": stats.size,
+ "csum": proxmox::tools::digest_to_hex(&stats.csum),
}));
acc
});
println!("Upload index.json to '{:?}'", repo);
let index_data = serde_json::to_string_pretty(&index)?.into();
- client.upload_blob_from_data(index_data, "index.json", crypt_config.clone(), true, true).wait()?;
+ client.upload_blob_from_data(index_data, "index.json.blob", crypt_config.clone(), true, true).wait()?;
client.finish().wait()?;
let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;
- use std::os::unix::fs::OpenOptionsExt;
-
let tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
- let mut result = vec![];
-
- let repo = match extract_repository_from_map(param) {
- Some(v) => v,
- _ => return result,
- };
-
if arg.matches('/').count() < 2 {
let groups = complete_backup_group(arg, param);
+ let mut result = vec![];
for group in groups {
result.push(group.to_string());
result.push(format!("{}/", group));
return result;
}
- let mut parts = arg.split('/');
- let query = tools::json_object_to_query(json!({
- "backup-type": parts.next().unwrap(),
- "backup-id": parts.next().unwrap(),
- })).unwrap();
+ complete_backup_snapshot(arg, param)
+}
- let path = format!("api2/json/admin/datastore/{}/snapshots?{}", repo.store(), query);
+fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+
+ let mut result = vec![];
+
+ let repo = match extract_repository_from_map(param) {
+ Some(v) => v,
+ _ => return result,
+ };
+
+ let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let data = try_get(&repo, &path);
if let Some(list) = data.as_array() {
for item in list {
- if let Some(filename) = item.as_str() {
+ if let Some(filename) = item["filename"].as_str() {
result.push(filename.to_owned());
}
}
StringSchema::new("Path to encryption key. All data will be encrypted using this key."))
))
.arg_param(vec!["snapshot", "logfile"])
- .completion_cb("snapshot", complete_group_or_snapshot)
+ .completion_cb("snapshot", complete_backup_snapshot)
.completion_cb("logfile", tools::complete_file_name)
.completion_cb("keyfile", tools::complete_file_name)
.completion_cb("repository", complete_repository);
))
.arg_param(vec!["snapshot"])
.completion_cb("repository", complete_repository)
- .completion_cb("snapshot", complete_group_or_snapshot);
+ .completion_cb("snapshot", complete_backup_snapshot);
let garbage_collect_cmd_def = CliCommand::new(
ApiMethod::new(
))
.arg_param(vec!["snapshot"])
.completion_cb("repository", complete_repository)
- .completion_cb("snapshot", complete_group_or_snapshot);
+ .completion_cb("snapshot", complete_backup_snapshot);
+
+ let catalog_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ dump_catalog,
+ ObjectSchema::new("Dump catalog.")
+ .required("snapshot", StringSchema::new("Snapshot path."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ ))
+ .arg_param(vec!["snapshot"])
+ .completion_cb("repository", complete_repository)
+ .completion_cb("snapshot", complete_backup_snapshot);
let prune_cmd_def = CliCommand::new(
ApiMethod::new(
.insert("backup".to_owned(), backup_cmd_def.into())
.insert("upload-log".to_owned(), upload_log_cmd_def.into())
.insert("forget".to_owned(), forget_cmd_def.into())
+ .insert("catalog".to_owned(), catalog_cmd_def.into())
.insert("garbage-collect".to_owned(), garbage_collect_cmd_def.into())
.insert("list".to_owned(), list_cmd_def.into())
.insert("prune".to_owned(), prune_cmd_def.into())