extern crate proxmox_backup;
use failure::*;
-//use std::os::unix::io::AsRawFd;
+use nix::unistd::{fork, ForkResult, pipe};
+use std::os::unix::io::RawFd;
use chrono::{Local, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
-use std::io::{BufReader, Write, Seek, SeekFrom};
+use std::ffi::OsStr;
+use std::io::{BufReader, Read, Write, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
result
}
-fn backup_directory<P: AsRef<Path>>(
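+/// Compute the SHA-256 digest and total byte count of an open file,
+/// reading from the start. Used to verify downloaded blobs against the
+/// sizes and checksums recorded in index.json.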
+fn compute_file_csum(file: &mut std::fs::File) -> Result<([u8; 32], u64), Error> {
+
+ file.seek(SeekFrom::Start(0))?;
+
+ let mut hasher = openssl::sha::Sha256::new();
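+    // 256 KiB read buffer; proxmox::tools::vec::undefined leaves it
+    // uninitialized, since every byte is overwritten by read() before use.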
+ let mut buffer = proxmox::tools::vec::undefined(256*1024);
+ let mut size: u64 = 0;
+
+ loop {
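+        // Retry reads that were interrupted by a signal (EINTR).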
+ let count = match file.read(&mut buffer) {
+ Ok(count) => count,
+ Err(ref err) if err.kind() == std::io::ErrorKind::Interrupted => { continue; }
+ Err(err) => return Err(err.into()),
+ };
+ if count == 0 {
+ break;
+ }
+ size += count as u64;
+ hasher.update(&buffer[..count]);
+ }
+
+ let csum = hasher.finish();
+
+ Ok((csum, size))
+}
+
+
+async fn backup_directory<P: AsRef<Path>>(
client: &BackupClient,
dir_path: P,
archive_name: &str,
) -> Result<BackupStats, Error> {
let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
- let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
+ let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
- let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
+ let (mut tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks
let stream = rx
- .map_err(Error::from)
- .and_then(|x| x); // flatten
+ .map_err(Error::from);
// spawn chunker inside a separate task so that it can run parallel
- tokio::spawn(
- tx.send_all(chunk_stream.then(|r| Ok(r)))
- .map_err(|_| {}).map(|_| ())
- );
+ tokio::spawn(async move {
+ let _ = tx.send_all(&mut chunk_stream).await;
+ });
- let stats = client.upload_stream(archive_name, stream, "dynamic", None, crypt_config).wait()?;
+ let stats = client
+ .upload_stream(archive_name, stream, "dynamic", None, crypt_config)
+ .await?;
Ok(stats)
}
-fn backup_image<P: AsRef<Path>>(
+async fn backup_image<P: AsRef<Path>>(
client: &BackupClient,
image_path: P,
archive_name: &str,
let path = image_path.as_ref().to_owned();
- let file = tokio::fs::File::open(path).wait()?;
+ let file = tokio::fs::File::open(path).await?;
let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
.map_err(Error::from);
let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
- let stats = client.upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config).wait()?;
+ let stats = client
+ .upload_stream(archive_name, stream, "fixed", Some(image_size), crypt_config)
+ .await?;
Ok(stats)
}
let repo = extract_repository_from_value(&param)?;
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
- let mut result = client.get(&path, None).wait()?;
+ let mut result = async_main(async move {
+ client.get(&path, None).await
+ })?;
record_repository(&repo);
let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
args["backup-id"] = group.backup_id().into();
}
- let result = client.get(&path, Some(args)).wait()?;
+ let result = async_main(async move {
+ client.get(&path, Some(args)).await
+ })?;
record_repository(&repo);
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot = BackupDir::parse(path)?;
- let mut client = HttpClient::new(repo.host(), repo.user())?;
+ let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
- let result = client.delete(&path, Some(json!({
- "backup-type": snapshot.group().backup_type(),
- "backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
- }))).wait()?;
+ let result = async_main(async move {
+ client.delete(&path, Some(json!({
+ "backup-type": snapshot.group().backup_type(),
+ "backup-id": snapshot.group().backup_id(),
+ "backup-time": snapshot.backup_time().timestamp(),
+ }))).await
+ })?;
record_repository(&repo);
let repo = extract_repository_from_value(&param)?;
- let client = HttpClient::new(repo.host(), repo.user())?;
- client.login().wait()?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ async_main(async move { client.login().await })?;
record_repository(&repo);
}
};
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
- let client = client.start_backup_reader(
- repo.store(),
- &snapshot.group().backup_type(),
- &snapshot.group().backup_id(),
- snapshot.backup_time(), true).wait()?;
+ async_main(async move {
+ let client = client.start_backup_reader(
+ repo.store(),
+ &snapshot.group().backup_type(),
+ &snapshot.group().backup_id(),
+ snapshot.backup_time(), true).await?;
- let blob_file = std::fs::OpenOptions::new()
- .read(true)
- .write(true)
- .custom_flags(libc::O_TMPFILE)
- .open("/tmp")?;
+ let backup_index_data = download_index_blob(client.clone(), crypt_config.clone()).await?;
+ let backup_index: Value = serde_json::from_slice(&backup_index_data[..])?;
- let mut blob_file = client.download("catalog.blob", blob_file).wait()?;
+ let blob_file = std::fs::OpenOptions::new()
+ .read(true)
+ .write(true)
+ .custom_flags(libc::O_TMPFILE)
+ .open("/tmp")?;
- blob_file.seek(SeekFrom::Start(0))?;
+ let mut blob_file = client.download(CATALOG_BLOB_NAME, blob_file).await?;
- let reader = BufReader::new(blob_file);
- let mut catalog_reader = CatalogBlobReader::new(reader, crypt_config)?;
+ let (csum, size) = compute_file_csum(&mut blob_file)?;
+ verify_index_file(&backup_index, CATALOG_BLOB_NAME, &csum, size)?;
- catalog_reader.dump()?;
+ blob_file.seek(SeekFrom::Start(0))?;
- record_repository(&repo);
+ let reader = BufReader::new(blob_file);
+ let mut catalog_reader = CatalogBlobReader::new(reader, crypt_config)?;
+
+ catalog_reader.dump()?;
+
+ record_repository(&repo);
+
+ Ok::<(), Error>(())
+ })?;
Ok(Value::Null)
}
let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
- let mut result = client.get(&path, Some(json!({
- "backup-type": snapshot.group().backup_type(),
- "backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
- }))).wait()?;
+ let mut result = async_main(async move {
+ client.get(&path, Some(json!({
+ "backup-type": snapshot.group().backup_type(),
+ "backup-id": snapshot.group().backup_id(),
+ "backup-time": snapshot.backup_time().timestamp(),
+ }))).await
+ })?;
record_repository(&repo);
let repo = extract_repository_from_value(&param)?;
- let mut client = HttpClient::new(repo.host(), repo.user())?;
+ let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
- let result = client.post(&path, None).wait()?;
+ let result = async_main(async move { client.post(&path, None).await })?;
record_repository(&repo);
use std::os::unix::fs::FileTypeExt;
- let metadata = match std::fs::metadata(filename) {
- Ok(m) => m,
- Err(err) => bail!("unable to access '{}' - {}", filename, err),
- };
+ let metadata = std::fs::metadata(filename)
+ .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
let file_type = metadata.file_type();
let extension = target.rsplit('.').next()
let backup_time = Utc.timestamp(backup_time_opt.unwrap_or(Utc::now().timestamp()), 0);
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
record_repository(&repo);
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
}
};
- let client = client.start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose).wait()?;
-
- let mut file_list = vec![];
-
- // fixme: encrypt/sign catalog?
- let catalog_file = std::fs::OpenOptions::new()
- .write(true)
- .read(true)
- .custom_flags(libc::O_TMPFILE)
- .open("/tmp")?;
-
- let catalog = Arc::new(Mutex::new(CatalogBlobWriter::new_compressed(catalog_file)?));
- let mut upload_catalog = false;
-
- for (backup_type, filename, target, size) in upload_list {
- match backup_type {
- BackupType::CONFIG => {
- println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
- let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
- file_list.push((target, stats));
- }
- BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
- println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
- let stats = client.upload_blob_from_file(&filename, &target, crypt_config.clone(), true).wait()?;
- file_list.push((target, stats));
- }
- BackupType::PXAR => {
- upload_catalog = true;
- println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
- catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
- let stats = backup_directory(
- &client,
- &filename,
- &target,
- chunk_size_opt,
- devices.clone(),
- verbose,
- skip_lost_and_found,
- crypt_config.clone(),
- catalog.clone(),
- )?;
- file_list.push((target, stats));
- catalog.lock().unwrap().end_directory()?;
- }
- BackupType::IMAGE => {
- println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
- let stats = backup_image(
- &client,
- &filename,
- &target,
- size,
- chunk_size_opt,
- verbose,
- crypt_config.clone(),
- )?;
- file_list.push((target, stats));
+ async_main(async move {
+ let client = client
+ .start_backup(repo.store(), backup_type, &backup_id, backup_time, verbose)
+ .await?;
+
+ let mut file_list = vec![];
+
+ // fixme: encrypt/sign catalog?
+ let catalog_file = std::fs::OpenOptions::new()
+ .write(true)
+ .read(true)
+ .custom_flags(libc::O_TMPFILE)
+ .open("/tmp")?;
+
+ let catalog = Arc::new(Mutex::new(CatalogBlobWriter::new_compressed(catalog_file)?));
+ let mut upload_catalog = false;
+
+ for (backup_type, filename, target, size) in upload_list {
+ match backup_type {
+ BackupType::CONFIG => {
+ println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = client
+ .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+ .await?;
+ file_list.push((target, stats));
+ }
+ BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
+ println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = client
+ .upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
+ .await?;
+ file_list.push((target, stats));
+ }
+ BackupType::PXAR => {
+ upload_catalog = true;
+ println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+ catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
+ let stats = backup_directory(
+ &client,
+ &filename,
+ &target,
+ chunk_size_opt,
+ devices.clone(),
+ verbose,
+ skip_lost_and_found,
+ crypt_config.clone(),
+ catalog.clone(),
+ ).await?;
+ file_list.push((target, stats));
+ catalog.lock().unwrap().end_directory()?;
+ }
+ BackupType::IMAGE => {
+ println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
+ let stats = backup_image(
+ &client,
+ &filename,
+ &target,
+ size,
+ chunk_size_opt,
+ verbose,
+ crypt_config.clone(),
+ ).await?;
+ file_list.push((target, stats));
+ }
}
}
- }
- // finalize and upload catalog
- if upload_catalog {
- let mutex = Arc::try_unwrap(catalog)
- .map_err(|_| format_err!("unable to get catalog (still used)"))?;
- let mut catalog_file = mutex.into_inner().unwrap().finish()?;
+ // finalize and upload catalog
+ if upload_catalog {
+ let mutex = Arc::try_unwrap(catalog)
+ .map_err(|_| format_err!("unable to get catalog (still used)"))?;
+ let mut catalog_file = mutex.into_inner().unwrap().finish()?;
- let target = "catalog.blob";
+ let target = CATALOG_BLOB_NAME;
- catalog_file.seek(SeekFrom::Start(0))?;
+ catalog_file.seek(SeekFrom::Start(0))?;
- let stats = client.upload_blob(catalog_file, target).wait()?;
- file_list.push((target.to_owned(), stats));
- }
+ let stats = client.upload_blob(catalog_file, target).await?;
+ file_list.push((target.to_owned(), stats));
+ }
- if let Some(rsa_encrypted_key) = rsa_encrypted_key {
- let target = "rsa-encrypted.key";
- println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
- let stats = client.upload_blob_from_data(rsa_encrypted_key, target, None, false, false).wait()?;
- file_list.push((format!("{}.blob", target), stats));
-
- // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
- /*
- let mut buffer2 = vec![0u8; rsa.size() as usize];
- let pem_data = file_get_contents("master-private.pem")?;
- let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
- let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
- println!("TEST {} {:?}", len, buffer2);
- */
- }
+ if let Some(rsa_encrypted_key) = rsa_encrypted_key {
+ let target = "rsa-encrypted.key";
+ println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
+ let stats = client
+ .upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
+ .await?;
+ file_list.push((format!("{}.blob", target), stats));
+
+ // openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
+ /*
+ let mut buffer2 = vec![0u8; rsa.size() as usize];
+ let pem_data = file_get_contents("master-private.pem")?;
+ let rsa = openssl::rsa::Rsa::private_key_from_pem(&pem_data)?;
+ let len = rsa.private_decrypt(&buffer, &mut buffer2, openssl::rsa::Padding::PKCS1)?;
+ println!("TEST {} {:?}", len, buffer2);
+ */
+ }
- // create index.json
- let file_list = file_list.iter()
- .fold(vec![], |mut acc, (filename, stats)| {
- acc.push(json!({
- "filename": filename,
- "size": stats.size,
- "csum": proxmox::tools::digest_to_hex(&stats.csum),
- }));
- acc
- });
+ // create index.json
+ let file_list = file_list.iter()
+ .fold(vec![], |mut acc, (filename, stats)| {
+ acc.push(json!({
+ "filename": filename,
+ "size": stats.size,
+ "csum": proxmox::tools::digest_to_hex(&stats.csum),
+ }));
+ acc
+ });
- let index = json!({
- "backup-type": backup_type,
- "backup-id": backup_id,
- "backup-time": backup_time.timestamp(),
- "files": file_list,
- });
+ let index = json!({
+ "backup-type": backup_type,
+ "backup-id": backup_id,
+ "backup-time": backup_time.timestamp(),
+ "files": file_list,
+ });
- println!("Upload index.json to '{:?}'", repo);
- let index_data = serde_json::to_string_pretty(&index)?.into();
- client.upload_blob_from_data(index_data, "index.json.blob", crypt_config.clone(), true, true).wait()?;
+ println!("Upload index.json to '{:?}'", repo);
+ let index_data = serde_json::to_string_pretty(&index)?.into();
+ client
+ .upload_blob_from_data(index_data, "index.json.blob", crypt_config.clone(), true, true)
+ .await?;
- client.finish().wait()?;
+ client.finish().await?;
- let end_time = Local::now();
- let elapsed = end_time.signed_duration_since(start_time);
- println!("Duration: {}", elapsed);
+ let end_time = Local::now();
+ let elapsed = end_time.signed_duration_since(start_time);
+ println!("Duration: {}", elapsed);
- println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+ println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
- Ok(Value::Null)
+ Ok(Value::Null)
+ })
}
fn complete_backup_source(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
+ async_main(restore_do(param))
+}
+
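+/// Download index.json.blob over the reader connection, verify its CRC and
+/// decode (and, with a CryptConfig, decrypt) the contained JSON data.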
+async fn download_index_blob(client: Arc<BackupReader>, crypt_config: Option<Arc<CryptConfig>>) -> Result<Vec<u8>, Error> {
+
+ let index_data = client.download(INDEX_BLOB_NAME, Vec::with_capacity(64*1024)).await?;
+ let blob = DataBlob::from_raw(index_data)?;
+ blob.verify_crc()?;
+ blob.decode(crypt_config)
+}
+
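+/// Check that `name` is listed in the backup index and that the size and
+/// checksum recorded there match the locally computed values.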
+fn verify_index_file(backup_index: &Value, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> {
+
+ let files = backup_index["files"]
+ .as_array()
+ .ok_or_else(|| format_err!("mailformed index - missing 'files' property"))?;
+
+ let info = files.iter().find(|v| {
+ match v["filename"].as_str() {
+ Some(filename) => filename == name,
+ None => false,
+ }
+ });
+
+ let info = match info {
+ None => bail!("index does not contain file '{}'", name),
+ Some(info) => info,
+ };
+
+ match info["size"].as_u64() {
+ None => bail!("index does not contain property 'size' for file '{}'", name),
+ Some(expected_size) => {
+ if expected_size != size {
+ bail!("verify index failed - wrong size for file '{}'", name);
+ }
+ }
+ };
+
+ match info["csum"].as_str() {
+ None => bail!("index does not contain property 'csum' for file '{}'", name),
+ Some(expected_csum) => {
+ let expected_csum = &proxmox::tools::hex_to_digest(expected_csum)?;
+ if expected_csum != csum {
+ bail!("verify index failed - wrong checksum for file '{}'", name);
+ }
+ }
+ };
+
+ Ok(())
+}
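+
+/// Async body of the restore command, driven by the synchronous wrapper above.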
+async fn restore_do(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let verbose = param["verbose"].as_bool().unwrap_or(false);
let archive_name = tools::required_string_param(&param, "archive-name")?;
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
record_repository(&repo);
let result = client.get(&path, Some(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
- }))).wait()?;
+ }))).await?;
let list = result["data"].as_array().unwrap();
if list.len() == 0 {
format!("{}.blob", archive_name)
};
- let client = client.start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true).wait()?;
+ let client = client
+ .start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true)
+ .await?;
let tmpfile = std::fs::OpenOptions::new()
.write(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
- const INDEX_BLOB_NAME: &str = "index.json.blob";
- let index_data = client.download(INDEX_BLOB_NAME, Vec::with_capacity(64*1024)).wait()?;
- let blob = DataBlob::from_raw(index_data)?;
- blob.verify_crc()?;
- let backup_index_data = blob.decode(crypt_config.clone())?;
+ let backup_index_data = download_index_blob(client.clone(), crypt_config.clone()).await?;
let backup_index: Value = serde_json::from_slice(&backup_index_data[..])?;
if server_archive_name == INDEX_BLOB_NAME {
}
} else if server_archive_name.ends_with(".blob") {
- let mut tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+ let mut tmpfile = client.download(&server_archive_name, tmpfile).await?;
+
+ let (csum, size) = compute_file_csum(&mut tmpfile)?;
+ verify_index_file(&backup_index, &server_archive_name, &csum, size)?;
+
tmpfile.seek(SeekFrom::Start(0))?;
let mut reader = DataBlobReader::new(tmpfile, crypt_config)?;
}
} else if server_archive_name.ends_with(".didx") {
- let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+ let tmpfile = client.download(&server_archive_name, tmpfile).await?;
let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;
+    // Note: do not use values stored in index (not trusted) - instead, compute them again
+ let (csum, size) = index.compute_csum();
+
+ verify_index_file(&backup_index, &server_archive_name, &csum, size)?;
+
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
} else if server_archive_name.ends_with(".fidx") {
- let tmpfile = client.download(&server_archive_name, tmpfile).wait()?;
+ let tmpfile = client.download(&server_archive_name, tmpfile).await?;
let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", archive_name, err))?;
+    // Note: do not use values stored in index (not trusted) - instead, compute them again
+ let (csum, size) = index.compute_csum();
+
+ verify_index_file(&backup_index, &server_archive_name, &csum, size)?;
+
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
let snapshot = tools::required_string_param(&param, "snapshot")?;
let snapshot = BackupDir::parse(snapshot)?;
- let mut client = HttpClient::new(repo.host(), repo.user())?;
+ let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
let body = hyper::Body::from(raw_data);
- let result = client.upload("application/octet-stream", body, &path, Some(args)).wait()?;
-
- Ok(result)
+ async_main(async move {
+ client.upload("application/octet-stream", body, &path, Some(args)).await
+ })
}
fn prune(
let repo = extract_repository_from_value(&param)?;
- let mut client = HttpClient::new(repo.host(), repo.user())?;
+ let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
param["backup-type"] = group.backup_type().into();
param["backup-id"] = group.backup_id().into();
- let _result = client.post(&path, Some(param)).wait()?;
+ let _result = async_main(async move { client.post(&path, Some(param)).await })?;
record_repository(&repo);
let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
- let client = HttpClient::new(repo.host(), repo.user())?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
let path = format!("api2/json/admin/datastore/{}/status", repo.store());
- let result = client.get(&path, None).wait()?;
+ let result = async_main(async move { client.get(&path, None).await })?;
let data = &result["data"];
record_repository(&repo);
}
// like get, but simply ignore errors and return Null instead
-fn try_get(repo: &BackupRepository, url: &str) -> Value {
+async fn try_get(repo: &BackupRepository, url: &str) -> Value {
- let client = match HttpClient::new(repo.host(), repo.user()) {
+ let client = match HttpClient::new(repo.host(), repo.user(), None) {
Ok(v) => v,
_ => return Value::Null,
};
- let mut resp = match client.get(url, None).wait() {
+ let mut resp = match client.get(url, None).await {
Ok(v) => v,
_ => return Value::Null,
};
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ async_main(async { complete_backup_group_do(param).await })
+}
+
+async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
- let data = try_get(&repo, &path);
+ let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
}
fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ async_main(async { complete_group_or_snapshot_do(arg, param).await })
+}
+
+async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
if arg.matches('/').count() < 2 {
- let groups = complete_backup_group(arg, param);
+ let groups = complete_backup_group_do(param).await;
let mut result = vec![];
for group in groups {
result.push(group.to_string());
return result;
}
- complete_backup_snapshot(arg, param)
+ complete_backup_snapshot_do(param).await
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ async_main(async { complete_backup_snapshot_do(param).await })
+}
+
+async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
- let data = try_get(&repo, &path);
+ let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ async_main(async { complete_server_file_name_do(param).await })
+}
+
+async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
let mut result = vec![];
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
- let data = try_get(&repo, &path);
+ let data = try_get(&repo, &path).await;
if let Some(list) = data.as_array() {
for item in list {
}
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
-
complete_server_file_name(arg, param)
- .iter().map(|v| strip_server_file_expenstion(&v)).collect()
+ .iter()
+ .map(|v| strip_server_file_expenstion(&v))
+ .collect()
}
fn complete_chunk_size(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
cmd_def
}
+
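+/// Mount a pxar archive as a FUSE filesystem. With --verbose the process
+/// stays in the foreground; otherwise it forks, and the parent returns only
+/// after the child signals over a pipe that the mount is set up.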
+fn mount(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+ let verbose = param["verbose"].as_bool().unwrap_or(false);
+ if verbose {
+        // This stays in the foreground with debug output enabled, as None
+        // is passed for the pipe RawFd (no daemonization).
+ return async_main(mount_do(param, None));
+ }
+
+    // Process should be daemonized.
+    // Make sure to fork before the async runtime is instantiated to avoid trouble.
+ let pipe = pipe()?;
+ match fork() {
+ Ok(ForkResult::Parent { child: _, .. }) => {
+ nix::unistd::close(pipe.1).unwrap();
+ // Blocks the parent process until we are ready to go in the child
+ let _res = nix::unistd::read(pipe.0, &mut [0]).unwrap();
+ Ok(Value::Null)
+ }
+ Ok(ForkResult::Child) => {
+ nix::unistd::close(pipe.0).unwrap();
+ nix::unistd::setsid().unwrap();
+ async_main(mount_do(param, Some(pipe.1)))
+ }
+ Err(_) => bail!("failed to daemonize process"),
+ }
+}
+
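+/// Async part of mount: download and verify the dynamic index, then serve
+/// the archive through pxar::fuse. `pipe` is Some(fd) in the daemonized case.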
+async fn mount_do(param: Value, pipe: Option<RawFd>) -> Result<Value, Error> {
+    let repo = extract_repository_from_value(&param)?;
+    let archive_name = tools::required_string_param(&param, "archive-name")?;
+    let target = tools::required_string_param(&param, "target")?;
+ let client = HttpClient::new(repo.host(), repo.user(), None)?;
+
+ record_repository(&repo);
+
+    let path = tools::required_string_param(&param, "snapshot")?;
+ let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
+ let group = BackupGroup::parse(path)?;
+
+ let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
+ let result = client.get(&path, Some(json!({
+ "backup-type": group.backup_type(),
+ "backup-id": group.backup_id(),
+ }))).await?;
+
+ let list = result["data"].as_array().unwrap();
+    if list.is_empty() {
+        bail!("backup group '{}' does not contain any snapshots", path);
+ }
+
+ let epoch = list[0]["backup-time"].as_i64().unwrap();
+ let backup_time = Utc.timestamp(epoch, 0);
+ (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
+ } else {
+ let snapshot = BackupDir::parse(path)?;
+ (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
+ };
+
+ let keyfile = param["keyfile"].as_str().map(|p| PathBuf::from(p));
+ let crypt_config = match keyfile {
+ None => None,
+ Some(path) => {
+ let (key, _) = load_and_decrtypt_key(&path, get_encryption_key_password)?;
+ Some(Arc::new(CryptConfig::new(key)?))
+ }
+ };
+
+ let server_archive_name = if archive_name.ends_with(".pxar") {
+ format!("{}.didx", archive_name)
+ } else {
+ bail!("Can only mount pxar archives.");
+ };
+
+ let client = client
+ .start_backup_reader(repo.store(), &backup_type, &backup_id, backup_time, true)
+ .await?;
+
+ let tmpfile = std::fs::OpenOptions::new()
+ .write(true)
+ .read(true)
+ .custom_flags(libc::O_TMPFILE)
+ .open("/tmp")?;
+
+ let backup_index_data = download_index_blob(client.clone(), crypt_config.clone()).await?;
+ let backup_index: Value = serde_json::from_slice(&backup_index_data[..])?;
+ if server_archive_name.ends_with(".didx") {
+ let tmpfile = client.download(&server_archive_name, tmpfile).await?;
+ let index = DynamicIndexReader::new(tmpfile)
+ .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", archive_name, err))?;
+
+        // Note: do not use values stored in index (not trusted) - instead, compute them again
+ let (csum, size) = index.compute_csum();
+ verify_index_file(&backup_index, &server_archive_name, &csum, size)?;
+
+ let most_used = index.find_most_used_chunks(8);
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+ let reader = BufferedDynamicReader::new(index, chunk_reader);
+ let decoder =
+ pxar::Decoder::<Box<dyn pxar::fuse::ReadSeek>, fn(&Path) -> Result<(), Error>>::new(
+ Box::new(reader),
+ |_| Ok(()),
+ )?;
+ let options = OsStr::new("ro,default_permissions");
+ let mut session = pxar::fuse::Session::from_decoder(decoder, &options, pipe.is_none())
+ .map_err(|err| format_err!("pxar mount failed: {}", err))?;
+
+        // Mount the session, but do not call the FUSE daemonize helper, as
+        // that would cause issues with the runtime after the fork.
+        let daemonize = false;
+        session.mount(&Path::new(target), daemonize)?;
+
+ if let Some(pipe) = pipe {
+ nix::unistd::chdir(Path::new("/")).unwrap();
+            // Finish creating the daemon by redirecting the standard file descriptors.
+ let nullfd = nix::fcntl::open(
+ "/dev/null",
+ nix::fcntl::OFlag::O_RDWR,
+ nix::sys::stat::Mode::empty(),
+ ).unwrap();
+ nix::unistd::dup2(nullfd, 0).unwrap();
+ nix::unistd::dup2(nullfd, 1).unwrap();
+ nix::unistd::dup2(nullfd, 2).unwrap();
+ if nullfd > 2 {
+ nix::unistd::close(nullfd).unwrap();
+ }
+ // Signal the parent process that we are done with the setup and it can
+ // terminate.
+ nix::unistd::write(pipe, &mut [0u8])?;
+ nix::unistd::close(pipe).unwrap();
+ }
+
+ let multithreaded = true;
+ session.run_loop(multithreaded)?;
+ } else {
+ bail!("unknown archive file extension (expected .pxar)");
+ }
+
+ Ok(Value::Null)
+}
+
fn main() {
let backup_source_schema: Arc<Schema> = Arc::new(
))
.completion_cb("repository", complete_repository);
+ let mount_cmd_def = CliCommand::new(
+ ApiMethod::new(
+ mount,
+ ObjectSchema::new("Mount pxar archive.")
+ .required("snapshot", StringSchema::new("Group/Snapshot path."))
+ .required("archive-name", StringSchema::new("Backup archive name."))
+ .required("target", StringSchema::new("Target directory path."))
+ .optional("repository", REPO_URL_SCHEMA.clone())
+ .optional("keyfile", StringSchema::new("Path to encryption key."))
+ .optional("verbose", BooleanSchema::new("Verbose output.").default(false))
+ ))
+ .arg_param(vec!["snapshot", "archive-name", "target"])
+ .completion_cb("repository", complete_repository)
+ .completion_cb("snapshot", complete_group_or_snapshot)
+ .completion_cb("archive-name", complete_archive_name)
+ .completion_cb("target", tools::complete_file_name);
+
let cmd_def = CliCommandMap::new()
.insert("backup".to_owned(), backup_cmd_def.into())
.insert("upload-log".to_owned(), upload_log_cmd_def.into())
.insert("snapshots".to_owned(), snapshots_cmd_def.into())
.insert("files".to_owned(), files_cmd_def.into())
.insert("status".to_owned(), status_cmd_def.into())
- .insert("key".to_owned(), key_mgmt_cli().into());
+ .insert("key".to_owned(), key_mgmt_cli().into())
+ .insert("mount".to_owned(), mount_cmd_def.into());
- hyper::rt::run(futures::future::lazy(move || {
- run_cli_command(cmd_def.into());
- Ok(())
- }));
+ run_cli_command(cmd_def.into());
+}
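+
+/// Run a future to completion on a dedicated tokio runtime and shut the
+/// runtime down afterwards; bridges the synchronous CLI code with async fns.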
+fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ let ret = rt.block_on(fut);
+ rt.shutdown_now();
+ ret
}