use failure::*;
//use std::os::unix::io::AsRawFd;
-use chrono::{DateTime, Local, TimeZone};
+use chrono::{Local, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::HashMap;
//use proxmox_backup::backup::datastore::*;
use serde_json::{json, Value};
-use hyper::Body;
+//use hyper::Body;
use std::sync::Arc;
use regex::Regex;
use xdg::BaseDirectories;
use tokio::sync::mpsc;
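+// A backupspec maps an archive name to its source, e.g. "<name>.pxar:<directory>",
+// "<name>.img:<file or block device>" or "<name>.conf:<config file>".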
lazy_static! {
- static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|raw)):(.+)$").unwrap();
+ static ref BACKUPSPEC_REGEX: Regex = Regex::new(r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf)):(.+)$").unwrap();
}
client: &BackupClient,
dir_path: P,
archive_name: &str,
- chunk_size: Option<u64>,
+ chunk_size: Option<usize>,
all_file_systems: bool,
verbose: bool,
) -> Result<(), Error> {
- if let Some(_size) = chunk_size {
- unimplemented!();
- }
-
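+ // stream the directory as a pxar archive and cut it into content-defined chunks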
let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), all_file_systems, verbose)?;
- let chunk_stream = ChunkStream::new(pxar_stream);
+ let chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
let (tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks
// spawn the chunker inside a separate task so that it can run in parallel
tokio::spawn(
tx.send_all(chunk_stream.then(|r| Ok(r)))
- .map_err(|e| {}).map(|_| ())
+ .map_err(|_| {}).map(|_| ())
);
// forward the chunks received from the chunker task as the upload stream
let stream = rx.map_err(Error::from);

client.upload_stream(archive_name, stream, "dynamic", None).wait()?;
Ok(())
}
-/****
-fn backup_image(datastore: &DataStore, file: &std::fs::File, size: usize, target: &str, chunk_size: usize) -> Result<(), Error> {
+fn backup_image<P: AsRef<Path>>(
+ client: &BackupClient,
+ image_path: P,
+ archive_name: &str,
+ image_size: u64,
+ chunk_size: Option<usize>,
+ _verbose: bool,
+) -> Result<(), Error> {
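+ // Read the image as a byte stream, re-chunk it into fixed-size pieces
+ // (4 MiB by default) and upload it as a "fixed" index archive.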
- let mut target = PathBuf::from(target);
+ let path = image_path.as_ref().to_owned();
- if let Some(ext) = target.extension() {
- if ext != "fidx" {
- bail!("got wrong file extension - expected '.fidx'");
- }
- } else {
- target.set_extension("fidx");
- }
+ let file = tokio::fs::File::open(path).wait()?;
- let mut index = datastore.create_image_writer(&target, size, chunk_size)?;
+ let stream = tokio::codec::FramedRead::new(file, tokio::codec::BytesCodec::new())
+ .map_err(Error::from);
- tools::file_chunker(file, chunk_size, |pos, chunk| {
- index.add_chunk(pos, chunk)?;
- Ok(true)
- })?;
+ let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
- index.close()?; // commit changes
+ client.upload_stream(archive_name, stream, "fixed", Some(image_size)).wait()?;
Ok(())
}
-*/
fn strip_chunked_file_expenstions(list: Vec<String>) -> Vec<String> {
fn list_backups(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(&param, "repository")?;
fn list_backup_groups(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(&param, "repository")?;
fn list_snapshots(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(&param, "repository")?;
fn forget_snapshots(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(&param, "repository")?;
fn start_garbage_collection(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(&param, "repository")?;
fn create_backup(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(&param, "repository")?;
let verbose = param["verbose"].as_bool().unwrap_or(false);
- let chunk_size_opt = param["chunk-size"].as_u64().map(|v| v*1024);
+ let chunk_size_opt = param["chunk-size"].as_u64().map(|v| (v*1024) as usize);
if let Some(size) = chunk_size_opt {
verify_chunk_size(size)?;
let mut upload_list = vec![];
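+ // classify each backupspec by its archive extension so the upload loop can pick the right upload method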
+ enum BackupType { PXAR, IMAGE, CONFIG };
+
for backupspec in backupspec_list {
let (target, filename) = parse_backupspec(backupspec.as_str().unwrap())?;
use std::os::unix::fs::FileTypeExt; // needed for is_block_device() below

let metadata = match std::fs::metadata(filename) {
    Ok(m) => m,
    Err(err) => bail!("unable to access '{}' - {}", filename, err),
};
let file_type = metadata.file_type();
- if file_type.is_dir() {
-
- upload_list.push((filename.to_owned(), target.to_owned()));
-
- } else if file_type.is_file() || file_type.is_block_device() {
-
- let size = tools::image_size(&PathBuf::from(filename))?;
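+ // the backupspec regex only accepts the extensions handled below, so `target` always has one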
+ let extension = Path::new(target).extension().map(|s| s.to_str().unwrap()).unwrap();
- if size == 0 { bail!("got zero-sized file '{}'", filename); }
+ match extension {
+ "pxar" => {
+ if !file_type.is_dir() {
+ bail!("got unexpected file type (expected directory)");
+ }
+ upload_list.push((BackupType::PXAR, filename.to_owned(), target.to_owned(), 0));
+ }
+ "img" => {
- panic!("implement me");
+ if !(file_type.is_file() || file_type.is_block_device()) {
+ bail!("got unexpected file type (expected file or block device)");
+ }
- //backup_image(&datastore, &file, size, &target, chunk_size)?;
+ let size = tools::image_size(&PathBuf::from(filename))?;
- // let idx = datastore.open_image_reader(target)?;
- // idx.print_info();
+ if size == 0 { bail!("got zero-sized file '{}'", filename); }
- } else {
- bail!("unsupported file type (expected a directory, file or block device)");
+ upload_list.push((BackupType::IMAGE, filename.to_owned(), target.to_owned(), size));
+ }
+ "conf" => {
+ if !file_type.is_file() {
+ bail!("got unexpected file type (expected regular file)");
+ }
+ upload_list.push((BackupType::CONFIG, filename.to_owned(), target.to_owned(), metadata.len()));
+ }
+ _ => {
+ bail!("got unknown archive extension '{}'", extension);
+ }
}
}
let client = client.start_backup(repo.store(), "host", &backup_id, verbose).wait()?;
- for (filename, target) in upload_list {
- println!("Upload '{}' to '{:?}' as {}", filename, repo, target);
- backup_directory(&client, &filename, &target, chunk_size_opt, all_file_systems, verbose)?;
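+ // upload each entry using the method that matches its backup type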
+ for (backup_type, filename, target, size) in upload_list {
+ match backup_type {
+ BackupType::CONFIG => {
+ println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
+ client.upload_config(&filename, &target).wait()?;
+ }
+ BackupType::PXAR => {
+ println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+ backup_directory(&client, &filename, &target, chunk_size_opt, all_file_systems, verbose)?;
+ }
+ BackupType::IMAGE => {
+ println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
+ backup_image(&client, &filename, &target, size, chunk_size_opt, verbose)?;
+ }
+ }
}
client.finish().wait()?;
fn restore(
param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(¶m, "repository")?;
fn prune(
mut param: Value,
_info: &ApiMethod,
- _rpcenv: &mut RpcEnvironment,
+ _rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let repo_url = tools::required_string_param(¶m, "repository")?;