use std::collections::{HashSet, HashMap};
-use std::io::{self, Write, Seek, SeekFrom};
+use std::convert::TryFrom;
+use std::io::{self, Read, Write, Seek, SeekFrom};
+use std::os::unix::io::{FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::Context;
use anyhow::{bail, format_err, Error};
-use chrono::{Local, DateTime, Utc, TimeZone};
use futures::future::FutureExt;
use futures::stream::{StreamExt, TryStreamExt};
use serde_json::{json, Value};
use xdg::BaseDirectories;
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
-use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
-use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
-use proxmox::api::schema::*;
-use proxmox::api::cli::*;
-use proxmox::api::api;
+use proxmox::{
+ tools::{
+ time::{strftime_local, epoch_i64},
+ fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size},
+ },
+ api::{
+ api,
+ ApiHandler,
+ ApiMethod,
+ RpcEnvironment,
+ schema::*,
+ cli::*,
+ },
+};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use proxmox_backup::tools;
+use proxmox_backup::api2::access::user::UserWithTokens;
use proxmox_backup::api2::types::*;
+use proxmox_backup::api2::version;
use proxmox_backup::client::*;
use proxmox_backup::pxar::catalog::*;
use proxmox_backup::backup::{
archive_type,
- load_and_decrypt_key,
+ decrypt_key,
verify_chunk_size,
ArchiveType,
AsyncReadChunk,
"Path to encryption key. All data will be encrypted using this key.")
.schema();
+pub const KEYFD_SCHEMA: Schema = IntegerSchema::new(
+ "Pass an encryption key via an already opened file descriptor.")
+ .minimum(0)
+ .schema();
+
const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new(
"Chunk size in KB. Must be a power of 2.")
.minimum(64)
result
}
-fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
+fn connect(repo: &BackupRepository) -> Result<HttpClient, Error> {
+ connect_do(repo.host(), repo.port(), repo.auth_id())
+ .map_err(|err| format_err!("error building client for repository {} - {}", repo, err))
+}
+fn connect_do(server: &str, port: u16, auth_id: &Authid) -> Result<HttpClient, Error> {
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
use std::env::VarError::*;
.fingerprint_cache(true)
.ticket_cache(true);
- HttpClient::new(server, userid, options)
+ HttpClient::new(server, port, auth_id, options)
}
async fn view_task_result(
client: &HttpClient,
store: &str,
group: BackupGroup,
-) -> Result<(String, String, DateTime<Utc>), Error> {
+) -> Result<(String, String, i64), Error> {
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
- let backup_time = Utc.timestamp(list[0].backup_time, 0);
+ let backup_time = list[0].backup_time;
Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
}
async fn backup_directory<P: AsRef<Path>>(
client: &BackupWriter,
- crypt_mode: CryptMode,
previous_manifest: Option<Arc<BackupManifest>>,
dir_path: P,
archive_name: &str,
catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
exclude_pattern: Vec<MatchEntry>,
entries_max: usize,
+ compress: bool,
+ encrypt: bool,
) -> Result<BackupStats, Error> {
let pxar_stream = PxarBackupStream::open(
});
let stats = client
- .upload_stream(crypt_mode, previous_manifest, archive_name, stream, "dynamic", None)
+ .upload_stream(previous_manifest, archive_name, stream, "dynamic", None, compress, encrypt)
.await?;
Ok(stats)
async fn backup_image<P: AsRef<Path>>(
client: &BackupWriter,
- crypt_mode: CryptMode,
previous_manifest: Option<Arc<BackupManifest>>,
image_path: P,
archive_name: &str,
image_size: u64,
chunk_size: Option<usize>,
+ compress: bool,
+ encrypt: bool,
_verbose: bool,
) -> Result<BackupStats, Error> {
let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4*1024*1024));
let stats = client
- .upload_stream(crypt_mode, previous_manifest, archive_name, stream, "fixed", Some(image_size))
+ .upload_stream(previous_manifest, archive_name, stream, "fixed", Some(image_size), compress, encrypt)
.await?;
Ok(stats)
let repo = extract_repository_from_value(&param)?;
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.last_backup)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
Ok(Value::Null)
}
+#[api(
+ input: {
+ properties: {
+ repository: {
+ schema: REPO_URL_SCHEMA,
+ optional: true,
+ },
+ group: {
+ type: String,
+ description: "Backup group.",
+ },
+ "new-owner": {
+ type: Authid,
+ },
+ }
+ }
+)]
+/// Change owner of a backup group
+async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Error> {
+
+ let repo = extract_repository_from_value(&param)?;
+
+ let mut client = connect(&repo)?;
+
+ param.as_object_mut().unwrap().remove("repository");
+
+ let group: BackupGroup = group.parse()?;
+
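+ // the change-owner API expects the group split into backup-type and backup-id parameters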
+ param["backup-type"] = group.backup_type().into();
+ param["backup-id"] = group.backup_id().into();
+
+ let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
+ client.post(&path, Some(param)).await?;
+
+ record_repository(&repo);
+
+ Ok(())
+}
+
#[api(
input: {
properties: {
let output_format = get_output_format(&param);
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
let group: Option<BackupGroup> = if let Some(path) = param["group"].as_str() {
Some(path.parse()?)
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
- .column(ColumnConfig::new("size"))
+ .column(ColumnConfig::new("size").renderer(tools::format::render_bytes_human_readable))
.column(ColumnConfig::new("files").renderer(render_files))
;
let path = tools::required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = path.parse()?;
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let result = client.delete(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
}))).await?;
record_repository(&repo);
let repo = extract_repository_from_value(&param)?;
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
client.login().await?;
record_repository(&repo);
Ok(Value::Null)
}
+#[api(
+ input: {
+ properties: {
+ repository: {
+ schema: REPO_URL_SCHEMA,
+ optional: true,
+ },
+ "output-format": {
+ schema: OUTPUT_FORMAT,
+ optional: true,
+ },
+ }
+ }
+)]
+/// Show client and optional server version
+async fn api_version(param: Value) -> Result<(), Error> {
+
+ let output_format = get_output_format(&param);
+
+ let mut version_info = json!({
+ "client": {
+ "version": version::PROXMOX_PKG_VERSION,
+ "release": version::PROXMOX_PKG_RELEASE,
+ "repoid": version::PROXMOX_PKG_REPOID,
+ }
+ });
+
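+ // querying the server version is best-effort: skipped without a configured repository, non-fatal on connection errors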
+ let repo = extract_repository_from_value(&param);
+ if let Ok(repo) = repo {
+ let client = connect(&repo)?;
+
+ match client.get("api2/json/version", None).await {
+ Ok(mut result) => version_info["server"] = result["data"].take(),
+ Err(e) => eprintln!("could not connect to server - {}", e),
+ }
+ }
+ if output_format == "text" {
+ println!("client version: {}.{}", version::PROXMOX_PKG_VERSION, version::PROXMOX_PKG_RELEASE);
+ if let Some(server) = version_info["server"].as_object() {
+ let server_version = server["version"].as_str().unwrap();
+ let server_release = server["release"].as_str().unwrap();
+ println!("server version: {}.{}", server_version, server_release);
+ }
+ } else {
+ format_and_print_result(&version_info, &output_format);
+ }
+
+ Ok(())
+}
+
#[api(
input: {
let output_format = get_output_format(&param);
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
let mut result = client.get(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
}))).await?;
record_repository(&repo);
let output_format = get_output_format(&param);
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
fn spawn_catalog_upload(
client: Arc<BackupWriter>,
- crypt_mode: CryptMode,
+ encrypt: bool,
) -> Result<
(
Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
tokio::spawn(async move {
let catalog_upload_result = client
- .upload_stream(crypt_mode, None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None)
+ .upload_stream(None, CATALOG_NAME, catalog_chunk_stream, "dynamic", None, true, encrypt)
.await;
if let Err(ref err) = catalog_upload_result {
Ok((catalog, catalog_result_rx))
}
-fn keyfile_parameters(param: &Value) -> Result<(Option<PathBuf>, CryptMode), Error> {
+fn keyfile_parameters(param: &Value) -> Result<(Option<Vec<u8>>, CryptMode), Error> {
let keyfile = match param.get("keyfile") {
Some(Value::String(keyfile)) => Some(keyfile),
Some(_) => bail!("bad --keyfile parameter type"),
None => None,
};
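+ // optional --keyfd: pass the encryption key via an already opened file descriptor instead of a path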
+ let key_fd = match param.get("keyfd") {
+ Some(Value::Number(key_fd)) => Some(
+ RawFd::try_from(key_fd
+ .as_i64()
+ .ok_or_else(|| format_err!("bad key fd: {:?}", key_fd))?
+ )
+ .map_err(|err| format_err!("bad key fd: {:?}: {}", key_fd, err))?
+ ),
+ Some(_) => bail!("bad --keyfd parameter type"),
+ None => None,
+ };
+
let crypt_mode: Option<CryptMode> = match param.get("crypt-mode") {
Some(mode) => Some(serde_json::from_value(mode.clone())?),
None => None,
};
- Ok(match (keyfile, crypt_mode) {
+ let keydata = match (keyfile, key_fd) {
+ (None, None) => None,
+ (Some(_), Some(_)) => bail!("--keyfile and --keyfd are mutually exclusive"),
+ (Some(keyfile), None) => {
+ println!("Using encryption key file: {}", keyfile);
+ Some(file_get_contents(keyfile)?)
+ },
+ (None, Some(fd)) => {
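+ // take ownership of the raw fd; the temporary File is dropped right after read_to_end, closing the descriptor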
+ let input = unsafe { std::fs::File::from_raw_fd(fd) };
+ let mut data = Vec::new();
+ let _len: usize = { input }.read_to_end(&mut data)
+ .map_err(|err| {
+ format_err!("error reading encryption key from fd {}: {}", fd, err)
+ })?;
+ println!("Using encryption key from file descriptor");
+ Some(data)
+ }
+ };
+
+ Ok(match (keydata, crypt_mode) {
// no parameters:
- (None, None) => (key::find_default_encryption_key()?, CryptMode::Encrypt),
+ (None, None) => match key::read_optional_default_encryption_key()? {
+ Some(key) => {
+ println!("Encrypting with default encryption key!");
+ (Some(key), CryptMode::Encrypt)
+ },
+ None => (None, CryptMode::None),
+ },
// just --crypt-mode=none
(None, Some(CryptMode::None)) => (None, CryptMode::None),
// just --crypt-mode other than none
- (None, Some(crypt_mode)) => match key::find_default_encryption_key()? {
+ (None, Some(crypt_mode)) => match key::read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
- Some(path) => (Some(path), crypt_mode),
+ Some(key) => {
+ println!("Encrypting with default encryption key!");
+ (Some(key), crypt_mode)
+ },
}
// just --keyfile
- (Some(keyfile), None) => (Some(PathBuf::from(keyfile)), CryptMode::Encrypt),
+ (Some(key), None) => (Some(key), CryptMode::Encrypt),
// --keyfile and --crypt-mode=none
(Some(_), Some(CryptMode::None)) => {
- bail!("--keyfile and --crypt-mode=none are mutually exclusive");
+ bail!("--keyfile/--keyfd and --crypt-mode=none are mutually exclusive");
}
// --keyfile and --crypt-mode other than none
- (Some(keyfile), Some(crypt_mode)) => (Some(PathBuf::from(keyfile)), crypt_mode),
+ (Some(key), Some(crypt_mode)) => (Some(key), crypt_mode),
})
}
description: "Path to file.",
}
},
+ "all-file-systems": {
+ type: Boolean,
+ description: "Include all mounted subdirectories.",
+ optional: true,
+ },
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
+ "keyfd": {
+ schema: KEYFD_SCHEMA,
+ optional: true,
+ },
"crypt-mode": {
type: CryptMode,
optional: true,
verify_chunk_size(size)?;
}
- let (keyfile, crypt_mode) = keyfile_parameters(&param)?;
+ let (keydata, crypt_mode) = keyfile_parameters(&param)?;
let backup_id = param["backup-id"].as_str().unwrap_or(&proxmox::tools::nodename());
}
let mut upload_list = vec![];
+ let mut target_set = HashSet::new();
for backupspec in backupspec_list {
let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
let filename = &spec.config_string;
let target = &spec.archive_name;
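+ // reject backup specifications that use the same archive name twice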
+ if target_set.contains(target) {
+ bail!("got target twice: '{}'", target);
+ }
+ target_set.insert(target.to_string());
+
use std::os::unix::fs::FileTypeExt;
let metadata = std::fs::metadata(filename)
}
}
- let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
+ let backup_time = backup_time_opt.unwrap_or_else(|| epoch_i64());
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
record_repository(&repo);
- println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
+ println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time)?);
println!("Client name: {}", proxmox::tools::nodename());
- let start_time = Local::now();
+ let start_time = std::time::Instant::now();
- println!("Starting protocol: {}", start_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+ println!("Starting backup protocol: {}", strftime_local("%c", epoch_i64())?);
- let (crypt_config, rsa_encrypted_key) = match keyfile {
+ let (crypt_config, rsa_encrypted_key) = match keydata {
None => (None, None),
- Some(path) => {
- let (key, created) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
+ Some(key) => {
+ let (key, created, _fingerprint) = decrypt_key(&key, &key::get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
- let path = master_pubkey_path()?;
- if path.exists() {
- let pem_data = file_get_contents(&path)?;
- let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
- let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
- (Some(Arc::new(crypt_config)), Some(enc_key))
- } else {
- (Some(Arc::new(crypt_config)), None)
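+ // if a master public key is configured, also create an RSA-encrypted copy of the encryption key for recovery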
+ match key::find_master_pubkey()? {
+ Some(ref path) if path.exists() => {
+ let pem_data = file_get_contents(path)?;
+ let rsa = openssl::rsa::Rsa::public_key_from_pem(&pem_data)?;
+ let enc_key = crypt_config.generate_rsa_encoded_key(rsa, created)?;
+ (Some(Arc::new(crypt_config)), Some(enc_key))
+ }
+ _ => (Some(Arc::new(crypt_config)), None),
}
}
};
&backup_id,
backup_time,
verbose,
+ false
).await?;
let previous_manifest = if let Ok(previous_manifest) = client.download_previous_manifest().await {
None
};
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time.timestamp());
+ let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let mut manifest = BackupManifest::new(snapshot);
let mut catalog = None;
for (backup_type, filename, target, size) in upload_list {
match backup_type {
BackupSpecificationType::CONFIG => {
- println!("Upload config file '{}' to '{:?}' as {}", filename, repo, target);
+ println!("Upload config file '{}' to '{}' as {}", filename, repo, target);
let stats = client
- .upload_blob_from_file(&filename, &target, true, crypt_mode)
+ .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
.await?;
manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
}
BackupSpecificationType::LOGFILE => { // fixme: remove - not needed anymore ?
- println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
+ println!("Upload log file '{}' to '{}' as {}", filename, repo, target);
let stats = client
- .upload_blob_from_file(&filename, &target, true, crypt_mode)
+ .upload_blob_from_file(&filename, &target, true, crypt_mode == CryptMode::Encrypt)
.await?;
manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
}
BackupSpecificationType::PXAR => {
// start catalog upload on first use
if catalog.is_none() {
- let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode)?;
+ let (cat, res) = spawn_catalog_upload(client.clone(), crypt_mode == CryptMode::Encrypt)?;
catalog = Some(cat);
catalog_result_tx = Some(res);
}
let catalog = catalog.as_ref().unwrap();
- println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
+ println!("Upload directory '{}' to '{}' as {}", filename, repo, target);
catalog.lock().unwrap().start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
let stats = backup_directory(
&client,
- crypt_mode,
previous_manifest.clone(),
&filename,
&target,
catalog.clone(),
pattern_list.clone(),
entries_max as usize,
+ true,
+ crypt_mode == CryptMode::Encrypt,
).await?;
manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
catalog.lock().unwrap().end_directory()?;
println!("Upload image '{}' to '{:?}' as {}", filename, repo, target);
let stats = backup_image(
&client,
- crypt_mode,
previous_manifest.clone(),
&filename,
&target,
size,
chunk_size_opt,
+ true,
+ crypt_mode == CryptMode::Encrypt,
verbose,
).await?;
manifest.add_file(target, stats.size, stats.csum, crypt_mode)?;
}
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
- let target = "rsa-encrypted.key";
+ let target = "rsa-encrypted.key.blob";
println!("Upload RSA encoded key to '{:?}' as {}", repo, target);
let stats = client
- .upload_blob_from_data(rsa_encrypted_key, target, false, CryptMode::None)
+ .upload_blob_from_data(rsa_encrypted_key, target, false, false)
.await?;
- manifest.add_file(format!("{}.blob", target), stats.size, stats.csum, crypt_mode)?;
+ manifest.add_file(target.to_string(), stats.size, stats.csum, crypt_mode)?;
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
/*
println!("TEST {} {:?}", len, buffer2);
*/
}
-
// create manifest (index.json)
- let manifest = manifest.into_json();
-
- println!("Upload index.json to '{:?}'", repo);
- let manifest = serde_json::to_string_pretty(&manifest)?.into();
- // manifests are never encrypted
- let manifest_crypt_mode = match crypt_mode {
- CryptMode::None => CryptMode::None,
- _ => CryptMode::SignOnly,
- };
+ // manifests are never encrypted, but include a signature
+ let manifest = manifest.to_string(crypt_config.as_ref().map(Arc::as_ref))
+ .map_err(|err| format_err!("unable to format manifest - {}", err))?;
+
+
+ if verbose { println!("Upload index.json to '{}'", repo) };
client
- .upload_blob_from_data(manifest, MANIFEST_BLOB_NAME, true, manifest_crypt_mode)
+ .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, true, false)
.await?;
client.finish().await?;
- let end_time = Local::now();
- let elapsed = end_time.signed_duration_since(start_time);
- println!("Duration: {}", elapsed);
+ let end_time = std::time::Instant::now();
+ let elapsed = end_time.duration_since(start_time);
+ println!("Duration: {:.2}s", elapsed.as_secs_f64());
- println!("End Time: {}", end_time.to_rfc3339_opts(chrono::SecondsFormat::Secs, false));
+ println!("End Time: {}", strftime_local("%c", epoch_i64())?);
Ok(Value::Null)
}
async fn dump_image<W: Write>(
client: Arc<BackupReader>,
crypt_config: Option<Arc<CryptConfig>>,
+ crypt_mode: CryptMode,
index: FixedIndexReader,
mut writer: W,
verbose: bool,
let most_used = index.find_most_used_chunks(8);
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, crypt_mode, most_used);
// Note: we avoid using BufferedFixedReader, because that adds an additional buffer/copy
// and thus slows down reading. Instead, directly use RemoteChunkReader
schema: KEYFILE_SCHEMA,
optional: true,
},
+ "keyfd": {
+ schema: KEYFD_SCHEMA,
+ optional: true,
+ },
"crypt-mode": {
type: CryptMode,
optional: true,
let archive_name = tools::required_string_param(&param, "archive-name")?;
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
record_repository(&repo);
let target = tools::required_string_param(&param, "target")?;
let target = if target == "-" { None } else { Some(target) };
- let (keyfile, _crypt_mode) = keyfile_parameters(&param)?;
+ let (keydata, _crypt_mode) = keyfile_parameters(&param)?;
- let crypt_config = match keyfile {
+ let crypt_config = match keydata {
None => None,
- Some(path) => {
- let (key, _) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
+ Some(key) => {
+ let (key, _, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
true,
).await?;
- let manifest = client.download_manifest().await?;
+ let (manifest, backup_index_data) = client.download_manifest().await?;
let (archive_name, archive_type) = parse_archive_type(archive_name);
if archive_name == MANIFEST_BLOB_NAME {
- let backup_index_data = manifest.into_json().to_string();
if let Some(target) = target {
- replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
+ replace_file(target, &backup_index_data, CreateOptions::new())?;
} else {
let stdout = std::io::stdout();
let mut writer = stdout.lock();
- writer.write_all(backup_index_data.as_bytes())
+ writer.write_all(&backup_index_data)
.map_err(|err| format_err!("unable to pipe data - {}", err))?;
}
- } else if archive_type == ArchiveType::Blob {
+ return Ok(Value::Null);
+ }
+
+ let file_info = manifest.lookup_file_info(&archive_name)?;
+
+ if archive_type == ArchiveType::Blob {
let mut reader = client.download_blob(&manifest, &archive_name).await?;
let most_used = index.find_most_used_chunks(8);
- let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, most_used);
+ let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
pxar::decoder::Decoder::from_std(reader)?,
Path::new(target),
&[],
+ true,
proxmox_backup::pxar::Flags::DEFAULT,
allow_existing_dirs,
|path| {
println!("{:?}", path);
}
},
+ None,
)
.map_err(|err| format_err!("error extracting archive - {}", err))?;
} else {
.map_err(|err| format_err!("unable to open /dev/stdout - {}", err))?
};
- dump_image(client.clone(), crypt_config.clone(), index, &mut writer, verbose).await?;
+ dump_image(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), index, &mut writer, verbose).await?;
}
Ok(Value::Null)
schema: KEYFILE_SCHEMA,
optional: true,
},
+ "keyfd": {
+ schema: KEYFD_SCHEMA,
+ optional: true,
+ },
"crypt-mode": {
type: CryptMode,
optional: true,
let snapshot = tools::required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = snapshot.parse()?;
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(&repo)?;
- let (keyfile, crypt_mode) = keyfile_parameters(&param)?;
+ let (keydata, crypt_mode) = keyfile_parameters(&param)?;
- let crypt_config = match keyfile {
+ let crypt_config = match keydata {
None => None,
- Some(path) => {
- let (key, _created) = load_and_decrypt_key(&path, &key::get_encryption_key_password)?;
+ Some(key) => {
+ let (key, _created, _) = decrypt_key(&key, &key::get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
let data = file_get_contents(logfile)?;
+ // fixme: howto sign log?
let blob = match crypt_mode {
- CryptMode::None => DataBlob::encode(&data, None, true)?,
- CryptMode::Encrypt => {
- DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?
- }
- CryptMode::SignOnly => DataBlob::create_signed(
- &data,
- crypt_config
- .ok_or_else(|| format_err!("cannot sign without crypt config"))?
- .as_ref(),
- true,
- )?,
+ CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
+ CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
};
let raw_data = blob.into_inner();
let args = json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
});
let body = hyper::Body::from(raw_data);
async fn prune_async(mut param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
- let mut client = connect(repo.host(), repo.user())?;
+ let mut client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: PruneListItem = serde_json::from_value(record.to_owned())?;
- let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
Ok(snapshot.relative_path().to_str().unwrap().to_owned())
};
optional: true,
},
}
- }
+ },
+ returns: {
+ type: StorageStatus,
+ },
)]
/// Get repository status.
async fn status(param: Value) -> Result<Value, Error> {
let output_format = get_output_format(&param);
- let client = connect(repo.host(), repo.user())?;
+ let client = connect(&repo)?;
let path = format!("api2/json/admin/datastore/{}/status", repo.store());
.column(ColumnConfig::new("used").renderer(render_total_percentage))
.column(ColumnConfig::new("avail").renderer(render_total_percentage));
- let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
+ let schema = &API_RETURN_SCHEMA_STATUS;
format_and_print_result_full(&mut data, schema, &output_format, &options);
.fingerprint_cache(true)
.ticket_cache(true);
- let client = match HttpClient::new(repo.host(), repo.user(), options) {
+ let client = match HttpClient::new(repo.host(), repo.port(), repo.auth_id(), options) {
Ok(v) => v,
_ => return Value::Null,
};
if let (Some(backup_id), Some(backup_type), Some(backup_time)) =
(item["backup-id"].as_str(), item["backup-type"].as_str(), item["backup-time"].as_i64())
{
- let snapshot = BackupDir::new(backup_type, backup_id, backup_time);
- result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+ if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
+ result.push(snapshot.relative_path().to_str().unwrap().to_owned());
+ }
}
}
}
let query = tools::json_object_to_query(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
- "backup-time": snapshot.backup_time().timestamp(),
+ "backup-time": snapshot.backup_time(),
})).unwrap();
let path = format!("api2/json/admin/datastore/{}/files?{}", repo.store(), query);
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
- .map(|v| tools::format::strip_server_file_expenstion(&v))
+ .map(|v| tools::format::strip_server_file_extension(&v))
.collect()
}
pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
- .filter_map(|v| {
- let name = tools::format::strip_server_file_expenstion(&v);
- if name.ends_with(".pxar") {
- Some(name)
+ .filter_map(|name| {
+ if name.ends_with(".pxar.didx") {
+ Some(tools::format::strip_server_file_extension(name))
+ } else {
+ None
+ }
+ })
+ .collect()
+}
+
+pub fn complete_img_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ complete_server_file_name(arg, param)
+ .iter()
+ .filter_map(|name| {
+ if name.ends_with(".img.fidx") {
+ Some(tools::format::strip_server_file_extension(name))
} else {
None
}
result
}
-fn master_pubkey_path() -> Result<PathBuf, Error> {
- let base = BaseDirectories::with_prefix("proxmox-backup")?;
- // usually $HOME/.config/proxmox-backup/master-public.pem
- let path = base.place_config_file("master-public.pem")?;
- Ok(path)
+fn complete_auth_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+ proxmox_backup::tools::runtime::main(async { complete_auth_id_do(param).await })
+}
+
+async fn complete_auth_id_do(param: &HashMap<String, String>) -> Vec<String> {
+ let mut result = vec![];
+
+ let repo = match extract_repository_from_map(param) {
+ Some(v) => v,
+ _ => return result,
+ };
+
+ let data = try_get(&repo, "api2/json/access/users?include_tokens=true").await;
+
+ if let Ok(parsed) = serde_json::from_value::<Vec<UserWithTokens>>(data) {
+ for user in parsed {
+ result.push(user.userid.to_string());
+ for token in user.tokens {
+ result.push(token.tokenid.to_string());
+ }
+ }
+ };
+
+ result
}
use proxmox_backup::client::RemoteChunkReader;
buf: &'a mut [u8],
offset: u64,
) -> MaybeReady<io::Result<usize>, ReadAtOperation<'a>> {
- use std::io::Read;
MaybeReady::Ready(tokio::task::block_in_place(move || {
let mut reader = self.inner.lock().unwrap();
reader.seek(SeekFrom::Start(offset))?;
let logout_cmd_def = CliCommand::new(&API_METHOD_API_LOGOUT)
.completion_cb("repository", complete_repository);
+ let version_cmd_def = CliCommand::new(&API_METHOD_API_VERSION)
+ .completion_cb("repository", complete_repository);
+
+ let change_owner_cmd_def = CliCommand::new(&API_METHOD_CHANGE_BACKUP_OWNER)
+ .arg_param(&["group", "new-owner"])
+ .completion_cb("group", complete_backup_group)
+ .completion_cb("new-owner", complete_auth_id)
+ .completion_cb("repository", complete_repository);
+
let cmd_def = CliCommandMap::new()
.insert("backup", backup_cmd_def)
.insert("upload-log", upload_log_cmd_def)
.insert("status", status_cmd_def)
.insert("key", key::cli())
.insert("mount", mount_cmd_def())
+ .insert("map", map_cmd_def())
+ .insert("unmap", unmap_cmd_def())
.insert("catalog", catalog_mgmt_cli())
.insert("task", task_mgmt_cli())
- .insert("benchmark", benchmark_cmd_def);
+ .insert("version", version_cmd_def)
+ .insert("benchmark", benchmark_cmd_def)
+ .insert("change-owner", change_owner_cmd_def);
let rpcenv = CliEnvironment::new();
run_cli_command(cmd_def, rpcenv, Some(|future| {