use failure::*;
use nix::unistd::{fork, ForkResult, pipe};
use std::os::unix::io::RawFd;
-use chrono::{Local, Utc, TimeZone};
+use chrono::{Local, DateTime, Utc, TimeZone};
use std::path::{Path, PathBuf};
use std::collections::{HashSet, HashMap};
use std::ffi::OsStr;
use std::os::unix::fs::OpenOptionsExt;
use proxmox::{sortable, identity};
-use proxmox::tools::fs::{file_get_contents, file_get_json, file_set_contents, image_size};
+use proxmox::tools::fs::{file_get_contents, file_get_json, replace_file, CreateOptions, image_size};
+use proxmox::sys::linux::tty;
use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment};
use proxmox::api::schema::*;
use proxmox::api::cli::*;
use futures::*;
use tokio::sync::mpsc;
-proxmox::api::const_regex! {
+const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
+const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
+
+proxmox::const_regex! {
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(?:pxar|img|conf|log)):(.+)$";
}
let new_data = json!(map);
- let _ = file_set_contents(path, new_data.to_string().as_bytes(), None);
+ let _ = replace_file(path, new_data.to_string().as_bytes(), CreateOptions::new());
}
fn complete_repository(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
result
}
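+/// Build an authenticated HTTP client for `server`, honoring the
+/// PBS_FINGERPRINT and PBS_PASSWORD environment variables and falling
+/// back to interactive prompts (ticket and fingerprint caching enabled).
+///
+/// A minimal usage sketch; host, user and endpoint are placeholder values:
+///
+/// ```ignore
+/// let client = connect("backup.example.com", "root@pam")?;
+/// let result = client.get("api2/json/version", None).await?;
+/// ```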
+fn connect(server: &str, userid: &str) -> Result<HttpClient, Error> {
+
+ let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
+
+ use std::env::VarError::*;
+ let password = match std::env::var(ENV_VAR_PBS_PASSWORD) {
+ Ok(p) => Some(p),
+        Err(NotUnicode(_)) => bail!("{} contains bad characters", ENV_VAR_PBS_PASSWORD),
+ Err(NotPresent) => None,
+ };
+
+ let options = HttpClientOptions::new()
+ .prefix(Some("proxmox-backup".to_string()))
+ .password(password)
+ .interactive(true)
+ .fingerprint(fingerprint)
+ .fingerprint_cache(true)
+ .ticket_cache(true);
+
+ HttpClient::new(server, userid, options)
+}
+
async fn view_task_result(
client: HttpClient,
result: Value,
Ok(())
}
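+/// Fetch the snapshot list of a datastore, optionally restricted to a
+/// single backup group, and return the raw `data` array from the API
+/// response.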
+async fn api_datastore_list_snapshots(
+ client: &HttpClient,
+ store: &str,
+ group: Option<BackupGroup>,
+) -> Result<Value, Error> {
+
+ let path = format!("api2/json/admin/datastore/{}/snapshots", store);
+
+ let mut args = json!({});
+ if let Some(group) = group {
+ args["backup-type"] = group.backup_type().into();
+ args["backup-id"] = group.backup_id().into();
+ }
+
+ let mut result = client.get(&path, Some(args)).await?;
+
+ Ok(result["data"].take())
+}
+
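+/// Resolve a backup group to its most recent snapshot.
+///
+/// Sketch of a caller; the group string and store name are placeholders:
+///
+/// ```ignore
+/// let group = BackupGroup::parse("host/myhost")?;
+/// let (btype, id, time) = api_datastore_latest_snapshot(&client, "store2", group).await?;
+/// ```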
+async fn api_datastore_latest_snapshot(
+ client: &HttpClient,
+ store: &str,
+ group: BackupGroup,
+) -> Result<(String, String, DateTime<Utc>), Error> {
+
+ let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
+ let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
+
+ if list.is_empty() {
+ bail!("backup group {:?} does not contain any snapshots.", group.group_path());
+ }
+
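+    // newest first: sort descending by backup_time, so list[0] is the latest snapshot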
+ list.sort_unstable_by(|a, b| b.backup_time.cmp(&a.backup_time));
+
+ let backup_time = Utc.timestamp(list[0].backup_time, 0);
+
+ Ok((group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time))
+}
+
+
async fn backup_directory<P: AsRef<Path>>(
client: &BackupWriter,
dir_path: P,
verbose: bool,
skip_lost_and_found: bool,
crypt_config: Option<Arc<CryptConfig>>,
- catalog: Arc<Mutex<CatalogWriter<SenderWriter>>>,
+ catalog: Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
+ entries_max: usize,
) -> Result<BackupStats, Error> {
- let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), device_set, verbose, skip_lost_and_found, catalog)?;
+ let pxar_stream = PxarBackupStream::open(
+ dir_path.as_ref(),
+ device_set,
+ verbose,
+ skip_lost_and_found,
+ catalog,
+ entries_max,
+ )?;
let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size);
    let (mut tx, rx) = mpsc::channel(10); // allow buffering of up to 10 chunks
Ok(stats)
}
-fn strip_server_file_expenstion(name: &str) -> String {
-
- if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {
- name[..name.len()-5].to_owned()
- } else {
- name.to_owned() // should not happen
- }
-}
-
#[api(
input: {
properties: {
/// List backup groups.
async fn list_backup_groups(param: Value) -> Result<Value, Error> {
+    let output_format = get_output_format(&param);
+
    let repo = extract_repository_from_value(&param)?;
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
record_repository(&repo);
- // fixme: implement and use output formatter instead ..
- let list = result["data"].as_array_mut().unwrap();
-
- list.sort_unstable_by(|a, b| {
- let a_id = a["backup-id"].as_str().unwrap();
- let a_backup_type = a["backup-type"].as_str().unwrap();
- let b_id = b["backup-id"].as_str().unwrap();
- let b_backup_type = b["backup-type"].as_str().unwrap();
-
- let type_order = a_backup_type.cmp(b_backup_type);
- if type_order == std::cmp::Ordering::Equal {
- a_id.cmp(b_id)
- } else {
- type_order
- }
- });
-
- let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
-
- let mut result = vec![];
-
- for item in list {
+ let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
+ let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+ let group = BackupGroup::new(item.backup_type, item.backup_id);
+ Ok(group.group_path().to_str().unwrap().to_owned())
+ };
- let id = item["backup-id"].as_str().unwrap();
- let btype = item["backup-type"].as_str().unwrap();
- let epoch = item["last-backup"].as_i64().unwrap();
+ let render_backup_timestamp = |v: &Value, _record: &Value| -> Result<String, Error> {
+ let epoch = v.as_i64().unwrap();
let last_backup = Utc.timestamp(epoch, 0);
- let backup_count = item["backup-count"].as_u64().unwrap();
+ Ok(BackupDir::backup_time_to_string(last_backup))
+ };
- let group = BackupGroup::new(btype, id);
+ let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
+ let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+ Ok(tools::format::render_backup_file_list(&item.files))
+ };
- let path = group.group_path().to_str().unwrap().to_owned();
+ let options = default_table_format_options()
+ .sortby("backup-type", false)
+ .sortby("backup-id", false)
+ .column(ColumnConfig::new("backup-id").renderer(render_group_path).header("group"))
+ .column(ColumnConfig::new("last-backup").renderer(render_backup_timestamp))
+ .column(ColumnConfig::new("backup-count"))
+ .column(ColumnConfig::new("files").renderer(render_files));
- let files = item["files"].as_array().unwrap().iter()
- .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
+ let mut data: Value = result["data"].take();
- if output_format == "text" {
- println!(
- "{:20} | {} | {:5} | {}",
- path,
- BackupDir::backup_time_to_string(last_backup),
- backup_count,
- tools::join(&files, ' '),
- );
- } else {
- result.push(json!({
- "backup-type": btype,
- "backup-id": id,
- "last-backup": epoch,
- "backup-count": backup_count,
- "files": files,
- }));
- }
- }
+ let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_GROUPS;
- if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
+ format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
    let repo = extract_repository_from_value(&param)?;
-    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+    let output_format = get_output_format(&param);
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
-
- let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
+ let client = connect(repo.host(), repo.user())?;
- let mut args = json!({});
- if let Some(path) = param["group"].as_str() {
- let group = BackupGroup::parse(path)?;
- args["backup-type"] = group.backup_type().into();
- args["backup-id"] = group.backup_id().into();
- }
+ let group = if let Some(path) = param["group"].as_str() {
+ Some(BackupGroup::parse(path)?)
+ } else {
+ None
+ };
- let result = client.get(&path, Some(args)).await?;
+ let mut data = api_datastore_list_snapshots(&client, repo.store(), group).await?;
record_repository(&repo);
- let list = result["data"].as_array().unwrap();
-
- let mut result = vec![];
-
- for item in list {
+ let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
+ let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
+ let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time);
+ Ok(snapshot.relative_path().to_str().unwrap().to_owned())
+ };
- let id = item["backup-id"].as_str().unwrap();
- let btype = item["backup-type"].as_str().unwrap();
- let epoch = item["backup-time"].as_i64().unwrap();
+ let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
+ let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
+ Ok(tools::format::render_backup_file_list(&item.files))
+ };
- let snapshot = BackupDir::new(btype, id, epoch);
+ let options = default_table_format_options()
+ .sortby("backup-type", false)
+ .sortby("backup-id", false)
+ .sortby("backup-time", false)
+ .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
+ .column(ColumnConfig::new("size"))
+ .column(ColumnConfig::new("files").renderer(render_files))
+ ;
- let path = snapshot.relative_path().to_str().unwrap().to_owned();
+ let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOTS;
- let files = item["files"].as_array().unwrap().iter()
- .map(|v| strip_server_file_expenstion(v.as_str().unwrap())).collect();
-
- if output_format == "text" {
- let size_str = if let Some(size) = item["size"].as_u64() {
- size.to_string()
- } else {
- String::from("-")
- };
- println!("{} | {} | {}", path, size_str, tools::join(&files, ' '));
- } else {
- let mut data = json!({
- "backup-type": btype,
- "backup-id": id,
- "backup-time": epoch,
- "files": files,
- });
- if let Some(size) = item["size"].as_u64() {
- data["size"] = size.into();
- }
- result.push(data);
- }
- }
-
- if output_format != "text" { format_and_print_result(&result.into(), &output_format); }
+ format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
    let path = tools::required_string_param(&param, "snapshot")?;
let snapshot = BackupDir::parse(path)?;
- let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let mut client = connect(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
    let repo = extract_repository_from_value(&param)?;
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
client.login().await?;
record_repository(&repo);
    let repo = extract_repository_from_value(&param)?;
- delete_ticket_info(repo.host(), repo.user())?;
+ delete_ticket_info("proxmox-backup", repo.host(), repo.user())?;
Ok(Value::Null)
}
let crypt_config = match keyfile {
None => None,
Some(path) => {
- let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
let client = BackupReader::start(
client,
    let path = tools::required_string_param(&param, "snapshot")?;
    let snapshot = BackupDir::parse(path)?;
-    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+    let output_format = get_output_format(&param);
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
record_repository(&repo);
- let list: Value = result["data"].take();
+ let info = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_LIST_SNAPSHOT_FILES;
- if output_format == "text" {
- for item in list.as_array().unwrap().iter() {
- println!(
- "{} {}",
- strip_server_file_expenstion(item["filename"].as_str().unwrap()),
- item["size"].as_u64().unwrap_or(0),
- );
- }
- } else {
- format_and_print_result(&list, &output_format);
- }
+ let mut data: Value = result["data"].take();
+
+ let options = default_table_format_options();
+
+ format_and_print_result_full(&mut data, info, &output_format, &options);
Ok(Value::Null)
}
#[api(
- input: {
+ input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
- }
- }
+ "output-format": {
+ schema: OUTPUT_FORMAT,
+ optional: true,
+ },
+ },
+ },
)]
/// Start garbage collection for a specific repository.
async fn start_garbage_collection(param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
-    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
-    let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
+    let output_format = get_output_format(&param);
+
+ let mut client = connect(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/gc", repo.store());
crypt_config: Option<Arc<CryptConfig>>,
) -> Result<
(
- Arc<Mutex<CatalogWriter<SenderWriter>>>,
+ Arc<Mutex<CatalogWriter<crate::tools::StdChannelWriter>>>,
tokio::sync::oneshot::Receiver<Result<BackupStats, Error>>
), Error>
{
- let (catalog_tx, catalog_rx) = mpsc::channel(10); // allow to buffer 10 writes
- let catalog_stream = catalog_rx.map_err(Error::from);
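+    // StdChannelWriter sends each write over a bounded std channel, so the
+    // synchronous CatalogWriter can block when the buffer is full;
+    // StdChannelStream exposes the receiving end as a Stream for the chunker.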
+    let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow buffering of up to 10 writes
+ let catalog_stream = crate::tools::StdChannelStream(catalog_rx);
let catalog_chunk_size = 512*1024;
let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size));
- let catalog = Arc::new(Mutex::new(CatalogWriter::new(SenderWriter::new(catalog_tx))?));
+ let catalog = Arc::new(Mutex::new(CatalogWriter::new(crate::tools::StdChannelWriter::new(catalog_tx))?));
let (catalog_result_tx, catalog_result_rx) = tokio::sync::oneshot::channel();
schema: CHUNK_SIZE_SCHEMA,
optional: true,
},
+ "entries-max": {
+ type: Integer,
+ description: "Max number of entries to hold in memory.",
+ optional: true,
+ default: pxar::ENCODER_MAX_ENTRIES as isize,
+ },
+ "verbose": {
+ type: Boolean,
+ description: "Verbose output.",
+ optional: true,
+ },
}
}
)]
let include_dev = param["include-dev"].as_array();
+ let entries_max = param["entries-max"].as_u64().unwrap_or(pxar::ENCODER_MAX_ENTRIES as u64);
+
let mut devices = if all_file_systems { None } else { Some(HashSet::new()) };
if let Some(include_dev) = include_dev {
let backup_time = Utc.timestamp(backup_time_opt.unwrap_or_else(|| Utc::now().timestamp()), 0);
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
record_repository(&repo);
println!("Starting backup: {}/{}/{}", backup_type, backup_id, BackupDir::backup_time_to_string(backup_time));
let (crypt_config, rsa_encrypted_key) = match keyfile {
None => (None, None),
Some(path) => {
- let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
let stats = client
.upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
.await?;
- manifest.add_file(target, stats.size, stats.csum);
+ manifest.add_file(target, stats.size, stats.csum)?;
}
BackupType::LOGFILE => { // fixme: remove - not needed anymore ?
println!("Upload log file '{}' to '{:?}' as {}", filename, repo, target);
let stats = client
.upload_blob_from_file(&filename, &target, crypt_config.clone(), true)
.await?;
- manifest.add_file(target, stats.size, stats.csum);
+ manifest.add_file(target, stats.size, stats.csum)?;
}
BackupType::PXAR => {
println!("Upload directory '{}' to '{:?}' as {}", filename, repo, target);
skip_lost_and_found,
crypt_config.clone(),
catalog.clone(),
+ entries_max as usize,
).await?;
- manifest.add_file(target, stats.size, stats.csum);
+ manifest.add_file(target, stats.size, stats.csum)?;
catalog.lock().unwrap().end_directory()?;
}
BackupType::IMAGE => {
verbose,
crypt_config.clone(),
).await?;
- manifest.add_file(target, stats.size, stats.csum);
+ manifest.add_file(target, stats.size, stats.csum)?;
}
}
}
let stats = catalog_result_rx.await??;
- manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum);
+ manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum)?;
}
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
let stats = client
.upload_blob_from_data(rsa_encrypted_key, target, None, false, false)
.await?;
- manifest.add_file(format!("{}.blob", target), stats.size, stats.csum);
+ manifest.add_file(format!("{}.blob", target), stats.size, stats.csum)?;
// openssl rsautl -decrypt -inkey master-private.pem -in rsa-encrypted.key -out t
/*
},
target: {
type: String,
- description: r###"Target directory path. Use '-' to write to stdandard output.
+ description: r###"Target directory path. Use '-' to write to standard output.
-We do not extraxt '.pxar' archives when writing to stdandard output.
+We do not extract '.pxar' archives when writing to standard output.
"###
},
    let archive_name = tools::required_string_param(&param, "archive-name")?;
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
record_repository(&repo);
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group = BackupGroup::parse(path)?;
-
- let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
- let result = client.get(&path, Some(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- }))).await?;
-
- let list = result["data"].as_array().unwrap();
- if list.is_empty() {
- bail!("backup group '{}' does not contain any snapshots:", path);
- }
-
- let epoch = list[0]["backup-time"].as_i64().unwrap();
- let backup_time = Utc.timestamp(epoch, 0);
- (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
+ api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot = BackupDir::parse(path)?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
let crypt_config = match keyfile {
None => None,
Some(path) => {
- let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
if server_archive_name == MANIFEST_BLOB_NAME {
let backup_index_data = manifest.into_json().to_string();
if let Some(target) = target {
- file_set_contents(target, backup_index_data.as_bytes(), None)?;
+ replace_file(target, backup_index_data.as_bytes(), CreateOptions::new())?;
} else {
let stdout = std::io::stdout();
let mut writer = stdout.lock();
    let snapshot = tools::required_string_param(&param, "snapshot")?;
let snapshot = BackupDir::parse(snapshot)?;
- let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let mut client = connect(repo.host(), repo.user())?;
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile {
None => None,
Some(path) => {
- let (key, _created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, _created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
let crypt_config = CryptConfig::new(key)?;
Some(Arc::new(crypt_config))
}
client.upload("application/octet-stream", body, &path, Some(args)).await
}
-#[api(
- input: {
- properties: {
- repository: {
- schema: REPO_URL_SCHEMA,
- optional: true,
- },
- group: {
- type: String,
- description: "Backup group.",
- },
- "output-format": {
- schema: OUTPUT_FORMAT,
- optional: true,
- },
- "dry-run": {
- type: Boolean,
- description: "Just show what prune would do, but do not delete anything.",
- optional: true,
- },
- }
- }
-)]
-/// Prune a backup repository.
-async fn prune(mut param: Value) -> Result<Value, Error> {
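+// Declared manually instead of via #[api] so the common prune parameter
+// schema can be merged in; the handler wraps the async body in a boxed future.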
+const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
+ &ApiHandler::Async(&prune),
+ &ObjectSchema::new(
+ "Prune a backup repository.",
+ &proxmox_backup::add_common_prune_prameters!([
+ ("dry-run", true, &BooleanSchema::new(
+ "Just show what prune would do, but do not delete anything.")
+ .schema()),
+ ("group", false, &StringSchema::new("Backup group.").schema()),
+ ], [
+ ("output-format", true, &OUTPUT_FORMAT),
+ ("repository", true, &REPO_URL_SCHEMA),
+ ])
+ )
+);
+
+fn prune<'a>(
+ param: Value,
+ _info: &ApiMethod,
+ _rpcenv: &'a mut dyn RpcEnvironment,
+) -> proxmox::api::ApiFuture<'a> {
+ async move {
+ prune_async(param).await
+ }.boxed()
+}
+async fn prune_async(mut param: Value) -> Result<Value, Error> {
    let repo = extract_repository_from_value(&param)?;
- let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let mut client = connect(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/prune", repo.store());
    let group = tools::required_string_param(&param, "group")?;
    let group = BackupGroup::parse(group)?;
-    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+
+    let output_format = get_output_format(&param);
param.as_object_mut().unwrap().remove("repository");
param.as_object_mut().unwrap().remove("group");
    let repo = extract_repository_from_value(&param)?;
-    let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
+    let output_format = get_output_format(&param);
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
let path = format!("api2/json/admin/datastore/{}/status", repo.store());
- let result = client.get(&path, None).await?;
- let data = &result["data"];
+ let mut result = client.get(&path, None).await?;
+ let mut data = result["data"].take();
record_repository(&repo);
- if output_format == "text" {
- let total = data["total"].as_u64().unwrap();
- let used = data["used"].as_u64().unwrap();
- let avail = data["avail"].as_u64().unwrap();
+ let render_total_percentage = |v: &Value, record: &Value| -> Result<String, Error> {
+ let v = v.as_u64().unwrap();
+ let total = record["total"].as_u64().unwrap();
let roundup = total/200;
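+        // total/200 is half a percent; adding it before the integer division
+        // rounds to the nearest percent (e.g. total=1000, v=996 yields 100, not 99)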
+ let per = ((v+roundup)*100)/total;
+ let info = format!(" ({} %)", per);
+ Ok(format!("{} {:>8}", v, info))
+ };
- println!(
- "total: {} used: {} ({} %) available: {}",
- total,
- used,
- ((used+roundup)*100)/total,
- avail,
- );
- } else {
- format_and_print_result(data, &output_format);
- }
+ let options = default_table_format_options()
+ .noheader(true)
+ .column(ColumnConfig::new("total").renderer(render_total_percentage))
+ .column(ColumnConfig::new("used").renderer(render_total_percentage))
+ .column(ColumnConfig::new("avail").renderer(render_total_percentage));
+
+ let schema = &proxmox_backup::api2::admin::datastore::API_RETURN_SCHEMA_STATUS;
+
+ format_and_print_result_full(&mut data, schema, &output_format, &options);
Ok(Value::Null)
}
// like get(), but simply ignores errors and returns Value::Null instead
async fn try_get(repo: &BackupRepository, url: &str) -> Value {
- let client = match HttpClient::new(repo.host(), repo.user(), None) {
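+    // same credentials handling as connect(), but non-interactive: completion
+    // helpers must never block on a password prompt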
+ let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
+ let password = std::env::var(ENV_VAR_PBS_PASSWORD).ok();
+
+ let options = HttpClientOptions::new()
+ .prefix(Some("proxmox-backup".to_string()))
+ .password(password)
+ .interactive(false)
+ .fingerprint(fingerprint)
+ .fingerprint_cache(true)
+ .ticket_cache(true);
+
+ let client = match HttpClient::new(repo.host(), repo.user(), options) {
Ok(v) => v,
_ => return Value::Null,
};
}
fn complete_backup_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
- async_main(async { complete_backup_group_do(param).await })
+ proxmox_backup::tools::runtime::main(async { complete_backup_group_do(param).await })
}
async fn complete_backup_group_do(param: &HashMap<String, String>) -> Vec<String> {
}
fn complete_group_or_snapshot(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
- async_main(async { complete_group_or_snapshot_do(arg, param).await })
+ proxmox_backup::tools::runtime::main(async { complete_group_or_snapshot_do(arg, param).await })
}
async fn complete_group_or_snapshot_do(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
}
fn complete_backup_snapshot(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
- async_main(async { complete_backup_snapshot_do(param).await })
+ proxmox_backup::tools::runtime::main(async { complete_backup_snapshot_do(param).await })
}
async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec<String> {
}
fn complete_server_file_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
- async_main(async { complete_server_file_name_do(param).await })
+ proxmox_backup::tools::runtime::main(async { complete_server_file_name_do(param).await })
}
async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Vec<String> {
fn complete_archive_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
complete_server_file_name(arg, param)
.iter()
- .map(|v| strip_server_file_expenstion(&v))
+ .map(|v| tools::format::strip_server_file_expenstion(&v))
.collect()
}
complete_server_file_name(arg, param)
.iter()
.filter_map(|v| {
- let name = strip_server_file_expenstion(&v);
+ let name = tools::format::strip_server_file_expenstion(&v);
if name.ends_with(".pxar") {
Some(name)
} else {
}
// If we're on a TTY, query the user for a password
- if crate::tools::tty::stdin_isatty() {
- return Ok(crate::tools::tty::read_password("Encryption Key Password: ")?);
+ if tty::stdin_isatty() {
+ return Ok(tty::read_password("Encryption Key Password: ")?);
}
bail!("no password input mechanism available");
if kdf == "scrypt" {
// always read passphrase from tty
- if !crate::tools::tty::stdin_isatty() {
+ if !tty::stdin_isatty() {
bail!("unable to read passphrase - no tty");
}
- let password = crate::tools::tty::read_password("Encryption Key Password: ")?;
+ let password = tty::read_and_verify_password("Encryption Key Password: ")?;
let key_config = encrypt_key_with_passphrase(&key, &password)?;
let target_path = master_pubkey_path()?;
- file_set_contents(&target_path, &pem_data, None)?;
+ replace_file(&target_path, &pem_data, CreateOptions::new())?;
println!("Imported public master key to {:?}", target_path);
) -> Result<Value, Error> {
// we need a TTY to query the new password
- if !crate::tools::tty::stdin_isatty() {
+ if !tty::stdin_isatty() {
bail!("unable to create master key - no tty");
}
let rsa = openssl::rsa::Rsa::generate(4096)?;
let pkey = openssl::pkey::PKey::from_rsa(rsa)?;
- let new_pw = String::from_utf8(crate::tools::tty::read_password("Master Key Password: ")?)?;
- let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
- if new_pw != verify_pw {
- bail!("Password verification fail!");
- }
-
- if new_pw.len() < 5 {
- bail!("Password is too short!");
- }
+ let password = String::from_utf8(tty::read_and_verify_password("Master Key Password: ")?)?;
let pub_key: Vec<u8> = pkey.public_key_to_pem()?;
let filename_pub = "master-public.pem";
println!("Writing public master key to {}", filename_pub);
- file_set_contents(filename_pub, pub_key.as_slice(), None)?;
+ replace_file(filename_pub, pub_key.as_slice(), CreateOptions::new())?;
let cipher = openssl::symm::Cipher::aes_256_cbc();
- let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, new_pw.as_bytes())?;
+ let priv_key: Vec<u8> = pkey.private_key_to_pem_pkcs8_passphrase(cipher, password.as_bytes())?;
let filename_priv = "master-private.pem";
println!("Writing private master key to {}", filename_priv);
- file_set_contents(filename_priv, priv_key.as_slice(), None)?;
+ replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new())?;
Ok(Value::Null)
}
let kdf = param["kdf"].as_str().unwrap_or("scrypt");
// we need a TTY to query the new password
- if !crate::tools::tty::stdin_isatty() {
+ if !tty::stdin_isatty() {
bail!("unable to change passphrase - no tty");
}
- let (key, created) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, created) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
if kdf == "scrypt" {
- let new_pw = String::from_utf8(crate::tools::tty::read_password("New Password: ")?)?;
- let verify_pw = String::from_utf8(crate::tools::tty::read_password("Verify Password: ")?)?;
-
- if new_pw != verify_pw {
- bail!("Password verification fail!");
- }
+ let password = tty::read_and_verify_password("New Password: ")?;
- if new_pw.len() < 5 {
- bail!("Password is too short!");
- }
-
- let mut new_key_config = encrypt_key_with_passphrase(&key, new_pw.as_bytes())?;
+ let mut new_key_config = encrypt_key_with_passphrase(&key, &password)?;
new_key_config.created = created; // keep original value
store_key_config(&path, true, new_key_config)?;
if verbose {
        // This will stay in the foreground with debug output enabled, as None is
        // passed for the RawFd.
- return async_main(mount_do(param, None));
+ return proxmox_backup::tools::runtime::main(mount_do(param, None));
}
    // Process should be daemonized.
Ok(ForkResult::Child) => {
nix::unistd::close(pipe.0).unwrap();
nix::unistd::setsid().unwrap();
- async_main(mount_do(param, Some(pipe.1)))
+ proxmox_backup::tools::runtime::main(mount_do(param, Some(pipe.1)))
}
Err(_) => bail!("failed to daemonize process"),
}
    let repo = extract_repository_from_value(&param)?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
    let target = tools::required_string_param(&param, "target")?;
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
record_repository(&repo);
    let path = tools::required_string_param(&param, "snapshot")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group = BackupGroup::parse(path)?;
-
- let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
- let result = client.get(&path, Some(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- }))).await?;
-
- let list = result["data"].as_array().unwrap();
- if list.is_empty() {
- bail!("backup group '{}' does not contain any snapshots:", path);
- }
-
- let epoch = list[0]["backup-time"].as_i64().unwrap();
- let backup_time = Utc.timestamp(epoch, 0);
- (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
+ api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot = BackupDir::parse(path)?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
let crypt_config = match keyfile {
None => None,
Some(path) => {
- let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
/// Shell to interactively inspect and restore snapshots.
async fn catalog_shell(param: Value) -> Result<(), Error> {
    let repo = extract_repository_from_value(&param)?;
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
+ let client = connect(repo.host(), repo.user())?;
    let path = tools::required_string_param(&param, "snapshot")?;
    let archive_name = tools::required_string_param(&param, "archive-name")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group = BackupGroup::parse(path)?;
-
- let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
- let result = client.get(&path, Some(json!({
- "backup-type": group.backup_type(),
- "backup-id": group.backup_id(),
- }))).await?;
-
- let list = result["data"].as_array().unwrap();
- if list.is_empty() {
- bail!("backup group '{}' does not contain any snapshots:", path);
- }
-
- let epoch = list[0]["backup-time"].as_i64().unwrap();
- let backup_time = Utc.timestamp(epoch, 0);
- (group.backup_type().to_owned(), group.backup_id().to_owned(), backup_time)
+ api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot = BackupDir::parse(path)?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
let crypt_config = match keyfile {
None => None,
Some(path) => {
- let (key, _) = load_and_decrtypt_key(&path, &get_encryption_key_password)?;
+ let (key, _) = load_and_decrypt_key(&path, &get_encryption_key_password)?;
Some(Arc::new(CryptConfig::new(key)?))
}
};
schema: OUTPUT_FORMAT,
optional: true,
},
+ all: {
+ type: Boolean,
+ description: "Also list stopped tasks.",
+ optional: true,
+ },
}
}
)]
/// List running server tasks for this repo user
-fn task_list(param: Value) -> Result<Value, Error> {
-
- async_main(async {
- let output_format = param["output-format"].as_str().unwrap_or("text").to_owned();
-        let repo = extract_repository_from_value(&param)?;
- let client = HttpClient::new(repo.host(), repo.user(), None)?;
-
- let limit = param["limit"].as_u64().unwrap_or(50) as usize;
-
- let args = json!({
- "running": true,
- "start": 0,
- "limit": limit,
- "userfilter": repo.user(),
- "store": repo.store(),
- });
- let result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
-
- let data = &result["data"];
-
- if output_format == "text" {
- for item in data.as_array().unwrap() {
- println!(
- "{} {}",
- item["upid"].as_str().unwrap(),
- item["status"].as_str().unwrap_or("running"),
- );
- }
- } else {
- format_and_print_result(data, &output_format);
- }
+async fn task_list(param: Value) -> Result<Value, Error> {
+
+    let output_format = get_output_format(&param);
-        Ok::<_, Error>(())
-    })?;
+    let repo = extract_repository_from_value(&param)?;
+ let client = connect(repo.host(), repo.user())?;
+
+ let limit = param["limit"].as_u64().unwrap_or(50) as usize;
+ let running = !param["all"].as_bool().unwrap_or(false);
+
+ let args = json!({
+ "running": running,
+ "start": 0,
+ "limit": limit,
+ "userfilter": repo.user(),
+ "store": repo.store(),
+ });
+
+ let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
+ let mut data = result["data"].take();
+
+ let schema = &proxmox_backup::api2::node::tasks::API_RETURN_SCHEMA_LIST_TASKS;
+
+ let options = default_table_format_options()
+ .column(ColumnConfig::new("starttime").right_align(false).renderer(tools::format::render_epoch))
+ .column(ColumnConfig::new("endtime").right_align(false).renderer(tools::format::render_epoch))
+ .column(ColumnConfig::new("upid"))
+ .column(ColumnConfig::new("status").renderer(tools::format::render_task_status));
+
+ format_and_print_result_full(&mut data, schema, &output_format, &options);
Ok(Value::Null)
}
}
)]
/// Display the task log.
-fn task_log(param: Value) -> Result<Value, Error> {
-
- async_main(async {
-        let repo = extract_repository_from_value(&param)?;
-        let upid = tools::required_string_param(&param, "upid")?;
+async fn task_log(param: Value) -> Result<Value, Error> {
-        let client = HttpClient::new(repo.host(), repo.user(), None)?;
+    let repo = extract_repository_from_value(&param)?;
+    let upid = tools::required_string_param(&param, "upid")?;
- display_task_log(client, upid, true).await?;
+ let client = connect(repo.host(), repo.user())?;
- Ok::<_, Error>(())
- })?;
+ display_task_log(client, upid, true).await?;
Ok(Value::Null)
}
}
)]
/// Try to stop a specific task.
-fn task_stop(param: Value) -> Result<Value, Error> {
-
- async_main(async {
-        let repo = extract_repository_from_value(&param)?;
-        let upid_str = tools::required_string_param(&param, "upid")?;
+async fn task_stop(param: Value) -> Result<Value, Error> {
-        let mut client = HttpClient::new(repo.host(), repo.user(), None)?;
+    let repo = extract_repository_from_value(&param)?;
+    let upid_str = tools::required_string_param(&param, "upid")?;
- let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
- let _ = client.delete(&path, None).await?;
+ let mut client = connect(repo.host(), repo.user())?;
- Ok::<_, Error>(())
- })?;
+ let path = format!("api2/json/nodes/localhost/tasks/{}", upid_str);
+ let _ = client.delete(&path, None).await?;
Ok(Value::Null)
}
.insert("catalog", catalog_mgmt_cli())
.insert("task", task_mgmt_cli());
- run_cli_command(cmd_def);
-}
-
-fn async_main<F: Future>(fut: F) -> <F as Future>::Output {
- let mut rt = tokio::runtime::Runtime::new().unwrap();
- let ret = rt.block_on(fut);
- // This does not exist anymore. We need to actually stop our runaways instead...
- // rt.shutdown_now();
- ret
+ run_cli_command(cmd_def, Some(|future| {
+ proxmox_backup::tools::runtime::main(future)
+ }));
}