if running_uid != backup_uid || running_gid != backup_gid {
bail!(
- "proxy not running as backup user or group (got uid {} gid {})",
- running_uid,
- running_gid
+ "proxy not running as backup user or group (got uid {running_uid} gid {running_gid})"
);
}
/// check for a cookie with the user-preferred language, fallback to the config one if not set or
/// not existing
fn get_language(headers: &http::HeaderMap) -> String {
- let exists = |l: &str| Path::new(&format!("/usr/share/pbs-i18n/pbs-lang-{}.js", l)).exists();
+ let exists = |l: &str| Path::new(&format!("/usr/share/pbs-i18n/pbs-lang-{l}.js")).exists();
match cookie_from_header(headers, "PBSLangCookie") {
Some(cookie_lang) if exists(&cookie_lang) => cookie_lang,
let (ct, index) = match api.render_template(template_file, &data) {
Ok(index) => ("text/html", index),
- Err(err) => ("text/plain", format!("Error rendering template: {}", err)),
+ Err(err) => ("text/plain", format!("Error rendering template: {err}")),
};
let mut resp = Response::builder()
},
Some("proxmox-backup-proxy"),
) {
- bail!("unable to inititialize syslog - {}", err);
+ bail!("unable to initialize syslog - {err}");
}
let _ = public_auth_key(); // load with lazy_static
move |_value| -> Result<_, Error> {
log::info!("reloading certificate");
match make_tls_acceptor() {
- Err(err) => log::error!("error reloading certificate: {}", err),
+ Err(err) => log::error!("error reloading certificate: {err}"),
Ok(new_acceptor) => {
let mut guard = acceptor.lock().unwrap();
*guard = new_acceptor;
// to remove references for not configured datastores
commando_sock.register_command("datastore-removed".to_string(), |_value| {
if let Err(err) = DataStore::remove_unused_datastores() {
- log::error!("could not refresh datastores: {}", err);
+ log::error!("could not refresh datastores: {err}");
}
Ok(Value::Null)
})?;
});
if let Err(err) = init_result {
- bail!("unable to start daemon - {}", err);
+ bail!("unable to start daemon - {err}");
}
// stop gap for https://github.com/tokio-rs/tokio/issues/4730 where the thread holding the
}
acceptor
.set_private_key_file(key_path, SslFiletype::PEM)
- .map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
+ .map_err(|err| format_err!("unable to read proxy key {key_path} - {err}"))?;
acceptor
.set_certificate_chain_file(cert_path)
- .map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
+ .map_err(|err| format_err!("unable to read proxy cert {cert_path} - {err}"))?;
acceptor.set_options(openssl::ssl::SslOptions::NO_RENEGOTIATION);
acceptor.check_private_key().unwrap();
res = listener.accept().fuse() => match res {
Ok(conn) => conn,
Err(err) => {
- eprintln!("error accepting tcp connection: {}", err);
+ eprintln!("error accepting tcp connection: {err}");
continue;
}
},
match openssl::ssl::Ssl::new(acceptor_guard.context()) {
Ok(ssl) => ssl,
Err(err) => {
- eprintln!(
- "failed to create Ssl object from Acceptor context - {}",
- err
- );
+ eprintln!("failed to create Ssl object from Acceptor context - {err}");
continue;
}
}
let stream = match tokio_openssl::SslStream::new(ssl, sock) {
Ok(stream) => stream,
Err(err) => {
- eprintln!(
- "failed to create SslStream using ssl and connection socket - {}",
- err
- );
+ eprintln!("failed to create SslStream using ssl and connection socket - {err}");
continue;
}
};
}
Ok(Err(err)) => {
if debug {
- eprintln!("https handshake failed - {}", err);
+ eprintln!("https handshake failed - {err}");
}
}
Err(_) => {
async fn schedule_datastore_garbage_collection() {
let config = match pbs_config::datastore::config() {
Err(err) => {
- eprintln!("unable to read datastore config - {}", err);
+ eprintln!("unable to read datastore config - {err}");
return;
}
Ok((config, _digest)) => config,
let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c,
Err(err) => {
- eprintln!("datastore config from_value failed - {}", err);
+ eprintln!("datastore config from_value failed - {err}");
continue;
}
};
let event: CalendarEvent = match event_str.parse() {
Ok(event) => event,
Err(err) => {
- eprintln!("unable to parse schedule '{}' - {}", event_str, err);
+ eprintln!("unable to parse schedule '{event_str}' - {err}");
continue;
}
};
let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Lookup)) {
Ok(datastore) => datastore,
Err(err) => {
- eprintln!("lookup_datastore failed - {}", err);
+ eprintln!("lookup_datastore failed - {err}");
continue;
}
};
Ok(Some(next)) => next,
Ok(None) => continue,
Err(err) => {
- eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
+ eprintln!("compute_next_event for '{event_str}' failed - {err}");
continue;
}
};
Some(event_str),
false,
) {
- eprintln!(
- "unable to start garbage collection job on datastore {} - {}",
- store, err
- );
+ eprintln!("unable to start garbage collection job on datastore {store} - {err}");
}
}
}
async fn schedule_datastore_prune_jobs() {
let config = match pbs_config::prune::config() {
Err(err) => {
- eprintln!("unable to read prune job config - {}", err);
+ eprintln!("unable to read prune job config - {err}");
return;
}
Ok((config, _digest)) => config,
let job_config: PruneJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
Err(err) => {
- eprintln!("prune job config from_value failed - {}", err);
+ eprintln!("prune job config from_value failed - {err}");
continue;
}
};
&auth_id,
Some(job_config.schedule),
) {
- eprintln!("unable to start datastore prune job {} - {}", &job_id, err);
+ eprintln!("unable to start datastore prune job {job_id} - {err}");
}
};
}
async fn schedule_datastore_sync_jobs() {
let config = match pbs_config::sync::config() {
Err(err) => {
- eprintln!("unable to read sync job config - {}", err);
+ eprintln!("unable to read sync job config - {err}");
return;
}
Ok((config, _digest)) => config,
let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
Err(err) => {
- eprintln!("sync job config from_value failed - {}", err);
+ eprintln!("sync job config from_value failed - {err}");
continue;
}
};
let auth_id = Authid::root_auth_id().clone();
if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str), false) {
- eprintln!("unable to start datastore sync job {} - {}", &job_id, err);
+ eprintln!("unable to start datastore sync job {job_id} - {err}");
}
};
}
async fn schedule_datastore_verify_jobs() {
let config = match pbs_config::verify::config() {
Err(err) => {
- eprintln!("unable to read verification job config - {}", err);
+ eprintln!("unable to read verification job config - {err}");
return;
}
Ok((config, _digest)) => config,
let job_config: VerificationJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
Err(err) => {
- eprintln!("verification job config from_value failed - {}", err);
+ eprintln!("verification job config from_value failed - {err}");
continue;
}
};
};
if let Err(err) = do_verification_job(job, job_config, &auth_id, Some(event_str), false)
{
- eprintln!(
- "unable to start datastore verification job {} - {}",
- &job_id, err
- );
+ eprintln!("unable to start datastore verification job {job_id} - {err}");
}
};
}
async fn schedule_tape_backup_jobs() {
let config = match pbs_config::tape_job::config() {
Err(err) => {
- eprintln!("unable to read tape job config - {}", err);
+ eprintln!("unable to read tape job config - {err}");
return;
}
Ok((config, _digest)) => config,
let job_config: TapeBackupJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
Err(err) => {
- eprintln!("tape backup job config from_value failed - {}", err);
+ eprintln!("tape backup job config from_value failed - {err}");
continue;
}
};
if let Err(err) =
do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str), false)
{
- eprintln!("unable to start tape backup job {} - {}", &job_id, err);
+ eprintln!("unable to start tape backup job {job_id} - {err}");
}
};
}
if has_rotated {
task_log!(worker, "cleaning up old task logs");
if let Err(err) = cleanup_old_tasks(&worker, true) {
- task_warn!(worker, "could not completely cleanup old tasks: {}", err);
+ task_warn!(worker, "could not completely cleanup old tasks: {err}");
}
}
let status = worker.create_state(&result);
if let Err(err) = job.finish(status) {
- eprintln!("could not finish job state for {}: {}", worker_type, err);
+ eprintln!("could not finish job state for {worker_type}: {err}");
}
result
},
) {
- eprintln!("unable to start task log rotation: {}", err);
+ eprintln!("unable to start task log rotation: {err}");
}
}
match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!(
- "reopen commands failed, proxy: {}; api: {}",
- e1,
- e2
+ "reopen commands failed, proxy: {e1}; api: {e2}"
)),
- (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
- (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
+ (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {e1}")),
+ (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {e2}")),
_ => Ok(()),
}
}
match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!(
- "reopen commands failed, proxy: {}; api: {}",
- e1,
- e2
+ "reopen commands failed, proxy: {e1}; api: {e2}"
)),
- (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {}", e1)),
- (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {}", e2)),
+ (Err(e1), Ok(_)) => Err(format_err!("reopen commands failed, proxy: {e1}")),
+ (Ok(_), Err(e2)) => Err(format_err!("reopen commands failed, api: {e2}")),
_ => Ok(()),
}
}
{
Ok(res) => res,
Err(err) => {
- log::error!("collecting host stats panicked: {}", err);
+ log::error!("collecting host stats panicked: {err}");
tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
continue;
}
let (rrd_res, metrics_res) = join!(rrd_future, metrics_future);
if let Err(err) = rrd_res {
- log::error!("rrd update panicked: {}", err);
+ log::error!("rrd update panicked: {err}");
}
if let Err(err) = metrics_res {
- log::error!("error during metrics sending: {}", err);
+ log::error!("error during metrics sending: {err}");
}
tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
.zip(channel_list.iter().map(|(_, name)| name))
{
if let Err(err) = res {
- log::error!("error sending into channel of {}: {}", name, err);
+ log::error!("error sending into channel of {name}: {err}");
}
}
futures::future::join_all(channel_list.into_iter().map(|(channel, name)| async move {
if let Err(err) = channel.join().await {
- log::error!("error sending to metric server {}: {}", name, err);
+ log::error!("error sending to metric server {name}: {err}");
}
}))
.await;
let proc = match read_proc_stat() {
Ok(stat) => Some(stat),
Err(err) => {
- eprintln!("read_proc_stat failed - {}", err);
+ eprintln!("read_proc_stat failed - {err}");
None
}
};
let meminfo = match read_meminfo() {
Ok(stat) => Some(stat),
Err(err) => {
- eprintln!("read_meminfo failed - {}", err);
+ eprintln!("read_meminfo failed - {err}");
None
}
};
let net = match read_proc_net_dev() {
Ok(netdev) => Some(netdev),
Err(err) => {
- eprintln!("read_prox_net_dev failed - {}", err);
+ eprintln!("read_proc_net_dev failed - {err}");
None
}
};
let load = match read_loadavg() {
Ok(loadavg) => Some(loadavg),
Err(err) => {
- eprintln!("read_loadavg failed - {}", err);
+ eprintln!("read_loadavg failed - {err}");
None
}
};
}
}
Err(err) => {
- eprintln!("read datastore config failed - {}", err);
+ eprintln!("read datastore config failed - {err}");
}
}
let event: CalendarEvent = match event_str.parse() {
Ok(event) => event,
Err(err) => {
- eprintln!("unable to parse schedule '{}' - {}", event_str, err);
+ eprintln!("unable to parse schedule '{event_str}' - {err}");
return false;
}
};
let last = match jobstate::last_run_time(worker_type, id) {
Ok(time) => time,
Err(err) => {
- eprintln!(
- "could not get last run time of {} {}: {}",
- worker_type, id, err
- );
+ eprintln!("could not get last run time of {worker_type} {id}: {err}");
return false;
}
};
Ok(Some(next)) => next,
Ok(None) => return false,
Err(err) => {
- eprintln!("compute_next_event for '{}' failed - {}", event_str, err);
+ eprintln!("compute_next_event for '{event_str}' failed - {err}");
return false;
}
};
let usage = match proxmox_sys::fs::fs_info(path) {
Ok(status) => Some(status),
Err(err) => {
- eprintln!("read fs info on {:?} failed - {}", path, err);
+ eprintln!("read fs info on {path:?} failed - {err}");
None
}
};
("zfs", Some(source)) => match source.into_string() {
Ok(dataset) => match zfs_dataset_stats(&dataset) {
Ok(stat) => device_stat = Some(stat),
- Err(err) => eprintln!("zfs_dataset_stats({:?}) failed - {}", dataset, err),
+ Err(err) => eprintln!("zfs_dataset_stats({dataset:?}) failed - {err}"),
},
Err(source) => {
- eprintln!("zfs_pool_stats({:?}) failed - invalid characters", source)
+ eprintln!("zfs_pool_stats({source:?}) failed - invalid characters")
}
},
_ => {
if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
match disk.read_stat() {
Ok(stat) => device_stat = stat,
- Err(err) => eprintln!("disk.read_stat {:?} failed - {}", path, err),
+ Err(err) => eprintln!("disk.read_stat {path:?} failed - {err}"),
}
}
}
device_stat
}
Err(err) => {
- eprintln!("find_mounted_device failed - {}", err);
+ eprintln!("find_mounted_device failed - {err}");
None
}
};