git.proxmox.com Git - proxmox-backup.git/commitdiff
rust fmt for pbs src
author    Thomas Lamprecht <t.lamprecht@proxmox.com>
Thu, 14 Apr 2022 12:03:46 +0000 (14:03 +0200)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
Thu, 14 Apr 2022 12:03:46 +0000 (14:03 +0200)
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
30 files changed:
pbs-api-types/src/maintenance.rs
src/acme/client.rs
src/auth.rs
src/auth_helpers.rs
src/backup/verify.rs
src/bin/docgen.rs
src/bin/proxmox-backup-api.rs
src/bin/proxmox-backup-debug.rs
src/bin/proxmox-backup-proxy.rs
src/bin/proxmox_backup_debug/api.rs
src/bin/proxmox_backup_debug/inspect.rs
src/bin/proxmox_backup_debug/mod.rs
src/bin/proxmox_backup_debug/recover.rs
src/bin/sg-tape-cmd.rs
src/config/acme/mod.rs
src/config/mod.rs
src/config/node.rs
src/rrd_cache.rs
src/tools/apt.rs
src/tools/config.rs
src/tools/disks/mod.rs
src/tools/mod.rs
src/tools/parallel_handler.rs
src/tools/shared_rate_limiter.rs
src/tools/statistics.rs
src/tools/subscription.rs
src/tools/systemd/config.rs
src/tools/systemd/types.rs
src/tools/systemd/unit.rs
src/traffic_control_cache.rs

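The hunks below are a formatting-only pass; no functional change is intended. A diff like this is typically produced by running rustfmt through cargo over the crate. The exact invocation is not recorded in the commit, but it would be something like:

    cargo fmt -p proxmox-backup

Only import ordering, spacing, and line wrapping are touched.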
diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs
index f8d4dad39c066bd2d86ed0ec7f4b65a63ca6fce3..dd3de50ac4272df5c62b0f4edbbbaa86903c07ed 100644 (file)
@@ -1,17 +1,16 @@
-use std::borrow::Cow;
 use anyhow::{bail, Error};
 use serde::{Deserialize, Serialize};
+use std::borrow::Cow;
 
-use proxmox_schema::{api, ApiStringFormat, const_regex, Schema, StringSchema};
+use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
 
-const_regex!{
+const_regex! {
     pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
 }
 
 pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
     ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);
 
-
 pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
     StringSchema::new("Message describing the reason for the maintenance.")
         .format(&MAINTENANCE_MESSAGE_FORMAT)
@@ -27,7 +26,7 @@ pub enum Operation {
 
 #[api]
 #[derive(Deserialize, Serialize, PartialEq)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// Maintenance type.
 pub enum MaintenanceType {
     /// Only read operations are allowed on the datastore.
diff --git a/src/acme/client.rs b/src/acme/client.rs
index bc9d4ec411dc208e7db96fa933584cb4b79b12c9..40b8e16e6d61cdcc5e4d3cb366c2703b96bf3d95 100644 (file)
@@ -10,13 +10,13 @@ use hyper::{Body, Request};
 use nix::sys::stat::Mode;
 use serde::{Deserialize, Serialize};
 
-use proxmox_sys::fs::{replace_file, CreateOptions};
 use proxmox_acme_rs::account::AccountCreator;
 use proxmox_acme_rs::account::AccountData as AcmeAccountData;
 use proxmox_acme_rs::order::{Order, OrderData};
 use proxmox_acme_rs::Request as AcmeRequest;
 use proxmox_acme_rs::{Account, Authorization, Challenge, Directory, Error, ErrorResponse};
 use proxmox_http::client::SimpleHttp;
+use proxmox_sys::fs::{replace_file, CreateOptions};
 
 use crate::api2::types::AcmeAccountName;
 use crate::config::acme::account_path;
diff --git a/src/auth.rs b/src/auth.rs
index 6e7d65790bc7826312e3331261b96c666300f64c..bd57fe87bd86c7382c94f97f09412aac0a615fdf 100644 (file)
@@ -2,14 +2,14 @@
 //!
 //! This library contains helper to authenticate users.
 
-use std::process::{Command, Stdio};
 use std::io::Write;
+use std::process::{Command, Stdio};
 
 use anyhow::{bail, format_err, Error};
 use serde_json::json;
 
+use pbs_api_types::{RealmRef, Userid, UsernameRef};
 use pbs_buildcfg::configdir;
-use pbs_api_types::{Userid, UsernameRef, RealmRef};
 
 pub trait ProxmoxAuthenticator {
     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error>;
@@ -20,10 +20,10 @@ pub trait ProxmoxAuthenticator {
 struct PAM();
 
 impl ProxmoxAuthenticator for PAM {
-
     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
         let mut auth = pam::Authenticator::with_password("proxmox-backup-auth").unwrap();
-        auth.get_handler().set_credentials(username.as_str(), password);
+        auth.get_handler()
+            .set_credentials(username.as_str(), password);
         auth.authenticate()?;
         Ok(())
     }
@@ -34,22 +34,24 @@ impl ProxmoxAuthenticator for PAM {
             .stdin(Stdio::piped())
             .stderr(Stdio::piped())
             .spawn()
-            .map_err(|err| format_err!(
-                "unable to set password for '{}' - execute passwd failed: {}",
-                username.as_str(),
-                err,
-            ))?;
+            .map_err(|err| {
+                format_err!(
+                    "unable to set password for '{}' - execute passwd failed: {}",
+                    username.as_str(),
+                    err,
+                )
+            })?;
 
         // Note: passwd reads password twice from stdin (for verify)
         writeln!(child.stdin.as_mut().unwrap(), "{}\n{}", password, password)?;
 
-        let output = child
-            .wait_with_output()
-            .map_err(|err| format_err!(
+        let output = child.wait_with_output().map_err(|err| {
+            format_err!(
                 "unable to set password for '{}' - wait failed: {}",
                 username.as_str(),
                 err,
-            ))?;
+            )
+        })?;
 
         if !output.status.success() {
             bail!(
@@ -73,7 +75,6 @@ struct PBS();
 const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");
 
 impl ProxmoxAuthenticator for PBS {
-
     fn authenticate_user(&self, username: &UsernameRef, password: &str) -> Result<(), Error> {
         let data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?;
         match data[username.as_str()].as_str() {
@@ -89,7 +90,7 @@ impl ProxmoxAuthenticator for PBS {
         data[username.as_str()] = enc_password.into();
 
         let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
-        let options =  proxmox_sys::fs::CreateOptions::new()
+        let options = proxmox_sys::fs::CreateOptions::new()
             .perm(mode)
             .owner(nix::unistd::ROOT)
             .group(nix::unistd::Gid::from_raw(0));
@@ -107,7 +108,7 @@ impl ProxmoxAuthenticator for PBS {
         }
 
         let mode = nix::sys::stat::Mode::from_bits_truncate(0o0600);
-        let options =  proxmox_sys::fs::CreateOptions::new()
+        let options = proxmox_sys::fs::CreateOptions::new()
             .perm(mode)
             .owner(nix::unistd::ROOT)
             .group(nix::unistd::Gid::from_raw(0));
@@ -130,7 +131,5 @@ pub fn lookup_authenticator(realm: &RealmRef) -> Result<Box<dyn ProxmoxAuthentic
 
 /// Authenticate users
 pub fn authenticate_user(userid: &Userid, password: &str) -> Result<(), Error> {
-
-    lookup_authenticator(userid.realm())?
-        .authenticate_user(userid.name(), password)
+    lookup_authenticator(userid.realm())?.authenticate_user(userid.name(), password)
 }
diff --git a/src/auth_helpers.rs b/src/auth_helpers.rs
index 973c1224a1232a1c9a88c5b7f28456cfc5361501..cafd5d6c28393dde2486b5b12d1bdb850d291408 100644 (file)
@@ -6,18 +6,13 @@ use openssl::pkey::{PKey, Private, Public};
 use openssl::rsa::Rsa;
 use openssl::sha;
 
-use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
 use proxmox_lang::try_block;
+use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
 
-use pbs_buildcfg::configdir;
 use pbs_api_types::Userid;
+use pbs_buildcfg::configdir;
 
-fn compute_csrf_secret_digest(
-    timestamp: i64,
-    secret: &[u8],
-    userid: &Userid,
-) -> String {
-
+fn compute_csrf_secret_digest(timestamp: i64, secret: &[u8], userid: &Userid) -> String {
     let mut hasher = sha::Sha256::new();
     let data = format!("{:08X}:{}:", timestamp, userid);
     hasher.update(data.as_bytes());
@@ -26,11 +21,7 @@ fn compute_csrf_secret_digest(
     base64::encode_config(&hasher.finish(), base64::STANDARD_NO_PAD)
 }
 
-pub fn assemble_csrf_prevention_token(
-    secret: &[u8],
-    userid: &Userid,
-) -> String {
-
+pub fn assemble_csrf_prevention_token(secret: &[u8], userid: &Userid) -> String {
     let epoch = proxmox_time::epoch_i64();
 
     let digest = compute_csrf_secret_digest(epoch, secret, userid);
@@ -45,13 +36,11 @@ pub fn verify_csrf_prevention_token(
     min_age: i64,
     max_age: i64,
 ) -> Result<i64, Error> {
-
     use std::collections::VecDeque;
 
     let mut parts: VecDeque<&str> = token.split(':').collect();
 
     try_block!({
-
         if parts.len() != 2 {
             bail!("format error - wrong number of parts.");
         }
@@ -59,8 +48,8 @@ pub fn verify_csrf_prevention_token(
         let timestamp = parts.pop_front().unwrap();
         let sig = parts.pop_front().unwrap();
 
-        let ttime = i64::from_str_radix(timestamp, 16).
-            map_err(|err| format_err!("timestamp format error - {}", err))?;
+        let ttime = i64::from_str_radix(timestamp, 16)
+            .map_err(|err| format_err!("timestamp format error - {}", err))?;
 
         let digest = compute_csrf_secret_digest(ttime, secret, userid);
 
@@ -80,14 +69,16 @@ pub fn verify_csrf_prevention_token(
         }
 
         Ok(age)
-    }).map_err(|err| format_err!("invalid csrf token - {}", err))
+    })
+    .map_err(|err| format_err!("invalid csrf token - {}", err))
 }
 
 pub fn generate_csrf_key() -> Result<(), Error> {
-
     let path = PathBuf::from(configdir!("/csrf.key"));
 
-    if path.exists() { return Ok(()); }
+    if path.exists() {
+        return Ok(());
+    }
 
     let rsa = Rsa::generate(2048).unwrap();
 
@@ -111,13 +102,14 @@ pub fn generate_csrf_key() -> Result<(), Error> {
 }
 
 pub fn generate_auth_key() -> Result<(), Error> {
-
     let priv_path = PathBuf::from(configdir!("/authkey.key"));
 
     let mut public_path = priv_path.clone();
     public_path.set_extension("pub");
 
-    if priv_path.exists() && public_path.exists() { return Ok(()); }
+    if priv_path.exists() && public_path.exists() {
+        return Ok(());
+    }
 
     let rsa = Rsa::generate(4096).unwrap();
 
@@ -150,17 +142,14 @@ pub fn generate_auth_key() -> Result<(), Error> {
 }
 
 pub fn csrf_secret() -> &'static [u8] {
-
     lazy_static! {
-        static ref SECRET: Vec<u8> =
-            file_get_contents(configdir!("/csrf.key")).unwrap();
+        static ref SECRET: Vec<u8> = file_get_contents(configdir!("/csrf.key")).unwrap();
     }
 
     &SECRET
 }
 
 fn load_public_auth_key() -> Result<PKey<Public>, Error> {
-
     let pem = file_get_contents(configdir!("/authkey.pub"))?;
     let rsa = Rsa::public_key_from_pem(&pem)?;
     let key = PKey::from_rsa(rsa)?;
@@ -169,7 +158,6 @@ fn load_public_auth_key() -> Result<PKey<Public>, Error> {
 }
 
 pub fn public_auth_key() -> &'static PKey<Public> {
-
     lazy_static! {
         static ref KEY: PKey<Public> = load_public_auth_key().unwrap();
     }
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 307d366c100656995032372a5c1d97bce748eb17..74f8b23c3b053e252e092dcc101bd34efb0087e5 100644 (file)
@@ -8,11 +8,11 @@ use anyhow::{bail, format_err, Error};
 
 use proxmox_sys::{task_log, WorkerTaskContext};
 
-use pbs_api_types::{Authid, CryptMode, VerifyState, UPID, SnapshotVerifyState};
-use pbs_datastore::{DataStore, DataBlob, StoreProgress};
-use pbs_datastore::backup_info::{BackupGroup, BackupDir, BackupInfo};
+use pbs_api_types::{Authid, CryptMode, SnapshotVerifyState, VerifyState, UPID};
+use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
+use pbs_datastore::{DataBlob, DataStore, StoreProgress};
 use proxmox_sys::fs::lock_dir_noblock_shared;
 
 use crate::tools::parallel_handler::ParallelHandler;
@@ -63,14 +63,14 @@ fn verify_blob(
             // digest already verified above
             blob.decode(None, None)?;
             Ok(())
-        },
+        }
         CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
     }
 }
 
 fn rename_corrupted_chunk(
     datastore: Arc<DataStore>,
-    digest: &[u8;32],
+    digest: &[u8; 32],
     worker: &dyn WorkerTaskContext,
 ) {
     let (path, digest_str) = datastore.chunk_path(digest);
@@ -89,11 +89,16 @@ fn rename_corrupted_chunk(
     match std::fs::rename(&path, &new_path) {
         Ok(_) => {
             task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
-        },
+        }
         Err(err) => {
             match err.kind() {
-                std::io::ErrorKind::NotFound => { /* ignored */ },
-                _ => task_log!(worker, "could not rename corrupted chunk {:?} - {}", &path, err)
+                std::io::ErrorKind::NotFound => { /* ignored */ }
+                _ => task_log!(
+                    worker,
+                    "could not rename corrupted chunk {:?} - {}",
+                    &path,
+                    err
+                ),
             }
         }
     };
@@ -127,7 +132,7 @@ fn verify_index_chunks(
                     task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
                     errors2.fetch_add(1, Ordering::SeqCst);
                     return Ok(());
-                },
+                }
                 Ok(mode) => mode,
             };
 
@@ -151,15 +156,29 @@ fn verify_index_chunks(
             }
 
             Ok(())
-        }
+        },
     );
 
     let skip_chunk = |digest: &[u8; 32]| -> bool {
-        if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
+        if verify_worker
+            .verified_chunks
+            .lock()
+            .unwrap()
+            .contains(digest)
+        {
             true
-        } else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
+        } else if verify_worker
+            .corrupt_chunks
+            .lock()
+            .unwrap()
+            .contains(digest)
+        {
             let digest_str = hex::encode(digest);
-            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
+            task_log!(
+                verify_worker.worker,
+                "chunk {} was marked as corrupt",
+                digest_str
+            );
             errors.fetch_add(1, Ordering::SeqCst);
             true
         } else {
@@ -193,8 +212,16 @@ fn verify_index_chunks(
 
         match verify_worker.datastore.load_chunk(&info.digest) {
             Err(err) => {
-                verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
-                task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
+                verify_worker
+                    .corrupt_chunks
+                    .lock()
+                    .unwrap()
+                    .insert(info.digest);
+                task_log!(
+                    verify_worker.worker,
+                    "can't verify chunk, load failed - {}",
+                    err
+                );
                 errors.fetch_add(1, Ordering::SeqCst);
                 rename_corrupted_chunk(
                     verify_worker.datastore.clone(),
@@ -356,7 +383,12 @@ pub fn verify_backup_dir_with_lock(
         }
     }
 
-    task_log!(verify_worker.worker, "verify {}:{}", verify_worker.datastore.name(), backup_dir);
+    task_log!(
+        verify_worker.worker,
+        "verify {}:{}",
+        verify_worker.datastore.name(),
+        backup_dir
+    );
 
     let mut error_count = 0;
 
@@ -367,9 +399,7 @@ pub fn verify_backup_dir_with_lock(
             match archive_type(&info.filename)? {
                 ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
                 ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
-                ArchiveType::Blob => {
-                    verify_blob(verify_worker.datastore.clone(), backup_dir, info)
-                }
+                ArchiveType::Blob => verify_blob(verify_worker.datastore.clone(), backup_dir, info),
             }
         });
 
@@ -473,7 +503,11 @@ pub fn verify_all_backups(
     let mut errors = Vec::new();
     let worker = Arc::clone(&verify_worker.worker);
 
-    task_log!(worker, "verify datastore {}", verify_worker.datastore.name());
+    task_log!(
+        worker,
+        "verify datastore {}",
+        verify_worker.datastore.name()
+    );
 
     if let Some(owner) = &owner {
         task_log!(worker, "limiting to backups owned by {}", owner);
@@ -486,25 +520,20 @@ pub fn verify_all_backups(
                     || (group_owner.is_token()
                         && !owner.is_token()
                         && group_owner.user() == owner.user())
-            },
+            }
             (Ok(_), None) => true,
             (Err(err), Some(_)) => {
                 // intentionally not in task log
                 // the task user might not be allowed to see this group!
                 println!("Failed to get owner of group '{}' - {}", group, err);
                 false
-            },
+            }
             (Err(err), None) => {
                 // we don't filter by owner, but we want to log the error
-                task_log!(
-                    worker,
-                    "Failed to get owner of group '{} - {}",
-                    group,
-                    err,
-                );
+                task_log!(worker, "Failed to get owner of group '{} - {}", group, err,);
                 errors.push(group.to_string());
                 true
-            },
+            }
         }
     };
 
diff --git a/src/bin/docgen.rs b/src/bin/docgen.rs
index 82f2b2939ee4a5b753408fdbeded519523e376b7..beea4cf17c44f8415a32b541825d6897eb661bc6 100644 (file)
@@ -11,7 +11,6 @@ use pbs_api_types::PRIVILEGES;
 use proxmox_backup::api2;
 
 fn get_args() -> (String, Vec<String>) {
-
     let mut args = std::env::args();
     let prefix = args.next().unwrap();
     let prefix = prefix.rsplit('/').next().unwrap().to_string(); // without path
@@ -21,7 +20,6 @@ fn get_args() -> (String, Vec<String>) {
 }
 
 fn main() -> Result<(), Error> {
-
     let (_prefix, args) = get_args();
 
     if args.is_empty() {
@@ -49,10 +47,9 @@ fn main() -> Result<(), Error> {
 }
 
 fn generate_api_tree() -> String {
-
     let mut tree = Vec::new();
 
-    let mut data = dump_api_schema(& api2::ROUTER, ".");
+    let mut data = dump_api_schema(&api2::ROUTER, ".");
     data["path"] = "/".into();
     // hack: add invisible space to sort as first entry
     data["text"] = "&#x200b;Management API (HTTP)".into();
@@ -70,11 +67,13 @@ fn generate_api_tree() -> String {
     data["text"] = "Restore API (HTTP/2)".into();
     tree.push(data);
 
-    format!("var apiSchema = {};", serde_json::to_string_pretty(&tree).unwrap())
+    format!(
+        "var apiSchema = {};",
+        serde_json::to_string_pretty(&tree).unwrap()
+    )
 }
 
 pub fn dump_schema(schema: &Schema) -> Value {
-
     let mut data;
 
     match schema {
@@ -112,23 +111,18 @@ pub fn dump_schema(schema: &Schema) -> Value {
             match string_schema.format {
                 None | Some(ApiStringFormat::VerifyFn(_)) => { /* do nothing */ }
                 Some(ApiStringFormat::Pattern(const_regex)) => {
-                    data["pattern"] = format!("/{}/", const_regex.regex_string)
-                        .into();
+                    data["pattern"] = format!("/{}/", const_regex.regex_string).into();
                 }
                 Some(ApiStringFormat::Enum(variants)) => {
-                    let variants: Vec<String> = variants
-                        .iter()
-                        .map(|e| e.value.to_string())
-                        .collect();
+                    let variants: Vec<String> =
+                        variants.iter().map(|e| e.value.to_string()).collect();
                     data["enum"] = serde_json::to_value(variants).unwrap();
                 }
                 Some(ApiStringFormat::PropertyString(subschema)) => {
-
                     match subschema {
                         Schema::Object(_) | Schema::Array(_) => {
                             data["format"] = dump_schema(subschema);
-                            data["typetext"] = get_property_string_type_text(subschema)
-                                .into();
+                            data["typetext"] = get_property_string_type_text(subschema).into();
                         }
                         _ => { /* do nothing  - shouldnot happen */ }
                     };
@@ -137,7 +131,7 @@ pub fn dump_schema(schema: &Schema) -> Value {
             // fixme: dump format
         }
         Schema::Integer(integer_schema) => {
-           data = json!({
+            data = json!({
                 "type": "integer",
                 "description": integer_schema.description,
             });
@@ -162,7 +156,7 @@ pub fn dump_schema(schema: &Schema) -> Value {
             if let Some(minimum) = number_schema.minimum {
                 data["minimum"] = minimum.into();
             }
-             if let Some(maximum) = number_schema.maximum {
+            if let Some(maximum) = number_schema.maximum {
                 data["maximum"] = maximum.into();
             }
         }
@@ -182,7 +176,7 @@ pub fn dump_schema(schema: &Schema) -> Value {
             if let Some(min_length) = array_schema.min_length {
                 data["minLength"] = min_length.into();
             }
-             if let Some(max_length) = array_schema.min_length {
+            if let Some(max_length) = array_schema.min_length {
                 data["maxLength"] = max_length.into();
             }
         }
@@ -216,7 +210,6 @@ pub fn dump_property_schema(param: &dyn ObjectSchemaType) -> Value {
 }
 
 fn dump_api_permission(permission: &Permission) -> Value {
-
     match permission {
         Permission::Superuser => json!({ "user": "root@pam" }),
         Permission::User(user) => json!({ "user": user }),
@@ -233,7 +226,6 @@ fn dump_api_permission(permission: &Permission) -> Value {
             })
         }
         Permission::Privilege(name, value, partial) => {
-
             let mut privs = Vec::new();
             for (name, v) in PRIVILEGES {
                 if (value & v) != 0 {
@@ -260,10 +252,7 @@ fn dump_api_permission(permission: &Permission) -> Value {
     }
 }
 
-fn dump_api_method_schema(
-    method: &str,
-    api_method: &ApiMethod,
-) -> Value {
+fn dump_api_method_schema(method: &str, api_method: &ApiMethod) -> Value {
     let mut data = json!({
         "description": api_method.parameters.description(),
     });
@@ -277,10 +266,16 @@ fn dump_api_method_schema(
     data["returns"] = returns;
 
     match api_method.access {
-        ApiAccess { description: None, permission: Permission::Superuser } => {
+        ApiAccess {
+            description: None,
+            permission: Permission::Superuser,
+        } => {
             // no need to output default
         }
-        ApiAccess { description, permission } => {
+        ApiAccess {
+            description,
+            permission,
+        } => {
             let mut permissions = dump_api_permission(permission);
             if let Some(description) = description {
                 permissions["description"] = description.into();
@@ -301,11 +296,7 @@ fn dump_api_method_schema(
     data
 }
 
-pub fn dump_api_schema(
-    router: &Router,
-    path: &str,
-) -> Value {
-
+pub fn dump_api_schema(router: &Router, path: &str) -> Value {
     let mut data = json!({});
 
     let mut info = json!({});
@@ -327,7 +318,7 @@ pub fn dump_api_schema(
     match &router.subroute {
         None => {
             data["leaf"] = 1.into();
-        },
+        }
         Some(SubRoute::MatchAll { router, param_name }) => {
             let sub_path = if path == "." {
                 format!("/{{{}}}", param_name)
@@ -343,7 +334,6 @@ pub fn dump_api_schema(
             data["leaf"] = 0.into();
         }
         Some(SubRoute::Map(dirmap)) => {
-
             let mut children = Vec::new();
 
             for (key, sub_router) in dirmap.iter() {
diff --git a/src/bin/proxmox-backup-api.rs b/src/bin/proxmox-backup-api.rs
index 68aa48633a2440c437bf78a9b4f2323a728a2f3e..f47656e2388c90b4a3c7fd85aa5f04d84f913567 100644 (file)
@@ -4,19 +4,21 @@ use std::pin::Pin;
 use anyhow::{bail, Error};
 use futures::*;
 use http::request::Parts;
+use http::HeaderMap;
 use http::Response;
 use hyper::{Body, Method, StatusCode};
-use http::HeaderMap;
 
 use proxmox_lang::try_block;
 use proxmox_router::{RpcEnvironmentType, UserInformation};
 use proxmox_sys::fs::CreateOptions;
 
-use proxmox_rest_server::{daemon, AuthError, ApiConfig, RestServer, RestEnvironment, ServerAdapter};
+use proxmox_rest_server::{
+    daemon, ApiConfig, AuthError, RestEnvironment, RestServer, ServerAdapter,
+};
 
-use proxmox_backup::server::auth::check_pbs_auth;
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::config;
+use proxmox_backup::server::auth::check_pbs_auth;
 
 fn main() {
     pbs_tools::setup_libc_malloc_opts();
@@ -32,14 +34,12 @@ fn main() {
 struct ProxmoxBackupApiAdapter;
 
 impl ServerAdapter for ProxmoxBackupApiAdapter {
-
     fn get_index(
         &self,
         _env: RestEnvironment,
         _parts: Parts,
     ) -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
         Box::pin(async move {
-
             let index = "<center><h1>Proxmox Backup API Server</h1></center>";
 
             Response::builder()
@@ -54,10 +54,14 @@ impl ServerAdapter for ProxmoxBackupApiAdapter {
         &'a self,
         headers: &'a HeaderMap,
         method: &'a Method,
-    ) -> Pin<Box<dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>> + Send + 'a>> {
-        Box::pin(async move {
-            check_pbs_auth(headers, method).await
-        })
+    ) -> Pin<
+        Box<
+            dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>>
+                + Send
+                + 'a,
+        >,
+    > {
+        Box::pin(async move { check_pbs_auth(headers, method).await })
     }
 }
 
@@ -65,7 +69,8 @@ async fn run() -> Result<(), Error> {
     if let Err(err) = syslog::init(
         syslog::Facility::LOG_DAEMON,
         log::LevelFilter::Info,
-        Some("proxmox-backup-api")) {
+        Some("proxmox-backup-api"),
+    ) {
         bail!("unable to inititialize syslog - {}", err);
     }
 
@@ -100,10 +105,17 @@ async fn run() -> Result<(), Error> {
     )?;
 
     let backup_user = pbs_config::backup_user()?;
-    let mut commando_sock = proxmox_rest_server::CommandSocket::new(proxmox_rest_server::our_ctrl_sock(), backup_user.gid);
+    let mut commando_sock = proxmox_rest_server::CommandSocket::new(
+        proxmox_rest_server::our_ctrl_sock(),
+        backup_user.gid,
+    );
 
-    let dir_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
-    let file_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+    let dir_opts = CreateOptions::new()
+        .owner(backup_user.uid)
+        .group(backup_user.gid);
+    let file_opts = CreateOptions::new()
+        .owner(backup_user.uid)
+        .group(backup_user.gid);
 
     config.enable_access_log(
         pbs_buildcfg::API_ACCESS_LOG_FN,
@@ -119,27 +131,26 @@ async fn run() -> Result<(), Error> {
         &mut commando_sock,
     )?;
 
-
     let rest_server = RestServer::new(config);
-    proxmox_rest_server::init_worker_tasks(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(), file_opts.clone())?;
+    proxmox_rest_server::init_worker_tasks(
+        pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
+        file_opts.clone(),
+    )?;
 
     // http server future:
-    let server = daemon::create_daemon(
-        ([127,0,0,1], 82).into(),
-        move |listener| {
-            let incoming = hyper::server::conn::AddrIncoming::from_listener(listener)?;
-
-            Ok(async {
-                daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
-
-                hyper::Server::builder(incoming)
-                    .serve(rest_server)
-                    .with_graceful_shutdown(proxmox_rest_server::shutdown_future())
-                    .map_err(Error::from)
-                    .await
-            })
-        },
-    );
+    let server = daemon::create_daemon(([127, 0, 0, 1], 82).into(), move |listener| {
+        let incoming = hyper::server::conn::AddrIncoming::from_listener(listener)?;
+
+        Ok(async {
+            daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
+
+            hyper::Server::builder(incoming)
+                .serve(rest_server)
+                .with_graceful_shutdown(proxmox_rest_server::shutdown_future())
+                .map_err(Error::from)
+                .await
+        })
+    });
 
     proxmox_rest_server::write_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
 
diff --git a/src/bin/proxmox-backup-debug.rs b/src/bin/proxmox-backup-debug.rs
index 33ac4d50bdbbaadb7e1f5f8f540a2caf9ab9dbac..e467c9568a9dc71254af65facd61616f32e21096 100644 (file)
@@ -20,5 +20,9 @@ fn main() {
     let mut rpcenv = CliEnvironment::new();
     rpcenv.set_auth_id(Some(format!("{}@pam", username)));
 
-    run_cli_command(cmd_def, rpcenv, Some(|future| proxmox_async::runtime::main(future)));
+    run_cli_command(
+        cmd_def,
+        rpcenv,
+        Some(|future| proxmox_async::runtime::main(future)),
+    );
 }
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 72d204bf09d669bd283839630d43da581f3f34af..744a93f9fb471b2ea943af063e7aec9797bda612 100644 (file)
@@ -47,8 +47,8 @@ use pbs_buildcfg::configdir;
 use proxmox_time::CalendarEvent;
 
 use pbs_api_types::{
-    Authid, DataStoreConfig, PruneOptions, SyncJobConfig, TapeBackupJobConfig,
-    VerificationJobConfig, Operation
+    Authid, DataStoreConfig, Operation, PruneOptions, SyncJobConfig, TapeBackupJobConfig,
+    VerificationJobConfig,
 };
 
 use proxmox_rest_server::daemon;
@@ -101,10 +101,14 @@ impl ServerAdapter for ProxmoxBackupProxyAdapter {
         &'a self,
         headers: &'a HeaderMap,
         method: &'a Method,
-    ) -> Pin<Box<dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>> + Send + 'a>> {
-        Box::pin(async move {
-            check_pbs_auth(headers, method).await
-        })
+    ) -> Pin<
+        Box<
+            dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>>
+                + Send
+                + 'a,
+        >,
+    > {
+        Box::pin(async move { check_pbs_auth(headers, method).await })
     }
 }
 
@@ -194,7 +198,11 @@ async fn run() -> Result<(), Error> {
 
     if let Err(err) = syslog::init(
         syslog::Facility::LOG_DAEMON,
-        if debug { log::LevelFilter::Debug } else { log::LevelFilter::Info },
+        if debug {
+            log::LevelFilter::Debug
+        } else {
+            log::LevelFilter::Info
+        },
         Some("proxmox-backup-proxy"),
     ) {
         bail!("unable to inititialize syslog - {}", err);
diff --git a/src/bin/proxmox_backup_debug/api.rs b/src/bin/proxmox_backup_debug/api.rs
index 75c8818a7b8c090c1bb515f7bfdf1b92f50c464a..20df7ba6e01046c0495cca164d43fc022e546b52 100644 (file)
@@ -166,7 +166,6 @@ fn merge_parameters(
         }));
     }
 
-
     let params = schema.parse_parameter_strings(&param_list, true)?;
 
     Ok(params)
diff --git a/src/bin/proxmox_backup_debug/inspect.rs b/src/bin/proxmox_backup_debug/inspect.rs
index 2e6160a94ed5cf3af378409aa3f17aad81de5443..37bc6e050ee43f96fa880d41131fbfdba41764b0 100644 (file)
@@ -1,13 +1,13 @@
 use std::collections::HashSet;
 use std::fs::File;
 use std::io::{stdout, Read, Seek, SeekFrom, Write};
-use std::path::Path;
 use std::panic::{RefUnwindSafe, UnwindSafe};
+use std::path::Path;
 
 use anyhow::{bail, format_err, Error};
+use hex::FromHex;
 use serde_json::{json, Value};
 use walkdir::WalkDir;
-use hex::FromHex;
 
 use proxmox_router::cli::{
     format_and_print_result, get_output_format, CliCommand, CliCommandMap, CommandLineInterface,
@@ -15,7 +15,8 @@ use proxmox_router::cli::{
 };
 use proxmox_schema::api;
 
-use pbs_tools::crypt_config::CryptConfig;
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_config::key_config::load_and_decrypt_key;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::file_formats::{
     COMPRESSED_BLOB_MAGIC_1_0, DYNAMIC_SIZED_CHUNK_INDEX_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
@@ -24,8 +25,7 @@ use pbs_datastore::file_formats::{
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::DataBlob;
-use pbs_config::key_config::load_and_decrypt_key;
-use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_tools::crypt_config::CryptConfig;
 
 // Returns either a new file, if a path is given, or stdout, if no path is given.
 fn outfile_or_stdout<P: AsRef<Path>>(
@@ -128,8 +128,7 @@ fn inspect_chunk(
 
     let digest_raw: Option<[u8; 32]> = digest
         .map(|ref d| {
-            <[u8; 32]>::from_hex(d)
-                .map_err(|e| format_err!("could not parse chunk - {}", e))
+            <[u8; 32]>::from_hex(d).map_err(|e| format_err!("could not parse chunk - {}", e))
         })
         .map_or(Ok(None), |r| r.map(Some))?;
 
diff --git a/src/bin/proxmox_backup_debug/mod.rs b/src/bin/proxmox_backup_debug/mod.rs
index a3a526dd06345fa3feef4a8c8a56125e1321c5b6..f092c5854908097ea9425b29500828636ae0d31b 100644 (file)
@@ -1,3 +1,3 @@
+pub mod api;
 pub mod inspect;
 pub mod recover;
-pub mod api;
diff --git a/src/bin/proxmox_backup_debug/recover.rs b/src/bin/proxmox_backup_debug/recover.rs
index 0c062e38e35b5fdcaae32c80439dd13a9015162c..61d3b27366575d999eecf21bb40a18bcb8a8b3b6 100644 (file)
@@ -8,14 +8,14 @@ use serde_json::Value;
 use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
 use proxmox_schema::api;
 
-use pbs_tools::crypt_config::CryptConfig;
+use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_config::key_config::load_and_decrypt_key;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::file_formats::{DYNAMIC_SIZED_CHUNK_INDEX_1_0, FIXED_SIZED_CHUNK_INDEX_1_0};
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::DataBlob;
-use pbs_config::key_config::load_and_decrypt_key;
-use pbs_client::tools::key_source::get_encryption_key_password;
+use pbs_tools::crypt_config::CryptConfig;
 
 #[api(
     input: {
diff --git a/src/bin/sg-tape-cmd.rs b/src/bin/sg-tape-cmd.rs
index 7edfe5c57644e8e652dfcd3c6b8182a82d61c169..fe301ba2f7e6169c09b5958ebc852c2790d89cbd 100644 (file)
@@ -2,7 +2,6 @@
 /// to read and set the encryption key.
 ///
 /// This command can use STDIN as tape device handle.
-
 use std::fs::File;
 use std::os::unix::io::{AsRawFd, FromRawFd};
 
@@ -14,24 +13,15 @@ use proxmox_schema::api;
 use proxmox_uuid::Uuid;
 
 use pbs_api_types::{
-    Fingerprint, LTO_DRIVE_PATH_SCHEMA, DRIVE_NAME_SCHEMA, TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
-    MEDIA_SET_UUID_SCHEMA, LtoTapeDrive,
+    Fingerprint, LtoTapeDrive, DRIVE_NAME_SCHEMA, LTO_DRIVE_PATH_SCHEMA, MEDIA_SET_UUID_SCHEMA,
+    TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
 };
 
-use pbs_tape::linux_list_drives::{open_lto_tape_device, check_tape_is_lto_tape_device};
+use pbs_tape::linux_list_drives::{check_tape_is_lto_tape_device, open_lto_tape_device};
 
-use proxmox_backup::{
-    tape::{
-        drive::{
-            TapeDriver,
-            LtoTapeHandle,
-            open_lto_tape_drive,
-        },
-    },
-};
+use proxmox_backup::tape::drive::{open_lto_tape_drive, LtoTapeHandle, TapeDriver};
 
 fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
-
     let handle = if let Some(name) = param["drive"].as_str() {
         let (config, _digest) = pbs_config::drive::config()?;
         let drive: LtoTapeDrive = config.lookup("lto", name)?;
@@ -56,7 +46,9 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
 
         let mut drive_names = Vec::new();
         for (name, (section_type, _)) in config.sections.iter() {
-            if section_type != "lto" { continue; }
+            if section_type != "lto" {
+                continue;
+            }
             drive_names.push(name);
         }
 
@@ -106,7 +98,6 @@ fn set_encryption(
     uuid: Option<Uuid>,
     param: Value,
 ) -> Result<(), Error> {
-
     let result = proxmox_lang::try_block!({
         let mut handle = get_tape_handle(&param)?;
 
@@ -123,7 +114,8 @@ fn set_encryption(
         }
 
         Ok(())
-    }).map_err(|err: Error| err.to_string());
+    })
+    .map_err(|err: Error| err.to_string());
 
     println!("{}", serde_json::to_string_pretty(&result)?);
 
@@ -131,7 +123,6 @@ fn set_encryption(
 }
 
 fn main() -> Result<(), Error> {
-
     // check if we are user root or backup
     let backup_uid = pbs_config::backup_user()?.uid;
     let backup_gid = pbs_config::backup_group()?.gid;
@@ -146,16 +137,13 @@ fn main() -> Result<(), Error> {
     if !running_uid.is_root() && (running_uid != backup_uid || running_gid != backup_gid) {
         bail!(
             "Not running as backup user or group (got uid {} gid {})",
-            running_uid, running_gid,
+            running_uid,
+            running_gid,
         );
     }
 
-    let cmd_def = CliCommandMap::new()
-        .insert(
-            "encryption",
-            CliCommand::new(&API_METHOD_SET_ENCRYPTION)
-        )
-        ;
+    let cmd_def =
+        CliCommandMap::new().insert("encryption", CliCommand::new(&API_METHOD_SET_ENCRYPTION));
 
     let mut rpcenv = CliEnvironment::new();
     rpcenv.set_auth_id(Some(String::from("root@pam")));
diff --git a/src/config/acme/mod.rs b/src/config/acme/mod.rs
index 00b7a21db400f76c080874d88b5156e0a19bcf26..d226223ed1c2bd91c4f77b03cb937bbc3470d33a 100644 (file)
@@ -6,15 +6,11 @@ use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
 use proxmox_sys::error::SysError;
-use proxmox_sys::fs::{CreateOptions, file_read_string};
+use proxmox_sys::fs::{file_read_string, CreateOptions};
 
 use pbs_api_types::PROXMOX_SAFE_ID_REGEX;
 
-use crate::api2::types::{
-    AcmeChallengeSchema,
-    KnownAcmeDirectory,
-    AcmeAccountName,
-};
+use crate::api2::types::{AcmeAccountName, AcmeChallengeSchema, KnownAcmeDirectory};
 
 pub(crate) const ACME_DIR: &str = pbs_buildcfg::configdir!("/acme");
 pub(crate) const ACME_ACCOUNT_DIR: &str = pbs_buildcfg::configdir!("/acme/accounts");
@@ -65,7 +61,6 @@ pub fn account_path(name: &str) -> String {
     format!("{}/{}", ACME_ACCOUNT_DIR, name)
 }
 
-
 pub fn foreach_acme_account<F>(mut func: F) -> Result<(), Error>
 where
     F: FnMut(AcmeAccountName) -> ControlFlow<Result<(), Error>>,
@@ -163,7 +158,10 @@ pub fn complete_acme_plugin_type(_arg: &str, _param: &HashMap<String, String>) -
     ]
 }
 
-pub fn complete_acme_api_challenge_type(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
+pub fn complete_acme_api_challenge_type(
+    _arg: &str,
+    param: &HashMap<String, String>,
+) -> Vec<String> {
     if param.get("type") == Some(&"dns".to_string()) {
         match load_dns_challenge_schema() {
             Ok(schema) => schema.into_iter().map(|s| s.id).collect(),
diff --git a/src/config/mod.rs b/src/config/mod.rs
index ae9ad03eb0e7879f23c04df1465fa7515141e6bd..a3e184e07bd84205451c3a4ec3ee35fd697e4050 100644 (file)
@@ -4,11 +4,11 @@
 //! configuration files.
 
 use anyhow::{bail, format_err, Error};
-use std::path::PathBuf;
 use nix::sys::stat::Mode;
-use openssl::rsa::{Rsa};
-use openssl::x509::{X509Builder};
 use openssl::pkey::PKey;
+use openssl::rsa::Rsa;
+use openssl::x509::X509Builder;
+use std::path::PathBuf;
 
 use proxmox_lang::try_block;
 
@@ -73,23 +73,23 @@ pub fn create_configdir() -> Result<(), Error> {
 
     let backup_user = pbs_config::backup_user()?;
 
-    nix::unistd::chown(cfgdir, Some(backup_user.uid), Some(backup_user.gid))
-        .map_err(|err| {
-            format_err!(
-                "unable to set configuration directory '{}' permissions - {}",
-                cfgdir,
-                err
-            )
-        })
+    nix::unistd::chown(cfgdir, Some(backup_user.uid), Some(backup_user.gid)).map_err(|err| {
+        format_err!(
+            "unable to set configuration directory '{}' permissions - {}",
+            cfgdir,
+            err
+        )
+    })
 }
 
 /// Update self signed node certificate.
 pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {
-
     let key_path = PathBuf::from(configdir!("/proxy.key"));
     let cert_path = PathBuf::from(configdir!("/proxy.pem"));
 
-    if key_path.exists() && cert_path.exists() && !force { return Ok(()); }
+    if key_path.exists() && cert_path.exists() && !force {
+        return Ok(());
+    }
 
     let rsa = Rsa::generate(4096).unwrap();
 
@@ -101,7 +101,7 @@ pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {
 
     let today = openssl::asn1::Asn1Time::days_from_now(0)?;
     x509.set_not_before(&today)?;
-    let expire = openssl::asn1::Asn1Time::days_from_now(365*1000)?;
+    let expire = openssl::asn1::Asn1Time::days_from_now(365 * 1000)?;
     x509.set_not_after(&expire)?;
 
     let nodename = proxmox_sys::nodename();
@@ -144,8 +144,12 @@ pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {
 
     alt_names.dns("localhost");
 
-    if nodename != "localhost" { alt_names.dns(nodename); }
-    if nodename != fqdn { alt_names.dns(&fqdn); }
+    if nodename != "localhost" {
+        alt_names.dns(nodename);
+    }
+    if nodename != fqdn {
+        alt_names.dns(&fqdn);
+    }
 
     let alt_names = alt_names.build(&context)?;
 
diff --git a/src/config/node.rs b/src/config/node.rs
index 07e88ee28b47b5336d8594f51c83637f09653f3e..5a6a48d4e5fe83f15fdfeba769f0aa5d586a4afc 100644 (file)
@@ -1,7 +1,7 @@
 use std::collections::HashSet;
 
-use openssl::ssl::{SslAcceptor, SslMethod};
 use anyhow::{bail, Error};
+use openssl::ssl::{SslAcceptor, SslMethod};
 use serde::{Deserialize, Serialize};
 
 use proxmox_schema::{api, ApiStringFormat, ApiType, Updater};
@@ -66,7 +66,7 @@ pub struct AcmeConfig {
 // TODO: auto-generate from available translations
 #[api]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="lowercase")]
+#[serde(rename_all = "lowercase")]
 pub enum Translation {
     /// Arabic
     Ar,
@@ -107,7 +107,7 @@ pub enum Translation {
     /// Polish
     Pl,
     /// Portuguese (Brazil)
-    #[serde(rename="pt_BR")]
+    #[serde(rename = "pt_BR")]
     PtBr,
     /// Russian
     Ru,
@@ -118,10 +118,10 @@ pub enum Translation {
     /// Turkish
     Tr,
     /// Chinese (simplified)
-    #[serde(rename="zh_CN")]
+    #[serde(rename = "zh_CN")]
     ZhCn,
     /// Chinese (traditional)
-    #[serde(rename="zh_TW")]
+    #[serde(rename = "zh_TW")]
     ZhTw,
 }
 
@@ -208,11 +208,11 @@ pub struct NodeConfig {
     pub email_from: Option<String>,
 
     /// List of TLS ciphers for TLS 1.3 that will be used by the proxy. (Proxy has to be restarted for changes to take effect)
-    #[serde(skip_serializing_if = "Option::is_none", rename="ciphers-tls-1.3")]
+    #[serde(skip_serializing_if = "Option::is_none", rename = "ciphers-tls-1.3")]
     pub ciphers_tls_1_3: Option<String>,
 
     /// List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. (Proxy has to be restarted for changes to take effect)
-    #[serde(skip_serializing_if = "Option::is_none", rename="ciphers-tls-1.2")]
+    #[serde(skip_serializing_if = "Option::is_none", rename = "ciphers-tls-1.2")]
     pub ciphers_tls_1_2: Option<String>,
 
     /// Default language used in the GUI
diff --git a/src/rrd_cache.rs b/src/rrd_cache.rs
index 6e94728c1b0cdc2b63bb6fd0d34737b72fef3a5e..9a012a7df6b47cd68cd5e22e56748f856a8b538b 100644 (file)
@@ -9,12 +9,12 @@ use std::path::Path;
 use anyhow::{format_err, Error};
 use once_cell::sync::OnceCell;
 
-use proxmox_sys::fs::CreateOptions;
+use proxmox_rrd::rrd::{CF, DST, RRD};
 use proxmox_rrd::RRDCache;
-use proxmox_rrd::rrd::{RRD, DST, CF};
+use proxmox_sys::fs::CreateOptions;
 
-use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
 use pbs_api_types::{RRDMode, RRDTimeFrame};
+use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
 
 const RRD_CACHE_BASEDIR: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/rrdb");
 
@@ -22,14 +22,15 @@ static RRD_CACHE: OnceCell<RRDCache> = OnceCell::new();
 
 /// Get the RRD cache instance
 pub fn get_rrd_cache() -> Result<&'static RRDCache, Error> {
-    RRD_CACHE.get().ok_or_else(|| format_err!("RRD cache not initialized!"))
+    RRD_CACHE
+        .get()
+        .ok_or_else(|| format_err!("RRD cache not initialized!"))
 }
 
 /// Initialize the RRD cache instance
 ///
 /// Note: Only a single process must do this (proxmox-backup-proxy)
 pub fn initialize_rrd_cache() -> Result<&'static RRDCache, Error> {
-
     let backup_user = pbs_config::backup_user()?;
 
     let file_options = CreateOptions::new()
@@ -40,7 +41,7 @@ pub fn initialize_rrd_cache() -> Result<&'static RRDCache, Error> {
         .owner(backup_user.uid)
         .group(backup_user.gid);
 
-    let apply_interval = 30.0*60.0; // 30 minutes
+    let apply_interval = 30.0 * 60.0; // 30 minutes
 
     let cache = RRDCache::new(
         RRD_CACHE_BASEDIR,
@@ -50,47 +51,45 @@ pub fn initialize_rrd_cache() -> Result<&'static RRDCache, Error> {
         load_callback,
     )?;
 
-    RRD_CACHE.set(cache)
+    RRD_CACHE
+        .set(cache)
         .map_err(|_| format_err!("RRD cache already initialized!"))?;
 
     Ok(RRD_CACHE.get().unwrap())
 }
 
-fn load_callback(
-    path: &Path,
-    _rel_path: &str,
-    dst: DST,
-) -> RRD {
-
+fn load_callback(path: &Path, _rel_path: &str, dst: DST) -> RRD {
     match RRD::load(path, true) {
         Ok(rrd) => rrd,
         Err(err) => {
             if err.kind() != std::io::ErrorKind::NotFound {
-                log::warn!("overwriting RRD file {:?}, because of load error: {}", path, err);
+                log::warn!(
+                    "overwriting RRD file {:?}, because of load error: {}",
+                    path,
+                    err
+                );
             }
             RRDCache::create_proxmox_backup_default_rrd(dst)
-        },
+        }
     }
 }
 
-
 /// Extracts data for the specified time frame from from RRD cache
 pub fn extract_rrd_data(
     basedir: &str,
     name: &str,
     timeframe: RRDTimeFrame,
     mode: RRDMode,
-) ->  Result<Option<(u64, u64, Vec<Option<f64>>)>, Error> {
-
+) -> Result<Option<(u64, u64, Vec<Option<f64>>)>, Error> {
     let end = proxmox_time::epoch_f64() as u64;
 
     let (start, resolution) = match timeframe {
         RRDTimeFrame::Hour => (end - 3600, 60),
-        RRDTimeFrame::Day => (end - 3600*24, 60),
-        RRDTimeFrame::Week => (end - 3600*24*7, 30*60),
-        RRDTimeFrame::Month => (end - 3600*24*30, 30*60),
-        RRDTimeFrame::Year => (end - 3600*24*365, 6*60*60),
-        RRDTimeFrame::Decade => (end - 10*3600*24*366, 7*86400),
+        RRDTimeFrame::Day => (end - 3600 * 24, 60),
+        RRDTimeFrame::Week => (end - 3600 * 24 * 7, 30 * 60),
+        RRDTimeFrame::Month => (end - 3600 * 24 * 30, 30 * 60),
+        RRDTimeFrame::Year => (end - 3600 * 24 * 365, 6 * 60 * 60),
+        RRDTimeFrame::Decade => (end - 10 * 3600 * 24 * 366, 7 * 86400),
     };
 
     let cf = match mode {
diff --git a/src/tools/apt.rs b/src/tools/apt.rs
index c884380e56161c0f1a027deec526fbdf40b10b04..c638f570f3261a3cc82e4ead3a5dbcec7219f925 100644 (file)
@@ -1,14 +1,14 @@
-use std::collections::HashSet;
 use std::collections::HashMap;
+use std::collections::HashSet;
 
-use anyhow::{Error, bail, format_err};
+use anyhow::{bail, format_err, Error};
 use apt_pkg_native::Cache;
 
-use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
 use proxmox_schema::const_regex;
+use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
 
-use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
 use pbs_api_types::APTUpdateInfo;
+use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
 
 const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json");
 
@@ -25,8 +25,13 @@ pub struct PkgState {
 pub fn write_pkg_cache(state: &PkgState) -> Result<(), Error> {
     let serialized_state = serde_json::to_string(state)?;
 
-    replace_file(APT_PKG_STATE_FN, serialized_state.as_bytes(), CreateOptions::new(), false)
-        .map_err(|err| format_err!("Error writing package cache - {}", err))?;
+    replace_file(
+        APT_PKG_STATE_FN,
+        serialized_state.as_bytes(),
+        CreateOptions::new(),
+        false,
+    )
+    .map_err(|err| format_err!("Error writing package cache - {}", err))?;
     Ok(())
 }
 
@@ -42,7 +47,7 @@ pub fn read_pkg_state() -> Result<Option<PkgState>, Error> {
         .map_err(|err| format_err!("could not parse cached package status - {}", err))
 }
 
-pub fn pkg_cache_expired () -> Result<bool, Error> {
+pub fn pkg_cache_expired() -> Result<bool, Error> {
     if let Ok(pbs_cache) = std::fs::metadata(APT_PKG_STATE_FN) {
         let apt_pkgcache = std::fs::metadata("/var/cache/apt/pkgcache.bin")?;
         let dpkg_status = std::fs::metadata("/var/lib/dpkg/status")?;
@@ -57,26 +62,28 @@ pub fn pkg_cache_expired () -> Result<bool, Error> {
 }
 
 pub fn update_cache() -> Result<PkgState, Error> {
-        // update our cache
-        let all_upgradeable = list_installed_apt_packages(|data| {
-            data.candidate_version == data.active_version &&
-            data.installed_version != Some(data.candidate_version)
-        }, None);
-
-        let cache = match read_pkg_state() {
-            Ok(Some(mut cache)) => {
-                cache.package_status = all_upgradeable;
-                cache
-            },
-            _ => PkgState {
-                notified: None,
-                package_status: all_upgradeable,
-            },
-        };
-        write_pkg_cache(&cache)?;
-        Ok(cache)
-}
+    // update our cache
+    let all_upgradeable = list_installed_apt_packages(
+        |data| {
+            data.candidate_version == data.active_version
+                && data.installed_version != Some(data.candidate_version)
+        },
+        None,
+    );
 
+    let cache = match read_pkg_state() {
+        Ok(Some(mut cache)) => {
+            cache.package_status = all_upgradeable;
+            cache
+        }
+        _ => PkgState {
+            notified: None,
+            package_status: all_upgradeable,
+        },
+    };
+    write_pkg_cache(&cache)?;
+    Ok(cache)
+}
 
 const_regex! {
     VERSION_EPOCH_REGEX = r"^\d+:";
@@ -108,9 +115,12 @@ fn get_changelog_url(
                 if output.len() < 2 {
                     bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output)
                 }
-                output[1..output.len()-1].to_owned()
-            },
-            None => bail!("invalid output from 'apt-get changelog --print-uris': {}", output)
+                output[1..output.len() - 1].to_owned()
+            }
+            None => bail!(
+                "invalid output from 'apt-get changelog --print-uris': {}",
+                output
+            ),
         };
         return Ok(output);
     } else if origin == "Proxmox" {
@@ -123,18 +133,22 @@ fn get_changelog_url(
                 let base_capture = captures.get(1);
                 match base_capture {
                     Some(base_underscore) => base_underscore.as_str().replace("_", "/"),
-                    None => bail!("incompatible filename, cannot find regex group")
+                    None => bail!("incompatible filename, cannot find regex group"),
                 }
-            },
-            None => bail!("incompatible filename, doesn't match regex")
+            }
+            None => bail!("incompatible filename, doesn't match regex"),
         };
 
         if component == "pbs-enterprise" {
-            return Ok(format!("https://enterprise.proxmox.com/{}/{}_{}.changelog",
-                              base, package, version));
+            return Ok(format!(
+                "https://enterprise.proxmox.com/{}/{}_{}.changelog",
+                base, package, version
+            ));
         } else {
-            return Ok(format!("http://download.proxmox.com/{}/{}_{}.changelog",
-                              base, package, version));
+            return Ok(format!(
+                "http://download.proxmox.com/{}/{}_{}.changelog",
+                base, package, version
+            ));
         }
     }
 
@@ -162,7 +176,6 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
     filter: F,
     only_versions_for: Option<&str>,
 ) -> Vec<APTUpdateInfo> {
-
     let mut ret = Vec::new();
     let mut depends = HashSet::new();
 
@@ -172,26 +185,20 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
 
     let mut cache_iter = match only_versions_for {
         Some(name) => cache.find_by_name(name),
-        None => cache.iter()
+        None => cache.iter(),
     };
 
     loop {
-
         match cache_iter.next() {
             Some(view) => {
                 let di = if only_versions_for.is_some() {
-                    query_detailed_info(
-                        PackagePreSelect::All,
-                        &filter,
-                        view,
-                        None
-                    )
+                    query_detailed_info(PackagePreSelect::All, &filter, view, None)
                 } else {
                     query_detailed_info(
                         PackagePreSelect::OnlyInstalled,
                         &filter,
                         view,
-                        Some(&mut depends)
+                        Some(&mut depends),
                     )
                 };
                 if let Some(info) = di {
@@ -201,7 +208,7 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
                 if only_versions_for.is_some() {
                     break;
                 }
-            },
+            }
             None => {
                 drop(cache_iter);
                 // also loop through missing dependencies, as they would be installed
@@ -209,15 +216,10 @@ pub fn list_installed_apt_packages<F: Fn(FilterData) -> bool>(
                     let mut iter = cache.find_by_name(pkg);
                     let view = match iter.next() {
                         Some(view) => view,
-                        None => continue // package not found, ignore
+                        None => continue, // package not found, ignore
                     };
 
-                    let di = query_detailed_info(
-                        PackagePreSelect::OnlyNew,
-                        &filter,
-                        view,
-                        None
-                    );
+                    let di = query_detailed_info(PackagePreSelect::OnlyNew, &filter, view, None);
                     if let Some(info) = di {
                         ret.push(info);
                     }
@@ -238,7 +240,7 @@ fn query_detailed_info<'a, F, V>(
 ) -> Option<APTUpdateInfo>
 where
     F: Fn(FilterData) -> bool,
-    V: std::ops::Deref<Target = apt_pkg_native::sane::PkgView<'a>>
+    V: std::ops::Deref<Target = apt_pkg_native::sane::PkgView<'a>>,
 {
     let current_version = view.current_version();
     let candidate_version = view.candidate_version();
@@ -247,8 +249,8 @@ where
         PackagePreSelect::OnlyInstalled => match (current_version, candidate_version) {
             (Some(cur), Some(can)) => (Some(cur), can), // package installed and there is an update
             (Some(cur), None) => (Some(cur.clone()), cur), // package installed and up-to-date
-            (None, Some(_)) => return None, // package could be installed
-            (None, None) => return None, // broken
+            (None, Some(_)) => return None,             // package could be installed
+            (None, None) => return None,                // broken
         },
         PackagePreSelect::OnlyNew => match (current_version, candidate_version) {
             (Some(_), Some(_)) => return None,
@@ -267,7 +269,6 @@ where
     // get additional information via nested APT 'iterators'
     let mut view_iter = view.versions();
     while let Some(ver) = view_iter.next() {
-
         let package = view.name();
         let version = ver.version();
         let mut origin_res = "unknown".to_owned();
@@ -299,7 +300,6 @@ where
             let mut origin_iter = ver.origin_iter();
             let origin = origin_iter.next();
             if let Some(origin) = origin {
-
                 if let Some(sd) = origin.short_desc() {
                     short_desc = sd;
                 }
@@ -324,8 +324,8 @@ where
 
                     // build changelog URL from gathered information
                     // ignore errors, use empty changelog instead
-                    let url = get_changelog_url(&package, &filename,
-                        &version, &origin_res, &component);
+                    let url =
+                        get_changelog_url(&package, &filename, &version, &origin_res, &component);
                     if let Ok(url) = url {
                         change_log_url = url;
                     }
@@ -338,7 +338,7 @@ where
                     let dep = match dep_iter.next() {
                         Some(dep) if dep.dep_type() != "Depends" => continue,
                         Some(dep) => dep,
-                        None => break
+                        None => break,
                     };
 
                     let dep_pkg = dep.target_pkg();
@@ -358,7 +358,7 @@ where
                 version: candidate_version.clone(),
                 old_version: match current_version {
                     Some(vers) => vers,
-                    None => "".to_owned()
+                    None => "".to_owned(),
                 },
                 priority: priority_res,
                 section: section_res,
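
As an aside for readers of the hunk above (illustrative only, not part of this commit): the pre-selection logic boils down to a tuple match on `(current_version, candidate_version)`. A std-only sketch, with a hypothetical `PreSelect` enum standing in for `PackagePreSelect` and plain version strings instead of APT views:

#[derive(Clone, Copy)]
enum PreSelect {
    OnlyInstalled,
    OnlyNew,
}

// Returns (old_version, version) for packages that pass the pre-selection.
fn pre_select(
    mode: PreSelect,
    current: Option<&str>,
    candidate: Option<&str>,
) -> Option<(Option<String>, String)> {
    match mode {
        PreSelect::OnlyInstalled => match (current, candidate) {
            (Some(cur), Some(can)) => Some((Some(cur.to_owned()), can.to_owned())), // update available
            (Some(cur), None) => Some((Some(cur.to_owned()), cur.to_owned())),      // up-to-date
            _ => None, // not installed (or broken), skip
        },
        PreSelect::OnlyNew => match (current, candidate) {
            (None, Some(can)) => Some((None, can.to_owned())),
            _ => None, // already installed or no candidate, skip
        },
    }
}

fn main() {
    assert!(pre_select(PreSelect::OnlyNew, None, Some("2.1-1")).is_some());
    assert!(pre_select(PreSelect::OnlyInstalled, None, Some("2.1-1")).is_none());
}
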
index cc722094f210642aa32d9e8d9b3ab9c0df01534d..c1e9efcaeb75beec66431fd4f9e75c731a0549f7 100644 (file)
@@ -119,7 +119,9 @@ pub fn from_property_string<T>(input: &str, schema: &'static Schema) -> Result<T
 where
     T: for<'de> Deserialize<'de>,
 {
-    Ok(serde_json::from_value(schema.parse_property_string(input)?)?)
+    Ok(serde_json::from_value(
+        schema.parse_property_string(input)?,
+    )?)
 }
 
 /// Serialize a data structure using a 'key: value' config file format.
@@ -154,7 +156,7 @@ fn object_to_writer(output: &mut dyn Write, object: &Object) -> Result<(), Error
     for (key, value) in object.iter() {
         match value {
             _ if key == "description" => continue, // skip description as we handle it above
-            Value::Null => continue,           // delete this entry
+            Value::Null => continue,               // delete this entry
             Value::Bool(v) => writeln!(output, "{}: {}", key, v)?,
             Value::String(v) => {
                 if v.as_bytes().contains(&b'\n') {
@@ -183,11 +185,10 @@ fn test() {
         acmedomain1: test2.invalid.local\n\
     ";
 
-    let data: NodeConfig = from_str(NODE_CONFIG, &NodeConfig::API_SCHEMA)
-        .expect("failed to parse simple node config");
+    let data: NodeConfig =
+        from_str(NODE_CONFIG, &NodeConfig::API_SCHEMA).expect("failed to parse simple node config");
 
-    let config = to_bytes(&data, &NodeConfig::API_SCHEMA)
-        .expect("failed to serialize node config");
+    let config = to_bytes(&data, &NodeConfig::API_SCHEMA).expect("failed to serialize node config");
 
     assert_eq!(config, NODE_CONFIG.as_bytes());
 }
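
Illustrative sketch (an assumption, not the pbs implementation): the 'key: value' writer that `object_to_writer()` reformats above amounts to iterating a JSON object, dropping `Null` entries and emitting one line per scalar. Using `serde_json`, which is already a dependency here:

use std::io::Write;

use serde_json::{json, Value};

fn write_kv(output: &mut dyn Write, object: &serde_json::Map<String, Value>) -> std::io::Result<()> {
    for (key, value) in object {
        match value {
            Value::Null => continue, // treat Null as "delete this entry"
            Value::Bool(v) => writeln!(output, "{}: {}", key, v)?,
            Value::Number(v) => writeln!(output, "{}: {}", key, v)?,
            Value::String(v) => writeln!(output, "{}: {}", key, v)?,
            _ => continue, // nested values are out of scope for this sketch
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let data = json!({ "acme": "account=default", "stale": null });
    let mut out = Vec::new();
    write_kv(&mut out, data.as_object().unwrap())?;
    // the Null entry is skipped entirely
    assert_eq!(String::from_utf8(out).unwrap(), "acme: account=default\n");
    Ok(())
}
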
index 94da7b3a0677a7ca0d4aec6756d3910e4a34f8e8..568dccbf44a283fae23265e375427ce77b45f598 100644 (file)
@@ -14,10 +14,10 @@ use once_cell::sync::OnceCell;
 
 use ::serde::{Deserialize, Serialize};
 
-use proxmox_schema::api;
 use proxmox_lang::error::io_err_other;
-use proxmox_sys::linux::procfs::{mountinfo::Device, MountInfo};
 use proxmox_lang::{io_bail, io_format_err};
+use proxmox_schema::api;
+use proxmox_sys::linux::procfs::{mountinfo::Device, MountInfo};
 
 use pbs_api_types::{StorageStatus, BLOCKDEVICE_NAME_REGEX};
 
index 32013a3fa69c59083e4d2ce079d674dad1630151..8ac73d6eaf4eb6f8945e65e54b5fbbad4541a204 100644 (file)
@@ -6,11 +6,7 @@ use std::any::Any;
 use anyhow::{bail, format_err, Error};
 use openssl::hash::{hash, DigestBytes, MessageDigest};
 
-use proxmox_http::{
-    client::SimpleHttp,
-    client::SimpleHttpOptions,
-    ProxyConfig,
-};
+use proxmox_http::{client::SimpleHttp, client::SimpleHttpOptions, ProxyConfig};
 
 pub mod apt;
 pub mod config;
@@ -36,8 +32,7 @@ pub fn get_hardware_address() -> Result<String, Error> {
 
     let contents = proxmox_sys::fs::file_get_contents(FILENAME)
         .map_err(|e| format_err!("Error getting host key - {}", e))?;
-    let digest = md5sum(&contents)
-        .map_err(|e| format_err!("Error digesting host key - {}", e))?;
+    let digest = md5sum(&contents).map_err(|e| format_err!("Error digesting host key - {}", e))?;
 
     Ok(hex::encode(&digest).to_uppercase())
 }
@@ -49,11 +44,13 @@ pub fn assert_if_modified(digest1: &str, digest2: &str) -> Result<(), Error> {
     Ok(())
 }
 
-
 /// Detect modified configuration files
 ///
 /// This function fails with a reasonable error message if checksums do not match.
-pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
+pub fn detect_modified_configuration_file(
+    digest1: &[u8; 32],
+    digest2: &[u8; 32],
+) -> Result<(), Error> {
     if digest1 != digest2 {
         bail!("detected modified configuration - file changed by other user? Try again.");
     }
index b205940681ccbbb0de129352fdc1d5db11b9509c..c4316ad061baa5f52b7a587cee2302d33e92ba69 100644 (file)
@@ -59,7 +59,8 @@ impl<I: Send + 'static> ParallelHandler<I> {
     /// Create a new thread pool, each thread processing incoming data
     /// with 'handler_fn'.
     pub fn new<F>(name: &str, threads: usize, handler_fn: F) -> Self
-        where F: Fn(I) -> Result<(), Error> + Send + Clone + 'static,
+    where
+        F: Fn(I) -> Result<(), Error> + Send + Clone + 'static,
     {
         let mut handles = Vec::new();
         let (input_tx, input_rx) = bounded::<I>(threads);
@@ -89,7 +90,7 @@ impl<I: Send + 'static> ParallelHandler<I> {
                             }
                         }
                     })
-                    .unwrap()
+                    .unwrap(),
             );
         }
         Self {
@@ -132,19 +133,17 @@ impl<I: Send + 'static> ParallelHandler<I> {
     }
 
     fn join_threads(&mut self) -> Vec<String> {
-
         let mut msg_list = Vec::new();
 
         let mut i = 0;
         while let Some(handle) = self.handles.pop() {
             if let Err(panic) = handle.join() {
                 match panic.downcast::<&str>() {
-                    Ok(panic_msg) => msg_list.push(
-                        format!("thread {} ({}) panicked: {}", self.name, i, panic_msg)
-                    ),
-                    Err(_) => msg_list.push(
-                        format!("thread {} ({}) panicked", self.name, i)
-                    ),
+                    Ok(panic_msg) => msg_list.push(format!(
+                        "thread {} ({}) panicked: {}",
+                        self.name, i, panic_msg
+                    )),
+                    Err(_) => msg_list.push(format!("thread {} ({}) panicked", self.name, i)),
                 }
             }
             i += 1;
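
For context on `join_threads()` above, a minimal std-only sketch of the same panic-collection pattern: join each handle and downcast the panic payload into a readable message. The thread pool, the crossbeam channel and the `ParallelHandler` struct itself are left out:

use std::thread;

fn main() {
    let handles: Vec<_> = (0..2)
        .map(|i| thread::spawn(move || if i == 1 { panic!("worker {} failed", i) }))
        .collect();

    let mut msg_list = Vec::new();
    for (i, handle) in handles.into_iter().enumerate() {
        if let Err(panic) = handle.join() {
            // panics carry a Box<dyn Any>; format!-style panics are Strings,
            // literal panics are &str, so try both before giving up
            let msg = panic
                .downcast_ref::<&str>()
                .map(|s| s.to_string())
                .or_else(|| panic.downcast_ref::<String>().cloned())
                .unwrap_or_else(|| "unknown panic payload".to_string());
            msg_list.push(format!("thread {} panicked: {}", i, msg));
        }
    }
    assert_eq!(msg_list.len(), 1);
}
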
index 1a0656d47a1cc17886bdd680c2fe8118e52361eb..bcbeb8b9ee58050c7b8e5e6c72a8e867f09766df 100644 (file)
@@ -1,6 +1,6 @@
-use std::path::PathBuf;
 use std::mem::MaybeUninit;
-use std::time::{Instant, Duration};
+use std::path::PathBuf;
+use std::time::{Duration, Instant};
 
 use anyhow::{bail, Error};
 use nix::sys::stat::Mode;
@@ -8,11 +8,12 @@ use nix::sys::stat::Mode;
 use proxmox_sys::fs::{create_path, CreateOptions};
 
 use proxmox_http::client::{RateLimit, RateLimiter, ShareableRateLimit};
-use proxmox_shared_memory::{Init, SharedMemory, SharedMutex};
 use proxmox_shared_memory::{check_subtype, initialize_subtype};
+use proxmox_shared_memory::{Init, SharedMemory, SharedMutex};
 
 // openssl::sha::sha256(b"Proxmox Backup SharedRateLimiter v1.0")[0..8];
-pub const PROXMOX_BACKUP_SHARED_RATE_LIMITER_MAGIC_1_0: [u8; 8] = [6, 58, 213, 96, 161, 122, 130, 117];
+pub const PROXMOX_BACKUP_SHARED_RATE_LIMITER_MAGIC_1_0: [u8; 8] =
+    [6, 58, 213, 96, 161, 122, 130, 117];
 
 const BASE_PATH: &str = pbs_buildcfg::rundir!("/shmem/tbf");
 
@@ -61,11 +62,10 @@ impl Init for SharedRateLimiterData {
 /// implements [Init]. This way we can share the limiter between
 /// different processes.
 pub struct SharedRateLimiter {
-    shmem: SharedMemory<SharedRateLimiterData>
+    shmem: SharedMemory<SharedRateLimiterData>,
 }
 
 impl SharedRateLimiter {
-
     /// Creates a new mmap'ed instance.
     ///
     /// Data is mapped in `/var/run/proxmox-backup/shmem/tbf/<name>` using
@@ -80,10 +80,7 @@ impl SharedRateLimiter {
             .owner(user.uid)
             .group(user.gid);
 
-        create_path(
-            &path,
-            Some(dir_opts.clone()),
-            Some(dir_opts))?;
+        create_path(&path, Some(dir_opts.clone()), Some(dir_opts))?;
 
         path.push(name);
 
@@ -92,8 +89,7 @@ impl SharedRateLimiter {
             .owner(user.uid)
             .group(user.gid);
 
-        let shmem: SharedMemory<SharedRateLimiterData> =
-            SharedMemory::open(&path, file_opts)?;
+        let shmem: SharedMemory<SharedRateLimiterData> = SharedMemory::open(&path, file_opts)?;
 
         shmem.data().tbf.lock().0.update_rate(rate, burst);
 
@@ -103,17 +99,24 @@ impl SharedRateLimiter {
 
 impl ShareableRateLimit for SharedRateLimiter {
     fn update_rate(&self, rate: u64, bucket_size: u64) {
-        self.shmem.data().tbf.lock().0
+        self.shmem
+            .data()
+            .tbf
+            .lock()
+            .0
             .update_rate(rate, bucket_size);
     }
 
     fn traffic(&self) -> u64 {
-        self.shmem.data().tbf.lock().0
-            .traffic()
+        self.shmem.data().tbf.lock().0.traffic()
     }
 
     fn register_traffic(&self, current_time: Instant, data_len: u64) -> Duration {
-        self.shmem.data().tbf.lock().0
+        self.shmem
+            .data()
+            .tbf
+            .lock()
+            .0
             .register_traffic(current_time, data_len)
     }
 }
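
The `SharedRateLimiter` reformatted above wraps a token-bucket limiter in shared memory so several processes see the same bucket. As an illustration only, a simplified, process-local sketch of just the token-bucket arithmetic (std-only, not the proxmox_http `RateLimiter` API):

use std::time::{Duration, Instant};

struct TokenBucket {
    rate: u64,   // tokens (bytes) refilled per second
    burst: u64,  // bucket capacity
    tokens: f64, // currently available tokens
    last_refill: Instant,
}

impl TokenBucket {
    fn new(rate: u64, burst: u64) -> Self {
        Self { rate, burst, tokens: burst as f64, last_refill: Instant::now() }
    }

    /// Register `data_len` bytes of traffic and return how long the caller
    /// should delay to stay within the configured rate.
    fn register_traffic(&mut self, now: Instant, data_len: u64) -> Duration {
        let elapsed = now.duration_since(self.last_refill).as_secs_f64();
        self.last_refill = now;
        self.tokens = (self.tokens + elapsed * self.rate as f64).min(self.burst as f64);
        self.tokens -= data_len as f64;
        if self.tokens >= 0.0 {
            Duration::ZERO
        } else {
            Duration::from_secs_f64(-self.tokens / self.rate as f64)
        }
    }
}

fn main() {
    let mut tbf = TokenBucket::new(1024, 1024); // 1 KiB/s, 1 KiB burst
    let now = Instant::now();
    assert_eq!(tbf.register_traffic(now, 512), Duration::ZERO);
    // the second 1 KiB exceeds the bucket, so a delay is requested
    assert!(tbf.register_traffic(now, 1024) > Duration::ZERO);
}
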
index 36e646679b92687450d83e60cb6e9f90e15f1c92..414d56143f3a7adb1bc3c7a3c9ac5c1bba5a6684 100644 (file)
@@ -1,6 +1,6 @@
 //! Helpers for common statistics tasks
-use num_traits::NumAssignRef;
 use num_traits::cast::ToPrimitive;
+use num_traits::NumAssignRef;
 
 /// Calculates the sum of a list of numbers
 /// ```
@@ -14,7 +14,7 @@ use num_traits::cast::ToPrimitive;
 /// ```
 pub fn sum<T>(list: &[T]) -> T
 where
-    T: NumAssignRef + ToPrimitive
+    T: NumAssignRef + ToPrimitive,
 {
     let mut sum = T::zero();
     for num in list {
@@ -32,13 +32,13 @@ where
 /// ```
 pub fn mean<T>(list: &[T]) -> Option<f64>
 where
-    T: NumAssignRef + ToPrimitive
+    T: NumAssignRef + ToPrimitive,
 {
     let len = list.len();
     if len == 0 {
-        return None
+        return None;
     }
-    Some(sum(list).to_f64()?/(list.len() as f64))
+    Some(sum(list).to_f64()? / (list.len() as f64))
 }
 
 /// Calculates the variance of a variable x
@@ -50,13 +50,13 @@ where
 /// ```
 pub fn variance<T>(list: &[T]) -> Option<f64>
 where
-    T: NumAssignRef + ToPrimitive
+    T: NumAssignRef + ToPrimitive,
 {
     covariance(list, list)
 }
 
 /// Calculates the (non-corrected) covariance of two variables x,y
-pub fn covariance<X, Y> (x: &[X], y: &[Y]) -> Option<f64>
+pub fn covariance<X, Y>(x: &[X], y: &[Y]) -> Option<f64>
 where
     X: NumAssignRef + ToPrimitive,
     Y: NumAssignRef + ToPrimitive,
@@ -64,19 +64,21 @@ where
     let len_x = x.len();
     let len_y = y.len();
     if len_x == 0 || len_y == 0 || len_x != len_y {
-        return None
+        return None;
     }
 
     let mean_x = mean(x)?;
     let mean_y = mean(y)?;
 
-    let covariance: f64 = (0..len_x).map(|i| {
-        let x = x[i].to_f64().unwrap_or(0.0);
-        let y = y[i].to_f64().unwrap_or(0.0);
-        (x - mean_x)*(y - mean_y)
-    }).sum();
+    let covariance: f64 = (0..len_x)
+        .map(|i| {
+            let x = x[i].to_f64().unwrap_or(0.0);
+            let y = y[i].to_f64().unwrap_or(0.0);
+            (x - mean_x) * (y - mean_y)
+        })
+        .sum();
 
-    Some(covariance/(len_x as f64))
+    Some(covariance / (len_x as f64))
 }
 
 /// Returns the factors `(a,b)` of a linear regression `y = a + bx`
@@ -90,15 +92,15 @@ where
 /// assert!((a - -4.0).abs() < 0.001);
 /// assert!((b - 2.0).abs() < 0.001);
 /// ```
-pub fn linear_regression<X, Y> (x: &[X], y: &[Y]) -> Option<(f64, f64)>
+pub fn linear_regression<X, Y>(x: &[X], y: &[Y]) -> Option<(f64, f64)>
 where
     X: NumAssignRef + ToPrimitive,
-    Y: NumAssignRef + ToPrimitive
+    Y: NumAssignRef + ToPrimitive,
 {
     let len_x = x.len();
     let len_y = y.len();
     if len_x == 0 || len_y == 0 || len_x != len_y {
-        return None
+        return None;
     }
 
     let mean_x = mean(x)?;
@@ -113,11 +115,11 @@ where
 
         let x_mean_x = x - mean_x;
 
-        covariance += x_mean_x*(y - mean_y);
+        covariance += x_mean_x * (y - mean_y);
         variance += x_mean_x * x_mean_x;
     }
 
-    let beta = covariance/variance;
-    let alpha = mean_y - beta*mean_x;
-    Some((alpha,beta))
+    let beta = covariance / variance;
+    let alpha = mean_y - beta * mean_x;
+    Some((alpha, beta))
 }
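
The helpers above compute `beta = cov(x,y) / var(x)` and `alpha = mean(y) - beta * mean(x)`. A self-contained worked example over plain `f64` slices (illustrative, not the generic API above), with data chosen so the fit matches the `alpha ≈ -4`, `beta ≈ 2` asserted in the doctest:

fn mean(v: &[f64]) -> f64 {
    v.iter().sum::<f64>() / v.len() as f64
}

fn linear_fit(x: &[f64], y: &[f64]) -> Option<(f64, f64)> {
    if x.is_empty() || x.len() != y.len() {
        return None;
    }
    let (mx, my) = (mean(x), mean(y));
    let mut cov = 0.0;
    let mut var = 0.0;
    for (xi, yi) in x.iter().zip(y) {
        cov += (xi - mx) * (yi - my);
        var += (xi - mx) * (xi - mx);
    }
    let beta = cov / var;
    Some((my - beta * mx, beta)) // (alpha, beta)
}

fn main() {
    // points on the line y = -4 + 2x
    let x = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0];
    let y = [-4.0, -2.0, 0.0, 2.0, 4.0, 6.0];
    let (a, b) = linear_fit(&x, &y).unwrap();
    assert!((a + 4.0).abs() < 1e-9 && (b - 2.0).abs() < 1e-9);
}
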
index e07fd358fd11e8bf1f3e6f6e27eee1e623c31e9b..673f66c5e0c654271099879713020a876b040683 100644 (file)
@@ -1,4 +1,4 @@
-use anyhow::{Error, format_err, bail};
+use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
 use regex::Regex;
 use serde::{Deserialize, Serialize};
@@ -6,16 +6,13 @@ use serde_json::json;
 
 use proxmox_schema::api;
 
-use proxmox_sys::fs::{replace_file, CreateOptions};
 use proxmox_http::client::SimpleHttp;
+use proxmox_sys::fs::{replace_file, CreateOptions};
 
 use pbs_tools::json::json_object_to_query;
 
 use crate::config::node;
-use crate::tools::{
-    self,
-    pbs_simple_http,
-};
+use crate::tools::{self, pbs_simple_http};
 
 /// How long the local key is valid for in between remote checks
 pub const MAX_LOCAL_KEY_AGE: i64 = 15 * 24 * 3600;
@@ -41,7 +38,9 @@ pub enum SubscriptionStatus {
     INVALID,
 }
 impl Default for SubscriptionStatus {
-    fn default() -> Self { SubscriptionStatus::NOTFOUND }
+    fn default() -> Self {
+        SubscriptionStatus::NOTFOUND
+    }
 }
 impl std::fmt::Display for SubscriptionStatus {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -62,41 +61,41 @@ impl std::fmt::Display for SubscriptionStatus {
     },
 )]
 #[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// Proxmox subscription information
 pub struct SubscriptionInfo {
     /// Subscription status from the last check
     pub status: SubscriptionStatus,
     /// the server ID, if permitted to access
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub serverid: Option<String>,
     /// timestamp of the last check done
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub checktime: Option<i64>,
     /// the subscription key, if set and permitted to access
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub key: Option<String>,
     /// a more human readable status message
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub message: Option<String>,
     /// human readable productname of the set subscription
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub productname: Option<String>,
     /// register date of the set subscription
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub regdate: Option<String>,
     /// next due date of the set subscription
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub nextduedate: Option<String>,
     /// URL to the web shop
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub url: Option<String>,
 }
 
 async fn register_subscription(
     key: &str,
     server_id: &str,
-    checktime: i64
+    checktime: i64,
 ) -> Result<(String, String), Error> {
     // WHCMS sample code feeds the key into this, but it's just a challenge, so keep it simple
     let rand = hex::encode(&proxmox_sys::linux::random_data(16)?);
@@ -120,7 +119,9 @@ async fn register_subscription(
 
     let uri = "https://shop.proxmox.com/modules/servers/licensing/verify.php";
     let query = json_object_to_query(params)?;
-    let response = client.post(uri, Some(query), Some("application/x-www-form-urlencoded")).await?;
+    let response = client
+        .post(uri, Some(query), Some("application/x-www-form-urlencoded"))
+        .await?;
     let body = SimpleHttp::response_body_string(response).await?;
 
     Ok((body, challenge))
@@ -132,7 +133,7 @@ fn parse_status(value: &str) -> SubscriptionStatus {
         "new" => SubscriptionStatus::NEW,
         "notfound" => SubscriptionStatus::NOTFOUND,
         "invalid" => SubscriptionStatus::INVALID,
-         _ => SubscriptionStatus::INVALID,
+        _ => SubscriptionStatus::INVALID,
     }
 }
 
@@ -164,15 +165,16 @@ fn parse_register_response(
             "productname" => info.productname = Some(value.into()),
             "regdate" => info.regdate = Some(value.into()),
             "nextduedate" => info.nextduedate = Some(value.into()),
-            "message" if value == "Directory Invalid" =>
-                info.message = Some("Invalid Server ID".into()),
+            "message" if value == "Directory Invalid" => {
+                info.message = Some("Invalid Server ID".into())
+            }
             "message" => info.message = Some(value.into()),
             "validdirectory" => {
                 if value.split(',').find(is_server_id) == None {
                     bail!("Server ID does not match");
                 }
                 info.serverid = Some(server_id.to_owned());
-            },
+            }
             "md5hash" => md5hash = value.to_owned(),
             _ => (),
         }
@@ -182,7 +184,11 @@ fn parse_register_response(
         let response_raw = format!("{}{}", SHARED_KEY_DATA, challenge);
         let expected = hex::encode(&tools::md5sum(response_raw.as_bytes())?);
         if expected != md5hash {
-            bail!("Subscription API challenge failed, expected {} != got {}", expected, md5hash);
+            bail!(
+                "Subscription API challenge failed, expected {} != got {}",
+                expected,
+                md5hash
+            );
         }
     }
     Ok(info)
@@ -210,29 +216,38 @@ fn test_parse_register_response() -> Result<(), Error> {
     let checktime = 1600000000;
     let salt = "cf44486bddb6ad0145732642c45b2957";
 
-    let info = parse_register_response(response, key.to_owned(), server_id.to_owned(), checktime, salt)?;
-
-    assert_eq!(info, SubscriptionInfo {
-        key: Some(key),
-        serverid: Some(server_id),
-        status: SubscriptionStatus::ACTIVE,
-        checktime: Some(checktime),
-        url: Some("https://www.proxmox.com/en/proxmox-backup-server/pricing".into()),
-        message: None,
-        nextduedate: Some("2021-09-19".into()),
-        regdate: Some("2020-09-19 00:00:00".into()),
-        productname: Some("Proxmox Backup Server Test Subscription -1 year".into()),
-    });
+    let info = parse_register_response(
+        response,
+        key.to_owned(),
+        server_id.to_owned(),
+        checktime,
+        salt,
+    )?;
+
+    assert_eq!(
+        info,
+        SubscriptionInfo {
+            key: Some(key),
+            serverid: Some(server_id),
+            status: SubscriptionStatus::ACTIVE,
+            checktime: Some(checktime),
+            url: Some("https://www.proxmox.com/en/proxmox-backup-server/pricing".into()),
+            message: None,
+            nextduedate: Some("2021-09-19".into()),
+            regdate: Some("2020-09-19 00:00:00".into()),
+            productname: Some("Proxmox Backup Server Test Subscription -1 year".into()),
+        }
+    );
     Ok(())
 }
 
 /// queries the up to date subscription status and parses the response
 pub fn check_subscription(key: String, server_id: String) -> Result<SubscriptionInfo, Error> {
-
     let now = proxmox_time::epoch_i64();
 
-    let (response, challenge) = proxmox_async::runtime::block_on(register_subscription(&key, &server_id, now))
-        .map_err(|err| format_err!("Error checking subscription: {}", err))?;
+    let (response, challenge) =
+        proxmox_async::runtime::block_on(register_subscription(&key, &server_id, now))
+            .map_err(|err| format_err!("Error checking subscription: {}", err))?;
 
     parse_register_response(&response, key, server_id, now, &challenge)
         .map_err(|err| format_err!("Error parsing subscription check response: {}", err))
@@ -240,16 +255,27 @@ pub fn check_subscription(key: String, server_id: String) -> Result<Subscription
 
 /// reads in subscription information and does a basic integrity verification
 pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
-
     let cfg = proxmox_sys::fs::file_read_optional_string(&SUBSCRIPTION_FN)?;
-    let cfg = if let Some(cfg) = cfg { cfg } else { return Ok(None); };
+    let cfg = if let Some(cfg) = cfg {
+        cfg
+    } else {
+        return Ok(None);
+    };
 
     let mut cfg = cfg.lines();
 
     // first line is key in plain
-    let _key = if let Some(key) = cfg.next() { key } else { return Ok(None) };
+    let _key = if let Some(key) = cfg.next() {
+        key
+    } else {
+        return Ok(None);
+    };
     // second line is checksum of encoded data
-    let checksum = if let Some(csum) = cfg.next() { csum } else { return Ok(None) };
+    let checksum = if let Some(csum) = cfg.next() {
+        csum
+    } else {
+        return Ok(None);
+    };
 
     let encoded: String = cfg.collect::<String>();
     let decoded = base64::decode(encoded.to_owned())?;
@@ -257,11 +283,16 @@ pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
 
     let info: SubscriptionInfo = serde_json::from_str(decoded)?;
 
-    let new_checksum = format!("{}{}{}", info.checktime.unwrap_or(0), encoded, SHARED_KEY_DATA);
+    let new_checksum = format!(
+        "{}{}{}",
+        info.checktime.unwrap_or(0),
+        encoded,
+        SHARED_KEY_DATA
+    );
     let new_checksum = base64::encode(tools::md5sum(new_checksum.as_bytes())?);
 
     if checksum != new_checksum {
-        return Ok(Some( SubscriptionInfo {
+        return Ok(Some(SubscriptionInfo {
             status: SubscriptionStatus::INVALID,
             message: Some("checksum mismatch".to_string()),
             ..info
@@ -269,15 +300,16 @@ pub fn read_subscription() -> Result<Option<SubscriptionInfo>, Error> {
     }
 
     let age = proxmox_time::epoch_i64() - info.checktime.unwrap_or(0);
-    if age < -5400 { // allow some delta for DST changes or time syncs, 1.5h
-        return Ok(Some( SubscriptionInfo {
+    if age < -5400 {
+        // allow some delta for DST changes or time syncs, 1.5h
+        return Ok(Some(SubscriptionInfo {
             status: SubscriptionStatus::INVALID,
             message: Some("last check date too far in the future".to_string()),
             ..info
         }));
     } else if age > MAX_LOCAL_KEY_AGE + MAX_KEY_CHECK_FAILURE_AGE {
         if let SubscriptionStatus::ACTIVE = info.status {
-            return Ok(Some( SubscriptionInfo {
+            return Ok(Some(SubscriptionInfo {
                 status: SubscriptionStatus::INVALID,
                 message: Some("subscription information too old".to_string()),
                 ..info
@@ -299,7 +331,12 @@ pub fn write_subscription(info: SubscriptionInfo) -> Result<(), Error> {
         format!("{}\n", info.key.unwrap())
     } else {
         let encoded = base64::encode(serde_json::to_string(&info)?);
-        let csum = format!("{}{}{}", info.checktime.unwrap_or(0), encoded, SHARED_KEY_DATA);
+        let csum = format!(
+            "{}{}{}",
+            info.checktime.unwrap_or(0),
+            encoded,
+            SHARED_KEY_DATA
+        );
         let csum = base64::encode(tools::md5sum(csum.as_bytes())?);
         format!("{}\n{}\n{}\n", info.key.unwrap(), csum, encoded)
     };
@@ -334,13 +371,10 @@ pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<
         (Some(key), Some(password)) => {
             let conf = format!(
                 "machine enterprise.proxmox.com/debian/pbs\n login {}\n password {}\n",
-                key,
-                password,
+                key, password,
             );
             let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
-            let file_opts = CreateOptions::new()
-                .perm(mode)
-                .owner(nix::unistd::ROOT);
+            let file_opts = CreateOptions::new().perm(mode).owner(nix::unistd::ROOT);
 
             // we use a namespaced .conf file, so just overwrite..
             replace_file(auth_conf, conf.as_bytes(), file_opts, true)
@@ -350,7 +384,8 @@ pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<
             Ok(()) => Ok(()),
             Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => Ok(()), // ignore not existing
             Err(err) => Err(err),
-        }.map_err(|e| format_err!("Error clearing apt auth config - {}", e))?,
+        }
+        .map_err(|e| format_err!("Error clearing apt auth config - {}", e))?,
     }
     Ok(())
 }
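
The checksum handled in `read_subscription()` and `write_subscription()` above is `base64(md5("<checktime><base64 payload><shared key>"))`. A small sketch of that construction using the same `openssl`, `base64` and `anyhow` crates; `SHARED_KEY_DATA` below is a placeholder value, not the module's real constant:

use openssl::hash::{hash, MessageDigest};

// placeholder only; the real shared key constant lives in this module
const SHARED_KEY_DATA: &str = "not-the-real-shared-key";

fn subscription_checksum(checktime: i64, encoded_payload: &str) -> Result<String, anyhow::Error> {
    let data = format!("{}{}{}", checktime, encoded_payload, SHARED_KEY_DATA);
    let digest = hash(MessageDigest::md5(), data.as_bytes())?;
    Ok(base64::encode(&digest))
}

fn main() -> Result<(), anyhow::Error> {
    let payload = base64::encode(r#"{"status":"active"}"#);
    let stored = subscription_checksum(1600000000, &payload)?;
    // verification simply recomputes the checksum over the stored fields
    assert_eq!(stored, subscription_checksum(1600000000, &payload)?);
    Ok(())
}
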
index 95c1a94250c79feac0bb8809ae8084d339c8709a..3e1ac1bbad2afe3adf989ed486f20ed555599b35 100644 (file)
@@ -8,7 +8,6 @@ use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlug
 
 use proxmox_sys::{fs::replace_file, fs::CreateOptions};
 
-
 lazy_static! {
     pub static ref SERVICE_CONFIG: SectionConfig = init_service();
     pub static ref TIMER_CONFIG: SectionConfig = init_timer();
@@ -16,25 +15,24 @@ lazy_static! {
 }
 
 fn init_service() -> SectionConfig {
-
     let mut config = SectionConfig::with_systemd_syntax(&SYSTEMD_SECTION_NAME_SCHEMA);
 
     match SystemdUnitSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Unit".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
         _ => unreachable!(),
     };
     match SystemdInstallSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Install".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
         _ => unreachable!(),
     };
     match SystemdServiceSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Service".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
@@ -45,25 +43,24 @@ fn init_service() -> SectionConfig {
 }
 
 fn init_timer() -> SectionConfig {
-
     let mut config = SectionConfig::with_systemd_syntax(&SYSTEMD_SECTION_NAME_SCHEMA);
 
     match SystemdUnitSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Unit".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
         _ => unreachable!(),
     };
     match SystemdInstallSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Install".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
         _ => unreachable!(),
     };
     match SystemdTimerSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Timer".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
@@ -74,25 +71,24 @@ fn init_timer() -> SectionConfig {
 }
 
 fn init_mount() -> SectionConfig {
-
     let mut config = SectionConfig::with_systemd_syntax(&SYSTEMD_SECTION_NAME_SCHEMA);
 
     match SystemdUnitSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Unit".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
         _ => unreachable!(),
     };
     match SystemdInstallSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Install".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
         _ => unreachable!(),
     };
     match SystemdMountSection::API_SCHEMA {
-        Schema::Object(ref obj_schema) =>  {
+        Schema::Object(ref obj_schema) => {
             let plugin = SectionConfigPlugin::new("Mount".to_string(), None, obj_schema);
             config.register_plugin(plugin);
         }
@@ -102,8 +98,10 @@ fn init_mount() -> SectionConfig {
     config
 }
 
-fn parse_systemd_config(config: &SectionConfig, filename: &str) -> Result<SectionConfigData, Error> {
-
+fn parse_systemd_config(
+    config: &SectionConfig,
+    filename: &str,
+) -> Result<SectionConfigData, Error> {
     let raw = proxmox_sys::fs::file_get_contents(filename)?;
     let input = String::from_utf8(raw)?;
 
@@ -124,14 +122,16 @@ pub fn parse_systemd_mount(filename: &str) -> Result<SectionConfigData, Error> {
     parse_systemd_config(&MOUNT_CONFIG, filename)
 }
 
-fn save_systemd_config(config: &SectionConfig, filename: &str, data: &SectionConfigData) -> Result<(), Error> {
+fn save_systemd_config(
+    config: &SectionConfig,
+    filename: &str,
+    data: &SectionConfigData,
+) -> Result<(), Error> {
     let raw = config.write(filename, data)?;
 
     let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
     // set the correct owner/group/permissions while saving file, owner(rw) = root
-    let options = CreateOptions::new()
-        .perm(mode)
-        .owner(nix::unistd::ROOT);
+    let options = CreateOptions::new().perm(mode).owner(nix::unistd::ROOT);
 
     replace_file(filename, raw.as_bytes(), options, true)?;
 
index 00bd9cc2b59cf1aeaf14482133d448ca28b36d60..365ccaa7a80f3de5f0a7ba6ce86bbfc1af6c3b58 100644 (file)
@@ -1,34 +1,30 @@
-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};
 
-use proxmox_schema::*;
 use pbs_api_types::SINGLE_LINE_COMMENT_FORMAT;
+use proxmox_schema::*;
 
-pub const SYSTEMD_SECTION_NAME_SCHEMA: Schema = StringSchema::new(
-    "Section name")
+pub const SYSTEMD_SECTION_NAME_SCHEMA: Schema = StringSchema::new("Section name")
     .format(&ApiStringFormat::Enum(&[
         EnumEntry::new("Unit", "Unit"),
         EnumEntry::new("Timer", "Timer"),
         EnumEntry::new("Install", "Install"),
         EnumEntry::new("Mount", "Mount"),
-        EnumEntry::new("Service", "Service")]))
+        EnumEntry::new("Service", "Service"),
+    ]))
     .schema();
 
-pub const SYSTEMD_STRING_SCHEMA: Schema =
-    StringSchema::new("Systemd configuration value.")
+pub const SYSTEMD_STRING_SCHEMA: Schema = StringSchema::new("Systemd configuration value.")
     .format(&SINGLE_LINE_COMMENT_FORMAT)
     .schema();
 
-pub const SYSTEMD_STRING_ARRAY_SCHEMA: Schema = ArraySchema::new(
-    "Array of Strings", &SYSTEMD_STRING_SCHEMA)
-    .schema();
+pub const SYSTEMD_STRING_ARRAY_SCHEMA: Schema =
+    ArraySchema::new("Array of Strings", &SYSTEMD_STRING_SCHEMA).schema();
 
-pub const SYSTEMD_TIMESPAN_ARRAY_SCHEMA: Schema = ArraySchema::new(
-    "Array of time spans", &SYSTEMD_TIMESPAN_SCHEMA)
-    .schema();
+pub const SYSTEMD_TIMESPAN_ARRAY_SCHEMA: Schema =
+    ArraySchema::new("Array of time spans", &SYSTEMD_TIMESPAN_SCHEMA).schema();
 
-pub const SYSTEMD_CALENDAR_EVENT_ARRAY_SCHEMA: Schema = ArraySchema::new(
-    "Array of calendar events", &SYSTEMD_CALENDAR_EVENT_SCHEMA)
-    .schema();
+pub const SYSTEMD_CALENDAR_EVENT_ARRAY_SCHEMA: Schema =
+    ArraySchema::new("Array of calendar events", &SYSTEMD_CALENDAR_EVENT_SCHEMA).schema();
 
 #[api(
     properties: {
@@ -70,44 +66,44 @@ pub const SYSTEMD_CALENDAR_EVENT_ARRAY_SCHEMA: Schema = ArraySchema::new(
 #[allow(non_snake_case)]
 /// Systemd Timer Section
 pub struct SystemdTimerSection {
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnCalendar: Option<Vec<String>>,
     ///  If true, the time when the service unit was last triggered is stored on disk.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Persistent: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnActiveSec: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnBootSec: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnStartupSec: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnUnitActiveSec: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnUnitInactiveSec: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub RandomizedDelaySec: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub AccuracySec: Option<String>,
 
     /// Trigger when system clock jumps.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnClockChange: Option<bool>,
 
     /// Trigger when time zone changes.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub OnTimezomeChange: Option<bool>,
 
     /// The unit to activate when this timer elapses.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Unit: Option<String>,
 
     /// If true, an elapsing timer will cause the system to resume from suspend.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub WakeSystem: Option<bool>,
 
     /// If true, an elapsed timer will stay loaded, and its state remains queryable.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub RemainAfterElapse: Option<bool>,
 }
 
@@ -128,9 +124,9 @@ pub struct SystemdTimerSection {
 /// Systemd Service Section
 pub struct SystemdServiceSection {
     /// The process start-up type for this service unit.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Type: Option<ServiceStartup>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ExecStart: Option<Vec<String>>,
 }
 
@@ -142,7 +138,7 @@ pub struct SystemdUnitSection {
     /// A human readable name for the unit.
     pub Description: String,
     /// Check whether the system has AC power.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ConditionACPower: Option<bool>,
 }
 
@@ -173,16 +169,16 @@ pub struct SystemdUnitSection {
 #[allow(non_snake_case)]
 /// Systemd Install Section
 pub struct SystemdInstallSection {
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Alias: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Also: Option<Vec<String>>,
     /// DefaultInstance for template unit.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub DefaultInstance: Option<String>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub WantedBy: Option<Vec<String>>,
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub RequiredBy: Option<Vec<String>>,
 }
 
@@ -203,27 +199,27 @@ pub struct SystemdMountSection {
     /// absolute path of a file or directory for the mount point
     pub Where: String,
     /// Takes a string for the file system type. See mount(8) for details.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Type: Option<String>,
     /// Mount options to use when mounting. This takes a comma-separated list of options.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub Options: Option<String>,
     /// If true, parsing of the options specified in Options= is relaxed, and unknown mount options are tolerated.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub SloppyOptions: Option<bool>,
     /// Use lazy unmount
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub LazyUnmount: Option<bool>,
     /// Use forces unmount
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub ForceUnmount: Option<bool>,
     /// Directories of mount points (and any parent directories) are
     /// automatically created if needed. Takes an access mode in octal
     /// notation. Defaults to 0755.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub DirectoryMode: Option<String>,
     /// Configures the time to wait for the mount command to finish.
-    #[serde(skip_serializing_if="Option::is_none")]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub TimeoutSec: Option<String>,
 }
 
@@ -246,12 +242,12 @@ pub enum ServiceStartup {
     Notify,
 }
 
-pub const SYSTEMD_TIMESPAN_SCHEMA: Schema = StringSchema::new(
-    "systemd time span")
+pub const SYSTEMD_TIMESPAN_SCHEMA: Schema = StringSchema::new("systemd time span")
     .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_time_span))
     .schema();
 
-pub const SYSTEMD_CALENDAR_EVENT_SCHEMA: Schema = StringSchema::new(
-    "systemd calendar event")
-    .format(&ApiStringFormat::VerifyFn(proxmox_time::verify_calendar_event))
+pub const SYSTEMD_CALENDAR_EVENT_SCHEMA: Schema = StringSchema::new("systemd calendar event")
+    .format(&ApiStringFormat::VerifyFn(
+        proxmox_time::verify_calendar_event,
+    ))
     .schema();
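
Much of this hunk is rustfmt normalizing `#[serde(skip_serializing_if = "Option::is_none")]` attributes. A tiny self-contained example of what that attribute does (serde and serde_json are already dependencies; the struct below is a trimmed stand-in, not the full `SystemdTimerSection`):

use serde::Serialize;

#[derive(Serialize)]
#[allow(non_snake_case)]
struct TimerSection {
    OnCalendar: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    Persistent: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    RandomizedDelaySec: Option<String>,
}

fn main() {
    let section = TimerSection {
        OnCalendar: vec!["daily".to_string()],
        Persistent: Some(true),
        RandomizedDelaySec: None,
    };
    let json = serde_json::to_string(&section).unwrap();
    // RandomizedDelaySec is None, so it is not serialized at all
    assert_eq!(json, r#"{"OnCalendar":["daily"],"Persistent":true}"#);
}
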
index f27ff6b1dc1c8522cee550ff993d79004bd1193a..267295ef9e42e3031bbefbbb5393c1b75601b869 100644 (file)
@@ -20,7 +20,9 @@ fn run_command(mut command: Command) -> Result<(), Error> {
                                     m
                                 }
                             })
-                            .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
+                            .unwrap_or_else(|_| {
+                                String::from("non utf8 error message (suppressed)")
+                            });
 
                         bail!("status code: {} - {}", code, msg);
                     }
@@ -29,7 +31,8 @@ fn run_command(mut command: Command) -> Result<(), Error> {
             }
         }
         Ok(())
-    }).map_err(|err| format_err!("command {:?} failed - {}", command, err))?;
+    })
+    .map_err(|err| format_err!("command {:?} failed - {}", command, err))?;
 
     Ok(())
 }
@@ -96,7 +99,6 @@ pub fn reload_unit(unit: &str) -> Result<(), Error> {
 #[test]
 fn test_escape_unit() -> Result<(), Error> {
     fn test_escape(i: &str, expected: &str, is_path: bool) {
-
         use proxmox_sys::systemd::{escape_unit, unescape_unit};
 
         let escaped = escape_unit(i, is_path);
index 9dfaa32e9905176b999f2f2d744405e58867a46b..d71972c4af036749d03b992a814ee2e114fb8525 100644 (file)
@@ -1,15 +1,15 @@
 //! Traffic control implementation
 
-use std::sync::{Arc, Mutex};
 use std::collections::HashMap;
+use std::convert::TryInto;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use std::sync::{Arc, Mutex};
 use std::time::Instant;
-use std::convert::TryInto;
 
 use anyhow::Error;
 use cidr::IpInet;
 
-use proxmox_http::client::{ShareableRateLimit, RateLimiter};
+use proxmox_http::client::{RateLimiter, ShareableRateLimit};
 use proxmox_section_config::SectionConfigData;
 
 use proxmox_time::{parse_daily_duration, DailyDuration, TmEditor};
@@ -20,15 +20,15 @@ use pbs_config::ConfigVersionCache;
 
 use crate::tools::SharedRateLimiter;
 
-lazy_static::lazy_static!{
+lazy_static::lazy_static! {
     /// Shared traffic control cache singleton.
     pub static ref TRAFFIC_CONTROL_CACHE: Arc<Mutex<TrafficControlCache>> =
         Arc::new(Mutex::new(TrafficControlCache::new()));
 }
 
 struct ParsedTcRule {
-    config: TrafficControlRule, // original rule config
-    networks: Vec<IpInet>, // parsed networks
+    config: TrafficControlRule,    // original rule config
+    networks: Vec<IpInet>,         // parsed networks
     timeframe: Vec<DailyDuration>, // parsed timeframe
 }
 
@@ -54,16 +54,20 @@ pub struct TrafficControlCache {
     last_update: i64,
     last_traffic_control_generation: usize,
     rules: Vec<ParsedTcRule>,
-    limiter_map: HashMap<String, (Option<Arc<dyn ShareableRateLimit>>, Option<Arc<dyn ShareableRateLimit>>)>,
+    limiter_map: HashMap<
+        String,
+        (
+            Option<Arc<dyn ShareableRateLimit>>,
+            Option<Arc<dyn ShareableRateLimit>>,
+        ),
+    >,
     use_utc: bool, // currently only used for testing
 }
 
-fn timeframe_match(
-    duration_list: &[DailyDuration],
-    now: &TmEditor,
-) -> bool {
-
-    if duration_list.is_empty() { return true; }
+fn timeframe_match(duration_list: &[DailyDuration], now: &TmEditor) -> bool {
+    if duration_list.is_empty() {
+        return true;
+    }
 
     for duration in duration_list.iter() {
         if duration.time_match_with_tm_editor(now) {
@@ -74,11 +78,7 @@ fn timeframe_match(
     false
 }
 
-fn network_match_len(
-    networks: &[IpInet],
-    ip: &IpAddr,
-) -> Option<u8> {
-
+fn network_match_len(networks: &[IpInet], ip: &IpAddr) -> Option<u8> {
     let mut match_len = None;
 
     for cidr in networks.iter() {
@@ -101,14 +101,12 @@ fn cannonical_ip(ip: IpAddr) -> IpAddr {
     // TODO: use std::net::IpAddr::to_cananical once stable
     match ip {
         IpAddr::V4(addr) => IpAddr::V4(addr),
-        IpAddr::V6(addr) => {
-            match addr.octets() {
-                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
-                    IpAddr::V4(Ipv4Addr::new(a, b, c, d))
-                }
-                _ => IpAddr::V6(addr),
+        IpAddr::V6(addr) => match addr.octets() {
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
+                IpAddr::V4(Ipv4Addr::new(a, b, c, d))
             }
-        }
+            _ => IpAddr::V6(addr),
+        },
     }
 }
 
@@ -127,7 +125,6 @@ fn create_limiter(
 }
 
 impl TrafficControlCache {
-
     fn new() -> Self {
         Self {
             use_shared_memory: true,
@@ -150,16 +147,22 @@ impl TrafficControlCache {
         let version_cache = match ConfigVersionCache::new() {
             Ok(cache) => cache,
             Err(err) => {
-                log::error!("TrafficControlCache::reload failed in ConfigVersionCache::new: {}", err);
+                log::error!(
+                    "TrafficControlCache::reload failed in ConfigVersionCache::new: {}",
+                    err
+                );
                 return;
             }
         };
 
         let traffic_control_generation = version_cache.traffic_control_generation();
 
-        if (self.last_update != 0) &&
-            (traffic_control_generation == self.last_traffic_control_generation) &&
-            ((now - self.last_update) < 60) { return; }
+        if (self.last_update != 0)
+            && (traffic_control_generation == self.last_traffic_control_generation)
+            && ((now - self.last_update) < 60)
+        {
+            return;
+        }
 
         log::debug!("reload traffic control rules");
 
@@ -184,9 +187,10 @@ impl TrafficControlCache {
     ///
     /// This should be called every second (from `proxmox-backup-proxy`).
     pub fn compute_current_rates(&mut self) {
-
         let elapsed = self.last_rate_compute.elapsed().as_micros();
-        if elapsed < 200_000 { return } // not enough data
+        if elapsed < 200_000 {
+            return;
+        } // not enough data
 
         let mut new_rate_map = HashMap::new();
 
@@ -228,30 +232,30 @@ impl TrafficControlCache {
     }
 
     fn update_config(&mut self, config: &SectionConfigData) -> Result<(), Error> {
-        self.limiter_map.retain(|key, _value| config.sections.contains_key(key));
+        self.limiter_map
+            .retain(|key, _value| config.sections.contains_key(key));
 
-        let rules: Vec<TrafficControlRule> =
-            config.convert_to_typed_array("rule")?;
+        let rules: Vec<TrafficControlRule> = config.convert_to_typed_array("rule")?;
 
         let mut active_rules = Vec::new();
 
         for rule in rules {
-
-            let entry = self.limiter_map.entry(rule.name.clone()).or_insert((None, None));
+            let entry = self
+                .limiter_map
+                .entry(rule.name.clone())
+                .or_insert((None, None));
             let limit = &rule.limit;
 
             match entry.0 {
-                Some(ref read_limiter) => {
-                    match limit.rate_in {
-                        Some(rate_in) => {
-                            read_limiter.update_rate(
-                                rate_in.as_u64(),
-                                limit.burst_in.unwrap_or(rate_in).as_u64(),
-                            );
-                        }
-                        None => entry.0 = None,
+                Some(ref read_limiter) => match limit.rate_in {
+                    Some(rate_in) => {
+                        read_limiter.update_rate(
+                            rate_in.as_u64(),
+                            limit.burst_in.unwrap_or(rate_in).as_u64(),
+                        );
                     }
-                }
+                    None => entry.0 = None,
+                },
                 None => {
                     if let Some(rate_in) = limit.rate_in {
                         let name = format!("{}.in", rule.name);
@@ -267,17 +271,15 @@ impl TrafficControlCache {
             }
 
             match entry.1 {
-                Some(ref write_limiter) => {
-                    match limit.rate_out {
-                        Some(rate_out) => {
-                            write_limiter.update_rate(
-                                rate_out.as_u64(),
-                                limit.burst_out.unwrap_or(rate_out).as_u64(),
-                            );
-                        }
-                        None => entry.1 = None,
+                Some(ref write_limiter) => match limit.rate_out {
+                    Some(rate_out) => {
+                        write_limiter.update_rate(
+                            rate_out.as_u64(),
+                            limit.burst_out.unwrap_or(rate_out).as_u64(),
+                        );
                     }
-                }
+                    None => entry.1 = None,
+                },
                 None => {
                     if let Some(rate_out) = limit.rate_out {
                         let name = format!("{}.out", rule.name);
@@ -314,7 +316,11 @@ impl TrafficControlCache {
                 networks.push(cidr);
             }
 
-            active_rules.push(ParsedTcRule { config: rule, networks, timeframe });
+            active_rules.push(ParsedTcRule {
+                config: rule,
+                networks,
+                timeframe,
+            });
         }
 
         self.rules = active_rules;
@@ -333,8 +339,11 @@ impl TrafficControlCache {
         &self,
         peer: SocketAddr,
         now: i64,
-    ) -> (&str, Option<Arc<dyn ShareableRateLimit>>, Option<Arc<dyn ShareableRateLimit>>) {
-
+    ) -> (
+        &str,
+        Option<Arc<dyn ShareableRateLimit>>,
+        Option<Arc<dyn ShareableRateLimit>>,
+    ) {
         let peer_ip = cannonical_ip(peer.ip());
 
         log::debug!("lookup_rate_limiter: {:?}", peer_ip);
@@ -350,7 +359,9 @@ impl TrafficControlCache {
         let mut last_rule_match = None;
 
         for rule in self.rules.iter() {
-            if !timeframe_match(&rule.timeframe, &now) { continue; }
+            if !timeframe_match(&rule.timeframe, &now) {
+                continue;
+            }
 
             if let Some(match_len) = network_match_len(&rule.networks, &peer_ip) {
                 match last_rule_match {
@@ -367,9 +378,11 @@ impl TrafficControlCache {
         match last_rule_match {
             Some((rule, _)) => {
                 match self.limiter_map.get(&rule.config.name) {
-                    Some((read_limiter, write_limiter)) => {
-                        (&rule.config.name, read_limiter.clone(), write_limiter.clone())
-                    }
+                    Some((read_limiter, write_limiter)) => (
+                        &rule.config.name,
+                        read_limiter.clone(),
+                        write_limiter.clone(),
+                    ),
                     None => ("", None, None), // should never happen
                 }
             }
@@ -378,23 +391,27 @@ impl TrafficControlCache {
     }
 }
 
-
 #[cfg(test)]
 mod test {
     use super::*;
 
     const fn make_test_time(mday: i32, hour: i32, min: i32) -> i64 {
-        (mday*3600*24 + hour*3600 + min*60) as i64
+        (mday * 3600 * 24 + hour * 3600 + min * 60) as i64
     }
 
     #[test]
     fn testnetwork_match() -> Result<(), Error> {
-
         let networks = ["192.168.2.1/24", "127.0.0.0/8"];
         let networks: Vec<IpInet> = networks.iter().map(|n| n.parse().unwrap()).collect();
 
-        assert_eq!(network_match_len(&networks, &"192.168.2.1".parse()?), Some(24));
-        assert_eq!(network_match_len(&networks, &"192.168.2.254".parse()?), Some(24));
+        assert_eq!(
+            network_match_len(&networks, &"192.168.2.1".parse()?),
+            Some(24)
+        );
+        assert_eq!(
+            network_match_len(&networks, &"192.168.2.254".parse()?),
+            Some(24)
+        );
         assert_eq!(network_match_len(&networks, &"192.168.3.1".parse()?), None);
         assert_eq!(network_match_len(&networks, &"127.1.1.0".parse()?), Some(8));
         assert_eq!(network_match_len(&networks, &"128.1.1.0".parse()?), None);
@@ -402,14 +419,16 @@ mod test {
         let networks = ["0.0.0.0/0"];
         let networks: Vec<IpInet> = networks.iter().map(|n| n.parse().unwrap()).collect();
         assert_eq!(network_match_len(&networks, &"127.1.1.0".parse()?), Some(0));
-        assert_eq!(network_match_len(&networks, &"192.168.2.1".parse()?), Some(0));
+        assert_eq!(
+            network_match_len(&networks, &"192.168.2.1".parse()?),
+            Some(0)
+        );
 
         Ok(())
     }
 
     #[test]
-    fn test_rule_match()  -> Result<(), Error> {
-
+    fn test_rule_match() -> Result<(), Error> {
         let config_data = "
 rule: rule1
        comment my test rule
@@ -448,32 +467,35 @@ rule: somewhere
         let private = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 2, 35)), 1234);
         let somewhere = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 1234);
 
-        let (rule, read_limiter, write_limiter) = cache.lookup_rate_limiter(somewhere, THURSDAY_80_00);
+        let (rule, read_limiter, write_limiter) =
+            cache.lookup_rate_limiter(somewhere, THURSDAY_80_00);
         assert_eq!(rule, "somewhere");
         assert!(read_limiter.is_some());
         assert!(write_limiter.is_some());
 
-         let (rule, read_limiter, write_limiter) = cache.lookup_rate_limiter(local, THURSDAY_19_00);
+        let (rule, read_limiter, write_limiter) = cache.lookup_rate_limiter(local, THURSDAY_19_00);
         assert_eq!(rule, "rule2");
         assert!(read_limiter.is_some());
         assert!(write_limiter.is_some());
 
-        let (rule, read_limiter, write_limiter) = cache.lookup_rate_limiter(gateway, THURSDAY_15_00);
+        let (rule, read_limiter, write_limiter) =
+            cache.lookup_rate_limiter(gateway, THURSDAY_15_00);
         assert_eq!(rule, "rule1");
         assert!(read_limiter.is_some());
         assert!(write_limiter.is_some());
 
-        let (rule, read_limiter, write_limiter) = cache.lookup_rate_limiter(gateway, THURSDAY_19_00);
+        let (rule, read_limiter, write_limiter) =
+            cache.lookup_rate_limiter(gateway, THURSDAY_19_00);
         assert_eq!(rule, "somewhere");
         assert!(read_limiter.is_some());
         assert!(write_limiter.is_some());
 
-        let (rule, read_limiter, write_limiter) = cache.lookup_rate_limiter(private, THURSDAY_19_00);
+        let (rule, read_limiter, write_limiter) =
+            cache.lookup_rate_limiter(private, THURSDAY_19_00);
         assert_eq!(rule, "rule2");
         assert!(read_limiter.is_some());
         assert!(write_limiter.is_some());
 
         Ok(())
     }
-
 }
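
To close, a std-only, IPv4-only sketch of the longest-prefix idea behind `network_match_len()` tested above: every configured network containing the peer IP matches, and the most specific prefix wins. The real code uses the `cidr` crate and also handles IPv6; this is an illustration under those simplifying assumptions:

use std::net::Ipv4Addr;

fn contains(net: Ipv4Addr, prefix_len: u8, ip: Ipv4Addr) -> bool {
    let mask = if prefix_len == 0 { 0 } else { u32::MAX << (32 - prefix_len) };
    (u32::from(net) & mask) == (u32::from(ip) & mask)
}

fn network_match_len(networks: &[(Ipv4Addr, u8)], ip: Ipv4Addr) -> Option<u8> {
    let mut match_len = None;
    for &(net, prefix_len) in networks {
        if contains(net, prefix_len, ip) {
            match match_len {
                Some(len) if len >= prefix_len => {} // keep the more specific match
                _ => match_len = Some(prefix_len),
            }
        }
    }
    match_len
}

fn main() {
    let networks = [
        (Ipv4Addr::new(192, 168, 2, 0), 24),
        (Ipv4Addr::new(0, 0, 0, 0), 0),
    ];
    assert_eq!(network_match_len(&networks, Ipv4Addr::new(192, 168, 2, 35)), Some(24));
    assert_eq!(network_match_len(&networks, Ipv4Addr::new(1, 2, 3, 4)), Some(0));
}
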