git.proxmox.com Git - proxmox-backup.git/commitdiff
api: rustfmt
author Thomas Lamprecht <t.lamprecht@proxmox.com>
Thu, 14 Apr 2022 11:33:01 +0000 (13:33 +0200)
committer Thomas Lamprecht <t.lamprecht@proxmox.com>
Thu, 14 Apr 2022 11:33:01 +0000 (13:33 +0200)
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
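Every hunk in this commit is mechanical reformatting; no behavior changes. A change set like this is normally produced by running rustfmt over the crate (for example via cargo fmt), though the commit message does not record the exact invocation. The most frequent change is the reordering of use lists: in these hunks rustfmt sorts the CamelCase type names ahead of the SCREAMING_SNAKE_CASE constants, each group in ascending order, as the first acl.rs hunk below shows:

    // before: hand-maintained order
    use proxmox_router::{Router, RpcEnvironment, Permission};
    // after: rustfmt's sorted order
    use proxmox_router::{Permission, Router, RpcEnvironment};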
53 files changed:
src/api2/access/acl.rs
src/api2/access/domain.rs
src/api2/access/mod.rs
src/api2/access/openid.rs
src/api2/access/role.rs
src/api2/access/user.rs
src/api2/admin/datastore.rs
src/api2/admin/mod.rs
src/api2/admin/sync.rs
src/api2/admin/traffic_control.rs
src/api2/admin/verify.rs
src/api2/backup/environment.rs
src/api2/backup/mod.rs
src/api2/backup/upload_chunk.rs
src/api2/config/access/mod.rs
src/api2/config/access/openid.rs
src/api2/config/acme.rs
src/api2/config/changer.rs
src/api2/config/datastore.rs
src/api2/config/drive.rs
src/api2/config/media_pool.rs
src/api2/config/mod.rs
src/api2/config/remote.rs
src/api2/config/sync.rs
src/api2/config/traffic_control.rs
src/api2/config/verify.rs
src/api2/helpers.rs
src/api2/mod.rs
src/api2/node/apt.rs
src/api2/node/certificates.rs
src/api2/node/config.rs
src/api2/node/disks/directory.rs
src/api2/node/disks/zfs.rs
src/api2/node/dns.rs
src/api2/node/journal.rs
src/api2/node/mod.rs
src/api2/node/network.rs
src/api2/node/report.rs
src/api2/node/rrd.rs
src/api2/node/services.rs
src/api2/node/status.rs
src/api2/node/subscription.rs
src/api2/node/syslog.rs
src/api2/node/tasks.rs
src/api2/node/time.rs
src/api2/ping.rs
src/api2/pull.rs
src/api2/reader/environment.rs
src/api2/reader/mod.rs
src/api2/status.rs
src/api2/types/acme.rs
src/api2/types/mod.rs
src/api2/version.rs

src/api2/access/acl.rs
index 43d70ee33593ebf530f127bcbdd949e97e99ed14..6d4d4eb4ae127240816aa8ac5faa835160bc1a6f 100644 (file)
@@ -3,13 +3,12 @@
 use anyhow::{bail, Error};
 use hex::FromHex;
 
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
 use pbs_api_types::{
-    Authid, AclListItem, Role, 
-    ACL_PATH_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, PROXMOX_GROUP_ID_SCHEMA,
-    ACL_PROPAGATE_SCHEMA, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY,
+    AclListItem, Authid, Role, ACL_PATH_SCHEMA, ACL_PROPAGATE_SCHEMA, PRIV_PERMISSIONS_MODIFY,
+    PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, PROXMOX_GROUP_ID_SCHEMA,
 };
 
 use pbs_config::acl::AclTreeNode;
@@ -32,15 +31,18 @@ fn extract_acl_node_data(
 
     for (user, roles) in &node.users {
         if let Some(auth_id_filter) = auth_id_filter {
-            if !user.is_token()
-                || user.user() != auth_id_filter.user() {
-                 continue;
+            if !user.is_token() || user.user() != auth_id_filter.user() {
+                continue;
             }
         }
 
         for (role, propagate) in roles {
             list.push(AclListItem {
-                path: if path.is_empty() { String::from("/") } else { path.to_string() },
+                path: if path.is_empty() {
+                    String::from("/")
+                } else {
+                    path.to_string()
+                },
                 propagate: *propagate,
                 ugid_type: String::from("user"),
                 ugid: user.to_string(),
@@ -55,7 +57,11 @@ fn extract_acl_node_data(
 
         for (role, propagate) in roles {
             list.push(AclListItem {
-                path: if path.is_empty() { String::from("/") } else { path.to_string() },
+                path: if path.is_empty() {
+                    String::from("/")
+                } else {
+                    path.to_string()
+                },
                 propagate: *propagate,
                 ugid_type: String::from("group"),
                 ugid: group.to_string(),
@@ -201,8 +207,10 @@ pub fn update_acl(
                 } else if auth_id.user() != current_auth_id.user() {
                     bail!("Unprivileged users can only set ACL items for their own API tokens.");
                 }
-            },
-            None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
+            }
+            None => {
+                bail!("Unprivileged user needs to provide auth_id to update ACL item.");
+            }
         };
     }
 
@@ -222,18 +230,26 @@ pub fn update_acl(
     if let Some(ref _group) = group {
         bail!("parameter 'group' - groups are currently not supported.");
     } else if let Some(ref auth_id) = auth_id {
-        if !delete { // Note: we allow to delete non-existent users
+        if !delete {
+            // Note: we allow to delete non-existent users
             let user_cfg = pbs_config::user::cached_config()?;
             if user_cfg.sections.get(&auth_id.to_string()).is_none() {
-                bail!(format!("no such {}.",
-                              if auth_id.is_token() { "API token" } else { "user" }));
+                bail!(format!(
+                    "no such {}.",
+                    if auth_id.is_token() {
+                        "API token"
+                    } else {
+                        "user"
+                    }
+                ));
             }
         }
     } else {
         bail!("missing 'userid' or 'group' parameter.");
     }
 
-    if !delete { // Note: we allow to delete entries with invalid path
+    if !delete {
+        // Note: we allow to delete entries with invalid path
         pbs_config::acl::check_acl_path(&path)?;
     }
 
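The acl.rs hunks above show two rustfmt defaults. An if/else used as an expression stays inline only while the whole construct fits within max_width (100 columns by default); otherwise it is expanded to block form. A match arm whose body is a block also loses its trailing comma. A minimal sketch of the expansion, with a hypothetical helper name:

    // Hypothetical helper showing the block form rustfmt produces once
    // the inline conditional no longer fits on one line.
    fn display_path(path: &str) -> String {
        if path.is_empty() {
            String::from("/")
        } else {
            path.to_string()
        }
    }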
src/api2/access/domain.rs
index 3518e5ca8d00d447a8b1fa17b7d127b6d7508008..a0c5ec883f0f98c35266561a8f621ef91736731b 100644 (file)
@@ -1,9 +1,9 @@
 //! List Authentication domains/realms
 
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
 
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
 use pbs_api_types::BasicRealmInfo;
@@ -50,5 +50,4 @@ fn list_domains(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<BasicRealmInf
     Ok(list)
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_LIST_DOMAINS);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_DOMAINS);
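The inverse rule shows in domain.rs: anything that fits within the limit is joined onto a single line, which is why the two-line Router definition collapses and the redundant braces in use anyhow::{Error} are dropped. A self-contained sketch with hypothetical builder types:

    // Hypothetical stand-ins for proxmox_router's builder API.
    struct Router;

    impl Router {
        const fn new() -> Self {
            Router
        }
        const fn get(self, _method: &'static str) -> Self {
            self
        }
    }

    // Short enough for one line, so rustfmt keeps the chain unbroken:
    const ROUTER: Router = Router::new().get("list_domains");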
src/api2/access/mod.rs
index 70bf381de41e51fa0b040d0f93cc2a793e2c9105..504c7a105bac6c32881e23ac6fd115382941e9de 100644 (file)
@@ -6,19 +6,19 @@ use serde_json::{json, Value};
 use std::collections::HashMap;
 use std::collections::HashSet;
 
-use proxmox_sys::sortable;
 use proxmox_router::{
-    http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
+    http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Userid, Authid, PASSWORD_SCHEMA, ACL_PATH_SCHEMA,
-    PRIVILEGES, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
+    Authid, Userid, ACL_PATH_SCHEMA, PASSWORD_SCHEMA, PRIVILEGES, PRIV_PERMISSIONS_MODIFY,
+    PRIV_SYS_AUDIT,
 };
-use pbs_tools::ticket::{self, Empty, Ticket};
 use pbs_config::acl::AclTreeNode;
 use pbs_config::CachedUserInfo;
+use pbs_tools::ticket::{self, Empty, Ticket};
 
 use crate::auth_helpers::*;
 use crate::config::tfa::TfaChallenge;
@@ -193,10 +193,11 @@ pub fn create_ticket(
     tfa_challenge: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     use proxmox_rest_server::RestEnvironment;
 
-    let env: &RestEnvironment = rpcenv.as_any().downcast_ref::<RestEnvironment>()
+    let env: &RestEnvironment = rpcenv
+        .as_any()
+        .downcast_ref::<RestEnvironment>()
         .ok_or_else(|| format_err!("detected worng RpcEnvironment type"))?;
 
     match authenticate_user(&username, &password, path, privs, port, tfa_challenge) {
@@ -340,7 +341,7 @@ pub fn list_permissions(
             } else {
                 bail!("not allowed to list permissions of {}", auth_id);
             }
-        },
+        }
         None => current_auth_id,
     };
 
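access/mod.rs adds two more habits of the tool: the blank line that used to follow a function's opening brace is removed, and a method chain that overflows the width limit is broken with one call per line, as in the RpcEnvironment downcast:

    let env: &RestEnvironment = rpcenv
        .as_any()
        .downcast_ref::<RestEnvironment>()
        .ok_or_else(|| format_err!("detected worng RpcEnvironment type"))?;

Note that the pre-existing "worng" typo survives: rustfmt leaves string literal contents alone.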
src/api2/access/openid.rs
index 9e78a88875dca1785f81499bc7b26f75b6744687..388de8e7fa6bc7b0117dcfd0664118104873a029 100644 (file)
@@ -4,31 +4,35 @@ use std::convert::TryFrom;
 use anyhow::{bail, format_err, Error};
 use serde_json::{json, Value};
 
-use proxmox_sys::sortable;
 use proxmox_router::{
-    http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
+    http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 
 use proxmox_openid::{OpenIdAuthenticator, OpenIdConfig};
 
 use pbs_api_types::{
-    OpenIdRealmConfig, User, Userid,
-    EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA, OPENID_DEFAILT_SCOPE_LIST,
-    REALM_ID_SCHEMA,
+    OpenIdRealmConfig, User, Userid, EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
+    OPENID_DEFAILT_SCOPE_LIST, REALM_ID_SCHEMA,
 };
 use pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M;
 use pbs_tools::ticket::Ticket;
 
-use pbs_config::CachedUserInfo;
 use pbs_config::open_backup_lockfile;
+use pbs_config::CachedUserInfo;
 
 use crate::auth_helpers::*;
 use crate::server::ticket::ApiTicket;
 
-fn openid_authenticator(realm_config: &OpenIdRealmConfig, redirect_url: &str) -> Result<OpenIdAuthenticator, Error> {
-
-    let scopes: Vec<String> = realm_config.scopes.as_deref().unwrap_or(OPENID_DEFAILT_SCOPE_LIST)
+fn openid_authenticator(
+    realm_config: &OpenIdRealmConfig,
+    redirect_url: &str,
+) -> Result<OpenIdAuthenticator, Error> {
+    let scopes: Vec<String> = realm_config
+        .scopes
+        .as_deref()
+        .unwrap_or(OPENID_DEFAILT_SCOPE_LIST)
         .split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
         .filter(|s| !s.is_empty())
         .map(String::from)
@@ -37,11 +41,10 @@ fn openid_authenticator(realm_config: &OpenIdRealmConfig, redirect_url: &str) ->
     let mut acr_values = None;
     if let Some(ref list) = realm_config.acr_values {
         acr_values = Some(
-            list
-                .split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
+            list.split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
                 .filter(|s| !s.is_empty())
                 .map(String::from)
-                .collect()
+                .collect(),
         );
     }
 
@@ -105,7 +108,9 @@ pub fn openid_login(
 ) -> Result<Value, Error> {
     use proxmox_rest_server::RestEnvironment;
 
-    let env: &RestEnvironment = rpcenv.as_any().downcast_ref::<RestEnvironment>()
+    let env: &RestEnvironment = rpcenv
+        .as_any()
+        .downcast_ref::<RestEnvironment>()
         .ok_or_else(|| format_err!("detected worng RpcEnvironment type"))?;
 
     let user_info = CachedUserInfo::new()?;
@@ -113,7 +118,6 @@ pub fn openid_login(
     let mut tested_username = None;
 
     let result = proxmox_lang::try_block!({
-
         let (realm, private_auth_state) =
             OpenIdAuthenticator::verify_public_auth_state(PROXMOX_BACKUP_RUN_DIR_M!(), &state)?;
 
@@ -157,13 +161,19 @@ pub fn openid_login(
                 use pbs_config::user;
                 let _lock = open_backup_lockfile(user::USER_CFG_LOCKFILE, None, true)?;
 
-                let firstname = info["given_name"].as_str().map(|n| n.to_string())
+                let firstname = info["given_name"]
+                    .as_str()
+                    .map(|n| n.to_string())
                     .filter(|n| FIRST_NAME_SCHEMA.parse_simple_value(n).is_ok());
 
-                let lastname = info["family_name"].as_str().map(|n| n.to_string())
+                let lastname = info["family_name"]
+                    .as_str()
+                    .map(|n| n.to_string())
                     .filter(|n| LAST_NAME_SCHEMA.parse_simple_value(n).is_ok());
 
-                let email = info["email"].as_str().map(|n| n.to_string())
+                let email = info["email"]
+                    .as_str()
+                    .map(|n| n.to_string())
                     .filter(|n| EMAIL_SCHEMA.parse_simple_value(n).is_ok());
 
                 let user = User {
@@ -206,7 +216,7 @@ pub fn openid_login(
     if let Err(ref err) = result {
         let msg = err.to_string();
         env.log_failed_auth(tested_username, &msg);
-        return Err(http_err!(UNAUTHORIZED, "{}", msg))
+        return Err(http_err!(UNAUTHORIZED, "{}", msg));
     }
 
     result
@@ -240,7 +250,6 @@ fn openid_auth_url(
     redirect_url: String,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let (domains, _digest) = pbs_config::domains::config()?;
     let config: OpenIdRealmConfig = domains.lookup("openid", &realm)?;
 
src/api2/access/role.rs
index 7239b0606b18b8241d21659e5b60634f5006121d..808b53f1664c77707f33dc13651ef99b9babfc96 100644 (file)
@@ -7,7 +7,7 @@ use serde_json::{json, Value};
 use proxmox_router::{Permission, Router};
 use proxmox_schema::api;
 
-use pbs_api_types::{Role, SINGLE_LINE_COMMENT_SCHEMA, PRIVILEGES};
+use pbs_api_types::{Role, PRIVILEGES, SINGLE_LINE_COMMENT_SCHEMA};
 use pbs_config::acl::ROLE_NAMES;
 
 #[api(
@@ -56,5 +56,4 @@ fn list_roles() -> Result<Value, Error> {
     Ok(list.into())
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_LIST_ROLES);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_ROLES);
src/api2/access/user.rs
index a0ecbd89588584a32c82efc60206e17e2c826353..8a86d68f0c7d769b7c108b2da14024ed58649836 100644 (file)
@@ -1,19 +1,18 @@
 //! User Management
 
 use anyhow::{bail, format_err, Error};
-use serde::{Serialize, Deserialize};
+use hex::FromHex;
+use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 use std::collections::HashMap;
-use hex::FromHex;
 
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, SubdirMap, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
 use proxmox_schema::api;
 
 use pbs_api_types::{
-    PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, Authid,
-    Tokenname, UserWithTokens, Userid, User, UserUpdater, ApiToken,
-    ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, PBS_PASSWORD_SCHEMA,
-    PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY,
+    ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA,
+    EXPIRE_USER_SCHEMA, PBS_PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
+    PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
 };
 use pbs_config::token_shadow;
 
@@ -59,7 +58,6 @@ pub fn list_users(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<UserWithTokens>, Error> {
-
     let (config, digest) = pbs_config::user::config()?;
 
     let auth_id: Authid = rpcenv
@@ -74,41 +72,34 @@ pub fn list_users(
     let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
     let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
 
-    let filter_by_privs = |user: &User| {
-        top_level_allowed || user.userid == *userid
-    };
-
+    let filter_by_privs = |user: &User| top_level_allowed || user.userid == *userid;
 
-    let list:Vec<User> = config.convert_to_typed_array("user")?;
+    let list: Vec<User> = config.convert_to_typed_array("user")?;
 
     rpcenv["digest"] = hex::encode(&digest).into();
 
     let iter = list.into_iter().filter(filter_by_privs);
     let list = if include_tokens {
         let tokens: Vec<ApiToken> = config.convert_to_typed_array("token")?;
-        let mut user_to_tokens = tokens
-            .into_iter()
-            .fold(
-                HashMap::new(),
-                |mut map: HashMap<Userid, Vec<ApiToken>>, token: ApiToken| {
+        let mut user_to_tokens = tokens.into_iter().fold(
+            HashMap::new(),
+            |mut map: HashMap<Userid, Vec<ApiToken>>, token: ApiToken| {
                 if token.tokenid.is_token() {
-                    map
-                        .entry(token.tokenid.user().clone())
+                    map.entry(token.tokenid.user().clone())
                         .or_default()
                         .push(token);
                 }
                 map
-            });
-        iter
-            .map(|user: User| {
-                let mut user = new_user_with_tokens(user);
-                user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
-                user
-            })
-            .collect()
+            },
+        );
+        iter.map(|user: User| {
+            let mut user = new_user_with_tokens(user);
+            user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
+            user
+        })
+        .collect()
     } else {
-        iter.map(new_user_with_tokens)
-            .collect()
+        iter.map(new_user_with_tokens).collect()
     };
 
     Ok(list)
@@ -136,14 +127,17 @@ pub fn list_users(
 pub fn create_user(
     password: Option<String>,
     config: User,
-    rpcenv: &mut dyn RpcEnvironment
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
 
     let (mut section_config, _digest) = pbs_config::user::config()?;
 
-    if section_config.sections.get(config.userid.as_str()).is_some() {
+    if section_config
+        .sections
+        .get(config.userid.as_str())
+        .is_some()
+    {
         bail!("user '{}' already exists.", config.userid);
     }
 
@@ -194,7 +188,7 @@ pub fn read_user(userid: Userid, mut rpcenv: &mut dyn RpcEnvironment) -> Result<
 
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 #[allow(non_camel_case_types)]
 pub enum DeletableProperty {
     /// Delete the comment property.
@@ -253,7 +247,6 @@ pub fn update_user(
     digest: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -306,11 +299,19 @@ pub fn update_user(
     }
 
     if let Some(firstname) = update.firstname {
-        data.firstname = if firstname.is_empty() { None } else { Some(firstname) };
+        data.firstname = if firstname.is_empty() {
+            None
+        } else {
+            Some(firstname)
+        };
     }
 
     if let Some(lastname) = update.lastname {
-        data.lastname = if lastname.is_empty() { None } else { Some(lastname) };
+        data.lastname = if lastname.is_empty() {
+            None
+        } else {
+            Some(lastname)
+        };
     }
     if let Some(email) = update.email {
         data.email = if email.is_empty() { None } else { Some(email) };
@@ -345,10 +346,9 @@ pub fn update_user(
 )]
 /// Remove a user from the configuration file.
 pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
     let _tfa_lock = crate::config::tfa::write_lock()?;
+
     let (mut config, expected_digest) = pbs_config::user::config()?;
 
     if let Some(ref digest) = digest {
@@ -357,7 +357,9 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
     }
 
     match config.sections.get(userid.as_str()) {
-        Some(_) => { config.sections.remove(userid.as_str()); },
+        Some(_) => {
+            config.sections.remove(userid.as_str());
+        }
         None => bail!("user '{}' does not exist.", userid),
     }
 
@@ -365,7 +367,7 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
 
     let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
     match authenticator.remove_password(userid.name()) {
-        Ok(()) => {},
+        Ok(()) => {}
         Err(err) => {
             eprintln!(
                 "error removing password after deleting user {:?}: {}",
@@ -417,7 +419,6 @@ pub fn read_token(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<ApiToken, Error> {
-
     let (config, digest) = pbs_config::user::config()?;
 
     let tokenid = Authid::from((userid, Some(token_name)));
@@ -483,7 +484,6 @@ pub fn generate_token(
     expire: Option<i64>,
     digest: Option<String>,
 ) -> Result<Value, Error> {
-
     let _lock = pbs_config::user::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -497,7 +497,11 @@ pub fn generate_token(
     let tokenid_string = tokenid.to_string();
 
     if config.sections.get(&tokenid_string).is_some() {
-        bail!("token '{}' for user '{}' already exists.", token_name.as_str(), userid);
+        bail!(
+            "token '{}' for user '{}' already exists.",
+            token_name.as_str(),
+            userid
+        );
     }
 
     let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
@@ -564,7 +568,6 @@ pub fn update_token(
     expire: Option<i64>,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -632,7 +635,6 @@ pub fn delete_token(
     token_name: Tokenname,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::user::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -646,8 +648,14 @@ pub fn delete_token(
     let tokenid_string = tokenid.to_string();
 
     match config.sections.get(&tokenid_string) {
-        Some(_) => { config.sections.remove(&tokenid_string); },
-        None => bail!("token '{}' of user '{}' does not exist.", token_name.as_str(), userid),
+        Some(_) => {
+            config.sections.remove(&tokenid_string);
+        }
+        None => bail!(
+            "token '{}' of user '{}' does not exist.",
+            token_name.as_str(),
+            userid
+        ),
     }
 
     token_shadow::delete_secret(&tokenid)?;
@@ -664,7 +672,7 @@ pub fn delete_token(
     }
 )]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// A Token Entry that contains the token-name
 pub struct TokenApiEntry {
     /// The Token name
@@ -699,20 +707,16 @@ pub fn list_tokens(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TokenApiEntry>, Error> {
-
     let (config, digest) = pbs_config::user::config()?;
 
-    let list:Vec<ApiToken> = config.convert_to_typed_array("token")?;
+    let list: Vec<ApiToken> = config.convert_to_typed_array("token")?;
 
     rpcenv["digest"] = hex::encode(&digest).into();
 
     let filter_by_owner = |token: ApiToken| {
         if token.tokenid.is_token() && token.tokenid.user() == &userid {
             let token_name = token.tokenid.tokenname().unwrap().to_owned();
-            Some(TokenApiEntry {
-                token_name,
-                token,
-            })
+            Some(TokenApiEntry { token_name, token })
         } else {
             None
         }
@@ -733,9 +737,7 @@ const TOKEN_ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_TOKENS)
     .match_all("token-name", &TOKEN_ITEM_ROUTER);
 
-const USER_SUBDIRS: SubdirMap = &[
-    ("token", &TOKEN_ROUTER),
-];
+const USER_SUBDIRS: SubdirMap = &[("token", &TOKEN_ROUTER)];
 
 const USER_ROUTER: Router = Router::new()
     .get(&API_METHOD_READ_USER)
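user.rs rounds out the attribute and literal rules: key/value pairs inside attributes gain spaces around the equals sign (rename_all = "kebab-case"), and a struct literal that fits on one line is collapsed while keeping the field-init shorthand. A sketch with hypothetical types:

    // Hypothetical stand-in for the TokenApiEntry collapse above.
    struct Entry {
        token_name: String,
        token: String,
    }

    fn entry(token_name: String, token: String) -> Entry {
        // Fits within max_width, so rustfmt joins the literal onto one
        // line and keeps the shorthand fields:
        Entry { token_name, token }
    }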
src/api2/admin/datastore.rs
index ce5bd2444079e887583ad48955a5fa82d6ce7a09..1309ab4091554d090c2896ba8e93ed7009d7341c 100644 (file)
@@ -12,62 +12,55 @@ use hyper::{header, Body, Response, StatusCode};
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
 
+use proxmox_async::blocking::WrappedReaderStream;
+use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
 use proxmox_compression::zstd::ZstdEncoder;
-use proxmox_sys::sortable;
-use proxmox_sys::fs::{
-    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
-};
 use proxmox_router::{
-    list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
-    RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
+    http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
+    Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
 };
 use proxmox_schema::*;
+use proxmox_sys::fs::{
+    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
+};
+use proxmox_sys::sortable;
 use proxmox_sys::{task_log, task_warn};
-use proxmox_async::blocking::WrappedReaderStream;
-use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
 
 use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
-use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
-    DataStoreListItem, GarbageCollectionStatus, GroupListItem,
-    Operation, SnapshotListItem, SnapshotVerifyState, PruneOptions,
-    DataStoreStatus, RRDMode, RRDTimeFrame,
-    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
-    IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
-    VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,
-
+use pbs_api_types::{
+    Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
+    GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
+    SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
+    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
+    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
-use pbs_datastore::{
-    check_backup_owner, DataStore, BackupDir, BackupGroup, StoreProgress, LocalChunkReader,
-    CATALOG_NAME, task_tracking
-};
+use pbs_config::CachedUserInfo;
 use pbs_datastore::backup_info::BackupInfo;
 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
-use pbs_datastore::fixed_index::{FixedIndexReader};
+use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use pbs_datastore::prune::compute_prune_info;
+use pbs_datastore::{
+    check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
+    StoreProgress, CATALOG_NAME,
+};
 use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_config::CachedUserInfo;
-use proxmox_rest_server::{WorkerTask, formatter};
+use proxmox_rest_server::{formatter, WorkerTask};
 
 use crate::api2::node::rrd::create_value_from_rrd;
-use crate::backup::{
-    verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
-};
+use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};
 
 use crate::server::jobstate::Job;
 
-
 const GROUP_NOTES_FILE_NAME: &str = "notes";
 
 fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
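The datastore.rs preamble above is the largest import shuffle of the commit: within each blank-line-separated group, sibling use items are sorted by crate path, the pbs_api_types list is re-flowed to fill the line width, the two pbs_datastore imports are merged into one braced list, and the stray blank line inside the old import block disappears. The path ordering in miniature:

    // rustfmt orders sibling `use` items by path within a group:
    use proxmox_router::Permission;
    use proxmox_schema::api;
    use proxmox_sys::sortable;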
@@ -97,7 +90,6 @@ fn read_backup_index(
     store: &DataStore,
     backup_dir: &BackupDir,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
-
     let (manifest, index_size) = store.load_manifest(backup_dir)?;
 
     let mut result = Vec::new();
@@ -125,7 +117,6 @@ fn get_all_snapshot_files(
     store: &DataStore,
     info: &BackupInfo,
 ) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
-
     let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
 
     let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
@@ -134,7 +125,9 @@ fn get_all_snapshot_files(
     });
 
     for file in &info.files {
-        if file_set.contains(file) { continue; }
+        if file_set.contains(file) {
+            continue;
+        }
         files.push(BackupContent {
             filename: file.to_string(),
             size: None,
@@ -166,7 +159,6 @@ pub fn list_groups(
     store: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<GroupListItem>, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
@@ -182,12 +174,12 @@ pub fn list_groups(
             let owner = match datastore.get_owner(&group) {
                 Ok(auth_id) => auth_id,
                 Err(err) => {
-                    eprintln!("Failed to get owner of group '{}/{}' - {}",
-                             &store,
-                             group,
-                             err);
+                    eprintln!(
+                        "Failed to get owner of group '{}/{}' - {}",
+                        &store, group, err
+                    );
                     return group_info;
-                },
+                }
             };
             if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                 return group_info;
@@ -197,7 +189,7 @@ pub fn list_groups(
                 Ok(snapshots) => snapshots,
                 Err(_) => {
                     return group_info;
-                },
+                }
             };
 
             let backup_count: u64 = snapshots.len() as u64;
@@ -209,7 +201,8 @@ pub fn list_groups(
                 .iter()
                 .fold(&snapshots[0], |last, curr| {
                     if curr.is_finished()
-                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
+                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time()
+                    {
                         curr
                     } else {
                         last
@@ -265,7 +258,6 @@ pub fn delete_group(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     let group = BackupGroup::new(backup_type, backup_id);
@@ -314,13 +306,17 @@ pub fn list_snapshot_files(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<BackupContent>, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
     let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        snapshot.group(),
+        &auth_id,
+        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
+    )?;
 
     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
 
@@ -362,13 +358,17 @@ pub fn delete_snapshot(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        snapshot.group(),
+        &auth_id,
+        PRIV_DATASTORE_MODIFY,
+    )?;
 
     datastore.remove_backup_dir(&snapshot, false)?;
 
@@ -401,7 +401,7 @@ pub fn delete_snapshot(
     },
 )]
 /// List backup snapshots.
-pub fn list_snapshots (
+pub fn list_snapshots(
     store: String,
     backup_type: Option<String>,
     backup_id: Option<String>,
@@ -409,7 +409,6 @@ pub fn list_snapshots (
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SnapshotListItem>, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
@@ -425,19 +424,15 @@ pub fn list_snapshots (
             let mut groups = Vec::with_capacity(1);
             groups.push(BackupGroup::new(backup_type, backup_id));
             groups
-        },
-        (Some(backup_type), None) => {
-            BackupInfo::list_backup_groups(&base_path)?
-                .into_iter()
-                .filter(|group| group.backup_type() == backup_type)
-                .collect()
-        },
-        (None, Some(backup_id)) => {
-            BackupInfo::list_backup_groups(&base_path)?
-                .into_iter()
-                .filter(|group| group.backup_id() == backup_id)
-                .collect()
-        },
+        }
+        (Some(backup_type), None) => BackupInfo::list_backup_groups(&base_path)?
+            .into_iter()
+            .filter(|group| group.backup_type() == backup_type)
+            .collect(),
+        (None, Some(backup_id)) => BackupInfo::list_backup_groups(&base_path)?
+            .into_iter()
+            .filter(|group| group.backup_id() == backup_id)
+            .collect(),
         _ => BackupInfo::list_backup_groups(&base_path)?,
     };
 
@@ -460,17 +455,18 @@ pub fn list_snapshots (
                     Err(err) => {
                         eprintln!("error parsing fingerprint: '{}'", err);
                         None
-                    },
+                    }
                 };
 
                 let verification = manifest.unprotected["verify_state"].clone();
-                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
-                    Ok(verify) => verify,
-                    Err(err) => {
-                        eprintln!("error parsing verification state : '{}'", err);
-                        None
-                    }
-                };
+                let verification: Option<SnapshotVerifyState> =
+                    match serde_json::from_value(verification) {
+                        Ok(verify) => verify,
+                        Err(err) => {
+                            eprintln!("error parsing verification state : '{}'", err);
+                            None
+                        }
+                    };
 
                 let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
 
@@ -486,18 +482,18 @@ pub fn list_snapshots (
                     owner,
                     protected,
                 }
-            },
+            }
             Err(err) => {
                 eprintln!("error during snapshot file listing: '{}'", err);
                 let files = info
-                        .files
-                        .into_iter()
-                        .map(|filename| BackupContent {
-                            filename,
-                            size: None,
-                            crypt_mode: None,
-                        })
-                        .collect();
+                    .files
+                    .into_iter()
+                    .map(|filename| BackupContent {
+                        filename,
+                        size: None,
+                        crypt_mode: None,
+                    })
+                    .collect();
 
                 SnapshotListItem {
                     backup_type,
@@ -511,55 +507,56 @@ pub fn list_snapshots (
                     owner,
                     protected,
                 }
-            },
+            }
         }
     };
 
-    groups
-        .iter()
-        .try_fold(Vec::new(), |mut snapshots, group| {
-            let owner = match datastore.get_owner(group) {
-                Ok(auth_id) => auth_id,
-                Err(err) => {
-                    eprintln!("Failed to get owner of group '{}/{}' - {}",
-                              &store,
-                              group,
-                              err);
-                    return Ok(snapshots);
-                },
-            };
-
-            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
+    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
+        let owner = match datastore.get_owner(group) {
+            Ok(auth_id) => auth_id,
+            Err(err) => {
+                eprintln!(
+                    "Failed to get owner of group '{}/{}' - {}",
+                    &store, group, err
+                );
                 return Ok(snapshots);
             }
+        };
 
-            let group_backups = group.list_backups(&datastore.base_path())?;
+        if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
+            return Ok(snapshots);
+        }
 
-            snapshots.extend(
-                group_backups
-                    .into_iter()
-                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
-            );
+        let group_backups = group.list_backups(&datastore.base_path())?;
 
-            Ok(snapshots)
-        })
+        snapshots.extend(
+            group_backups
+                .into_iter()
+                .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
+        );
+
+        Ok(snapshots)
+    })
 }
 
 fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
     let base_path = store.base_path();
     let groups = BackupInfo::list_backup_groups(&base_path)?;
 
-    groups.iter()
+    groups
+        .iter()
         .filter(|group| {
             let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
                 Err(err) => {
-                    eprintln!("Failed to get owner of group '{}/{}' - {}",
-                              store.name(),
-                              group,
-                              err);
+                    eprintln!(
+                        "Failed to get owner of group '{}/{}' - {}",
+                        store.name(),
+                        group,
+                        err
+                    );
                     return false;
-                },
+                }
             };
 
             match filter_owner {
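With one closure layer removed in list_snapshots, the re-indented body also shows how rustfmt packs macro arguments: when a call must wrap, the format string gets its own line and the remaining short arguments are joined onto one line, as in:

    eprintln!(
        "Failed to get owner of group '{}/{}' - {}",
        &store, group, err
    );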
@@ -707,7 +704,10 @@ pub fn verify(
 
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
-            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
+            worker_id = format!(
+                "{}:{}/{}/{:08X}",
+                store, backup_type, backup_id, backup_time
+            );
             let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
             check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
@@ -745,9 +745,7 @@ pub fn verify(
                     &verify_worker,
                     &backup_dir,
                     worker.upid().clone(),
-                    Some(&move |manifest| {
-                        verify_filter(ignore_verified, outdated_after, manifest)
-                    }),
+                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                 )? {
                     res.push(backup_dir.to_string());
                 }
@@ -758,14 +756,11 @@ pub fn verify(
                     &backup_group,
                     &mut StoreProgress::new(1),
                     worker.upid(),
-                    Some(&move |manifest| {
-                        verify_filter(ignore_verified, outdated_after, manifest)
-                    }),
+                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                 )?;
                 failed_dirs
             } else {
-                let privs = CachedUserInfo::new()?
-                    .lookup_privs(&auth_id, &["datastore", &store]);
+                let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]);
 
                 let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                     Some(auth_id)
@@ -777,9 +772,7 @@ pub fn verify(
                     &verify_worker,
                     worker.upid(),
                     owner,
-                    Some(&move |manifest| {
-                        verify_filter(ignore_verified, outdated_after, manifest)
-                    }),
+                    Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
                 )?
             };
             if !failed_dirs.is_empty() {
@@ -835,7 +828,6 @@ pub fn prune(
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     let group = BackupGroup::new(&backup_type, &backup_id);
@@ -874,16 +866,24 @@ pub fn prune(
         return Ok(json!(prune_result));
     }
 
-
     // We use a WorkerTask just to have a task log, but run synchrounously
     let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
 
     if keep_all {
         task_log!(worker, "No prune selection - keeping all files.");
     } else {
-        task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
-        task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
-                  store, backup_type, backup_id);
+        task_log!(
+            worker,
+            "retention options: {}",
+            pbs_datastore::prune::cli_options_string(&prune_options)
+        );
+        task_log!(
+            worker,
+            "Starting prune on store \"{}\" group \"{}/{}\"",
+            store,
+            backup_type,
+            backup_id
+        );
     }
 
     for (info, mark) in prune_info {
@@ -893,7 +893,6 @@ pub fn prune(
         let timestamp = info.backup_dir.backup_time_string();
         let group = info.backup_dir.group();
 
-
         let msg = format!(
             "{}/{}/{} {}",
             group.backup_type(),
@@ -962,7 +961,6 @@ pub fn prune_datastore(
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
@@ -974,14 +972,16 @@ pub fn prune_datastore(
         Some(store.clone()),
         auth_id.to_string(),
         to_stdout,
-        move |worker| crate::server::prune_datastore(
-            worker,
-            auth_id,
-            prune_options,
-            &store,
-            datastore,
-            dry_run
-        ),
+        move |worker| {
+            crate::server::prune_datastore(
+                worker,
+                auth_id,
+                prune_options,
+                &store,
+                datastore,
+                dry_run,
+            )
+        },
     )?;
 
     Ok(upid_str)
@@ -1008,17 +1008,23 @@ pub fn start_garbage_collection(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let job =  Job::new("garbage_collection", &store)
+    let job = Job::new("garbage_collection", &store)
         .map_err(|_| format_err!("garbage collection already running"))?;
 
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
-    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
-        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
+    let upid_str =
+        crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
+            .map_err(|err| {
+                format_err!(
+                    "unable to start garbage collection job on datastore {} - {}",
+                    store,
+                    err
+                )
+            })?;
 
     Ok(json!(upid_str))
 }
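When an initializer no longer fits on the let line itself, rustfmt breaks directly after the equals sign and indents the whole expression one level, as in the do_garbage_collection_job binding above. In miniature:

    fn describe(store: &str, chunks: u64) -> String {
        // An over-wide initializer moves below the `=`, indented one
        // level (hypothetical message text):
        let description =
            format!("unable to start garbage collection job on datastore {store} - {chunks}");
        description
    }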
@@ -1044,7 +1050,6 @@ pub fn garbage_collection_status(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<GarbageCollectionStatus, Error> {
-
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
     let status = datastore.last_gc_status();
@@ -1068,7 +1073,6 @@ pub fn get_datastore_list(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<DataStoreListItem>, Error> {
-
     let (config, _digest) = pbs_config::datastore::config()?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -1078,14 +1082,12 @@ pub fn get_datastore_list(
 
     for (store, (_, data)) in &config.sections {
         let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
-        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
+        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
-            list.push(
-                DataStoreListItem {
-                    store: store.clone(),
-                    comment: data["comment"].as_str().map(String::from),
-                }
-            );
+            list.push(DataStoreListItem {
+                store: store.clone(),
+                comment: data["comment"].as_str().map(String::from),
+            });
         }
     }
 
@@ -1100,15 +1102,19 @@ pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
             ("backup-type", false, &BACKUP_TYPE_SCHEMA),
-            ("backup-id", false,  &BACKUP_ID_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
         ]),
-    )
-).access(None, &Permission::Privilege(
-    &["datastore", "{store}"],
-    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
-    true)
+    ),
+)
+.access(
+    None,
+    &Permission::Privilege(
+        &["datastore", "{store}"],
+        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+        true,
+    ),
 );
 
 pub fn download_file(
@@ -1118,7 +1124,6 @@ pub fn download_file(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let store = required_string_param(&param, "store")?;
         let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
@@ -1133,9 +1138,17 @@ pub fn download_file(
 
         let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+        check_priv_or_backup_owner(
+            &datastore,
+            backup_dir.group(),
+            &auth_id,
+            PRIV_DATASTORE_READ,
+        )?;
 
-        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+        println!(
+            "Download {} from {} ({}/{})",
+            file_name, store, backup_dir, file_name
+        );
 
         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
@@ -1145,21 +1158,23 @@ pub fn download_file(
             .await
             .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
 
-        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-            .map_ok(|bytes| bytes.freeze())
-            .map_err(move |err| {
-                eprintln!("error during streaming of '{:?}' - {}", &path, err);
-                err
-            });
+        let payload =
+            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
+                .map_ok(|bytes| bytes.freeze())
+                .map_err(move |err| {
+                    eprintln!("error during streaming of '{:?}' - {}", &path, err);
+                    err
+                });
         let body = Body::wrap_stream(payload);
 
         // fixme: set other headers ?
         Ok(Response::builder()
-           .status(StatusCode::OK)
-           .header(header::CONTENT_TYPE, "application/octet-stream")
-           .body(body)
-           .unwrap())
-    }.boxed()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }
+    .boxed()
 }
 
 #[sortable]
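The async handlers end with a small but characteristic change: the .boxed() adapter moves off the closing brace of the async block onto its own line, so the block reads like any other broken chain. A self-contained sketch, assuming the futures crate that these handlers already use for .boxed():

    use futures::future::{BoxFuture, FutureExt};

    fn answer() -> BoxFuture<'static, u64> {
        async move {
            // body elided in this sketch
            42
        }
        .boxed()
    }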
@@ -1170,15 +1185,19 @@ pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
             ("backup-type", false, &BACKUP_TYPE_SCHEMA),
-            ("backup-id", false,  &BACKUP_ID_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
         ]),
-    )
-).access(None, &Permission::Privilege(
-    &["datastore", "{store}"],
-    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
-    true)
+    ),
+)
+.access(
+    None,
+    &Permission::Privilege(
+        &["datastore", "{store}"],
+        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+        true,
+    ),
 );
 
 pub fn download_file_decoded(
@@ -1188,7 +1207,6 @@ pub fn download_file_decoded(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let store = required_string_param(&param, "store")?;
         let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
@@ -1203,7 +1221,12 @@ pub fn download_file_decoded(
 
         let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+        check_priv_or_backup_owner(
+            &datastore,
+            backup_dir.group(),
+            &auth_id,
+            PRIV_DATASTORE_READ,
+        )?;
 
         let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
         for file in files {
@@ -1212,7 +1235,10 @@ pub fn download_file_decoded(
             }
         }
 
-        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+        println!(
+            "Download {} from {} ({}/{})",
+            file_name, store, backup_dir, file_name
+        );
 
         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
@@ -1222,34 +1248,38 @@ pub fn download_file_decoded(
 
         let body = match extension {
             "didx" => {
-                let index = DynamicIndexReader::open(&path)
-                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+                let index = DynamicIndexReader::open(&path).map_err(|err| {
+                    format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
+                })?;
                 let (csum, size) = index.compute_csum();
                 manifest.verify_file(&file_name, &csum, size)?;
 
                 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
-                Body::wrap_stream(AsyncReaderStream::new(reader)
-                    .map_err(move |err| {
-                        eprintln!("error during streaming of '{:?}' - {}", path, err);
-                        err
-                    }))
-            },
+                Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
+                    eprintln!("error during streaming of '{:?}' - {}", path, err);
+                    err
+                }))
+            }
             "fidx" => {
-                let index = FixedIndexReader::open(&path)
-                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
+                let index = FixedIndexReader::open(&path).map_err(|err| {
+                    format_err!("unable to read fixed index '{:?}' - {}", &path, err)
+                })?;
 
                 let (csum, size) = index.compute_csum();
                 manifest.verify_file(&file_name, &csum, size)?;
 
                 let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                 let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
-                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
-                    .map_err(move |err| {
-                        eprintln!("error during streaming of '{:?}' - {}", path, err);
-                        err
-                    }))
-            },
+                Body::wrap_stream(
+                    AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
+                        move |err| {
+                            eprintln!("error during streaming of '{:?}' - {}", path, err);
+                            err
+                        },
+                    ),
+                )
+            }
             "blob" => {
                 let file = std::fs::File::open(&path)
                     .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
@@ -1257,25 +1287,27 @@ pub fn download_file_decoded(
                 // FIXME: load full blob to verify index checksum?
 
                 Body::wrap_stream(
-                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
-                        .map_err(move |err| {
+                    WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
+                        move |err| {
                             eprintln!("error during streaming of '{:?}' - {}", path, err);
                             err
-                        })
+                        },
+                    ),
                 )
-            },
+            }
             extension => {
                 bail!("cannot download '{}' files", extension);
-            },
+            }
         };
 
         // fixme: set other headers ?
         Ok(Response::builder()
-           .status(StatusCode::OK)
-           .header(header::CONTENT_TYPE, "application/octet-stream")
-           .body(body)
-           .unwrap())
-    }.boxed()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }
+    .boxed()
 }
 
 #[sortable]
@@ -1289,10 +1321,11 @@ pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
             ("backup-id", false, &BACKUP_ID_SCHEMA),
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
         ]),
-    )
-).access(
+    ),
+)
+.access(
     Some("Only the backup creator/owner is allowed to do this."),
-    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
+    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false),
 );
 
 pub fn upload_backup_log(
@@ -1302,12 +1335,11 @@ pub fn upload_backup_log(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let store = required_string_param(&param, "store")?;
         let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;
 
-        let file_name =  CLIENT_LOG_BLOB_NAME;
+        let file_name = CLIENT_LOG_BLOB_NAME;
 
         let backup_type = required_string_param(&param, "backup-type")?;
         let backup_id = required_string_param(&param, "backup-id")?;
@@ -1327,8 +1359,14 @@ pub fn upload_backup_log(
             bail!("backup already contains a log.");
         }
 
-        println!("Upload backup log to {}/{}/{}/{}/{}", store,
-                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);
+        println!(
+            "Upload backup log to {}/{}/{}/{}/{}",
+            store,
+            backup_type,
+            backup_id,
+            backup_dir.backup_time_string(),
+            file_name
+        );
 
         let data = req_body
             .map_err(Error::from)
@@ -1345,7 +1383,8 @@ pub fn upload_backup_log(
 
         // fixme: use correct formatter
         Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
-    }.boxed()
+    }
+    .boxed()
 }
 
 #[api(
@@ -1388,7 +1427,12 @@ pub fn catalog(
 
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        backup_dir.group(),
+        &auth_id,
+        PRIV_DATASTORE_READ,
+    )?;
 
     let file_name = CATALOG_NAME;
 
@@ -1450,7 +1494,6 @@ pub fn pxar_file_download(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let store = required_string_param(&param, "store")?;
         let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
@@ -1467,7 +1510,12 @@ pub fn pxar_file_download(
 
         let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+        check_priv_or_backup_owner(
+            &datastore,
+            backup_dir.group(),
+            &auth_id,
+            PRIV_DATASTORE_READ,
+        )?;
 
         let mut components = base64::decode(&filepath)?;
         if !components.is_empty() && components[0] == b'/' {
@@ -1503,7 +1551,8 @@ pub fn pxar_file_download(
         let root = decoder.open_root().await?;
         let path = OsStr::from_bytes(file_path).to_os_string();
         let file = root
-            .lookup(&path).await?
+            .lookup(&path)
+            .await?
             .ok_or_else(|| format_err!("error opening '{:?}'", path))?;
 
         let body = match file.kind() {
@@ -1516,10 +1565,7 @@ pub fn pxar_file_download(
             EntryKind::Hardlink(_) => Body::wrap_stream(
                 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                     .map_err(move |err| {
-                        eprintln!(
-                            "error during streaming of hardlink '{:?}' - {}",
-                            path, err
-                        );
+                        eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
                         err
                     }),
             ),
@@ -1527,18 +1573,24 @@ pub fn pxar_file_download(
                 let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
                 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
                 if tar {
-                    proxmox_rest_server::spawn_internal_task(
-                        create_tar(channelwriter, decoder, path.clone(), false)
-                    );
+                    proxmox_rest_server::spawn_internal_task(create_tar(
+                        channelwriter,
+                        decoder,
+                        path.clone(),
+                        false,
+                    ));
                     let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
                     Body::wrap_stream(zstdstream.map_err(move |err| {
                         eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
                         err
                     }))
                 } else {
-                    proxmox_rest_server::spawn_internal_task(
-                        create_zip(channelwriter, decoder, path.clone(), false)
-                    );
+                    proxmox_rest_server::spawn_internal_task(create_zip(
+                        channelwriter,
+                        decoder,
+                        path.clone(),
+                        false,
+                    ));
                     Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
                         eprintln!("error during streaming of zip '{:?}' - {}", path, err);
                         err
@@ -1550,11 +1602,12 @@ pub fn pxar_file_download(
 
         // fixme: set other headers ?
         Ok(Response::builder()
-           .status(StatusCode::OK)
-           .header(header::CONTENT_TYPE, "application/octet-stream")
-           .body(body)
-           .unwrap())
-    }.boxed()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }
+    .boxed()
 }
 
 #[api(
@@ -1582,28 +1635,25 @@ pub fn get_rrd_stats(
     cf: RRDMode,
     _param: Value,
 ) -> Result<Value, Error> {
-
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let disk_manager = crate::tools::disks::DiskManage::new();
 
     let mut rrd_fields = vec![
-        "total", "used",
-        "read_ios", "read_bytes",
-        "write_ios", "write_bytes",
+        "total",
+        "used",
+        "read_ios",
+        "read_bytes",
+        "write_ios",
+        "write_bytes",
     ];
 
     // we do not have io_ticks for zpools, so don't include them
     match disk_manager.find_mounted_device(&datastore.base_path()) {
-        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {},
+        Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
         _ => rrd_fields.push("io_ticks"),
     };
 
-    create_value_from_rrd(
-        &format!("datastore/{}", store),
-        &rrd_fields,
-        timeframe,
-        cf,
-    )
+    create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
 }
 
 #[api(
@@ -1619,10 +1669,7 @@ pub fn get_rrd_stats(
     },
 )]
 /// Read active operations on a datastore
-pub fn get_active_operations(
-    store: String,
-    _param: Value,
-) -> Result<Value, Error> {
+pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
     let active_operations = task_tracking::get_active_operations(&store)?;
     Ok(json!({
         "read": active_operations.read,
@@ -1744,13 +1791,16 @@ pub fn get_notes(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        backup_dir.group(),
+        &auth_id,
+        PRIV_DATASTORE_AUDIT,
+    )?;
 
     let (manifest, _) = datastore.load_manifest(&backup_dir)?;
 
-    let notes = manifest.unprotected["notes"]
-        .as_str()
-        .unwrap_or("");
+    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
 
     Ok(String::from(notes))
 }
@@ -1795,11 +1845,18 @@ pub fn set_notes(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        backup_dir.group(),
+        &auth_id,
+        PRIV_DATASTORE_MODIFY,
+    )?;
 
-    datastore.update_manifest(&backup_dir,|manifest| {
-        manifest.unprotected["notes"] = notes.into();
-    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
+    datastore
+        .update_manifest(&backup_dir, |manifest| {
+            manifest.unprotected["notes"] = notes.into();
+        })
+        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
 
     Ok(())
 }
@@ -1838,7 +1895,12 @@ pub fn get_protection(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        backup_dir.group(),
+        &auth_id,
+        PRIV_DATASTORE_AUDIT,
+    )?;
 
     Ok(backup_dir.is_protected(datastore.base_path()))
 }
@@ -1883,7 +1945,12 @@ pub fn set_protection(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
 
-    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+    check_priv_or_backup_owner(
+        &datastore,
+        backup_dir.group(),
+        &auth_id,
+        PRIV_DATASTORE_MODIFY,
+    )?;
 
     datastore.update_protection(&backup_dir, protected)
 }
@@ -1918,7 +1985,6 @@ pub fn set_backup_owner(
     new_owner: Authid,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
     let backup_group = BackupGroup::new(backup_type, backup_id);
@@ -1941,43 +2007,44 @@ pub fn set_backup_owner(
                 let owner = owner.user();
                 let new_owner = new_owner.user();
                 owner == new_owner && Authid::from(owner.clone()) == auth_id
-            },
+            }
             (true, false) => {
                 // API token to API token owner
-                Authid::from(owner.user().clone()) == auth_id
-                    && new_owner == auth_id
-            },
+                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
+            }
             (false, true) => {
                 // API token owner to API token
-                owner == auth_id
-                    && Authid::from(new_owner.user().clone()) == auth_id
-            },
+                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
+            }
             (false, false) => {
                 // User to User, not allowed for unprivileged users
                 false
-            },
+            }
         }
     } else {
         false
     };
 
     if !allowed {
-        return Err(http_err!(UNAUTHORIZED,
-                  "{} does not have permission to change owner of backup group '{}' to {}",
-                  auth_id,
-                  backup_group,
-                  new_owner,
+        return Err(http_err!(
+            UNAUTHORIZED,
+            "{} does not have permission to change owner of backup group '{}' to {}",
+            auth_id,
+            backup_group,
+            new_owner,
         ));
     }
 
     if !user_info.is_active_auth_id(&new_owner) {
-        bail!("{} '{}' is inactive or non-existent",
-              if new_owner.is_token() {
-                  "API token".to_string()
-              } else {
-                  "user".to_string()
-              },
-              new_owner);
+        bail!(
+            "{} '{}' is inactive or non-existent",
+            if new_owner.is_token() {
+                "API token".to_string()
+            } else {
+                "user".to_string()
+            },
+            new_owner
+        );
     }
 
     datastore.set_owner(&backup_group, &new_owner, true)?;
@@ -1989,112 +2056,80 @@ pub fn set_backup_owner(
 const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
     (
         "active-operations",
-        &Router::new()
-            .get(&API_METHOD_GET_ACTIVE_OPERATIONS)
-    ),
-    (
-        "catalog",
-        &Router::new()
-            .get(&API_METHOD_CATALOG)
+        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
     ),
+    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
     (
         "change-owner",
-        &Router::new()
-            .post(&API_METHOD_SET_BACKUP_OWNER)
+        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
     ),
     (
         "download",
-        &Router::new()
-            .download(&API_METHOD_DOWNLOAD_FILE)
+        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
     ),
     (
         "download-decoded",
-        &Router::new()
-            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
-    ),
-    (
-        "files",
-        &Router::new()
-            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
+        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
     ),
+    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
     (
         "gc",
         &Router::new()
             .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
-            .post(&API_METHOD_START_GARBAGE_COLLECTION)
+            .post(&API_METHOD_START_GARBAGE_COLLECTION),
     ),
     (
         "group-notes",
         &Router::new()
             .get(&API_METHOD_GET_GROUP_NOTES)
-            .put(&API_METHOD_SET_GROUP_NOTES)
+            .put(&API_METHOD_SET_GROUP_NOTES),
     ),
     (
         "groups",
         &Router::new()
             .get(&API_METHOD_LIST_GROUPS)
-            .delete(&API_METHOD_DELETE_GROUP)
+            .delete(&API_METHOD_DELETE_GROUP),
     ),
     (
         "notes",
         &Router::new()
             .get(&API_METHOD_GET_NOTES)
-            .put(&API_METHOD_SET_NOTES)
+            .put(&API_METHOD_SET_NOTES),
     ),
     (
         "protected",
         &Router::new()
             .get(&API_METHOD_GET_PROTECTION)
-            .put(&API_METHOD_SET_PROTECTION)
-    ),
-    (
-        "prune",
-        &Router::new()
-            .post(&API_METHOD_PRUNE)
+            .put(&API_METHOD_SET_PROTECTION),
     ),
+    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
     (
         "prune-datastore",
-        &Router::new()
-            .post(&API_METHOD_PRUNE_DATASTORE)
+        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
     ),
     (
         "pxar-file-download",
-        &Router::new()
-            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
-    ),
-    (
-        "rrd",
-        &Router::new()
-            .get(&API_METHOD_GET_RRD_STATS)
+        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
     ),
+    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
     (
         "snapshots",
         &Router::new()
             .get(&API_METHOD_LIST_SNAPSHOTS)
-            .delete(&API_METHOD_DELETE_SNAPSHOT)
-    ),
-    (
-        "status",
-        &Router::new()
-            .get(&API_METHOD_STATUS)
+            .delete(&API_METHOD_DELETE_SNAPSHOT),
     ),
+    ("status", &Router::new().get(&API_METHOD_STATUS)),
     (
         "upload-backup-log",
-        &Router::new()
-            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
-    ),
-    (
-        "verify",
-        &Router::new()
-            .post(&API_METHOD_VERIFY)
+        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
     ),
+    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
 ];
 
 const DATASTORE_INFO_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
     .subdirs(DATASTORE_INFO_SUBDIRS);
 
-
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_GET_DATASTORE_LIST)
     .match_all("store", &DATASTORE_INFO_ROUTER);
index 4667355a51dc3285d5b188fb42f399186b48378c..43973af5b6e881d37787f2185b7c3f30dcf70e96 100644 (file)
@@ -1,18 +1,18 @@
 //! Backup Server Administration
 
-use proxmox_router::{Router, SubdirMap};
 use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
 
 pub mod datastore;
 pub mod sync;
-pub mod verify;
 pub mod traffic_control;
+pub mod verify;
 
 const SUBDIRS: SubdirMap = &[
     ("datastore", &datastore::ROUTER),
     ("sync", &sync::ROUTER),
     ("traffic-control", &traffic_control::ROUTER),
-    ("verify", &verify::ROUTER)
+    ("verify", &verify::ROUTER),
 ];
 
 pub const ROUTER: Router = Router::new()
index c85bdf15ff184a33f0bd8dcd91ee1b4b6ea2f5f2..93f6bd898f40474476f44901c0e4b21da42bc83d 100644 (file)
@@ -3,32 +3,23 @@
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
-use proxmox_sys::sortable;
 use proxmox_router::{
-    list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
-    Permission,
+    list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+    SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 
-use pbs_api_types::{DATASTORE_SCHEMA, JOB_ID_SCHEMA, Authid, SyncJobConfig, SyncJobStatus};
+use pbs_api_types::{Authid, SyncJobConfig, SyncJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA};
 use pbs_config::sync;
 use pbs_config::CachedUserInfo;
 
 use crate::{
     api2::{
+        config::sync::{check_sync_job_modify_access, check_sync_job_read_access},
         pull::do_sync_job,
-        config::sync::{
-            check_sync_job_modify_access,
-            check_sync_job_read_access,
-        },
-    },
-    server::{
-        jobstate::{
-            Job,
-            JobState,
-            compute_schedule_status,
-        },
     },
+    server::jobstate::{compute_schedule_status, Job, JobState},
 };
 
 #[api(
@@ -56,7 +47,6 @@ pub fn list_sync_jobs(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SyncJobStatus>, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
 
@@ -72,9 +62,7 @@ pub fn list_sync_jobs(
                 true
             }
         })
-        .filter(|job: &SyncJobConfig| {
-            check_sync_job_read_access(&user_info, &auth_id, job)
-        });
+        .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job));
 
     let mut list = Vec::new();
 
@@ -84,7 +72,10 @@ pub fn list_sync_jobs(
 
         let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
 
-        list.push(SyncJobStatus { config: job, status });
+        list.push(SyncJobStatus {
+            config: job,
+            status,
+        });
     }
 
     rpcenv["digest"] = hex::encode(&digest).into();
@@ -131,19 +122,12 @@ pub fn run_sync_job(
 }
 
 #[sortable]
-const SYNC_INFO_SUBDIRS: SubdirMap = &[
-    (
-        "run",
-        &Router::new()
-            .post(&API_METHOD_RUN_SYNC_JOB)
-    ),
-];
+const SYNC_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_SYNC_JOB))];
 
 const SYNC_INFO_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
     .subdirs(SYNC_INFO_SUBDIRS);
 
-
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_SYNC_JOBS)
     .match_all("id", &SYNC_INFO_ROUTER);
index 16c3c312df3d6559f046ad2ad2792e040f3617d2..872c79ff4a2e621c484e40d5b2aa7671269bf7e0 100644 (file)
@@ -1,12 +1,10 @@
 use anyhow::Error;
 use serde::{Deserialize, Serialize};
 
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
-use pbs_api_types::{
-    TrafficControlRule, PRIV_SYS_AUDIT,
-};
+use pbs_api_types::{TrafficControlRule, PRIV_SYS_AUDIT};
 
 use crate::traffic_control_cache::TRAFFIC_CONTROL_CACHE;
 
@@ -18,7 +16,7 @@ use crate::traffic_control_cache::TRAFFIC_CONTROL_CACHE;
     },
 )]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// Traffic control rule config with current rates
 pub struct TrafficControlCurrentRate {
     #[serde(flatten)]
@@ -48,7 +46,6 @@ pub struct TrafficControlCurrentRate {
 pub fn show_current_traffic(
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TrafficControlCurrentRate>, Error> {
-
     let (config, digest) = pbs_config::traffic_control::config()?;
 
     let rules: Vec<TrafficControlRule> = config.convert_to_typed_array("rule")?;
@@ -62,7 +59,11 @@ pub fn show_current_traffic(
             None => (0, 0),
             Some(state) => (state.rate_in, state.rate_out),
         };
-        list.push(TrafficControlCurrentRate {config, cur_rate_in, cur_rate_out});
+        list.push(TrafficControlCurrentRate {
+            config,
+            cur_rate_in,
+            cur_rate_out,
+        });
     }
 
     // also return the configuration digest
@@ -71,5 +72,4 @@ pub fn show_current_traffic(
     Ok(list)
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_SHOW_CURRENT_TRAFFIC);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_SHOW_CURRENT_TRAFFIC);
index 8e481cc301d5a596f7d890dba455bb5214935a11..a0fbfafbc89abb07a7bd7a10f2e1c2af891ad76b 100644 (file)
@@ -3,29 +3,23 @@
 use anyhow::{format_err, Error};
 use serde_json::Value;
 
-use proxmox_sys::sortable;
 use proxmox_router::{
-    list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
-    Permission,
+    list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+    SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    VerificationJobConfig, VerificationJobStatus, JOB_ID_SCHEMA, Authid,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, DATASTORE_SCHEMA,
+    Authid, VerificationJobConfig, VerificationJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA,
+    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY,
 };
 use pbs_config::verify;
 use pbs_config::CachedUserInfo;
 
-use crate::{
-    server::{
-        do_verification_job,
-        jobstate::{
-            Job,
-            JobState,
-            compute_schedule_status,
-        },
-    },
+use crate::server::{
+    do_verification_job,
+    jobstate::{compute_schedule_status, Job, JobState},
 };
 
 #[api(
@@ -84,7 +78,10 @@ pub fn list_verification_jobs(
 
         let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
 
-        list.push(VerificationJobStatus { config: job, status });
+        list.push(VerificationJobStatus {
+            config: job,
+            status,
+        });
     }
 
     rpcenv["digest"] = hex::encode(&digest).into();
@@ -117,7 +114,12 @@ pub fn run_verification_job(
     let (config, _digest) = verify::config()?;
     let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
 
-    user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, true)?;
+    user_info.check_privs(
+        &auth_id,
+        &["datastore", &verification_job.store],
+        PRIV_DATASTORE_VERIFY,
+        true,
+    )?;
 
     let job = Job::new("verificationjob", &id)?;
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
@@ -128,7 +130,8 @@ pub fn run_verification_job(
 }
 
 #[sortable]
-const VERIFICATION_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
+const VERIFICATION_INFO_SUBDIRS: SubdirMap =
+    &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
 
 const VERIFICATION_INFO_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(VERIFICATION_INFO_SUBDIRS))
index 3e23840fa728cfa1f8b4f8b279a3088fffd35275..ac55bb647af15043817b50e69376563a9c88a0e7 100644 (file)
@@ -1,20 +1,20 @@
 use anyhow::{bail, format_err, Error};
-use std::sync::{Arc, Mutex};
-use std::collections::HashMap;
 use nix::dir::Dir;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
 
-use ::serde::{Serialize};
+use ::serde::Serialize;
 use serde_json::{json, Value};
 
-use proxmox_sys::fs::{replace_file, CreateOptions};
 use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
+use proxmox_sys::fs::{replace_file, CreateOptions};
 
-use pbs_datastore::{DataStore, DataBlob};
+use pbs_api_types::Authid;
 use pbs_datastore::backup_info::{BackupDir, BackupInfo};
 use pbs_datastore::dynamic_index::DynamicIndexWriter;
 use pbs_datastore::fixed_index::FixedIndexWriter;
-use pbs_api_types::Authid;
-use proxmox_rest_server::{WorkerTask, formatter::*};
+use pbs_datastore::{DataBlob, DataStore};
+use proxmox_rest_server::{formatter::*, WorkerTask};
 
 use crate::backup::verify_backup_dir_with_lock;
 
@@ -72,7 +72,7 @@ struct FixedWriterState {
 }
 
 // key=digest, value=length
-type KnownChunksMap = HashMap<[u8;32], u32>;
+type KnownChunksMap = HashMap<[u8; 32], u32>;
 
 struct SharedBackupState {
     finished: bool,
@@ -86,7 +86,6 @@ struct SharedBackupState {
 }
 
 impl SharedBackupState {
-
     // Raise error if finished flag is set
     fn ensure_unfinished(&self) -> Result<(), Error> {
         if self.finished {
@@ -102,7 +101,6 @@ impl SharedBackupState {
     }
 }
 
-
 /// `RpcEnvironment` implementation for backup service
 #[derive(Clone)]
 pub struct BackupEnvironment {
@@ -115,7 +113,7 @@ pub struct BackupEnvironment {
     pub datastore: Arc<DataStore>,
     pub backup_dir: BackupDir,
     pub last_backup: Option<BackupInfo>,
-    state: Arc<Mutex<SharedBackupState>>
+    state: Arc<Mutex<SharedBackupState>>,
 }
 
 impl BackupEnvironment {
@@ -126,7 +124,6 @@ impl BackupEnvironment {
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
     ) -> Self {
-
         let state = SharedBackupState {
             finished: false,
             uid_counter: 0,
@@ -188,13 +185,21 @@ impl BackupEnvironment {
         };
 
         if size > data.chunk_size {
-            bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
+            bail!(
+                "fixed writer '{}' - got large chunk ({} > {}",
+                data.name,
+                size,
+                data.chunk_size
+            );
         }
 
         if size < data.chunk_size {
             data.small_chunk_count += 1;
             if data.small_chunk_count > 1 {
-                bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)", wid);
+                bail!(
+                    "fixed writer '{}' - detected multiple end chunks (chunk size too small)",
+                    wid
+                );
             }
         }
 
@@ -202,7 +207,9 @@ impl BackupEnvironment {
         data.upload_stat.count += 1;
         data.upload_stat.size += size as u64;
         data.upload_stat.compressed_size += compressed_size as u64;
-        if is_duplicate { data.upload_stat.duplicates += 1; }
+        if is_duplicate {
+            data.upload_stat.duplicates += 1;
+        }
 
         // register chunk
         state.known_chunks.insert(digest, size);
@@ -235,7 +242,9 @@ impl BackupEnvironment {
         data.upload_stat.count += 1;
         data.upload_stat.size += size as u64;
         data.upload_stat.compressed_size += compressed_size as u64;
-        if is_duplicate { data.upload_stat.duplicates += 1; }
+        if is_duplicate {
+            data.upload_stat.duplicates += 1;
+        }
 
         // register chunk
         state.known_chunks.insert(digest, size);
@@ -250,37 +259,71 @@ impl BackupEnvironment {
     }
 
     /// Store the writer with a unique ID
-    pub fn register_dynamic_writer(&self, index: DynamicIndexWriter, name: String) -> Result<usize, Error> {
+    pub fn register_dynamic_writer(
+        &self,
+        index: DynamicIndexWriter,
+        name: String,
+    ) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
 
         let uid = state.next_uid();
 
-        state.dynamic_writers.insert(uid, DynamicWriterState {
-            index, name, offset: 0, chunk_count: 0, upload_stat: UploadStatistic::new(),
-        });
+        state.dynamic_writers.insert(
+            uid,
+            DynamicWriterState {
+                index,
+                name,
+                offset: 0,
+                chunk_count: 0,
+                upload_stat: UploadStatistic::new(),
+            },
+        );
 
         Ok(uid)
     }
 
     /// Store the writer with a unique ID
-    pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
+    pub fn register_fixed_writer(
+        &self,
+        index: FixedIndexWriter,
+        name: String,
+        size: usize,
+        chunk_size: u32,
+        incremental: bool,
+    ) -> Result<usize, Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
 
         let uid = state.next_uid();
 
-        state.fixed_writers.insert(uid, FixedWriterState {
-            index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
-        });
+        state.fixed_writers.insert(
+            uid,
+            FixedWriterState {
+                index,
+                name,
+                chunk_count: 0,
+                size,
+                chunk_size,
+                small_chunk_count: 0,
+                upload_stat: UploadStatistic::new(),
+                incremental,
+            },
+        );
 
         Ok(uid)
     }
 
     /// Append chunk to dynamic writer
-    pub fn dynamic_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
+    pub fn dynamic_writer_append_chunk(
+        &self,
+        wid: usize,
+        offset: u64,
+        size: u32,
+        digest: &[u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
@@ -290,10 +333,13 @@ impl BackupEnvironment {
             None => bail!("dynamic writer '{}' not registered", wid),
         };
 
-
         if data.offset != offset {
-            bail!("dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
-                  data.name, data.offset, offset);
+            bail!(
+                "dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
+                data.name,
+                data.offset,
+                offset
+            );
         }
 
         data.offset += size as u64;
@@ -305,7 +351,13 @@ impl BackupEnvironment {
     }
 
     /// Append chunk to fixed writer
-    pub fn fixed_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
+    pub fn fixed_writer_append_chunk(
+        &self,
+        wid: usize,
+        offset: u64,
+        size: u32,
+        digest: &[u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
@@ -325,7 +377,15 @@ impl BackupEnvironment {
         Ok(())
     }
 
-    fn log_upload_stat(&self, archive_name:  &str, csum: &[u8; 32], uuid: &[u8; 16], size: u64, chunk_count: u64, upload_stat: &UploadStatistic) {
+    fn log_upload_stat(
+        &self,
+        archive_name: &str,
+        csum: &[u8; 32],
+        uuid: &[u8; 16],
+        size: u64,
+        chunk_count: u64,
+        upload_stat: &UploadStatistic,
+    ) {
         self.log(format!("Upload statistics for '{}'", archive_name));
         self.log(format!("UUID: {}", hex::encode(uuid)));
         self.log(format!("Checksum: {}", hex::encode(csum)));
@@ -336,7 +396,11 @@ impl BackupEnvironment {
             return;
         }
 
-        self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
+        self.log(format!(
+            "Upload size: {} ({}%)",
+            upload_stat.size,
+            (upload_stat.size * 100) / size
+        ));
 
         // account for the zero chunk, which might be uploaded but never used
         let client_side_duplicates = if chunk_count < upload_stat.count {
@@ -348,17 +412,29 @@ impl BackupEnvironment {
         let server_side_duplicates = upload_stat.duplicates;
 
         if (client_side_duplicates + server_side_duplicates) > 0 {
-            let per = (client_side_duplicates + server_side_duplicates)*100/chunk_count;
-            self.log(format!("Duplicates: {}+{} ({}%)", client_side_duplicates, server_side_duplicates, per));
+            let per = (client_side_duplicates + server_side_duplicates) * 100 / chunk_count;
+            self.log(format!(
+                "Duplicates: {}+{} ({}%)",
+                client_side_duplicates, server_side_duplicates, per
+            ));
         }
 
         if upload_stat.size > 0 {
-            self.log(format!("Compression: {}%", (upload_stat.compressed_size*100)/upload_stat.size));
+            self.log(format!(
+                "Compression: {}%",
+                (upload_stat.compressed_size * 100) / upload_stat.size
+            ));
         }
     }
 
     /// Close dynamic writer
-    pub fn dynamic_writer_close(&self, wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+    pub fn dynamic_writer_close(
+        &self,
+        wid: usize,
+        chunk_count: u64,
+        size: u64,
+        csum: [u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
@@ -369,11 +445,21 @@ impl BackupEnvironment {
         };
 
         if data.chunk_count != chunk_count {
-            bail!("dynamic writer '{}' close failed - unexpected chunk count ({} != {})", data.name, data.chunk_count, chunk_count);
+            bail!(
+                "dynamic writer '{}' close failed - unexpected chunk count ({} != {})",
+                data.name,
+                data.chunk_count,
+                chunk_count
+            );
         }
 
         if data.offset != size {
-            bail!("dynamic writer '{}' close failed - unexpected file size ({} != {})", data.name, data.offset, size);
+            bail!(
+                "dynamic writer '{}' close failed - unexpected file size ({} != {})",
+                data.name,
+                data.offset,
+                size
+            );
         }
 
         let uuid = data.index.uuid;
@@ -381,10 +467,20 @@ impl BackupEnvironment {
         let expected_csum = data.index.close()?;
 
         if csum != expected_csum {
-            bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
+            bail!(
+                "dynamic writer '{}' close failed - got unexpected checksum",
+                data.name
+            );
         }
 
-        self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
+        self.log_upload_stat(
+            &data.name,
+            &csum,
+            &uuid,
+            size,
+            chunk_count,
+            &data.upload_stat,
+        );
 
         state.file_counter += 1;
         state.backup_size += size;
@@ -394,7 +490,13 @@ impl BackupEnvironment {
     }
 
     /// Close fixed writer
-    pub fn fixed_writer_close(&self, wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+    pub fn fixed_writer_close(
+        &self,
+        wid: usize,
+        chunk_count: u64,
+        size: u64,
+        csum: [u8; 32],
+    ) -> Result<(), Error> {
         let mut state = self.state.lock().unwrap();
 
         state.ensure_unfinished()?;
@@ -405,18 +507,33 @@ impl BackupEnvironment {
         };
 
         if data.chunk_count != chunk_count {
-            bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
+            bail!(
+                "fixed writer '{}' close failed - received wrong number of chunk ({} != {})",
+                data.name,
+                data.chunk_count,
+                chunk_count
+            );
         }
 
         if !data.incremental {
             let expected_count = data.index.index_length();
 
             if chunk_count != (expected_count as u64) {
-                bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
+                bail!(
+                    "fixed writer '{}' close failed - unexpected chunk count ({} != {})",
+                    data.name,
+                    expected_count,
+                    chunk_count
+                );
             }
 
             if size != (data.size as u64) {
-                bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+                bail!(
+                    "fixed writer '{}' close failed - unexpected file size ({} != {})",
+                    data.name,
+                    data.size,
+                    size
+                );
             }
         }
 
@@ -424,10 +541,20 @@ impl BackupEnvironment {
         let expected_csum = data.index.close()?;
 
         if csum != expected_csum {
-            bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
+            bail!(
+                "fixed writer '{}' close failed - got unexpected checksum",
+                data.name
+            );
         }
 
-        self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
+        self.log_upload_stat(
+            &data.name,
+            &expected_csum,
+            &uuid,
+            size,
+            chunk_count,
+            &data.upload_stat,
+        );
 
         state.file_counter += 1;
         state.backup_size += size;
@@ -437,7 +564,6 @@ impl BackupEnvironment {
     }
 
     pub fn add_blob(&self, file_name: &str, data: Vec<u8>) -> Result<(), Error> {
-
         let mut path = self.datastore.base_path();
         path.push(self.backup_dir.relative_path());
         path.push(file_name);
@@ -451,7 +577,10 @@ impl BackupEnvironment {
         let raw_data = blob.raw_data();
         replace_file(&path, raw_data, CreateOptions::new(), false)?;
 
-        self.log(format!("add blob {:?} ({} bytes, comp: {})", path, orig_len, blob_len));
+        self.log(format!(
+            "add blob {:?} ({} bytes, comp: {})",
+            path, orig_len, blob_len
+        ));
 
         let mut state = self.state.lock().unwrap();
         state.file_counter += 1;
@@ -478,9 +607,11 @@ impl BackupEnvironment {
 
         // check for valid manifest and store stats
         let stats = serde_json::to_value(state.backup_stat)?;
-        self.datastore.update_manifest(&self.backup_dir, |manifest| {
-            manifest.unprotected["chunk_upload_stats"] = stats;
-        }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
+        self.datastore
+            .update_manifest(&self.backup_dir, |manifest| {
+                manifest.unprotected["chunk_upload_stats"] = stats;
+            })
+            .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
 
         if let Some(base) = &self.last_backup {
             let path = self.datastore.snapshot_path(&base.backup_dir);
@@ -509,11 +640,13 @@ impl BackupEnvironment {
             return Ok(());
         }
 
-        let worker_id = format!("{}:{}/{}/{:08X}",
+        let worker_id = format!(
+            "{}:{}/{}/{:08X}",
             self.datastore.name(),
             self.backup_dir.group().backup_type(),
             self.backup_dir.group().backup_id(),
-            self.backup_dir.backup_time());
+            self.backup_dir.backup_time()
+        );
 
         let datastore = self.datastore.clone();
         let backup_dir = self.backup_dir.clone();
@@ -526,7 +659,6 @@ impl BackupEnvironment {
             move |worker| {
                 worker.log_message("Automatically verifying newly added snapshot");
 
-
                 let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
                 if !verify_backup_dir_with_lock(
                     &verify_worker,
@@ -540,7 +672,8 @@ impl BackupEnvironment {
 
                 Ok(())
             },
-        ).map(|_| ())
+        )
+        .map(|_| ())
     }
 
     pub fn log<S: AsRef<str>>(&self, msg: S) {
@@ -548,7 +681,9 @@ impl BackupEnvironment {
     }
 
     pub fn debug<S: AsRef<str>>(&self, msg: S) {
-        if self.debug { self.worker.log_message(msg); }
+        if self.debug {
+            self.worker.log_message(msg);
+        }
     }
 
     pub fn format_response(&self, result: Result<Value, Error>) -> Response<Body> {
@@ -582,7 +717,6 @@ impl BackupEnvironment {
 }
 
 impl RpcEnvironment for BackupEnvironment {
-
     fn result_attrib_mut(&mut self) -> &mut Value {
         &mut self.result_attributes
     }
index 9d62dc52bb120cfe399260ad557880fe0b5006a7..718d0386766c197640d02db05f0d33b60cd95bd1 100644 (file)
@@ -2,32 +2,32 @@
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
+use hex::FromHex;
 use hyper::header::{HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
-use hyper::{Body, Response, Request, StatusCode};
+use hyper::{Body, Request, Response, StatusCode};
 use serde_json::{json, Value};
-use hex::FromHex;
 
-use proxmox_sys::sortable;
 use proxmox_router::list_subdirs_api_method;
 use proxmox_router::{
-    ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, SubdirMap, Permission,
+    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::*;
+use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Authid, Operation, VerifyState, SnapshotVerifyState,
-    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
-    CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_ARCHIVE_NAME_SCHEMA,
+    Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
+    DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
 };
-use proxmox_sys::fs::lock_dir_noblock_shared;
-use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
 use pbs_config::CachedUserInfo;
-use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
 use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType};
-use proxmox_rest_server::{WorkerTask, H2Service};
+use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
+use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
+use proxmox_rest_server::{H2Service, WorkerTask};
+use proxmox_sys::fs::lock_dir_noblock_shared;
 
 mod environment;
 use environment::*;
@@ -35,8 +35,7 @@ use environment::*;
 mod upload_chunk;
 use upload_chunk::*;
 
-pub const ROUTER: Router = Router::new()
-    .upgrade(&API_METHOD_UPGRADE_BACKUP);
+pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);
 
 #[sortable]
 pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
@@ -65,269 +64,296 @@ fn upgrade_to_backup_protocol(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
+    async move {
+        let debug = param["debug"].as_bool().unwrap_or(false);
+        let benchmark = param["benchmark"].as_bool().unwrap_or(false);
 
-async move {
-    let debug = param["debug"].as_bool().unwrap_or(false);
-    let benchmark = param["benchmark"].as_bool().unwrap_or(false);
+        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+        let store = required_string_param(&param, "store")?.to_owned();
 
-    let store = required_string_param(&param, "store")?.to_owned();
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(
+            &auth_id,
+            &["datastore", &store],
+            PRIV_DATASTORE_BACKUP,
+            false,
+        )?;
 
-    let user_info = CachedUserInfo::new()?;
-    user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
+        let backup_type = required_string_param(&param, "backup-type")?;
+        let backup_id = required_string_param(&param, "backup-id")?;
+        let backup_time = required_integer_param(&param, "backup-time")?;
 
-    let backup_type = required_string_param(&param, "backup-type")?;
-    let backup_id = required_string_param(&param, "backup-id")?;
-    let backup_time = required_integer_param(&param, "backup-time")?;
+        let protocols = parts
+            .headers
+            .get("UPGRADE")
+            .ok_or_else(|| format_err!("missing Upgrade header"))?
+            .to_str()?;
 
-    let protocols = parts
-        .headers
-        .get("UPGRADE")
-        .ok_or_else(|| format_err!("missing Upgrade header"))?
-        .to_str()?;
+        if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
+            bail!("invalid protocol name");
+        }
 
-    if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
-        bail!("invalid protocol name");
-    }
+        if parts.version >= http::version::Version::HTTP_2 {
+            bail!(
+                "unexpected http version '{:?}' (expected version < 2)",
+                parts.version
+            );
+        }
 
-    if parts.version >=  http::version::Version::HTTP_2 {
-        bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
-    }
+        let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
 
-    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
+        let env_type = rpcenv.env_type();
 
-    let env_type = rpcenv.env_type();
+        let backup_group = BackupGroup::new(backup_type, backup_id);
 
-    let backup_group = BackupGroup::new(backup_type, backup_id);
+        let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+            if !benchmark {
+                bail!("unable to run benchmark without --benchmark flags");
+            }
+            "benchmark"
+        } else {
+            if benchmark {
+                bail!("benchmark flags is only allowed on 'host/benchmark'");
+            }
+            "backup"
+        };
 
-    let worker_type = if backup_type == "host" && backup_id == "benchmark" {
-        if !benchmark {
-            bail!("unable to run benchmark without --benchmark flags");
-        }
-        "benchmark"
-    } else {
-        if benchmark {
-            bail!("benchmark flags is only allowed on 'host/benchmark'");
+        // lock backup group to only allow one backup per group at a time
+        let (owner, _group_guard) =
+            datastore.create_locked_backup_group(&backup_group, &auth_id)?;
+
+        // permission check
+        let correct_owner =
+            owner == auth_id || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
+        if !correct_owner && worker_type != "benchmark" {
+            // only the owner is allowed to create additional snapshots
+            bail!("backup owner check failed ({} != {})", auth_id, owner);
         }
-        "backup"
-    };
-
-    // lock backup group to only allow one backup per group at a time
-    let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
-
-    // permission check
-    let correct_owner = owner == auth_id
-        || (owner.is_token()
-            && Authid::from(owner.user().clone()) == auth_id);
-    if !correct_owner && worker_type != "benchmark" {
-        // only the owner is allowed to create additional snapshots
-        bail!("backup owner check failed ({} != {})", auth_id, owner);
-    }
 
-    let last_backup = {
-        let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
-        if let Some(info) = info {
-            let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
-            let verify = manifest.unprotected["verify_state"].clone();
-            match serde_json::from_value::<SnapshotVerifyState>(verify) {
-                Ok(verify) => {
-                    match verify.state {
+        let last_backup = {
+            let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true)
+                .unwrap_or(None);
+            if let Some(info) = info {
+                let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
+                let verify = manifest.unprotected["verify_state"].clone();
+                match serde_json::from_value::<SnapshotVerifyState>(verify) {
+                    Ok(verify) => match verify.state {
                         VerifyState::Ok => Some(info),
                         VerifyState::Failed => None,
+                    },
+                    Err(_) => {
+                        // no verify state found, treat as valid
+                        Some(info)
                     }
-                },
-                Err(_) => {
-                    // no verify state found, treat as valid
-                    Some(info)
                 }
+            } else {
+                None
             }
-        } else {
-            None
-        }
-    };
-
-    let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
-
-    let _last_guard = if let Some(last) = &last_backup {
-        if backup_dir.backup_time() <= last.backup_dir.backup_time() {
-            bail!("backup timestamp is older than last backup.");
-        }
-
-        // lock last snapshot to prevent forgetting/pruning it during backup
-        let full_path = datastore.snapshot_path(&last.backup_dir);
-        Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
-    } else {
-        None
-    };
-
-    let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
-    if !is_new { bail!("backup directory already exists."); }
-
-
-    WorkerTask::spawn(worker_type, Some(worker_id), auth_id.to_string(), true, move |worker| {
-        let mut env = BackupEnvironment::new(
-            env_type, auth_id, worker.clone(), datastore, backup_dir);
-
-        env.debug = debug;
-        env.last_backup = last_backup;
-
-        env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));
-
-        let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);
+        };
 
-        let abort_future = worker.abort_future();
+        let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
 
-        let env2 = env.clone();
+        let _last_guard = if let Some(last) = &last_backup {
+            if backup_dir.backup_time() <= last.backup_dir.backup_time() {
+                bail!("backup timestamp is older than last backup.");
+            }
 
-        let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
-            .map_err(Error::from)
-            .and_then(move |conn| {
-                env2.debug("protocol upgrade done");
+            // lock last snapshot to prevent forgetting/pruning it during backup
+            let full_path = datastore.snapshot_path(&last.backup_dir);
+            Some(lock_dir_noblock_shared(
+                &full_path,
+                "snapshot",
+                "base snapshot is already locked by another operation",
+            )?)
+        } else {
+            None
+        };
 
-                let mut http = hyper::server::conn::Http::new();
-                http.http2_only(true);
-                // increase window size: todo - find optiomal size
-                let window_size = 32*1024*1024; // max = (1 << 31) - 2
-                http.http2_initial_stream_window_size(window_size);
-                http.http2_initial_connection_window_size(window_size);
-                http.http2_max_frame_size(4*1024*1024);
+        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
+        if !is_new {
+            bail!("backup directory already exists.");
+        }
 
-                let env3 = env2.clone();
-                http.serve_connection(conn, service)
-                    .map(move |result| {
-                        match result {
-                            Err(err) => {
-                                // Avoid  Transport endpoint is not connected (os error 107)
-                                // fixme: find a better way to test for that error
-                                if err.to_string().starts_with("connection error") && env3.finished() {
-                                    Ok(())
-                                } else {
-                                    Err(Error::from(err))
+        WorkerTask::spawn(
+            worker_type,
+            Some(worker_id),
+            auth_id.to_string(),
+            true,
+            move |worker| {
+                let mut env = BackupEnvironment::new(
+                    env_type,
+                    auth_id,
+                    worker.clone(),
+                    datastore,
+                    backup_dir,
+                );
+
+                env.debug = debug;
+                env.last_backup = last_backup;
+
+                env.log(format!(
+                    "starting new {} on datastore '{}': {:?}",
+                    worker_type, store, path
+                ));
+
+                let service =
+                    H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);
+
+                let abort_future = worker.abort_future();
+
+                let env2 = env.clone();
+
+                let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
+                    .map_err(Error::from)
+                    .and_then(move |conn| {
+                        env2.debug("protocol upgrade done");
+
+                        let mut http = hyper::server::conn::Http::new();
+                        http.http2_only(true);
+                        // increase window size: todo - find optimal size
+                        let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
+                        http.http2_initial_stream_window_size(window_size);
+                        http.http2_initial_connection_window_size(window_size);
+                        http.http2_max_frame_size(4 * 1024 * 1024);
+
+                        let env3 = env2.clone();
+                        http.serve_connection(conn, service).map(move |result| {
+                            match result {
+                                Err(err) => {
+                                    // Avoid "Transport endpoint is not connected" (os error 107)
+                                    // fixme: find a better way to test for that error
+                                    if err.to_string().starts_with("connection error")
+                                        && env3.finished()
+                                    {
+                                        Ok(())
+                                    } else {
+                                        Err(Error::from(err))
+                                    }
                                 }
+                                Ok(()) => Ok(()),
                             }
-                            Ok(()) => Ok(()),
-                        }
-                    })
-            });
-        let mut abort_future = abort_future
-            .map(|_| Err(format_err!("task aborted")));
-
-        async move {
-            // keep flock until task ends
-            let _group_guard = _group_guard;
-            let snap_guard = snap_guard;
-            let _last_guard = _last_guard;
-
-            let res = select!{
-                req = req_fut => req,
-                abrt = abort_future => abrt,
-            };
-            if benchmark {
-                env.log("benchmark finished successfully");
-                proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
-                return Ok(());
-            }
+                        })
+                    });
+                let mut abort_future = abort_future.map(|_| Err(format_err!("task aborted")));
+
+                async move {
+                    // keep flock until task ends
+                    let _group_guard = _group_guard;
+                    let snap_guard = snap_guard;
+                    let _last_guard = _last_guard;
+
+                    let res = select! {
+                        req = req_fut => req,
+                        abrt = abort_future => abrt,
+                    };
+                    if benchmark {
+                        env.log("benchmark finished successfully");
+                        proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
+                        return Ok(());
+                    }
 
-            let verify = |env: BackupEnvironment| {
-                if let Err(err) = env.verify_after_complete(snap_guard) {
-                    env.log(format!(
+                    let verify = |env: BackupEnvironment| {
+                        if let Err(err) = env.verify_after_complete(snap_guard) {
+                            env.log(format!(
                         "backup finished, but starting the requested verify task failed: {}",
                         err
                     ));
-                }
-            };
-
-            match (res, env.ensure_finished()) {
-                (Ok(_), Ok(())) => {
-                    env.log("backup finished successfully");
-                    verify(env);
-                    Ok(())
-                },
-                (Err(err), Ok(())) => {
-                    // ignore errors after finish
-                    env.log(format!("backup had errors but finished: {}", err));
-                    verify(env);
-                    Ok(())
-                },
-                (Ok(_), Err(err)) => {
-                    env.log(format!("backup ended and finish failed: {}", err));
-                    env.log("removing unfinished backup");
-                    proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
-                    Err(err)
-                },
-                (Err(err), Err(_)) => {
-                    env.log(format!("backup failed: {}", err));
-                    env.log("removing failed backup");
-                    proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
-                    Err(err)
-                },
-            }
-        }
-    })?;
+                        }
+                    };
 
-    let response = Response::builder()
-        .status(StatusCode::SWITCHING_PROTOCOLS)
-        .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
-        .body(Body::empty())?;
+                    match (res, env.ensure_finished()) {
+                        (Ok(_), Ok(())) => {
+                            env.log("backup finished successfully");
+                            verify(env);
+                            Ok(())
+                        }
+                        (Err(err), Ok(())) => {
+                            // ignore errors after finish
+                            env.log(format!("backup had errors but finished: {}", err));
+                            verify(env);
+                            Ok(())
+                        }
+                        (Ok(_), Err(err)) => {
+                            env.log(format!("backup ended and finish failed: {}", err));
+                            env.log("removing unfinished backup");
+                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
+                            Err(err)
+                        }
+                        (Err(err), Err(_)) => {
+                            env.log(format!("backup failed: {}", err));
+                            env.log("removing failed backup");
+                            proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
+                            Err(err)
+                        }
+                    }
+                }
+            },
+        )?;
+
+        let response = Response::builder()
+            .status(StatusCode::SWITCHING_PROTOCOLS)
+            .header(
+                UPGRADE,
+                HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
+            )
+            .body(Body::empty())?;
 
-    Ok(response)
-    }.boxed()
+        Ok(response)
+    }
+    .boxed()
 }
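
Note on the pattern reflowed above: the handler builds an `async move` block and type-erases it with `.boxed()`, which rustfmt now places on its own line after the block. A minimal, self-contained sketch of the same shape, assuming only the futures and anyhow crates (the function name and body are illustrative):

    use futures::future::{BoxFuture, FutureExt};

    // An async block erased to a BoxFuture, in the layout rustfmt now
    // produces: the `.boxed()` adapter sits on its own line.
    fn answer() -> BoxFuture<'static, Result<u32, anyhow::Error>> {
        async move {
            // ... asynchronous work would happen here ...
            Ok(42)
        }
        .boxed()
    }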
 
 const BACKUP_API_SUBDIRS: SubdirMap = &[
+    ("blob", &Router::new().upload(&API_METHOD_UPLOAD_BLOB)),
     (
-        "blob", &Router::new()
-            .upload(&API_METHOD_UPLOAD_BLOB)
-    ),
-    (
-        "dynamic_chunk", &Router::new()
-            .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
+        "dynamic_chunk",
+        &Router::new().upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK),
     ),
     (
-        "dynamic_close", &Router::new()
-            .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
+        "dynamic_close",
+        &Router::new().post(&API_METHOD_CLOSE_DYNAMIC_INDEX),
     ),
     (
-        "dynamic_index", &Router::new()
+        "dynamic_index",
+        &Router::new()
             .post(&API_METHOD_CREATE_DYNAMIC_INDEX)
-            .put(&API_METHOD_DYNAMIC_APPEND)
+            .put(&API_METHOD_DYNAMIC_APPEND),
     ),
     (
-        "finish", &Router::new()
-            .post(
-                &ApiMethod::new(
-                    &ApiHandler::Sync(&finish_backup),
-                    &ObjectSchema::new("Mark backup as finished.", &[])
-                )
-            )
+        "finish",
+        &Router::new().post(&ApiMethod::new(
+            &ApiHandler::Sync(&finish_backup),
+            &ObjectSchema::new("Mark backup as finished.", &[]),
+        )),
     ),
     (
-        "fixed_chunk", &Router::new()
-            .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
+        "fixed_chunk",
+        &Router::new().upload(&API_METHOD_UPLOAD_FIXED_CHUNK),
     ),
     (
-        "fixed_close", &Router::new()
-            .post(&API_METHOD_CLOSE_FIXED_INDEX)
+        "fixed_close",
+        &Router::new().post(&API_METHOD_CLOSE_FIXED_INDEX),
     ),
     (
-        "fixed_index", &Router::new()
+        "fixed_index",
+        &Router::new()
             .post(&API_METHOD_CREATE_FIXED_INDEX)
-            .put(&API_METHOD_FIXED_APPEND)
+            .put(&API_METHOD_FIXED_APPEND),
     ),
     (
-        "previous", &Router::new()
-            .download(&API_METHOD_DOWNLOAD_PREVIOUS)
+        "previous",
+        &Router::new().download(&API_METHOD_DOWNLOAD_PREVIOUS),
     ),
     (
-        "previous_backup_time", &Router::new()
-            .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
+        "previous_backup_time",
+        &Router::new().get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME),
     ),
     (
-        "speedtest", &Router::new()
-            .upload(&API_METHOD_UPLOAD_SPEEDTEST)
+        "speedtest",
+        &Router::new().upload(&API_METHOD_UPLOAD_SPEEDTEST),
     ),
 ];
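
For readers unfamiliar with the routing table reformatted above: a `SubdirMap` is a static slice of `(path, &Router)` pairs, and each router wires HTTP verbs to `ApiMethod` constants. A hypothetical two-entry map in the new layout; `API_METHOD_PING` and `API_METHOD_STATUS` are stand-ins, not part of this commit:

    use proxmox_router::{Router, SubdirMap};

    // Stand-in method constants are assumed to exist elsewhere.
    const EXAMPLE_SUBDIRS: SubdirMap = &[
        ("ping", &Router::new().get(&API_METHOD_PING)),
        ("status", &Router::new().get(&API_METHOD_STATUS)),
    ];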
 
@@ -340,10 +366,8 @@ pub const API_METHOD_CREATE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
     &ApiHandler::Sync(&create_dynamic_index),
     &ObjectSchema::new(
         "Create dynamic chunk index file.",
-        &sorted!([
-            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
-        ]),
-    )
+        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
+    ),
 );
 
 fn create_dynamic_index(
@@ -351,7 +375,6 @@ fn create_dynamic_index(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let env: &BackupEnvironment = rpcenv.as_ref();
 
     let name = required_string_param(&param, "archive-name")?.to_owned();
@@ -379,14 +402,22 @@ pub const API_METHOD_CREATE_FIXED_INDEX: ApiMethod = ApiMethod::new(
         "Create fixed chunk index file.",
         &sorted!([
             ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
-            ("size", false, &IntegerSchema::new("File size.")
-             .minimum(1)
-             .schema()
+            (
+                "size",
+                false,
+                &IntegerSchema::new("File size.").minimum(1).schema()
+            ),
+            (
+                "reuse-csum",
+                true,
+                &StringSchema::new(
+                    "If set, compare last backup's \
+                csum and reuse index for incremental backup if it matches."
+                )
+                .schema()
             ),
-            ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
-                csum and reuse index for incremental backup if it matches.").schema()),
         ]),
-    )
+    ),
 );
 
 fn create_fixed_index(
@@ -394,7 +425,6 @@ fn create_fixed_index(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let env: &BackupEnvironment = rpcenv.as_ref();
 
     let name = required_string_param(&param, "archive-name")?.to_owned();
@@ -409,7 +439,7 @@ fn create_fixed_index(
     let mut path = env.backup_dir.relative_path();
     path.push(&archive_name);
 
-    let chunk_size = 4096*1024; // todo: ??
+    let chunk_size = 4096 * 1024; // todo: ??
 
     // do incremental backup if csum is set
     let mut reader = None;
@@ -436,8 +466,11 @@ fn create_fixed_index(
         let (old_csum, _) = index.compute_csum();
         let old_csum = hex::encode(&old_csum);
         if old_csum != csum {
-            bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
-                csum, old_csum);
+            bail!(
+                "expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
+                csum,
+                old_csum
+            );
         }
 
         reader = Some(index);
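
The hunk above rewraps the incremental-backup guard: the client's `reuse-csum` must equal the checksum computed from the last backup's index before that index is reused. The comparison in isolation, using only the hex crate (function and parameter names are illustrative):

    // A stored 32-byte digest is re-encoded and compared against the
    // client-supplied hex checksum; mismatch means no incremental reuse.
    fn may_reuse(stored: &[u8; 32], client_csum: &str) -> bool {
        hex::encode(stored) == client_csum
    }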
@@ -483,24 +516,28 @@ pub const API_METHOD_DYNAMIC_APPEND: ApiMethod = ApiMethod::new(
                     &IntegerSchema::new("Corresponding chunk offsets.")
                         .minimum(0)
                         .schema()
-                ).schema()
+                )
+                .schema()
             ),
         ]),
-    )
+    ),
 );
 
-fn dynamic_append (
+fn dynamic_append(
     param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let wid = required_integer_param(&param, "wid")? as usize;
     let digest_list = required_array_param(&param, "digest-list")?;
     let offset_list = required_array_param(&param, "offset-list")?;
 
     if offset_list.len() != digest_list.len() {
-        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
+        bail!(
+            "offset list has wrong length ({} != {})",
+            offset_list.len(),
+            digest_list.len()
+        );
     }
 
     let env: &BackupEnvironment = rpcenv.as_ref();
@@ -511,11 +548,16 @@ fn dynamic_append (
         let digest_str = item.as_str().unwrap();
         let digest = <[u8; 32]>::from_hex(digest_str)?;
         let offset = offset_list[i].as_u64().unwrap();
-        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
+        let size = env
+            .lookup_chunk(&digest)
+            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
 
         env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;
 
-        env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!(
+            "successfully added chunk {} to dynamic index {} (offset {}, size {})",
+            digest_str, wid, offset, size
+        ));
     }
 
     Ok(Value::Null)
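
Both append handlers first check that the offset and digest lists have equal length, then parse each digest with `<[u8; 32]>::from_hex`. That parsing step as a standalone sketch, assuming only the hex and anyhow crates:

    use anyhow::Error;
    use hex::FromHex;

    // A 64-character hex string decodes into a fixed 32-byte digest;
    // a wrong length or a non-hex character surfaces as an error.
    fn parse_digest(digest_str: &str) -> Result<[u8; 32], Error> {
        Ok(<[u8; 32]>::from_hex(digest_str)?)
    }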
@@ -548,24 +590,28 @@ pub const API_METHOD_FIXED_APPEND: ApiMethod = ApiMethod::new(
                     &IntegerSchema::new("Corresponding chunk offsets.")
                         .minimum(0)
                         .schema()
-                ).schema()
+                )
+                .schema()
             )
         ]),
-    )
+    ),
 );
 
-fn fixed_append (
+fn fixed_append(
     param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let wid = required_integer_param(&param, "wid")? as usize;
     let digest_list = required_array_param(&param, "digest-list")?;
     let offset_list = required_array_param(&param, "offset-list")?;
 
     if offset_list.len() != digest_list.len() {
-        bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
+        bail!(
+            "offset list has wrong length ({} != {})",
+            offset_list.len(),
+            digest_list.len()
+        );
     }
 
     let env: &BackupEnvironment = rpcenv.as_ref();
@@ -576,11 +622,16 @@ fn fixed_append (
         let digest_str = item.as_str().unwrap();
         let digest = <[u8; 32]>::from_hex(digest_str)?;
         let offset = offset_list[i].as_u64().unwrap();
-        let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
+        let size = env
+            .lookup_chunk(&digest)
+            .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
 
         env.fixed_writer_append_chunk(wid, offset, size, &digest)?;
 
-        env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+        env.debug(format!(
+            "successfully added chunk {} to fixed index {} (offset {}, size {})",
+            digest_str, wid, offset, size
+        ));
     }
 
     Ok(Value::Null)
@@ -603,28 +654,35 @@ pub const API_METHOD_CLOSE_DYNAMIC_INDEX: ApiMethod = ApiMethod::new(
             (
                 "chunk-count",
                 false,
-                &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
-                    .minimum(1)
-                    .schema()
+                &IntegerSchema::new(
+                    "Chunk count. This is used to verify that the server got all chunks."
+                )
+                .minimum(1)
+                .schema()
             ),
             (
                 "size",
                 false,
-                &IntegerSchema::new("File size. This is used to verify that the server got all data.")
-                    .minimum(1)
-                    .schema()
+                &IntegerSchema::new(
+                    "File size. This is used to verify that the server got all data."
+                )
+                .minimum(1)
+                .schema()
+            ),
+            (
+                "csum",
+                false,
+                &StringSchema::new("Digest list checksum.").schema()
             ),
-            ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
         ]),
-    )
+    ),
 );
 
-fn close_dynamic_index (
+fn close_dynamic_index(
     param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let wid = required_integer_param(&param, "wid")? as usize;
     let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
     let size = required_integer_param(&param, "size")? as u64;
@@ -673,12 +731,11 @@ pub const API_METHOD_CLOSE_FIXED_INDEX: ApiMethod = ApiMethod::new(
     )
 );
 
-fn close_fixed_index (
+fn close_fixed_index(
     param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let wid = required_integer_param(&param, "wid")? as usize;
     let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
     let size = required_integer_param(&param, "size")? as u64;
@@ -694,12 +751,11 @@ fn close_fixed_index (
     Ok(Value::Null)
 }
 
-fn finish_backup (
+fn finish_backup(
     _param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let env: &BackupEnvironment = rpcenv.as_ref();
 
     env.finish_backup()?;
@@ -711,10 +767,7 @@ fn finish_backup (
 #[sortable]
 pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
     &ApiHandler::Sync(&get_previous_backup_time),
-    &ObjectSchema::new(
-        "Get previous backup time.",
-        &[],
-    )
+    &ObjectSchema::new("Get previous backup time.", &[]),
 );
 
 fn get_previous_backup_time(
@@ -722,10 +775,12 @@ fn get_previous_backup_time(
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let env: &BackupEnvironment = rpcenv.as_ref();
 
-    let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());
+    let backup_time = env
+        .last_backup
+        .as_ref()
+        .map(|info| info.backup_dir.backup_time());
 
     Ok(json!(backup_time))
 }
@@ -735,10 +790,8 @@ pub const API_METHOD_DOWNLOAD_PREVIOUS: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&download_previous),
     &ObjectSchema::new(
         "Download archive from previous backup.",
-        &sorted!([
-            ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)
-        ]),
-    )
+        &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)]),
+    ),
 );
 
 fn download_previous(
@@ -748,7 +801,6 @@ fn download_previous(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let env: &BackupEnvironment = rpcenv.as_ref();
 
@@ -772,10 +824,13 @@ fn download_previous(
                     let index = env.datastore.open_dynamic_reader(&path)?;
                     Some(Box::new(index))
                 }
-                _ => { None }
+                _ => None,
             };
             if let Some(index) = index {
-                env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+                env.log(format!(
+                    "register chunks in '{}' from previous backup.",
+                    archive_name
+                ));
 
                 for pos in 0..index.index_count() {
                     let info = index.chunk_info(pos).unwrap();
@@ -787,5 +842,6 @@ fn download_previous(
 
         env.log(format!("download '{}' from previous backup.", archive_name));
         crate::api2::helpers::create_download_response(path).await
-    }.boxed()
+    }
+    .boxed()
 }
index 35f430ed658febbe755fa61b8367cfe03d06b962..a1b1061145e46e9366f3c98bf7421e97a60a98b4 100644 (file)
@@ -4,19 +4,19 @@ use std::task::{Context, Poll};
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
-use hyper::Body;
+use hex::FromHex;
 use hyper::http::request::Parts;
+use hyper::Body;
 use serde_json::{json, Value};
-use hex::FromHex;
 
-use proxmox_sys::sortable;
-use proxmox_router::{ApiResponseFuture, ApiHandler, ApiMethod, RpcEnvironment};
+use proxmox_router::{ApiHandler, ApiMethod, ApiResponseFuture, RpcEnvironment};
 use proxmox_schema::*;
+use proxmox_sys::sortable;
 
-use pbs_datastore::{DataStore, DataBlob};
+use pbs_api_types::{BACKUP_ARCHIVE_NAME_SCHEMA, CHUNK_DIGEST_SCHEMA};
 use pbs_datastore::file_formats::{DataBlobHeader, EncryptedDataBlobHeader};
+use pbs_datastore::{DataBlob, DataStore};
 use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_api_types::{CHUNK_DIGEST_SCHEMA, BACKUP_ARCHIVE_NAME_SCHEMA};
 
 use super::environment::*;
 
@@ -30,8 +30,21 @@ pub struct UploadChunk {
 }
 
 impl UploadChunk {
-    pub fn new(stream: Body,  store: Arc<DataStore>, digest: [u8; 32], size: u32, encoded_size: u32) -> Self {
-        Self { stream, store, size, encoded_size, raw_data: Some(vec![]), digest }
+    pub fn new(
+        stream: Body,
+        store: Arc<DataStore>,
+        digest: [u8; 32],
+        size: u32,
+        encoded_size: u32,
+    ) -> Self {
+        Self {
+            stream,
+            store,
+            size,
+            encoded_size,
+            raw_data: Some(vec![]),
+            digest,
+        }
     }
 }
 
@@ -77,7 +90,12 @@ impl Future for UploadChunk {
                             Err(err) => break err,
                         };
 
-                        return Poll::Ready(Ok((this.digest, this.size, compressed_size as u32, is_duplicate)))
+                        return Poll::Ready(Ok((
+                            this.digest,
+                            this.size,
+                            compressed_size as u32,
+                            is_duplicate,
+                        )));
                     } else {
                         break format_err!("poll upload chunk stream failed - already finished.");
                     }
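
`UploadChunk` implements `Future` by hand: its `poll` loops over the inner body stream and breaks out with `Poll::Ready` once the chunk is stored. A minimal hand-rolled future in the same shape; this one resolves immediately, whereas a real one would return `Poll::Pending` until its stream is exhausted:

    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    struct Ready(u32);

    impl Future for Ready {
        type Output = u32;

        // Called by the executor; a real poll would drive an inner
        // stream here and rely on the waker before returning Pending.
        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> {
            Poll::Ready(self.0)
        }
    }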
@@ -94,24 +112,36 @@ pub const API_METHOD_UPLOAD_FIXED_CHUNK: ApiMethod = ApiMethod::new(
     &ObjectSchema::new(
         "Upload a new chunk.",
         &sorted!([
-            ("wid", false, &IntegerSchema::new("Fixed writer ID.")
-             .minimum(1)
-             .maximum(256)
-             .schema()
+            (
+                "wid",
+                false,
+                &IntegerSchema::new("Fixed writer ID.")
+                    .minimum(1)
+                    .maximum(256)
+                    .schema()
             ),
             ("digest", false, &CHUNK_DIGEST_SCHEMA),
-            ("size", false, &IntegerSchema::new("Chunk size.")
-             .minimum(1)
-             .maximum(1024*1024*16)
-             .schema()
+            (
+                "size",
+                false,
+                &IntegerSchema::new("Chunk size.")
+                    .minimum(1)
+                    .maximum(1024 * 1024 * 16)
+                    .schema()
             ),
-            ("encoded-size", false, &IntegerSchema::new("Encoded chunk size.")
-             .minimum((std::mem::size_of::<DataBlobHeader>() as isize)+1)
-             .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
-             .schema()
+            (
+                "encoded-size",
+                false,
+                &IntegerSchema::new("Encoded chunk size.")
+                    .minimum((std::mem::size_of::<DataBlobHeader>() as isize) + 1)
+                    .maximum(
+                        1024 * 1024 * 16
+                            + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
+                    )
+                    .schema()
             ),
         ]),
-    )
+    ),
 );
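
The `encoded-size` bounds above are plain arithmetic: at least one payload byte after a plain `DataBlobHeader`, and at most 16 MiB (1024 * 1024 * 16 bytes) of chunk data plus the larger encrypted header. A sketch with stand-in header types; the field layouts here are illustrative, the real definitions live in `pbs_datastore::file_formats`:

    use std::mem::size_of;

    // Stand-ins for DataBlobHeader / EncryptedDataBlobHeader.
    #[repr(C)]
    struct PlainHeader { magic: [u8; 8], crc: [u8; 4] }
    #[repr(C)]
    struct EncryptedHeader { head: PlainHeader, iv: [u8; 16], tag: [u8; 16] }

    fn encoded_size_bounds() -> (isize, isize) {
        let min = size_of::<PlainHeader>() as isize + 1;
        let max = 1024 * 1024 * 16 + size_of::<EncryptedHeader>() as isize;
        (min, max)
    }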
 
 fn upload_fixed_chunk(
@@ -121,7 +151,6 @@ fn upload_fixed_chunk(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let wid = required_integer_param(&param, "wid")? as usize;
         let size = required_integer_param(&param, "size")? as u32;
@@ -152,24 +181,36 @@ pub const API_METHOD_UPLOAD_DYNAMIC_CHUNK: ApiMethod = ApiMethod::new(
     &ObjectSchema::new(
         "Upload a new chunk.",
         &sorted!([
-            ("wid", false, &IntegerSchema::new("Dynamic writer ID.")
-             .minimum(1)
-             .maximum(256)
-             .schema()
+            (
+                "wid",
+                false,
+                &IntegerSchema::new("Dynamic writer ID.")
+                    .minimum(1)
+                    .maximum(256)
+                    .schema()
             ),
             ("digest", false, &CHUNK_DIGEST_SCHEMA),
-            ("size", false, &IntegerSchema::new("Chunk size.")
-             .minimum(1)
-             .maximum(1024*1024*16)
-             .schema()
+            (
+                "size",
+                false,
+                &IntegerSchema::new("Chunk size.")
+                    .minimum(1)
+                    .maximum(1024 * 1024 * 16)
+                    .schema()
             ),
-            ("encoded-size", false, &IntegerSchema::new("Encoded chunk size.")
-             .minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
-             .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
-             .schema()
+            (
+                "encoded-size",
+                false,
+                &IntegerSchema::new("Encoded chunk size.")
+                    .minimum((std::mem::size_of::<DataBlobHeader>() as isize) + 1)
+                    .maximum(
+                        1024 * 1024 * 16
+                            + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
+                    )
+                    .schema()
             ),
         ]),
-    )
+    ),
 );
 
 fn upload_dynamic_chunk(
@@ -179,7 +220,6 @@ fn upload_dynamic_chunk(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let wid = required_integer_param(&param, "wid")? as usize;
         let size = required_integer_param(&param, "size")? as u32;
@@ -191,8 +231,7 @@ fn upload_dynamic_chunk(
         let env: &BackupEnvironment = rpcenv.as_ref();
 
         let (digest, size, compressed_size, is_duplicate) =
-            UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size)
-            .await?;
+            UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;
 
         env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
         let digest_str = hex::encode(&digest);
@@ -200,12 +239,13 @@ fn upload_dynamic_chunk(
 
         let result = Ok(json!(digest_str));
         Ok(env.format_response(result))
-    }.boxed()
+    }
+    .boxed()
 }
 
 pub const API_METHOD_UPLOAD_SPEEDTEST: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&upload_speedtest),
-    &ObjectSchema::new("Test upload speed.", &[])
+    &ObjectSchema::new("Test upload speed.", &[]),
 );
 
 fn upload_speedtest(
@@ -215,9 +255,7 @@ fn upload_speedtest(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
-
         let result = req_body
             .map_err(Error::from)
             .try_fold(0, |size: usize, chunk| {
@@ -237,7 +275,8 @@ fn upload_speedtest(
         }
         let env: &BackupEnvironment = rpcenv.as_ref();
         Ok(env.format_response(Ok(Value::Null)))
-    }.boxed()
+    }
+    .boxed()
 }
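
The speedtest handler folds the request body stream into a byte count with `TryStreamExt::try_fold`. The same accumulation over an in-memory stream, as a self-contained sketch using the futures and anyhow crates:

    use anyhow::Error;
    use futures::{stream, StreamExt, TryStreamExt};

    // Sum the lengths of a stream of byte chunks, mirroring how the
    // handler above counts the uploaded bytes.
    async fn total_len() -> Result<usize, Error> {
        stream::iter(vec![vec![0u8; 4], vec![0u8; 8]])
            .map(Ok::<_, Error>)
            .try_fold(0usize, |size, chunk| async move { Ok(size + chunk.len()) })
            .await
    }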
 
 #[sortable]
@@ -247,13 +286,19 @@ pub const API_METHOD_UPLOAD_BLOB: ApiMethod = ApiMethod::new(
         "Upload binary blob file.",
         &sorted!([
             ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
-            ("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
-             .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
-             .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
-             .schema()
+            (
+                "encoded-size",
+                false,
+                &IntegerSchema::new("Encoded blob size.")
+                    .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
+                    .maximum(
+                        1024 * 1024 * 16
+                            + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
+                    )
+                    .schema()
             )
         ]),
-    )
+    ),
 );
 
 fn upload_blob(
@@ -263,7 +308,6 @@ fn upload_blob(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let file_name = required_string_param(&param, "file-name")?.to_owned();
         let encoded_size = required_integer_param(&param, "encoded-size")? as usize;
@@ -283,11 +327,16 @@ fn upload_blob(
             .await?;
 
         if encoded_size != data.len() {
-            bail!("got blob with unexpected length ({} != {})", encoded_size, data.len());
+            bail!(
+                "got blob with unexpected length ({} != {})",
+                encoded_size,
+                data.len()
+            );
         }
 
         env.add_blob(&file_name, data)?;
 
         Ok(env.format_response(Ok(Value::Null)))
-    }.boxed()
+    }
+    .boxed()
 }
index d76a28dc50dbfdb1c384d0a691bea1458f92201f..a813646c48a4ba30a5e87fdef3c7feb14503ed7c 100644 (file)
@@ -1,15 +1,12 @@
-use proxmox_router::{Router, SubdirMap};
 use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
 use proxmox_sys::sortable;
 
-pub mod tfa;
 pub mod openid;
+pub mod tfa;
 
 #[sortable]
-const SUBDIRS: SubdirMap = &sorted!([
-    ("openid", &openid::ROUTER),
-    ("tfa", &tfa::ROUTER),
-]);
+const SUBDIRS: SubdirMap = &sorted!([("openid", &openid::ROUTER), ("tfa", &tfa::ROUTER),]);
 
 pub const ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
index 83112ce720592bcbdc2892979a22d876d3a318c4..eb6cb8cb57c2926b8624d69415b25260eccacbcc 100644 (file)
@@ -1,16 +1,15 @@
+use ::serde::{Deserialize, Serialize};
 /// Configure OpenId realms
-
 use anyhow::Error;
-use serde_json::Value;
-use ::serde::{Deserialize, Serialize};
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
-    OpenIdRealmConfig, OpenIdRealmConfigUpdater,
-    PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_REALM_ALLOCATE,
+    OpenIdRealmConfig, OpenIdRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
+    PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA,
 };
 
 use pbs_config::domains;
@@ -33,7 +32,6 @@ pub fn list_openid_realms(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<OpenIdRealmConfig>, Error> {
-
     let (config, digest) = domains::config()?;
 
     let list = config.convert_to_typed_array("openid")?;
@@ -59,14 +57,13 @@ pub fn list_openid_realms(
 )]
 /// Create a new OpenId realm
 pub fn create_openid_realm(config: OpenIdRealmConfig) -> Result<(), Error> {
-
     let _lock = domains::lock_config()?;
 
     let (mut domains, _digest) = domains::config()?;
 
-    if config.realm == "pbs" ||
-        config.realm == "pam" ||
-        domains.sections.get(&config.realm).is_some()
+    if config.realm == "pbs"
+        || config.realm == "pam"
+        || domains.sections.get(&config.realm).is_some()
     {
         param_bail!("realm", "realm '{}' already exists.", config.realm);
     }
@@ -101,7 +98,6 @@ pub fn delete_openid_realm(
     digest: Option<String>,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let _lock = domains::lock_config()?;
 
     let (mut domains, expected_digest) = domains::config()?;
@@ -111,7 +107,7 @@ pub fn delete_openid_realm(
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }
 
-    if domains.sections.remove(&realm).is_none()  {
+    if domains.sections.remove(&realm).is_none() {
         http_bail!(NOT_FOUND, "realm '{}' does not exist.", realm);
     }
 
@@ -138,7 +134,6 @@ pub fn read_openid_realm(
     realm: String,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<OpenIdRealmConfig, Error> {
-
     let (domains, digest) = domains::config()?;
 
     let config = domains.lookup("openid", &realm)?;
@@ -150,7 +145,7 @@ pub fn read_openid_realm(
 
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 #[allow(non_camel_case_types)]
 /// Deletable property name
 pub enum DeletableProperty {
@@ -206,7 +201,6 @@ pub fn update_openid_realm(
     digest: Option<String>,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let _lock = domains::lock_config()?;
 
     let (mut domains, expected_digest) = domains::config()?;
@@ -221,12 +215,24 @@ pub fn update_openid_realm(
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::client_key => { config.client_key = None; },
-                DeletableProperty::comment => { config.comment = None; },
-                DeletableProperty::autocreate => { config.autocreate = None; },
-                DeletableProperty::scopes => { config.scopes = None; },
-                DeletableProperty::prompt => { config.prompt = None; },
-                DeletableProperty::acr_values => { config.acr_values = None; },
+                DeletableProperty::client_key => {
+                    config.client_key = None;
+                }
+                DeletableProperty::comment => {
+                    config.comment = None;
+                }
+                DeletableProperty::autocreate => {
+                    config.autocreate = None;
+                }
+                DeletableProperty::scopes => {
+                    config.scopes = None;
+                }
+                DeletableProperty::prompt => {
+                    config.prompt = None;
+                }
+                DeletableProperty::acr_values => {
+                    config.acr_values = None;
+                }
             }
         }
     }
@@ -240,14 +246,28 @@ pub fn update_openid_realm(
         }
     }
 
-    if let Some(issuer_url) = update.issuer_url { config.issuer_url = issuer_url; }
-    if let Some(client_id) = update.client_id { config.client_id = client_id; }
+    if let Some(issuer_url) = update.issuer_url {
+        config.issuer_url = issuer_url;
+    }
+    if let Some(client_id) = update.client_id {
+        config.client_id = client_id;
+    }
 
-    if update.client_key.is_some() { config.client_key = update.client_key; }
-    if update.autocreate.is_some() { config.autocreate = update.autocreate; }
-    if update.scopes.is_some() { config.scopes = update.scopes; }
-    if update.prompt.is_some() { config.prompt = update.prompt; }
-    if update.acr_values.is_some() { config.acr_values = update.acr_values; }
+    if update.client_key.is_some() {
+        config.client_key = update.client_key;
+    }
+    if update.autocreate.is_some() {
+        config.autocreate = update.autocreate;
+    }
+    if update.scopes.is_some() {
+        config.scopes = update.scopes;
+    }
+    if update.prompt.is_some() {
+        config.prompt = update.prompt;
+    }
+    if update.acr_values.is_some() {
+        config.acr_values = update.acr_values;
+    }
 
     domains.set_data(&realm, "openid", &config)?;
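
The update handlers all follow the same merge rule, which the reformatting expands into one block per field: a `Some(_)` in the updater overwrites the stored value, while `None` leaves it untouched. Reduced to plain structs (illustrative types, not the crate's):

    struct Cfg { scopes: Option<String>, prompt: Option<String> }
    struct CfgUpdater { scopes: Option<String>, prompt: Option<String> }

    fn apply(cfg: &mut Cfg, update: CfgUpdater) {
        if update.scopes.is_some() {
            cfg.scopes = update.scopes;
        }
        if update.prompt.is_some() {
            cfg.prompt = update.prompt;
        }
    }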
 
index 9b586e002ebde90c80ebbd895fc2d4a862ff95d8..79540f022fc1a81bc730f21484b13362be88eeb1 100644 (file)
@@ -5,10 +5,10 @@ use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
 
 use anyhow::{bail, format_err, Error};
+use hex::FromHex;
 use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
-use hex::FromHex;
 
 use proxmox_router::{
     http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
index 4fc654a96c6268aa8f60596cfba61bbfca22629e..e00068578ce39731dcd3dfa7c28b14d990ecc857 100644 (file)
@@ -1,18 +1,17 @@
-use anyhow::Error;
 use ::serde::{Deserialize, Serialize};
-use serde_json::Value;
+use anyhow::Error;
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
-    Authid, ScsiTapeChanger, ScsiTapeChangerUpdater, LtoTapeDrive,
-    PROXMOX_CONFIG_DIGEST_SCHEMA, CHANGER_NAME_SCHEMA, SLOT_ARRAY_SCHEMA,
-    PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+    Authid, LtoTapeDrive, ScsiTapeChanger, ScsiTapeChangerUpdater, CHANGER_NAME_SCHEMA,
+    PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, SLOT_ARRAY_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
-use pbs_tape::linux_list_drives::{linux_tape_changer_list, check_drive_path};
+use pbs_tape::linux_list_drives::{check_drive_path, linux_tape_changer_list};
 
 #[api(
     protected: true,
@@ -30,7 +29,6 @@ use pbs_tape::linux_list_drives::{linux_tape_changer_list, check_drive_path};
 )]
 /// Create a new changer device
 pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
-
     let _lock = pbs_config::drive::lock()?;
 
     let (mut section_config, _digest) = pbs_config::drive::config()?;
@@ -47,7 +45,12 @@ pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
         }
 
         if changer.path == config.path {
-            param_bail!("path", "Path '{}' already in use by '{}'", config.path, changer.name);
+            param_bail!(
+                "path",
+                "Path '{}' already in use by '{}'",
+                config.path,
+                changer.name
+            );
         }
     }
 
@@ -79,7 +82,6 @@ pub fn get_config(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<ScsiTapeChanger, Error> {
-
     let (config, digest) = pbs_config::drive::config()?;
 
     let data: ScsiTapeChanger = config.lookup("changer", &name)?;
@@ -176,7 +178,6 @@ pub fn update_changer(
     digest: Option<String>,
     _param: Value,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::drive::lock()?;
 
     let (mut config, expected_digest) = pbs_config::drive::config()?;
@@ -244,7 +245,6 @@ pub fn update_changer(
 )]
 /// Delete a tape changer configuration
 pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
-
     let _lock = pbs_config::drive::lock()?;
 
     let (mut config, _digest) = pbs_config::drive::config()?;
@@ -252,18 +252,31 @@ pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
     match config.sections.get(&name) {
         Some((section_type, _)) => {
             if section_type != "changer" {
-                param_bail!("name", "Entry '{}' exists, but is not a changer device", name);
+                param_bail!(
+                    "name",
+                    "Entry '{}' exists, but is not a changer device",
+                    name
+                );
             }
             config.sections.remove(&name);
-        },
-        None => http_bail!(NOT_FOUND, "Delete changer '{}' failed - no such entry", name),
+        }
+        None => http_bail!(
+            NOT_FOUND,
+            "Delete changer '{}' failed - no such entry",
+            name
+        ),
     }
 
     let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
     for drive in drive_list {
         if let Some(changer) = drive.changer {
             if changer == name {
-                param_bail!("name", "Delete changer '{}' failed - used by drive '{}'", name, drive.name);
+                param_bail!(
+                    "name",
+                    "Delete changer '{}' failed - used by drive '{}'",
+                    name,
+                    drive.name
+                );
             }
         }
     }
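
The delete path above looks the entry up in the section config, refuses it when the section type is not "changer", and only then removes it. The same three-way decision over a plain map, as a sketch with illustrative value types:

    use std::collections::HashMap;
    use anyhow::{bail, Error};

    fn delete_entry(
        sections: &mut HashMap<String, (String, String)>,
        name: &str,
    ) -> Result<(), Error> {
        match sections.get(name) {
            Some((section_type, _)) => {
                if section_type != "changer" {
                    bail!("entry '{}' exists, but is not a changer device", name);
                }
                sections.remove(name);
            }
            None => bail!("delete '{}' failed - no such entry", name),
        }
        Ok(())
    }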
@@ -278,7 +291,6 @@ const ITEM_ROUTER: Router = Router::new()
     .put(&API_METHOD_UPDATE_CHANGER)
     .delete(&API_METHOD_DELETE_CHANGER);
 
-
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_CHANGERS)
     .post(&API_METHOD_CREATE_CHANGER)
index 686f337eda85e7920eaa0f226b7d2f30504046ad..09dda89fea1150d4284003a885db9358020d19a2 100644 (file)
@@ -1,31 +1,27 @@
 use std::path::PathBuf;
 
-use anyhow::Error;
-use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::{api, param_bail, ApiType};
 use proxmox_section_config::SectionConfigData;
 use proxmox_sys::WorkerTaskContext;
 
-use pbs_datastore::chunk_store::ChunkStore;
-use pbs_config::BackupLockGuard;
 use pbs_api_types::{
-    Authid, DatastoreNotify,
-    DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
+    Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DATASTORE_SCHEMA,
     PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
-    DataStoreConfig, DataStoreConfigUpdater,
+    PROXMOX_CONFIG_DIGEST_SCHEMA,
 };
+use pbs_config::BackupLockGuard;
+use pbs_datastore::chunk_store::ChunkStore;
 
+use crate::api2::admin::{sync::list_sync_jobs, verify::list_verification_jobs};
 use crate::api2::config::sync::delete_sync_job;
+use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
 use crate::api2::config::verify::delete_verification_job;
-use crate::api2::config::tape_backup_job::{list_tape_backup_jobs, delete_tape_backup_job};
-use crate::api2::admin::{
-    sync::list_sync_jobs,
-    verify::list_verification_jobs,
-};
 use pbs_config::CachedUserInfo;
 
 use proxmox_rest_server::WorkerTask;
@@ -50,7 +46,6 @@ pub fn list_datastores(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<DataStoreConfig>, Error> {
-
     let (config, digest) = pbs_config::datastore::config()?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -58,7 +53,7 @@ pub fn list_datastores(
 
     rpcenv["digest"] = hex::encode(&digest).into();
 
-    let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
+    let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
     let filter_by_privs = |store: &DataStoreConfig| {
         let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
         (user_privs & PRIV_DATASTORE_AUDIT) != 0
@@ -76,7 +71,13 @@ pub(crate) fn do_create_datastore(
     let path: PathBuf = datastore.path.clone().into();
 
     let backup_user = pbs_config::backup_user()?;
-    let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid, worker)?;
+    let _store = ChunkStore::create(
+        &datastore.name,
+        path,
+        backup_user.uid,
+        backup_user.gid,
+        worker,
+    )?;
 
     config.set_data(&datastore.name, "datastore", &datastore)?;
 
@@ -107,7 +108,6 @@ pub fn create_datastore(
     config: DataStoreConfig,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let lock = pbs_config::datastore::lock_config()?;
 
     let (section_config, _digest) = pbs_config::datastore::config()?;
@@ -124,7 +124,7 @@ pub fn create_datastore(
         Some(config.name.to_string()),
         auth_id.to_string(),
         to_stdout,
-       move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
+        move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
     )
 }
 
@@ -156,7 +156,7 @@ pub fn read_datastore(
 
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 #[allow(non_camel_case_types)]
 /// Deletable property name
 pub enum DeletableProperty {
@@ -226,7 +226,6 @@ pub fn update_datastore(
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::datastore::lock_config()?;
 
     // pass/compare digest
@@ -239,23 +238,51 @@ pub fn update_datastore(
 
     let mut data: DataStoreConfig = config.lookup("datastore", &name)?;
 
-     if let Some(delete) = delete {
+    if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::comment => { data.comment = None; },
-                DeletableProperty::gc_schedule => { data.gc_schedule = None; },
-                DeletableProperty::prune_schedule => { data.prune_schedule = None; },
-                DeletableProperty::keep_last => { data.keep_last = None; },
-                DeletableProperty::keep_hourly => { data.keep_hourly = None; },
-                DeletableProperty::keep_daily => { data.keep_daily = None; },
-                DeletableProperty::keep_weekly => { data.keep_weekly = None; },
-                DeletableProperty::keep_monthly => { data.keep_monthly = None; },
-                DeletableProperty::keep_yearly => { data.keep_yearly = None; },
-                DeletableProperty::verify_new => { data.verify_new = None; },
-                DeletableProperty::notify => { data.notify = None; },
-                DeletableProperty::notify_user => { data.notify_user = None; },
-                DeletableProperty::tuning => { data.tuning = None; },
-                DeletableProperty::maintenance_mode => { data.maintenance_mode = None; },
+                DeletableProperty::comment => {
+                    data.comment = None;
+                }
+                DeletableProperty::gc_schedule => {
+                    data.gc_schedule = None;
+                }
+                DeletableProperty::prune_schedule => {
+                    data.prune_schedule = None;
+                }
+                DeletableProperty::keep_last => {
+                    data.keep_last = None;
+                }
+                DeletableProperty::keep_hourly => {
+                    data.keep_hourly = None;
+                }
+                DeletableProperty::keep_daily => {
+                    data.keep_daily = None;
+                }
+                DeletableProperty::keep_weekly => {
+                    data.keep_weekly = None;
+                }
+                DeletableProperty::keep_monthly => {
+                    data.keep_monthly = None;
+                }
+                DeletableProperty::keep_yearly => {
+                    data.keep_yearly = None;
+                }
+                DeletableProperty::verify_new => {
+                    data.verify_new = None;
+                }
+                DeletableProperty::notify => {
+                    data.notify = None;
+                }
+                DeletableProperty::notify_user => {
+                    data.notify_user = None;
+                }
+                DeletableProperty::tuning => {
+                    data.tuning = None;
+                }
+                DeletableProperty::maintenance_mode => {
+                    data.maintenance_mode = None;
+                }
             }
         }
     }
@@ -281,29 +308,54 @@ pub fn update_datastore(
         data.prune_schedule = update.prune_schedule;
     }
 
-    if update.keep_last.is_some() { data.keep_last = update.keep_last; }
-    if update.keep_hourly.is_some() { data.keep_hourly = update.keep_hourly; }
-    if update.keep_daily.is_some() { data.keep_daily = update.keep_daily; }
-    if update.keep_weekly.is_some() { data.keep_weekly = update.keep_weekly; }
-    if update.keep_monthly.is_some() { data.keep_monthly = update.keep_monthly; }
-    if update.keep_yearly.is_some() { data.keep_yearly = update.keep_yearly; }
+    if update.keep_last.is_some() {
+        data.keep_last = update.keep_last;
+    }
+    if update.keep_hourly.is_some() {
+        data.keep_hourly = update.keep_hourly;
+    }
+    if update.keep_daily.is_some() {
+        data.keep_daily = update.keep_daily;
+    }
+    if update.keep_weekly.is_some() {
+        data.keep_weekly = update.keep_weekly;
+    }
+    if update.keep_monthly.is_some() {
+        data.keep_monthly = update.keep_monthly;
+    }
+    if update.keep_yearly.is_some() {
+        data.keep_yearly = update.keep_yearly;
+    }
 
     if let Some(notify_str) = update.notify {
         let value = DatastoreNotify::API_SCHEMA.parse_property_string(&notify_str)?;
         let notify: DatastoreNotify = serde_json::from_value(value)?;
-        if let  DatastoreNotify { gc: None, verify: None, sync: None } = notify {
+        if let DatastoreNotify {
+            gc: None,
+            verify: None,
+            sync: None,
+        } = notify
+        {
             data.notify = None;
         } else {
             data.notify = Some(notify_str);
         }
     }
-    if update.verify_new.is_some() { data.verify_new = update.verify_new; }
+    if update.verify_new.is_some() {
+        data.verify_new = update.verify_new;
+    }
 
-    if update.notify_user.is_some() { data.notify_user = update.notify_user; }
+    if update.notify_user.is_some() {
+        data.notify_user = update.notify_user;
+    }
 
-    if update.tuning.is_some() { data.tuning = update.tuning; }
+    if update.tuning.is_some() {
+        data.tuning = update.tuning;
+    }
 
-    if update.maintenance_mode.is_some() { data.maintenance_mode = update.maintenance_mode; }
+    if update.maintenance_mode.is_some() {
+        data.maintenance_mode = update.maintenance_mode;
+    }
 
     config.set_data(&name, "datastore", &data)?;
 
@@ -352,7 +404,6 @@ pub async fn delete_datastore(
     digest: Option<String>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::datastore::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::datastore::config()?;
@@ -363,7 +414,9 @@ pub async fn delete_datastore(
     }
 
     match config.sections.get(&name) {
-        Some(_) => { config.sections.remove(&name); },
+        Some(_) => {
+            config.sections.remove(&name);
+        }
         None => http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name),
     }
 
@@ -376,7 +429,10 @@ pub async fn delete_datastore(
         }
 
         let tape_jobs = list_tape_backup_jobs(Value::Null, rpcenv)?;
-        for job_config in  tape_jobs.into_iter().filter(|config| config.setup.store == name) {
+        for job_config in tape_jobs
+            .into_iter()
+            .filter(|config| config.setup.store == name)
+        {
             delete_tape_backup_job(job_config.id, None, rpcenv)?;
         }
     }
index 370c5a940b88413e10aa2b02d6794429a672c69b..f24b7953ab4ce20f33cf9aee8b76309b779d616c 100644 (file)
@@ -1,18 +1,18 @@
-use anyhow::{format_err, Error};
 use ::serde::{Deserialize, Serialize};
-use serde_json::Value;
+use anyhow::{format_err, Error};
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
-    Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger,
-    PROXMOX_CONFIG_DIGEST_SCHEMA, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+    Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT,
+    PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
 
-use pbs_tape::linux_list_drives::{lto_tape_device_list, check_drive_path};
+use pbs_tape::linux_list_drives::{check_drive_path, lto_tape_device_list};
 
 #[api(
     protected: true,
@@ -30,7 +30,6 @@ use pbs_tape::linux_list_drives::{lto_tape_device_list, check_drive_path};
 )]
 /// Create a new drive
 pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
-
     let _lock = pbs_config::drive::lock()?;
 
     let (mut section_config, _digest) = pbs_config::drive::config()?;
@@ -46,7 +45,12 @@ pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
             param_bail!("name", "Entry '{}' already exists", config.name);
         }
         if drive.path == config.path {
-            param_bail!("path", "Path '{}' already used in drive '{}'", config.path, drive.name);
+            param_bail!(
+                "path",
+                "Path '{}' already used in drive '{}'",
+                config.path,
+                drive.name
+            );
         }
     }
 
@@ -78,7 +82,6 @@ pub fn get_config(
     _param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<LtoTapeDrive, Error> {
-
     let (config, digest) = pbs_config::drive::config()?;
 
     let data: LtoTapeDrive = config.lookup("lto", &name)?;
@@ -176,9 +179,8 @@ pub fn update_drive(
     update: LtoTapeDriveUpdater,
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
-   _param: Value,
+    _param: Value,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::drive::lock()?;
 
     let (mut config, expected_digest) = pbs_config::drive::config()?;
@@ -196,8 +198,10 @@ pub fn update_drive(
                 DeletableProperty::changer => {
                     data.changer = None;
                     data.changer_drivenum = None;
-                },
-                DeletableProperty::changer_drivenum => { data.changer_drivenum = None; },
+                }
+                DeletableProperty::changer_drivenum => {
+                    data.changer_drivenum = None;
+                }
             }
         }
     }
@@ -218,7 +222,10 @@ pub fn update_drive(
             data.changer_drivenum = None;
         } else {
             if data.changer.is_none() {
-                param_bail!("changer", format_err!("Option 'changer-drivenum' requires option 'changer'."));
+                param_bail!(
+                    "changer",
+                    format_err!("Option 'changer-drivenum' requires option 'changer'.")
+                );
             }
             data.changer_drivenum = Some(changer_drivenum);
         }
@@ -246,7 +253,6 @@ pub fn update_drive(
 )]
 /// Delete a drive configuration
 pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
-
     let _lock = pbs_config::drive::lock()?;
 
     let (mut config, _digest) = pbs_config::drive::config()?;
@@ -254,10 +260,14 @@ pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
     match config.sections.get(&name) {
         Some((section_type, _)) => {
             if section_type != "lto" {
-                param_bail!("name", "Entry '{}' exists, but is not a lto tape drive", name);
+                param_bail!(
+                    "name",
+                    "Entry '{}' exists, but is not a lto tape drive",
+                    name
+                );
             }
             config.sections.remove(&name);
-        },
+        }
         None => http_bail!(NOT_FOUND, "Delete drive '{}' failed - no such drive", name),
     }
 
@@ -271,7 +281,6 @@ const ITEM_ROUTER: Router = Router::new()
     .put(&API_METHOD_UPDATE_DRIVE)
     .delete(&API_METHOD_DELETE_DRIVE);
 
-
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_DRIVES)
     .post(&API_METHOD_CREATE_DRIVE)
index f350eaeafacc4b0e219bfdce2f0e32e8ee7c6f4d..93886f5154ebf8be32f4610353707874f1aadc6e 100644 (file)
@@ -1,12 +1,12 @@
-use anyhow::Error;
 use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
-    Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA,
-    PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+    Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA, PRIV_TAPE_AUDIT,
+    PRIV_TAPE_MODIFY,
 };
 
 use pbs_config::CachedUserInfo;
@@ -26,10 +26,7 @@ use pbs_config::CachedUserInfo;
     },
 )]
 /// Create a new media pool
-pub fn create_pool(
-    config: MediaPoolConfig,
-) -> Result<(), Error> {
-
+pub fn create_pool(config: MediaPoolConfig) -> Result<(), Error> {
     let _lock = pbs_config::media_pool::lock()?;
 
     let (mut section_config, _digest) = pbs_config::media_pool::config()?;
@@ -59,9 +56,7 @@ pub fn create_pool(
     },
 )]
 /// List media pools
-pub fn list_pools(
-    mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Vec<MediaPoolConfig>, Error> {
+pub fn list_pools(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<MediaPoolConfig>, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
 
@@ -69,7 +64,7 @@ pub fn list_pools(
 
     let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;
 
-     let list = list
+    let list = list
         .into_iter()
         .filter(|pool| {
             let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);
@@ -99,7 +94,6 @@ pub fn list_pools(
 )]
 /// Get media pool configuration
 pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
-
     let (config, _digest) = pbs_config::media_pool::config()?;
 
     let data: MediaPoolConfig = config.lookup("pool", &name)?;
@@ -155,7 +149,6 @@ pub fn update_pool(
     update: MediaPoolConfigUpdater,
     delete: Option<Vec<DeletableProperty>>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::media_pool::lock()?;
 
     let (mut config, _digest) = pbs_config::media_pool::config()?;
@@ -165,19 +158,37 @@ pub fn update_pool(
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::allocation => { data.allocation = None; },
-                DeletableProperty::retention => { data.retention = None; },
-                DeletableProperty::template => { data.template = None; },
-                DeletableProperty::encrypt => { data.encrypt = None; },
-                DeletableProperty::comment => { data.comment = None; },
+                DeletableProperty::allocation => {
+                    data.allocation = None;
+                }
+                DeletableProperty::retention => {
+                    data.retention = None;
+                }
+                DeletableProperty::template => {
+                    data.template = None;
+                }
+                DeletableProperty::encrypt => {
+                    data.encrypt = None;
+                }
+                DeletableProperty::comment => {
+                    data.comment = None;
+                }
             }
         }
     }
 
-    if update.allocation.is_some() { data.allocation = update.allocation; }
-    if update.retention.is_some() { data.retention = update.retention; }
-    if update.template.is_some() { data.template = update.template; }
-    if update.encrypt.is_some() { data.encrypt = update.encrypt; }
+    if update.allocation.is_some() {
+        data.allocation = update.allocation;
+    }
+    if update.retention.is_some() {
+        data.retention = update.retention;
+    }
+    if update.template.is_some() {
+        data.template = update.template;
+    }
+    if update.encrypt.is_some() {
+        data.encrypt = update.encrypt;
+    }
 
     if let Some(comment) = update.comment {
         let comment = comment.trim();
@@ -210,13 +221,14 @@ pub fn update_pool(
 )]
 /// Delete a media pool configuration
 pub fn delete_pool(name: String) -> Result<(), Error> {
-
     let _lock = pbs_config::media_pool::lock()?;
 
     let (mut config, _digest) = pbs_config::media_pool::config()?;
 
     match config.sections.get(&name) {
-        Some(_) => { config.sections.remove(&name); },
+        Some(_) => {
+            config.sections.remove(&name);
+        }
         None => http_bail!(NOT_FOUND, "delete pool '{}' failed - no such pool", name),
     }
 
@@ -230,7 +242,6 @@ const ITEM_ROUTER: Router = Router::new()
     .put(&API_METHOD_UPDATE_POOL)
     .delete(&API_METHOD_DELETE_POOL);
 
-
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_POOLS)
     .post(&API_METHOD_CREATE_POOL)
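
The shape of the media-pool hunks above recurs through the whole commit: one-line match arms and one-line `if` bodies are expanded into block form, which is rustfmt's default style. A minimal, self-contained sketch of the same optional-update idiom (hypothetical Config/Update types, not the repo's):

#[derive(Debug, Default)]
struct Config {
    comment: Option<String>,
    retention: Option<String>,
}

#[derive(Default)]
struct Update {
    comment: Option<String>,
    retention: Option<String>,
}

fn apply(config: &mut Config, update: Update) {
    // rustfmt rewrites `if update.comment.is_some() { config.comment = update.comment; }`
    // into the block form seen throughout this commit:
    if update.comment.is_some() {
        config.comment = update.comment;
    }
    if update.retention.is_some() {
        config.retention = update.retention;
    }
}

fn main() {
    let mut cfg = Config::default();
    apply(
        &mut cfg,
        Update {
            comment: Some("media pool for weekly tapes".into()),
            ..Default::default()
        },
    );
    println!("{:?}", cfg);
}
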
index c256ba646ba8d46ce1e3addfd1ff01d818775fc6..f36474ed80978a08d66939efdee5fc56c1a0992e 100644 (file)
@@ -1,20 +1,20 @@
 //! Backup Server Configuration
 
-use proxmox_router::{Router, SubdirMap};
 use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
 
 pub mod access;
 pub mod acme;
+pub mod changer;
 pub mod datastore;
-pub mod remote;
-pub mod sync;
-pub mod verify;
 pub mod drive;
-pub mod changer;
 pub mod media_pool;
-pub mod tape_encryption_keys;
+pub mod remote;
+pub mod sync;
 pub mod tape_backup_job;
+pub mod tape_encryption_keys;
 pub mod traffic_control;
+pub mod verify;
 
 const SUBDIRS: SubdirMap = &[
     ("access", &access::ROUTER),
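
The reshuffled `pub mod` and `use` blocks in this and the surrounding files are also rustfmt's doing: `reorder_modules` and `reorder_imports` both default to true, so contiguous declaration groups come out alphabetized. A std-only sketch; running `cargo fmt` on it should reproduce the effect:

// Deliberately out of order, like the '-' lines above:
use std::fmt::Display;
use std::collections::HashMap;
use std::borrow::Cow;

// `cargo fmt` reorders the contiguous group alphabetically:
//
//     use std::borrow::Cow;
//     use std::collections::HashMap;
//     use std::fmt::Display;

fn main() {
    let map: HashMap<&str, Cow<str>> = HashMap::new();
    let len: Box<dyn Display> = Box::new(map.len());
    println!("{}", len);
}
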
index 12e35ba44bf89e10dd825e4b3ca7f78ac4f9d57b..8b41c12f92e0bc040ea1147af93d2f63c7386754 100644 (file)
@@ -1,20 +1,20 @@
+use ::serde::{Deserialize, Serialize};
 use anyhow::{bail, format_err, Error};
-use proxmox_sys::sortable;
-use proxmox_router::SubdirMap;
+use hex::FromHex;
 use proxmox_router::list_subdirs_api_method;
+use proxmox_router::SubdirMap;
+use proxmox_sys::sortable;
 use serde_json::Value;
-use ::serde::{Deserialize, Serialize};
-use hex::FromHex;
 
-use proxmox_router::{http_bail, http_err, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, http_err, ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
-use pbs_client::{HttpClient, HttpClientOptions};
 use pbs_api_types::{
-    REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA, Remote, RemoteConfig, RemoteConfigUpdater,
-    Authid, PROXMOX_CONFIG_DIGEST_SCHEMA, DATASTORE_SCHEMA, GroupListItem,
-    DataStoreListItem, RateLimitConfig, SyncJobConfig, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
+    Authid, DataStoreListItem, GroupListItem, RateLimitConfig, Remote, RemoteConfig,
+    RemoteConfigUpdater, SyncJobConfig, DATASTORE_SCHEMA, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
+    PROXMOX_CONFIG_DIGEST_SCHEMA, REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA,
 };
+use pbs_client::{HttpClient, HttpClientOptions};
 use pbs_config::sync;
 
 use pbs_config::CachedUserInfo;
@@ -84,12 +84,7 @@ pub fn list_remotes(
     },
 )]
 /// Create new remote.
-pub fn create_remote(
-    name: String,
-    config: RemoteConfig,
-    password: String,
-) -> Result<(), Error> {
-
+pub fn create_remote(name: String, config: RemoteConfig, password: String) -> Result<(), Error> {
     let _lock = pbs_config::remote::lock_config()?;
 
     let (mut section_config, _digest) = pbs_config::remote::config()?;
@@ -98,7 +93,11 @@ pub fn create_remote(
         param_bail!("name", "remote '{}' already exists.", name);
     }
 
-    let remote = Remote { name: name.clone(), config, password };
+    let remote = Remote {
+        name: name.clone(),
+        config,
+        password,
+    };
 
     section_config.set_data(&name, "remote", &remote)?;
 
@@ -188,7 +187,6 @@ pub fn update_remote(
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::remote::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::remote::config()?;
@@ -203,9 +201,15 @@ pub fn update_remote(
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::comment => { data.config.comment = None; },
-                DeletableProperty::fingerprint => { data.config.fingerprint = None; },
-                DeletableProperty::port => { data.config.port = None; },
+                DeletableProperty::comment => {
+                    data.config.comment = None;
+                }
+                DeletableProperty::fingerprint => {
+                    data.config.fingerprint = None;
+                }
+                DeletableProperty::port => {
+                    data.config.port = None;
+                }
             }
         }
     }
@@ -218,12 +222,22 @@ pub fn update_remote(
             data.config.comment = Some(comment);
         }
     }
-    if let Some(host) = update.host { data.config.host = host; }
-    if update.port.is_some() { data.config.port = update.port; }
-    if let Some(auth_id) = update.auth_id { data.config.auth_id = auth_id; }
-    if let Some(password) = password { data.password = password; }
+    if let Some(host) = update.host {
+        data.config.host = host;
+    }
+    if update.port.is_some() {
+        data.config.port = update.port;
+    }
+    if let Some(auth_id) = update.auth_id {
+        data.config.auth_id = auth_id;
+    }
+    if let Some(password) = password {
+        data.password = password;
+    }
 
-    if update.fingerprint.is_some() { data.config.fingerprint = update.fingerprint; }
+    if update.fingerprint.is_some() {
+        data.config.fingerprint = update.fingerprint;
+    }
 
     config.set_data(&name, "remote", &data)?;
 
@@ -251,13 +265,18 @@ pub fn update_remote(
 )]
 /// Remove a remote from the configuration file.
 pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
-
     let (sync_jobs, _) = sync::config()?;
 
-    let job_list: Vec<SyncJobConfig>  = sync_jobs.convert_to_typed_array("sync")?;
+    let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
     for job in job_list {
         if job.remote == name {
-            param_bail!("name", "remote '{}' is used by sync job '{}' (datastore '{}')", name, job.id, job.store);
+            param_bail!(
+                "name",
+                "remote '{}' is used by sync job '{}' (datastore '{}')",
+                name,
+                job.id,
+                job.store
+            );
         }
     }
 
@@ -271,7 +290,9 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
     }
 
     match config.sections.get(&name) {
-        Some(_) => { config.sections.remove(&name); },
+        Some(_) => {
+            config.sections.remove(&name);
+        }
         None => http_bail!(NOT_FOUND, "remote '{}' does not exist.", name),
     }
 
@@ -285,7 +306,10 @@ pub async fn remote_client(
     remote: &Remote,
     limit: Option<RateLimitConfig>,
 ) -> Result<HttpClient, Error> {
-    let mut options = HttpClientOptions::new_non_interactive(remote.password.clone(), remote.config.fingerprint.clone());
+    let mut options = HttpClientOptions::new_non_interactive(
+        remote.password.clone(),
+        remote.config.fingerprint.clone(),
+    );
 
     if let Some(limit) = limit {
         options = options.rate_limit(limit);
@@ -295,15 +319,22 @@ pub async fn remote_client(
         &remote.config.host,
         remote.config.port.unwrap_or(8007),
         &remote.config.auth_id,
-        options)?;
-    let _auth_info = client.login() // make sure we can auth
+        options,
+    )?;
+    let _auth_info = client
+        .login() // make sure we can auth
         .await
-        .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.config.host, err))?;
+        .map_err(|err| {
+            format_err!(
+                "remote connection to '{}' failed - {}",
+                remote.config.host,
+                err
+            )
+        })?;
 
     Ok(client)
 }
 
-
 #[api(
     input: {
         properties: {
@@ -327,15 +358,15 @@ pub async fn scan_remote_datastores(name: String) -> Result<Vec<DataStoreListIte
     let remote: Remote = remote_config.lookup("remote", &name)?;
 
     let map_remote_err = |api_err| {
-        http_err!(INTERNAL_SERVER_ERROR,
-                  "failed to scan remote '{}' - {}",
-                  &name,
-                  api_err)
+        http_err!(
+            INTERNAL_SERVER_ERROR,
+            "failed to scan remote '{}' - {}",
+            &name,
+            api_err
+        )
     };
 
-    let client = remote_client(&remote, None)
-        .await
-        .map_err(map_remote_err)?;
+    let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
     let api_res = client
         .get("api2/json/admin/datastore", None)
         .await
@@ -377,15 +408,15 @@ pub async fn scan_remote_groups(name: String, store: String) -> Result<Vec<Group
     let remote: Remote = remote_config.lookup("remote", &name)?;
 
     let map_remote_err = |api_err| {
-        http_err!(INTERNAL_SERVER_ERROR,
-                  "failed to scan remote '{}' - {}",
-                  &name,
-                  api_err)
+        http_err!(
+            INTERNAL_SERVER_ERROR,
+            "failed to scan remote '{}' - {}",
+            &name,
+            api_err
+        )
     };
 
-    let client = remote_client(&remote, None)
-        .await
-        .map_err(map_remote_err)?;
+    let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
     let api_res = client
         .get(&format!("api2/json/admin/datastore/{}/groups", store), None)
         .await
@@ -402,13 +433,8 @@ pub async fn scan_remote_groups(name: String, store: String) -> Result<Vec<Group
 }
 
 #[sortable]
-const DATASTORE_SCAN_SUBDIRS: SubdirMap = &[
-    (
-        "groups",
-        &Router::new()
-            .get(&API_METHOD_SCAN_REMOTE_GROUPS)
-    ),
-];
+const DATASTORE_SCAN_SUBDIRS: SubdirMap =
+    &[("groups", &Router::new().get(&API_METHOD_SCAN_REMOTE_GROUPS))];
 
 const DATASTORE_SCAN_ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(DATASTORE_SCAN_SUBDIRS))
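
The `DATASTORE_SCAN_SUBDIRS` collapse above is pure formatting, but the data structure deserves a gloss: a `SubdirMap` is conceptually a sorted slice of (path segment, router) pairs, and the `#[sortable]` attribute on the real constant is there to guarantee that ordering. A dependency-free sketch of the dispatch idea (simplified handler type, not the proxmox_router API):

type Handler = fn() -> &'static str;

fn scan_remote_groups() -> &'static str {
    "scan-remote-groups handler"
}

// Sorted slice of (segment, handler) pairs, so lookup can binary-search.
const SUBDIRS: &[(&str, Handler)] = &[("groups", scan_remote_groups)];

fn dispatch(segment: &str) -> Option<&'static str> {
    SUBDIRS
        .binary_search_by_key(&segment, |(name, _)| *name)
        .ok()
        .map(|idx| (SUBDIRS[idx].1)())
}

fn main() {
    assert_eq!(dispatch("groups"), Some("scan-remote-groups handler"));
    assert_eq!(dispatch("nonexistent"), None);
}
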
index ab4acf220deebd1b35eee2a99d5210f95a2319bb..946af5672ebc999b726a7885a2977b297b8e0bbd 100644 (file)
@@ -1,15 +1,15 @@
-use anyhow::{bail, Error};
-use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
+use anyhow::{bail, Error};
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
-    Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_REMOTE_AUDIT, PRIV_REMOTE_READ,
+    Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
+    PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
 };
 use pbs_config::sync;
 
@@ -49,10 +49,8 @@ pub fn check_sync_job_modify_access(
     let correct_owner = match job.owner {
         Some(ref owner) => {
             owner == auth_id
-                || (owner.is_token()
-                    && !auth_id.is_token()
-                    && owner.user() == auth_id.user())
-        },
+                || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user())
+        }
         // default sync owner
         None => auth_id == Authid::root_auth_id(),
     };
@@ -98,7 +96,7 @@ pub fn list_sync_jobs(
         .into_iter()
         .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
         .collect();
-   Ok(list)
+    Ok(list)
 }
 
 #[api(
@@ -181,7 +179,7 @@ pub fn read_sync_job(
 
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 #[allow(non_camel_case_types)]
 /// Deletable property name
 pub enum DeletableProperty {
@@ -258,18 +256,36 @@ pub fn update_sync_job(
 
     let mut data: SyncJobConfig = config.lookup("sync", &id)?;
 
-     if let Some(delete) = delete {
+    if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::owner => { data.owner = None; },
-                DeletableProperty::comment => { data.comment = None; },
-                DeletableProperty::schedule => { data.schedule = None; },
-                DeletableProperty::remove_vanished => { data.remove_vanished = None; },
-                DeletableProperty::group_filter => { data.group_filter = None; },
-                DeletableProperty::rate_in => { data.limit.rate_in = None; },
-                DeletableProperty::rate_out => { data.limit.rate_out = None; },
-                DeletableProperty::burst_in => { data.limit.burst_in = None; },
-                DeletableProperty::burst_out => { data.limit.burst_out = None; },
+                DeletableProperty::owner => {
+                    data.owner = None;
+                }
+                DeletableProperty::comment => {
+                    data.comment = None;
+                }
+                DeletableProperty::schedule => {
+                    data.schedule = None;
+                }
+                DeletableProperty::remove_vanished => {
+                    data.remove_vanished = None;
+                }
+                DeletableProperty::group_filter => {
+                    data.group_filter = None;
+                }
+                DeletableProperty::rate_in => {
+                    data.limit.rate_in = None;
+                }
+                DeletableProperty::rate_out => {
+                    data.limit.rate_out = None;
+                }
+                DeletableProperty::burst_in => {
+                    data.limit.burst_in = None;
+                }
+                DeletableProperty::burst_out => {
+                    data.limit.burst_out = None;
+                }
             }
         }
     }
@@ -283,11 +299,21 @@ pub fn update_sync_job(
         }
     }
 
-    if let Some(store) = update.store { data.store = store; }
-    if let Some(remote) = update.remote { data.remote = remote; }
-    if let Some(remote_store) = update.remote_store { data.remote_store = remote_store; }
-    if let Some(owner) = update.owner { data.owner = Some(owner); }
-    if let Some(group_filter) = update.group_filter { data.group_filter = Some(group_filter); }
+    if let Some(store) = update.store {
+        data.store = store;
+    }
+    if let Some(remote) = update.remote {
+        data.remote = remote;
+    }
+    if let Some(remote_store) = update.remote_store {
+        data.remote_store = remote_store;
+    }
+    if let Some(owner) = update.owner {
+        data.owner = Some(owner);
+    }
+    if let Some(group_filter) = update.group_filter {
+        data.group_filter = Some(group_filter);
+    }
 
     if update.limit.rate_in.is_some() {
         data.limit.rate_in = update.limit.rate_in;
@@ -306,8 +332,12 @@ pub fn update_sync_job(
     }
 
     let schedule_changed = data.schedule != update.schedule;
-    if update.schedule.is_some() { data.schedule = update.schedule; }
-    if update.remove_vanished.is_some() { data.remove_vanished = update.remove_vanished; }
+    if update.schedule.is_some() {
+        data.schedule = update.schedule;
+    }
+    if update.remove_vanished.is_some() {
+        data.remove_vanished = update.remove_vanished;
+    }
 
     if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
         bail!("permission check failed");
@@ -366,8 +396,10 @@ pub fn delete_sync_job(
                 bail!("permission check failed");
             }
             config.sections.remove(&id);
-        },
-        Err(_) => { http_bail!(NOT_FOUND, "job '{}' does not exist.", id) },
+        }
+        Err(_) => {
+            http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
+        }
     };
 
     sync::save_config(&config)?;
@@ -387,25 +419,30 @@ pub const ROUTER: Router = Router::new()
     .post(&API_METHOD_CREATE_SYNC_JOB)
     .match_all("id", &ITEM_ROUTER);
 
-
 #[test]
 fn sync_job_access_test() -> Result<(), Error> {
-    let (user_cfg, _) = pbs_config::user::test_cfg_from_str(r###"
+    let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
+        r###"
 user: noperm@pbs
 
 user: read@pbs
 
 user: write@pbs
 
-"###).expect("test user.cfg is not parsable");
-    let acl_tree = pbs_config::acl::AclTree::from_raw(r###"
+"###,
+    )
+    .expect("test user.cfg is not parsable");
+    let acl_tree = pbs_config::acl::AclTree::from_raw(
+        r###"
 acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
 acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
 acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
 acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
 acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
 acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
-"###).expect("test acl.cfg is not parsable");
+"###,
+    )
+    .expect("test acl.cfg is not parsable");
 
     let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);
 
@@ -429,28 +466,52 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
     };
 
     // should work without ACLs
-    assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
-    assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, root_auth_id, &job),
+        true
+    );
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, root_auth_id, &job),
+        true
+    );
 
     // user without permissions must fail
-    assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
-    assert_eq!(check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, &no_perm_auth_id, &job),
+        false
+    );
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job),
+        false
+    );
 
     // reading without proper read permissions on either remote or local must fail
-    assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, &read_auth_id, &job),
+        false
+    );
 
     // reading without proper read permissions on local end must fail
     job.remote = "remote1".to_string();
-    assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, &read_auth_id, &job),
+        false
+    );
 
     // reading without proper read permissions on remote end must fail
     job.remote = "remote0".to_string();
     job.store = "localstore1".to_string();
-    assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, &read_auth_id, &job),
+        false
+    );
 
     // writing without proper write permissions on either end must fail
     job.store = "localstore0".to_string();
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        false
+    );
 
     // writing without proper write permissions on local end must fail
     job.remote = "remote1".to_string();
@@ -458,46 +519,85 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
     // writing without proper write permissions on remote end must fail
     job.remote = "remote0".to_string();
     job.store = "localstore1".to_string();
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        false
+    );
 
     // reset remote to one where users have access
     job.remote = "remote1".to_string();
 
     // user with read permission can only read, but not modify/run
-    assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), true);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, &read_auth_id, &job),
+        true
+    );
     job.owner = Some(read_auth_id.clone());
-    assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &read_auth_id, &job),
+        false
+    );
     job.owner = None;
-    assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &read_auth_id, &job),
+        false
+    );
     job.owner = Some(write_auth_id.clone());
-    assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &read_auth_id, &job),
+        false
+    );
 
     // user with simple write permission can modify/run
-    assert_eq!(check_sync_job_read_access(&user_info, &write_auth_id, &job), true);
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+    assert_eq!(
+        check_sync_job_read_access(&user_info, &write_auth_id, &job),
+        true
+    );
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        true
+    );
 
     // but can't modify/run with deletion
     job.remove_vanished = Some(true);
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        false
+    );
 
     // unless they have Datastore.Prune as well
     job.store = "localstore2".to_string();
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        true
+    );
 
     // changing owner is not possible
     job.owner = Some(read_auth_id.clone());
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        false
+    );
 
     // also not to the default 'root@pam'
     job.owner = None;
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        false
+    );
 
     // unless they have Datastore.Modify as well
     job.store = "localstore3".to_string();
     job.owner = Some(read_auth_id);
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        true
+    );
     job.owner = None;
-    assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+    assert_eq!(
+        check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+        true
+    );
 
     Ok(())
 }
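
The owner check reformatted near the top of this file is the rule the test above exercises: a sync job owner matches if it is the requesting auth id itself, or a token owned by the requesting (non-token) user. The same predicate, reduced to plain strings ("user@realm" or "user@realm!token") so it runs standalone:

/// Simplified stand-in for pbs_api_types::Authid: "user@realm" or "user@realm!token".
fn is_token(auth_id: &str) -> bool {
    auth_id.contains('!')
}

fn user(auth_id: &str) -> &str {
    auth_id.split('!').next().unwrap()
}

/// Mirrors the reformatted check: same id, or a token owned by the (non-token) requester.
fn correct_owner(owner: &str, auth_id: &str) -> bool {
    owner == auth_id || (is_token(owner) && !is_token(auth_id) && user(owner) == user(auth_id))
}

fn main() {
    assert!(correct_owner("write@pbs", "write@pbs"));
    assert!(correct_owner("write@pbs!token1", "write@pbs"));
    assert!(!correct_owner("write@pbs!token1", "write@pbs!token2"));
    assert!(!correct_owner("read@pbs", "write@pbs"));
}
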
index bd14138d97ced44e7903310b58f3d8a66ee23486..80164b01ec6216551e834c26261e0cd8f61f7e51 100644 (file)
@@ -1,15 +1,14 @@
-use anyhow::Error;
-use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
-    TrafficControlRule, TrafficControlRuleUpdater,
+    TrafficControlRule, TrafficControlRuleUpdater, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
     PROXMOX_CONFIG_DIGEST_SCHEMA, TRAFFIC_CONTROL_ID_SCHEMA,
-    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
 };
 
 #[api(
@@ -56,13 +55,16 @@ pub fn list_traffic_controls(
 )]
 /// Create new traffic control rule.
 pub fn create_traffic_control(config: TrafficControlRule) -> Result<(), Error> {
-
     let _lock = pbs_config::traffic_control::lock_config()?;
 
     let (mut section_config, _digest) = pbs_config::traffic_control::config()?;
 
     if section_config.sections.get(&config.name).is_some() {
-        param_bail!("name", "traffic control rule '{}' already exists.", config.name);
+        param_bail!(
+            "name",
+            "traffic control rule '{}' already exists.",
+            config.name
+        );
     }
 
     section_config.set_data(&config.name, "rule", &config)?;
@@ -154,7 +156,6 @@ pub fn update_traffic_control(
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
 ) -> Result<(), Error> {
-
     let _lock = pbs_config::traffic_control::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
@@ -169,12 +170,24 @@ pub fn update_traffic_control(
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::rate_in => { data.limit.rate_in = None; },
-                DeletableProperty::rate_out => { data.limit.rate_out = None; },
-                DeletableProperty::burst_in => { data.limit.burst_in = None; },
-                DeletableProperty::burst_out => { data.limit.burst_out = None; },
-                DeletableProperty::comment => { data.comment = None; },
-                DeletableProperty::timeframe => { data.timeframe = None; },
+                DeletableProperty::rate_in => {
+                    data.limit.rate_in = None;
+                }
+                DeletableProperty::rate_out => {
+                    data.limit.rate_out = None;
+                }
+                DeletableProperty::burst_in => {
+                    data.limit.burst_in = None;
+                }
+                DeletableProperty::burst_out => {
+                    data.limit.burst_out = None;
+                }
+                DeletableProperty::comment => {
+                    data.comment = None;
+                }
+                DeletableProperty::timeframe => {
+                    data.timeframe = None;
+                }
             }
         }
     }
@@ -204,8 +217,12 @@ pub fn update_traffic_control(
         data.limit.burst_out = update.limit.burst_out;
     }
 
-    if let Some(network) = update.network { data.network = network; }
-    if update.timeframe.is_some() { data.timeframe = update.timeframe; }
+    if let Some(network) = update.network {
+        data.network = network;
+    }
+    if update.timeframe.is_some() {
+        data.timeframe = update.timeframe;
+    }
 
     config.set_data(&name, "rule", &data)?;
 
@@ -233,7 +250,6 @@ pub fn update_traffic_control(
 )]
 /// Remove a traffic control rule from the configuration file.
 pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<(), Error> {
-
     let _lock = pbs_config::traffic_control::lock_config()?;
 
     let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
@@ -244,7 +260,9 @@ pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<()
     }
 
     match config.sections.get(&name) {
-        Some(_) => { config.sections.remove(&name); },
+        Some(_) => {
+            config.sections.remove(&name);
+        }
         None => http_bail!(NOT_FOUND, "traffic control rule '{}' does not exist.", name),
     }
 
@@ -253,7 +271,6 @@ pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<()
     Ok(())
 }
 
-
 const ITEM_ROUTER: Router = Router::new()
     .get(&API_METHOD_READ_TRAFFIC_CONTROL)
     .put(&API_METHOD_UPDATE_TRAFFIC_CONTROL)
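
Most update/delete handlers in these files take an optional `digest` and refuse to write when it no longer matches the on-disk configuration, which is the optimistic-locking pattern behind `PROXMOX_CONFIG_DIGEST_SCHEMA` and the `<[u8; 32]>::from_hex(digest)` calls. A reduced sketch of that check (4-byte digest and hand-rolled hex instead of SHA-256 and hex::FromHex):

fn check_digest(current: &[u8; 4], client_hex: &str) -> Result<(), String> {
    if client_hex.len() != 8 {
        return Err("bad digest length".into());
    }
    // Decode the client-supplied hex string (hex::FromHex in the real code).
    let mut client = [0u8; 4];
    for (i, byte) in client.iter_mut().enumerate() {
        *byte = u8::from_str_radix(&client_hex[2 * i..2 * i + 2], 16)
            .map_err(|err| err.to_string())?;
    }
    if &client != current {
        return Err("configuration changed since it was read - try again".into());
    }
    Ok(())
}

fn main() {
    let on_disk = [0xde, 0xad, 0xbe, 0xef];
    assert!(check_digest(&on_disk, "deadbeef").is_ok());
    assert!(check_digest(&on_disk, "deadbe00").is_err());
}
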
index c0a7820f9f8872128c84011d780e090fa7ccb27c..3086ff253904d87813d3143c3a7f175e623891ea 100644 (file)
@@ -1,14 +1,14 @@
-use anyhow::Error;
-use serde_json::Value;
 use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
 use hex::FromHex;
+use serde_json::Value;
 
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
 use proxmox_schema::{api, param_bail};
 
 use pbs_api_types::{
     Authid, VerificationJobConfig, VerificationJobConfigUpdater, JOB_ID_SCHEMA,
-    PROXMOX_CONFIG_DIGEST_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY,
+    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
 };
 use pbs_config::verify;
 
@@ -42,19 +42,20 @@ pub fn list_verification_jobs(
 
     let list = config.convert_to_typed_array("verification")?;
 
-    let list = list.into_iter()
+    let list = list
+        .into_iter()
         .filter(|job: &VerificationJobConfig| {
             let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
 
             privs & required_privs != 00
-        }).collect();
+        })
+        .collect();
 
     rpcenv["digest"] = hex::encode(&digest).into();
 
     Ok(list)
 }
 
-
 #[api(
     protected: true,
     input: {
@@ -73,12 +74,17 @@ pub fn list_verification_jobs(
 /// Create a new verification job.
 pub fn create_verification_job(
     config: VerificationJobConfig,
-    rpcenv: &mut dyn RpcEnvironment
+    rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
 
-    user_info.check_privs(&auth_id, &["datastore", &config.store], PRIV_DATASTORE_VERIFY, false)?;
+    user_info.check_privs(
+        &auth_id,
+        &["datastore", &config.store],
+        PRIV_DATASTORE_VERIFY,
+        false,
+    )?;
 
     let _lock = verify::lock_config()?;
 
@@ -124,7 +130,12 @@ pub fn read_verification_job(
     let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
 
     let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
-    user_info.check_privs(&auth_id, &["datastore", &verification_job.store], required_privs, true)?;
+    user_info.check_privs(
+        &auth_id,
+        &["datastore", &verification_job.store],
+        required_privs,
+        true,
+    )?;
 
     rpcenv["digest"] = hex::encode(&digest).into();
 
@@ -133,7 +144,7 @@ pub fn read_verification_job(
 
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// Deletable property name
 pub enum DeletableProperty {
     /// Delete the ignore verified property.
@@ -143,7 +154,7 @@ pub enum DeletableProperty {
     /// Delete the job schedule.
     Schedule,
     /// Delete outdated after property.
-    OutdatedAfter
+    OutdatedAfter,
 }
 
 #[api(
@@ -201,15 +212,28 @@ pub fn update_verification_job(
     let mut data: VerificationJobConfig = config.lookup("verification", &id)?;
 
     // check existing store
-    user_info.check_privs(&auth_id, &["datastore", &data.store], PRIV_DATASTORE_VERIFY, true)?;
+    user_info.check_privs(
+        &auth_id,
+        &["datastore", &data.store],
+        PRIV_DATASTORE_VERIFY,
+        true,
+    )?;
 
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::IgnoreVerified => { data.ignore_verified = None; },
-                DeletableProperty::OutdatedAfter => { data.outdated_after = None; },
-                DeletableProperty::Comment => { data.comment = None; },
-                DeletableProperty::Schedule => { data.schedule = None; },
+                DeletableProperty::IgnoreVerified => {
+                    data.ignore_verified = None;
+                }
+                DeletableProperty::OutdatedAfter => {
+                    data.outdated_after = None;
+                }
+                DeletableProperty::Comment => {
+                    data.comment = None;
+                }
+                DeletableProperty::Schedule => {
+                    data.schedule = None;
+                }
             }
         }
     }
@@ -225,15 +249,25 @@ pub fn update_verification_job(
 
     if let Some(store) = update.store {
         // check new store
-        user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_VERIFY, true)?;
+        user_info.check_privs(
+            &auth_id,
+            &["datastore", &store],
+            PRIV_DATASTORE_VERIFY,
+            true,
+        )?;
         data.store = store;
     }
 
-
-    if update.ignore_verified.is_some() { data.ignore_verified = update.ignore_verified; }
-    if update.outdated_after.is_some() { data.outdated_after = update.outdated_after; }
+    if update.ignore_verified.is_some() {
+        data.ignore_verified = update.ignore_verified;
+    }
+    if update.outdated_after.is_some() {
+        data.outdated_after = update.outdated_after;
+    }
     let schedule_changed = data.schedule != update.schedule;
-    if update.schedule.is_some() { data.schedule = update.schedule; }
+    if update.schedule.is_some() {
+        data.schedule = update.schedule;
+    }
 
     config.set_data(&id, "verification", &data)?;
 
@@ -278,7 +312,12 @@ pub fn delete_verification_job(
     let (mut config, expected_digest) = verify::config()?;
 
     let job: VerificationJobConfig = config.lookup("verification", &id)?;
-    user_info.check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?;
+    user_info.check_privs(
+        &auth_id,
+        &["datastore", &job.store],
+        PRIV_DATASTORE_VERIFY,
+        true,
+    )?;
 
     if let Some(ref digest) = digest {
         let digest = <[u8; 32]>::from_hex(digest)?;
@@ -286,7 +325,9 @@ pub fn delete_verification_job(
     }
 
     match config.sections.get(&id) {
-        Some(_) => { config.sections.remove(&id); },
+        Some(_) => {
+            config.sections.remove(&id);
+        }
         None => http_bail!(NOT_FOUND, "job '{}' does not exist.", id),
     }
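
The job-list filter in this file is a plain bitmask test: each PRIV_* constant occupies one bit, and any overlap between the user's privilege mask and the required mask grants visibility. In miniature (toy constants, not the pbs_api_types values):

const PRIV_DATASTORE_AUDIT: u64 = 1 << 0;
const PRIV_DATASTORE_VERIFY: u64 = 1 << 1;
const PRIV_DATASTORE_MODIFY: u64 = 1 << 2;

fn can_list(user_privs: u64) -> bool {
    // Same test as the filter closure above: any overlapping bit is enough.
    let required = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
    user_privs & required != 0
}

fn main() {
    assert!(can_list(PRIV_DATASTORE_AUDIT));
    assert!(can_list(PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_MODIFY));
    assert!(!can_list(PRIV_DATASTORE_MODIFY));
}
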
 
index 750e4fac053976344c55bf4c224b92c6a396980b..3dc1befc1bbb50feaaae48b8738d4668acf033f1 100644 (file)
@@ -2,7 +2,7 @@ use std::path::PathBuf;
 
 use anyhow::Error;
 use futures::stream::TryStreamExt;
-use hyper::{Body, Response, StatusCode, header};
+use hyper::{header, Body, Response, StatusCode};
 
 use proxmox_router::http_bail;
 
index dd517df4a0c7c7a5ef75da564657cde86b3468e7..6e0a748ef730a41ca8050b41c3991efb153cf1c5 100644 (file)
@@ -4,15 +4,15 @@ pub mod access;
 pub mod admin;
 pub mod backup;
 pub mod config;
+pub mod helpers;
 pub mod node;
+pub mod ping;
+pub mod pull;
 pub mod reader;
 pub mod status;
+pub mod tape;
 pub mod types;
 pub mod version;
-pub mod ping;
-pub mod pull;
-pub mod tape;
-pub mod helpers;
 
 use proxmox_router::{list_subdirs_api_method, Router, SubdirMap};
 
index d9a29313ec06edf1eda9bccb6e3ad3c73fe047b5..99d155a723ed620999e14ce5f871c6ed958cb1b3 100644 (file)
@@ -1,12 +1,12 @@
-use anyhow::{Error, bail, format_err};
+use anyhow::{bail, format_err, Error};
 use serde_json::{json, Value};
 use std::collections::HashMap;
 
-use proxmox_sys::fs::{replace_file, CreateOptions};
 use proxmox_router::{
-    list_subdirs_api_method, RpcEnvironment, RpcEnvironmentType, Permission, Router, SubdirMap
+    list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
 };
 use proxmox_schema::api;
+use proxmox_sys::fs::{replace_file, CreateOptions};
 
 use proxmox_apt::repositories::{
     APTRepositoryFile, APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo,
@@ -15,17 +15,13 @@ use proxmox_apt::repositories::{
 use proxmox_http::ProxyConfig;
 
 use pbs_api_types::{
-    APTUpdateInfo, NODE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
-    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    APTUpdateInfo, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
+    UPID_SCHEMA,
 };
 
 use crate::config::node;
+use crate::tools::{apt, pbs_simple_http, subscription};
 use proxmox_rest_server::WorkerTask;
-use crate::tools::{
-    apt,
-    pbs_simple_http,
-    subscription,
-};
 
 #[api(
     input: {
@@ -49,7 +45,6 @@ use crate::tools::{
 )]
 /// List available APT updates
 fn apt_update_available(_param: Value) -> Result<Value, Error> {
-
     if let Ok(false) = apt::pkg_cache_expired() {
         if let Ok(Some(cache)) = apt::read_pkg_state() {
             return Ok(json!(cache.package_status));
@@ -62,7 +57,6 @@ fn apt_update_available(_param: Value) -> Result<Value, Error> {
 }
 
 pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> {
-
     const PROXY_CFG_FN: &str = "/etc/apt/apt.conf.d/76pveproxy"; // use same file as PVE
 
     if let Some(proxy_config) = proxy_config {
@@ -90,7 +84,9 @@ fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
 }
 
 fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
-    if !quiet { worker.log_message("starting apt-get update") }
+    if !quiet {
+        worker.log_message("starting apt-get update")
+    }
 
     read_and_update_proxy_config()?;
 
@@ -98,7 +94,8 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
     command.arg("update");
 
     // apt "errors" quite easily, and run_command is a bit rigid, so handle this inline for now.
-    let output = command.output()
+    let output = command
+        .output()
         .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
 
     if !quiet {
@@ -109,7 +106,13 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
     if !output.status.success() {
         if output.status.code().is_some() {
             let msg = String::from_utf8(output.stderr)
-                .map(|m| if m.is_empty() { String::from("no error message") } else { m })
+                .map(|m| {
+                    if m.is_empty() {
+                        String::from("no error message")
+                    } else {
+                        m
+                    }
+                })
                 .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
             worker.log_warning(msg);
         } else {
@@ -154,7 +157,6 @@ pub fn apt_update_database(
     quiet: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let auth_id = rpcenv.get_auth_id().unwrap();
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
@@ -176,7 +178,7 @@ pub fn apt_update_database(
                         if notified_version != pkg.version {
                             to_notify.push(pkg);
                         }
-                    },
+                    }
                     None => to_notify.push(pkg),
                 }
             }
@@ -220,19 +222,17 @@ pub fn apt_update_database(
     },
 )]
 /// Retrieve the changelog of the specified package.
-fn apt_get_changelog(
-    param: Value,
-) -> Result<Value, Error> {
-
+fn apt_get_changelog(param: Value) -> Result<Value, Error> {
     let name = pbs_tools::json::required_string_param(&param, "name")?.to_owned();
     let version = param["version"].as_str();
 
-    let pkg_info = apt::list_installed_apt_packages(|data| {
-        match version {
+    let pkg_info = apt::list_installed_apt_packages(
+        |data| match version {
             Some(version) => version == data.active_version,
-            None => data.active_version == data.candidate_version
-        }
-    }, Some(&name));
+            None => data.active_version == data.candidate_version,
+        },
+        Some(&name),
+    );
 
     if pkg_info.is_empty() {
         bail!("Package '{}' not found", name);
@@ -245,33 +245,47 @@ fn apt_get_changelog(
     // FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
     if changelog_url.starts_with("http://download.proxmox.com/") {
         let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, None))
-            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
+            .map_err(|err| {
+                format_err!(
+                    "Error downloading changelog from '{}': {}",
+                    changelog_url,
+                    err
+                )
+            })?;
         Ok(json!(changelog))
-
     } else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
         let sub = match subscription::read_subscription()? {
             Some(sub) => sub,
-            None => bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
+            None => {
+                bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
+            }
         };
         let (key, id) = match sub.key {
-            Some(key) => {
-                match sub.serverid {
-                    Some(id) => (key, id),
-                    None =>
-                        bail!("cannot retrieve changelog from enterprise repo: no server id found")
-                }
+            Some(key) => match sub.serverid {
+                Some(id) => (key, id),
+                None => bail!("cannot retrieve changelog from enterprise repo: no server id found"),
             },
-            None => bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
+            None => {
+                bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
+            }
         };
 
         let mut auth_header = HashMap::new();
-        auth_header.insert("Authorization".to_owned(),
-            format!("Basic {}", base64::encode(format!("{}:{}", key, id))));
+        auth_header.insert(
+            "Authorization".to_owned(),
+            format!("Basic {}", base64::encode(format!("{}:{}", key, id))),
+        );
 
-        let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
-            .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
+        let changelog =
+            proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
+                .map_err(|err| {
+                    format_err!(
+                        "Error downloading changelog from '{}': {}",
+                        changelog_url,
+                        err
+                    )
+                })?;
         Ok(json!(changelog))
-
     } else {
         let mut command = std::process::Command::new("apt-get");
         command.arg("changelog");
@@ -348,23 +362,35 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
         "running kernel: {}",
         nix::sys::utsname::uname().release().to_owned()
     );
-    if let Some(proxmox_backup) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup") {
+    if let Some(proxmox_backup) = pbs_packages
+        .iter()
+        .find(|pkg| pkg.package == "proxmox-backup")
+    {
         let mut proxmox_backup = proxmox_backup.clone();
         proxmox_backup.extra_info = Some(running_kernel);
         packages.push(proxmox_backup);
     } else {
-        packages.push(unknown_package("proxmox-backup".into(), Some(running_kernel)));
+        packages.push(unknown_package(
+            "proxmox-backup".into(),
+            Some(running_kernel),
+        ));
     }
 
     let version = pbs_buildcfg::PROXMOX_PKG_VERSION;
     let release = pbs_buildcfg::PROXMOX_PKG_RELEASE;
     let daemon_version_info = Some(format!("running version: {}.{}", version, release));
-    if let Some(pkg) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup-server") {
+    if let Some(pkg) = pbs_packages
+        .iter()
+        .find(|pkg| pkg.package == "proxmox-backup-server")
+    {
         let mut pkg = pkg.clone();
         pkg.extra_info = daemon_version_info;
         packages.push(pkg);
     } else {
-        packages.push(unknown_package("proxmox-backup".into(), daemon_version_info));
+        packages.push(unknown_package(
+            "proxmox-backup".into(),
+            daemon_version_info,
+        ));
     }
 
     let mut kernel_pkgs: Vec<APTUpdateInfo> = pbs_packages
@@ -609,15 +635,22 @@ pub fn change_repository(
 }
 
 const SUBDIRS: SubdirMap = &[
-    ("changelog", &Router::new().get(&API_METHOD_APT_GET_CHANGELOG)),
-    ("repositories", &Router::new()
-        .get(&API_METHOD_GET_REPOSITORIES)
-        .post(&API_METHOD_CHANGE_REPOSITORY)
-        .put(&API_METHOD_ADD_REPOSITORY)
+    (
+        "changelog",
+        &Router::new().get(&API_METHOD_APT_GET_CHANGELOG),
+    ),
+    (
+        "repositories",
+        &Router::new()
+            .get(&API_METHOD_GET_REPOSITORIES)
+            .post(&API_METHOD_CHANGE_REPOSITORY)
+            .put(&API_METHOD_ADD_REPOSITORY),
     ),
-    ("update", &Router::new()
-        .get(&API_METHOD_APT_UPDATE_AVAILABLE)
-        .post(&API_METHOD_APT_UPDATE_DATABASE)
+    (
+        "update",
+        &Router::new()
+            .get(&API_METHOD_APT_UPDATE_AVAILABLE)
+            .post(&API_METHOD_APT_UPDATE_DATABASE),
     ),
     ("versions", &Router::new().get(&API_METHOD_GET_VERSIONS)),
 ];
index 9508ea381462272f9b6132bab71e6789a7c32273..e303973a5570b8cfeddd5c663357c96459edb27d 100644 (file)
@@ -7,9 +7,9 @@ use openssl::pkey::PKey;
 use openssl::x509::X509;
 use serde::{Deserialize, Serialize};
 
+use proxmox_router::list_subdirs_api_method;
 use proxmox_router::SubdirMap;
 use proxmox_router::{Permission, Router, RpcEnvironment};
-use proxmox_router::list_subdirs_api_method;
 use proxmox_schema::api;
 use proxmox_sys::{task_log, task_warn};
 
@@ -305,7 +305,10 @@ async fn order_certificate(
     };
 
     if domains.is_empty() {
-        task_log!(worker, "No domains configured to be ordered from an ACME server.");
+        task_log!(
+            worker,
+            "No domains configured to be ordered from an ACME server."
+        );
         return Ok(None);
     }
 
@@ -363,7 +366,9 @@ async fn order_certificate(
             task_warn!(
                 worker,
                 "Failed to teardown plugin '{}' for domain '{}' - {}",
-                plugin_id, domain, err
+                plugin_id,
+                domain,
+                err
             );
         }
 
@@ -453,7 +458,10 @@ async fn request_validation(
         let auth = acme.get_authorization(auth_url).await?;
         match auth.status {
             Status::Pending => {
-                task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
+                task_log!(
+                    worker,
+                    "Status is still 'pending', trying again in 10 seconds"
+                );
                 tokio::time::sleep(Duration::from_secs(10)).await;
             }
             Status::Valid => return Ok(()),
@@ -574,7 +582,10 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error
             let mut acme = node_config.acme_client().await?;
             task_log!(worker, "Revoking old certificate");
             acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
-            task_log!(worker, "Deleting certificate and regenerating a self-signed one");
+            task_log!(
+                worker,
+                "Deleting certificate and regenerating a self-signed one"
+            );
             delete_custom_certificate().await?;
             Ok(())
         },
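
For context on the `request_validation` hunk: it polls the ACME authorization object and sleeps ten seconds while the status stays pending. The same control flow, stripped of async and the real ACME client (stubbed poll, synchronous sleep):

use std::{thread, time::Duration};

#[derive(Debug, PartialEq)]
enum Status {
    Pending,
    Valid,
    Invalid,
}

// Hypothetical stand-in for acme.get_authorization(auth_url).await?.status.
fn poll_status(attempt: u32) -> Status {
    if attempt < 2 {
        Status::Pending
    } else {
        Status::Valid
    }
}

fn wait_for_validation(max_attempts: u32) -> Result<(), String> {
    for attempt in 0..max_attempts {
        match poll_status(attempt) {
            Status::Valid => return Ok(()),
            Status::Pending => {
                // Mirrors the task_log! + tokio::time::sleep pair above.
                println!("Status is still 'pending', trying again in 10 seconds");
                thread::sleep(Duration::from_secs(10));
            }
            Status::Invalid => return Err("authorization failed".into()),
        }
    }
    Err("timed out waiting for ACME validation".into())
}

fn main() {
    wait_for_validation(5).unwrap();
}
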
index 113d7ed8458bced794f00b36b7e16b4d27f31889..64a2d1be97311725b1393dcdc0d685f8a794ca03 100644 (file)
@@ -1,5 +1,5 @@
-use anyhow::Error;
 use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
 use hex::FromHex;
 
 use proxmox_router::{Permission, Router, RpcEnvironment};
@@ -36,7 +36,7 @@ pub fn get_node_config(mut rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig
 
 #[api()]
 #[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 #[allow(non_camel_case_types)]
 /// Deletable property name
 pub enum DeletableProperty {
@@ -57,10 +57,10 @@ pub enum DeletableProperty {
     /// Delete the email-from property.
     email_from,
     /// Delete the ciphers-tls-1.3 property.
-    #[serde(rename="ciphers-tls-1.3")]
+    #[serde(rename = "ciphers-tls-1.3")]
     ciphers_tls_1_3,
     /// Delete the ciphers-tls-1.2 property.
-    #[serde(rename="ciphers-tls-1.2")]
+    #[serde(rename = "ciphers-tls-1.2")]
     ciphers_tls_1_2,
     /// Delete the default-lang property.
     default_lang,
@@ -117,36 +117,88 @@ pub fn update_node_config(
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::acme => { config.acme = None; },
-                DeletableProperty::acmedomain0 => { config.acmedomain0 = None; },
-                DeletableProperty::acmedomain1 => { config.acmedomain1 = None; },
-                DeletableProperty::acmedomain2 => { config.acmedomain2 = None; },
-                DeletableProperty::acmedomain3 => { config.acmedomain3 = None; },
-                DeletableProperty::acmedomain4 => { config.acmedomain4 = None; },
-                DeletableProperty::http_proxy => { config.http_proxy = None; },
-                DeletableProperty::email_from => { config.email_from = None; },
-                DeletableProperty::ciphers_tls_1_3 => { config.ciphers_tls_1_3 = None; },
-                DeletableProperty::ciphers_tls_1_2 => { config.ciphers_tls_1_2 = None; },
-                DeletableProperty::default_lang => { config.default_lang = None; },
-                DeletableProperty::description => { config.description = None; },
-                DeletableProperty::task_log_max_days => { config.task_log_max_days = None; },
+                DeletableProperty::acme => {
+                    config.acme = None;
+                }
+                DeletableProperty::acmedomain0 => {
+                    config.acmedomain0 = None;
+                }
+                DeletableProperty::acmedomain1 => {
+                    config.acmedomain1 = None;
+                }
+                DeletableProperty::acmedomain2 => {
+                    config.acmedomain2 = None;
+                }
+                DeletableProperty::acmedomain3 => {
+                    config.acmedomain3 = None;
+                }
+                DeletableProperty::acmedomain4 => {
+                    config.acmedomain4 = None;
+                }
+                DeletableProperty::http_proxy => {
+                    config.http_proxy = None;
+                }
+                DeletableProperty::email_from => {
+                    config.email_from = None;
+                }
+                DeletableProperty::ciphers_tls_1_3 => {
+                    config.ciphers_tls_1_3 = None;
+                }
+                DeletableProperty::ciphers_tls_1_2 => {
+                    config.ciphers_tls_1_2 = None;
+                }
+                DeletableProperty::default_lang => {
+                    config.default_lang = None;
+                }
+                DeletableProperty::description => {
+                    config.description = None;
+                }
+                DeletableProperty::task_log_max_days => {
+                    config.task_log_max_days = None;
+                }
             }
         }
     }
 
-    if update.acme.is_some() { config.acme = update.acme; }
-    if update.acmedomain0.is_some() { config.acmedomain0 = update.acmedomain0; }
-    if update.acmedomain1.is_some() { config.acmedomain1 = update.acmedomain1; }
-    if update.acmedomain2.is_some() { config.acmedomain2 = update.acmedomain2; }
-    if update.acmedomain3.is_some() { config.acmedomain3 = update.acmedomain3; }
-    if update.acmedomain4.is_some() { config.acmedomain4 = update.acmedomain4; }
-    if update.http_proxy.is_some() { config.http_proxy = update.http_proxy; }
-    if update.email_from.is_some() { config.email_from = update.email_from; }
-    if update.ciphers_tls_1_3.is_some() { config.ciphers_tls_1_3 = update.ciphers_tls_1_3; }
-    if update.ciphers_tls_1_2.is_some() { config.ciphers_tls_1_2 = update.ciphers_tls_1_2; }
-    if update.default_lang.is_some() { config.default_lang = update.default_lang; }
-    if update.description.is_some() { config.description = update.description; }
-    if update.task_log_max_days.is_some() { config.task_log_max_days = update.task_log_max_days; }
+    if update.acme.is_some() {
+        config.acme = update.acme;
+    }
+    if update.acmedomain0.is_some() {
+        config.acmedomain0 = update.acmedomain0;
+    }
+    if update.acmedomain1.is_some() {
+        config.acmedomain1 = update.acmedomain1;
+    }
+    if update.acmedomain2.is_some() {
+        config.acmedomain2 = update.acmedomain2;
+    }
+    if update.acmedomain3.is_some() {
+        config.acmedomain3 = update.acmedomain3;
+    }
+    if update.acmedomain4.is_some() {
+        config.acmedomain4 = update.acmedomain4;
+    }
+    if update.http_proxy.is_some() {
+        config.http_proxy = update.http_proxy;
+    }
+    if update.email_from.is_some() {
+        config.email_from = update.email_from;
+    }
+    if update.ciphers_tls_1_3.is_some() {
+        config.ciphers_tls_1_3 = update.ciphers_tls_1_3;
+    }
+    if update.ciphers_tls_1_2.is_some() {
+        config.ciphers_tls_1_2 = update.ciphers_tls_1_2;
+    }
+    if update.default_lang.is_some() {
+        config.default_lang = update.default_lang;
+    }
+    if update.description.is_some() {
+        config.description = update.description;
+    }
+    if update.task_log_max_days.is_some() {
+        config.task_log_max_days = update.task_log_max_days;
+    }
 
     crate::config::node::save_config(&config)?;
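
One detail worth noting in this file's `DeletableProperty`: `#[serde(rename_all = "kebab-case")]` alone would serialize `ciphers_tls_1_3` as "ciphers-tls-1-3", so the dotted property names need their explicit `#[serde(rename = ...)]` on top. A compilable sketch of that interplay (requires the serde derive feature and serde_json):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
enum DeletableProperty {
    http_proxy, // rename_all alone gives "http-proxy", which is what we want
    #[serde(rename = "ciphers-tls-1.3")]
    ciphers_tls_1_3, // rename_all alone would give "ciphers-tls-1-3"
}

fn main() -> Result<(), serde_json::Error> {
    assert_eq!(
        serde_json::to_string(&DeletableProperty::http_proxy)?,
        r#""http-proxy""#
    );
    assert_eq!(
        serde_json::to_string(&DeletableProperty::ciphers_tls_1_3)?,
        r#""ciphers-tls-1.3""#
    );
    Ok(())
}
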
 
index bf1a1be653b380a630e0c2ca6182ddc275564f4b..f4d85d0a24f840c3d48f2b6afcdbeddcfed6355e 100644 (file)
@@ -1,20 +1,20 @@
+use ::serde::{Deserialize, Serialize};
 use anyhow::{bail, Error};
 use serde_json::json;
-use ::serde::{Deserialize, Serialize};
 
-use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::api;
 use proxmox_section_config::SectionConfigData;
 use proxmox_sys::task_log;
 
 use pbs_api_types::{
-    DataStoreConfig, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
-    DATASTORE_SCHEMA, UPID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT,
+    PRIV_SYS_MODIFY, UPID_SCHEMA,
 };
 
 use crate::tools::disks::{
-    DiskManage, FileSystemType, DiskUsageType,
-    create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
+    create_file_system, create_single_linux_partition, get_disk_usage_info, get_fs_uuid,
+    DiskManage, DiskUsageType, FileSystemType,
 };
 use crate::tools::systemd::{self, types::*};
 
@@ -31,7 +31,7 @@ const BASE_MOUNT_DIR: &str = "/mnt/datastore/";
     },
 )]
 #[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
 /// Datastore mount info.
 pub struct DatastoreMountInfo {
     /// The path of the mount unit.
@@ -69,8 +69,7 @@ pub struct DatastoreMountInfo {
     },
 )]
 /// List systemd datastore mount units.
-pub fn  list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
-
+pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
     lazy_static::lazy_static! {
         static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
     }
@@ -144,7 +143,6 @@ pub fn create_datastore_disk(
     filesystem: Option<FileSystemType>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
     let auth_id = rpcenv.get_auth_id().unwrap();
@@ -161,15 +159,18 @@ pub fn create_datastore_disk(
     let default_path = std::path::PathBuf::from(&mount_point);
 
     match std::fs::metadata(&default_path) {
-        Err(_) => {}, // path does not exist
+        Err(_) => {} // path does not exist
         Ok(_) => {
             bail!("path {:?} already exists", default_path);
         }
     }
 
     let upid_str = WorkerTask::new_thread(
-        "dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
-        {
+        "dircreate",
+        Some(name.clone()),
+        auth_id,
+        to_stdout,
+        move |worker| {
             task_log!(worker, "create datastore '{}' on disk {}", name, disk);
 
             let add_datastore = add_datastore.unwrap_or(false);
@@ -185,7 +186,8 @@ pub fn create_datastore_disk(
             let uuid = get_fs_uuid(&partition)?;
             let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);
 
-            let mount_unit_name = create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;
+            let mount_unit_name =
+                create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;
 
             crate::tools::systemd::reload_daemon()?;
             crate::tools::systemd::enable_unit(&mount_unit_name)?;
@@ -202,11 +204,17 @@ pub fn create_datastore_disk(
                     bail!("datastore '{}' already exists.", datastore.name);
                 }
 
-                crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
+                crate::api2::config::datastore::do_create_datastore(
+                    lock,
+                    config,
+                    datastore,
+                    Some(&worker),
+                )?;
             }
 
             Ok(())
-        })?;
+        },
+    )?;
 
     Ok(upid_str)
 }
@@ -229,17 +237,19 @@ pub fn create_datastore_disk(
 )]
 /// Remove a filesystem mounted under '/mnt/datastore/<name>'.
 pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
-
     let path = format!("{}{}", BASE_MOUNT_DIR, name);
     // path of datastore cannot be changed
     let (config, _) = pbs_config::datastore::config()?;
     let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
-    let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
-        .find(|ds| ds.path == path);
+    let conflicting_datastore: Option<DataStoreConfig> =
+        datastores.into_iter().find(|ds| ds.path == path);
 
     if let Some(conflicting_datastore) = conflicting_datastore {
-        bail!("Can't remove '{}' since it's required by datastore '{}'",
-              conflicting_datastore.path, conflicting_datastore.name);
+        bail!(
+            "Can't remove '{}' since it's required by datastore '{}'",
+            conflicting_datastore.path,
+            conflicting_datastore.name
+        );
     }
 
     // disable systemd mount-unit
@@ -262,33 +272,33 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
              until the next reboot or until unmounted manually!",
             path
         ),
-        Ok(_) => Ok(())
+        Ok(_) => Ok(()),
     }
 }
 
-const ITEM_ROUTER: Router = Router::new()
-    .delete(&API_METHOD_DELETE_DATASTORE_DISK);
+const ITEM_ROUTER: Router = Router::new().delete(&API_METHOD_DELETE_DATASTORE_DISK);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_DATASTORE_MOUNTS)
     .post(&API_METHOD_CREATE_DATASTORE_DISK)
     .match_all("name", &ITEM_ROUTER);
 
-
 fn create_datastore_mount_unit(
     datastore_name: &str,
     mount_point: &str,
     fs_type: FileSystemType,
     what: &str,
 ) -> Result<String, Error> {
-
     let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
     mount_unit_name.push_str(".mount");
 
     let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
 
     let unit = SystemdUnitSection {
-        Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
+        Description: format!(
+            "Mount datatstore '{}' under '{}'",
+            datastore_name, mount_point
+        ),
         ..Default::default()
     };
 
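Most of the churn in the hunk above is rustfmt's deterministic import handling (reorder_imports is enabled by default): the names inside a use-group are sorted, nothing else changes. A minimal before/after, taken from this file:

// before: hand-maintained order
use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};

// after: items inside the braces sorted into rustfmt's canonical order
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
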
index a542f9e0a49353b559b347d2a9bf5c2c43af051b..dac6f535113a09090cda2f8a6fcd8c3a17462ffa 100644 (file)
@@ -1,25 +1,22 @@
 use anyhow::{bail, Error};
 use serde_json::{json, Value};
 
-use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::api;
 use proxmox_sys::task_log;
 
 use pbs_api_types::{
-    ZpoolListItem, ZfsRaidLevel, ZfsCompressionType, DataStoreConfig,
-    NODE_SCHEMA, ZPOOL_NAME_SCHEMA, DATASTORE_SCHEMA, DISK_ARRAY_SCHEMA,
-    DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
-    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    DataStoreConfig, ZfsCompressionType, ZfsRaidLevel, ZpoolListItem, DATASTORE_SCHEMA,
+    DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA,
+    ZFS_ASHIFT_SCHEMA, ZPOOL_NAME_SCHEMA,
 };
 
 use crate::tools::disks::{
-    zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
-    DiskUsageType,
+    parse_zpool_status_config_tree, vdev_list_to_tree, zpool_list, zpool_status, DiskUsageType,
 };
 
 use proxmox_rest_server::WorkerTask;
 
-
 #[api(
     protected: true,
     input: {
@@ -42,7 +39,6 @@ use proxmox_rest_server::WorkerTask;
 )]
 /// List zfs pools.
 pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
-
     let data = zpool_list(None, false)?;
 
     let mut list = Vec::new();
@@ -87,15 +83,12 @@ pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
     },
 )]
 /// Get zpool status details.
-pub fn zpool_details(
-    name: String,
-) -> Result<Value, Error> {
-
+pub fn zpool_details(name: String) -> Result<Value, Error> {
     let key_value_list = zpool_status(&name)?;
 
     let config = match key_value_list.iter().find(|(k, _)| k == "config") {
         Some((_, v)) => v,
-        None =>  bail!("got zpool status without config key"),
+        None => bail!("got zpool status without config key"),
     };
 
     let vdev_list = parse_zpool_status_config_tree(config)?;
@@ -107,11 +100,12 @@ pub fn zpool_details(
         }
     }
 
-    tree["name"] = tree.as_object_mut().unwrap()
+    tree["name"] = tree
+        .as_object_mut()
+        .unwrap()
         .remove("pool")
         .unwrap_or_else(|| name.into());
 
-
     Ok(tree)
 }
 
@@ -163,7 +157,6 @@ pub fn create_zpool(
     add_datastore: Option<bool>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
     let auth_id = rpcenv.get_auth_id().unwrap();
@@ -174,8 +167,12 @@ pub fn create_zpool(
 
     let devices_text = devices.clone();
     let devices = DISK_ARRAY_SCHEMA.parse_property_string(&devices)?;
-    let devices: Vec<String> = devices.as_array().unwrap().iter()
-        .map(|v| v.as_str().unwrap().to_string()).collect();
+    let devices: Vec<String> = devices
+        .as_array()
+        .unwrap()
+        .iter()
+        .map(|v| v.as_str().unwrap().to_string())
+        .collect();
 
     let disk_map = crate::tools::disks::get_disks(None, true)?;
     for disk in devices.iter() {
@@ -220,20 +217,35 @@ pub fn create_zpool(
     let default_path = std::path::PathBuf::from(&mount_point);
 
     match std::fs::metadata(&default_path) {
-        Err(_) => {}, // path does not exist
+        Err(_) => {} // path does not exist
         Ok(_) => {
             bail!("path {:?} already exists", default_path);
         }
     }
 
-     let upid_str = WorkerTask::new_thread(
-        "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
-        {
-            task_log!(worker, "create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text);
-
+    let upid_str = WorkerTask::new_thread(
+        "zfscreate",
+        Some(name.clone()),
+        auth_id,
+        to_stdout,
+        move |worker| {
+            task_log!(
+                worker,
+                "create {:?} zpool '{}' on devices '{}'",
+                raidlevel,
+                name,
+                devices_text
+            );
 
             let mut command = std::process::Command::new("zpool");
-            command.args(&["create", "-o", &format!("ashift={}", ashift), "-m", &mount_point, &name]);
+            command.args(&[
+                "create",
+                "-o",
+                &format!("ashift={}", ashift),
+                "-m",
+                &mount_point,
+                &name,
+            ]);
 
             match raidlevel {
                 ZfsRaidLevel::Single => {
@@ -244,10 +256,10 @@ pub fn create_zpool(
                     command.args(devices);
                 }
                 ZfsRaidLevel::Raid10 => {
-                     devices.chunks(2).for_each(|pair| {
-                         command.arg("mirror");
-                         command.args(pair);
-                     });
+                    devices.chunks(2).for_each(|pair| {
+                        command.arg("mirror");
+                        command.args(pair);
+                    });
                 }
                 ZfsRaidLevel::RaidZ => {
                     command.arg("raidz");
@@ -269,7 +281,10 @@ pub fn create_zpool(
             task_log!(worker, "{}", output);
 
             if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
-                let import_unit = format!("zfs-import@{}.service", proxmox_sys::systemd::escape_unit(&name, false));
+                let import_unit = format!(
+                    "zfs-import@{}.service",
+                    proxmox_sys::systemd::escape_unit(&name, false)
+                );
                 crate::tools::systemd::enable_unit(&import_unit)?;
             }
 
@@ -294,17 +309,22 @@ pub fn create_zpool(
                     bail!("datastore '{}' already exists.", datastore.name);
                 }
 
-                crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
+                crate::api2::config::datastore::do_create_datastore(
+                    lock,
+                    config,
+                    datastore,
+                    Some(&worker),
+                )?;
             }
 
             Ok(())
-        })?;
+        },
+    )?;
 
     Ok(upid_str)
 }
 
-pub const POOL_ROUTER: Router = Router::new()
-    .get(&API_METHOD_ZPOOL_DETAILS);
+pub const POOL_ROUTER: Router = Router::new().get(&API_METHOD_ZPOOL_DETAILS);
 
 pub const ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_ZPOOLS)
index 8e4d4adaa0a168c289bc22ccef5c92a0c2c85bbb..18cf680fa92d411acb1e1606a5490f37ab8269db 100644 (file)
@@ -1,21 +1,21 @@
 use std::sync::{Arc, Mutex};
 
-use anyhow::{Error};
+use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
 use lazy_static::lazy_static;
 use openssl::sha;
 use regex::Regex;
 use serde_json::{json, Value};
-use ::serde::{Deserialize, Serialize};
 
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use pbs_api_types::{IPRE, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
-use pbs_api_types::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
 
 use pbs_api_types::{
-    PROXMOX_CONFIG_DIGEST_SCHEMA, FIRST_DNS_SERVER_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
-    THIRD_DNS_SERVER_SCHEMA, NODE_SCHEMA, SEARCH_DOMAIN_SCHEMA,
-    PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    FIRST_DNS_SERVER_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    PROXMOX_CONFIG_DIGEST_SCHEMA, SEARCH_DOMAIN_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
+    THIRD_DNS_SERVER_SCHEMA,
 };
 
 static RESOLV_CONF_FN: &str = "/etc/resolv.conf";
@@ -34,7 +34,6 @@ pub enum DeletableProperty {
 }
 
 pub fn read_etc_resolv_conf() -> Result<Value, Error> {
-
     let mut result = json!({});
 
     let mut nscount = 0;
@@ -47,24 +46,27 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
 
     lazy_static! {
         static ref DOMAIN_REGEX: Regex = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap();
-        static ref SERVER_REGEX: Regex = Regex::new(
-            concat!(r"^\s*nameserver\s+(", IPRE!(),  r")\s*")).unwrap();
+        static ref SERVER_REGEX: Regex =
+            Regex::new(concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
     }
 
     let mut options = String::new();
 
     for line in data.lines() {
-
         if let Some(caps) = DOMAIN_REGEX.captures(line) {
             result["search"] = Value::from(&caps[1]);
         } else if let Some(caps) = SERVER_REGEX.captures(line) {
             nscount += 1;
-            if nscount > 3 { continue };
+            if nscount > 3 {
+                continue;
+            };
             let nameserver = &caps[1];
             let id = format!("dns{}", nscount);
             result[id] = Value::from(nameserver);
         } else {
-            if !options.is_empty() { options.push('\n'); }
+            if !options.is_empty() {
+                options.push('\n');
+            }
             options.push_str(line);
         }
     }
@@ -127,7 +129,6 @@ pub fn update_dns(
     delete: Option<Vec<DeletableProperty>>,
     digest: Option<String>,
 ) -> Result<Value, Error> {
-
     lazy_static! {
         static ref MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
     }
@@ -145,17 +146,31 @@ pub fn update_dns(
         for delete_prop in delete {
             let config = config.as_object_mut().unwrap();
             match delete_prop {
-                DeletableProperty::dns1 => { config.remove("dns1"); },
-                DeletableProperty::dns2 => { config.remove("dns2"); },
-                DeletableProperty::dns3 => { config.remove("dns3"); },
+                DeletableProperty::dns1 => {
+                    config.remove("dns1");
+                }
+                DeletableProperty::dns2 => {
+                    config.remove("dns2");
+                }
+                DeletableProperty::dns3 => {
+                    config.remove("dns3");
+                }
             }
         }
     }
 
-    if let Some(search) = search { config["search"] = search.into(); }
-    if let Some(dns1) = dns1 { config["dns1"] = dns1.into(); }
-    if let Some(dns2) = dns2 { config["dns2"] = dns2.into(); }
-    if let Some(dns3) = dns3 { config["dns3"] = dns3.into(); }
+    if let Some(search) = search {
+        config["search"] = search.into();
+    }
+    if let Some(dns1) = dns1 {
+        config["dns1"] = dns1.into();
+    }
+    if let Some(dns2) = dns2 {
+        config["dns2"] = dns2.into();
+    }
+    if let Some(dns3) = dns3 {
+        config["dns3"] = dns3.into();
+    }
 
     let mut data = String::new();
 
@@ -219,7 +234,6 @@ pub fn get_dns(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     read_etc_resolv_conf()
 }
 
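Note the control-flow expansion above: default rustfmt never keeps a braced statement body inline, so the one-line nameserver check becomes a full block. The tool only reformats the existing token stream, which is why the stray semicolon after the block survives:

// before
if nscount > 3 { continue };

// after
if nscount > 3 {
    continue;
};
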
index 8a4a832c0451a428815f293da40a3c873d091b31..1bedc8dc916de39b21821564ed409e26765be2b2 100644 (file)
@@ -1,10 +1,10 @@
 use std::process::{Command, Stdio};
 
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
-use std::io::{BufRead,BufReader};
+use std::io::{BufRead, BufReader};
 
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
 use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT};
@@ -69,7 +69,6 @@ fn get_journal(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let mut args = vec![];
 
     if let Some(lastentries) = lastentries {
@@ -127,5 +126,4 @@ fn get_journal(
     Ok(json!(lines))
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_JOURNAL);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_JOURNAL);
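The router constant shows the opposite direction: a builder chain short enough for the width limit is collapsed onto a single line:

// before
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_JOURNAL);

// after
pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_JOURNAL);
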
index 8c39afc8c939e533615ae2441a2319a2a3a899ce..91f85f625ca2748061eb7a86b4c9563f01bae292 100644 (file)
@@ -12,23 +12,23 @@ use hyper::Request;
 use serde_json::{json, Value};
 use tokio::io::{AsyncBufReadExt, BufReader};
 
-use proxmox_sys::sortable;
 use proxmox_sys::fd::fd_change_cloexec;
+use proxmox_sys::sortable;
 
+use proxmox_http::websocket::WebSocket;
+use proxmox_router::list_subdirs_api_method;
 use proxmox_router::{
-    ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment, Router, SubdirMap,
+    ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::*;
-use proxmox_router::list_subdirs_api_method;
-use proxmox_http::websocket::WebSocket;
 
 use proxmox_rest_server::WorkerTask;
 
 use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_CONSOLE};
 use pbs_tools::ticket::{self, Empty, Ticket};
 
-use crate::tools;
 use crate::auth_helpers::private_auth_key;
+use crate::tools;
 
 pub mod apt;
 pub mod certificates;
@@ -303,7 +303,7 @@ fn upgrade_to_websocket(
                 .map_err(Error::from)
                 .await
             {
-               Ok(upgraded) => upgraded,
+                Ok(upgraded) => upgraded,
                 _ => bail!("error"),
             };
 
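Besides sorting names within braces, rustfmt also orders whole use statements within a contiguous block, which is what shuffled the proxmox_sys imports here:

// before
use proxmox_sys::sortable;
use proxmox_sys::fd::fd_change_cloexec;

// after: statements sorted by path, so fd precedes sortable
use proxmox_sys::fd::fd_change_cloexec;
use proxmox_sys::sortable;
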
index 4cb0d7b91d0f402d32011f0fd782a69108e00da5..fd75d4dee713c3036745a6055789993e5779a727 100644 (file)
@@ -1,16 +1,16 @@
-use anyhow::{Error, bail};
-use serde::{Deserialize, Serialize};
-use serde_json::{Value, to_value};
+use anyhow::{bail, Error};
 use hex::FromHex;
+use serde::{Deserialize, Serialize};
+use serde_json::{to_value, Value};
 
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
 use pbs_api_types::{
-    Authid, Interface, NetworkInterfaceType, LinuxBondMode, NetworkConfigMethod, BondXmitHashPolicy,
+    Authid, BondXmitHashPolicy, Interface, LinuxBondMode, NetworkConfigMethod,
+    NetworkInterfaceType, CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA,
     NETWORK_INTERFACE_ARRAY_SCHEMA, NETWORK_INTERFACE_LIST_SCHEMA, NETWORK_INTERFACE_NAME_SCHEMA,
-    CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
-    NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
 };
 use pbs_config::network::{self, NetworkConfig};
 
@@ -18,41 +18,57 @@ use proxmox_rest_server::WorkerTask;
 
 fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
     let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
-    Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
+    Ok(value
+        .as_array()
+        .unwrap()
+        .iter()
+        .map(|v| v.as_str().unwrap().to_string())
+        .collect())
 }
 
 fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
-
-    let current_gateway_v4 = config.interfaces.iter()
+    let current_gateway_v4 = config
+        .interfaces
+        .iter()
         .find(|(_, interface)| interface.gateway.is_some())
         .map(|(name, _)| name.to_string());
 
     if let Some(current_gateway_v4) = current_gateway_v4 {
         if current_gateway_v4 != iface {
-            bail!("Default IPv4 gateway already exists on interface '{}'", current_gateway_v4);
+            bail!(
+                "Default IPv4 gateway already exists on interface '{}'",
+                current_gateway_v4
+            );
         }
     }
     Ok(())
 }
 
 fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
-
-    let current_gateway_v6 = config.interfaces.iter()
+    let current_gateway_v6 = config
+        .interfaces
+        .iter()
         .find(|(_, interface)| interface.gateway6.is_some())
         .map(|(name, _)| name.to_string());
 
     if let Some(current_gateway_v6) = current_gateway_v6 {
         if current_gateway_v6 != iface {
-            bail!("Default IPv6 gateway already exists on interface '{}'", current_gateway_v6);
+            bail!(
+                "Default IPv6 gateway already exists on interface '{}'",
+                current_gateway_v6
+            );
         }
     }
     Ok(())
 }
 
-
 fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Error> {
     if iface.interface_type != NetworkInterfaceType::Bridge {
-        bail!("interface '{}' is no bridge (type is {:?})", iface.name, iface.interface_type);
+        bail!(
+            "interface '{}' is no bridge (type is {:?})",
+            iface.name,
+            iface.interface_type
+        );
     }
     iface.bridge_ports = Some(ports);
     Ok(())
@@ -60,7 +76,11 @@ fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Err
 
 fn set_bond_slaves(iface: &mut Interface, slaves: Vec<String>) -> Result<(), Error> {
     if iface.interface_type != NetworkInterfaceType::Bond {
-        bail!("interface '{}' is no bond (type is {:?})", iface.name, iface.interface_type);
+        bail!(
+            "interface '{}' is no bond (type is {:?})",
+            iface.name,
+            iface.interface_type
+        );
     }
     iface.slaves = Some(slaves);
     Ok(())
@@ -91,14 +111,15 @@ pub fn list_network_devices(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let (config, digest) = network::config()?;
     let digest = hex::encode(&digest);
 
     let mut list = Vec::new();
 
     for (iface, interface) in config.interfaces.iter() {
-        if iface == "lo" { continue; } // do not list lo
+        if iface == "lo" {
+            continue;
+        } // do not list lo
         let mut item: Value = to_value(interface)?;
         item["digest"] = digest.clone().into();
         item["iface"] = iface.to_string().into();
@@ -131,7 +152,6 @@ pub fn list_network_devices(
 )]
 /// Read a network interface configuration.
 pub fn read_interface(iface: String) -> Result<Value, Error> {
-
     let (config, digest) = network::config()?;
 
     let interface = config.lookup(&iface)?;
@@ -142,7 +162,6 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
     Ok(data)
 }
 
-
 #[api(
     protected: true,
     input: {
@@ -256,7 +275,6 @@ pub fn create_interface(
     slaves: Option<String>,
     param: Value,
 ) -> Result<(), Error> {
-
     let interface_type = pbs_tools::json::required_string_param(&param, "type")?;
     let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
 
@@ -271,35 +289,55 @@ pub fn create_interface(
     let mut interface = Interface::new(iface.clone());
     interface.interface_type = interface_type;
 
-    if let Some(autostart) = autostart { interface.autostart = autostart; }
-    if method.is_some() { interface.method = method; }
-    if method6.is_some() { interface.method6 = method6; }
-    if mtu.is_some() { interface.mtu = mtu; }
-    if comments.is_some() { interface.comments = comments; }
-    if comments6.is_some() { interface.comments6 = comments6; }
+    if let Some(autostart) = autostart {
+        interface.autostart = autostart;
+    }
+    if method.is_some() {
+        interface.method = method;
+    }
+    if method6.is_some() {
+        interface.method6 = method6;
+    }
+    if mtu.is_some() {
+        interface.mtu = mtu;
+    }
+    if comments.is_some() {
+        interface.comments = comments;
+    }
+    if comments6.is_some() {
+        interface.comments6 = comments6;
+    }
 
     if let Some(cidr) = cidr {
         let (_, _, is_v6) = network::parse_cidr(&cidr)?;
-        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
+        if is_v6 {
+            bail!("invalid address type (expected IPv4, got IPv6)");
+        }
         interface.cidr = Some(cidr);
     }
 
     if let Some(cidr6) = cidr6 {
         let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
-        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
+        if !is_v6 {
+            bail!("invalid address type (expected IPv6, got IPv4)");
+        }
         interface.cidr6 = Some(cidr6);
     }
 
     if let Some(gateway) = gateway {
         let is_v6 = gateway.contains(':');
-        if is_v6 {  bail!("invalid address type (expected IPv4, got IPv6)"); }
+        if is_v6 {
+            bail!("invalid address type (expected IPv4, got IPv6)");
+        }
         check_duplicate_gateway_v4(&config, &iface)?;
         interface.gateway = Some(gateway);
     }
 
     if let Some(gateway6) = gateway6 {
         let is_v6 = gateway6.contains(':');
-        if !is_v6 {  bail!("invalid address type (expected IPv6, got IPv4)"); }
+        if !is_v6 {
+            bail!("invalid address type (expected IPv6, got IPv4)");
+        }
         check_duplicate_gateway_v6(&config, &iface)?;
         interface.gateway6 = Some(gateway6);
     }
@@ -310,7 +348,9 @@ pub fn create_interface(
                 let ports = split_interface_list(&ports)?;
                 set_bridge_ports(&mut interface, ports)?;
             }
-            if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
+            if bridge_vlan_aware.is_some() {
+                interface.bridge_vlan_aware = bridge_vlan_aware;
+            }
         }
         NetworkInterfaceType::Bond => {
             if let Some(mode) = bond_mode {
@@ -322,9 +362,7 @@ pub fn create_interface(
                     interface.bond_primary = bond_primary;
                 }
                 if bond_xmit_hash_policy.is_some() {
-                    if mode != LinuxBondMode::ieee802_3ad &&
-                       mode != LinuxBondMode::balance_xor
-                    {
+                    if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
                         bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
                     }
                     interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
@@ -335,7 +373,10 @@ pub fn create_interface(
                 set_bond_slaves(&mut interface, slaves)?;
             }
         }
-        _ => bail!("creating network interface type '{:?}' is not supported", interface_type),
+        _ => bail!(
+            "creating network interface type '{:?}' is not supported",
+            interface_type
+        ),
     }
 
     if interface.cidr.is_some() || interface.gateway.is_some() {
@@ -395,7 +436,6 @@ pub enum DeletableProperty {
     bond_xmit_hash_policy,
 }
 
-
 #[api(
     protected: true,
     input: {
@@ -523,7 +563,6 @@ pub fn update_interface(
     digest: Option<String>,
     param: Value,
 ) -> Result<(), Error> {
-
     let _lock = network::lock_config()?;
 
     let (mut config, expected_digest) = network::config()?;
@@ -533,49 +572,95 @@ pub fn update_interface(
         crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
     }
 
-    if gateway.is_some() { check_duplicate_gateway_v4(&config, &iface)?; }
-    if gateway6.is_some() { check_duplicate_gateway_v6(&config, &iface)?; }
+    if gateway.is_some() {
+        check_duplicate_gateway_v4(&config, &iface)?;
+    }
+    if gateway6.is_some() {
+        check_duplicate_gateway_v6(&config, &iface)?;
+    }
 
     let interface = config.lookup_mut(&iface)?;
 
     if let Some(interface_type) = param.get("type") {
         let interface_type = NetworkInterfaceType::deserialize(interface_type)?;
-        if  interface_type != interface.interface_type {
-            bail!("got unexpected interface type ({:?} != {:?})", interface_type, interface.interface_type);
+        if interface_type != interface.interface_type {
+            bail!(
+                "got unexpected interface type ({:?} != {:?})",
+                interface_type,
+                interface.interface_type
+            );
         }
     }
 
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
-                DeletableProperty::cidr => { interface.cidr = None; },
-                DeletableProperty::cidr6 => { interface.cidr6 = None; },
-                DeletableProperty::gateway => { interface.gateway = None; },
-                DeletableProperty::gateway6 => { interface.gateway6 = None; },
-                DeletableProperty::method => { interface.method = None; },
-                DeletableProperty::method6 => { interface.method6 = None; },
-                DeletableProperty::comments => { interface.comments = None; },
-                DeletableProperty::comments6 => { interface.comments6 = None; },
-                DeletableProperty::mtu => { interface.mtu = None; },
-                DeletableProperty::autostart => { interface.autostart = false; },
-                DeletableProperty::bridge_ports => { set_bridge_ports(interface, Vec::new())?; }
-                DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
-                DeletableProperty::slaves => { set_bond_slaves(interface, Vec::new())?; }
-                DeletableProperty::bond_primary => { interface.bond_primary = None; }
-                DeletableProperty::bond_xmit_hash_policy => { interface.bond_xmit_hash_policy = None }
+                DeletableProperty::cidr => {
+                    interface.cidr = None;
+                }
+                DeletableProperty::cidr6 => {
+                    interface.cidr6 = None;
+                }
+                DeletableProperty::gateway => {
+                    interface.gateway = None;
+                }
+                DeletableProperty::gateway6 => {
+                    interface.gateway6 = None;
+                }
+                DeletableProperty::method => {
+                    interface.method = None;
+                }
+                DeletableProperty::method6 => {
+                    interface.method6 = None;
+                }
+                DeletableProperty::comments => {
+                    interface.comments = None;
+                }
+                DeletableProperty::comments6 => {
+                    interface.comments6 = None;
+                }
+                DeletableProperty::mtu => {
+                    interface.mtu = None;
+                }
+                DeletableProperty::autostart => {
+                    interface.autostart = false;
+                }
+                DeletableProperty::bridge_ports => {
+                    set_bridge_ports(interface, Vec::new())?;
+                }
+                DeletableProperty::bridge_vlan_aware => {
+                    interface.bridge_vlan_aware = None;
+                }
+                DeletableProperty::slaves => {
+                    set_bond_slaves(interface, Vec::new())?;
+                }
+                DeletableProperty::bond_primary => {
+                    interface.bond_primary = None;
+                }
+                DeletableProperty::bond_xmit_hash_policy => interface.bond_xmit_hash_policy = None,
             }
         }
     }
 
-    if let Some(autostart) = autostart { interface.autostart = autostart; }
-    if method.is_some() { interface.method = method; }
-    if method6.is_some() { interface.method6 = method6; }
-    if mtu.is_some() { interface.mtu = mtu; }
+    if let Some(autostart) = autostart {
+        interface.autostart = autostart;
+    }
+    if method.is_some() {
+        interface.method = method;
+    }
+    if method6.is_some() {
+        interface.method6 = method6;
+    }
+    if mtu.is_some() {
+        interface.mtu = mtu;
+    }
     if let Some(ports) = bridge_ports {
         let ports = split_interface_list(&ports)?;
         set_bridge_ports(interface, ports)?;
     }
-    if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
+    if bridge_vlan_aware.is_some() {
+        interface.bridge_vlan_aware = bridge_vlan_aware;
+    }
     if let Some(slaves) = slaves {
         let slaves = split_interface_list(&slaves)?;
         set_bond_slaves(interface, slaves)?;
@@ -589,9 +674,7 @@ pub fn update_interface(
             interface.bond_primary = bond_primary;
         }
         if bond_xmit_hash_policy.is_some() {
-            if mode != LinuxBondMode::ieee802_3ad &&
-               mode != LinuxBondMode::balance_xor
-            {
+            if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
                 bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
             }
             interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
@@ -600,30 +683,42 @@ pub fn update_interface(
 
     if let Some(cidr) = cidr {
         let (_, _, is_v6) = network::parse_cidr(&cidr)?;
-        if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
+        if is_v6 {
+            bail!("invalid address type (expected IPv4, got IPv6)");
+        }
         interface.cidr = Some(cidr);
     }
 
     if let Some(cidr6) = cidr6 {
         let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
-        if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
+        if !is_v6 {
+            bail!("invalid address type (expected IPv6, got IPv4)");
+        }
         interface.cidr6 = Some(cidr6);
     }
 
     if let Some(gateway) = gateway {
         let is_v6 = gateway.contains(':');
-        if is_v6 {  bail!("invalid address type (expected IPv4, got IPv6)"); }
+        if is_v6 {
+            bail!("invalid address type (expected IPv4, got IPv6)");
+        }
         interface.gateway = Some(gateway);
     }
 
     if let Some(gateway6) = gateway6 {
         let is_v6 = gateway6.contains(':');
-        if !is_v6 {  bail!("invalid address type (expected IPv6, got IPv4)"); }
+        if !is_v6 {
+            bail!("invalid address type (expected IPv6, got IPv4)");
+        }
         interface.gateway6 = Some(gateway6);
     }
 
-    if comments.is_some() { interface.comments = comments; }
-    if comments6.is_some() { interface.comments6 = comments6; }
+    if comments.is_some() {
+        interface.comments = comments;
+    }
+    if comments6.is_some() {
+        interface.comments6 = comments6;
+    }
 
     if interface.cidr.is_some() || interface.gateway.is_some() {
         interface.method = Some(NetworkConfigMethod::Static);
@@ -696,21 +791,26 @@ pub fn delete_interface(iface: String, digest: Option<String>) -> Result<(), Err
     },
 )]
 /// Reload network configuration (requires ifupdown2).
-pub async fn reload_network_config(
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<String, Error> {
-
+pub async fn reload_network_config(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
     network::assert_ifupdown2_installed()?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id.to_string(), true, |_worker| async {
-
-        let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
-
-        network::network_reload()?;
-        Ok(())
-    })?;
+    let upid_str = WorkerTask::spawn(
+        "srvreload",
+        Some(String::from("networking")),
+        auth_id.to_string(),
+        true,
+        |_worker| async {
+            let _ = std::fs::rename(
+                network::NETWORK_INTERFACES_NEW_FILENAME,
+                network::NETWORK_INTERFACES_FILENAME,
+            );
+
+            network::network_reload()?;
+            Ok(())
+        },
+    )?;
 
     Ok(upid_str)
 }
@@ -730,7 +830,6 @@ pub async fn reload_network_config(
 )]
 /// Revert network configuration (rm /etc/network/interfaces.new).
 pub fn revert_network_config() -> Result<(), Error> {
-
     let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME);
 
     Ok(())
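A pattern that recurs throughout this file is chain layout: once a method chain has to wrap, rustfmt puts every link on its own line, including the plain field accesses at the head of the chain:

// before: partially wrapped chain
let current_gateway_v4 = config.interfaces.iter()
    .find(|(_, interface)| interface.gateway.is_some())
    .map(|(name, _)| name.to_string());

// after: one link per line
let current_gateway_v4 = config
    .interfaces
    .iter()
    .find(|(_, interface)| interface.gateway.is_some())
    .map(|(name, _)| name.to_string());
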
index bfea05861e98ecfdcf5d2e8bd15f8ff611fa7b0b..a3d2cea1eddfac370dc15bfd81e2cc9c0b3652ee 100644 (file)
@@ -33,5 +33,4 @@ fn get_report(
     Ok(json!(generate_report()))
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_REPORT);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_REPORT);
index 381805afa122932139d5848e7680654b7e0ca090..55e6099dee1740b83759c2942c119765348a0bd8 100644 (file)
@@ -1,13 +1,11 @@
 use anyhow::{bail, Error};
-use serde_json::{Value, json};
+use serde_json::{json, Value};
 use std::collections::BTreeMap;
 
 use proxmox_router::{Permission, Router};
 use proxmox_schema::api;
 
-use pbs_api_types::{
-    NODE_SCHEMA, RRDMode, RRDTimeFrame, PRIV_SYS_AUDIT,
-};
+use pbs_api_types::{RRDMode, RRDTimeFrame, NODE_SCHEMA, PRIV_SYS_AUDIT};
 
 use crate::rrd_cache::extract_rrd_data;
 
@@ -17,7 +15,6 @@ pub fn create_value_from_rrd(
     timeframe: RRDTimeFrame,
     mode: RRDMode,
 ) -> Result<Value, Error> {
-
     let mut result: Vec<Value> = Vec::new();
 
     let mut timemap = BTreeMap::new();
@@ -30,9 +27,13 @@ pub fn create_value_from_rrd(
             None => continue,
         };
 
-        if let Some(expected_resolution) = last_resolution  {
+        if let Some(expected_resolution) = last_resolution {
             if reso != expected_resolution {
-                bail!("got unexpected RRD resolution ({} != {})", reso, expected_resolution);
+                bail!(
+                    "got unexpected RRD resolution ({} != {})",
+                    reso,
+                    expected_resolution
+                );
             }
         } else {
             last_resolution = Some(reso);
@@ -75,29 +76,30 @@ pub fn create_value_from_rrd(
     },
 )]
 /// Read node stats
-fn get_node_stats(
-    timeframe: RRDTimeFrame,
-    cf: RRDMode,
-    _param: Value,
-) -> Result<Value, Error> {
-
+fn get_node_stats(timeframe: RRDTimeFrame, cf: RRDMode, _param: Value) -> Result<Value, Error> {
     create_value_from_rrd(
         "host",
         &[
-            "cpu", "iowait",
-            "memtotal", "memused",
-            "swaptotal", "swapused",
-            "netin", "netout",
+            "cpu",
+            "iowait",
+            "memtotal",
+            "memused",
+            "swaptotal",
+            "swapused",
+            "netin",
+            "netout",
             "loadavg",
-            "total", "used",
-            "read_ios", "read_bytes",
-            "write_ios", "write_bytes",
+            "total",
+            "used",
+            "read_ios",
+            "read_bytes",
+            "write_ios",
+            "write_bytes",
             "io_ticks",
-         ],
+        ],
         timeframe,
         cf,
     )
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_NODE_STATS);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_NODE_STATS);
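The stats array above shows rustfmt's list heuristics: it tends to pack several array elements per line only when all of them are short, and with longer literals such as "write_bytes" in the mix the whole array is laid out vertically, one element per line (abridged):

// before: elements grouped by hand
&[
    "cpu", "iowait",
    "memtotal", "memused",
    // ...
    "io_ticks",
],

// after: one element per line
&[
    "cpu",
    "iowait",
    "memtotal",
    "memused",
    // ...
    "io_ticks",
],
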
index 9f08b85a62d59f715fe3090c31bd6597cc409909..0deb45627a2565fc6569211f007aa3835adf7c3e 100644 (file)
@@ -3,11 +3,11 @@ use std::process::{Command, Stdio};
 use anyhow::{bail, Error};
 use serde_json::{json, Value};
 
-use proxmox_sys::sortable;
-use proxmox_router::{list_subdirs_api_method, Router, Permission, RpcEnvironment, SubdirMap};
+use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 
-use pbs_api_types::{Authid, NODE_SCHEMA, SERVICE_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SERVICE_ID_SCHEMA};
 
 use proxmox_rest_server::WorkerTask;
 
@@ -22,7 +22,6 @@ static SERVICE_NAME_LIST: [&str; 7] = [
 ];
 
 pub fn real_service_name(service: &str) -> &str {
-
     // since postfix package 3.1.0-3.1 the postfix unit is only here
     // to manage subinstances, of which the default is called "-".
     // This is where we look for the daemon status
@@ -35,7 +34,6 @@ pub fn real_service_name(service: &str) -> &str {
 }
 
 fn get_full_service_state(service: &str) -> Result<Value, Error> {
-
     let real_service_name = real_service_name(service);
 
     let mut child = Command::new("systemctl")
@@ -43,7 +41,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
         .stdout(Stdio::piped())
         .spawn()?;
 
-    use std::io::{BufRead,BufReader};
+    use std::io::{BufRead, BufReader};
 
     let mut result = json!({});
 
@@ -76,7 +74,6 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
 }
 
 fn json_service_state(service: &str, status: Value) -> Value {
-
     if let Some(desc) = status["Description"].as_str() {
         let name = status["Name"].as_str().unwrap_or(service);
         let state = status["SubState"].as_str().unwrap_or("unknown");
@@ -128,10 +125,7 @@ fn json_service_state(service: &str, status: Value) -> Value {
     },
 )]
 /// Service list.
-fn list_services(
-    _param: Value,
-) -> Result<Value, Error> {
-
+fn list_services(_param: Value) -> Result<Value, Error> {
     let mut list = vec![];
 
     for service in &SERVICE_NAME_LIST {
@@ -165,11 +159,7 @@ fn list_services(
     },
 )]
 /// Read service properties.
-fn get_service_state(
-    service: String,
-    _param: Value,
-) -> Result<Value, Error> {
-
+fn get_service_state(service: String, _param: Value) -> Result<Value, Error> {
     let service = service.as_str();
 
     if !SERVICE_NAME_LIST.contains(&service) {
@@ -182,11 +172,10 @@ fn get_service_state(
 }
 
 fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
-
     let workerid = format!("srv{}", &cmd);
 
     let cmd = match cmd {
-        "start"|"stop"|"restart"=> cmd.to_string(),
+        "start" | "stop" | "restart" => cmd.to_string(),
         "reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
         _ => bail!("unknown service command '{}'", cmd),
     };
@@ -198,9 +187,12 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu
         auth_id.to_string(),
         false,
         move |_worker| {
-
             if service == "proxmox-backup" && cmd == "stop" {
-                bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd);
+                bail!(
+                    "invalid service cmd '{} {}' cannot stop essential service!",
+                    service,
+                    cmd
+                );
             }
 
             let real_service_name = real_service_name(&service);
@@ -214,7 +206,7 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu
             }
 
             Ok(())
-        }
+        },
     )?;
 
     Ok(upid.into())
@@ -242,7 +234,6 @@ fn start_service(
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     log::info!("starting service {}", service);
@@ -271,8 +262,7 @@ fn stop_service(
     service: String,
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
- ) -> Result<Value, Error> {
-
+) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     log::info!("stopping service {}", service);
@@ -302,7 +292,6 @@ fn restart_service(
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     log::info!("re-starting service {}", service);
@@ -337,7 +326,6 @@ fn reload_service(
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
     log::info!("reloading service {}", service);
@@ -347,26 +335,11 @@ fn reload_service(
 
 #[sortable]
 const SERVICE_SUBDIRS: SubdirMap = &sorted!([
-    (
-        "reload", &Router::new()
-            .post(&API_METHOD_RELOAD_SERVICE)
-    ),
-    (
-        "restart", &Router::new()
-            .post(&API_METHOD_RESTART_SERVICE)
-    ),
-    (
-        "start", &Router::new()
-            .post(&API_METHOD_START_SERVICE)
-    ),
-    (
-        "state", &Router::new()
-            .get(&API_METHOD_GET_SERVICE_STATE)
-    ),
-    (
-        "stop", &Router::new()
-            .post(&API_METHOD_STOP_SERVICE)
-    ),
+    ("reload", &Router::new().post(&API_METHOD_RELOAD_SERVICE)),
+    ("restart", &Router::new().post(&API_METHOD_RESTART_SERVICE)),
+    ("start", &Router::new().post(&API_METHOD_START_SERVICE)),
+    ("state", &Router::new().get(&API_METHOD_GET_SERVICE_STATE)),
+    ("stop", &Router::new().post(&API_METHOD_STOP_SERVICE)),
 ]);
 
 const SERVICE_ROUTER: Router = Router::new()
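The subdir map shrinks for the converse reason: each (path, router) tuple now fits within the line width, so the hand-wrapped four-line form collapses to one line per entry:

// before
(
    "reload", &Router::new()
        .post(&API_METHOD_RELOAD_SERVICE)
),

// after
("reload", &Router::new().post(&API_METHOD_RELOAD_SERVICE)),
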
index 9559dda630d488ad28a347b27df8ff3de14d7e73..da394625b3239bb9095a3e15223b1a4cd5625915 100644 (file)
@@ -1,18 +1,18 @@
-use std::process::Command;
 use std::path::Path;
+use std::process::Command;
 
-use anyhow::{Error, format_err, bail};
+use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
 use proxmox_sys::linux::procfs;
 
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
-use pbs_api_types::{NODE_SCHEMA, NodePowerCommand, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
+use pbs_api_types::{NodePowerCommand, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
 
 use crate::api2::types::{
-    NodeCpuInformation, NodeStatus, NodeMemoryCounters, NodeSwapCounters, NodeInformation,
+    NodeCpuInformation, NodeInformation, NodeMemoryCounters, NodeStatus, NodeSwapCounters,
 };
 
 impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation {
@@ -111,7 +111,6 @@ fn get_status(
 )]
 /// Reboot or shutdown the node.
 fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
-
     let systemctl_command = match command {
         NodePowerCommand::Reboot => "reboot",
         NodePowerCommand::Shutdown => "poweroff",
@@ -126,7 +125,13 @@ fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
         match output.status.code() {
             Some(code) => {
                 let msg = String::from_utf8(output.stderr)
-                    .map(|m| if m.is_empty() { String::from("no error message") } else { m })
+                    .map(|m| {
+                        if m.is_empty() {
+                            String::from("no error message")
+                        } else {
+                            m
+                        }
+                    })
                     .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
                 bail!("diff failed with status code: {} - {}", code, msg);
             }
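The closure hunk above is the width rule applied inside an iterator adaptor: an if/else expression that no longer fits on the .map() line is expanded into a block body:

// before
let msg = String::from_utf8(output.stderr)
    .map(|m| if m.is_empty() { String::from("no error message") } else { m })
    .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));

// after
let msg = String::from_utf8(output.stderr)
    .map(|m| {
        if m.is_empty() {
            String::from("no error message")
        } else {
            m
        }
    })
    .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
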
index 5fa38033ed967756673b35f05e5ec8ed7abf9d29..3bd2da44f198aa9098e74cccfe2a548bb7d5f483 100644 (file)
@@ -1,16 +1,15 @@
-use anyhow::{Error, format_err, bail};
+use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
 use pbs_api_types::{
-    NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid,
-    PRIV_SYS_AUDIT,PRIV_SYS_MODIFY,
+    Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SUBSCRIPTION_KEY_SCHEMA,
 };
 
 use crate::tools;
-use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
+use crate::tools::subscription::{self, SubscriptionInfo, SubscriptionStatus};
 use pbs_config::CachedUserInfo;
 
 #[api(
@@ -33,9 +32,7 @@ use pbs_config::CachedUserInfo;
     },
 )]
 /// Check and update subscription status.
-pub fn check_subscription(
-    force: bool,
-) -> Result<(), Error> {
+pub fn check_subscription(force: bool) -> Result<(), Error> {
     let info = match subscription::read_subscription() {
         Err(err) => bail!("could not read subscription status: {}", err),
         Ok(Some(info)) => info,
@@ -93,7 +90,7 @@ pub fn get_subscription(
             status: SubscriptionStatus::NOTFOUND,
             message: Some("There is no subscription key".into()),
             serverid: Some(tools::get_hardware_address()?),
-            url:  Some(url.into()),
+            url: Some(url.into()),
             ..Default::default()
         },
     };
@@ -132,10 +129,7 @@ pub fn get_subscription(
     },
 )]
 /// Set a subscription key and check it.
-pub fn set_subscription(
-    key: String,
-) -> Result<(), Error> {
-
+pub fn set_subscription(key: String) -> Result<(), Error> {
     let server_id = tools::get_hardware_address()?;
 
     let info = subscription::check_subscription(key, server_id)?;
@@ -161,7 +155,6 @@ pub fn set_subscription(
 )]
 /// Delete subscription info.
 pub fn delete_subscription() -> Result<(), Error> {
-
     subscription::delete_subscription()
         .map_err(|err| format_err!("Deleting subscription failed: {}", err))?;
 
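Function signatures get the same treatment as builder chains: a parameter list that fits on one line is joined, as with check_subscription and set_subscription above:

// before
pub fn check_subscription(
    force: bool,
) -> Result<(), Error> {

// after
pub fn check_subscription(force: bool) -> Result<(), Error> {
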
index 443a8b75a336e2be64425ab1216bc4ecbe246667..e0109e821647b5afeb1148705b7e72384ebf6425 100644 (file)
@@ -1,12 +1,12 @@
 use std::process::{Command, Stdio};
 
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
 
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
 
-use pbs_api_types::{NODE_SCHEMA, SYSTEMD_DATETIME_FORMAT, PRIV_SYS_AUDIT};
+use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT, SYSTEMD_DATETIME_FORMAT};
 
 fn dump_journal(
     start: Option<u64>,
@@ -15,12 +15,17 @@ fn dump_journal(
     until: Option<&str>,
     service: Option<&str>,
 ) -> Result<(u64, Vec<Value>), Error> {
-
     let mut args = vec!["-o", "short", "--no-pager"];
 
-    if let Some(service) = service { args.extend(&["--unit", service]); }
-    if let Some(since) = since { args.extend(&["--since", since]); }
-    if let Some(until) = until { args.extend(&["--until", until]); }
+    if let Some(service) = service {
+        args.extend(&["--unit", service]);
+    }
+    if let Some(since) = since {
+        args.extend(&["--since", since]);
+    }
+    if let Some(until) = until {
+        args.extend(&["--until", until]);
+    }
 
     let mut lines: Vec<Value> = vec![];
     let mut limit = limit.unwrap_or(50);
@@ -32,15 +37,19 @@ fn dump_journal(
         .stdout(Stdio::piped())
         .spawn()?;
 
-    use std::io::{BufRead,BufReader};
+    use std::io::{BufRead, BufReader};
 
     if let Some(ref mut stdout) = child.stdout {
         for line in BufReader::new(stdout).lines() {
             match line {
                 Ok(line) => {
                     count += 1;
-                    if count < start { continue };
-                   if limit == 0 { continue };
+                    if count < start {
+                        continue;
+                    };
+                    if limit == 0 {
+                        continue;
+                    };
 
                     lines.push(json!({ "n": count, "t": line }));
 
@@ -64,7 +73,7 @@ fn dump_journal(
     // so we add a line
     if count == 0 {
         count += 1;
-       lines.push(json!({ "n": count, "t": "no content"}));
+        lines.push(json!({ "n": count, "t": "no content"}));
     }
 
     Ok((count, lines))
@@ -133,21 +142,21 @@ fn get_syslog(
     _info: &ApiMethod,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
-    let service = param["service"].as_str().map(|service| crate::api2::node::services::real_service_name(service));
+    let service = param["service"]
+        .as_str()
+        .map(|service| crate::api2::node::services::real_service_name(service));
 
     let (count, lines) = dump_journal(
         param["start"].as_u64(),
         param["limit"].as_u64(),
         param["since"].as_str(),
         param["until"].as_str(),
-        service)?;
+        service,
+    )?;
 
     rpcenv["total"] = Value::from(count);
 
     Ok(json!(lines))
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_GET_SYSLOG);
-
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_SYSLOG);
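Two hunks above are pure indentation repair: lines that had drifted (likely a leftover tab before lines.push, an off-by-one indent on the limit check) are re-aligned, since rustfmt normalizes all indentation to four spaces per level:

// before: second line misaligned
count += 1;
   lines.push(json!({ "n": count, "t": "no content"}));

// after: re-indented to the enclosing block level
count += 1;
lines.push(json!({ "n": count, "t": "no content"}));
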
index b8046f1b301f14d9a41c938e38c045995822af40..a0c30cca645ddd34b8b74ab5b8ff3d34de2d43fc 100644 (file)
@@ -4,21 +4,20 @@ use std::io::{BufRead, BufReader};
 use anyhow::{bail, Error};
 use serde_json::{json, Value};
 
-use proxmox_sys::sortable;
-use proxmox_router::{list_subdirs_api_method, Router, RpcEnvironment, Permission, SubdirMap};
+use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
 use proxmox_schema::api;
+use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Userid, Authid, Tokenname, TaskListItem, TaskStateType, UPID,
-    NODE_SCHEMA, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX,
-    SYNC_JOB_WORKER_ID_REGEX, DATASTORE_SCHEMA,
+    Authid, TaskListItem, TaskStateType, Tokenname, Userid, DATASTORE_SCHEMA, NODE_SCHEMA,
     PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_VERIFY, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+    SYNC_JOB_WORKER_ID_REGEX, UPID, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX,
 };
 
 use crate::api2::pull::check_pull_privs;
 
-use proxmox_rest_server::{upid_log_path, upid_read_status, TaskState, TaskListInfoIterator};
 use pbs_config::CachedUserInfo;
+use proxmox_rest_server::{upid_log_path, upid_read_status, TaskListInfoIterator, TaskState};
 
 // matches respective job execution privileges
 fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
@@ -26,13 +25,15 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
         ("verificationjob", Some(workerid)) => {
             if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
                 if let Some(store) = captures.get(1) {
-                    return user_info.check_privs(auth_id,
-                                                 &["datastore", store.as_str()],
-                                                 PRIV_DATASTORE_VERIFY,
-                                                 true);
+                    return user_info.check_privs(
+                        auth_id,
+                        &["datastore", store.as_str()],
+                        PRIV_DATASTORE_VERIFY,
+                        true,
+                    );
                 }
             }
-        },
+        }
         ("syncjob", Some(workerid)) => {
             if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
                 let remote = captures.get(1);
@@ -40,29 +41,34 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
                 let local_store = captures.get(3);
 
                 if let (Some(remote), Some(remote_store), Some(local_store)) =
-                    (remote, remote_store, local_store) {
-
-                    return check_pull_privs(auth_id,
-                                            local_store.as_str(),
-                                            remote.as_str(),
-                                            remote_store.as_str(),
-                                            false);
+                    (remote, remote_store, local_store)
+                {
+                    return check_pull_privs(
+                        auth_id,
+                        local_store.as_str(),
+                        remote.as_str(),
+                        remote_store.as_str(),
+                        false,
+                    );
                 }
             }
-        },
+        }
         ("garbage_collection", Some(workerid)) => {
-            return user_info.check_privs(auth_id,
-                                         &["datastore", workerid],
-                                         PRIV_DATASTORE_MODIFY,
-                                         true)
-        },
+            return user_info.check_privs(
+                auth_id,
+                &["datastore", workerid],
+                PRIV_DATASTORE_MODIFY,
+                true,
+            )
+        }
         ("prune", Some(workerid)) => {
-            return user_info.check_privs(auth_id,
-                                         &["datastore",
-                                         workerid],
-                                         PRIV_DATASTORE_MODIFY,
-                                         true);
-        },
+            return user_info.check_privs(
+                auth_id,
+                &["datastore", workerid],
+                PRIV_DATASTORE_MODIFY,
+                true,
+            );
+        }
         _ => bail!("not a scheduled job task"),
     };
 
@@ -102,7 +108,8 @@ fn check_job_store(upid: &UPID, store: &str) -> bool {
 fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
     let task_auth_id: Authid = upid.auth_id.parse()?;
     if auth_id == &task_auth_id
-        || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) {
+        || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id)
+    {
         // task owner can always read
         Ok(())
     } else {
@@ -111,7 +118,8 @@ fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
         // access to all tasks
         // or task == job which the user/token could have configured/manually executed
 
-        user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
+        user_info
+            .check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
             .or_else(|_| check_job_privs(auth_id, &user_info, upid))
             .or_else(|_| bail!("task access not allowed"))
     }
@@ -127,9 +135,10 @@ pub fn tasktype(state: &TaskState) -> TaskStateType {
 }
 
 fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types::TaskListItem {
-    let (endtime, status) = info
-        .state
-        .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));
+    let (endtime, status) = info.state.map_or_else(
+        || (None, None),
+        |a| (Some(a.endtime()), Some(a.to_string())),
+    );
 
     pbs_api_types::TaskListItem {
         upid: info.upid_str,
@@ -210,11 +219,7 @@ fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types
     },
 )]
 /// Get task status.
-async fn get_task_status(
-    param: Value,
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+async fn get_task_status(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
     let upid = extract_upid(&param)?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -249,7 +254,6 @@ async fn get_task_status(
 }
 
 fn extract_upid(param: &Value) -> Result<UPID, Error> {
-
     let upid_str = pbs_tools::json::required_string_param(param, "upid")?;
 
     upid_str.parse::<UPID>()
@@ -289,11 +293,7 @@ fn extract_upid(param: &Value) -> Result<UPID, Error> {
     },
 )]
 /// Read task log.
-async fn read_task_log(
-    param: Value,
-    mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+async fn read_task_log(param: Value, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
     let upid = extract_upid(&param)?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -317,8 +317,12 @@ async fn read_task_log(
         match line {
             Ok(line) => {
                 count += 1;
-                if count < start { continue };
-               if limit == 0 { continue };
+                if count < start {
+                    continue;
+                };
+                if limit == 0 {
+                    continue;
+                };
 
                 lines.push(json!({ "n": count, "t": line }));
 
@@ -359,11 +363,7 @@ async fn read_task_log(
     },
 )]
 /// Try to stop a task.
-fn stop_task(
-    param: Value,
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+fn stop_task(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
     let upid = extract_upid(&param)?;
 
     let auth_id = rpcenv.get_auth_id().unwrap();
@@ -465,7 +465,6 @@ pub fn list_tasks(
     param: Value,
     mut rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<TaskListItem>, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
     let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
@@ -475,7 +474,11 @@ pub fn list_tasks(
     let store = param["store"].as_str();
 
     let list = TaskListInfoIterator::new(running)?;
-    let limit = if limit > 0 { limit as usize } else { usize::MAX };
+    let limit = if limit > 0 {
+        limit as usize
+    } else {
+        usize::MAX
+    };
 
     let mut skipped = 0;
     let mut result: Vec<TaskListItem> = Vec::new();
@@ -510,15 +513,21 @@ pub fn list_tasks(
         }
 
         if let Some(needle) = &userfilter {
-            if !info.upid.auth_id.to_string().contains(needle) { continue; }
+            if !info.upid.auth_id.to_string().contains(needle) {
+                continue;
+            }
         }
 
         if let Some(store) = store {
-            if !check_job_store(&info.upid, store) { continue; }
+            if !check_job_store(&info.upid, store) {
+                continue;
+            }
         }
 
         if let Some(typefilter) = &typefilter {
-            if !info.upid.worker_type.contains(typefilter) { continue; }
+            if !info.upid.worker_type.contains(typefilter) {
+                continue;
+            }
         }
 
         match (&info.state, &statusfilter) {
@@ -528,9 +537,9 @@ pub fn list_tasks(
                 if !filters.contains(&tasktype(state)) {
                     continue;
                 }
-            },
+            }
             (None, Some(_)) => continue,
-            _ => {},
+            _ => {}
         }
 
         if skipped < start as usize {
@@ -546,7 +555,8 @@ pub fn list_tasks(
     }
 
     let mut count = result.len() + start as usize;
-    if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new entries beyond the limit
+    if !result.is_empty() && result.len() >= limit {
+        // we have a 'virtual' entry as long as we have any new entries beyond the limit
         count += 1;
     }
 
@@ -557,14 +567,8 @@ pub fn list_tasks(
 
 #[sortable]
 const UPID_API_SUBDIRS: SubdirMap = &sorted!([
-    (
-        "log", &Router::new()
-            .get(&API_METHOD_READ_TASK_LOG)
-    ),
-    (
-        "status", &Router::new()
-            .get(&API_METHOD_GET_TASK_STATUS)
-    )
+    ("log", &Router::new().get(&API_METHOD_READ_TASK_LOG)),
+    ("status", &Router::new().get(&API_METHOD_GET_TASK_STATUS))
 ]);
 
 pub const UPID_API_ROUTER: Router = Router::new()
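
The list_tasks hunks above clamp limit == 0 to usize::MAX and report one extra 'virtual' entry whenever the returned page is full. A std-only sketch of that start/limit paging logic (the function and its signature are illustrative, not the PBS API):

    // Page through items with list_tasks()-style semantics: limit == 0 means
    // "no limit", and a full page adds one 'virtual' entry to the count to
    // signal that more items may follow.
    fn page<T: Clone>(items: &[T], start: usize, limit: u64) -> (Vec<T>, usize) {
        let limit = if limit > 0 { limit as usize } else { usize::MAX };
        let result: Vec<T> = items.iter().skip(start).take(limit).cloned().collect();
        let mut count = result.len() + start;
        if !result.is_empty() && result.len() >= limit {
            count += 1; // full page: more entries may exist
        }
        (result, count)
    }

    fn main() {
        let items: Vec<u32> = (0..10).collect();
        let (first, count) = page(&items, 0, 4);
        assert_eq!((first.len(), count), (4, 5)); // full page -> virtual entry
        let (last, count) = page(&items, 8, 4);
        assert_eq!((last.len(), count), (2, 10)); // short page -> exact count
    }
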
index 88f64117da7a307e18e5cdd1615118231a05dda3..bbdbc6bfc2f7b623f6987d457cc13a300a511e7a 100644 (file)
@@ -1,11 +1,11 @@
 use anyhow::{bail, format_err, Error};
 use serde_json::{json, Value};
 
-use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions};
-use proxmox_router::{Router, Permission};
+use proxmox_router::{Permission, Router};
 use proxmox_schema::api;
+use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions};
 
-use pbs_api_types::{NODE_SCHEMA, TIME_ZONE_SCHEMA, PRIV_SYS_MODIFY};
+use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY, TIME_ZONE_SCHEMA};
 
 fn read_etc_localtime() -> Result<String, Error> {
     // use /etc/timezone
@@ -14,8 +14,8 @@ fn read_etc_localtime() -> Result<String, Error> {
     }
 
     // otherwise guess from the /etc/localtime symlink
-    let link = std::fs::read_link("/etc/localtime").
-        map_err(|err| format_err!("failed to guess timezone - {}", err))?;
+    let link = std::fs::read_link("/etc/localtime")
+        .map_err(|err| format_err!("failed to guess timezone - {}", err))?;
 
     let link = link.to_string_lossy();
     match link.rfind("/zoneinfo/") {
@@ -87,17 +87,19 @@ fn get_time(_param: Value) -> Result<Value, Error> {
     },
 )]
 /// Set time zone
-fn set_timezone(
-    timezone: String,
-    _param: Value,
-) -> Result<Value, Error> {
+fn set_timezone(timezone: String, _param: Value) -> Result<Value, Error> {
     let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));
 
     if !path.exists() {
         bail!("No such timezone.");
     }
 
-    replace_file("/etc/timezone", timezone.as_bytes(), CreateOptions::new(), true)?;
+    replace_file(
+        "/etc/timezone",
+        timezone.as_bytes(),
+        CreateOptions::new(),
+        true,
+    )?;
 
     let _ = std::fs::remove_file("/etc/localtime");
 
index 7e4d152d206165fd8c41160d43a65a578e886f9a..593ef2b379129f49e5744f57e10a3482111fb6f9 100644 (file)
@@ -1,9 +1,9 @@
 //! Cheap check if the API daemon is online.
 
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
 
-use proxmox_router::{Router, Permission};
+use proxmox_router::{Permission, Router};
 use proxmox_schema::api;
 
 #[api(
@@ -28,5 +28,4 @@ pub fn ping() -> Result<Value, Error> {
         "pong": true,
     }))
 }
-pub const ROUTER: Router = Router::new()
-    .get(&API_METHOD_PING);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_PING);
index aaeed4dea0c98c5c4f877817816b93e626decc81..e89f867c77dae7a8c5f24a69afda635b74d3f56b 100644 (file)
@@ -2,23 +2,22 @@
 use std::convert::TryFrom;
 
 use anyhow::{format_err, Error};
-use futures::{select, future::FutureExt};
+use futures::{future::FutureExt, select};
 
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
 use proxmox_sys::task_log;
 
 use pbs_api_types::{
-    Authid, SyncJobConfig, GroupFilter, RateLimitConfig, GROUP_FILTER_LIST_SCHEMA,
-    DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
-    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
+    Authid, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
+    GROUP_FILTER_LIST_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
+    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
 };
-use proxmox_rest_server::WorkerTask;
 use pbs_config::CachedUserInfo;
+use proxmox_rest_server::WorkerTask;
 
-use crate::server::pull::{PullParameters, pull_store};
 use crate::server::jobstate::Job;
-
+use crate::server::pull::{pull_store, PullParameters};
 
 pub fn check_pull_privs(
     auth_id: &Authid,
@@ -27,11 +26,15 @@ pub fn check_pull_privs(
     remote_store: &str,
     delete: bool,
 ) -> Result<(), Error> {
-
     let user_info = CachedUserInfo::new()?;
 
     user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
-    user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+    user_info.check_privs(
+        auth_id,
+        &["remote", remote, remote_store],
+        PRIV_REMOTE_READ,
+        false,
+    )?;
 
     if delete {
         user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
@@ -48,7 +51,11 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
             &sync_job.store,
             &sync_job.remote,
             &sync_job.remote_store,
-            sync_job.owner.as_ref().unwrap_or_else(|| Authid::root_auth_id()).clone(),
+            sync_job
+                .owner
+                .as_ref()
+                .unwrap_or_else(|| Authid::root_auth_id())
+                .clone(),
             sync_job.remove_vanished,
             sync_job.group_filter.clone(),
             sync_job.limit.clone(),
@@ -63,12 +70,13 @@ pub fn do_sync_job(
     schedule: Option<String>,
     to_stdout: bool,
 ) -> Result<String, Error> {
-
-    let job_id = format!("{}:{}:{}:{}",
-                         sync_job.remote,
-                         sync_job.remote_store,
-                         sync_job.store,
-                         job.jobname());
+    let job_id = format!(
+        "{}:{}:{}:{}",
+        sync_job.remote,
+        sync_job.remote_store,
+        sync_job.store,
+        job.jobname()
+    );
     let worker_type = job.jobtype().to_string();
 
     let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store);
@@ -79,14 +87,12 @@ pub fn do_sync_job(
         auth_id.to_string(),
         to_stdout,
         move |worker| async move {
-
             job.start(&worker.upid().to_string())?;
 
             let worker2 = worker.clone();
             let sync_job2 = sync_job.clone();
 
             let worker_future = async move {
-
                 let pull_params = PullParameters::try_from(&sync_job)?;
                 let client = pull_params.client().await?;
 
@@ -109,9 +115,11 @@ pub fn do_sync_job(
                 Ok(())
             };
 
-            let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
+            let mut abort_future = worker2
+                .abort_future()
+                .map(|_| Err(format_err!("sync aborted")));
 
-            let result = select!{
+            let result = select! {
                 worker = worker_future.fuse() => worker,
                 abort = abort_future => abort,
             };
@@ -119,20 +127,23 @@ pub fn do_sync_job(
             let status = worker2.create_state(&result);
 
             match job.finish(status) {
-                Ok(_) => {},
+                Ok(_) => {}
                 Err(err) => {
                     eprintln!("could not finish job state: {}", err);
                 }
             }
 
             if let Some(email) = email {
-                if let Err(err) = crate::server::send_sync_status(&email, notify, &sync_job2, &result) {
+                if let Err(err) =
+                    crate::server::send_sync_status(&email, notify, &sync_job2, &result)
+                {
                     eprintln!("send sync notification failed: {}", err);
                 }
             }
 
             result
-        })?;
+        },
+    )?;
 
     Ok(upid_str)
 }
@@ -173,7 +184,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
     },
 )]
 /// Sync store from other repository
-async fn pull (
+async fn pull(
     store: String,
     remote: String,
     remote_store: String,
@@ -183,7 +194,6 @@ async fn pull (
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
-
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let delete = remove_vanished.unwrap_or(false);
 
@@ -201,25 +211,29 @@ async fn pull (
     let client = pull_params.client().await?;
 
     // fixme: set to_stdout to false?
-    let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.to_string(), true, move |worker| async move {
-
-        task_log!(worker, "sync datastore '{}' start", store);
+    let upid_str = WorkerTask::spawn(
+        "sync",
+        Some(store.clone()),
+        auth_id.to_string(),
+        true,
+        move |worker| async move {
+            task_log!(worker, "sync datastore '{}' start", store);
 
-        let pull_future = pull_store(&worker, &client, &pull_params);
-        let future = select!{
-            success = pull_future.fuse() => success,
-            abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
-        };
+            let pull_future = pull_store(&worker, &client, &pull_params);
+            let future = select! {
+                success = pull_future.fuse() => success,
+                abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
+            };
 
-        let _ = future?;
+            let _ = future?;
 
-        task_log!(worker, "sync datastore '{}' end", store);
+            task_log!(worker, "sync datastore '{}' end", store);
 
-        Ok(())
-    })?;
+            Ok(())
+        },
+    )?;
 
     Ok(upid_str)
 }
 
-pub const ROUTER: Router = Router::new()
-    .post(&API_METHOD_PULL);
+pub const ROUTER: Router = Router::new().post(&API_METHOD_PULL);
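
Both do_sync_job() and pull() above race the actual transfer against an abort future with select!. A compact sketch of that pattern, assuming only the futures and anyhow crates; the work and abort futures are trivial stand-ins for pull_store() and worker.abort_future():

    use anyhow::{format_err, Error};
    use futures::future::{pending, FutureExt};
    use futures::{pin_mut, select};

    async fn run_with_abort() -> Result<(), Error> {
        // Stand-in for the real pull/sync future; ready immediately.
        let work = async { Ok::<(), Error>(()) }.fuse();
        // Stand-in for the abort future; never fires in this sketch.
        let abort = pending::<()>()
            .map(|_| Err::<(), Error>(format_err!("sync aborted")))
            .fuse();
        pin_mut!(work, abort);

        // Whichever future completes first decides the overall result.
        select! {
            res = work => res,
            res = abort => res,
        }
    }

    fn main() -> Result<(), Error> {
        futures::executor::block_on(run_with_abort())
    }
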
index b4aaa614bdaf92d97c3ba2dbd1c3d1866707e333..37039da2f573a8e94ea536b87dc0a5bec6c8f661 100644 (file)
@@ -1,13 +1,13 @@
-use std::sync::{Arc,RwLock};
 use std::collections::HashSet;
+use std::sync::{Arc, RwLock};
 
 use serde_json::{json, Value};
 
 use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
 
+use pbs_api_types::Authid;
 use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::DataStore;
-use pbs_api_types::Authid;
 use proxmox_rest_server::formatter::*;
 use proxmox_rest_server::WorkerTask;
 
@@ -22,7 +22,7 @@ pub struct ReaderEnvironment {
     pub worker: Arc<WorkerTask>,
     pub datastore: Arc<DataStore>,
     pub backup_dir: BackupDir,
-    allowed_chunks: Arc<RwLock<HashSet<[u8;32]>>>,
+    allowed_chunks: Arc<RwLock<HashSet<[u8; 32]>>>,
 }
 
 impl ReaderEnvironment {
@@ -33,8 +33,6 @@ impl ReaderEnvironment {
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
     ) -> Self {
-
-
         Self {
             result_attributes: json!({}),
             env_type,
@@ -53,22 +51,22 @@ impl ReaderEnvironment {
     }
 
     pub fn debug<S: AsRef<str>>(&self, msg: S) {
-        if self.debug { self.worker.log_message(msg); }
+        if self.debug {
+            self.worker.log_message(msg);
+        }
     }
 
-
-    pub fn register_chunk(&self, digest: [u8;32]) {
+    pub fn register_chunk(&self, digest: [u8; 32]) {
         let mut allowed_chunks = self.allowed_chunks.write().unwrap();
         allowed_chunks.insert(digest);
     }
 
-    pub fn check_chunk_access(&self, digest: [u8;32]) -> bool {
-       self.allowed_chunks.read().unwrap().contains(&digest)
+    pub fn check_chunk_access(&self, digest: [u8; 32]) -> bool {
+        self.allowed_chunks.read().unwrap().contains(&digest)
     }
 }
 
 impl RpcEnvironment for ReaderEnvironment {
-
     fn result_attrib_mut(&mut self) -> &mut Value {
         &mut self.result_attributes
     }
index 45cefe5da35cfba686b477561337abe626927b42..20d629b5f81ab545e2be87578b783276d002e247 100644 (file)
@@ -2,58 +2,66 @@
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
+use hex::FromHex;
 use hyper::header::{self, HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
-use hyper::{Body, Response, Request, StatusCode};
+use hyper::{Body, Request, Response, StatusCode};
 use serde_json::Value;
-use hex::FromHex;
 
-use proxmox_sys::sortable;
 use proxmox_router::{
     http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
     Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::{BooleanSchema, ObjectSchema};
+use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Authid, Operation, DATASTORE_SCHEMA, BACKUP_TYPE_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_ID_SCHEMA, CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP,
-    BACKUP_ARCHIVE_NAME_SCHEMA,
+    Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_READ,
 };
-use proxmox_sys::fs::lock_dir_noblock_shared;
-use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
+use pbs_config::CachedUserInfo;
 use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType};
-use pbs_config::CachedUserInfo;
-use proxmox_rest_server::{WorkerTask, H2Service};
+use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
+use pbs_tools::json::{required_integer_param, required_string_param};
+use proxmox_rest_server::{H2Service, WorkerTask};
+use proxmox_sys::fs::lock_dir_noblock_shared;
 
 use crate::api2::helpers;
 
 mod environment;
 use environment::*;
 
-pub const ROUTER: Router = Router::new()
-    .upgrade(&API_METHOD_UPGRADE_BACKUP);
+pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);
 
 #[sortable]
 pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&upgrade_to_backup_reader_protocol),
     &ObjectSchema::new(
-        concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."),
+        concat!(
+            "Upgraded to backup protocol ('",
+            PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(),
+            "')."
+        ),
         &sorted!([
             ("store", false, &DATASTORE_SCHEMA),
             ("backup-type", false, &BACKUP_TYPE_SCHEMA),
             ("backup-id", false, &BACKUP_ID_SCHEMA),
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
-            ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
+            (
+                "debug",
+                true,
+                &BooleanSchema::new("Enable verbose debug logging.").schema()
+            ),
         ]),
-    )
-).access(
+    ),
+)
+.access(
     // Note: parameter 'store' is not a URI parameter, so we need to test inside the function body
     Some("The user needs Datastore.Read privilege on /datastore/{store}."),
-    &Permission::Anybody
+    &Permission::Anybody,
 );
 
 fn upgrade_to_backup_reader_protocol(
@@ -63,7 +71,6 @@ fn upgrade_to_backup_reader_protocol(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);
 
@@ -91,14 +98,17 @@ fn upgrade_to_backup_reader_protocol(
             .headers
             .get("UPGRADE")
             .ok_or_else(|| format_err!("missing Upgrade header"))?
-        .to_str()?;
+            .to_str()?;
 
         if protocols != PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!() {
             bail!("invalid protocol name");
         }
 
-        if parts.version >=  http::version::Version::HTTP_2 {
-            bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
+        if parts.version >= http::version::Version::HTTP_2 {
+            bail!(
+                "unexpected http version '{:?}' (expected version < 2)",
+                parts.version
+            );
         }
 
         let env_type = rpcenv.env_type();
@@ -107,8 +117,7 @@ fn upgrade_to_backup_reader_protocol(
         if !priv_read {
             let owner = datastore.get_owner(backup_dir.group())?;
             let correct_owner = owner == auth_id
-                || (owner.is_token()
-                    && Authid::from(owner.user().clone()) == auth_id);
+                || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
             if !correct_owner {
                 bail!("backup owner check failed!");
             }
@@ -117,83 +126,100 @@ fn upgrade_to_backup_reader_protocol(
         let _guard = lock_dir_noblock_shared(
             &datastore.snapshot_path(&backup_dir),
             "snapshot",
-            "locked by another operation")?;
+            "locked by another operation",
+        )?;
 
         let path = datastore.base_path();
 
         //let files = BackupInfo::list_files(&path, &backup_dir)?;
 
-        let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
-
-        WorkerTask::spawn("reader", Some(worker_id), auth_id.to_string(), true, move |worker| async move {
-            let _guard = _guard;
-
-            let mut env = ReaderEnvironment::new(
-                env_type,
-                auth_id,
-                worker.clone(),
-                datastore,
-                backup_dir,
-            );
-
-            env.debug = debug;
-
-            env.log(format!("starting new backup reader datastore '{}': {:?}", store, path));
-
-            let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
-
-            let mut abort_future = worker.abort_future()
-                .map(|_| Err(format_err!("task aborted")));
-
-            let env2 = env.clone();
-            let req_fut = async move {
-                let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
-                env2.debug("protocol upgrade done");
-
-                let mut http = hyper::server::conn::Http::new();
-                http.http2_only(true);
-                // increase window size: todo - find optimal size
-                let window_size = 32*1024*1024; // max = (1 << 31) - 2
-                http.http2_initial_stream_window_size(window_size);
-                http.http2_initial_connection_window_size(window_size);
-                http.http2_max_frame_size(4*1024*1024);
-
-                http.serve_connection(conn, service)
-                    .map_err(Error::from).await
-            };
-
-            futures::select!{
-                req = req_fut.fuse() => req?,
-                abort = abort_future => abort?,
-            };
-
-            env.log("reader finished successfully");
-
-            Ok(())
-        })?;
+        let worker_id = format!(
+            "{}:{}/{}/{:08X}",
+            store,
+            backup_type,
+            backup_id,
+            backup_dir.backup_time()
+        );
+
+        WorkerTask::spawn(
+            "reader",
+            Some(worker_id),
+            auth_id.to_string(),
+            true,
+            move |worker| async move {
+                let _guard = _guard;
+
+                let mut env = ReaderEnvironment::new(
+                    env_type,
+                    auth_id,
+                    worker.clone(),
+                    datastore,
+                    backup_dir,
+                );
+
+                env.debug = debug;
+
+                env.log(format!(
+                    "starting new backup reader datastore '{}': {:?}",
+                    store, path
+                ));
+
+                let service =
+                    H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
+
+                let mut abort_future = worker
+                    .abort_future()
+                    .map(|_| Err(format_err!("task aborted")));
+
+                let env2 = env.clone();
+                let req_fut = async move {
+                    let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
+                    env2.debug("protocol upgrade done");
+
+                    let mut http = hyper::server::conn::Http::new();
+                    http.http2_only(true);
+                    // increase window size: todo - find optimal size
+                    let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
+                    http.http2_initial_stream_window_size(window_size);
+                    http.http2_initial_connection_window_size(window_size);
+                    http.http2_max_frame_size(4 * 1024 * 1024);
+
+                    http.serve_connection(conn, service)
+                        .map_err(Error::from)
+                        .await
+                };
+
+                futures::select! {
+                    req = req_fut.fuse() => req?,
+                    abort = abort_future => abort?,
+                };
+
+                env.log("reader finished successfully");
+
+                Ok(())
+            },
+        )?;
 
         let response = Response::builder()
             .status(StatusCode::SWITCHING_PROTOCOLS)
-            .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
+            .header(
+                UPGRADE,
+                HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()),
+            )
             .body(Body::empty())?;
 
         Ok(response)
-    }.boxed()
+    }
+    .boxed()
 }
 
 const READER_API_SUBDIRS: SubdirMap = &[
+    ("chunk", &Router::new().download(&API_METHOD_DOWNLOAD_CHUNK)),
     (
-        "chunk", &Router::new()
-            .download(&API_METHOD_DOWNLOAD_CHUNK)
-    ),
-    (
-        "download", &Router::new()
-            .download(&API_METHOD_DOWNLOAD_FILE)
-    ),
-    (
-        "speedtest", &Router::new()
-            .download(&API_METHOD_SPEEDTEST)
+        "download",
+        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
     ),
+    ("speedtest", &Router::new().download(&API_METHOD_SPEEDTEST)),
 ];
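
Subdir maps like READER_API_SUBDIRS (and the sorted!-wrapped UPID map earlier) keep their (name, router) pairs sorted so a path component can be looked up by binary search. A std-only sketch of that dispatch idea, with plain strings standing in for the routers; it illustrates the concept, not the actual proxmox_router implementation:

    // Sorted (name, payload) pairs; the slice must stay sorted by name.
    const SUBDIRS: &[(&str, &str)] = &[
        ("chunk", "download chunk handler"),
        ("download", "download file handler"),
        ("speedtest", "speedtest handler"),
    ];

    fn lookup(name: &str) -> Option<&'static str> {
        SUBDIRS
            .binary_search_by_key(&name, |&(n, _)| n)
            .ok()
            .map(|idx| SUBDIRS[idx].1)
    }

    fn main() {
        assert_eq!(lookup("chunk"), Some("download chunk handler"));
        assert_eq!(lookup("missing"), None);
    }
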
 
 pub const READER_API_ROUTER: Router = Router::new()
@@ -205,10 +231,8 @@ pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&download_file),
     &ObjectSchema::new(
         "Download specified file.",
-        &sorted!([
-            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
-        ]),
-    )
+        &sorted!([("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
+    ),
 );
 
 fn download_file(
@@ -218,7 +242,6 @@ fn download_file(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let env: &ReaderEnvironment = rpcenv.as_ref();
 
@@ -239,11 +262,14 @@ fn download_file(
                 let index = env.datastore.open_dynamic_reader(&path)?;
                 Some(Box::new(index))
             }
-            _ => { None }
+            _ => None,
         };
 
         if let Some(index) = index {
-            env.log(format!("register chunks in '{}' as downloadable.", file_name));
+            env.log(format!(
+                "register chunks in '{}' as downloadable.",
+                file_name
+            ));
 
             for pos in 0..index.index_count() {
                 let info = index.chunk_info(pos).unwrap();
@@ -252,7 +278,8 @@ fn download_file(
         }
 
         helpers::create_download_response(path).await
-    }.boxed()
+    }
+    .boxed()
 }
 
 #[sortable]
@@ -260,10 +287,8 @@ pub const API_METHOD_DOWNLOAD_CHUNK: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&download_chunk),
     &ObjectSchema::new(
         "Download specified chunk.",
-        &sorted!([
-            ("digest", false, &CHUNK_DIGEST_SCHEMA),
-        ]),
-    )
+        &sorted!([("digest", false, &CHUNK_DIGEST_SCHEMA),]),
+    ),
 );
 
 fn download_chunk(
@@ -273,7 +298,6 @@ fn download_chunk(
     _info: &ApiMethod,
     rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
     async move {
         let env: &ReaderEnvironment = rpcenv.as_ref();
 
@@ -281,8 +305,15 @@ fn download_chunk(
         let digest = <[u8; 32]>::from_hex(digest_str)?;
 
         if !env.check_chunk_access(digest) {
-            env.log(format!("attempted to download chunk {} which is not in registered chunk list", digest_str));
-            return Err(http_err!(UNAUTHORIZED, "download chunk {} not allowed", digest_str));
+            env.log(format!(
+                "attempted to download chunk {} which is not in registered chunk list",
+                digest_str
+            ));
+            return Err(http_err!(
+                UNAUTHORIZED,
+                "download chunk {} not allowed",
+                digest_str
+            ));
         }
 
         let (path, _) = env.datastore.chunk_path(&digest);
@@ -290,18 +321,21 @@ fn download_chunk(
 
         env.debug(format!("download chunk {:?}", path));
 
-        let data = proxmox_async::runtime::block_in_place(|| std::fs::read(path))
-            .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;
+        let data =
+            proxmox_async::runtime::block_in_place(|| std::fs::read(path)).map_err(move |err| {
+                http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err)
+            })?;
 
         let body = Body::from(data);
 
         // fixme: set other headers ?
         Ok(Response::builder()
-           .status(StatusCode::OK)
-           .header(header::CONTENT_TYPE, "application/octet-stream")
-           .body(body)
-           .unwrap())
-    }.boxed()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }
+    .boxed()
 }
 
 /* this is too slow
@@ -347,7 +381,7 @@ fn download_chunk_old(
 
 pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&speedtest),
-    &ObjectSchema::new("Test 1M block download speed.", &[])
+    &ObjectSchema::new("Test 1M block download speed.", &[]),
 );
 
 fn speedtest(
@@ -357,8 +391,7 @@ fn speedtest(
     _info: &ApiMethod,
     _rpcenv: Box<dyn RpcEnvironment>,
 ) -> ApiResponseFuture {
-
-    let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A...]
+    let buffer = vec![65u8; 1024 * 1024]; // nonsense [A,A,A...]
 
     let body = Body::from(buffer);
 
index baa39bec51939c61cee17dd5e3ac616364d5b727..51cb345b21dcf1cf13277272ca02b83b5acbd513 100644 (file)
@@ -3,26 +3,20 @@
 use anyhow::Error;
 use serde_json::Value;
 
-use proxmox_schema::api;
-use proxmox_router::{
-    ApiMethod,
-    Permission,
-    Router,
-    RpcEnvironment,
-    SubdirMap,
-};
 use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
+use proxmox_schema::api;
 
 use pbs_api_types::{
-    Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+    Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame, PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_BACKUP,
 };
 
-use pbs_datastore::DataStore;
 use pbs_config::CachedUserInfo;
+use pbs_datastore::DataStore;
 
-use crate::tools::statistics::{linear_regression};
 use crate::rrd_cache::extract_rrd_data;
+use crate::tools::statistics::linear_regression;
 
 #[api(
     returns: {
@@ -41,8 +35,7 @@ pub fn datastore_status(
     _param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
-    ) -> Result<Vec<DataStoreStatusListItem>, Error> {
-
+) -> Result<Vec<DataStoreStatusListItem>, Error> {
     let (config, _digest) = pbs_config::datastore::config()?;
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -52,7 +45,7 @@ pub fn datastore_status(
 
     for (store, (_, _)) in &config.sections {
         let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
-        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
+        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
             continue;
         }
@@ -90,12 +83,8 @@ pub fn datastore_status(
 
         let rrd_dir = format!("datastore/{}", store);
 
-        let get_rrd = |what: &str| extract_rrd_data(
-            &rrd_dir,
-            what,
-            RRDTimeFrame::Month,
-            RRDMode::Average,
-        );
+        let get_rrd =
+            |what: &str| extract_rrd_data(&rrd_dir, what, RRDTimeFrame::Month, RRDMode::Average);
 
         let total_res = get_rrd("total")?;
         let used_res = get_rrd("used")?;
@@ -114,14 +103,12 @@ pub fn datastore_status(
 
                 match (total, used) {
                     (Some(total), Some(used)) if total != 0.0 => {
-                        time_list.push(start + (idx as u64)*reso);
-                        let usage = used/total;
+                        time_list.push(start + (idx as u64) * reso);
+                        let usage = used / total;
                         usage_list.push(usage);
                         history.push(Some(usage));
-                    },
-                    _ => {
-                        history.push(None)
                     }
+                    _ => history.push(None),
                 }
             }
 
@@ -145,9 +132,10 @@ pub fn datastore_status(
     Ok(list.into())
 }
 
-const SUBDIRS: SubdirMap = &[
-    ("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
-];
+const SUBDIRS: SubdirMap = &[(
+    "datastore-usage",
+    &Router::new().get(&API_METHOD_DATASTORE_STATUS),
+)];
 
 pub const ROUTER: Router = Router::new()
     .get(&list_subdirs_api_method!(SUBDIRS))
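
The loop reformatted above pairs total/used RRD samples and keeps used/total only where both samples exist and total is non-zero. A std-only sketch of that post-processing step (the free-standing function is illustrative):

    // Turn paired total/used samples into a usage-fraction history;
    // points with missing data or a zero total become None.
    fn usage_history(totals: &[Option<f64>], used: &[Option<f64>]) -> Vec<Option<f64>> {
        totals
            .iter()
            .zip(used)
            .map(|(total, used)| match (total, used) {
                (Some(total), Some(used)) if *total != 0.0 => Some(used / total),
                _ => None,
            })
            .collect()
    }

    fn main() {
        let history = usage_history(
            &[Some(100.0), Some(0.0), None, Some(200.0)],
            &[Some(25.0), Some(1.0), Some(2.0), Some(50.0)],
        );
        assert_eq!(history, vec![Some(0.25), None, None, Some(0.25)]);
    }
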
index 21e953bb58367b37c57300431b6efa7e0d9f7fb5..210ebdbc9902e500e6a44675fa3bcd73af5fea15 100644 (file)
@@ -1,11 +1,9 @@
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 
-use proxmox_schema::{api, ApiType, Schema, StringSchema, ApiStringFormat};
+use proxmox_schema::{api, ApiStringFormat, ApiType, Schema, StringSchema};
 
-use pbs_api_types::{
-    DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT,
-};
+use pbs_api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT};
 
 #[api(
     properties: {
@@ -41,10 +39,10 @@ pub struct AcmeDomain {
     pub plugin: Option<String>,
 }
 
-pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema = StringSchema::new(
-    "ACME domain configuration string")
-    .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
-    .schema();
+pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema =
+    StringSchema::new("ACME domain configuration string")
+        .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
+        .schema();
 
 #[api(
     properties: {
index 1911557f67f25bf35a69761ff1a96a0025620565..9a65c69109b82fd6bcc76eb4c4dc9cdbd35ed8ca 100644 (file)
@@ -21,12 +21,10 @@ pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
     Ok(())
 });
 
-
 // Regression tests
 
 #[test]
 fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
-
     let schema = pbs_api_types::CERT_FINGERPRINT_SHA256_SCHEMA;
 
     let invalid_fingerprints = [
@@ -40,7 +38,10 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
 
     for fingerprint in invalid_fingerprints.iter() {
         if schema.parse_simple_value(fingerprint).is_ok() {
-            bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint);
+            bail!(
+                "test fingerprint '{}' failed - got Ok() while expecting an error.",
+                fingerprint
+            );
         }
     }
 
@@ -58,7 +59,11 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
         };
 
         if v != serde_json::json!(fingerprint) {
-            bail!("unable to parse fingerprint '{}' - got wrong value {:?}", fingerprint, v);
+            bail!(
+                "unable to parse fingerprint '{}' - got wrong value {:?}",
+                fingerprint,
+                v
+            );
         }
     }
 
@@ -67,24 +72,26 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
 
 #[test]
 fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
-
     use pbs_api_types::Userid;
 
     let invalid_user_ids = [
-        "x", // too short
-        "xx", // too short
-        "xxx", // no realm
-        "xxx@", // no realm
-        "xx x@test", // contains space
+        "x",                                                                 // too short
+        "xx",                                                                // too short
+        "xxx",                                                               // no realm
+        "xxx@",                                                              // no realm
+        "xx x@test",                                                         // contains space
         "xx\nx@test", // contains control character
-        "x:xx@test", // contains colon
-        "xx/x@test", // contains slash
+        "x:xx@test",  // contains colon
+        "xx/x@test",  // contains slash
         "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@test", // too long
     ];
 
     for name in invalid_user_ids.iter() {
         if Userid::API_SCHEMA.parse_simple_value(name).is_ok() {
-            bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
+            bail!(
+                "test userid '{}' failed - got Ok() while expecting an error.",
+                name
+            );
         }
     }
 
@@ -105,7 +112,11 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
         };
 
         if v != serde_json::json!(name) {
-            bail!("unable to parse userid '{}' - got wrong value {:?}", name, v);
+            bail!(
+                "unable to parse userid '{}' - got wrong value {:?}",
+                name,
+                v
+            );
         }
     }
 
@@ -139,7 +150,7 @@ pub struct NodeSwapCounters {
 }
 
 #[api]
-#[derive(Serialize,Deserialize,Default)]
+#[derive(Serialize, Deserialize, Default)]
 #[serde(rename_all = "kebab-case")]
 /// Contains general node information such as the fingerprint
 pub struct NodeInformation {
@@ -207,13 +218,13 @@ pub struct NodeStatus {
     pub info: NodeInformation,
 }
 
-pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
-    "HTTP proxy configuration [http://]<host>[:port]")
-    .format(&ApiStringFormat::VerifyFn(|s| {
-        proxmox_http::ProxyConfig::parse_proxy_url(s)?;
-        Ok(())
-    }))
-    .min_length(1)
-    .max_length(128)
-    .type_text("[http://]<host>[:port]")
-    .schema();
+pub const HTTP_PROXY_SCHEMA: Schema =
+    StringSchema::new("HTTP proxy configuration [http://]<host>[:port]")
+        .format(&ApiStringFormat::VerifyFn(|s| {
+            proxmox_http::ProxyConfig::parse_proxy_url(s)?;
+            Ok(())
+        }))
+        .min_length(1)
+        .max_length(128)
+        .type_text("[http://]<host>[:port]")
+        .schema();
index 4ea85708914bab2bae2d02bd2cde1d7a72a14e88..0e91688b5451650db86205a768da974f304118d6 100644 (file)
@@ -1,9 +1,9 @@
 //! Version information
 
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
 
-use proxmox_router::{ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiHandler, ApiMethod, Permission, Router, RpcEnvironment};
 use proxmox_schema::ObjectSchema;
 
 fn get_version(
@@ -11,7 +11,6 @@ fn get_version(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     Ok(json!({
         "version": pbs_buildcfg::PROXMOX_PKG_VERSION,
         "release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
@@ -19,11 +18,10 @@ fn get_version(
     }))
 }
 
-pub const ROUTER: Router = Router::new()
-    .get(
-        &ApiMethod::new(
-            &ApiHandler::Sync(&get_version),
-            &ObjectSchema::new("Proxmox Backup Server API version.", &[])
-        ).access(None, &Permission::Anybody)
-    );
-
+pub const ROUTER: Router = Router::new().get(
+    &ApiMethod::new(
+        &ApiHandler::Sync(&get_version),
+        &ObjectSchema::new("Proxmox Backup Server API version.", &[]),
+    )
+    .access(None, &Permission::Anybody),
+);