use anyhow::{bail, Error};
use hex::FromHex;
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{
- Authid, AclListItem, Role,
- ACL_PATH_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, PROXMOX_GROUP_ID_SCHEMA,
- ACL_PROPAGATE_SCHEMA, PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY,
+ AclListItem, Authid, Role, ACL_PATH_SCHEMA, ACL_PROPAGATE_SCHEMA, PRIV_PERMISSIONS_MODIFY,
+ PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, PROXMOX_GROUP_ID_SCHEMA,
};
use pbs_config::acl::AclTreeNode;
for (user, roles) in &node.users {
if let Some(auth_id_filter) = auth_id_filter {
- if !user.is_token()
- || user.user() != auth_id_filter.user() {
- continue;
+ if !user.is_token() || user.user() != auth_id_filter.user() {
+ continue;
}
}
for (role, propagate) in roles {
list.push(AclListItem {
- path: if path.is_empty() { String::from("/") } else { path.to_string() },
+ path: if path.is_empty() {
+ String::from("/")
+ } else {
+ path.to_string()
+ },
propagate: *propagate,
ugid_type: String::from("user"),
ugid: user.to_string(),
for (role, propagate) in roles {
list.push(AclListItem {
- path: if path.is_empty() { String::from("/") } else { path.to_string() },
+ path: if path.is_empty() {
+ String::from("/")
+ } else {
+ path.to_string()
+ },
propagate: *propagate,
ugid_type: String::from("group"),
ugid: group.to_string(),
} else if auth_id.user() != current_auth_id.user() {
bail!("Unprivileged users can only set ACL items for their own API tokens.");
}
- },
- None => { bail!("Unprivileged user needs to provide auth_id to update ACL item."); },
+ }
+ None => {
+ bail!("Unprivileged user needs to provide auth_id to update ACL item.");
+ }
};
}
if let Some(ref _group) = group {
bail!("parameter 'group' - groups are currently not supported.");
} else if let Some(ref auth_id) = auth_id {
- if !delete { // Note: we allow to delete non-existent users
+ if !delete {
+ // Note: we allow to delete non-existent users
let user_cfg = pbs_config::user::cached_config()?;
if user_cfg.sections.get(&auth_id.to_string()).is_none() {
- bail!(format!("no such {}.",
- if auth_id.is_token() { "API token" } else { "user" }));
+ bail!(format!(
+ "no such {}.",
+ if auth_id.is_token() {
+ "API token"
+ } else {
+ "user"
+ }
+ ));
}
}
} else {
bail!("missing 'userid' or 'group' parameter.");
}
- if !delete { // Note: we allow to delete entries with invalid path
+ if !delete {
+ // Note: we allow to delete entries with invalid path
pbs_config::acl::check_acl_path(&path)?;
}
//! List Authentication domains/realms
-use anyhow::{Error};
+use anyhow::Error;
use serde_json::{json, Value};
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::BasicRealmInfo;
Ok(list)
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_LIST_DOMAINS);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_DOMAINS);
use std::collections::HashMap;
use std::collections::HashSet;
-use proxmox_sys::sortable;
use proxmox_router::{
- http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
+ http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::api;
+use proxmox_sys::sortable;
use pbs_api_types::{
- Userid, Authid, PASSWORD_SCHEMA, ACL_PATH_SCHEMA,
- PRIVILEGES, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
+ Authid, Userid, ACL_PATH_SCHEMA, PASSWORD_SCHEMA, PRIVILEGES, PRIV_PERMISSIONS_MODIFY,
+ PRIV_SYS_AUDIT,
};
-use pbs_tools::ticket::{self, Empty, Ticket};
use pbs_config::acl::AclTreeNode;
use pbs_config::CachedUserInfo;
+use pbs_tools::ticket::{self, Empty, Ticket};
use crate::auth_helpers::*;
use crate::config::tfa::TfaChallenge;
tfa_challenge: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
use proxmox_rest_server::RestEnvironment;
- let env: &RestEnvironment = rpcenv.as_any().downcast_ref::<RestEnvironment>()
+ let env: &RestEnvironment = rpcenv
+ .as_any()
+ .downcast_ref::<RestEnvironment>()
        .ok_or_else(|| format_err!("detected wrong RpcEnvironment type"))?;
match authenticate_user(&username, &password, path, privs, port, tfa_challenge) {
} else {
bail!("not allowed to list permissions of {}", auth_id);
}
- },
+ }
None => current_auth_id,
};
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
-use proxmox_sys::sortable;
use proxmox_router::{
- http_err, list_subdirs_api_method, Router, RpcEnvironment, SubdirMap, Permission,
+ http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::api;
+use proxmox_sys::sortable;
use proxmox_openid::{OpenIdAuthenticator, OpenIdConfig};
use pbs_api_types::{
- OpenIdRealmConfig, User, Userid,
- EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA, OPENID_DEFAILT_SCOPE_LIST,
- REALM_ID_SCHEMA,
+ OpenIdRealmConfig, User, Userid, EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
+ OPENID_DEFAILT_SCOPE_LIST, REALM_ID_SCHEMA,
};
use pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M;
use pbs_tools::ticket::Ticket;
-use pbs_config::CachedUserInfo;
use pbs_config::open_backup_lockfile;
+use pbs_config::CachedUserInfo;
use crate::auth_helpers::*;
use crate::server::ticket::ApiTicket;
-fn openid_authenticator(realm_config: &OpenIdRealmConfig, redirect_url: &str) -> Result<OpenIdAuthenticator, Error> {
-
- let scopes: Vec<String> = realm_config.scopes.as_deref().unwrap_or(OPENID_DEFAILT_SCOPE_LIST)
+fn openid_authenticator(
+ realm_config: &OpenIdRealmConfig,
+ redirect_url: &str,
+) -> Result<OpenIdAuthenticator, Error> {
+ let scopes: Vec<String> = realm_config
+ .scopes
+ .as_deref()
+ .unwrap_or(OPENID_DEFAILT_SCOPE_LIST)
.split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
.filter(|s| !s.is_empty())
.map(String::from)
let mut acr_values = None;
if let Some(ref list) = realm_config.acr_values {
acr_values = Some(
- list
- .split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
+ list.split(|c: char| c == ',' || c == ';' || char::is_ascii_whitespace(&c))
.filter(|s| !s.is_empty())
.map(String::from)
- .collect()
+ .collect(),
);
}
) -> Result<Value, Error> {
use proxmox_rest_server::RestEnvironment;
- let env: &RestEnvironment = rpcenv.as_any().downcast_ref::<RestEnvironment>()
+ let env: &RestEnvironment = rpcenv
+ .as_any()
+ .downcast_ref::<RestEnvironment>()
        .ok_or_else(|| format_err!("detected wrong RpcEnvironment type"))?;
let user_info = CachedUserInfo::new()?;
let mut tested_username = None;
let result = proxmox_lang::try_block!({
-
let (realm, private_auth_state) =
OpenIdAuthenticator::verify_public_auth_state(PROXMOX_BACKUP_RUN_DIR_M!(), &state)?;
use pbs_config::user;
let _lock = open_backup_lockfile(user::USER_CFG_LOCKFILE, None, true)?;
- let firstname = info["given_name"].as_str().map(|n| n.to_string())
+ let firstname = info["given_name"]
+ .as_str()
+ .map(|n| n.to_string())
.filter(|n| FIRST_NAME_SCHEMA.parse_simple_value(n).is_ok());
- let lastname = info["family_name"].as_str().map(|n| n.to_string())
+ let lastname = info["family_name"]
+ .as_str()
+ .map(|n| n.to_string())
.filter(|n| LAST_NAME_SCHEMA.parse_simple_value(n).is_ok());
- let email = info["email"].as_str().map(|n| n.to_string())
+ let email = info["email"]
+ .as_str()
+ .map(|n| n.to_string())
.filter(|n| EMAIL_SCHEMA.parse_simple_value(n).is_ok());
let user = User {
if let Err(ref err) = result {
let msg = err.to_string();
env.log_failed_auth(tested_username, &msg);
- return Err(http_err!(UNAUTHORIZED, "{}", msg))
+ return Err(http_err!(UNAUTHORIZED, "{}", msg));
}
result
redirect_url: String,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let (domains, _digest) = pbs_config::domains::config()?;
let config: OpenIdRealmConfig = domains.lookup("openid", &realm)?;
use proxmox_router::{Permission, Router};
use proxmox_schema::api;
-use pbs_api_types::{Role, SINGLE_LINE_COMMENT_SCHEMA, PRIVILEGES};
+use pbs_api_types::{Role, PRIVILEGES, SINGLE_LINE_COMMENT_SCHEMA};
use pbs_config::acl::ROLE_NAMES;
#[api(
Ok(list.into())
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_LIST_ROLES);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_LIST_ROLES);
//! User Management
use anyhow::{bail, format_err, Error};
-use serde::{Serialize, Deserialize};
+use hex::FromHex;
+use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
-use hex::FromHex;
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, SubdirMap, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
use pbs_api_types::{
- PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, Authid,
- Tokenname, UserWithTokens, Userid, User, UserUpdater, ApiToken,
- ENABLE_USER_SCHEMA, EXPIRE_USER_SCHEMA, PBS_PASSWORD_SCHEMA,
- PRIV_SYS_AUDIT, PRIV_PERMISSIONS_MODIFY,
+ ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA,
+ EXPIRE_USER_SCHEMA, PBS_PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
+ PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};
use pbs_config::token_shadow;
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<UserWithTokens>, Error> {
-
let (config, digest) = pbs_config::user::config()?;
let auth_id: Authid = rpcenv
let top_level_privs = user_info.lookup_privs(&auth_id, &["access", "users"]);
let top_level_allowed = (top_level_privs & PRIV_SYS_AUDIT) != 0;
- let filter_by_privs = |user: &User| {
- top_level_allowed || user.userid == *userid
- };
-
+ let filter_by_privs = |user: &User| top_level_allowed || user.userid == *userid;
- let list:Vec<User> = config.convert_to_typed_array("user")?;
+ let list: Vec<User> = config.convert_to_typed_array("user")?;
rpcenv["digest"] = hex::encode(&digest).into();
let iter = list.into_iter().filter(filter_by_privs);
let list = if include_tokens {
let tokens: Vec<ApiToken> = config.convert_to_typed_array("token")?;
- let mut user_to_tokens = tokens
- .into_iter()
- .fold(
- HashMap::new(),
- |mut map: HashMap<Userid, Vec<ApiToken>>, token: ApiToken| {
+ let mut user_to_tokens = tokens.into_iter().fold(
+ HashMap::new(),
+ |mut map: HashMap<Userid, Vec<ApiToken>>, token: ApiToken| {
if token.tokenid.is_token() {
- map
- .entry(token.tokenid.user().clone())
+ map.entry(token.tokenid.user().clone())
.or_default()
.push(token);
}
map
- });
- iter
- .map(|user: User| {
- let mut user = new_user_with_tokens(user);
- user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
- user
- })
- .collect()
+ },
+ );
+ iter.map(|user: User| {
+ let mut user = new_user_with_tokens(user);
+ user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
+ user
+ })
+ .collect()
} else {
- iter.map(new_user_with_tokens)
- .collect()
+ iter.map(new_user_with_tokens).collect()
};
Ok(list)
pub fn create_user(
password: Option<String>,
config: User,
- rpcenv: &mut dyn RpcEnvironment
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
-
let _lock = pbs_config::user::lock_config()?;
let (mut section_config, _digest) = pbs_config::user::config()?;
- if section_config.sections.get(config.userid.as_str()).is_some() {
+ if section_config
+ .sections
+ .get(config.userid.as_str())
+ .is_some()
+ {
bail!("user '{}' already exists.", config.userid);
}
#[api()]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
pub enum DeletableProperty {
/// Delete the comment property.
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
-
let _lock = pbs_config::user::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?;
}
if let Some(firstname) = update.firstname {
- data.firstname = if firstname.is_empty() { None } else { Some(firstname) };
+ data.firstname = if firstname.is_empty() {
+ None
+ } else {
+ Some(firstname)
+ };
}
if let Some(lastname) = update.lastname {
- data.lastname = if lastname.is_empty() { None } else { Some(lastname) };
+ data.lastname = if lastname.is_empty() {
+ None
+ } else {
+ Some(lastname)
+ };
}
if let Some(email) = update.email {
data.email = if email.is_empty() { None } else { Some(email) };
)]
/// Remove a user from the configuration file.
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
-
let _lock = pbs_config::user::lock_config()?;
let _tfa_lock = crate::config::tfa::write_lock()?;
-
+
let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest {
}
match config.sections.get(userid.as_str()) {
- Some(_) => { config.sections.remove(userid.as_str()); },
+ Some(_) => {
+ config.sections.remove(userid.as_str());
+ }
None => bail!("user '{}' does not exist.", userid),
}
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
match authenticator.remove_password(userid.name()) {
- Ok(()) => {},
+ Ok(()) => {}
Err(err) => {
eprintln!(
"error removing password after deleting user {:?}: {}",
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<ApiToken, Error> {
-
let (config, digest) = pbs_config::user::config()?;
let tokenid = Authid::from((userid, Some(token_name)));
expire: Option<i64>,
digest: Option<String>,
) -> Result<Value, Error> {
-
let _lock = pbs_config::user::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?;
let tokenid_string = tokenid.to_string();
if config.sections.get(&tokenid_string).is_some() {
- bail!("token '{}' for user '{}' already exists.", token_name.as_str(), userid);
+ bail!(
+ "token '{}' for user '{}' already exists.",
+ token_name.as_str(),
+ userid
+ );
}
let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
expire: Option<i64>,
digest: Option<String>,
) -> Result<(), Error> {
-
let _lock = pbs_config::user::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?;
token_name: Tokenname,
digest: Option<String>,
) -> Result<(), Error> {
-
let _lock = pbs_config::user::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?;
let tokenid_string = tokenid.to_string();
match config.sections.get(&tokenid_string) {
- Some(_) => { config.sections.remove(&tokenid_string); },
- None => bail!("token '{}' of user '{}' does not exist.", token_name.as_str(), userid),
+ Some(_) => {
+ config.sections.remove(&tokenid_string);
+ }
+ None => bail!(
+ "token '{}' of user '{}' does not exist.",
+ token_name.as_str(),
+ userid
+ ),
}
token_shadow::delete_secret(&tokenid)?;
}
)]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
/// A Token Entry that contains the token-name
pub struct TokenApiEntry {
/// The Token name
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TokenApiEntry>, Error> {
-
let (config, digest) = pbs_config::user::config()?;
- let list:Vec<ApiToken> = config.convert_to_typed_array("token")?;
+ let list: Vec<ApiToken> = config.convert_to_typed_array("token")?;
rpcenv["digest"] = hex::encode(&digest).into();
let filter_by_owner = |token: ApiToken| {
if token.tokenid.is_token() && token.tokenid.user() == &userid {
let token_name = token.tokenid.tokenname().unwrap().to_owned();
- Some(TokenApiEntry {
- token_name,
- token,
- })
+ Some(TokenApiEntry { token_name, token })
} else {
None
}
.get(&API_METHOD_LIST_TOKENS)
.match_all("token-name", &TOKEN_ITEM_ROUTER);
-const USER_SUBDIRS: SubdirMap = &[
- ("token", &TOKEN_ROUTER),
-];
+const USER_SUBDIRS: SubdirMap = &[("token", &TOKEN_ROUTER)];
const USER_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_USER)
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;
+use proxmox_async::blocking::WrappedReaderStream;
+use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use proxmox_compression::zstd::ZstdEncoder;
-use proxmox_sys::sortable;
-use proxmox_sys::fs::{
- file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
-};
use proxmox_router::{
- list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
- RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
+ http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
+ Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::*;
+use proxmox_sys::fs::{
+ file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
+};
+use proxmox_sys::sortable;
use proxmox_sys::{task_log, task_warn};
-use proxmox_async::blocking::WrappedReaderStream;
-use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
use pxar::accessor::aio::Accessor;
use pxar::EntryKind;
-use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
- DataStoreListItem, GarbageCollectionStatus, GroupListItem,
- Operation, SnapshotListItem, SnapshotVerifyState, PruneOptions,
- DataStoreStatus, RRDMode, RRDTimeFrame,
- BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
- BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
- IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
- VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
- PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
- PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,
-
+use pbs_api_types::{
+ Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
+ GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
+ SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
+ BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
+ PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
+ PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
-use pbs_datastore::{
- check_backup_owner, DataStore, BackupDir, BackupGroup, StoreProgress, LocalChunkReader,
- CATALOG_NAME, task_tracking
-};
+use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
-use pbs_datastore::fixed_index::{FixedIndexReader};
+use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
+use pbs_datastore::{
+ check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
+ StoreProgress, CATALOG_NAME,
+};
use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_config::CachedUserInfo;
-use proxmox_rest_server::{WorkerTask, formatter};
+use proxmox_rest_server::{formatter, WorkerTask};
use crate::api2::node::rrd::create_value_from_rrd;
-use crate::backup::{
- verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
-};
+use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};
use crate::server::jobstate::Job;
-
const GROUP_NOTES_FILE_NAME: &str = "notes";
fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
store: &DataStore,
backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
-
let (manifest, index_size) = store.load_manifest(backup_dir)?;
let mut result = Vec::new();
store: &DataStore,
info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
-
let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;
let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
});
for file in &info.files {
- if file_set.contains(file) { continue; }
+ if file_set.contains(file) {
+ continue;
+ }
files.push(BackupContent {
filename: file.to_string(),
size: None,
store: String,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let owner = match datastore.get_owner(&group) {
Ok(auth_id) => auth_id,
Err(err) => {
- eprintln!("Failed to get owner of group '{}/{}' - {}",
- &store,
- group,
- err);
+ eprintln!(
+ "Failed to get owner of group '{}/{}' - {}",
+ &store, group, err
+ );
return group_info;
- },
+ }
};
if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
return group_info;
Ok(snapshots) => snapshots,
Err(_) => {
return group_info;
- },
+ }
};
let backup_count: u64 = snapshots.len() as u64;
.iter()
.fold(&snapshots[0], |last, curr| {
if curr.is_finished()
- && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
+ && curr.backup_dir.backup_time() > last.backup_dir.backup_time()
+ {
curr
} else {
last
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let group = BackupGroup::new(backup_type, backup_id);
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ snapshot.group(),
+ &auth_id,
+ PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
+ )?;
let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ snapshot.group(),
+ &auth_id,
+ PRIV_DATASTORE_MODIFY,
+ )?;
datastore.remove_backup_dir(&snapshot, false)?;
},
)]
/// List backup snapshots.
-pub fn list_snapshots (
+pub fn list_snapshots(
store: String,
backup_type: Option<String>,
backup_id: Option<String>,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
let mut groups = Vec::with_capacity(1);
groups.push(BackupGroup::new(backup_type, backup_id));
groups
- },
- (Some(backup_type), None) => {
- BackupInfo::list_backup_groups(&base_path)?
- .into_iter()
- .filter(|group| group.backup_type() == backup_type)
- .collect()
- },
- (None, Some(backup_id)) => {
- BackupInfo::list_backup_groups(&base_path)?
- .into_iter()
- .filter(|group| group.backup_id() == backup_id)
- .collect()
- },
+ }
+ (Some(backup_type), None) => BackupInfo::list_backup_groups(&base_path)?
+ .into_iter()
+ .filter(|group| group.backup_type() == backup_type)
+ .collect(),
+ (None, Some(backup_id)) => BackupInfo::list_backup_groups(&base_path)?
+ .into_iter()
+ .filter(|group| group.backup_id() == backup_id)
+ .collect(),
_ => BackupInfo::list_backup_groups(&base_path)?,
};
Err(err) => {
eprintln!("error parsing fingerprint: '{}'", err);
None
- },
+ }
};
let verification = manifest.unprotected["verify_state"].clone();
- let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
- Ok(verify) => verify,
- Err(err) => {
- eprintln!("error parsing verification state : '{}'", err);
- None
- }
- };
+ let verification: Option<SnapshotVerifyState> =
+ match serde_json::from_value(verification) {
+ Ok(verify) => verify,
+ Err(err) => {
+ eprintln!("error parsing verification state : '{}'", err);
+ None
+ }
+ };
let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
owner,
protected,
}
- },
+ }
Err(err) => {
eprintln!("error during snapshot file listing: '{}'", err);
let files = info
- .files
- .into_iter()
- .map(|filename| BackupContent {
- filename,
- size: None,
- crypt_mode: None,
- })
- .collect();
+ .files
+ .into_iter()
+ .map(|filename| BackupContent {
+ filename,
+ size: None,
+ crypt_mode: None,
+ })
+ .collect();
SnapshotListItem {
backup_type,
owner,
protected,
}
- },
+ }
}
};
- groups
- .iter()
- .try_fold(Vec::new(), |mut snapshots, group| {
- let owner = match datastore.get_owner(group) {
- Ok(auth_id) => auth_id,
- Err(err) => {
- eprintln!("Failed to get owner of group '{}/{}' - {}",
- &store,
- group,
- err);
- return Ok(snapshots);
- },
- };
-
- if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
+ groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
+ let owner = match datastore.get_owner(group) {
+ Ok(auth_id) => auth_id,
+ Err(err) => {
+ eprintln!(
+ "Failed to get owner of group '{}/{}' - {}",
+ &store, group, err
+ );
return Ok(snapshots);
}
+ };
- let group_backups = group.list_backups(&datastore.base_path())?;
+ if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
+ return Ok(snapshots);
+ }
- snapshots.extend(
- group_backups
- .into_iter()
- .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
- );
+ let group_backups = group.list_backups(&datastore.base_path())?;
- Ok(snapshots)
- })
+ snapshots.extend(
+ group_backups
+ .into_iter()
+ .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)),
+ );
+
+ Ok(snapshots)
+ })
}
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
let base_path = store.base_path();
let groups = BackupInfo::list_backup_groups(&base_path)?;
- groups.iter()
+ groups
+ .iter()
.filter(|group| {
let owner = match store.get_owner(group) {
Ok(owner) => owner,
Err(err) => {
- eprintln!("Failed to get owner of group '{}/{}' - {}",
- store.name(),
- group,
- err);
+ eprintln!(
+ "Failed to get owner of group '{}/{}' - {}",
+ store.name(),
+ group,
+ err
+ );
return false;
- },
+ }
};
match filter_owner {
match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
- worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
+ worker_id = format!(
+ "{}:{}/{}/{:08X}",
+ store, backup_type, backup_id, backup_time
+ );
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
&verify_worker,
&backup_dir,
worker.upid().clone(),
- Some(&move |manifest| {
- verify_filter(ignore_verified, outdated_after, manifest)
- }),
+ Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
)? {
res.push(backup_dir.to_string());
}
&backup_group,
&mut StoreProgress::new(1),
worker.upid(),
- Some(&move |manifest| {
- verify_filter(ignore_verified, outdated_after, manifest)
- }),
+ Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
)?;
failed_dirs
} else {
- let privs = CachedUserInfo::new()?
- .lookup_privs(&auth_id, &["datastore", &store]);
+ let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]);
let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
Some(auth_id)
&verify_worker,
worker.upid(),
owner,
- Some(&move |manifest| {
- verify_filter(ignore_verified, outdated_after, manifest)
- }),
+ Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
)?
};
if !failed_dirs.is_empty() {
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let group = BackupGroup::new(&backup_type, &backup_id);
return Ok(json!(prune_result));
}
-
    // We use a WorkerTask just to have a task log, but run synchronously
let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
if keep_all {
task_log!(worker, "No prune selection - keeping all files.");
} else {
- task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
- task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
- store, backup_type, backup_id);
+ task_log!(
+ worker,
+ "retention options: {}",
+ pbs_datastore::prune::cli_options_string(&prune_options)
+ );
+ task_log!(
+ worker,
+ "Starting prune on store \"{}\" group \"{}/{}\"",
+ store,
+ backup_type,
+ backup_id
+ );
}
for (info, mark) in prune_info {
let timestamp = info.backup_dir.backup_time_string();
let group = info.backup_dir.group();
-
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
Some(store.clone()),
auth_id.to_string(),
to_stdout,
- move |worker| crate::server::prune_datastore(
- worker,
- auth_id,
- prune_options,
- &store,
- datastore,
- dry_run
- ),
+ move |worker| {
+ crate::server::prune_datastore(
+ worker,
+ auth_id,
+ prune_options,
+ &store,
+ datastore,
+ dry_run,
+ )
+ },
)?;
Ok(upid_str)
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let job = Job::new("garbage_collection", &store)
+ let job = Job::new("garbage_collection", &store)
.map_err(|_| format_err!("garbage collection already running"))?;
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
- let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
- .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;
+ let upid_str =
+ crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
+ .map_err(|err| {
+ format_err!(
+ "unable to start garbage collection job on datastore {} - {}",
+ store,
+ err
+ )
+ })?;
Ok(json!(upid_str))
}
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {
-
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let status = datastore.last_gc_status();
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {
-
let (config, _digest) = pbs_config::datastore::config()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
for (store, (_, data)) in &config.sections {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
- let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
+ let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if allowed {
- list.push(
- DataStoreListItem {
- store: store.clone(),
- comment: data["comment"].as_str().map(String::from),
- }
- );
+ list.push(DataStoreListItem {
+ store: store.clone(),
+ comment: data["comment"].as_str().map(String::from),
+ });
}
}
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
- ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
]),
- )
-).access(None, &Permission::Privilege(
- &["datastore", "{store}"],
- PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
- true)
+ ),
+)
+.access(
+ None,
+ &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+ true,
+ ),
);
pub fn download_file(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let store = required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_READ,
+ )?;
- println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+ println!(
+ "Download {} from {} ({}/{})",
+ file_name, store, backup_dir, file_name
+ );
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
.await
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
- let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
- .map_ok(|bytes| bytes.freeze())
- .map_err(move |err| {
- eprintln!("error during streaming of '{:?}' - {}", &path, err);
- err
- });
+ let payload =
+ tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
+ .map_ok(|bytes| bytes.freeze())
+ .map_err(move |err| {
+ eprintln!("error during streaming of '{:?}' - {}", &path, err);
+ err
+ });
let body = Body::wrap_stream(payload);
// fixme: set other headers ?
Ok(Response::builder()
- .status(StatusCode::OK)
- .header(header::CONTENT_TYPE, "application/octet-stream")
- .body(body)
- .unwrap())
- }.boxed()
+ .status(StatusCode::OK)
+ .header(header::CONTENT_TYPE, "application/octet-stream")
+ .body(body)
+ .unwrap())
+ }
+ .boxed()
}
#[sortable]
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
- ("backup-id", false, &BACKUP_ID_SCHEMA),
+ ("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
]),
- )
-).access(None, &Permission::Privilege(
- &["datastore", "{store}"],
- PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
- true)
+ ),
+)
+.access(
+ None,
+ &Permission::Privilege(
+ &["datastore", "{store}"],
+ PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+ true,
+ ),
);
pub fn download_file_decoded(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let store = required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_READ,
+ )?;
let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
for file in files {
}
}
- println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+ println!(
+ "Download {} from {} ({}/{})",
+ file_name, store, backup_dir, file_name
+ );
let mut path = datastore.base_path();
path.push(backup_dir.relative_path());
let body = match extension {
"didx" => {
- let index = DynamicIndexReader::open(&path)
- .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+ let index = DynamicIndexReader::open(&path).map_err(|err| {
+ format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
+ })?;
let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
- Body::wrap_stream(AsyncReaderStream::new(reader)
- .map_err(move |err| {
- eprintln!("error during streaming of '{:?}' - {}", path, err);
- err
- }))
- },
+ Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| {
+ eprintln!("error during streaming of '{:?}' - {}", path, err);
+ err
+ }))
+ }
"fidx" => {
- let index = FixedIndexReader::open(&path)
- .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
+ let index = FixedIndexReader::open(&path).map_err(|err| {
+ format_err!("unable to read fixed index '{:?}' - {}", &path, err)
+ })?;
let (csum, size) = index.compute_csum();
manifest.verify_file(&file_name, &csum, size)?;
let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
- Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
- .map_err(move |err| {
- eprintln!("error during streaming of '{:?}' - {}", path, err);
- err
- }))
- },
+ Body::wrap_stream(
+ AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err(
+ move |err| {
+ eprintln!("error during streaming of '{:?}' - {}", path, err);
+ err
+ },
+ ),
+ )
+ }
"blob" => {
let file = std::fs::File::open(&path)
.map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
// FIXME: load full blob to verify index checksum?
Body::wrap_stream(
- WrappedReaderStream::new(DataBlobReader::new(file, None)?)
- .map_err(move |err| {
+ WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err(
+ move |err| {
eprintln!("error during streaming of '{:?}' - {}", path, err);
err
- })
+ },
+ ),
)
- },
+ }
extension => {
bail!("cannot download '{}' files", extension);
- },
+ }
};
// fixme: set other headers ?
Ok(Response::builder()
- .status(StatusCode::OK)
- .header(header::CONTENT_TYPE, "application/octet-stream")
- .body(body)
- .unwrap())
- }.boxed()
+ .status(StatusCode::OK)
+ .header(header::CONTENT_TYPE, "application/octet-stream")
+ .body(body)
+ .unwrap())
+ }
+ .boxed()
}
#[sortable]
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
]),
- )
-).access(
+ ),
+)
+.access(
Some("Only the backup creator/owner is allowed to do this."),
- &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
+ &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false),
);
pub fn upload_backup_log(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let store = required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;
- let file_name = CLIENT_LOG_BLOB_NAME;
+ let file_name = CLIENT_LOG_BLOB_NAME;
let backup_type = required_string_param(¶m, "backup-type")?;
let backup_id = required_string_param(¶m, "backup-id")?;
bail!("backup already contains a log.");
}
- println!("Upload backup log to {}/{}/{}/{}/{}", store,
- backup_type, backup_id, backup_dir.backup_time_string(), file_name);
+ println!(
+ "Upload backup log to {}/{}/{}/{}/{}",
+ store,
+ backup_type,
+ backup_id,
+ backup_dir.backup_time_string(),
+ file_name
+ );
let data = req_body
.map_err(Error::from)
// fixme: use correct formatter
Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
- }.boxed()
+ }
+ .boxed()
}
#[api(
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_READ,
+ )?;
let file_name = CATALOG_NAME;
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let store = required_string_param(¶m, "store")?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_READ,
+ )?;
let mut components = base64::decode(&filepath)?;
if !components.is_empty() && components[0] == b'/' {
let root = decoder.open_root().await?;
let path = OsStr::from_bytes(file_path).to_os_string();
let file = root
- .lookup(&path).await?
+ .lookup(&path)
+ .await?
.ok_or_else(|| format_err!("error opening '{:?}'", path))?;
let body = match file.kind() {
EntryKind::Hardlink(_) => Body::wrap_stream(
AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
.map_err(move |err| {
- eprintln!(
- "error during streaming of hardlink '{:?}' - {}",
- path, err
- );
+ eprintln!("error during streaming of hardlink '{:?}' - {}", path, err);
err
}),
),
let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100);
let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
if tar {
- proxmox_rest_server::spawn_internal_task(
- create_tar(channelwriter, decoder, path.clone(), false)
- );
+ proxmox_rest_server::spawn_internal_task(create_tar(
+ channelwriter,
+ decoder,
+ path.clone(),
+ false,
+ ));
let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?;
Body::wrap_stream(zstdstream.map_err(move |err| {
eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err);
err
}))
} else {
- proxmox_rest_server::spawn_internal_task(
- create_zip(channelwriter, decoder, path.clone(), false)
- );
+ proxmox_rest_server::spawn_internal_task(create_zip(
+ channelwriter,
+ decoder,
+ path.clone(),
+ false,
+ ));
Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
eprintln!("error during streaming of zip '{:?}' - {}", path, err);
err
// fixme: set other headers ?
Ok(Response::builder()
- .status(StatusCode::OK)
- .header(header::CONTENT_TYPE, "application/octet-stream")
- .body(body)
- .unwrap())
- }.boxed()
+ .status(StatusCode::OK)
+ .header(header::CONTENT_TYPE, "application/octet-stream")
+ .body(body)
+ .unwrap())
+ }
+ .boxed()
}
#[api(
cf: RRDMode,
_param: Value,
) -> Result<Value, Error> {
-
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let disk_manager = crate::tools::disks::DiskManage::new();
let mut rrd_fields = vec![
- "total", "used",
- "read_ios", "read_bytes",
- "write_ios", "write_bytes",
+ "total",
+ "used",
+ "read_ios",
+ "read_bytes",
+ "write_ios",
+ "write_bytes",
];
// we do not have io_ticks for zpools, so don't include them
match disk_manager.find_mounted_device(&datastore.base_path()) {
- Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {},
+ Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {}
_ => rrd_fields.push("io_ticks"),
};
- create_value_from_rrd(
- &format!("datastore/{}", store),
- &rrd_fields,
- timeframe,
- cf,
- )
+ create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf)
}
#[api(
},
)]
/// Read datastore stats
-pub fn get_active_operations(
- store: String,
- _param: Value,
-) -> Result<Value, Error> {
+pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> {
let active_operations = task_tracking::get_active_operations(&store)?;
Ok(json!({
"read": active_operations.read,
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_AUDIT,
+ )?;
let (manifest, _) = datastore.load_manifest(&backup_dir)?;
- let notes = manifest.unprotected["notes"]
- .as_str()
- .unwrap_or("");
+ let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
Ok(String::from(notes))
}
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_MODIFY,
+ )?;
- datastore.update_manifest(&backup_dir,|manifest| {
- manifest.unprotected["notes"] = notes.into();
- }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
+ datastore
+ .update_manifest(&backup_dir, |manifest| {
+ manifest.unprotected["notes"] = notes.into();
+ })
+ .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
Ok(())
}
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_AUDIT,
+ )?;
Ok(backup_dir.is_protected(datastore.base_path()))
}
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
- check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;
+ check_priv_or_backup_owner(
+ &datastore,
+ backup_dir.group(),
+ &auth_id,
+ PRIV_DATASTORE_MODIFY,
+ )?;
datastore.update_protection(&backup_dir, protected)
}
new_owner: Authid,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
-
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let backup_group = BackupGroup::new(backup_type, backup_id);
let owner = owner.user();
let new_owner = new_owner.user();
owner == new_owner && Authid::from(owner.clone()) == auth_id
- },
+ }
(true, false) => {
// API token to API token owner
- Authid::from(owner.user().clone()) == auth_id
- && new_owner == auth_id
- },
+ Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
+ }
(false, true) => {
// API token owner to API token
- owner == auth_id
- && Authid::from(new_owner.user().clone()) == auth_id
- },
+ owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
+ }
(false, false) => {
// User to User, not allowed for unprivileged users
false
- },
+ }
}
} else {
false
};
if !allowed {
- return Err(http_err!(UNAUTHORIZED,
- "{} does not have permission to change owner of backup group '{}' to {}",
- auth_id,
- backup_group,
- new_owner,
+ return Err(http_err!(
+ UNAUTHORIZED,
+ "{} does not have permission to change owner of backup group '{}' to {}",
+ auth_id,
+ backup_group,
+ new_owner,
));
}
if !user_info.is_active_auth_id(&new_owner) {
- bail!("{} '{}' is inactive or non-existent",
- if new_owner.is_token() {
- "API token".to_string()
- } else {
- "user".to_string()
- },
- new_owner);
+ bail!(
+ "{} '{}' is inactive or non-existent",
+ if new_owner.is_token() {
+ "API token".to_string()
+ } else {
+ "user".to_string()
+ },
+ new_owner
+ );
}
datastore.set_owner(&backup_group, &new_owner, true)?;
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
(
"active-operations",
- &Router::new()
- .get(&API_METHOD_GET_ACTIVE_OPERATIONS)
- ),
- (
- "catalog",
- &Router::new()
- .get(&API_METHOD_CATALOG)
+ &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
),
+ ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
(
"change-owner",
- &Router::new()
- .post(&API_METHOD_SET_BACKUP_OWNER)
+ &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
),
(
"download",
- &Router::new()
- .download(&API_METHOD_DOWNLOAD_FILE)
+ &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
),
(
"download-decoded",
- &Router::new()
- .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
- ),
- (
- "files",
- &Router::new()
- .get(&API_METHOD_LIST_SNAPSHOT_FILES)
+ &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
),
+ ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
(
"gc",
&Router::new()
.get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
- .post(&API_METHOD_START_GARBAGE_COLLECTION)
+ .post(&API_METHOD_START_GARBAGE_COLLECTION),
),
(
"group-notes",
&Router::new()
.get(&API_METHOD_GET_GROUP_NOTES)
- .put(&API_METHOD_SET_GROUP_NOTES)
+ .put(&API_METHOD_SET_GROUP_NOTES),
),
(
"groups",
&Router::new()
.get(&API_METHOD_LIST_GROUPS)
- .delete(&API_METHOD_DELETE_GROUP)
+ .delete(&API_METHOD_DELETE_GROUP),
),
(
"notes",
&Router::new()
.get(&API_METHOD_GET_NOTES)
- .put(&API_METHOD_SET_NOTES)
+ .put(&API_METHOD_SET_NOTES),
),
(
"protected",
&Router::new()
.get(&API_METHOD_GET_PROTECTION)
- .put(&API_METHOD_SET_PROTECTION)
- ),
- (
- "prune",
- &Router::new()
- .post(&API_METHOD_PRUNE)
+ .put(&API_METHOD_SET_PROTECTION),
),
+ ("prune", &Router::new().post(&API_METHOD_PRUNE)),
(
"prune-datastore",
- &Router::new()
- .post(&API_METHOD_PRUNE_DATASTORE)
+ &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
),
(
"pxar-file-download",
- &Router::new()
- .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
- ),
- (
- "rrd",
- &Router::new()
- .get(&API_METHOD_GET_RRD_STATS)
+ &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
),
+ ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
(
"snapshots",
&Router::new()
.get(&API_METHOD_LIST_SNAPSHOTS)
- .delete(&API_METHOD_DELETE_SNAPSHOT)
- ),
- (
- "status",
- &Router::new()
- .get(&API_METHOD_STATUS)
+ .delete(&API_METHOD_DELETE_SNAPSHOT),
),
+ ("status", &Router::new().get(&API_METHOD_STATUS)),
(
"upload-backup-log",
- &Router::new()
- .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
- ),
- (
- "verify",
- &Router::new()
- .post(&API_METHOD_VERIFY)
+ &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
),
+ ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];
const DATASTORE_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
.subdirs(DATASTORE_INFO_SUBDIRS);
-
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_GET_DATASTORE_LIST)
.match_all("store", &DATASTORE_INFO_ROUTER);
//! Backup Server Administration
-use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
pub mod datastore;
pub mod sync;
-pub mod verify;
pub mod traffic_control;
+pub mod verify;
const SUBDIRS: SubdirMap = &[
("datastore", &datastore::ROUTER),
("sync", &sync::ROUTER),
("traffic-control", &traffic_control::ROUTER),
- ("verify", &verify::ROUTER)
+ ("verify", &verify::ROUTER),
];
pub const ROUTER: Router = Router::new()
use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox_sys::sortable;
use proxmox_router::{
- list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
- Permission,
+ list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+ SubdirMap,
};
use proxmox_schema::api;
+use proxmox_sys::sortable;
-use pbs_api_types::{DATASTORE_SCHEMA, JOB_ID_SCHEMA, Authid, SyncJobConfig, SyncJobStatus};
+use pbs_api_types::{Authid, SyncJobConfig, SyncJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA};
use pbs_config::sync;
use pbs_config::CachedUserInfo;
use crate::{
api2::{
+ config::sync::{check_sync_job_modify_access, check_sync_job_read_access},
pull::do_sync_job,
- config::sync::{
- check_sync_job_modify_access,
- check_sync_job_read_access,
- },
- },
- server::{
- jobstate::{
- Job,
- JobState,
- compute_schedule_status,
- },
},
+ server::jobstate::{compute_schedule_status, Job, JobState},
};
#[api(
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
true
}
})
- .filter(|job: &SyncJobConfig| {
- check_sync_job_read_access(&user_info, &auth_id, job)
- });
+ .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job));
let mut list = Vec::new();
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
- list.push(SyncJobStatus { config: job, status });
+ list.push(SyncJobStatus {
+ config: job,
+ status,
+ });
}
rpcenv["digest"] = hex::encode(&digest).into();
}
#[sortable]
-const SYNC_INFO_SUBDIRS: SubdirMap = &[
- (
- "run",
- &Router::new()
- .post(&API_METHOD_RUN_SYNC_JOB)
- ),
-];
+const SYNC_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_SYNC_JOB))];
const SYNC_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SYNC_INFO_SUBDIRS))
.subdirs(SYNC_INFO_SUBDIRS);
-
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_SYNC_JOBS)
.match_all("id", &SYNC_INFO_ROUTER);
use anyhow::Error;
use serde::{Deserialize, Serialize};
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
-use pbs_api_types::{
- TrafficControlRule, PRIV_SYS_AUDIT,
-};
+use pbs_api_types::{TrafficControlRule, PRIV_SYS_AUDIT};
use crate::traffic_control_cache::TRAFFIC_CONTROL_CACHE;
},
)]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
#[serde(flatten)]
pub fn show_current_traffic(
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TrafficControlCurrentRate>, Error> {
-
let (config, digest) = pbs_config::traffic_control::config()?;
let rules: Vec<TrafficControlRule> = config.convert_to_typed_array("rule")?;
None => (0, 0),
Some(state) => (state.rate_in, state.rate_out),
};
- list.push(TrafficControlCurrentRate {config, cur_rate_in, cur_rate_out});
+ list.push(TrafficControlCurrentRate {
+ config,
+ cur_rate_in,
+ cur_rate_out,
+ });
}
// also return the configuration digest
Ok(list)
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_SHOW_CURRENT_TRAFFIC);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_SHOW_CURRENT_TRAFFIC);
use anyhow::{format_err, Error};
use serde_json::Value;
-use proxmox_sys::sortable;
use proxmox_router::{
- list_subdirs_api_method, ApiMethod, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
- Permission,
+ list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, RpcEnvironmentType,
+ SubdirMap,
};
use proxmox_schema::api;
+use proxmox_sys::sortable;
use pbs_api_types::{
- VerificationJobConfig, VerificationJobStatus, JOB_ID_SCHEMA, Authid,
- PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, DATASTORE_SCHEMA,
+ Authid, VerificationJobConfig, VerificationJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA,
+ PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY,
};
use pbs_config::verify;
use pbs_config::CachedUserInfo;
-use crate::{
- server::{
- do_verification_job,
- jobstate::{
- Job,
- JobState,
- compute_schedule_status,
- },
- },
+use crate::server::{
+ do_verification_job,
+ jobstate::{compute_schedule_status, Job, JobState},
};
#[api(
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
- list.push(VerificationJobStatus { config: job, status });
+ list.push(VerificationJobStatus {
+ config: job,
+ status,
+ });
}
rpcenv["digest"] = hex::encode(&digest).into();
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
- user_info.check_privs(&auth_id, &["datastore", &verification_job.store], PRIV_DATASTORE_VERIFY, true)?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &verification_job.store],
+ PRIV_DATASTORE_VERIFY,
+ true,
+ )?;
let job = Job::new("verificationjob", &id)?;
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
}
#[sortable]
-const VERIFICATION_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
+const VERIFICATION_INFO_SUBDIRS: SubdirMap =
+ &[("run", &Router::new().post(&API_METHOD_RUN_VERIFICATION_JOB))];
const VERIFICATION_INFO_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(VERIFICATION_INFO_SUBDIRS))
use anyhow::{bail, format_err, Error};
-use std::sync::{Arc, Mutex};
-use std::collections::HashMap;
use nix::dir::Dir;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
-use ::serde::{Serialize};
+use ::serde::Serialize;
use serde_json::{json, Value};
-use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
+use proxmox_sys::fs::{replace_file, CreateOptions};
-use pbs_datastore::{DataStore, DataBlob};
+use pbs_api_types::Authid;
use pbs_datastore::backup_info::{BackupDir, BackupInfo};
use pbs_datastore::dynamic_index::DynamicIndexWriter;
use pbs_datastore::fixed_index::FixedIndexWriter;
-use pbs_api_types::Authid;
-use proxmox_rest_server::{WorkerTask, formatter::*};
+use pbs_datastore::{DataBlob, DataStore};
+use proxmox_rest_server::{formatter::*, WorkerTask};
use crate::backup::verify_backup_dir_with_lock;
}
// key=digest, value=length
-type KnownChunksMap = HashMap<[u8;32], u32>;
+type KnownChunksMap = HashMap<[u8; 32], u32>;
struct SharedBackupState {
finished: bool,
}
impl SharedBackupState {
-
// Raise error if finished flag is set
fn ensure_unfinished(&self) -> Result<(), Error> {
if self.finished {
}
}
-
/// `RpcEnvironment` implementation for backup service
#[derive(Clone)]
pub struct BackupEnvironment {
pub datastore: Arc<DataStore>,
pub backup_dir: BackupDir,
pub last_backup: Option<BackupInfo>,
- state: Arc<Mutex<SharedBackupState>>
+ state: Arc<Mutex<SharedBackupState>>,
}
impl BackupEnvironment {
datastore: Arc<DataStore>,
backup_dir: BackupDir,
) -> Self {
-
let state = SharedBackupState {
finished: false,
uid_counter: 0,
};
if size > data.chunk_size {
- bail!("fixed writer '{}' - got large chunk ({} > {}", data.name, size, data.chunk_size);
+ bail!(
+            "fixed writer '{}' - got large chunk ({} > {})",
+ data.name,
+ size,
+ data.chunk_size
+ );
}
if size < data.chunk_size {
data.small_chunk_count += 1;
if data.small_chunk_count > 1 {
- bail!("fixed writer '{}' - detected multiple end chunks (chunk size too small)", wid);
+ bail!(
+ "fixed writer '{}' - detected multiple end chunks (chunk size too small)",
+ wid
+ );
}
}
data.upload_stat.count += 1;
data.upload_stat.size += size as u64;
data.upload_stat.compressed_size += compressed_size as u64;
- if is_duplicate { data.upload_stat.duplicates += 1; }
+ if is_duplicate {
+ data.upload_stat.duplicates += 1;
+ }
// register chunk
state.known_chunks.insert(digest, size);
data.upload_stat.count += 1;
data.upload_stat.size += size as u64;
data.upload_stat.compressed_size += compressed_size as u64;
- if is_duplicate { data.upload_stat.duplicates += 1; }
+ if is_duplicate {
+ data.upload_stat.duplicates += 1;
+ }
// register chunk
state.known_chunks.insert(digest, size);
}
    /// Store the writer with a unique ID
- pub fn register_dynamic_writer(&self, index: DynamicIndexWriter, name: String) -> Result<usize, Error> {
+ pub fn register_dynamic_writer(
+ &self,
+ index: DynamicIndexWriter,
+ name: String,
+ ) -> Result<usize, Error> {
let mut state = self.state.lock().unwrap();
state.ensure_unfinished()?;
let uid = state.next_uid();
- state.dynamic_writers.insert(uid, DynamicWriterState {
- index, name, offset: 0, chunk_count: 0, upload_stat: UploadStatistic::new(),
- });
+ state.dynamic_writers.insert(
+ uid,
+ DynamicWriterState {
+ index,
+ name,
+ offset: 0,
+ chunk_count: 0,
+ upload_stat: UploadStatistic::new(),
+ },
+ );
Ok(uid)
}
    /// Store the writer with a unique ID
- pub fn register_fixed_writer(&self, index: FixedIndexWriter, name: String, size: usize, chunk_size: u32, incremental: bool) -> Result<usize, Error> {
+ pub fn register_fixed_writer(
+ &self,
+ index: FixedIndexWriter,
+ name: String,
+ size: usize,
+ chunk_size: u32,
+ incremental: bool,
+ ) -> Result<usize, Error> {
let mut state = self.state.lock().unwrap();
state.ensure_unfinished()?;
let uid = state.next_uid();
- state.fixed_writers.insert(uid, FixedWriterState {
- index, name, chunk_count: 0, size, chunk_size, small_chunk_count: 0, upload_stat: UploadStatistic::new(), incremental,
- });
+ state.fixed_writers.insert(
+ uid,
+ FixedWriterState {
+ index,
+ name,
+ chunk_count: 0,
+ size,
+ chunk_size,
+ small_chunk_count: 0,
+ upload_stat: UploadStatistic::new(),
+ incremental,
+ },
+ );
Ok(uid)
}
/// Append chunk to dynamic writer
- pub fn dynamic_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
+ pub fn dynamic_writer_append_chunk(
+ &self,
+ wid: usize,
+ offset: u64,
+ size: u32,
+ digest: &[u8; 32],
+ ) -> Result<(), Error> {
let mut state = self.state.lock().unwrap();
state.ensure_unfinished()?;
None => bail!("dynamic writer '{}' not registered", wid),
};
-
if data.offset != offset {
- bail!("dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
- data.name, data.offset, offset);
+ bail!(
+ "dynamic writer '{}' append chunk failed - got strange chunk offset ({} != {})",
+ data.name,
+ data.offset,
+ offset
+ );
}
data.offset += size as u64;
}
/// Append chunk to fixed writer
- pub fn fixed_writer_append_chunk(&self, wid: usize, offset: u64, size: u32, digest: &[u8; 32]) -> Result<(), Error> {
+ pub fn fixed_writer_append_chunk(
+ &self,
+ wid: usize,
+ offset: u64,
+ size: u32,
+ digest: &[u8; 32],
+ ) -> Result<(), Error> {
let mut state = self.state.lock().unwrap();
state.ensure_unfinished()?;
Ok(())
}
- fn log_upload_stat(&self, archive_name: &str, csum: &[u8; 32], uuid: &[u8; 16], size: u64, chunk_count: u64, upload_stat: &UploadStatistic) {
+ fn log_upload_stat(
+ &self,
+ archive_name: &str,
+ csum: &[u8; 32],
+ uuid: &[u8; 16],
+ size: u64,
+ chunk_count: u64,
+ upload_stat: &UploadStatistic,
+ ) {
self.log(format!("Upload statistics for '{}'", archive_name));
self.log(format!("UUID: {}", hex::encode(uuid)));
self.log(format!("Checksum: {}", hex::encode(csum)));
return;
}
- self.log(format!("Upload size: {} ({}%)", upload_stat.size, (upload_stat.size*100)/size));
+ self.log(format!(
+ "Upload size: {} ({}%)",
+ upload_stat.size,
+ (upload_stat.size * 100) / size
+ ));
// account for zero chunk, which might be uploaded but never used
let client_side_duplicates = if chunk_count < upload_stat.count {
let server_side_duplicates = upload_stat.duplicates;
if (client_side_duplicates + server_side_duplicates) > 0 {
- let per = (client_side_duplicates + server_side_duplicates)*100/chunk_count;
- self.log(format!("Duplicates: {}+{} ({}%)", client_side_duplicates, server_side_duplicates, per));
+ let per = (client_side_duplicates + server_side_duplicates) * 100 / chunk_count;
+ self.log(format!(
+ "Duplicates: {}+{} ({}%)",
+ client_side_duplicates, server_side_duplicates, per
+ ));
}
if upload_stat.size > 0 {
- self.log(format!("Compression: {}%", (upload_stat.compressed_size*100)/upload_stat.size));
+ self.log(format!(
+ "Compression: {}%",
+ (upload_stat.compressed_size * 100) / upload_stat.size
+ ));
}
}
/// Close dynamic writer
- pub fn dynamic_writer_close(&self, wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+ pub fn dynamic_writer_close(
+ &self,
+ wid: usize,
+ chunk_count: u64,
+ size: u64,
+ csum: [u8; 32],
+ ) -> Result<(), Error> {
let mut state = self.state.lock().unwrap();
state.ensure_unfinished()?;
};
if data.chunk_count != chunk_count {
- bail!("dynamic writer '{}' close failed - unexpected chunk count ({} != {})", data.name, data.chunk_count, chunk_count);
+ bail!(
+ "dynamic writer '{}' close failed - unexpected chunk count ({} != {})",
+ data.name,
+ data.chunk_count,
+ chunk_count
+ );
}
if data.offset != size {
- bail!("dynamic writer '{}' close failed - unexpected file size ({} != {})", data.name, data.offset, size);
+ bail!(
+ "dynamic writer '{}' close failed - unexpected file size ({} != {})",
+ data.name,
+ data.offset,
+ size
+ );
}
let uuid = data.index.uuid;
let expected_csum = data.index.close()?;
if csum != expected_csum {
- bail!("dynamic writer '{}' close failed - got unexpected checksum", data.name);
+ bail!(
+ "dynamic writer '{}' close failed - got unexpected checksum",
+ data.name
+ );
}
- self.log_upload_stat(&data.name, &csum, &uuid, size, chunk_count, &data.upload_stat);
+ self.log_upload_stat(
+ &data.name,
+ &csum,
+ &uuid,
+ size,
+ chunk_count,
+ &data.upload_stat,
+ );
state.file_counter += 1;
state.backup_size += size;
}
/// Close fixed writer
- pub fn fixed_writer_close(&self, wid: usize, chunk_count: u64, size: u64, csum: [u8; 32]) -> Result<(), Error> {
+ pub fn fixed_writer_close(
+ &self,
+ wid: usize,
+ chunk_count: u64,
+ size: u64,
+ csum: [u8; 32],
+ ) -> Result<(), Error> {
let mut state = self.state.lock().unwrap();
state.ensure_unfinished()?;
};
if data.chunk_count != chunk_count {
- bail!("fixed writer '{}' close failed - received wrong number of chunk ({} != {})", data.name, data.chunk_count, chunk_count);
+ bail!(
+            "fixed writer '{}' close failed - received wrong number of chunks ({} != {})",
+ data.name,
+ data.chunk_count,
+ chunk_count
+ );
}
if !data.incremental {
let expected_count = data.index.index_length();
if chunk_count != (expected_count as u64) {
- bail!("fixed writer '{}' close failed - unexpected chunk count ({} != {})", data.name, expected_count, chunk_count);
+ bail!(
+ "fixed writer '{}' close failed - unexpected chunk count ({} != {})",
+ data.name,
+ expected_count,
+ chunk_count
+ );
}
if size != (data.size as u64) {
- bail!("fixed writer '{}' close failed - unexpected file size ({} != {})", data.name, data.size, size);
+ bail!(
+ "fixed writer '{}' close failed - unexpected file size ({} != {})",
+ data.name,
+ data.size,
+ size
+ );
}
}
let expected_csum = data.index.close()?;
if csum != expected_csum {
- bail!("fixed writer '{}' close failed - got unexpected checksum", data.name);
+ bail!(
+ "fixed writer '{}' close failed - got unexpected checksum",
+ data.name
+ );
}
- self.log_upload_stat(&data.name, &expected_csum, &uuid, size, chunk_count, &data.upload_stat);
+ self.log_upload_stat(
+ &data.name,
+ &expected_csum,
+ &uuid,
+ size,
+ chunk_count,
+ &data.upload_stat,
+ );
state.file_counter += 1;
state.backup_size += size;
}
pub fn add_blob(&self, file_name: &str, data: Vec<u8>) -> Result<(), Error> {
-
let mut path = self.datastore.base_path();
path.push(self.backup_dir.relative_path());
path.push(file_name);
let raw_data = blob.raw_data();
replace_file(&path, raw_data, CreateOptions::new(), false)?;
- self.log(format!("add blob {:?} ({} bytes, comp: {})", path, orig_len, blob_len));
+ self.log(format!(
+ "add blob {:?} ({} bytes, comp: {})",
+ path, orig_len, blob_len
+ ));
let mut state = self.state.lock().unwrap();
state.file_counter += 1;
// check for valid manifest and store stats
let stats = serde_json::to_value(state.backup_stat)?;
- self.datastore.update_manifest(&self.backup_dir, |manifest| {
- manifest.unprotected["chunk_upload_stats"] = stats;
- }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
+ self.datastore
+ .update_manifest(&self.backup_dir, |manifest| {
+ manifest.unprotected["chunk_upload_stats"] = stats;
+ })
+ .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
if let Some(base) = &self.last_backup {
let path = self.datastore.snapshot_path(&base.backup_dir);
return Ok(());
}
- let worker_id = format!("{}:{}/{}/{:08X}",
+ let worker_id = format!(
+ "{}:{}/{}/{:08X}",
self.datastore.name(),
self.backup_dir.group().backup_type(),
self.backup_dir.group().backup_id(),
- self.backup_dir.backup_time());
+ self.backup_dir.backup_time()
+ );
let datastore = self.datastore.clone();
let backup_dir = self.backup_dir.clone();
move |worker| {
worker.log_message("Automatically verifying newly added snapshot");
-
let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
if !verify_backup_dir_with_lock(
&verify_worker,
Ok(())
},
- ).map(|_| ())
+ )
+ .map(|_| ())
}
pub fn log<S: AsRef<str>>(&self, msg: S) {
}
pub fn debug<S: AsRef<str>>(&self, msg: S) {
- if self.debug { self.worker.log_message(msg); }
+ if self.debug {
+ self.worker.log_message(msg);
+ }
}
pub fn format_response(&self, result: Result<Value, Error>) -> Response<Body> {
}
impl RpcEnvironment for BackupEnvironment {
-
fn result_attrib_mut(&mut self) -> &mut Value {
&mut self.result_attributes
}
use anyhow::{bail, format_err, Error};
use futures::*;
+use hex::FromHex;
use hyper::header::{HeaderValue, UPGRADE};
use hyper::http::request::Parts;
-use hyper::{Body, Response, Request, StatusCode};
+use hyper::{Body, Request, Response, StatusCode};
use serde_json::{json, Value};
-use hex::FromHex;
-use proxmox_sys::sortable;
use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
- ApiResponseFuture, ApiHandler, ApiMethod, Router, RpcEnvironment, SubdirMap, Permission,
+ ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
+use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, Operation, VerifyState, SnapshotVerifyState,
- BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
- CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_ARCHIVE_NAME_SCHEMA,
+ Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+ BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
+ DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
-use proxmox_sys::fs::lock_dir_noblock_shared;
-use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;
-use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
-use proxmox_rest_server::{WorkerTask, H2Service};
+use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
+use pbs_tools::json::{required_array_param, required_integer_param, required_string_param};
+use proxmox_rest_server::{H2Service, WorkerTask};
+use proxmox_sys::fs::lock_dir_noblock_shared;
mod environment;
use environment::*;
mod upload_chunk;
use upload_chunk::*;
-pub const ROUTER: Router = Router::new()
- .upgrade(&API_METHOD_UPGRADE_BACKUP);
+pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);
#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
+ async move {
+ let debug = param["debug"].as_bool().unwrap_or(false);
+ let benchmark = param["benchmark"].as_bool().unwrap_or(false);
-async move {
- let debug = param["debug"].as_bool().unwrap_or(false);
- let benchmark = param["benchmark"].as_bool().unwrap_or(false);
+ let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let store = required_string_param(&param, "store")?.to_owned();
-    let store = required_string_param(&param, "store")?.to_owned();
+ let user_info = CachedUserInfo::new()?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &store],
+ PRIV_DATASTORE_BACKUP,
+ false,
+ )?;
- let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_BACKUP, false)?;
+ let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
- let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
+    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_id = required_string_param(&param, "backup-id")?;
+    let backup_time = required_integer_param(&param, "backup-time")?;
-    let backup_type = required_string_param(&param, "backup-type")?;
-    let backup_id = required_string_param(&param, "backup-id")?;
-    let backup_time = required_integer_param(&param, "backup-time")?;
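+    // the client must request the Proxmox Backup protocol via the HTTP Upgrade header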
+ let protocols = parts
+ .headers
+ .get("UPGRADE")
+ .ok_or_else(|| format_err!("missing Upgrade header"))?
+ .to_str()?;
- let protocols = parts
- .headers
- .get("UPGRADE")
- .ok_or_else(|| format_err!("missing Upgrade header"))?
- .to_str()?;
+ if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
+ bail!("invalid protocol name");
+ }
- if protocols != PROXMOX_BACKUP_PROTOCOL_ID_V1!() {
- bail!("invalid protocol name");
- }
+ if parts.version >= http::version::Version::HTTP_2 {
+ bail!(
+ "unexpected http version '{:?}' (expected version < 2)",
+ parts.version
+ );
+ }
- if parts.version >= http::version::Version::HTTP_2 {
- bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
- }
+ let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
- let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
+ let env_type = rpcenv.env_type();
- let env_type = rpcenv.env_type();
+ let backup_group = BackupGroup::new(backup_type, backup_id);
- let backup_group = BackupGroup::new(backup_type, backup_id);
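+    // "host/benchmark" is reserved for benchmark runs and requires the benchmark flag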
+ let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+ if !benchmark {
+            bail!("unable to run benchmark without the --benchmark flag");
+ }
+ "benchmark"
+ } else {
+ if benchmark {
+            bail!("benchmark flag is only allowed on 'host/benchmark'");
+ }
+ "backup"
+ };
- let worker_type = if backup_type == "host" && backup_id == "benchmark" {
- if !benchmark {
- bail!("unable to run benchmark without --benchmark flags");
- }
- "benchmark"
- } else {
- if benchmark {
- bail!("benchmark flags is only allowed on 'host/benchmark'");
+ // lock backup group to only allow one backup per group at a time
+ let (owner, _group_guard) =
+ datastore.create_locked_backup_group(&backup_group, &auth_id)?;
+
+ // permission check
+ let correct_owner =
+ owner == auth_id || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
+ if !correct_owner && worker_type != "benchmark" {
+ // only the owner is allowed to create additional snapshots
+ bail!("backup owner check failed ({} != {})", auth_id, owner);
}
- "backup"
- };
-
- // lock backup group to only allow one backup per group at a time
- let (owner, _group_guard) = datastore.create_locked_backup_group(&backup_group, &auth_id)?;
-
- // permission check
- let correct_owner = owner == auth_id
- || (owner.is_token()
- && Authid::from(owner.user().clone()) == auth_id);
- if !correct_owner && worker_type != "benchmark" {
- // only the owner is allowed to create additional snapshots
- bail!("backup owner check failed ({} != {})", auth_id, owner);
- }
- let last_backup = {
- let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true).unwrap_or(None);
- if let Some(info) = info {
- let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
- let verify = manifest.unprotected["verify_state"].clone();
- match serde_json::from_value::<SnapshotVerifyState>(verify) {
- Ok(verify) => {
- match verify.state {
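+        // reuse the previous snapshot as an incremental base only if its last verification did not fail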
+ let last_backup = {
+ let info = BackupInfo::last_backup(&datastore.base_path(), &backup_group, true)
+ .unwrap_or(None);
+ if let Some(info) = info {
+ let (manifest, _) = datastore.load_manifest(&info.backup_dir)?;
+ let verify = manifest.unprotected["verify_state"].clone();
+ match serde_json::from_value::<SnapshotVerifyState>(verify) {
+ Ok(verify) => match verify.state {
VerifyState::Ok => Some(info),
VerifyState::Failed => None,
+ },
+ Err(_) => {
+ // no verify state found, treat as valid
+ Some(info)
}
- },
- Err(_) => {
- // no verify state found, treat as valid
- Some(info)
}
+ } else {
+ None
}
- } else {
- None
- }
- };
-
- let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
-
- let _last_guard = if let Some(last) = &last_backup {
- if backup_dir.backup_time() <= last.backup_dir.backup_time() {
- bail!("backup timestamp is older than last backup.");
- }
-
- // lock last snapshot to prevent forgetting/pruning it during backup
- let full_path = datastore.snapshot_path(&last.backup_dir);
- Some(lock_dir_noblock_shared(&full_path, "snapshot", "base snapshot is already locked by another operation")?)
- } else {
- None
- };
-
- let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
- if !is_new { bail!("backup directory already exists."); }
-
-
- WorkerTask::spawn(worker_type, Some(worker_id), auth_id.to_string(), true, move |worker| {
- let mut env = BackupEnvironment::new(
- env_type, auth_id, worker.clone(), datastore, backup_dir);
-
- env.debug = debug;
- env.last_backup = last_backup;
-
- env.log(format!("starting new {} on datastore '{}': {:?}", worker_type, store, path));
-
- let service = H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);
+ };
- let abort_future = worker.abort_future();
+ let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
- let env2 = env.clone();
+ let _last_guard = if let Some(last) = &last_backup {
+ if backup_dir.backup_time() <= last.backup_dir.backup_time() {
+ bail!("backup timestamp is older than last backup.");
+ }
- let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
- .map_err(Error::from)
- .and_then(move |conn| {
- env2.debug("protocol upgrade done");
+ // lock last snapshot to prevent forgetting/pruning it during backup
+ let full_path = datastore.snapshot_path(&last.backup_dir);
+ Some(lock_dir_noblock_shared(
+ &full_path,
+ "snapshot",
+ "base snapshot is already locked by another operation",
+ )?)
+ } else {
+ None
+ };
- let mut http = hyper::server::conn::Http::new();
- http.http2_only(true);
- // increase window size: todo - find optiomal size
- let window_size = 32*1024*1024; // max = (1 << 31) - 2
- http.http2_initial_stream_window_size(window_size);
- http.http2_initial_connection_window_size(window_size);
- http.http2_max_frame_size(4*1024*1024);
+ let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
+ if !is_new {
+ bail!("backup directory already exists.");
+ }
- let env3 = env2.clone();
- http.serve_connection(conn, service)
- .map(move |result| {
- match result {
- Err(err) => {
- // Avoid Transport endpoint is not connected (os error 107)
- // fixme: find a better way to test for that error
- if err.to_string().starts_with("connection error") && env3.finished() {
- Ok(())
- } else {
- Err(Error::from(err))
+ WorkerTask::spawn(
+ worker_type,
+ Some(worker_id),
+ auth_id.to_string(),
+ true,
+ move |worker| {
+ let mut env = BackupEnvironment::new(
+ env_type,
+ auth_id,
+ worker.clone(),
+ datastore,
+ backup_dir,
+ );
+
+ env.debug = debug;
+ env.last_backup = last_backup;
+
+ env.log(format!(
+ "starting new {} on datastore '{}': {:?}",
+ worker_type, store, path
+ ));
+
+ let service =
+ H2Service::new(env.clone(), worker.clone(), &BACKUP_API_ROUTER, debug);
+
+ let abort_future = worker.abort_future();
+
+ let env2 = env.clone();
+
+ let mut req_fut = hyper::upgrade::on(Request::from_parts(parts, req_body))
+ .map_err(Error::from)
+ .and_then(move |conn| {
+ env2.debug("protocol upgrade done");
+
+ let mut http = hyper::server::conn::Http::new();
+ http.http2_only(true);
+                        // increase window size: todo - find optimal size
+ let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
+ http.http2_initial_stream_window_size(window_size);
+ http.http2_initial_connection_window_size(window_size);
+ http.http2_max_frame_size(4 * 1024 * 1024);
+
+ let env3 = env2.clone();
+ http.serve_connection(conn, service).map(move |result| {
+ match result {
+ Err(err) => {
+ // Avoid Transport endpoint is not connected (os error 107)
+ // fixme: find a better way to test for that error
+ if err.to_string().starts_with("connection error")
+ && env3.finished()
+ {
+ Ok(())
+ } else {
+ Err(Error::from(err))
+ }
}
+ Ok(()) => Ok(()),
}
- Ok(()) => Ok(()),
- }
- })
- });
- let mut abort_future = abort_future
- .map(|_| Err(format_err!("task aborted")));
-
- async move {
- // keep flock until task ends
- let _group_guard = _group_guard;
- let snap_guard = snap_guard;
- let _last_guard = _last_guard;
-
- let res = select!{
- req = req_fut => req,
- abrt = abort_future => abrt,
- };
- if benchmark {
- env.log("benchmark finished successfully");
- proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
- return Ok(());
- }
+ })
+ });
+ let mut abort_future = abort_future.map(|_| Err(format_err!("task aborted")));
+
+ async move {
+ // keep flock until task ends
+ let _group_guard = _group_guard;
+ let snap_guard = snap_guard;
+ let _last_guard = _last_guard;
+
+ let res = select! {
+ req = req_fut => req,
+ abrt = abort_future => abrt,
+ };
+ if benchmark {
+ env.log("benchmark finished successfully");
+ proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
+ return Ok(());
+ }
- let verify = |env: BackupEnvironment| {
- if let Err(err) = env.verify_after_complete(snap_guard) {
- env.log(format!(
+ let verify = |env: BackupEnvironment| {
+ if let Err(err) = env.verify_after_complete(snap_guard) {
+ env.log(format!(
"backup finished, but starting the requested verify task failed: {}",
err
));
- }
- };
-
- match (res, env.ensure_finished()) {
- (Ok(_), Ok(())) => {
- env.log("backup finished successfully");
- verify(env);
- Ok(())
- },
- (Err(err), Ok(())) => {
- // ignore errors after finish
- env.log(format!("backup had errors but finished: {}", err));
- verify(env);
- Ok(())
- },
- (Ok(_), Err(err)) => {
- env.log(format!("backup ended and finish failed: {}", err));
- env.log("removing unfinished backup");
- proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
- Err(err)
- },
- (Err(err), Err(_)) => {
- env.log(format!("backup failed: {}", err));
- env.log("removing failed backup");
- proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
- Err(err)
- },
- }
- }
- })?;
+ }
+ };
- let response = Response::builder()
- .status(StatusCode::SWITCHING_PROTOCOLS)
- .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
- .body(Body::empty())?;
+ match (res, env.ensure_finished()) {
+ (Ok(_), Ok(())) => {
+ env.log("backup finished successfully");
+ verify(env);
+ Ok(())
+ }
+ (Err(err), Ok(())) => {
+ // ignore errors after finish
+ env.log(format!("backup had errors but finished: {}", err));
+ verify(env);
+ Ok(())
+ }
+ (Ok(_), Err(err)) => {
+ env.log(format!("backup ended and finish failed: {}", err));
+ env.log("removing unfinished backup");
+ proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
+ Err(err)
+ }
+ (Err(err), Err(_)) => {
+ env.log(format!("backup failed: {}", err));
+ env.log("removing failed backup");
+ proxmox_async::runtime::block_in_place(|| env.remove_backup())?;
+ Err(err)
+ }
+ }
+ }
+ },
+ )?;
+
+ let response = Response::builder()
+ .status(StatusCode::SWITCHING_PROTOCOLS)
+ .header(
+ UPGRADE,
+ HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
+ )
+ .body(Body::empty())?;
- Ok(response)
- }.boxed()
+ Ok(response)
+ }
+ .boxed()
}
const BACKUP_API_SUBDIRS: SubdirMap = &[
+ ("blob", &Router::new().upload(&API_METHOD_UPLOAD_BLOB)),
(
- "blob", &Router::new()
- .upload(&API_METHOD_UPLOAD_BLOB)
- ),
- (
- "dynamic_chunk", &Router::new()
- .upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK)
+ "dynamic_chunk",
+ &Router::new().upload(&API_METHOD_UPLOAD_DYNAMIC_CHUNK),
),
(
- "dynamic_close", &Router::new()
- .post(&API_METHOD_CLOSE_DYNAMIC_INDEX)
+ "dynamic_close",
+ &Router::new().post(&API_METHOD_CLOSE_DYNAMIC_INDEX),
),
(
- "dynamic_index", &Router::new()
+ "dynamic_index",
+ &Router::new()
.post(&API_METHOD_CREATE_DYNAMIC_INDEX)
- .put(&API_METHOD_DYNAMIC_APPEND)
+ .put(&API_METHOD_DYNAMIC_APPEND),
),
(
- "finish", &Router::new()
- .post(
- &ApiMethod::new(
- &ApiHandler::Sync(&finish_backup),
- &ObjectSchema::new("Mark backup as finished.", &[])
- )
- )
+ "finish",
+ &Router::new().post(&ApiMethod::new(
+ &ApiHandler::Sync(&finish_backup),
+ &ObjectSchema::new("Mark backup as finished.", &[]),
+ )),
),
(
- "fixed_chunk", &Router::new()
- .upload(&API_METHOD_UPLOAD_FIXED_CHUNK)
+ "fixed_chunk",
+ &Router::new().upload(&API_METHOD_UPLOAD_FIXED_CHUNK),
),
(
- "fixed_close", &Router::new()
- .post(&API_METHOD_CLOSE_FIXED_INDEX)
+ "fixed_close",
+ &Router::new().post(&API_METHOD_CLOSE_FIXED_INDEX),
),
(
- "fixed_index", &Router::new()
+ "fixed_index",
+ &Router::new()
.post(&API_METHOD_CREATE_FIXED_INDEX)
- .put(&API_METHOD_FIXED_APPEND)
+ .put(&API_METHOD_FIXED_APPEND),
),
(
- "previous", &Router::new()
- .download(&API_METHOD_DOWNLOAD_PREVIOUS)
+ "previous",
+ &Router::new().download(&API_METHOD_DOWNLOAD_PREVIOUS),
),
(
- "previous_backup_time", &Router::new()
- .get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME)
+ "previous_backup_time",
+ &Router::new().get(&API_METHOD_GET_PREVIOUS_BACKUP_TIME),
),
(
- "speedtest", &Router::new()
- .upload(&API_METHOD_UPLOAD_SPEEDTEST)
+ "speedtest",
+ &Router::new().upload(&API_METHOD_UPLOAD_SPEEDTEST),
),
];
&ApiHandler::Sync(&create_dynamic_index),
&ObjectSchema::new(
"Create dynamic chunk index file.",
- &sorted!([
- ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
- ]),
- )
+ &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
+ ),
);
fn create_dynamic_index(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let env: &BackupEnvironment = rpcenv.as_ref();
    let name = required_string_param(&param, "archive-name")?.to_owned();
"Create fixed chunk index file.",
&sorted!([
("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
- ("size", false, &IntegerSchema::new("File size.")
- .minimum(1)
- .schema()
+ (
+ "size",
+ false,
+ &IntegerSchema::new("File size.").minimum(1).schema()
+ ),
+ (
+ "reuse-csum",
+ true,
+ &StringSchema::new(
+ "If set, compare last backup's \
+ csum and reuse index for incremental backup if it matches."
+ )
+ .schema()
),
- ("reuse-csum", true, &StringSchema::new("If set, compare last backup's \
- csum and reuse index for incremental backup if it matches.").schema()),
]),
- )
+ ),
);
fn create_fixed_index(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let env: &BackupEnvironment = rpcenv.as_ref();
    let name = required_string_param(&param, "archive-name")?.to_owned();
let mut path = env.backup_dir.relative_path();
path.push(&archive_name);
- let chunk_size = 4096*1024; // todo: ??
+ let chunk_size = 4096 * 1024; // todo: ??
// do incremental backup if csum is set
let mut reader = None;
let (old_csum, _) = index.compute_csum();
let old_csum = hex::encode(&old_csum);
if old_csum != csum {
- bail!("expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
- csum, old_csum);
+ bail!(
+ "expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",
+ csum,
+ old_csum
+ );
}
reader = Some(index);
&IntegerSchema::new("Corresponding chunk offsets.")
.minimum(0)
.schema()
- ).schema()
+ )
+ .schema()
),
]),
- )
+ ),
);
-fn dynamic_append (
+fn dynamic_append(
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;
if offset_list.len() != digest_list.len() {
- bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
+ bail!(
+ "offset list has wrong length ({} != {})",
+ offset_list.len(),
+ digest_list.len()
+ );
}
let env: &BackupEnvironment = rpcenv.as_ref();
let digest_str = item.as_str().unwrap();
let digest = <[u8; 32]>::from_hex(digest_str)?;
let offset = offset_list[i].as_u64().unwrap();
- let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
+ let size = env
+ .lookup_chunk(&digest)
+ .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;
- env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+ env.debug(format!(
+ "successfully added chunk {} to dynamic index {} (offset {}, size {})",
+ digest_str, wid, offset, size
+ ));
}
Ok(Value::Null)
&IntegerSchema::new("Corresponding chunk offsets.")
.minimum(0)
.schema()
- ).schema()
+ )
+ .schema()
)
]),
- )
+ ),
);
-fn fixed_append (
+fn fixed_append(
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
    let wid = required_integer_param(&param, "wid")? as usize;
    let digest_list = required_array_param(&param, "digest-list")?;
    let offset_list = required_array_param(&param, "offset-list")?;
if offset_list.len() != digest_list.len() {
- bail!("offset list has wrong length ({} != {})", offset_list.len(), digest_list.len());
+ bail!(
+ "offset list has wrong length ({} != {})",
+ offset_list.len(),
+ digest_list.len()
+ );
}
let env: &BackupEnvironment = rpcenv.as_ref();
let digest_str = item.as_str().unwrap();
let digest = <[u8; 32]>::from_hex(digest_str)?;
let offset = offset_list[i].as_u64().unwrap();
- let size = env.lookup_chunk(&digest).ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
+ let size = env
+ .lookup_chunk(&digest)
+ .ok_or_else(|| format_err!("no such chunk {}", digest_str))?;
env.fixed_writer_append_chunk(wid, offset, size, &digest)?;
- env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+ env.debug(format!(
+ "successfully added chunk {} to fixed index {} (offset {}, size {})",
+ digest_str, wid, offset, size
+ ));
}
Ok(Value::Null)
(
"chunk-count",
false,
- &IntegerSchema::new("Chunk count. This is used to verify that the server got all chunks.")
- .minimum(1)
- .schema()
+ &IntegerSchema::new(
+ "Chunk count. This is used to verify that the server got all chunks."
+ )
+ .minimum(1)
+ .schema()
),
(
"size",
false,
- &IntegerSchema::new("File size. This is used to verify that the server got all data.")
- .minimum(1)
- .schema()
+ &IntegerSchema::new(
+ "File size. This is used to verify that the server got all data."
+ )
+ .minimum(1)
+ .schema()
+ ),
+ (
+ "csum",
+ false,
+ &StringSchema::new("Digest list checksum.").schema()
),
- ("csum", false, &StringSchema::new("Digest list checksum.").schema()),
]),
- )
+ ),
);
-fn close_dynamic_index (
+fn close_dynamic_index(
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
)
);
-fn close_fixed_index (
+fn close_fixed_index(
param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
    let wid = required_integer_param(&param, "wid")? as usize;
    let chunk_count = required_integer_param(&param, "chunk-count")? as u64;
    let size = required_integer_param(&param, "size")? as u64;
Ok(Value::Null)
}
-fn finish_backup (
+fn finish_backup(
_param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let env: &BackupEnvironment = rpcenv.as_ref();
env.finish_backup()?;
#[sortable]
pub const API_METHOD_GET_PREVIOUS_BACKUP_TIME: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&get_previous_backup_time),
- &ObjectSchema::new(
- "Get previous backup time.",
- &[],
- )
+ &ObjectSchema::new("Get previous backup time.", &[]),
);
fn get_previous_backup_time(
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let env: &BackupEnvironment = rpcenv.as_ref();
- let backup_time = env.last_backup.as_ref().map(|info| info.backup_dir.backup_time());
+ let backup_time = env
+ .last_backup
+ .as_ref()
+ .map(|info| info.backup_dir.backup_time());
Ok(json!(backup_time))
}
&ApiHandler::AsyncHttp(&download_previous),
&ObjectSchema::new(
"Download archive from previous backup.",
- &sorted!([
- ("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)
- ]),
- )
+ &sorted!([("archive-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA)]),
+ ),
);
fn download_previous(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let env: &BackupEnvironment = rpcenv.as_ref();
let index = env.datastore.open_dynamic_reader(&path)?;
Some(Box::new(index))
}
- _ => { None }
+ _ => None,
};
if let Some(index) = index {
- env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+ env.log(format!(
+ "register chunks in '{}' from previous backup.",
+ archive_name
+ ));
for pos in 0..index.index_count() {
let info = index.chunk_info(pos).unwrap();
env.log(format!("download '{}' from previous backup.", archive_name));
crate::api2::helpers::create_download_response(path).await
- }.boxed()
+ }
+ .boxed()
}
use anyhow::{bail, format_err, Error};
use futures::*;
-use hyper::Body;
+use hex::FromHex;
use hyper::http::request::Parts;
+use hyper::Body;
use serde_json::{json, Value};
-use hex::FromHex;
-use proxmox_sys::sortable;
-use proxmox_router::{ApiResponseFuture, ApiHandler, ApiMethod, RpcEnvironment};
+use proxmox_router::{ApiHandler, ApiMethod, ApiResponseFuture, RpcEnvironment};
use proxmox_schema::*;
+use proxmox_sys::sortable;
-use pbs_datastore::{DataStore, DataBlob};
+use pbs_api_types::{BACKUP_ARCHIVE_NAME_SCHEMA, CHUNK_DIGEST_SCHEMA};
use pbs_datastore::file_formats::{DataBlobHeader, EncryptedDataBlobHeader};
+use pbs_datastore::{DataBlob, DataStore};
use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_api_types::{CHUNK_DIGEST_SCHEMA, BACKUP_ARCHIVE_NAME_SCHEMA};
use super::environment::*;
}
impl UploadChunk {
- pub fn new(stream: Body, store: Arc<DataStore>, digest: [u8; 32], size: u32, encoded_size: u32) -> Self {
- Self { stream, store, size, encoded_size, raw_data: Some(vec![]), digest }
+ pub fn new(
+ stream: Body,
+ store: Arc<DataStore>,
+ digest: [u8; 32],
+ size: u32,
+ encoded_size: u32,
+ ) -> Self {
+ Self {
+ stream,
+ store,
+ size,
+ encoded_size,
+ raw_data: Some(vec![]),
+ digest,
+ }
}
}
Err(err) => break err,
};
- return Poll::Ready(Ok((this.digest, this.size, compressed_size as u32, is_duplicate)))
+ return Poll::Ready(Ok((
+ this.digest,
+ this.size,
+ compressed_size as u32,
+ is_duplicate,
+ )));
} else {
break format_err!("poll upload chunk stream failed - already finished.");
}
&ObjectSchema::new(
"Upload a new chunk.",
&sorted!([
- ("wid", false, &IntegerSchema::new("Fixed writer ID.")
- .minimum(1)
- .maximum(256)
- .schema()
+ (
+ "wid",
+ false,
+ &IntegerSchema::new("Fixed writer ID.")
+ .minimum(1)
+ .maximum(256)
+ .schema()
),
("digest", false, &CHUNK_DIGEST_SCHEMA),
- ("size", false, &IntegerSchema::new("Chunk size.")
- .minimum(1)
- .maximum(1024*1024*16)
- .schema()
+ (
+ "size",
+ false,
+ &IntegerSchema::new("Chunk size.")
+ .minimum(1)
+ .maximum(1024 * 1024 * 16)
+ .schema()
),
- ("encoded-size", false, &IntegerSchema::new("Encoded chunk size.")
- .minimum((std::mem::size_of::<DataBlobHeader>() as isize)+1)
- .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
- .schema()
+ (
+ "encoded-size",
+ false,
+ &IntegerSchema::new("Encoded chunk size.")
+ .minimum((std::mem::size_of::<DataBlobHeader>() as isize) + 1)
+ .maximum(
+ 1024 * 1024 * 16
+ + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
+ )
+ .schema()
),
]),
- )
+ ),
);
fn upload_fixed_chunk(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
        let wid = required_integer_param(&param, "wid")? as usize;
        let size = required_integer_param(&param, "size")? as u32;
&ObjectSchema::new(
"Upload a new chunk.",
&sorted!([
- ("wid", false, &IntegerSchema::new("Dynamic writer ID.")
- .minimum(1)
- .maximum(256)
- .schema()
+ (
+ "wid",
+ false,
+ &IntegerSchema::new("Dynamic writer ID.")
+ .minimum(1)
+ .maximum(256)
+ .schema()
),
("digest", false, &CHUNK_DIGEST_SCHEMA),
- ("size", false, &IntegerSchema::new("Chunk size.")
- .minimum(1)
- .maximum(1024*1024*16)
- .schema()
+ (
+ "size",
+ false,
+ &IntegerSchema::new("Chunk size.")
+ .minimum(1)
+ .maximum(1024 * 1024 * 16)
+ .schema()
),
- ("encoded-size", false, &IntegerSchema::new("Encoded chunk size.")
- .minimum((std::mem::size_of::<DataBlobHeader>() as isize) +1)
- .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
- .schema()
+ (
+ "encoded-size",
+ false,
+ &IntegerSchema::new("Encoded chunk size.")
+ .minimum((std::mem::size_of::<DataBlobHeader>() as isize) + 1)
+ .maximum(
+ 1024 * 1024 * 16
+ + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
+ )
+ .schema()
),
]),
- )
+ ),
);
fn upload_dynamic_chunk(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
        let wid = required_integer_param(&param, "wid")? as usize;
        let size = required_integer_param(&param, "size")? as u32;
let env: &BackupEnvironment = rpcenv.as_ref();
let (digest, size, compressed_size, is_duplicate) =
- UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size)
- .await?;
+ UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;
env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
let digest_str = hex::encode(&digest);
let result = Ok(json!(digest_str));
Ok(env.format_response(result))
- }.boxed()
+ }
+ .boxed()
}
pub const API_METHOD_UPLOAD_SPEEDTEST: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&upload_speedtest),
- &ObjectSchema::new("Test upload speed.", &[])
+ &ObjectSchema::new("Test upload speed.", &[]),
);
fn upload_speedtest(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
-
let result = req_body
.map_err(Error::from)
.try_fold(0, |size: usize, chunk| {
}
let env: &BackupEnvironment = rpcenv.as_ref();
Ok(env.format_response(Ok(Value::Null)))
- }.boxed()
+ }
+ .boxed()
}
#[sortable]
"Upload binary blob file.",
&sorted!([
("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
- ("encoded-size", false, &IntegerSchema::new("Encoded blob size.")
- .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
- .maximum(1024*1024*16+(std::mem::size_of::<EncryptedDataBlobHeader>() as isize))
- .schema()
+ (
+ "encoded-size",
+ false,
+ &IntegerSchema::new("Encoded blob size.")
+ .minimum(std::mem::size_of::<DataBlobHeader>() as isize)
+ .maximum(
+ 1024 * 1024 * 16
+ + (std::mem::size_of::<EncryptedDataBlobHeader>() as isize)
+ )
+ .schema()
)
]),
- )
+ ),
);
fn upload_blob(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
        let file_name = required_string_param(&param, "file-name")?.to_owned();
        let encoded_size = required_integer_param(&param, "encoded-size")? as usize;
.await?;
if encoded_size != data.len() {
- bail!("got blob with unexpected length ({} != {})", encoded_size, data.len());
+ bail!(
+ "got blob with unexpected length ({} != {})",
+ encoded_size,
+ data.len()
+ );
}
env.add_blob(&file_name, data)?;
Ok(env.format_response(Ok(Value::Null)))
- }.boxed()
+ }
+ .boxed()
}
-use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
use proxmox_sys::sortable;
-pub mod tfa;
pub mod openid;
+pub mod tfa;
#[sortable]
-const SUBDIRS: SubdirMap = &sorted!([
- ("openid", &openid::ROUTER),
- ("tfa", &tfa::ROUTER),
-]);
+const SUBDIRS: SubdirMap = &sorted!([("openid", &openid::ROUTER), ("tfa", &tfa::ROUTER),]);
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
+use ::serde::{Deserialize, Serialize};
/// Configure OpenId realms
-
use anyhow::Error;
-use serde_json::Value;
-use ::serde::{Deserialize, Serialize};
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- OpenIdRealmConfig, OpenIdRealmConfigUpdater,
- PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_REALM_ALLOCATE,
+ OpenIdRealmConfig, OpenIdRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
+ PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA,
};
use pbs_config::domains;
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<OpenIdRealmConfig>, Error> {
-
let (config, digest) = domains::config()?;
let list = config.convert_to_typed_array("openid")?;
)]
/// Create a new OpenId realm
pub fn create_openid_realm(config: OpenIdRealmConfig) -> Result<(), Error> {
-
let _lock = domains::lock_config()?;
let (mut domains, _digest) = domains::config()?;
- if config.realm == "pbs" ||
- config.realm == "pam" ||
- domains.sections.get(&config.realm).is_some()
+ if config.realm == "pbs"
+ || config.realm == "pam"
+ || domains.sections.get(&config.realm).is_some()
{
param_bail!("realm", "realm '{}' already exists.", config.realm);
}
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
-
let _lock = domains::lock_config()?;
let (mut domains, expected_digest) = domains::config()?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
- if domains.sections.remove(&realm).is_none() {
+ if domains.sections.remove(&realm).is_none() {
http_bail!(NOT_FOUND, "realm '{}' does not exist.", realm);
}
realm: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<OpenIdRealmConfig, Error> {
-
let (domains, digest) = domains::config()?;
let config = domains.lookup("openid", &realm)?;
#[api()]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
-
let _lock = domains::lock_config()?;
let (mut domains, expected_digest) = domains::config()?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::client_key => { config.client_key = None; },
- DeletableProperty::comment => { config.comment = None; },
- DeletableProperty::autocreate => { config.autocreate = None; },
- DeletableProperty::scopes => { config.scopes = None; },
- DeletableProperty::prompt => { config.prompt = None; },
- DeletableProperty::acr_values => { config.acr_values = None; },
+ DeletableProperty::client_key => {
+ config.client_key = None;
+ }
+ DeletableProperty::comment => {
+ config.comment = None;
+ }
+ DeletableProperty::autocreate => {
+ config.autocreate = None;
+ }
+ DeletableProperty::scopes => {
+ config.scopes = None;
+ }
+ DeletableProperty::prompt => {
+ config.prompt = None;
+ }
+ DeletableProperty::acr_values => {
+ config.acr_values = None;
+ }
}
}
}
}
}
- if let Some(issuer_url) = update.issuer_url { config.issuer_url = issuer_url; }
- if let Some(client_id) = update.client_id { config.client_id = client_id; }
+ if let Some(issuer_url) = update.issuer_url {
+ config.issuer_url = issuer_url;
+ }
+ if let Some(client_id) = update.client_id {
+ config.client_id = client_id;
+ }
- if update.client_key.is_some() { config.client_key = update.client_key; }
- if update.autocreate.is_some() { config.autocreate = update.autocreate; }
- if update.scopes.is_some() { config.scopes = update.scopes; }
- if update.prompt.is_some() { config.prompt = update.prompt; }
- if update.acr_values.is_some() { config.acr_values = update.acr_values; }
+ if update.client_key.is_some() {
+ config.client_key = update.client_key;
+ }
+ if update.autocreate.is_some() {
+ config.autocreate = update.autocreate;
+ }
+ if update.scopes.is_some() {
+ config.scopes = update.scopes;
+ }
+ if update.prompt.is_some() {
+ config.prompt = update.prompt;
+ }
+ if update.acr_values.is_some() {
+ config.acr_values = update.acr_values;
+ }
domains.set_data(&realm, "openid", &config)?;
use std::time::SystemTime;
use anyhow::{bail, format_err, Error};
+use hex::FromHex;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
-use hex::FromHex;
use proxmox_router::{
http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
-use anyhow::Error;
use ::serde::{Deserialize, Serialize};
-use serde_json::Value;
+use anyhow::Error;
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- Authid, ScsiTapeChanger, ScsiTapeChangerUpdater, LtoTapeDrive,
- PROXMOX_CONFIG_DIGEST_SCHEMA, CHANGER_NAME_SCHEMA, SLOT_ARRAY_SCHEMA,
- PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+ Authid, LtoTapeDrive, ScsiTapeChanger, ScsiTapeChangerUpdater, CHANGER_NAME_SCHEMA,
+ PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, SLOT_ARRAY_SCHEMA,
};
use pbs_config::CachedUserInfo;
-use pbs_tape::linux_list_drives::{linux_tape_changer_list, check_drive_path};
+use pbs_tape::linux_list_drives::{check_drive_path, linux_tape_changer_list};
#[api(
protected: true,
)]
/// Create a new changer device
pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> {
-
let _lock = pbs_config::drive::lock()?;
let (mut section_config, _digest) = pbs_config::drive::config()?;
}
if changer.path == config.path {
- param_bail!("path", "Path '{}' already in use by '{}'", config.path, changer.name);
+ param_bail!(
+ "path",
+ "Path '{}' already in use by '{}'",
+ config.path,
+ changer.name
+ );
}
}
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<ScsiTapeChanger, Error> {
-
let (config, digest) = pbs_config::drive::config()?;
let data: ScsiTapeChanger = config.lookup("changer", &name)?;
digest: Option<String>,
_param: Value,
) -> Result<(), Error> {
-
let _lock = pbs_config::drive::lock()?;
let (mut config, expected_digest) = pbs_config::drive::config()?;
)]
/// Delete a tape changer configuration
pub fn delete_changer(name: String, _param: Value) -> Result<(), Error> {
-
let _lock = pbs_config::drive::lock()?;
let (mut config, _digest) = pbs_config::drive::config()?;
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "changer" {
- param_bail!("name", "Entry '{}' exists, but is not a changer device", name);
+ param_bail!(
+ "name",
+ "Entry '{}' exists, but is not a changer device",
+ name
+ );
}
config.sections.remove(&name);
- },
- None => http_bail!(NOT_FOUND, "Delete changer '{}' failed - no such entry", name),
+ }
+ None => http_bail!(
+ NOT_FOUND,
+ "Delete changer '{}' failed - no such entry",
+ name
+ ),
}
let drive_list: Vec<LtoTapeDrive> = config.convert_to_typed_array("lto")?;
for drive in drive_list {
if let Some(changer) = drive.changer {
if changer == name {
- param_bail!("name", "Delete changer '{}' failed - used by drive '{}'", name, drive.name);
+ param_bail!(
+ "name",
+ "Delete changer '{}' failed - used by drive '{}'",
+ name,
+ drive.name
+ );
}
}
}
.put(&API_METHOD_UPDATE_CHANGER)
.delete(&API_METHOD_DELETE_CHANGER);
-
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_CHANGERS)
.post(&API_METHOD_CREATE_CHANGER)
use std::path::PathBuf;
-use anyhow::Error;
-use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, Router, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, param_bail, ApiType};
use proxmox_section_config::SectionConfigData;
use proxmox_sys::WorkerTaskContext;
-use pbs_datastore::chunk_store::ChunkStore;
-use pbs_config::BackupLockGuard;
use pbs_api_types::{
- Authid, DatastoreNotify,
- DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
+ Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DATASTORE_SCHEMA,
PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
- DataStoreConfig, DataStoreConfigUpdater,
+ PROXMOX_CONFIG_DIGEST_SCHEMA,
};
+use pbs_config::BackupLockGuard;
+use pbs_datastore::chunk_store::ChunkStore;
+use crate::api2::admin::{sync::list_sync_jobs, verify::list_verification_jobs};
use crate::api2::config::sync::delete_sync_job;
+use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
use crate::api2::config::verify::delete_verification_job;
-use crate::api2::config::tape_backup_job::{list_tape_backup_jobs, delete_tape_backup_job};
-use crate::api2::admin::{
- sync::list_sync_jobs,
- verify::list_verification_jobs,
-};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreConfig>, Error> {
-
let (config, digest) = pbs_config::datastore::config()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
rpcenv["digest"] = hex::encode(&digest).into();
- let list:Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
+ let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store.name]);
(user_privs & PRIV_DATASTORE_AUDIT) != 0
let path: PathBuf = datastore.path.clone().into();
let backup_user = pbs_config::backup_user()?;
- let _store = ChunkStore::create(&datastore.name, path, backup_user.uid, backup_user.gid, worker)?;
+ let _store = ChunkStore::create(
+ &datastore.name,
+ path,
+ backup_user.uid,
+ backup_user.gid,
+ worker,
+ )?;
config.set_data(&datastore.name, "datastore", &datastore)?;
config: DataStoreConfig,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let lock = pbs_config::datastore::lock_config()?;
let (section_config, _digest) = pbs_config::datastore::config()?;
Some(config.name.to_string()),
auth_id.to_string(),
to_stdout,
- move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
+ move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
)
}
#[api()]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
-
let _lock = pbs_config::datastore::lock_config()?;
// pass/compare digest
let mut data: DataStoreConfig = config.lookup("datastore", &name)?;
- if let Some(delete) = delete {
+ if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::comment => { data.comment = None; },
- DeletableProperty::gc_schedule => { data.gc_schedule = None; },
- DeletableProperty::prune_schedule => { data.prune_schedule = None; },
- DeletableProperty::keep_last => { data.keep_last = None; },
- DeletableProperty::keep_hourly => { data.keep_hourly = None; },
- DeletableProperty::keep_daily => { data.keep_daily = None; },
- DeletableProperty::keep_weekly => { data.keep_weekly = None; },
- DeletableProperty::keep_monthly => { data.keep_monthly = None; },
- DeletableProperty::keep_yearly => { data.keep_yearly = None; },
- DeletableProperty::verify_new => { data.verify_new = None; },
- DeletableProperty::notify => { data.notify = None; },
- DeletableProperty::notify_user => { data.notify_user = None; },
- DeletableProperty::tuning => { data.tuning = None; },
- DeletableProperty::maintenance_mode => { data.maintenance_mode = None; },
+ DeletableProperty::comment => {
+ data.comment = None;
+ }
+ DeletableProperty::gc_schedule => {
+ data.gc_schedule = None;
+ }
+ DeletableProperty::prune_schedule => {
+ data.prune_schedule = None;
+ }
+ DeletableProperty::keep_last => {
+ data.keep_last = None;
+ }
+ DeletableProperty::keep_hourly => {
+ data.keep_hourly = None;
+ }
+ DeletableProperty::keep_daily => {
+ data.keep_daily = None;
+ }
+ DeletableProperty::keep_weekly => {
+ data.keep_weekly = None;
+ }
+ DeletableProperty::keep_monthly => {
+ data.keep_monthly = None;
+ }
+ DeletableProperty::keep_yearly => {
+ data.keep_yearly = None;
+ }
+ DeletableProperty::verify_new => {
+ data.verify_new = None;
+ }
+ DeletableProperty::notify => {
+ data.notify = None;
+ }
+ DeletableProperty::notify_user => {
+ data.notify_user = None;
+ }
+ DeletableProperty::tuning => {
+ data.tuning = None;
+ }
+ DeletableProperty::maintenance_mode => {
+ data.maintenance_mode = None;
+ }
}
}
}
data.prune_schedule = update.prune_schedule;
}
- if update.keep_last.is_some() { data.keep_last = update.keep_last; }
- if update.keep_hourly.is_some() { data.keep_hourly = update.keep_hourly; }
- if update.keep_daily.is_some() { data.keep_daily = update.keep_daily; }
- if update.keep_weekly.is_some() { data.keep_weekly = update.keep_weekly; }
- if update.keep_monthly.is_some() { data.keep_monthly = update.keep_monthly; }
- if update.keep_yearly.is_some() { data.keep_yearly = update.keep_yearly; }
+ if update.keep_last.is_some() {
+ data.keep_last = update.keep_last;
+ }
+ if update.keep_hourly.is_some() {
+ data.keep_hourly = update.keep_hourly;
+ }
+ if update.keep_daily.is_some() {
+ data.keep_daily = update.keep_daily;
+ }
+ if update.keep_weekly.is_some() {
+ data.keep_weekly = update.keep_weekly;
+ }
+ if update.keep_monthly.is_some() {
+ data.keep_monthly = update.keep_monthly;
+ }
+ if update.keep_yearly.is_some() {
+ data.keep_yearly = update.keep_yearly;
+ }
if let Some(notify_str) = update.notify {
        let value = DatastoreNotify::API_SCHEMA.parse_property_string(&notify_str)?;
let notify: DatastoreNotify = serde_json::from_value(value)?;
- if let DatastoreNotify { gc: None, verify: None, sync: None } = notify {
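+        // drop the notify option entirely when no sub-option is set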
+ if let DatastoreNotify {
+ gc: None,
+ verify: None,
+ sync: None,
+ } = notify
+ {
data.notify = None;
} else {
data.notify = Some(notify_str);
}
}
- if update.verify_new.is_some() { data.verify_new = update.verify_new; }
+ if update.verify_new.is_some() {
+ data.verify_new = update.verify_new;
+ }
- if update.notify_user.is_some() { data.notify_user = update.notify_user; }
+ if update.notify_user.is_some() {
+ data.notify_user = update.notify_user;
+ }
- if update.tuning.is_some() { data.tuning = update.tuning; }
+ if update.tuning.is_some() {
+ data.tuning = update.tuning;
+ }
- if update.maintenance_mode.is_some() { data.maintenance_mode = update.maintenance_mode; }
+ if update.maintenance_mode.is_some() {
+ data.maintenance_mode = update.maintenance_mode;
+ }
config.set_data(&name, "datastore", &data)?;
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
-
let _lock = pbs_config::datastore::lock_config()?;
let (mut config, expected_digest) = pbs_config::datastore::config()?;
}
match config.sections.get(&name) {
- Some(_) => { config.sections.remove(&name); },
+ Some(_) => {
+ config.sections.remove(&name);
+ }
None => http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name),
}
}
let tape_jobs = list_tape_backup_jobs(Value::Null, rpcenv)?;
- for job_config in tape_jobs.into_iter().filter(|config| config.setup.store == name) {
+ for job_config in tape_jobs
+ .into_iter()
+ .filter(|config| config.setup.store == name)
+ {
delete_tape_backup_job(job_config.id, None, rpcenv)?;
}
}
-use anyhow::{format_err, Error};
use ::serde::{Deserialize, Serialize};
-use serde_json::Value;
+use anyhow::{format_err, Error};
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger,
- PROXMOX_CONFIG_DIGEST_SCHEMA, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+ Authid, LtoTapeDrive, LtoTapeDriveUpdater, ScsiTapeChanger, DRIVE_NAME_SCHEMA, PRIV_TAPE_AUDIT,
+ PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::CachedUserInfo;
-use pbs_tape::linux_list_drives::{lto_tape_device_list, check_drive_path};
+use pbs_tape::linux_list_drives::{check_drive_path, lto_tape_device_list};
#[api(
protected: true,
)]
/// Create a new drive
pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> {
-
let _lock = pbs_config::drive::lock()?;
let (mut section_config, _digest) = pbs_config::drive::config()?;
param_bail!("name", "Entry '{}' already exists", config.name);
}
if drive.path == config.path {
- param_bail!("path", "Path '{}' already used in drive '{}'", config.path, drive.name);
+ param_bail!(
+ "path",
+ "Path '{}' already used in drive '{}'",
+ config.path,
+ drive.name
+ );
}
}
_param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<LtoTapeDrive, Error> {
-
let (config, digest) = pbs_config::drive::config()?;
let data: LtoTapeDrive = config.lookup("lto", &name)?;
update: LtoTapeDriveUpdater,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
- _param: Value,
+ _param: Value,
) -> Result<(), Error> {
-
let _lock = pbs_config::drive::lock()?;
let (mut config, expected_digest) = pbs_config::drive::config()?;
DeletableProperty::changer => {
data.changer = None;
data.changer_drivenum = None;
- },
- DeletableProperty::changer_drivenum => { data.changer_drivenum = None; },
+ }
+ DeletableProperty::changer_drivenum => {
+ data.changer_drivenum = None;
+ }
}
}
}
data.changer_drivenum = None;
} else {
if data.changer.is_none() {
- param_bail!("changer", format_err!("Option 'changer-drivenum' requires option 'changer'."));
+ param_bail!(
+ "changer",
+ format_err!("Option 'changer-drivenum' requires option 'changer'.")
+ );
}
data.changer_drivenum = Some(changer_drivenum);
}
)]
/// Delete a drive configuration
pub fn delete_drive(name: String, _param: Value) -> Result<(), Error> {
-
let _lock = pbs_config::drive::lock()?;
let (mut config, _digest) = pbs_config::drive::config()?;
match config.sections.get(&name) {
Some((section_type, _)) => {
if section_type != "lto" {
- param_bail!("name", "Entry '{}' exists, but is not a lto tape drive", name);
+ param_bail!(
+ "name",
+ "Entry '{}' exists, but is not a lto tape drive",
+ name
+ );
}
config.sections.remove(&name);
- },
+ }
None => http_bail!(NOT_FOUND, "Delete drive '{}' failed - no such drive", name),
}
.put(&API_METHOD_UPDATE_DRIVE)
.delete(&API_METHOD_DELETE_DRIVE);
-
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_DRIVES)
.post(&API_METHOD_CREATE_DRIVE)
-use anyhow::Error;
use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA,
- PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+ Authid, MediaPoolConfig, MediaPoolConfigUpdater, MEDIA_POOL_NAME_SCHEMA, PRIV_TAPE_AUDIT,
+ PRIV_TAPE_MODIFY,
};
use pbs_config::CachedUserInfo;
},
)]
/// Create a new media pool
-pub fn create_pool(
- config: MediaPoolConfig,
-) -> Result<(), Error> {
-
+pub fn create_pool(config: MediaPoolConfig) -> Result<(), Error> {
let _lock = pbs_config::media_pool::lock()?;
let (mut section_config, _digest) = pbs_config::media_pool::config()?;
},
)]
/// List media pools
-pub fn list_pools(
- mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Vec<MediaPoolConfig>, Error> {
+pub fn list_pools(mut rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<MediaPoolConfig>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let list = config.convert_to_typed_array::<MediaPoolConfig>("pool")?;
- let list = list
+ let list = list
.into_iter()
.filter(|pool| {
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool", &pool.name]);
)]
/// Get media pool configuration
pub fn get_config(name: String) -> Result<MediaPoolConfig, Error> {
-
let (config, _digest) = pbs_config::media_pool::config()?;
let data: MediaPoolConfig = config.lookup("pool", &name)?;
update: MediaPoolConfigUpdater,
delete: Option<Vec<DeletableProperty>>,
) -> Result<(), Error> {
-
let _lock = pbs_config::media_pool::lock()?;
let (mut config, _digest) = pbs_config::media_pool::config()?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::allocation => { data.allocation = None; },
- DeletableProperty::retention => { data.retention = None; },
- DeletableProperty::template => { data.template = None; },
- DeletableProperty::encrypt => { data.encrypt = None; },
- DeletableProperty::comment => { data.comment = None; },
+ DeletableProperty::allocation => {
+ data.allocation = None;
+ }
+ DeletableProperty::retention => {
+ data.retention = None;
+ }
+ DeletableProperty::template => {
+ data.template = None;
+ }
+ DeletableProperty::encrypt => {
+ data.encrypt = None;
+ }
+ DeletableProperty::comment => {
+ data.comment = None;
+ }
}
}
}
- if update.allocation.is_some() { data.allocation = update.allocation; }
- if update.retention.is_some() { data.retention = update.retention; }
- if update.template.is_some() { data.template = update.template; }
- if update.encrypt.is_some() { data.encrypt = update.encrypt; }
+ if update.allocation.is_some() {
+ data.allocation = update.allocation;
+ }
+ if update.retention.is_some() {
+ data.retention = update.retention;
+ }
+ if update.template.is_some() {
+ data.template = update.template;
+ }
+ if update.encrypt.is_some() {
+ data.encrypt = update.encrypt;
+ }
if let Some(comment) = update.comment {
let comment = comment.trim();
)]
/// Delete a media pool configuration
pub fn delete_pool(name: String) -> Result<(), Error> {
-
let _lock = pbs_config::media_pool::lock()?;
let (mut config, _digest) = pbs_config::media_pool::config()?;
match config.sections.get(&name) {
- Some(_) => { config.sections.remove(&name); },
+ Some(_) => {
+ config.sections.remove(&name);
+ }
None => http_bail!(NOT_FOUND, "delete pool '{}' failed - no such pool", name),
}
.put(&API_METHOD_UPDATE_POOL)
.delete(&API_METHOD_DELETE_POOL);
-
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_POOLS)
.post(&API_METHOD_CREATE_POOL)
//! Backup Server Configuration
-use proxmox_router::{Router, SubdirMap};
use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{Router, SubdirMap};
pub mod access;
pub mod acme;
+pub mod changer;
pub mod datastore;
-pub mod remote;
-pub mod sync;
-pub mod verify;
pub mod drive;
-pub mod changer;
pub mod media_pool;
-pub mod tape_encryption_keys;
+pub mod remote;
+pub mod sync;
pub mod tape_backup_job;
+pub mod tape_encryption_keys;
pub mod traffic_control;
+pub mod verify;
const SUBDIRS: SubdirMap = &[
("access", &access::ROUTER),
+use ::serde::{Deserialize, Serialize};
use anyhow::{bail, format_err, Error};
-use proxmox_sys::sortable;
-use proxmox_router::SubdirMap;
+use hex::FromHex;
use proxmox_router::list_subdirs_api_method;
+use proxmox_router::SubdirMap;
+use proxmox_sys::sortable;
use serde_json::Value;
-use ::serde::{Deserialize, Serialize};
-use hex::FromHex;
-use proxmox_router::{http_bail, http_err, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, http_err, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
-use pbs_client::{HttpClient, HttpClientOptions};
use pbs_api_types::{
- REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA, Remote, RemoteConfig, RemoteConfigUpdater,
- Authid, PROXMOX_CONFIG_DIGEST_SCHEMA, DATASTORE_SCHEMA, GroupListItem,
- DataStoreListItem, RateLimitConfig, SyncJobConfig, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
+ Authid, DataStoreListItem, GroupListItem, RateLimitConfig, Remote, RemoteConfig,
+ RemoteConfigUpdater, SyncJobConfig, DATASTORE_SCHEMA, PRIV_REMOTE_AUDIT, PRIV_REMOTE_MODIFY,
+ PROXMOX_CONFIG_DIGEST_SCHEMA, REMOTE_ID_SCHEMA, REMOTE_PASSWORD_SCHEMA,
};
+use pbs_client::{HttpClient, HttpClientOptions};
use pbs_config::sync;
use pbs_config::CachedUserInfo;
},
)]
/// Create new remote.
-pub fn create_remote(
- name: String,
- config: RemoteConfig,
- password: String,
-) -> Result<(), Error> {
-
+pub fn create_remote(name: String, config: RemoteConfig, password: String) -> Result<(), Error> {
let _lock = pbs_config::remote::lock_config()?;
let (mut section_config, _digest) = pbs_config::remote::config()?;
param_bail!("name", "remote '{}' already exists.", name);
}
- let remote = Remote { name: name.clone(), config, password };
+ let remote = Remote {
+ name: name.clone(),
+ config,
+ password,
+ };
section_config.set_data(&name, "remote", &remote)?;
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
-
let _lock = pbs_config::remote::lock_config()?;
let (mut config, expected_digest) = pbs_config::remote::config()?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::comment => { data.config.comment = None; },
- DeletableProperty::fingerprint => { data.config.fingerprint = None; },
- DeletableProperty::port => { data.config.port = None; },
+ DeletableProperty::comment => {
+ data.config.comment = None;
+ }
+ DeletableProperty::fingerprint => {
+ data.config.fingerprint = None;
+ }
+ DeletableProperty::port => {
+ data.config.port = None;
+ }
}
}
}
data.config.comment = Some(comment);
}
}
- if let Some(host) = update.host { data.config.host = host; }
- if update.port.is_some() { data.config.port = update.port; }
- if let Some(auth_id) = update.auth_id { data.config.auth_id = auth_id; }
- if let Some(password) = password { data.password = password; }
+ if let Some(host) = update.host {
+ data.config.host = host;
+ }
+ if update.port.is_some() {
+ data.config.port = update.port;
+ }
+ if let Some(auth_id) = update.auth_id {
+ data.config.auth_id = auth_id;
+ }
+ if let Some(password) = password {
+ data.password = password;
+ }
- if update.fingerprint.is_some() { data.config.fingerprint = update.fingerprint; }
+ if update.fingerprint.is_some() {
+ data.config.fingerprint = update.fingerprint;
+ }
config.set_data(&name, "remote", &data)?;
)]
/// Remove a remote from the configuration file.
pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error> {
-
let (sync_jobs, _) = sync::config()?;
- let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
+ let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
for job in job_list {
if job.remote == name {
- param_bail!("name", "remote '{}' is used by sync job '{}' (datastore '{}')", name, job.id, job.store);
+ param_bail!(
+ "name",
+ "remote '{}' is used by sync job '{}' (datastore '{}')",
+ name,
+ job.id,
+ job.store
+ );
}
}
}
match config.sections.get(&name) {
- Some(_) => { config.sections.remove(&name); },
+ Some(_) => {
+ config.sections.remove(&name);
+ }
None => http_bail!(NOT_FOUND, "remote '{}' does not exist.", name),
}
remote: &Remote,
limit: Option<RateLimitConfig>,
) -> Result<HttpClient, Error> {
- let mut options = HttpClientOptions::new_non_interactive(remote.password.clone(), remote.config.fingerprint.clone());
+ let mut options = HttpClientOptions::new_non_interactive(
+ remote.password.clone(),
+ remote.config.fingerprint.clone(),
+ );
if let Some(limit) = limit {
options = options.rate_limit(limit);
&remote.config.host,
remote.config.port.unwrap_or(8007),
&remote.config.auth_id,
- options)?;
- let _auth_info = client.login() // make sure we can auth
+ options,
+ )?;
+ let _auth_info = client
+ .login() // make sure we can auth
.await
- .map_err(|err| format_err!("remote connection to '{}' failed - {}", remote.config.host, err))?;
+ .map_err(|err| {
+ format_err!(
+ "remote connection to '{}' failed - {}",
+ remote.config.host,
+ err
+ )
+ })?;
Ok(client)
}
-
#[api(
input: {
properties: {
let remote: Remote = remote_config.lookup("remote", &name)?;
let map_remote_err = |api_err| {
- http_err!(INTERNAL_SERVER_ERROR,
- "failed to scan remote '{}' - {}",
- &name,
- api_err)
+ http_err!(
+ INTERNAL_SERVER_ERROR,
+ "failed to scan remote '{}' - {}",
+ &name,
+ api_err
+ )
};
- let client = remote_client(&remote, None)
- .await
- .map_err(map_remote_err)?;
+ let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
let api_res = client
.get("api2/json/admin/datastore", None)
.await
let remote: Remote = remote_config.lookup("remote", &name)?;
let map_remote_err = |api_err| {
- http_err!(INTERNAL_SERVER_ERROR,
- "failed to scan remote '{}' - {}",
- &name,
- api_err)
+ http_err!(
+ INTERNAL_SERVER_ERROR,
+ "failed to scan remote '{}' - {}",
+ &name,
+ api_err
+ )
};
- let client = remote_client(&remote, None)
- .await
- .map_err(map_remote_err)?;
+ let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
let api_res = client
.get(&format!("api2/json/admin/datastore/{}/groups", store), None)
.await
}
#[sortable]
-const DATASTORE_SCAN_SUBDIRS: SubdirMap = &[
- (
- "groups",
- &Router::new()
- .get(&API_METHOD_SCAN_REMOTE_GROUPS)
- ),
-];
+const DATASTORE_SCAN_SUBDIRS: SubdirMap =
+ &[("groups", &Router::new().get(&API_METHOD_SCAN_REMOTE_GROUPS))];
const DATASTORE_SCAN_ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(DATASTORE_SCAN_SUBDIRS))
-use anyhow::{bail, Error};
-use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use anyhow::{bail, Error};
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
- PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
- PRIV_REMOTE_AUDIT, PRIV_REMOTE_READ,
+ Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
+ PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
+ PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::sync;
let correct_owner = match job.owner {
Some(ref owner) => {
owner == auth_id
- || (owner.is_token()
- && !auth_id.is_token()
- && owner.user() == auth_id.user())
- },
+ || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user())
+ }
// default sync owner
None => auth_id == Authid::root_auth_id(),
};
.into_iter()
.filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
.collect();
- Ok(list)
+ Ok(list)
}
#[api(
#[api()]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
let mut data: SyncJobConfig = config.lookup("sync", &id)?;
- if let Some(delete) = delete {
+ if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::owner => { data.owner = None; },
- DeletableProperty::comment => { data.comment = None; },
- DeletableProperty::schedule => { data.schedule = None; },
- DeletableProperty::remove_vanished => { data.remove_vanished = None; },
- DeletableProperty::group_filter => { data.group_filter = None; },
- DeletableProperty::rate_in => { data.limit.rate_in = None; },
- DeletableProperty::rate_out => { data.limit.rate_out = None; },
- DeletableProperty::burst_in => { data.limit.burst_in = None; },
- DeletableProperty::burst_out => { data.limit.burst_out = None; },
+ DeletableProperty::owner => {
+ data.owner = None;
+ }
+ DeletableProperty::comment => {
+ data.comment = None;
+ }
+ DeletableProperty::schedule => {
+ data.schedule = None;
+ }
+ DeletableProperty::remove_vanished => {
+ data.remove_vanished = None;
+ }
+ DeletableProperty::group_filter => {
+ data.group_filter = None;
+ }
+ DeletableProperty::rate_in => {
+ data.limit.rate_in = None;
+ }
+ DeletableProperty::rate_out => {
+ data.limit.rate_out = None;
+ }
+ DeletableProperty::burst_in => {
+ data.limit.burst_in = None;
+ }
+ DeletableProperty::burst_out => {
+ data.limit.burst_out = None;
+ }
}
}
}
}
}
- if let Some(store) = update.store { data.store = store; }
- if let Some(remote) = update.remote { data.remote = remote; }
- if let Some(remote_store) = update.remote_store { data.remote_store = remote_store; }
- if let Some(owner) = update.owner { data.owner = Some(owner); }
- if let Some(group_filter) = update.group_filter { data.group_filter = Some(group_filter); }
+ if let Some(store) = update.store {
+ data.store = store;
+ }
+ if let Some(remote) = update.remote {
+ data.remote = remote;
+ }
+ if let Some(remote_store) = update.remote_store {
+ data.remote_store = remote_store;
+ }
+ if let Some(owner) = update.owner {
+ data.owner = Some(owner);
+ }
+ if let Some(group_filter) = update.group_filter {
+ data.group_filter = Some(group_filter);
+ }
if update.limit.rate_in.is_some() {
data.limit.rate_in = update.limit.rate_in;
}
let schedule_changed = data.schedule != update.schedule;
- if update.schedule.is_some() { data.schedule = update.schedule; }
- if update.remove_vanished.is_some() { data.remove_vanished = update.remove_vanished; }
+ if update.schedule.is_some() {
+ data.schedule = update.schedule;
+ }
+ if update.remove_vanished.is_some() {
+ data.remove_vanished = update.remove_vanished;
+ }
if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
bail!("permission check failed");
bail!("permission check failed");
}
config.sections.remove(&id);
- },
- Err(_) => { http_bail!(NOT_FOUND, "job '{}' does not exist.", id) },
+ }
+ Err(_) => {
+ http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
+ }
};
sync::save_config(&config)?;
.post(&API_METHOD_CREATE_SYNC_JOB)
.match_all("id", &ITEM_ROUTER);
-
#[test]
fn sync_job_access_test() -> Result<(), Error> {
- let (user_cfg, _) = pbs_config::user::test_cfg_from_str(r###"
+ let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
+ r###"
user: noperm@pbs
user: read@pbs
user: write@pbs
-"###).expect("test user.cfg is not parsable");
- let acl_tree = pbs_config::acl::AclTree::from_raw(r###"
+"###,
+ )
+ .expect("test user.cfg is not parsable");
+ let acl_tree = pbs_config::acl::AclTree::from_raw(
+ r###"
acl:1:/datastore/localstore1:read@pbs,write@pbs:DatastoreAudit
acl:1:/datastore/localstore1:write@pbs:DatastoreBackup
acl:1:/datastore/localstore2:write@pbs:DatastorePowerUser
acl:1:/datastore/localstore3:write@pbs:DatastoreAdmin
acl:1:/remote/remote1:read@pbs,write@pbs:RemoteAudit
acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
-"###).expect("test acl.cfg is not parsable");
+"###,
+ )
+ .expect("test acl.cfg is not parsable");
let user_info = CachedUserInfo::test_new(user_cfg, acl_tree);
};
// should work without ACLs
- assert_eq!(check_sync_job_read_access(&user_info, root_auth_id, &job), true);
- assert_eq!(check_sync_job_modify_access(&user_info, root_auth_id, &job), true);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, root_auth_id, &job),
+ true
+ );
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, root_auth_id, &job),
+ true
+ );
// user without permissions must fail
- assert_eq!(check_sync_job_read_access(&user_info, &no_perm_auth_id, &job), false);
- assert_eq!(check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, &no_perm_auth_id, &job),
+ false
+ );
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &no_perm_auth_id, &job),
+ false
+ );
// reading without proper read permissions on either remote or local must fail
- assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, &read_auth_id, &job),
+ false
+ );
// reading without proper read permissions on local end must fail
job.remote = "remote1".to_string();
- assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, &read_auth_id, &job),
+ false
+ );
// reading without proper read permissions on remote end must fail
job.remote = "remote0".to_string();
job.store = "localstore1".to_string();
- assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, &read_auth_id, &job),
+ false
+ );
// writing without proper write permissions on either end must fail
job.store = "localstore0".to_string();
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ false
+ );
// writing without proper write permissions on local end must fail
job.remote = "remote1".to_string();
// writing without proper write permissions on remote end must fail
job.remote = "remote0".to_string();
job.store = "localstore1".to_string();
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ false
+ );
// reset remote to one where users have access
job.remote = "remote1".to_string();
// user with read permission can only read, but not modify/run
- assert_eq!(check_sync_job_read_access(&user_info, &read_auth_id, &job), true);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, &read_auth_id, &job),
+ true
+ );
job.owner = Some(read_auth_id.clone());
- assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &read_auth_id, &job),
+ false
+ );
job.owner = None;
- assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &read_auth_id, &job),
+ false
+ );
job.owner = Some(write_auth_id.clone());
- assert_eq!(check_sync_job_modify_access(&user_info, &read_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &read_auth_id, &job),
+ false
+ );
// user with simple write permission can modify/run
- assert_eq!(check_sync_job_read_access(&user_info, &write_auth_id, &job), true);
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+ assert_eq!(
+ check_sync_job_read_access(&user_info, &write_auth_id, &job),
+ true
+ );
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ true
+ );
// but can't modify/run with deletion
job.remove_vanished = Some(true);
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ false
+ );
// unless they have Datastore.Prune as well
job.store = "localstore2".to_string();
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ true
+ );
// changing owner is not possible
job.owner = Some(read_auth_id.clone());
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ false
+ );
// also not to the default 'root@pam'
job.owner = None;
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), false);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ false
+ );
// unless they have Datastore.Modify as well
job.store = "localstore3".to_string();
job.owner = Some(read_auth_id);
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ true
+ );
job.owner = None;
- assert_eq!(check_sync_job_modify_access(&user_info, &write_auth_id, &job), true);
+ assert_eq!(
+ check_sync_job_modify_access(&user_info, &write_auth_id, &job),
+ true
+ );
Ok(())
}
-use anyhow::Error;
-use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- TrafficControlRule, TrafficControlRuleUpdater,
+ TrafficControlRule, TrafficControlRuleUpdater, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
PROXMOX_CONFIG_DIGEST_SCHEMA, TRAFFIC_CONTROL_ID_SCHEMA,
- PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
};
#[api(
)]
/// Create new traffic control rule.
pub fn create_traffic_control(config: TrafficControlRule) -> Result<(), Error> {
-
let _lock = pbs_config::traffic_control::lock_config()?;
let (mut section_config, _digest) = pbs_config::traffic_control::config()?;
if section_config.sections.get(&config.name).is_some() {
- param_bail!("name", "traffic control rule '{}' already exists.", config.name);
+ param_bail!(
+ "name",
+ "traffic control rule '{}' already exists.",
+ config.name
+ );
}
section_config.set_data(&config.name, "rule", &config)?;
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<(), Error> {
-
let _lock = pbs_config::traffic_control::lock_config()?;
let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::rate_in => { data.limit.rate_in = None; },
- DeletableProperty::rate_out => { data.limit.rate_out = None; },
- DeletableProperty::burst_in => { data.limit.burst_in = None; },
- DeletableProperty::burst_out => { data.limit.burst_out = None; },
- DeletableProperty::comment => { data.comment = None; },
- DeletableProperty::timeframe => { data.timeframe = None; },
+ DeletableProperty::rate_in => {
+ data.limit.rate_in = None;
+ }
+ DeletableProperty::rate_out => {
+ data.limit.rate_out = None;
+ }
+ DeletableProperty::burst_in => {
+ data.limit.burst_in = None;
+ }
+ DeletableProperty::burst_out => {
+ data.limit.burst_out = None;
+ }
+ DeletableProperty::comment => {
+ data.comment = None;
+ }
+ DeletableProperty::timeframe => {
+ data.timeframe = None;
+ }
}
}
}
data.limit.burst_out = update.limit.burst_out;
}
- if let Some(network) = update.network { data.network = network; }
- if update.timeframe.is_some() { data.timeframe = update.timeframe; }
+ if let Some(network) = update.network {
+ data.network = network;
+ }
+ if update.timeframe.is_some() {
+ data.timeframe = update.timeframe;
+ }
config.set_data(&name, "rule", &data)?;
)]
/// Remove a traffic control rule from the configuration file.
pub fn delete_traffic_control(name: String, digest: Option<String>) -> Result<(), Error> {
-
let _lock = pbs_config::traffic_control::lock_config()?;
let (mut config, expected_digest) = pbs_config::traffic_control::config()?;
}
match config.sections.get(&name) {
- Some(_) => { config.sections.remove(&name); },
+ Some(_) => {
+ config.sections.remove(&name);
+ }
None => http_bail!(NOT_FOUND, "traffic control rule '{}' does not exist.", name),
}
Ok(())
}
-
const ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_TRAFFIC_CONTROL)
.put(&API_METHOD_UPDATE_TRAFFIC_CONTROL)
-use anyhow::Error;
-use serde_json::Value;
use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
use hex::FromHex;
+use serde_json::Value;
-use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, VerificationJobConfig, VerificationJobConfigUpdater, JOB_ID_SCHEMA,
- PROXMOX_CONFIG_DIGEST_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY,
+ PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_VERIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::verify;
let list = config.convert_to_typed_array("verification")?;
- let list = list.into_iter()
+ let list = list
+ .into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &["datastore", &job.store]);
privs & required_privs != 00
- }).collect();
+ })
+ .collect();
rpcenv["digest"] = hex::encode(&digest).into();
Ok(list)
}
-
#[api(
protected: true,
input: {
/// Create a new verification job.
pub fn create_verification_job(
config: VerificationJobConfig,
- rpcenv: &mut dyn RpcEnvironment
+ rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
- user_info.check_privs(&auth_id, &["datastore", &config.store], PRIV_DATASTORE_VERIFY, false)?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &config.store],
+ PRIV_DATASTORE_VERIFY,
+ false,
+ )?;
let _lock = verify::lock_config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
- user_info.check_privs(&auth_id, &["datastore", &verification_job.store], required_privs, true)?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &verification_job.store],
+ required_privs,
+ true,
+ )?;
rpcenv["digest"] = hex::encode(&digest).into();
#[api()]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the ignore verified property.
/// Delete the job schedule.
Schedule,
/// Delete the outdated-after property.
- OutdatedAfter
+ OutdatedAfter,
}
#[api(
let mut data: VerificationJobConfig = config.lookup("verification", &id)?;
// check existing store
- user_info.check_privs(&auth_id, &["datastore", &data.store], PRIV_DATASTORE_VERIFY, true)?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &data.store],
+ PRIV_DATASTORE_VERIFY,
+ true,
+ )?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::IgnoreVerified => { data.ignore_verified = None; },
- DeletableProperty::OutdatedAfter => { data.outdated_after = None; },
- DeletableProperty::Comment => { data.comment = None; },
- DeletableProperty::Schedule => { data.schedule = None; },
+ DeletableProperty::IgnoreVerified => {
+ data.ignore_verified = None;
+ }
+ DeletableProperty::OutdatedAfter => {
+ data.outdated_after = None;
+ }
+ DeletableProperty::Comment => {
+ data.comment = None;
+ }
+ DeletableProperty::Schedule => {
+ data.schedule = None;
+ }
}
}
}
if let Some(store) = update.store {
// check new store
- user_info.check_privs(&auth_id, &["datastore", &store], PRIV_DATASTORE_VERIFY, true)?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &store],
+ PRIV_DATASTORE_VERIFY,
+ true,
+ )?;
data.store = store;
}
-
- if update.ignore_verified.is_some() { data.ignore_verified = update.ignore_verified; }
- if update.outdated_after.is_some() { data.outdated_after = update.outdated_after; }
+ if update.ignore_verified.is_some() {
+ data.ignore_verified = update.ignore_verified;
+ }
+ if update.outdated_after.is_some() {
+ data.outdated_after = update.outdated_after;
+ }
let schedule_changed = data.schedule != update.schedule;
- if update.schedule.is_some() { data.schedule = update.schedule; }
+ if update.schedule.is_some() {
+ data.schedule = update.schedule;
+ }
config.set_data(&id, "verification", &data)?;
let (mut config, expected_digest) = verify::config()?;
let job: VerificationJobConfig = config.lookup("verification", &id)?;
- user_info.check_privs(&auth_id, &["datastore", &job.store], PRIV_DATASTORE_VERIFY, true)?;
+ user_info.check_privs(
+ &auth_id,
+ &["datastore", &job.store],
+ PRIV_DATASTORE_VERIFY,
+ true,
+ )?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
}
match config.sections.get(&id) {
- Some(_) => { config.sections.remove(&id); },
+ Some(_) => {
+ config.sections.remove(&id);
+ }
None => http_bail!(NOT_FOUND, "job '{}' does not exist.", id),
}
use anyhow::Error;
use futures::stream::TryStreamExt;
-use hyper::{Body, Response, StatusCode, header};
+use hyper::{header, Body, Response, StatusCode};
use proxmox_router::http_bail;
pub mod admin;
pub mod backup;
pub mod config;
+pub mod helpers;
pub mod node;
+pub mod ping;
+pub mod pull;
pub mod reader;
pub mod status;
+pub mod tape;
pub mod types;
pub mod version;
-pub mod ping;
-pub mod pull;
-pub mod tape;
-pub mod helpers;
use proxmox_router::{list_subdirs_api_method, Router, SubdirMap};
-use anyhow::{Error, bail, format_err};
+use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;
-use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_router::{
- list_subdirs_api_method, RpcEnvironment, RpcEnvironmentType, Permission, Router, SubdirMap
+ list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
};
use proxmox_schema::api;
+use proxmox_sys::fs::{replace_file, CreateOptions};
use proxmox_apt::repositories::{
APTRepositoryFile, APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo,
use proxmox_http::ProxyConfig;
use pbs_api_types::{
- APTUpdateInfo, NODE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
- PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ APTUpdateInfo, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
+ UPID_SCHEMA,
};
use crate::config::node;
+use crate::tools::{apt, pbs_simple_http, subscription};
use proxmox_rest_server::WorkerTask;
-use crate::tools::{
- apt,
- pbs_simple_http,
- subscription,
-};
#[api(
input: {
)]
/// List available APT updates
fn apt_update_available(_param: Value) -> Result<Value, Error> {
-
if let Ok(false) = apt::pkg_cache_expired() {
if let Ok(Some(cache)) = apt::read_pkg_state() {
return Ok(json!(cache.package_status));
}
pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> {
-
const PROXY_CFG_FN: &str = "/etc/apt/apt.conf.d/76pveproxy"; // use same file as PVE
if let Some(proxy_config) = proxy_config {
}
fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
- if !quiet { worker.log_message("starting apt-get update") }
+ if !quiet {
+ worker.log_message("starting apt-get update")
+ }
read_and_update_proxy_config()?;
command.arg("update");
// apt "errors" quite easily, and run_command is a bit rigid, so handle this inline for now.
- let output = command.output()
+ let output = command
+ .output()
.map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
if !quiet {
if !output.status.success() {
if output.status.code().is_some() {
let msg = String::from_utf8(output.stderr)
- .map(|m| if m.is_empty() { String::from("no error message") } else { m })
+ .map(|m| {
+ if m.is_empty() {
+ String::from("no error message")
+ } else {
+ m
+ }
+ })
.unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
worker.log_warning(msg);
} else {
quiet: bool,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let auth_id = rpcenv.get_auth_id().unwrap();
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
if notified_version != pkg.version {
to_notify.push(pkg);
}
- },
+ }
None => to_notify.push(pkg),
}
}
},
)]
/// Retrieve the changelog of the specified package.
-fn apt_get_changelog(
- param: Value,
-) -> Result<Value, Error> {
-
+fn apt_get_changelog(param: Value) -> Result<Value, Error> {
let name = pbs_tools::json::required_string_param(&param, "name")?.to_owned();
let version = param["version"].as_str();
- let pkg_info = apt::list_installed_apt_packages(|data| {
- match version {
+ let pkg_info = apt::list_installed_apt_packages(
+ |data| match version {
Some(version) => version == data.active_version,
- None => data.active_version == data.candidate_version
- }
- }, Some(&name));
+ None => data.active_version == data.candidate_version,
+ },
+ Some(&name),
+ );
if pkg_info.is_empty() {
bail!("Package '{}' not found", name);
// FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
if changelog_url.starts_with("http://download.proxmox.com/") {
let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, None))
- .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
+ .map_err(|err| {
+ format_err!(
+ "Error downloading changelog from '{}': {}",
+ changelog_url,
+ err
+ )
+ })?;
Ok(json!(changelog))
-
} else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
let sub = match subscription::read_subscription()? {
Some(sub) => sub,
- None => bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
+ None => {
+ bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
+ }
};
let (key, id) = match sub.key {
- Some(key) => {
- match sub.serverid {
- Some(id) => (key, id),
- None =>
- bail!("cannot retrieve changelog from enterprise repo: no server id found")
- }
+ Some(key) => match sub.serverid {
+ Some(id) => (key, id),
+ None => bail!("cannot retrieve changelog from enterprise repo: no server id found"),
},
- None => bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
+ None => {
+ bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
+ }
};
let mut auth_header = HashMap::new();
- auth_header.insert("Authorization".to_owned(),
- format!("Basic {}", base64::encode(format!("{}:{}", key, id))));
+ auth_header.insert(
+ "Authorization".to_owned(),
+ format!("Basic {}", base64::encode(format!("{}:{}", key, id))),
+ );
- let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
- .map_err(|err| format_err!("Error downloading changelog from '{}': {}", changelog_url, err))?;
+ let changelog =
+ proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
+ .map_err(|err| {
+ format_err!(
+ "Error downloading changelog from '{}': {}",
+ changelog_url,
+ err
+ )
+ })?;
Ok(json!(changelog))
-
} else {
let mut command = std::process::Command::new("apt-get");
command.arg("changelog");
"running kernel: {}",
nix::sys::utsname::uname().release().to_owned()
);
- if let Some(proxmox_backup) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup") {
+ if let Some(proxmox_backup) = pbs_packages
+ .iter()
+ .find(|pkg| pkg.package == "proxmox-backup")
+ {
let mut proxmox_backup = proxmox_backup.clone();
proxmox_backup.extra_info = Some(running_kernel);
packages.push(proxmox_backup);
} else {
- packages.push(unknown_package("proxmox-backup".into(), Some(running_kernel)));
+ packages.push(unknown_package(
+ "proxmox-backup".into(),
+ Some(running_kernel),
+ ));
}
let version = pbs_buildcfg::PROXMOX_PKG_VERSION;
let release = pbs_buildcfg::PROXMOX_PKG_RELEASE;
let daemon_version_info = Some(format!("running version: {}.{}", version, release));
- if let Some(pkg) = pbs_packages.iter().find(|pkg| pkg.package == "proxmox-backup-server") {
+ if let Some(pkg) = pbs_packages
+ .iter()
+ .find(|pkg| pkg.package == "proxmox-backup-server")
+ {
let mut pkg = pkg.clone();
pkg.extra_info = daemon_version_info;
packages.push(pkg);
} else {
- packages.push(unknown_package("proxmox-backup".into(), daemon_version_info));
+ packages.push(unknown_package(
+ "proxmox-backup".into(),
+ daemon_version_info,
+ ));
}
let mut kernel_pkgs: Vec<APTUpdateInfo> = pbs_packages
}
const SUBDIRS: SubdirMap = &[
- ("changelog", &Router::new().get(&API_METHOD_APT_GET_CHANGELOG)),
- ("repositories", &Router::new()
- .get(&API_METHOD_GET_REPOSITORIES)
- .post(&API_METHOD_CHANGE_REPOSITORY)
- .put(&API_METHOD_ADD_REPOSITORY)
+ (
+ "changelog",
+ &Router::new().get(&API_METHOD_APT_GET_CHANGELOG),
+ ),
+ (
+ "repositories",
+ &Router::new()
+ .get(&API_METHOD_GET_REPOSITORIES)
+ .post(&API_METHOD_CHANGE_REPOSITORY)
+ .put(&API_METHOD_ADD_REPOSITORY),
),
- ("update", &Router::new()
- .get(&API_METHOD_APT_UPDATE_AVAILABLE)
- .post(&API_METHOD_APT_UPDATE_DATABASE)
+ (
+ "update",
+ &Router::new()
+ .get(&API_METHOD_APT_UPDATE_AVAILABLE)
+ .post(&API_METHOD_APT_UPDATE_DATABASE),
),
("versions", &Router::new().get(&API_METHOD_GET_VERSIONS)),
];
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
+use proxmox_router::list_subdirs_api_method;
use proxmox_router::SubdirMap;
use proxmox_router::{Permission, Router, RpcEnvironment};
-use proxmox_router::list_subdirs_api_method;
use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn};
};
if domains.is_empty() {
- task_log!(worker, "No domains configured to be ordered from an ACME server.");
+ task_log!(
+ worker,
+ "No domains configured to be ordered from an ACME server."
+ );
return Ok(None);
}
task_warn!(
worker,
"Failed to teardown plugin '{}' for domain '{}' - {}",
- plugin_id, domain, err
+ plugin_id,
+ domain,
+ err
);
}
let auth = acme.get_authorization(auth_url).await?;
match auth.status {
Status::Pending => {
- task_log!(worker, "Status is still 'pending', trying again in 10 seconds");
+ task_log!(
+ worker,
+ "Status is still 'pending', trying again in 10 seconds"
+ );
tokio::time::sleep(Duration::from_secs(10)).await;
}
Status::Valid => return Ok(()),
let mut acme = node_config.acme_client().await?;
task_log!(worker, "Revoking old certificate");
acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
- task_log!(worker, "Deleting certificate and regenerating a self-signed one");
+ task_log!(
+ worker,
+ "Deleting certificate and regenerating a self-signed one"
+ );
delete_custom_certificate().await?;
Ok(())
},
-use anyhow::Error;
use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
use hex::FromHex;
use proxmox_router::{Permission, Router, RpcEnvironment};
#[api()]
#[derive(Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
#[allow(non_camel_case_types)]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the email-from property.
email_from,
/// Delete the ciphers-tls-1.3 property.
- #[serde(rename="ciphers-tls-1.3")]
+ #[serde(rename = "ciphers-tls-1.3")]
ciphers_tls_1_3,
/// Delete the ciphers-tls-1.2 property.
- #[serde(rename="ciphers-tls-1.2")]
+ #[serde(rename = "ciphers-tls-1.2")]
ciphers_tls_1_2,
/// Delete the default-lang property.
default_lang,
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::acme => { config.acme = None; },
- DeletableProperty::acmedomain0 => { config.acmedomain0 = None; },
- DeletableProperty::acmedomain1 => { config.acmedomain1 = None; },
- DeletableProperty::acmedomain2 => { config.acmedomain2 = None; },
- DeletableProperty::acmedomain3 => { config.acmedomain3 = None; },
- DeletableProperty::acmedomain4 => { config.acmedomain4 = None; },
- DeletableProperty::http_proxy => { config.http_proxy = None; },
- DeletableProperty::email_from => { config.email_from = None; },
- DeletableProperty::ciphers_tls_1_3 => { config.ciphers_tls_1_3 = None; },
- DeletableProperty::ciphers_tls_1_2 => { config.ciphers_tls_1_2 = None; },
- DeletableProperty::default_lang => { config.default_lang = None; },
- DeletableProperty::description => { config.description = None; },
- DeletableProperty::task_log_max_days => { config.task_log_max_days = None; },
+ DeletableProperty::acme => {
+ config.acme = None;
+ }
+ DeletableProperty::acmedomain0 => {
+ config.acmedomain0 = None;
+ }
+ DeletableProperty::acmedomain1 => {
+ config.acmedomain1 = None;
+ }
+ DeletableProperty::acmedomain2 => {
+ config.acmedomain2 = None;
+ }
+ DeletableProperty::acmedomain3 => {
+ config.acmedomain3 = None;
+ }
+ DeletableProperty::acmedomain4 => {
+ config.acmedomain4 = None;
+ }
+ DeletableProperty::http_proxy => {
+ config.http_proxy = None;
+ }
+ DeletableProperty::email_from => {
+ config.email_from = None;
+ }
+ DeletableProperty::ciphers_tls_1_3 => {
+ config.ciphers_tls_1_3 = None;
+ }
+ DeletableProperty::ciphers_tls_1_2 => {
+ config.ciphers_tls_1_2 = None;
+ }
+ DeletableProperty::default_lang => {
+ config.default_lang = None;
+ }
+ DeletableProperty::description => {
+ config.description = None;
+ }
+ DeletableProperty::task_log_max_days => {
+ config.task_log_max_days = None;
+ }
}
}
}
- if update.acme.is_some() { config.acme = update.acme; }
- if update.acmedomain0.is_some() { config.acmedomain0 = update.acmedomain0; }
- if update.acmedomain1.is_some() { config.acmedomain1 = update.acmedomain1; }
- if update.acmedomain2.is_some() { config.acmedomain2 = update.acmedomain2; }
- if update.acmedomain3.is_some() { config.acmedomain3 = update.acmedomain3; }
- if update.acmedomain4.is_some() { config.acmedomain4 = update.acmedomain4; }
- if update.http_proxy.is_some() { config.http_proxy = update.http_proxy; }
- if update.email_from.is_some() { config.email_from = update.email_from; }
- if update.ciphers_tls_1_3.is_some() { config.ciphers_tls_1_3 = update.ciphers_tls_1_3; }
- if update.ciphers_tls_1_2.is_some() { config.ciphers_tls_1_2 = update.ciphers_tls_1_2; }
- if update.default_lang.is_some() { config.default_lang = update.default_lang; }
- if update.description.is_some() { config.description = update.description; }
- if update.task_log_max_days.is_some() { config.task_log_max_days = update.task_log_max_days; }
+ if update.acme.is_some() {
+ config.acme = update.acme;
+ }
+ if update.acmedomain0.is_some() {
+ config.acmedomain0 = update.acmedomain0;
+ }
+ if update.acmedomain1.is_some() {
+ config.acmedomain1 = update.acmedomain1;
+ }
+ if update.acmedomain2.is_some() {
+ config.acmedomain2 = update.acmedomain2;
+ }
+ if update.acmedomain3.is_some() {
+ config.acmedomain3 = update.acmedomain3;
+ }
+ if update.acmedomain4.is_some() {
+ config.acmedomain4 = update.acmedomain4;
+ }
+ if update.http_proxy.is_some() {
+ config.http_proxy = update.http_proxy;
+ }
+ if update.email_from.is_some() {
+ config.email_from = update.email_from;
+ }
+ if update.ciphers_tls_1_3.is_some() {
+ config.ciphers_tls_1_3 = update.ciphers_tls_1_3;
+ }
+ if update.ciphers_tls_1_2.is_some() {
+ config.ciphers_tls_1_2 = update.ciphers_tls_1_2;
+ }
+ if update.default_lang.is_some() {
+ config.default_lang = update.default_lang;
+ }
+ if update.description.is_some() {
+ config.description = update.description;
+ }
+ if update.task_log_max_days.is_some() {
+ config.task_log_max_days = update.task_log_max_days;
+ }
crate::config::node::save_config(&config)?;
+use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use serde_json::json;
-use ::serde::{Deserialize, Serialize};
-use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_section_config::SectionConfigData;
use proxmox_sys::task_log;
use pbs_api_types::{
- DataStoreConfig, NODE_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
- DATASTORE_SCHEMA, UPID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT,
+ PRIV_SYS_MODIFY, UPID_SCHEMA,
};
use crate::tools::disks::{
- DiskManage, FileSystemType, DiskUsageType,
- create_file_system, create_single_linux_partition, get_fs_uuid, get_disk_usage_info,
+ create_file_system, create_single_linux_partition, get_disk_usage_info, get_fs_uuid,
+ DiskManage, DiskUsageType, FileSystemType,
};
use crate::tools::systemd::{self, types::*};
},
)]
#[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all="kebab-case")]
+#[serde(rename_all = "kebab-case")]
/// Datastore mount info.
pub struct DatastoreMountInfo {
/// The path of the mount unit.
},
)]
/// List systemd datastore mount units.
-pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
-
+pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
lazy_static::lazy_static! {
static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap();
}
filesystem: Option<FileSystemType>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let auth_id = rpcenv.get_auth_id().unwrap();
let default_path = std::path::PathBuf::from(&mount_point);
match std::fs::metadata(&default_path) {
- Err(_) => {}, // path does not exist
+ Err(_) => {} // path does not exist
Ok(_) => {
bail!("path {:?} already exists", default_path);
}
}
let upid_str = WorkerTask::new_thread(
- "dircreate", Some(name.clone()), auth_id, to_stdout, move |worker|
- {
+ "dircreate",
+ Some(name.clone()),
+ auth_id,
+ to_stdout,
+ move |worker| {
task_log!(worker, "create datastore '{}' on disk {}", name, disk);
let add_datastore = add_datastore.unwrap_or(false);
let uuid = get_fs_uuid(&partition)?;
let uuid_path = format!("/dev/disk/by-uuid/{}", uuid);
- let mount_unit_name = create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;
+ let mount_unit_name =
+ create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?;
crate::tools::systemd::reload_daemon()?;
crate::tools::systemd::enable_unit(&mount_unit_name)?;
bail!("datastore '{}' already exists.", datastore.name);
}
- crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
+ crate::api2::config::datastore::do_create_datastore(
+ lock,
+ config,
+ datastore,
+ Some(&worker),
+ )?;
}
Ok(())
- })?;
+ },
+ )?;
Ok(upid_str)
}
)]
/// Remove a filesystem mounted under '/mnt/datastore/<name>'.
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
-
let path = format!("{}{}", BASE_MOUNT_DIR, name);
// path of datastore cannot be changed
let (config, _) = pbs_config::datastore::config()?;
let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
- let conflicting_datastore: Option<DataStoreConfig> = datastores.into_iter()
- .find(|ds| ds.path == path);
+ let conflicting_datastore: Option<DataStoreConfig> =
+ datastores.into_iter().find(|ds| ds.path == path);
if let Some(conflicting_datastore) = conflicting_datastore {
- bail!("Can't remove '{}' since it's required by datastore '{}'",
- conflicting_datastore.path, conflicting_datastore.name);
+ bail!(
+ "Can't remove '{}' since it's required by datastore '{}'",
+ conflicting_datastore.path,
+ conflicting_datastore.name
+ );
}
// disable systemd mount-unit
until the next reboot or until unmounted manually!",
path
),
- Ok(_) => Ok(())
+ Ok(_) => Ok(()),
}
}
-const ITEM_ROUTER: Router = Router::new()
- .delete(&API_METHOD_DELETE_DATASTORE_DISK);
+const ITEM_ROUTER: Router = Router::new().delete(&API_METHOD_DELETE_DATASTORE_DISK);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_DATASTORE_MOUNTS)
.post(&API_METHOD_CREATE_DATASTORE_DISK)
.match_all("name", &ITEM_ROUTER);
-
fn create_datastore_mount_unit(
datastore_name: &str,
mount_point: &str,
fs_type: FileSystemType,
what: &str,
) -> Result<String, Error> {
-
let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true);
mount_unit_name.push_str(".mount");
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
let unit = SystemdUnitSection {
- Description: format!("Mount datatstore '{}' under '{}'", datastore_name, mount_point),
+ Description: format!(
+ "Mount datatstore '{}' under '{}'",
+ datastore_name, mount_point
+ ),
..Default::default()
};
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use proxmox_router::{Router, RpcEnvironment, RpcEnvironmentType, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::api;
use proxmox_sys::task_log;
use pbs_api_types::{
- ZpoolListItem, ZfsRaidLevel, ZfsCompressionType, DataStoreConfig,
- NODE_SCHEMA, ZPOOL_NAME_SCHEMA, DATASTORE_SCHEMA, DISK_ARRAY_SCHEMA,
- DISK_LIST_SCHEMA, ZFS_ASHIFT_SCHEMA, UPID_SCHEMA,
- PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ DataStoreConfig, ZfsCompressionType, ZfsRaidLevel, ZpoolListItem, DATASTORE_SCHEMA,
+ DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA,
+ ZFS_ASHIFT_SCHEMA, ZPOOL_NAME_SCHEMA,
};
use crate::tools::disks::{
- zpool_list, zpool_status, parse_zpool_status_config_tree, vdev_list_to_tree,
- DiskUsageType,
+ parse_zpool_status_config_tree, vdev_list_to_tree, zpool_list, zpool_status, DiskUsageType,
};
use proxmox_rest_server::WorkerTask;
-
#[api(
protected: true,
input: {
)]
/// List zfs pools.
pub fn list_zpools() -> Result<Vec<ZpoolListItem>, Error> {
-
let data = zpool_list(None, false)?;
let mut list = Vec::new();
},
)]
/// Get zpool status details.
-pub fn zpool_details(
- name: String,
-) -> Result<Value, Error> {
-
+pub fn zpool_details(name: String) -> Result<Value, Error> {
let key_value_list = zpool_status(&name)?;
let config = match key_value_list.iter().find(|(k, _)| k == "config") {
Some((_, v)) => v,
- None => bail!("got zpool status without config key"),
+ None => bail!("got zpool status without config key"),
};
let vdev_list = parse_zpool_status_config_tree(config)?;
}
}
- tree["name"] = tree.as_object_mut().unwrap()
+ tree["name"] = tree
+ .as_object_mut()
+ .unwrap()
.remove("pool")
.unwrap_or_else(|| name.into());
-
Ok(tree)
}
add_datastore: Option<bool>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let auth_id = rpcenv.get_auth_id().unwrap();
let devices_text = devices.clone();
let devices = DISK_ARRAY_SCHEMA.parse_property_string(&devices)?;
- let devices: Vec<String> = devices.as_array().unwrap().iter()
- .map(|v| v.as_str().unwrap().to_string()).collect();
+ let devices: Vec<String> = devices
+ .as_array()
+ .unwrap()
+ .iter()
+ .map(|v| v.as_str().unwrap().to_string())
+ .collect();
let disk_map = crate::tools::disks::get_disks(None, true)?;
for disk in devices.iter() {
let default_path = std::path::PathBuf::from(&mount_point);
match std::fs::metadata(&default_path) {
- Err(_) => {}, // path does not exist
+ Err(_) => {} // path does not exist
Ok(_) => {
bail!("path {:?} already exists", default_path);
}
}
- let upid_str = WorkerTask::new_thread(
- "zfscreate", Some(name.clone()), auth_id, to_stdout, move |worker|
- {
- task_log!(worker, "create {:?} zpool '{}' on devices '{}'", raidlevel, name, devices_text);
-
+ let upid_str = WorkerTask::new_thread(
+ "zfscreate",
+ Some(name.clone()),
+ auth_id,
+ to_stdout,
+ move |worker| {
+ task_log!(
+ worker,
+ "create {:?} zpool '{}' on devices '{}'",
+ raidlevel,
+ name,
+ devices_text
+ );
let mut command = std::process::Command::new("zpool");
- command.args(&["create", "-o", &format!("ashift={}", ashift), "-m", &mount_point, &name]);
+ command.args(&[
+ "create",
+ "-o",
+ &format!("ashift={}", ashift),
+ "-m",
+ &mount_point,
+ &name,
+ ]);
match raidlevel {
ZfsRaidLevel::Single => {
command.args(devices);
}
ZfsRaidLevel::Raid10 => {
- devices.chunks(2).for_each(|pair| {
- command.arg("mirror");
- command.args(pair);
- });
+ devices.chunks(2).for_each(|pair| {
+ command.arg("mirror");
+ command.args(pair);
+ });
}
ZfsRaidLevel::RaidZ => {
command.arg("raidz");
task_log!(worker, "{}", output);
if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() {
- let import_unit = format!("zfs-import@{}.service", proxmox_sys::systemd::escape_unit(&name, false));
+ let import_unit = format!(
+ "zfs-import@{}.service",
+ proxmox_sys::systemd::escape_unit(&name, false)
+ );
crate::tools::systemd::enable_unit(&import_unit)?;
}
bail!("datastore '{}' already exists.", datastore.name);
}
- crate::api2::config::datastore::do_create_datastore(lock, config, datastore, Some(&worker))?;
+ crate::api2::config::datastore::do_create_datastore(
+ lock,
+ config,
+ datastore,
+ Some(&worker),
+ )?;
}
Ok(())
- })?;
+ },
+ )?;
Ok(upid_str)
}
-pub const POOL_ROUTER: Router = Router::new()
- .get(&API_METHOD_ZPOOL_DETAILS);
+pub const POOL_ROUTER: Router = Router::new().get(&API_METHOD_ZPOOL_DETAILS);
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_ZPOOLS)
use std::sync::{Arc, Mutex};
-use anyhow::{Error};
+use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
use lazy_static::lazy_static;
use openssl::sha;
use regex::Regex;
use serde_json::{json, Value};
-use ::serde::{Deserialize, Serialize};
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use pbs_api_types::{IPRE, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
-use pbs_api_types::{IPRE, IPV4RE, IPV6RE, IPV4OCTET, IPV6H16, IPV6LS32};
use pbs_api_types::{
- PROXMOX_CONFIG_DIGEST_SCHEMA, FIRST_DNS_SERVER_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
- THIRD_DNS_SERVER_SCHEMA, NODE_SCHEMA, SEARCH_DOMAIN_SCHEMA,
- PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ FIRST_DNS_SERVER_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ PROXMOX_CONFIG_DIGEST_SCHEMA, SEARCH_DOMAIN_SCHEMA, SECOND_DNS_SERVER_SCHEMA,
+ THIRD_DNS_SERVER_SCHEMA,
};
static RESOLV_CONF_FN: &str = "/etc/resolv.conf";
}
pub fn read_etc_resolv_conf() -> Result<Value, Error> {
-
let mut result = json!({});
let mut nscount = 0;
lazy_static! {
static ref DOMAIN_REGEX: Regex = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap();
- static ref SERVER_REGEX: Regex = Regex::new(
- concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
+ static ref SERVER_REGEX: Regex =
+ Regex::new(concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
}
let mut options = String::new();
for line in data.lines() {
-
if let Some(caps) = DOMAIN_REGEX.captures(line) {
result["search"] = Value::from(&caps[1]);
} else if let Some(caps) = SERVER_REGEX.captures(line) {
nscount += 1;
- if nscount > 3 { continue };
+ if nscount > 3 {
+ continue;
+ };
let nameserver = &caps[1];
let id = format!("dns{}", nscount);
result[id] = Value::from(nameserver);
} else {
- if !options.is_empty() { options.push('\n'); }
+ if !options.is_empty() {
+ options.push('\n');
+ }
options.push_str(line);
}
}
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
) -> Result<Value, Error> {
-
lazy_static! {
static ref MUTEX: Arc<Mutex<()>> = Arc::new(Mutex::new(()));
}
for delete_prop in delete {
let config = config.as_object_mut().unwrap();
match delete_prop {
- DeletableProperty::dns1 => { config.remove("dns1"); },
- DeletableProperty::dns2 => { config.remove("dns2"); },
- DeletableProperty::dns3 => { config.remove("dns3"); },
+ DeletableProperty::dns1 => {
+ config.remove("dns1");
+ }
+ DeletableProperty::dns2 => {
+ config.remove("dns2");
+ }
+ DeletableProperty::dns3 => {
+ config.remove("dns3");
+ }
}
}
}
- if let Some(search) = search { config["search"] = search.into(); }
- if let Some(dns1) = dns1 { config["dns1"] = dns1.into(); }
- if let Some(dns2) = dns2 { config["dns2"] = dns2.into(); }
- if let Some(dns3) = dns3 { config["dns3"] = dns3.into(); }
+ if let Some(search) = search {
+ config["search"] = search.into();
+ }
+ if let Some(dns1) = dns1 {
+ config["dns1"] = dns1.into();
+ }
+ if let Some(dns2) = dns2 {
+ config["dns2"] = dns2.into();
+ }
+ if let Some(dns3) = dns3 {
+ config["dns3"] = dns3.into();
+ }
let mut data = String::new();
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
read_etc_resolv_conf()
}
use std::process::{Command, Stdio};
-use anyhow::{Error};
+use anyhow::Error;
use serde_json::{json, Value};
-use std::io::{BufRead,BufReader};
+use std::io::{BufRead, BufReader};
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT};
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let mut args = vec![];
if let Some(lastentries) = lastentries {
Ok(json!(lines))
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_GET_JOURNAL);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_JOURNAL);
use serde_json::{json, Value};
use tokio::io::{AsyncBufReadExt, BufReader};
-use proxmox_sys::sortable;
use proxmox_sys::fd::fd_change_cloexec;
+use proxmox_sys::sortable;
+use proxmox_http::websocket::WebSocket;
+use proxmox_router::list_subdirs_api_method;
use proxmox_router::{
- ApiHandler, ApiMethod, ApiResponseFuture, Permission, RpcEnvironment, Router, SubdirMap,
+ ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::*;
-use proxmox_router::list_subdirs_api_method;
-use proxmox_http::websocket::WebSocket;
use proxmox_rest_server::WorkerTask;
use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_CONSOLE};
use pbs_tools::ticket::{self, Empty, Ticket};
-use crate::tools;
use crate::auth_helpers::private_auth_key;
+use crate::tools;
pub mod apt;
pub mod certificates;
.map_err(Error::from)
.await
{
- Ok(upgraded) => upgraded,
+ Ok(upgraded) => upgraded,
_ => bail!("error"),
};
-use anyhow::{Error, bail};
-use serde::{Deserialize, Serialize};
-use serde_json::{Value, to_value};
+use anyhow::{bail, Error};
use hex::FromHex;
+use serde::{Deserialize, Serialize};
+use serde_json::{to_value, Value};
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{
- Authid, Interface, NetworkInterfaceType, LinuxBondMode, NetworkConfigMethod, BondXmitHashPolicy,
+ Authid, BondXmitHashPolicy, Interface, LinuxBondMode, NetworkConfigMethod,
+ NetworkInterfaceType, CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA,
NETWORK_INTERFACE_ARRAY_SCHEMA, NETWORK_INTERFACE_LIST_SCHEMA, NETWORK_INTERFACE_NAME_SCHEMA,
- CIDR_V4_SCHEMA, CIDR_V6_SCHEMA, IP_V4_SCHEMA, IP_V6_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
- NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::network::{self, NetworkConfig};
fn split_interface_list(list: &str) -> Result<Vec<String>, Error> {
let value = NETWORK_INTERFACE_ARRAY_SCHEMA.parse_property_string(list)?;
- Ok(value.as_array().unwrap().iter().map(|v| v.as_str().unwrap().to_string()).collect())
+ Ok(value
+ .as_array()
+ .unwrap()
+ .iter()
+ .map(|v| v.as_str().unwrap().to_string())
+ .collect())
}
fn check_duplicate_gateway_v4(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
-
- let current_gateway_v4 = config.interfaces.iter()
+ let current_gateway_v4 = config
+ .interfaces
+ .iter()
.find(|(_, interface)| interface.gateway.is_some())
.map(|(name, _)| name.to_string());
if let Some(current_gateway_v4) = current_gateway_v4 {
if current_gateway_v4 != iface {
- bail!("Default IPv4 gateway already exists on interface '{}'", current_gateway_v4);
+ bail!(
+ "Default IPv4 gateway already exists on interface '{}'",
+ current_gateway_v4
+ );
}
}
Ok(())
}
fn check_duplicate_gateway_v6(config: &NetworkConfig, iface: &str) -> Result<(), Error> {
-
- let current_gateway_v6 = config.interfaces.iter()
+ let current_gateway_v6 = config
+ .interfaces
+ .iter()
.find(|(_, interface)| interface.gateway6.is_some())
.map(|(name, _)| name.to_string());
if let Some(current_gateway_v6) = current_gateway_v6 {
if current_gateway_v6 != iface {
- bail!("Default IPv6 gateway already exists on interface '{}'", current_gateway_v6);
+ bail!(
+ "Default IPv6 gateway already exists on interface '{}'",
+ current_gateway_v6
+ );
}
}
Ok(())
}
-
fn set_bridge_ports(iface: &mut Interface, ports: Vec<String>) -> Result<(), Error> {
if iface.interface_type != NetworkInterfaceType::Bridge {
- bail!("interface '{}' is no bridge (type is {:?})", iface.name, iface.interface_type);
+ bail!(
+ "interface '{}' is no bridge (type is {:?})",
+ iface.name,
+ iface.interface_type
+ );
}
iface.bridge_ports = Some(ports);
Ok(())
fn set_bond_slaves(iface: &mut Interface, slaves: Vec<String>) -> Result<(), Error> {
if iface.interface_type != NetworkInterfaceType::Bond {
- bail!("interface '{}' is no bond (type is {:?})", iface.name, iface.interface_type);
+ bail!(
+ "interface '{}' is no bond (type is {:?})",
+ iface.name,
+ iface.interface_type
+ );
}
iface.slaves = Some(slaves);
Ok(())
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let (config, digest) = network::config()?;
let digest = hex::encode(&digest);
let mut list = Vec::new();
for (iface, interface) in config.interfaces.iter() {
- if iface == "lo" { continue; } // do not list lo
+ if iface == "lo" {
+ continue;
+ } // do not list lo
let mut item: Value = to_value(interface)?;
item["digest"] = digest.clone().into();
item["iface"] = iface.to_string().into();
)]
/// Read a network interface configuration.
pub fn read_interface(iface: String) -> Result<Value, Error> {
-
let (config, digest) = network::config()?;
let interface = config.lookup(&iface)?;
Ok(data)
}
-
#[api(
protected: true,
input: {
slaves: Option<String>,
param: Value,
) -> Result<(), Error> {
-
let interface_type = pbs_tools::json::required_string_param(¶m, "type")?;
let interface_type: NetworkInterfaceType = serde_json::from_value(interface_type.into())?;
let mut interface = Interface::new(iface.clone());
interface.interface_type = interface_type;
- if let Some(autostart) = autostart { interface.autostart = autostart; }
- if method.is_some() { interface.method = method; }
- if method6.is_some() { interface.method6 = method6; }
- if mtu.is_some() { interface.mtu = mtu; }
- if comments.is_some() { interface.comments = comments; }
- if comments6.is_some() { interface.comments6 = comments6; }
+ if let Some(autostart) = autostart {
+ interface.autostart = autostart;
+ }
+ if method.is_some() {
+ interface.method = method;
+ }
+ if method6.is_some() {
+ interface.method6 = method6;
+ }
+ if mtu.is_some() {
+ interface.mtu = mtu;
+ }
+ if comments.is_some() {
+ interface.comments = comments;
+ }
+ if comments6.is_some() {
+ interface.comments6 = comments6;
+ }
if let Some(cidr) = cidr {
let (_, _, is_v6) = network::parse_cidr(&cidr)?;
- if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
+ if is_v6 {
+ bail!("invalid address type (expected IPv4, got IPv6)");
+ }
interface.cidr = Some(cidr);
}
if let Some(cidr6) = cidr6 {
let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
- if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
+ if !is_v6 {
+ bail!("invalid address type (expected IPv6, got IPv4)");
+ }
interface.cidr6 = Some(cidr6);
}
if let Some(gateway) = gateway {
let is_v6 = gateway.contains(':');
- if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
+ if is_v6 {
+ bail!("invalid address type (expected IPv4, got IPv6)");
+ }
check_duplicate_gateway_v4(&config, &iface)?;
interface.gateway = Some(gateway);
}
if let Some(gateway6) = gateway6 {
let is_v6 = gateway6.contains(':');
- if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
+ if !is_v6 {
+ bail!("invalid address type (expected IPv6, got IPv4)");
+ }
check_duplicate_gateway_v6(&config, &iface)?;
interface.gateway6 = Some(gateway6);
}
let ports = split_interface_list(&ports)?;
set_bridge_ports(&mut interface, ports)?;
}
- if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
+ if bridge_vlan_aware.is_some() {
+ interface.bridge_vlan_aware = bridge_vlan_aware;
+ }
}
NetworkInterfaceType::Bond => {
if let Some(mode) = bond_mode {
interface.bond_primary = bond_primary;
}
if bond_xmit_hash_policy.is_some() {
- if mode != LinuxBondMode::ieee802_3ad &&
- mode != LinuxBondMode::balance_xor
- {
+ if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
}
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
set_bond_slaves(&mut interface, slaves)?;
}
}
- _ => bail!("creating network interface type '{:?}' is not supported", interface_type),
+ _ => bail!(
+ "creating network interface type '{:?}' is not supported",
+ interface_type
+ ),
}
if interface.cidr.is_some() || interface.gateway.is_some() {
bond_xmit_hash_policy,
}
-
#[api(
protected: true,
input: {
digest: Option<String>,
param: Value,
) -> Result<(), Error> {
-
let _lock = network::lock_config()?;
let (mut config, expected_digest) = network::config()?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
- if gateway.is_some() { check_duplicate_gateway_v4(&config, &iface)?; }
- if gateway6.is_some() { check_duplicate_gateway_v6(&config, &iface)?; }
+ if gateway.is_some() {
+ check_duplicate_gateway_v4(&config, &iface)?;
+ }
+ if gateway6.is_some() {
+ check_duplicate_gateway_v6(&config, &iface)?;
+ }
let interface = config.lookup_mut(&iface)?;
if let Some(interface_type) = param.get("type") {
let interface_type = NetworkInterfaceType::deserialize(interface_type)?;
- if interface_type != interface.interface_type {
- bail!("got unexpected interface type ({:?} != {:?})", interface_type, interface.interface_type);
+ if interface_type != interface.interface_type {
+ bail!(
+ "got unexpected interface type ({:?} != {:?})",
+ interface_type,
+ interface.interface_type
+ );
}
}
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
- DeletableProperty::cidr => { interface.cidr = None; },
- DeletableProperty::cidr6 => { interface.cidr6 = None; },
- DeletableProperty::gateway => { interface.gateway = None; },
- DeletableProperty::gateway6 => { interface.gateway6 = None; },
- DeletableProperty::method => { interface.method = None; },
- DeletableProperty::method6 => { interface.method6 = None; },
- DeletableProperty::comments => { interface.comments = None; },
- DeletableProperty::comments6 => { interface.comments6 = None; },
- DeletableProperty::mtu => { interface.mtu = None; },
- DeletableProperty::autostart => { interface.autostart = false; },
- DeletableProperty::bridge_ports => { set_bridge_ports(interface, Vec::new())?; }
- DeletableProperty::bridge_vlan_aware => { interface.bridge_vlan_aware = None; }
- DeletableProperty::slaves => { set_bond_slaves(interface, Vec::new())?; }
- DeletableProperty::bond_primary => { interface.bond_primary = None; }
- DeletableProperty::bond_xmit_hash_policy => { interface.bond_xmit_hash_policy = None }
+ DeletableProperty::cidr => {
+ interface.cidr = None;
+ }
+ DeletableProperty::cidr6 => {
+ interface.cidr6 = None;
+ }
+ DeletableProperty::gateway => {
+ interface.gateway = None;
+ }
+ DeletableProperty::gateway6 => {
+ interface.gateway6 = None;
+ }
+ DeletableProperty::method => {
+ interface.method = None;
+ }
+ DeletableProperty::method6 => {
+ interface.method6 = None;
+ }
+ DeletableProperty::comments => {
+ interface.comments = None;
+ }
+ DeletableProperty::comments6 => {
+ interface.comments6 = None;
+ }
+ DeletableProperty::mtu => {
+ interface.mtu = None;
+ }
+ DeletableProperty::autostart => {
+ interface.autostart = false;
+ }
+ DeletableProperty::bridge_ports => {
+ set_bridge_ports(interface, Vec::new())?;
+ }
+ DeletableProperty::bridge_vlan_aware => {
+ interface.bridge_vlan_aware = None;
+ }
+ DeletableProperty::slaves => {
+ set_bond_slaves(interface, Vec::new())?;
+ }
+ DeletableProperty::bond_primary => {
+ interface.bond_primary = None;
+ }
+ DeletableProperty::bond_xmit_hash_policy => interface.bond_xmit_hash_policy = None,
}
}
}
- if let Some(autostart) = autostart { interface.autostart = autostart; }
- if method.is_some() { interface.method = method; }
- if method6.is_some() { interface.method6 = method6; }
- if mtu.is_some() { interface.mtu = mtu; }
+ if let Some(autostart) = autostart {
+ interface.autostart = autostart;
+ }
+ if method.is_some() {
+ interface.method = method;
+ }
+ if method6.is_some() {
+ interface.method6 = method6;
+ }
+ if mtu.is_some() {
+ interface.mtu = mtu;
+ }
if let Some(ports) = bridge_ports {
let ports = split_interface_list(&ports)?;
set_bridge_ports(interface, ports)?;
}
- if bridge_vlan_aware.is_some() { interface.bridge_vlan_aware = bridge_vlan_aware; }
+ if bridge_vlan_aware.is_some() {
+ interface.bridge_vlan_aware = bridge_vlan_aware;
+ }
if let Some(slaves) = slaves {
let slaves = split_interface_list(&slaves)?;
set_bond_slaves(interface, slaves)?;
interface.bond_primary = bond_primary;
}
if bond_xmit_hash_policy.is_some() {
- if mode != LinuxBondMode::ieee802_3ad &&
- mode != LinuxBondMode::balance_xor
- {
+ if mode != LinuxBondMode::ieee802_3ad && mode != LinuxBondMode::balance_xor {
bail!("bond_xmit_hash_policy is only valid with LACP(802.3ad) or balance-xor mode");
}
interface.bond_xmit_hash_policy = bond_xmit_hash_policy;
if let Some(cidr) = cidr {
let (_, _, is_v6) = network::parse_cidr(&cidr)?;
- if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
+ if is_v6 {
+ bail!("invalid address type (expected IPv4, got IPv6)");
+ }
interface.cidr = Some(cidr);
}
if let Some(cidr6) = cidr6 {
let (_, _, is_v6) = network::parse_cidr(&cidr6)?;
- if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
+ if !is_v6 {
+ bail!("invalid address type (expected IPv6, got IPv4)");
+ }
interface.cidr6 = Some(cidr6);
}
if let Some(gateway) = gateway {
let is_v6 = gateway.contains(':');
- if is_v6 { bail!("invalid address type (expected IPv4, got IPv6)"); }
+ if is_v6 {
+ bail!("invalid address type (expected IPv4, got IPv6)");
+ }
interface.gateway = Some(gateway);
}
if let Some(gateway6) = gateway6 {
let is_v6 = gateway6.contains(':');
- if !is_v6 { bail!("invalid address type (expected IPv6, got IPv4)"); }
+ if !is_v6 {
+ bail!("invalid address type (expected IPv6, got IPv4)");
+ }
interface.gateway6 = Some(gateway6);
}
- if comments.is_some() { interface.comments = comments; }
- if comments6.is_some() { interface.comments6 = comments6; }
+ if comments.is_some() {
+ interface.comments = comments;
+ }
+ if comments6.is_some() {
+ interface.comments6 = comments6;
+ }
if interface.cidr.is_some() || interface.gateway.is_some() {
interface.method = Some(NetworkConfigMethod::Static);
},
)]
/// Reload network configuration (requires ifupdown2).
-pub async fn reload_network_config(
- rpcenv: &mut dyn RpcEnvironment,
-) -> Result<String, Error> {
-
+pub async fn reload_network_config(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error> {
network::assert_ifupdown2_installed()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
- let upid_str = WorkerTask::spawn("srvreload", Some(String::from("networking")), auth_id.to_string(), true, |_worker| async {
-
- let _ = std::fs::rename(network::NETWORK_INTERFACES_NEW_FILENAME, network::NETWORK_INTERFACES_FILENAME);
-
- network::network_reload()?;
- Ok(())
- })?;
+ let upid_str = WorkerTask::spawn(
+ "srvreload",
+ Some(String::from("networking")),
+ auth_id.to_string(),
+ true,
+ |_worker| async {
+ let _ = std::fs::rename(
+ network::NETWORK_INTERFACES_NEW_FILENAME,
+ network::NETWORK_INTERFACES_FILENAME,
+ );
+
+ network::network_reload()?;
+ Ok(())
+ },
+ )?;
Ok(upid_str)
}
)]
/// Revert network configuration (rm /etc/network/interfaces.new).
pub fn revert_network_config() -> Result<(), Error> {
-
let _ = std::fs::remove_file(network::NETWORK_INTERFACES_NEW_FILENAME);
Ok(())
Ok(json!(generate_report()))
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_GET_REPORT);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_REPORT);
use anyhow::{bail, Error};
-use serde_json::{Value, json};
+use serde_json::{json, Value};
use std::collections::BTreeMap;
use proxmox_router::{Permission, Router};
use proxmox_schema::api;
-use pbs_api_types::{
- NODE_SCHEMA, RRDMode, RRDTimeFrame, PRIV_SYS_AUDIT,
-};
+use pbs_api_types::{RRDMode, RRDTimeFrame, NODE_SCHEMA, PRIV_SYS_AUDIT};
use crate::rrd_cache::extract_rrd_data;
timeframe: RRDTimeFrame,
mode: RRDMode,
) -> Result<Value, Error> {
-
let mut result: Vec<Value> = Vec::new();
let mut timemap = BTreeMap::new();
None => continue,
};
- if let Some(expected_resolution) = last_resolution {
+ if let Some(expected_resolution) = last_resolution {
if reso != expected_resolution {
- bail!("got unexpected RRD resolution ({} != {})", reso, expected_resolution);
+ bail!(
+ "got unexpected RRD resolution ({} != {})",
+ reso,
+ expected_resolution
+ );
}
} else {
last_resolution = Some(reso);
},
)]
/// Read node stats
-fn get_node_stats(
- timeframe: RRDTimeFrame,
- cf: RRDMode,
- _param: Value,
-) -> Result<Value, Error> {
-
+fn get_node_stats(timeframe: RRDTimeFrame, cf: RRDMode, _param: Value) -> Result<Value, Error> {
create_value_from_rrd(
"host",
&[
- "cpu", "iowait",
- "memtotal", "memused",
- "swaptotal", "swapused",
- "netin", "netout",
+ "cpu",
+ "iowait",
+ "memtotal",
+ "memused",
+ "swaptotal",
+ "swapused",
+ "netin",
+ "netout",
"loadavg",
- "total", "used",
- "read_ios", "read_bytes",
- "write_ios", "write_bytes",
+ "total",
+ "used",
+ "read_ios",
+ "read_bytes",
+ "write_ios",
+ "write_bytes",
"io_ticks",
- ],
+ ],
timeframe,
cf,
)
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_GET_NODE_STATS);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_NODE_STATS);
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use proxmox_sys::sortable;
-use proxmox_router::{list_subdirs_api_method, Router, Permission, RpcEnvironment, SubdirMap};
+use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
+use proxmox_sys::sortable;
-use pbs_api_types::{Authid, NODE_SCHEMA, SERVICE_ID_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY};
+use pbs_api_types::{Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SERVICE_ID_SCHEMA};
use proxmox_rest_server::WorkerTask;
];
pub fn real_service_name(service: &str) -> &str {
-
// since postfix package 3.1.0-3.1 the postfix unit is only here
// to manage subinstances, of which the default is called "-".
// This is where we look for the daemon status
}
fn get_full_service_state(service: &str) -> Result<Value, Error> {
-
let real_service_name = real_service_name(service);
let mut child = Command::new("systemctl")
.stdout(Stdio::piped())
.spawn()?;
- use std::io::{BufRead,BufReader};
+ use std::io::{BufRead, BufReader};
let mut result = json!({});
}
fn json_service_state(service: &str, status: Value) -> Value {
-
if let Some(desc) = status["Description"].as_str() {
let name = status["Name"].as_str().unwrap_or(service);
let state = status["SubState"].as_str().unwrap_or("unknown");
},
)]
/// Service list.
-fn list_services(
- _param: Value,
-) -> Result<Value, Error> {
-
+fn list_services(_param: Value) -> Result<Value, Error> {
let mut list = vec![];
for service in &SERVICE_NAME_LIST {
},
)]
/// Read service properties.
-fn get_service_state(
- service: String,
- _param: Value,
-) -> Result<Value, Error> {
-
+fn get_service_state(service: String, _param: Value) -> Result<Value, Error> {
let service = service.as_str();
if !SERVICE_NAME_LIST.contains(&service) {
}
fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Value, Error> {
-
let workerid = format!("srv{}", &cmd);
let cmd = match cmd {
- "start"|"stop"|"restart"=> cmd.to_string(),
+ "start" | "stop" | "restart" => cmd.to_string(),
"reload" => "try-reload-or-restart".to_string(), // some services do not implement reload
_ => bail!("unknown service command '{}'", cmd),
};
auth_id.to_string(),
false,
move |_worker| {
-
if service == "proxmox-backup" && cmd == "stop" {
- bail!("invalid service cmd '{} {}' cannot stop essential service!", service, cmd);
+ bail!(
+ "invalid service cmd '{} {}' cannot stop essential service!",
+ service,
+ cmd
+ );
}
let real_service_name = real_service_name(&service);
}
Ok(())
- }
+ },
)?;
Ok(upid.into())
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("starting service {}", service);
service: String,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
- ) -> Result<Value, Error> {
-
+) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("stopping service {}", service);
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("re-starting service {}", service);
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
log::info!("reloading service {}", service);
#[sortable]
const SERVICE_SUBDIRS: SubdirMap = &sorted!([
- (
- "reload", &Router::new()
- .post(&API_METHOD_RELOAD_SERVICE)
- ),
- (
- "restart", &Router::new()
- .post(&API_METHOD_RESTART_SERVICE)
- ),
- (
- "start", &Router::new()
- .post(&API_METHOD_START_SERVICE)
- ),
- (
- "state", &Router::new()
- .get(&API_METHOD_GET_SERVICE_STATE)
- ),
- (
- "stop", &Router::new()
- .post(&API_METHOD_STOP_SERVICE)
- ),
+ ("reload", &Router::new().post(&API_METHOD_RELOAD_SERVICE)),
+ ("restart", &Router::new().post(&API_METHOD_RESTART_SERVICE)),
+ ("start", &Router::new().post(&API_METHOD_START_SERVICE)),
+ ("state", &Router::new().get(&API_METHOD_GET_SERVICE_STATE)),
+ ("stop", &Router::new().post(&API_METHOD_STOP_SERVICE)),
]);
const SERVICE_ROUTER: Router = Router::new()
-use std::process::Command;
use std::path::Path;
+use std::process::Command;
-use anyhow::{Error, format_err, bail};
+use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox_sys::linux::procfs;
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
-use pbs_api_types::{NODE_SCHEMA, NodePowerCommand, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
+use pbs_api_types::{NodePowerCommand, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT};
use crate::api2::types::{
- NodeCpuInformation, NodeStatus, NodeMemoryCounters, NodeSwapCounters, NodeInformation,
+ NodeCpuInformation, NodeInformation, NodeMemoryCounters, NodeStatus, NodeSwapCounters,
};
impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation {
)]
/// Reboot or shutdown the node.
fn reboot_or_shutdown(command: NodePowerCommand) -> Result<(), Error> {
-
let systemctl_command = match command {
NodePowerCommand::Reboot => "reboot",
NodePowerCommand::Shutdown => "poweroff",
match output.status.code() {
Some(code) => {
let msg = String::from_utf8(output.stderr)
- .map(|m| if m.is_empty() { String::from("no error message") } else { m })
+ .map(|m| {
+ if m.is_empty() {
+ String::from("no error message")
+ } else {
+ m
+ }
+ })
.unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
bail!("diff failed with status code: {} - {}", code, msg);
}
-use anyhow::{Error, format_err, bail};
+use anyhow::{bail, format_err, Error};
use serde_json::Value;
-use proxmox_router::{Router, RpcEnvironment, Permission};
+use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{
- NODE_SCHEMA, SUBSCRIPTION_KEY_SCHEMA, Authid,
- PRIV_SYS_AUDIT,PRIV_SYS_MODIFY,
+ Authid, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, SUBSCRIPTION_KEY_SCHEMA,
};
use crate::tools;
-use crate::tools::subscription::{self, SubscriptionStatus, SubscriptionInfo};
+use crate::tools::subscription::{self, SubscriptionInfo, SubscriptionStatus};
use pbs_config::CachedUserInfo;
#[api(
},
)]
/// Check and update subscription status.
-pub fn check_subscription(
- force: bool,
-) -> Result<(), Error> {
+pub fn check_subscription(force: bool) -> Result<(), Error> {
let info = match subscription::read_subscription() {
Err(err) => bail!("could not read subscription status: {}", err),
Ok(Some(info)) => info,
status: SubscriptionStatus::NOTFOUND,
message: Some("There is no subscription key".into()),
serverid: Some(tools::get_hardware_address()?),
- url: Some(url.into()),
+ url: Some(url.into()),
..Default::default()
},
};
},
)]
/// Set a subscription key and check it.
-pub fn set_subscription(
- key: String,
-) -> Result<(), Error> {
-
+pub fn set_subscription(key: String) -> Result<(), Error> {
let server_id = tools::get_hardware_address()?;
let info = subscription::check_subscription(key, server_id)?;
)]
/// Delete subscription info.
pub fn delete_subscription() -> Result<(), Error> {
-
subscription::delete_subscription()
.map_err(|err| format_err!("Deleting subscription failed: {}", err))?;
use std::process::{Command, Stdio};
-use anyhow::{Error};
+use anyhow::Error;
use serde_json::{json, Value};
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
-use pbs_api_types::{NODE_SCHEMA, SYSTEMD_DATETIME_FORMAT, PRIV_SYS_AUDIT};
+use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT, SYSTEMD_DATETIME_FORMAT};
fn dump_journal(
start: Option<u64>,
until: Option<&str>,
service: Option<&str>,
) -> Result<(u64, Vec<Value>), Error> {
-
let mut args = vec!["-o", "short", "--no-pager"];
- if let Some(service) = service { args.extend(&["--unit", service]); }
- if let Some(since) = since { args.extend(&["--since", since]); }
- if let Some(until) = until { args.extend(&["--until", until]); }
+ if let Some(service) = service {
+ args.extend(&["--unit", service]);
+ }
+ if let Some(since) = since {
+ args.extend(&["--since", since]);
+ }
+ if let Some(until) = until {
+ args.extend(&["--until", until]);
+ }
let mut lines: Vec<Value> = vec![];
let mut limit = limit.unwrap_or(50);
.stdout(Stdio::piped())
.spawn()?;
- use std::io::{BufRead,BufReader};
+ use std::io::{BufRead, BufReader};
if let Some(ref mut stdout) = child.stdout {
for line in BufReader::new(stdout).lines() {
match line {
Ok(line) => {
count += 1;
- if count < start { continue };
- if limit == 0 { continue };
+ if count < start {
+ continue;
+ };
+ if limit == 0 {
+ continue;
+ };
lines.push(json!({ "n": count, "t": line }));
// so we add a line
if count == 0 {
count += 1;
- lines.push(json!({ "n": count, "t": "no content"}));
+ lines.push(json!({ "n": count, "t": "no content"}));
}
Ok((count, lines))
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
- let service = param["service"].as_str().map(|service| crate::api2::node::services::real_service_name(service));
+ let service = param["service"]
+ .as_str()
+ .map(|service| crate::api2::node::services::real_service_name(service));
let (count, lines) = dump_journal(
param["start"].as_u64(),
param["limit"].as_u64(),
param["since"].as_str(),
param["until"].as_str(),
- service)?;
+ service,
+ )?;
rpcenv["total"] = Value::from(count);
Ok(json!(lines))
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_GET_SYSLOG);
-
+pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_SYSLOG);
use anyhow::{bail, Error};
use serde_json::{json, Value};
-use proxmox_sys::sortable;
-use proxmox_router::{list_subdirs_api_method, Router, RpcEnvironment, Permission, SubdirMap};
+use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
+use proxmox_sys::sortable;
use pbs_api_types::{
- Userid, Authid, Tokenname, TaskListItem, TaskStateType, UPID,
- NODE_SCHEMA, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX,
- SYNC_JOB_WORKER_ID_REGEX, DATASTORE_SCHEMA,
+ Authid, TaskListItem, TaskStateType, Tokenname, Userid, DATASTORE_SCHEMA, NODE_SCHEMA,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_VERIFY, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY,
+ SYNC_JOB_WORKER_ID_REGEX, UPID, UPID_SCHEMA, VERIFICATION_JOB_WORKER_ID_REGEX,
};
use crate::api2::pull::check_pull_privs;
-use proxmox_rest_server::{upid_log_path, upid_read_status, TaskState, TaskListInfoIterator};
use pbs_config::CachedUserInfo;
+use proxmox_rest_server::{upid_log_path, upid_read_status, TaskListInfoIterator, TaskState};
// matches respective job execution privileges
fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) -> Result<(), Error> {
("verificationjob", Some(workerid)) => {
if let Some(captures) = VERIFICATION_JOB_WORKER_ID_REGEX.captures(workerid) {
if let Some(store) = captures.get(1) {
- return user_info.check_privs(auth_id,
- &["datastore", store.as_str()],
- PRIV_DATASTORE_VERIFY,
- true);
+ return user_info.check_privs(
+ auth_id,
+ &["datastore", store.as_str()],
+ PRIV_DATASTORE_VERIFY,
+ true,
+ );
}
}
- },
+ }
("syncjob", Some(workerid)) => {
if let Some(captures) = SYNC_JOB_WORKER_ID_REGEX.captures(workerid) {
let remote = captures.get(1);
let local_store = captures.get(3);
if let (Some(remote), Some(remote_store), Some(local_store)) =
- (remote, remote_store, local_store) {
-
- return check_pull_privs(auth_id,
- local_store.as_str(),
- remote.as_str(),
- remote_store.as_str(),
- false);
+ (remote, remote_store, local_store)
+ {
+ return check_pull_privs(
+ auth_id,
+ local_store.as_str(),
+ remote.as_str(),
+ remote_store.as_str(),
+ false,
+ );
}
}
- },
+ }
("garbage_collection", Some(workerid)) => {
- return user_info.check_privs(auth_id,
- &["datastore", workerid],
- PRIV_DATASTORE_MODIFY,
- true)
- },
+ return user_info.check_privs(
+ auth_id,
+ &["datastore", workerid],
+ PRIV_DATASTORE_MODIFY,
+ true,
+ )
+ }
("prune", Some(workerid)) => {
- return user_info.check_privs(auth_id,
- &["datastore",
- workerid],
- PRIV_DATASTORE_MODIFY,
- true);
- },
+ return user_info.check_privs(
+ auth_id,
+ &["datastore", workerid],
+ PRIV_DATASTORE_MODIFY,
+ true,
+ );
+ }
_ => bail!("not a scheduled job task"),
};
fn check_task_access(auth_id: &Authid, upid: &UPID) -> Result<(), Error> {
let task_auth_id: Authid = upid.auth_id.parse()?;
if auth_id == &task_auth_id
- || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id) {
+ || (task_auth_id.is_token() && &Authid::from(task_auth_id.user().clone()) == auth_id)
+ {
// task owner can always read
Ok(())
} else {
// access to all tasks
// or task == job which the user/token could have configured/manually executed
- user_info.check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
+ user_info
+ .check_privs(auth_id, &["system", "tasks"], PRIV_SYS_AUDIT, false)
.or_else(|_| check_job_privs(auth_id, &user_info, upid))
.or_else(|_| bail!("task access not allowed"))
}
}
fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types::TaskListItem {
- let (endtime, status) = info
- .state
- .map_or_else(|| (None, None), |a| (Some(a.endtime()), Some(a.to_string())));
+ let (endtime, status) = info.state.map_or_else(
+ || (None, None),
+ |a| (Some(a.endtime()), Some(a.to_string())),
+ );
pbs_api_types::TaskListItem {
upid: info.upid_str,
},
)]
/// Get task status.
-async fn get_task_status(
- param: Value,
- rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+async fn get_task_status(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let upid = extract_upid(¶m)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
}
fn extract_upid(param: &Value) -> Result<UPID, Error> {
-
let upid_str = pbs_tools::json::required_string_param(param, "upid")?;
upid_str.parse::<UPID>()
},
)]
/// Read task log.
-async fn read_task_log(
- param: Value,
- mut rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+async fn read_task_log(param: Value, mut rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let upid = extract_upid(¶m)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
match line {
Ok(line) => {
count += 1;
- if count < start { continue };
- if limit == 0 { continue };
+ if count < start {
+ continue;
+ };
+ if limit == 0 {
+ continue;
+ };
lines.push(json!({ "n": count, "t": line }));
},
)]
/// Try to stop a task.
-fn stop_task(
- param: Value,
- rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Value, Error> {
-
+fn stop_task(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let upid = extract_upid(¶m)?;
let auth_id = rpcenv.get_auth_id().unwrap();
param: Value,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TaskListItem>, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let user_privs = user_info.lookup_privs(&auth_id, &["system", "tasks"]);
let store = param["store"].as_str();
let list = TaskListInfoIterator::new(running)?;
- let limit = if limit > 0 { limit as usize } else { usize::MAX };
+ let limit = if limit > 0 {
+ limit as usize
+ } else {
+ usize::MAX
+ };
let mut skipped = 0;
let mut result: Vec<TaskListItem> = Vec::new();
}
if let Some(needle) = &userfilter {
- if !info.upid.auth_id.to_string().contains(needle) { continue; }
+ if !info.upid.auth_id.to_string().contains(needle) {
+ continue;
+ }
}
if let Some(store) = store {
- if !check_job_store(&info.upid, store) { continue; }
+ if !check_job_store(&info.upid, store) {
+ continue;
+ }
}
if let Some(typefilter) = &typefilter {
- if !info.upid.worker_type.contains(typefilter) { continue; }
+ if !info.upid.worker_type.contains(typefilter) {
+ continue;
+ }
}
match (&info.state, &statusfilter) {
if !filters.contains(&tasktype(state)) {
continue;
}
- },
+ }
(None, Some(_)) => continue,
- _ => {},
+ _ => {}
}
if skipped < start as usize {
}
let mut count = result.len() + start as usize;
- if !result.is_empty() && result.len() >= limit { // we have a 'virtual' entry as long as we have any new
+ if !result.is_empty() && result.len() >= limit {
+ // we have a 'virtual' entry as long as we have any new
count += 1;
}
#[sortable]
const UPID_API_SUBDIRS: SubdirMap = &sorted!([
- (
- "log", &Router::new()
- .get(&API_METHOD_READ_TASK_LOG)
- ),
- (
- "status", &Router::new()
- .get(&API_METHOD_GET_TASK_STATUS)
- )
+ ("log", &Router::new().get(&API_METHOD_READ_TASK_LOG)),
+ ("status", &Router::new().get(&API_METHOD_GET_TASK_STATUS))
]);
pub const UPID_API_ROUTER: Router = Router::new()
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
-use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions};
-use proxmox_router::{Router, Permission};
+use proxmox_router::{Permission, Router};
use proxmox_schema::api;
+use proxmox_sys::fs::{file_read_firstline, replace_file, CreateOptions};
-use pbs_api_types::{NODE_SCHEMA, TIME_ZONE_SCHEMA, PRIV_SYS_MODIFY};
+use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY, TIME_ZONE_SCHEMA};
fn read_etc_localtime() -> Result<String, Error> {
// use /etc/timezone
}
// otherwise guess from the /etc/localtime symlink
- let link = std::fs::read_link("/etc/localtime").
- map_err(|err| format_err!("failed to guess timezone - {}", err))?;
+ let link = std::fs::read_link("/etc/localtime")
+ .map_err(|err| format_err!("failed to guess timezone - {}", err))?;
let link = link.to_string_lossy();
match link.rfind("/zoneinfo/") {
},
)]
/// Set time zone
-fn set_timezone(
- timezone: String,
- _param: Value,
-) -> Result<Value, Error> {
+fn set_timezone(timezone: String, _param: Value) -> Result<Value, Error> {
let path = std::path::PathBuf::from(format!("/usr/share/zoneinfo/{}", timezone));
if !path.exists() {
bail!("No such timezone.");
}
- replace_file("/etc/timezone", timezone.as_bytes(), CreateOptions::new(), true)?;
+ replace_file(
+ "/etc/timezone",
+ timezone.as_bytes(),
+ CreateOptions::new(),
+ true,
+ )?;
let _ = std::fs::remove_file("/etc/localtime");
//! Cheap check if the API daemon is online.
-use anyhow::{Error};
+use anyhow::Error;
use serde_json::{json, Value};
-use proxmox_router::{Router, Permission};
+use proxmox_router::{Permission, Router};
use proxmox_schema::api;
#[api(
"pong": true,
}))
}
-pub const ROUTER: Router = Router::new()
- .get(&API_METHOD_PING);
+pub const ROUTER: Router = Router::new().get(&API_METHOD_PING);
use std::convert::TryFrom;
use anyhow::{format_err, Error};
-use futures::{select, future::FutureExt};
+use futures::{future::FutureExt, select};
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
-use proxmox_router::{ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_sys::task_log;
use pbs_api_types::{
- Authid, SyncJobConfig, GroupFilter, RateLimitConfig, GROUP_FILTER_LIST_SCHEMA,
- DATASTORE_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
- PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
+ Authid, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
+ GROUP_FILTER_LIST_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
+ REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
};
-use proxmox_rest_server::WorkerTask;
use pbs_config::CachedUserInfo;
+use proxmox_rest_server::WorkerTask;
-use crate::server::pull::{PullParameters, pull_store};
use crate::server::jobstate::Job;
-
+use crate::server::pull::{pull_store, PullParameters};
pub fn check_pull_privs(
auth_id: &Authid,
remote_store: &str,
delete: bool,
) -> Result<(), Error> {
-
let user_info = CachedUserInfo::new()?;
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
- user_info.check_privs(auth_id, &["remote", remote, remote_store], PRIV_REMOTE_READ, false)?;
+ user_info.check_privs(
+ auth_id,
+ &["remote", remote, remote_store],
+ PRIV_REMOTE_READ,
+ false,
+ )?;
if delete {
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
&sync_job.store,
&sync_job.remote,
&sync_job.remote_store,
- sync_job.owner.as_ref().unwrap_or_else(|| Authid::root_auth_id()).clone(),
+ sync_job
+ .owner
+ .as_ref()
+ .unwrap_or_else(|| Authid::root_auth_id())
+ .clone(),
sync_job.remove_vanished,
sync_job.group_filter.clone(),
sync_job.limit.clone(),
schedule: Option<String>,
to_stdout: bool,
) -> Result<String, Error> {
-
- let job_id = format!("{}:{}:{}:{}",
- sync_job.remote,
- sync_job.remote_store,
- sync_job.store,
- job.jobname());
+ let job_id = format!(
+ "{}:{}:{}:{}",
+ sync_job.remote,
+ sync_job.remote_store,
+ sync_job.store,
+ job.jobname()
+ );
let worker_type = job.jobtype().to_string();
let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store);
auth_id.to_string(),
to_stdout,
move |worker| async move {
-
job.start(&worker.upid().to_string())?;
let worker2 = worker.clone();
let sync_job2 = sync_job.clone();
let worker_future = async move {
-
let pull_params = PullParameters::try_from(&sync_job)?;
let client = pull_params.client().await?;
Ok(())
};
- let mut abort_future = worker2.abort_future().map(|_| Err(format_err!("sync aborted")));
+ let mut abort_future = worker2
+ .abort_future()
+ .map(|_| Err(format_err!("sync aborted")));
- let result = select!{
+ let result = select! {
worker = worker_future.fuse() => worker,
abort = abort_future => abort,
};
let status = worker2.create_state(&result);
match job.finish(status) {
- Ok(_) => {},
+ Ok(_) => {}
Err(err) => {
eprintln!("could not finish job state: {}", err);
}
}
if let Some(email) = email {
- if let Err(err) = crate::server::send_sync_status(&email, notify, &sync_job2, &result) {
+ if let Err(err) =
+ crate::server::send_sync_status(&email, notify, &sync_job2, &result)
+ {
eprintln!("send sync notification failed: {}", err);
}
}
result
- })?;
+ },
+ )?;
Ok(upid_str)
}
},
)]
/// Sync store from other repository
-async fn pull (
+async fn pull(
store: String,
remote: String,
remote_store: String,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
-
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(false);
let client = pull_params.client().await?;
// fixme: set to_stdout to false?
- let upid_str = WorkerTask::spawn("sync", Some(store.clone()), auth_id.to_string(), true, move |worker| async move {
-
- task_log!(worker, "sync datastore '{}' start", store);
+ let upid_str = WorkerTask::spawn(
+ "sync",
+ Some(store.clone()),
+ auth_id.to_string(),
+ true,
+ move |worker| async move {
+ task_log!(worker, "sync datastore '{}' start", store);
- let pull_future = pull_store(&worker, &client, &pull_params);
- let future = select!{
- success = pull_future.fuse() => success,
- abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
- };
+ let pull_future = pull_store(&worker, &client, &pull_params);
+ let future = select! {
+ success = pull_future.fuse() => success,
+ abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
+ };
- let _ = future?;
+ let _ = future?;
- task_log!(worker, "sync datastore '{}' end", store);
+ task_log!(worker, "sync datastore '{}' end", store);
- Ok(())
- })?;
+ Ok(())
+ },
+ )?;
Ok(upid_str)
}
-pub const ROUTER: Router = Router::new()
- .post(&API_METHOD_PULL);
+pub const ROUTER: Router = Router::new().post(&API_METHOD_PULL);
-use std::sync::{Arc,RwLock};
use std::collections::HashSet;
+use std::sync::{Arc, RwLock};
use serde_json::{json, Value};
use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
+use pbs_api_types::Authid;
use pbs_datastore::backup_info::BackupDir;
use pbs_datastore::DataStore;
-use pbs_api_types::Authid;
use proxmox_rest_server::formatter::*;
use proxmox_rest_server::WorkerTask;
pub worker: Arc<WorkerTask>,
pub datastore: Arc<DataStore>,
pub backup_dir: BackupDir,
- allowed_chunks: Arc<RwLock<HashSet<[u8;32]>>>,
+ allowed_chunks: Arc<RwLock<HashSet<[u8; 32]>>>,
}
impl ReaderEnvironment {
datastore: Arc<DataStore>,
backup_dir: BackupDir,
) -> Self {
-
-
Self {
result_attributes: json!({}),
env_type,
}
pub fn debug<S: AsRef<str>>(&self, msg: S) {
- if self.debug { self.worker.log_message(msg); }
+ if self.debug {
+ self.worker.log_message(msg);
+ }
}
-
- pub fn register_chunk(&self, digest: [u8;32]) {
+ pub fn register_chunk(&self, digest: [u8; 32]) {
let mut allowed_chunks = self.allowed_chunks.write().unwrap();
allowed_chunks.insert(digest);
}
- pub fn check_chunk_access(&self, digest: [u8;32]) -> bool {
- self.allowed_chunks.read().unwrap().contains(&digest)
+ pub fn check_chunk_access(&self, digest: [u8; 32]) -> bool {
+ self.allowed_chunks.read().unwrap().contains(&digest)
}
}
impl RpcEnvironment for ReaderEnvironment {
-
fn result_attrib_mut(&mut self) -> &mut Value {
&mut self.result_attributes
}
use anyhow::{bail, format_err, Error};
use futures::*;
+use hex::FromHex;
use hyper::header::{self, HeaderValue, UPGRADE};
use hyper::http::request::Parts;
-use hyper::{Body, Response, Request, StatusCode};
+use hyper::{Body, Request, Response, StatusCode};
use serde_json::Value;
-use hex::FromHex;
-use proxmox_sys::sortable;
use proxmox_router::{
http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission,
Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::{BooleanSchema, ObjectSchema};
+use proxmox_sys::sortable;
use pbs_api_types::{
- Authid, Operation, DATASTORE_SCHEMA, BACKUP_TYPE_SCHEMA, BACKUP_TIME_SCHEMA,
- BACKUP_ID_SCHEMA, CHUNK_DIGEST_SCHEMA, PRIV_DATASTORE_READ, PRIV_DATASTORE_BACKUP,
- BACKUP_ARCHIVE_NAME_SCHEMA,
+ Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
+ BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
+ PRIV_DATASTORE_READ,
};
-use proxmox_sys::fs::lock_dir_noblock_shared;
-use pbs_tools::json::{required_integer_param, required_string_param};
-use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
+use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupDir;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
-use pbs_config::CachedUserInfo;
-use proxmox_rest_server::{WorkerTask, H2Service};
+use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
+use pbs_tools::json::{required_integer_param, required_string_param};
+use proxmox_rest_server::{H2Service, WorkerTask};
+use proxmox_sys::fs::lock_dir_noblock_shared;
use crate::api2::helpers;
mod environment;
use environment::*;
-pub const ROUTER: Router = Router::new()
- .upgrade(&API_METHOD_UPGRADE_BACKUP);
+pub const ROUTER: Router = Router::new().upgrade(&API_METHOD_UPGRADE_BACKUP);
#[sortable]
pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&upgrade_to_backup_reader_protocol),
&ObjectSchema::new(
- concat!("Upgraded to backup protocol ('", PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(), "')."),
+ concat!(
+ "Upgraded to backup protocol ('",
+ PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!(),
+ "')."
+ ),
&sorted!([
("store", false, &DATASTORE_SCHEMA),
("backup-type", false, &BACKUP_TYPE_SCHEMA),
("backup-id", false, &BACKUP_ID_SCHEMA),
("backup-time", false, &BACKUP_TIME_SCHEMA),
- ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
+ (
+ "debug",
+ true,
+ &BooleanSchema::new("Enable verbose debug logging.").schema()
+ ),
]),
- )
-).access(
+ ),
+)
+.access(
// Note: parameter 'store' is no uri parameter, so we need to test inside function body
Some("The user needs Datastore.Read privilege on /datastore/{store}."),
- &Permission::Anybody
+ &Permission::Anybody,
);
fn upgrade_to_backup_reader_protocol(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let debug = param["debug"].as_bool().unwrap_or(false);
.headers
.get("UPGRADE")
.ok_or_else(|| format_err!("missing Upgrade header"))?
- .to_str()?;
+ .to_str()?;
if protocols != PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!() {
bail!("invalid protocol name");
}
- if parts.version >= http::version::Version::HTTP_2 {
- bail!("unexpected http version '{:?}' (expected version < 2)", parts.version);
+ if parts.version >= http::version::Version::HTTP_2 {
+ bail!(
+ "unexpected http version '{:?}' (expected version < 2)",
+ parts.version
+ );
}
let env_type = rpcenv.env_type();
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
let correct_owner = owner == auth_id
- || (owner.is_token()
- && Authid::from(owner.user().clone()) == auth_id);
+ || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
if !correct_owner {
bail!("backup owner check failed!");
}
let _guard = lock_dir_noblock_shared(
&datastore.snapshot_path(&backup_dir),
"snapshot",
- "locked by another operation")?;
+ "locked by another operation",
+ )?;
let path = datastore.base_path();
//let files = BackupInfo::list_files(&path, &backup_dir)?;
- let worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_dir.backup_time());
-
- WorkerTask::spawn("reader", Some(worker_id), auth_id.to_string(), true, move |worker| async move {
- let _guard = _guard;
-
- let mut env = ReaderEnvironment::new(
- env_type,
- auth_id,
- worker.clone(),
- datastore,
- backup_dir,
- );
-
- env.debug = debug;
-
- env.log(format!("starting new backup reader datastore '{}': {:?}", store, path));
-
- let service = H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
-
- let mut abort_future = worker.abort_future()
- .map(|_| Err(format_err!("task aborted")));
-
- let env2 = env.clone();
- let req_fut = async move {
- let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
- env2.debug("protocol upgrade done");
-
- let mut http = hyper::server::conn::Http::new();
- http.http2_only(true);
- // increase window size: todo - find optiomal size
- let window_size = 32*1024*1024; // max = (1 << 31) - 2
- http.http2_initial_stream_window_size(window_size);
- http.http2_initial_connection_window_size(window_size);
- http.http2_max_frame_size(4*1024*1024);
-
- http.serve_connection(conn, service)
- .map_err(Error::from).await
- };
-
- futures::select!{
- req = req_fut.fuse() => req?,
- abort = abort_future => abort?,
- };
-
- env.log("reader finished successfully");
-
- Ok(())
- })?;
+ let worker_id = format!(
+ "{}:{}/{}/{:08X}",
+ store,
+ backup_type,
+ backup_id,
+ backup_dir.backup_time()
+ );
+
+ WorkerTask::spawn(
+ "reader",
+ Some(worker_id),
+ auth_id.to_string(),
+ true,
+ move |worker| async move {
+ let _guard = _guard;
+
+ let mut env = ReaderEnvironment::new(
+ env_type,
+ auth_id,
+ worker.clone(),
+ datastore,
+ backup_dir,
+ );
+
+ env.debug = debug;
+
+ env.log(format!(
+ "starting new backup reader datastore '{}': {:?}",
+ store, path
+ ));
+
+ let service =
+ H2Service::new(env.clone(), worker.clone(), &READER_API_ROUTER, debug);
+
+ let mut abort_future = worker
+ .abort_future()
+ .map(|_| Err(format_err!("task aborted")));
+
+ let env2 = env.clone();
+ let req_fut = async move {
+ let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
+ env2.debug("protocol upgrade done");
+
+ let mut http = hyper::server::conn::Http::new();
+ http.http2_only(true);
+                // increase window size: todo - find optimal size
+ let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
+ http.http2_initial_stream_window_size(window_size);
+ http.http2_initial_connection_window_size(window_size);
+ http.http2_max_frame_size(4 * 1024 * 1024);
+
+ http.serve_connection(conn, service)
+ .map_err(Error::from)
+ .await
+ };
+
+ futures::select! {
+ req = req_fut.fuse() => req?,
+ abort = abort_future => abort?,
+ };
+
+ env.log("reader finished successfully");
+
+ Ok(())
+ },
+ )?;
let response = Response::builder()
.status(StatusCode::SWITCHING_PROTOCOLS)
- .header(UPGRADE, HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()))
+ .header(
+ UPGRADE,
+ HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()),
+ )
.body(Body::empty())?;
Ok(response)
- }.boxed()
+ }
+ .boxed()
}
const READER_API_SUBDIRS: SubdirMap = &[
+ ("chunk", &Router::new().download(&API_METHOD_DOWNLOAD_CHUNK)),
(
- "chunk", &Router::new()
- .download(&API_METHOD_DOWNLOAD_CHUNK)
- ),
- (
- "download", &Router::new()
- .download(&API_METHOD_DOWNLOAD_FILE)
- ),
- (
- "speedtest", &Router::new()
- .download(&API_METHOD_SPEEDTEST)
+ "download",
+ &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
),
+ ("speedtest", &Router::new().download(&API_METHOD_SPEEDTEST)),
];
pub const READER_API_ROUTER: Router = Router::new()
&ApiHandler::AsyncHttp(&download_file),
&ObjectSchema::new(
"Download specified file.",
- &sorted!([
- ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
- ]),
- )
+ &sorted!([("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),]),
+ ),
);
fn download_file(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let env: &ReaderEnvironment = rpcenv.as_ref();
let index = env.datastore.open_dynamic_reader(&path)?;
Some(Box::new(index))
}
- _ => { None }
+ _ => None,
};
if let Some(index) = index {
- env.log(format!("register chunks in '{}' as downloadable.", file_name));
+ env.log(format!(
+ "register chunks in '{}' as downloadable.",
+ file_name
+ ));
for pos in 0..index.index_count() {
let info = index.chunk_info(pos).unwrap();
}
helpers::create_download_response(path).await
- }.boxed()
+ }
+ .boxed()
}
#[sortable]
&ApiHandler::AsyncHttp(&download_chunk),
&ObjectSchema::new(
"Download specified chunk.",
- &sorted!([
- ("digest", false, &CHUNK_DIGEST_SCHEMA),
- ]),
- )
+ &sorted!([("digest", false, &CHUNK_DIGEST_SCHEMA),]),
+ ),
);
fn download_chunk(
_info: &ApiMethod,
rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
async move {
let env: &ReaderEnvironment = rpcenv.as_ref();
let digest = <[u8; 32]>::from_hex(digest_str)?;
if !env.check_chunk_access(digest) {
- env.log(format!("attempted to download chunk {} which is not in registered chunk list", digest_str));
- return Err(http_err!(UNAUTHORIZED, "download chunk {} not allowed", digest_str));
+ env.log(format!(
+ "attempted to download chunk {} which is not in registered chunk list",
+ digest_str
+ ));
+ return Err(http_err!(
+ UNAUTHORIZED,
+ "download chunk {} not allowed",
+ digest_str
+ ));
}
let (path, _) = env.datastore.chunk_path(&digest);
env.debug(format!("download chunk {:?}", path));
- let data = proxmox_async::runtime::block_in_place(|| std::fs::read(path))
- .map_err(move |err| http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err))?;
+ let data =
+ proxmox_async::runtime::block_in_place(|| std::fs::read(path)).map_err(move |err| {
+ http_err!(BAD_REQUEST, "reading file {:?} failed: {}", path2, err)
+ })?;
let body = Body::from(data);
// fixme: set other headers ?
Ok(Response::builder()
- .status(StatusCode::OK)
- .header(header::CONTENT_TYPE, "application/octet-stream")
- .body(body)
- .unwrap())
- }.boxed()
+ .status(StatusCode::OK)
+ .header(header::CONTENT_TYPE, "application/octet-stream")
+ .body(body)
+ .unwrap())
+ }
+ .boxed()
}
/* this is too slow
pub const API_METHOD_SPEEDTEST: ApiMethod = ApiMethod::new(
&ApiHandler::AsyncHttp(&speedtest),
- &ObjectSchema::new("Test 1M block download speed.", &[])
+ &ObjectSchema::new("Test 1M block download speed.", &[]),
);
fn speedtest(
_info: &ApiMethod,
_rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {
-
- let buffer = vec![65u8; 1024*1024]; // nonsense [A,A,A...]
+ let buffer = vec![65u8; 1024 * 1024]; // nonsense [A,A,A...]
let body = Body::from(buffer);
use anyhow::Error;
use serde_json::Value;
-use proxmox_schema::api;
-use proxmox_router::{
- ApiMethod,
- Permission,
- Router,
- RpcEnvironment,
- SubdirMap,
-};
use proxmox_router::list_subdirs_api_method;
+use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
+use proxmox_schema::api;
use pbs_api_types::{
- Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame,
- PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+ Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame, PRIV_DATASTORE_AUDIT,
+ PRIV_DATASTORE_BACKUP,
};
-use pbs_datastore::DataStore;
use pbs_config::CachedUserInfo;
+use pbs_datastore::DataStore;
-use crate::tools::statistics::{linear_regression};
use crate::rrd_cache::extract_rrd_data;
+use crate::tools::statistics::linear_regression;
#[api(
returns: {
_param: Value,
_info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment,
- ) -> Result<Vec<DataStoreStatusListItem>, Error> {
-
+) -> Result<Vec<DataStoreStatusListItem>, Error> {
let (config, _digest) = pbs_config::datastore::config()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
for (store, (_, _)) in &config.sections {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
- let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
+ let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
continue;
}
let rrd_dir = format!("datastore/{}", store);
- let get_rrd = |what: &str| extract_rrd_data(
- &rrd_dir,
- what,
- RRDTimeFrame::Month,
- RRDMode::Average,
- );
+ let get_rrd =
+ |what: &str| extract_rrd_data(&rrd_dir, what, RRDTimeFrame::Month, RRDMode::Average);
let total_res = get_rrd("total")?;
let used_res = get_rrd("used")?;
match (total, used) {
(Some(total), Some(used)) if total != 0.0 => {
- time_list.push(start + (idx as u64)*reso);
- let usage = used/total;
+ time_list.push(start + (idx as u64) * reso);
+ let usage = used / total;
usage_list.push(usage);
history.push(Some(usage));
- },
- _ => {
- history.push(None)
}
+ _ => history.push(None),
}
}
Ok(list.into())
}
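// A minimal sketch (illustration only) of the estimation idea behind the usage
// history collected above: fit a least-squares line through (time, used/total)
// samples to extrapolate when a datastore would reach 100%. The real helper is
// crate::tools::statistics::linear_regression; `fit_line` and its signature are
// hypothetical stand-ins, not that function's actual interface.
fn fit_line(x: &[f64], y: &[f64]) -> Option<(f64, f64)> {
    if x.len() != y.len() || x.is_empty() {
        return None;
    }
    let n = x.len() as f64;
    let mean_x = x.iter().sum::<f64>() / n;
    let mean_y = y.iter().sum::<f64>() / n;
    let (mut cov, mut var) = (0.0, 0.0);
    for (&xi, &yi) in x.iter().zip(y.iter()) {
        cov += (xi - mean_x) * (yi - mean_y);
        var += (xi - mean_x) * (xi - mean_x);
    }
    if var == 0.0 {
        return None; // all samples share one time stamp, slope undefined
    }
    let slope = cov / var;
    Some((mean_y - slope * mean_x, slope)) // (intercept, slope)
}

fn main() {
    // Hypothetical samples: RRD time stamps (seconds) and usage ratios used/total.
    let time_list = [0.0, 3600.0, 7200.0, 10800.0];
    let usage_list = [0.10, 0.12, 0.14, 0.16];
    if let Some((a, b)) = fit_line(&time_list, &usage_list) {
        if b > 0.0 {
            // Extrapolate the point in time where usage would hit 100%.
            println!("estimated full at t = {:.0}", (1.0 - a) / b);
        }
    }
}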
-const SUBDIRS: SubdirMap = &[
- ("datastore-usage", &Router::new().get(&API_METHOD_DATASTORE_STATUS)),
-];
+const SUBDIRS: SubdirMap = &[(
+ "datastore-usage",
+ &Router::new().get(&API_METHOD_DATASTORE_STATUS),
+)];
pub const ROUTER: Router = Router::new()
.get(&list_subdirs_api_method!(SUBDIRS))
use serde::{Deserialize, Serialize};
use serde_json::Value;
-use proxmox_schema::{api, ApiType, Schema, StringSchema, ApiStringFormat};
+use proxmox_schema::{api, ApiStringFormat, ApiType, Schema, StringSchema};
-use pbs_api_types::{
- DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT,
-};
+use pbs_api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, PROXMOX_SAFE_ID_FORMAT};
#[api(
properties: {
pub plugin: Option<String>,
}
-pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema = StringSchema::new(
- "ACME domain configuration string")
- .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
- .schema();
+pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema =
+ StringSchema::new("ACME domain configuration string")
+ .format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
+ .schema();
#[api(
properties: {
Ok(())
});
-
// Regression tests
#[test]
fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> {
-
let schema = pbs_api_types::CERT_FINGERPRINT_SHA256_SCHEMA;
let invalid_fingerprints = [
for fingerprint in invalid_fingerprints.iter() {
if schema.parse_simple_value(fingerprint).is_ok() {
- bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint);
+ bail!(
+ "test fingerprint '{}' failed - got Ok() while exception an error.",
+ fingerprint
+ );
}
}
};
if v != serde_json::json!(fingerprint) {
- bail!("unable to parse fingerprint '{}' - got wrong value {:?}", fingerprint, v);
+ bail!(
+ "unable to parse fingerprint '{}' - got wrong value {:?}",
+ fingerprint,
+ v
+ );
}
}
#[test]
fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
-
use pbs_api_types::Userid;
let invalid_user_ids = [
- "x", // too short
- "xx", // too short
- "xxx", // no realm
- "xxx@", // no realm
- "xx x@test", // contains space
+ "x", // too short
+ "xx", // too short
+ "xxx", // no realm
+ "xxx@", // no realm
+ "xx x@test", // contains space
"xx\nx@test", // contains control character
- "x:xx@test", // contains collon
- "xx/x@test", // contains slash
+ "x:xx@test", // contains collon
+ "xx/x@test", // contains slash
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@test", // too long
];
for name in invalid_user_ids.iter() {
if Userid::API_SCHEMA.parse_simple_value(name).is_ok() {
- bail!("test userid '{}' failed - got Ok() while exception an error.", name);
+ bail!(
+ "test userid '{}' failed - got Ok() while exception an error.",
+ name
+ );
}
}
};
if v != serde_json::json!(name) {
- bail!("unable to parse userid '{}' - got wrong value {:?}", name, v);
+ bail!(
+ "unable to parse userid '{}' - got wrong value {:?}",
+ name,
+ v
+ );
}
}
}
#[api]
-#[derive(Serialize,Deserialize,Default)]
+#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint.
pub struct NodeInformation {
pub info: NodeInformation,
}
-pub const HTTP_PROXY_SCHEMA: Schema = StringSchema::new(
- "HTTP proxy configuration [http://]<host>[:port]")
- .format(&ApiStringFormat::VerifyFn(|s| {
- proxmox_http::ProxyConfig::parse_proxy_url(s)?;
- Ok(())
- }))
- .min_length(1)
- .max_length(128)
- .type_text("[http://]<host>[:port]")
- .schema();
+pub const HTTP_PROXY_SCHEMA: Schema =
+ StringSchema::new("HTTP proxy configuration [http://]<host>[:port]")
+ .format(&ApiStringFormat::VerifyFn(|s| {
+ proxmox_http::ProxyConfig::parse_proxy_url(s)?;
+ Ok(())
+ }))
+ .min_length(1)
+ .max_length(128)
+ .type_text("[http://]<host>[:port]")
+ .schema();
//! Version information
-use anyhow::{Error};
+use anyhow::Error;
use serde_json::{json, Value};
-use proxmox_router::{ApiHandler, ApiMethod, Router, RpcEnvironment, Permission};
+use proxmox_router::{ApiHandler, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::ObjectSchema;
fn get_version(
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
-
Ok(json!({
"version": pbs_buildcfg::PROXMOX_PKG_VERSION,
"release": pbs_buildcfg::PROXMOX_PKG_RELEASE,
}))
}
-pub const ROUTER: Router = Router::new()
- .get(
- &ApiMethod::new(
- &ApiHandler::Sync(&get_version),
- &ObjectSchema::new("Proxmox Backup Server API version.", &[])
- ).access(None, &Permission::Anybody)
- );
-
+pub const ROUTER: Router = Router::new().get(
+ &ApiMethod::new(
+ &ApiHandler::Sync(&get_version),
+ &ObjectSchema::new("Proxmox Backup Server API version.", &[]),
+ )
+ .access(None, &Permission::Anybody),
+);