if let Some(user_data) = crate::config::tfa::read()?.users.remove(&userid) {
match {
- // scope to prevent the temprary iter from borrowing across the whole match
+ // scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_ty, _index, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index))
} {
.ok_or_else(|| http_err!(NOT_FOUND, "no such entry: {}/{}", userid, id))?;
match {
- // scope to prevent the temprary iter from borrowing across the whole match
+ // scope to prevent the temporary iter from borrowing across the whole match
let entry = tfa_id_iter(&user_data).find(|(_, _, entry_id)| id == *entry_id);
entry.map(|(ty, index, _)| (ty, index))
} {
-//! Datastore Syncronization Job Management
+//! Datastore Synchronization Job Management
use anyhow::{bail, format_err, Error};
use serde_json::Value;
let kdf = kdf.unwrap_or_default();
if let Kdf::None = kdf {
- bail!("Please specify a key derivation funktion (none is not allowed here).");
+ bail!("Please specify a key derivation function (none is not allowed here).");
}
let _lock = open_file_locked(
let kdf = kdf.unwrap_or_default();
if let Kdf::None = kdf {
- bail!("Please specify a key derivation funktion (none is not allowed here).");
+ bail!("Please specify a key derivation function (none is not allowed here).");
}
let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
},
notify: {
type: bool,
- description: r#"Send notification mail about new package updates availanle to the
+ description: r#"Send notification mail about new package updates available to the
email address configured for 'root@pam')."#,
default: false,
optional: true,
},
},
returns: {
- description: "The import-export slot number the media was transfered to.",
+ description: "The import-export slot number the media was transferred to.",
type: u64,
minimum: 1,
},
}
}
- worker.log("Drive cleaned sucessfully");
+ worker.log("Drive cleaned successfully");
Ok(())
},
}
Ok((Some(media_id), _key_config)) => {
if label_text != media_id.label.label_text {
- worker.warn(format!("label text missmatch ({} != {})", label_text, media_id.label.label_text));
+ worker.warn(format!("label text mismatch ({} != {})", label_text, media_id.label.label_text));
continue;
}
worker.log(format!("inventorize media '{}' with uuid '{}'", label_text, media_id.label.uuid));
/// Update media status (None, 'full', 'damaged' or 'retired')
///
/// It is not allowed to set status to 'writable' or 'unknown' (those
-/// are internaly managed states).
+/// are internally managed states).
pub fn update_media_status(uuid: Uuid, status: Option<MediaStatus>) -> Result<(), Error> {
let status_path = Path::new(TAPE_STATUS_DIR);
pub enum Notify {
/// Never send notification
Never,
- /// Send notifications for failed and sucessful jobs
+ /// Send notifications for failed and successful jobs
Always,
/// Send notifications for failed jobs only
Error,
#[api()]
#[derive(Debug,Serialize,Deserialize)]
#[serde(rename_all = "kebab-case")]
-/// Kind of devive
+/// Kind of device
pub enum DeviceKind {
/// Tape changer (Autoloader, Robot)
Changer,
//!
//! Since PBS allows multiple potentially interfering operations at the
//! same time (e.g. garbage collect, prune, multiple backup creations
-//! (only in seperate groups), forget, ...), these need to lock against
+//! (only in separate groups), forget, ...), these need to lock against
//! each other in certain scenarios. There is no overarching global lock
//! though, instead always the finest grained lock possible is used,
//! because running these operations concurrently is treated as a feature
#[test]
fn test_chunk_store1() {
- let mut path = std::fs::canonicalize(".").unwrap(); // we need absulute path
+ let mut path = std::fs::canonicalize(".").unwrap(); // we need absolute path
path.push(".testdir");
if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
if !self.chunk_store.cond_touch_chunk(digest, false)? {
crate::task_warn!(
worker,
- "warning: unable to access non-existant chunk {}, required by {:?}",
+ "warning: unable to access non-existent chunk {}, required by {:?}",
proxmox::tools::digest_to_hex(digest),
file_name,
);
type: String,
description: r###"Target directory path. Use '-' to write to standard output.
-We do not extraxt '.pxar' archives when writing to standard output.
+We do not extract '.pxar' archives when writing to standard output.
"###
},
let options = default_table_format_options()
.disable_sort()
- .noborder(true) // just not helpfull for version info which gets copy pasted often
+ .noborder(true) // just not helpful for version info which gets copy pasted often
.column(ColumnConfig::new("Package"))
.column(ColumnConfig::new("Version"))
.column(ColumnConfig::new("ExtraInfo").header("Extra Info"))
optional: true,
},
subject: {
- description: "Include the specified subject as titel text.",
+ description: "Include the specified subject as title text.",
optional: true,
},
"output-format": {
return proxmox_backup::tools::runtime::main(mount_do(param, None));
}
- // Process should be deamonized.
+ // Process should be daemonized.
// Make sure to fork before the async runtime is instantiated to avoid troubles.
let (pr, pw) = proxmox_backup::tools::pipe()?;
match unsafe { fork() } {
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
},
subject: {
- description: "Include the specified subject as titel text.",
+ description: "Include the specified subject as title text.",
optional: true,
},
"output-format": {
},
},
)]
-/// Print tthe encryption key's metadata.
+/// Print the encryption key's metadata.
fn show_key(
param: Value,
rpcenv: &mut dyn RpcEnvironment,
/// Tape command implemented using scsi-generic raw commands
///
-/// SCSI-generic command needs root priviledges, so this binary need
+/// SCSI-generic command needs root privileges, so this binary needs
/// to be setuid root.
///
/// This command can use STDIN as tape device handle.
/// namespaced directory for persistent logging
pub const PROXMOX_BACKUP_LOG_DIR: &str = PROXMOX_BACKUP_LOG_DIR_M!();
-/// logfile for all API reuests handled by the proxy and privileged API daemons. Note that not all
+/// logfile for all API requests handled by the proxy and privileged API daemons. Note that not all
/// failed logins can be logged here with full information, use the auth log for that.
pub const API_ACCESS_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/access.log");
-/// logfile for any failed authentication, via ticket or via token, and new successfull ticket
+/// logfile for any failed authentication, via ticket or via token, and new successful ticket
/// creations. This file can be useful for fail2ban.
pub const API_AUTH_LOG_FN: &str = concat!(PROXMOX_BACKUP_LOG_DIR_M!(), "/api/auth.log");
}
// We have no `self` here for `h2` and `verbose`, the only other arg "common" with 1 other
- // funciton in the same path is `wid`, so those 3 could be in a struct, but there's no real use
+ // function in the same path is `wid`, so those 3 could be in a struct, but there's no real use
// since this is a private method.
#[allow(clippy::too_many_arguments)]
fn upload_chunk_info_stream(
Ok(connection)
})
- // unravel the thread JoinHandle to a useable future
+ // unravel the thread JoinHandle to a usable future
.map(|res| match res {
Ok(res) => res,
Err(err) => Err(format_err!("thread join error on vsock connect: {}", err)),
Ok(())
}
-// parse ip address with otional cidr mask
+// parse ip address with optional cidr mask
pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), Error> {
lazy_static! {
//! indexed by key fingerprint.
//!
//! We store the plain key (unencrypted), as well as a encrypted
-//! version protected by passowrd (see struct `KeyConfig`)
+//! version protected by password (see struct `KeyConfig`)
//!
//! Tape backups store the password protected version on tape, so that
-//! it is possible to retore the key from tape if you know the
+//! it is possible to restore the key from tape if you know the
//! password.
use std::collections::HashMap;
}
/// Save the current data. Note that we do not replace the file here since we lock the file
- /// itself, as it is in `/run`, and the typicall error case for this particular situation
+ /// itself, as it is in `/run`, and the typical error case for this particular situation
/// (machine loses power) simply prevents some login, but that'll probably fail anyway for
/// other reasons then...
///
Garbage collection successful.
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{datastore}}>
Garbage collection failed: {{error}}
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
Verification successful.
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>
{{/each}}
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
Synchronization successful.
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>
Synchronization failed: {{error}}
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
Tape Backup successful.
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#DataStore-{{job.store}}>
Tape Backup failed: {{error}}
-Please visit the web interface for futher details:
+Please visit the web interface for further details:
<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
match data.abort_listeners.pop() {
None => { break; },
Some(ch) => {
- let _ = ch.send(()); // ignore erros here
+ let _ = ch.send(()); // ignore errors here
},
}
}
/// Changer element status.
///
/// Drive and slots may be `Empty`, or contain some media, either
-/// with knwon volume tag `VolumeTag(String)`, or without (`Full`).
+/// with known volume tag `VolumeTag(String)`, or without (`Full`).
#[derive(Serialize, Deserialize, Debug)]
pub enum ElementStatus {
Empty,
pub drives: Vec<DriveStatus>,
/// List of known storage slots
pub slots: Vec<StorageElementStatus>,
- /// Tranport elements
+ /// Transport elements
///
/// Note: Some libraries do not report transport elements.
pub transports: Vec<TransportElementStatus>,
/// List online media labels (label_text/barcodes)
///
- /// List acessible (online) label texts. This does not include
+ /// List accessible (online) label texts. This does not include
/// media inside import-export slots or cleaning media.
fn online_media_label_texts(&mut self) -> Result<Vec<String>, Error> {
let status = self.status()?;
/// Unload media to a free storage slot
///
- /// If posible to the slot it was previously loaded from.
+ /// If possible to the slot it was previously loaded from.
///
/// Note: This method consumes status - so please use returned status afterward.
fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result<MtxStatus, Error> {
-//! Wrapper around expernal `mtx` command line tool
+//! Wrapper around external `mtx` command line tool
mod parse_mtx_status;
pub use parse_mtx_status::*;
Ok(())
}
-/// Tranfer medium from one storage slot to another
+/// Transfer medium from one storage slot to another
pub fn transfer_medium<F: AsRawFd>(
file: &mut F,
from_slot: u64,
bail!("got wrong number of import/export elements");
}
if (setup.transfer_element_count as usize) != drives.len() {
- bail!("got wrong number of tranfer elements");
+ bail!("got wrong number of transfer elements");
}
// create same virtual slot order as mtx(1)
element_type_code: u8,
flags: u8,
descriptor_length: u16,
- reseved: u8,
+ reserved: u8,
byte_count_of_descriptor_data_available: [u8;3],
}
page_code: u16,
page_len: u16,
extdecc_cfgp_byte: u8,
- reserverd: [u8; 15],
+ reserved: [u8; 15],
}
#[derive(Endian)]
let desc: SspDataEncryptionAlgorithmDescriptor =
unsafe { reader.read_be_value()? };
if desc.descriptor_len != 0x14 {
- bail!("got wrong key descriptior len");
+ bail!("got wrong key descriptor len");
}
if (desc.control_byte_4 & 0b00000011) != 2 {
- continue; // cant encrypt in hardware
+ continue; // can't encrypt in hardware
}
if ((desc.control_byte_4 & 0b00001100) >> 2) != 2 {
- continue; // cant decrypt in hardware
+ continue; // can't decrypt in hardware
}
if desc.algorithm_code == 0x00010014 && desc.key_size == 32 {
aes_cgm_index = Some(desc.algorythm_index);
control_byte: u8,
key_format: u8,
key_len: u16,
- reserverd: [u8; 8],
+ reserved: [u8; 8],
}
fn decode_spin_data_encryption_status(data: &[u8]) -> Result<DataEncryptionStatus, Error> {
(0x08_02, 8, MamFormat::ASCII, "Application Version"),
(0x08_03, 160, MamFormat::ASCII, "User Medium Text Label"),
(0x08_04, 12, MamFormat::ASCII, "Date And Time Last Written"),
- (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifer"),
+ (0x08_05, 1, MamFormat::BINARY, "Text Localization Identifier"),
(0x08_06, 32, MamFormat::ASCII, "Barcode"),
(0x08_07, 80, MamFormat::ASCII, "Owning Host Textual Name"),
(0x08_08, 160, MamFormat::ASCII, "Media Pool"),
(0x08_0B, 16, MamFormat::ASCII, "Application Format Version"),
(0x08_0C, 50, MamFormat::ASCII, "Volume Coherency Information"),
- (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifer"),
- (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifer"),
+ (0x08_20, 36, MamFormat::ASCII, "Medium Globally Unique Identifier"),
+ (0x08_21, 36, MamFormat::ASCII, "Media Pool Globally Unique Identifier"),
(0x10_00, 28, MamFormat::BINARY, "Unique Cartridge Identify (UCI)"),
(0x10_01, 24, MamFormat::BINARY, "Alternate Unique Cartridge Identify (Alt-UCI)"),
/// Set or clear encryption key
///
/// We use the media_set_uuid to XOR the secret key with the
- /// uuid (first 16 bytes), so that each media set uses an uique
+    /// uuid (first 16 bytes), so that each media set uses a unique
/// key for encryption.
fn set_encryption(
&mut self,
}
}
-/// Aquires an exclusive lock for the tape device
+/// Acquires an exclusive lock for the tape device
///
/// Basically calls lock_device_path() using the configured drive path.
pub fn lock_tape_device(
pub struct DeviceLockGuard(std::fs::File);
-// Aquires an exclusive lock on `device_path`
+// Acquires an exclusive lock on `device_path`
//
// Uses systemd escape_unit to compute a file name from `device_path`, the try
// to lock `/var/lock/<name>`.
}
fn transfer_media(&mut self, _from: u64, _to: u64) -> Result<MtxStatus, Error> {
- bail!("media tranfer is not implemented!");
+ bail!("media transfer is not implemented!");
}
fn export_media(&mut self, _label_text: &str) -> Result<Option<u64>, Error> {
if seq_nr != buffer.seq_nr() {
proxmox::io_bail!(
- "detected tape block with wrong seqence number ({} != {})",
+ "detected tape block with wrong sequence number ({} != {})",
seq_nr, buffer.seq_nr())
}
///
/// A chunk archive consists of a `MediaContentHeader` followed by a
/// list of chunks entries. Each chunk entry consists of a
-/// `ChunkArchiveEntryHeader` folowed by the chunk data (`DataBlob`).
+/// `ChunkArchiveEntryHeader` followed by the chunk data (`DataBlob`).
///
/// `| MediaContentHeader | ( ChunkArchiveEntryHeader | DataBlob )* |`
pub struct ChunkArchiveWriter<'a> {
Self { reader }
}
- /// Allow access to the underyling reader
+ /// Allow access to the underlying reader
pub fn reader(&self) -> &R {
&self.reader
}
///
/// This ignores file attributes like ACLs and xattrs.
///
-/// Returns `Ok(Some(content_uuid))` on succees, and `Ok(None)` if
+/// Returns `Ok(Some(content_uuid))` on success, and `Ok(None)` if
/// `LEOM` was detected before all data was written. The stream is
/// marked incomplete in that case and does not contain all data (The
/// backup task must rewrite the whole file on the next media).
Ok(file)
}
- /// Retunrs an iterator for all used chunks.
+ /// Returns an iterator for all used chunks.
pub fn chunk_iterator(&self) -> Result<SnapshotChunkIterator, Error> {
SnapshotChunkIterator::new(&self)
}
// Helpers to simplify testing
- /// Genreate and insert a new free tape (test helper)
+ /// Generate and insert a new free tape (test helper)
pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid {
let label = MediaLabel {
uuid
}
- /// Genreate and insert a new tape assigned to a specific pool
+ /// Generate and insert a new tape assigned to a specific pool
/// (test helper)
pub fn generate_assigned_tape(
&mut self,
uuid
}
- /// Genreate and insert a used tape (test helper)
+ /// Generate and insert a used tape (test helper)
pub fn generate_used_tape(
&mut self,
label_text: &str,
//! A set of backup medias.
//!
//! This struct manages backup media state during backup. The main
-//! purpose is to allocate media sets and assing new tapes to it.
+//! purpose is to allocate media sets and assign new tapes to it.
//!
//!
&self.name
}
- /// Retruns encryption settings
+ /// Returns encryption settings
pub fn encrypt_fingerprint(&self) -> Option<Fingerprint> {
self.encrypt_fingerprint.clone()
}
Ok(list)
}
- // tests if the media data is considered as expired at sepcified time
+ // tests if the media data is considered as expired at specified time
pub fn media_is_expired(&self, media: &BackupMedia, current_time: i64) -> bool {
if media.status() != &MediaStatus::Full {
return false;
let seq_nr = seq_nr as usize;
if self.media_list.len() > seq_nr {
if self.media_list[seq_nr].is_some() {
- bail!("found duplicate squence number in media set '{}/{}'",
+ bail!("found duplicate sequence number in media set '{}/{}'",
self.uuid.to_string(), seq_nr);
}
} else {
}
}
- /// Move to EOM (if not aleady there), then creates a new snapshot
+ /// Move to EOM (if not already there), then creates a new snapshot
/// archive writing specified files (as .pxar) into it. On
/// success, this return 'Ok(true)' and the media catalog gets
/// updated.
Ok((done, bytes_written))
}
- /// Move to EOM (if not aleady there), then creates a new chunk
+ /// Move to EOM (if not already there), then creates a new chunk
/// archive and writes chunks from 'chunk_iter'. This stops when
/// it detect LEOM or when we reach max archive size
/// (4GB). Written chunks are registered in the media catalog.
///
/// See: https://github.com/torvalds/linux/blob/master/Documentation/scsi/st.rst
///
-/// On sucess, this returns if we en countered a EOM condition.
+/// On success, this returns whether we encountered an EOM condition.
pub fn tape_device_write_block<W: Write>(
writer: &mut W,
data: &[u8],
// next call fail because there is no free media
assert!(pool.alloc_writable_media(start_time + 5).is_err());
- // Create new nedia set, so that previous set can expire
+ // Create new media set, so that previous set can expire
pool.start_write_session(start_time + 10)?;
assert!(pool.alloc_writable_media(start_time + 10).is_err());
}
}
- /// Remove the node referenced by `node_ptr` from the linke list and return it.
+ /// Remove the node referenced by `node_ptr` from the linked list and return it.
fn remove(&mut self, node_ptr: *mut CacheNode<K, V>) -> Box<CacheNode<K, V>> {
let node = unsafe { Box::from_raw(node_ptr) };
if let Err(panic) = handle.join() {
match panic.downcast::<&str>() {
Ok(panic_msg) => msg_list.push(
- format!("thread {} ({}) paniced: {}", self.name, i, panic_msg)
+ format!("thread {} ({}) panicked: {}", self.name, i, panic_msg)
),
Err(_) => msg_list.push(
- format!("thread {} ({}) paniced", self.name, i)
+ format!("thread {} ({}) panicked", self.name, i)
),
}
}
//!
//! See: `/usr/include/scsi/sg_pt.h`
//!
-//! The SCSI Commands Reference Manual also contains some usefull information.
+//! The SCSI Commands Reference Manual also contains some useful information.
use std::os::unix::io::AsRawFd;
use std::ptr::NonNull;
Ok(())
}
-/// querys the up to date subscription status and parses the response
+/// queries the up to date subscription status and parses the response
pub fn check_subscription(key: String, server_id: String) -> Result<SubscriptionInfo, Error> {
let now = proxmox::tools::time::epoch_i64();
Ok(())
}
-/// updates apt authenification for repo access
+/// updates apt authentication for repo access
pub fn update_apt_auth(key: Option<String>, password: Option<String>) -> Result<(), Error> {
let auth_conf = std::path::Path::new(APT_AUTH_FN);
match (key, password) {