From: Thomas Lamprecht Date: Sat, 30 May 2020 14:37:33 +0000 (+0200) Subject: typo fixes all over the place X-Git-Tag: v0.2.2~6 X-Git-Url: https://git.proxmox.com/?a=commitdiff_plain;h=add5861e8d49e33d45c7882c0b54a4f711013a99;p=proxmox-backup.git typo fixes all over the place Signed-off-by: Thomas Lamprecht --- diff --git a/src/api2/backup.rs b/src/api2/backup.rs index 3c9cfd87..fc5fd691 100644 --- a/src/api2/backup.rs +++ b/src/api2/backup.rs @@ -107,7 +107,7 @@ async move { } let (path, is_new) = datastore.create_backup_dir(&backup_dir)?; - if !is_new { bail!("backup directorty already exists."); } + if !is_new { bail!("backup directory already exists."); } WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| { let mut env = BackupEnvironment::new( @@ -151,7 +151,7 @@ async move { match (res, env.ensure_finished()) { (Ok(_), Ok(())) => { - env.log("backup finished sucessfully"); + env.log("backup finished successfully"); Ok(()) }, (Err(err), Ok(())) => { @@ -378,7 +378,7 @@ fn dynamic_append ( env.dynamic_writer_append_chunk(wid, offset, size, &digest)?; - env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size)); + env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size)); } Ok(Value::Null) @@ -443,7 +443,7 @@ fn fixed_append ( env.fixed_writer_append_chunk(wid, offset, size, &digest)?; - env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size)); + env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size)); } Ok(Value::Null) @@ -498,7 +498,7 @@ fn close_dynamic_index ( env.dynamic_writer_close(wid, chunk_count, size, csum)?; - env.log(format!("sucessfully closed dynamic index {}", wid)); + env.log(format!("successfully closed dynamic index {}", wid)); Ok(Value::Null) } @@ -552,7 +552,7 @@ fn close_fixed_index ( env.fixed_writer_close(wid, chunk_count, size, csum)?; - env.log(format!("sucessfully closed fixed index {}", wid)); + env.log(format!("successfully closed fixed index {}", wid)); Ok(Value::Null) } @@ -566,7 +566,7 @@ fn finish_backup ( let env: &BackupEnvironment = rpcenv.as_ref(); env.finish_backup()?; - env.log("sucessfully finished backup"); + env.log("successfully finished backup"); Ok(Value::Null) } diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs index 08c2cdc9..34d08333 100644 --- a/src/api2/backup/environment.rs +++ b/src/api2/backup/environment.rs @@ -52,7 +52,7 @@ struct FixedWriterState { struct SharedBackupState { finished: bool, uid_counter: usize, - file_counter: usize, // sucessfully uploaded files + file_counter: usize, // successfully uploaded files dynamic_writers: HashMap, fixed_writers: HashMap, known_chunks: HashMap<[u8;32], u32>, diff --git a/src/api2/node/network.rs b/src/api2/node/network.rs index 352be401..c0c55333 100644 --- a/src/api2/node/network.rs +++ b/src/api2/node/network.rs @@ -338,7 +338,7 @@ pub enum DeletableProperty { autostart, /// Delete bridge ports (set to 'none') bridge_ports, - /// Delet bridge-vlan-aware flag + /// Delete bridge-vlan-aware flag bridge_vlan_aware, /// Delete bond-slaves (set to 'none') slaves, diff --git a/src/api2/node/services.rs b/src/api2/node/services.rs index 9a6b6f93..b0cca76e 100644 --- a/src/api2/node/services.rs +++ b/src/api2/node/services.rs @@ -256,7 +256,7 @@ fn stop_service( _param: Value, ) -> Result 
{ - log::info!("stoping service {}", service); + log::info!("stopping service {}", service); run_service_command(&service, "stop") } diff --git a/src/api2/reader.rs b/src/api2/reader.rs index cb000153..a6aed505 100644 --- a/src/api2/reader.rs +++ b/src/api2/reader.rs @@ -131,7 +131,7 @@ fn upgrade_to_backup_reader_protocol( Either::Right((Ok(res), _)) => Ok(res), Either::Right((Err(err), _)) => Err(err), }) - .map_ok(move |_| env.log("reader finished sucessfully")) + .map_ok(move |_| env.log("reader finished successfully")) })?; let response = Response::builder() diff --git a/src/api2/types.rs b/src/api2/types.rs index 81b39ad5..e613714b 100644 --- a/src/api2/types.rs +++ b/src/api2/types.rs @@ -822,7 +822,7 @@ fn test_cert_fingerprint_schema() -> Result<(), anyhow::Error> { for fingerprint in invalid_fingerprints.iter() { if let Ok(_) = parse_simple_value(fingerprint, &schema) { - bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint); + bail!("test fingerprint '{}' failed - got Ok() while exception an error.", fingerprint); } } @@ -866,7 +866,7 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> { for name in invalid_user_ids.iter() { if let Ok(_) = parse_simple_value(name, &schema) { - bail!("test userid '{}' failed - got Ok() while expection an error.", name); + bail!("test userid '{}' failed - got Ok() while exception an error.", name); } } diff --git a/src/backup/data_blob.rs b/src/backup/data_blob.rs index 03bbfec9..0886cc86 100644 --- a/src/backup/data_blob.rs +++ b/src/backup/data_blob.rs @@ -311,7 +311,7 @@ impl DataBlob { /// Verify digest and data length for unencrypted chunks. /// /// To do that, we need to decompress data first. Please note that - /// this is noth possible for encrypted chunks. + /// this is north possible for encrypted chunks. pub fn verify_unencrypted( &self, expected_chunk_size: usize, diff --git a/src/bin/completion.rs b/src/bin/completion.rs index c1c3b69a..316dd0dc 100644 --- a/src/bin/completion.rs +++ b/src/bin/completion.rs @@ -49,7 +49,7 @@ fn hello_command( } #[api(input: { properties: {} })] -/// Quit command. Exit the programm. +/// Quit command. Exit the program. /// /// Returns: nothing fn quit_command() -> Result<(), Error> { diff --git a/src/bin/dynamic-files.rs b/src/bin/dynamic-files.rs index 9d001f5c..8338dee2 100644 --- a/src/bin/dynamic-files.rs +++ b/src/bin/dynamic-files.rs @@ -16,7 +16,7 @@ use std::io::Write; // tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros // # pxar create test.pxar ./dyntest1/ -// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992) +// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992) fn create_large_file(path: PathBuf) { diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs index a87256a5..f294aef9 100644 --- a/src/bin/proxmox-backup-client.rs +++ b/src/bin/proxmox-backup-client.rs @@ -2000,7 +2000,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { if let Some(pipe) = pipe { nix::unistd::chdir(Path::new("/")).unwrap(); - // Finish creation of deamon by redirecting filedescriptors. + // Finish creation of daemon by redirecting filedescriptors. 
let nullfd = nix::fcntl::open( "/dev/null", nix::fcntl::OFlag::O_RDWR, diff --git a/src/bin/proxmox_backup_manager/cert.rs b/src/bin/proxmox_backup_manager/cert.rs index fd4882c7..f5f725a3 100644 --- a/src/bin/proxmox_backup_manager/cert.rs +++ b/src/bin/proxmox_backup_manager/cert.rs @@ -17,7 +17,7 @@ fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result Result<(), Error> { let cert_path = PathBuf::from(configdir!("/proxy.pem")); diff --git a/src/client/backup_reader.rs b/src/client/backup_reader.rs index f7310134..d4f60ed0 100644 --- a/src/client/backup_reader.rs +++ b/src/client/backup_reader.rs @@ -138,7 +138,7 @@ impl BackupReader { /// Download a .blob file /// - /// This creates a temorary file in /tmp (using O_TMPFILE). The data is verified using + /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using /// the provided manifest. pub async fn download_blob( &self, @@ -164,7 +164,7 @@ impl BackupReader { /// Download dynamic index file /// - /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using + /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using /// the provided manifest. pub async fn download_dynamic_index( &self, @@ -192,7 +192,7 @@ impl BackupReader { /// Download fixed index file /// - /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using + /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using /// the provided manifest. pub async fn download_fixed_index( &self, diff --git a/src/client/http_client.rs b/src/client/http_client.rs index ede41eda..23c4aa54 100644 --- a/src/client/http_client.rs +++ b/src/client/http_client.rs @@ -343,7 +343,7 @@ impl HttpClient { /// Login /// - /// Login is done on demand, so this is onyl required if you need + /// Login is done on demand, so this is only required if you need /// access to authentication data in 'AuthInfo'. 
pub async fn login(&self) -> Result { self.auth.listen().await diff --git a/src/client/pull.rs b/src/client/pull.rs index 378f400c..87904360 100644 --- a/src/client/pull.rs +++ b/src/client/pull.rs @@ -123,12 +123,12 @@ async fn try_client_log_download( .read(true) .open(&tmp_path)?; - // Note: be silent if there is no log - only log sucessful download + // Note: be silent if there is no log - only log successful download if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await { if let Err(err) = std::fs::rename(&tmp_path, &path) { bail!("Atomic rename file {:?} failed - {}", path, err); } - worker.log(format!("got bakup log file {:?}", CLIENT_LOG_BLOB_NAME)); + worker.log(format!("got backup log file {:?}", CLIENT_LOG_BLOB_NAME)); } Ok(()) diff --git a/src/config/network.rs b/src/config/network.rs index 6527cf64..c217141f 100644 --- a/src/config/network.rs +++ b/src/config/network.rs @@ -149,7 +149,7 @@ impl Interface { Ok(()) } - /// Write attributes not dependening on address family + /// Write attributes not depending on address family fn write_iface_attributes(&self, w: &mut dyn Write) -> Result<(), Error> { static EMPTY_LIST: Vec = Vec::new(); @@ -187,7 +187,7 @@ impl Interface { Ok(()) } - /// Write attributes dependening on address family inet (IPv4) + /// Write attributes depending on address family inet (IPv4) fn write_iface_attributes_v4(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> { if method == NetworkConfigMethod::Static { if let Some(address) = &self.cidr { @@ -211,7 +211,7 @@ impl Interface { Ok(()) } - /// Write attributes dependening on address family inet6 (IPv6) + /// Write attributes depending on address family inet6 (IPv6) fn write_iface_attributes_v6(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> { if method == NetworkConfigMethod::Static { if let Some(address) = &self.cidr6 { diff --git a/src/pxar.rs b/src/pxar.rs index 0696fd0b..07b7c2c5 100644 --- a/src/pxar.rs +++ b/src/pxar.rs @@ -4,7 +4,7 @@ //! format used in the [casync](https://github.com/systemd/casync) //! toolkit (we are not 100\% binary compatible). It is a file archive //! format defined by 'Lennart Poettering', specially defined for -//! efficent deduplication. +//! efficient deduplication. //! Every archive contains items in the following order: //! * `ENTRY` -- containing general stat() data and related bits diff --git a/src/pxar/binary_search_tree.rs b/src/pxar/binary_search_tree.rs index 8865881b..63a1d3fa 100644 --- a/src/pxar/binary_search_tree.rs +++ b/src/pxar/binary_search_tree.rs @@ -61,7 +61,7 @@ fn copy_binary_search_tree_inner( } } -/// This function calls the provided `copy_func()` with the permutaion +/// This function calls the provided `copy_func()` with the permutation /// info. /// /// ``` @@ -71,7 +71,7 @@ fn copy_binary_search_tree_inner( /// }); /// ``` /// -/// This will produce the folowing output: +/// This will produce the following output: /// /// ```no-compile /// Copy 3 to 0 @@ -81,7 +81,7 @@ fn copy_binary_search_tree_inner( /// Copy 4 to 2 /// ``` /// -/// So this generates the following permuation: `[3,1,4,0,2]`. +/// So this generates the following permutation: `[3,1,4,0,2]`. 
pub fn copy_binary_search_tree( n: usize, diff --git a/src/pxar/encoder.rs b/src/pxar/encoder.rs index bb0d0d20..f438e39a 100644 --- a/src/pxar/encoder.rs +++ b/src/pxar/encoder.rs @@ -1117,7 +1117,7 @@ impl<'a, W: Write, C: BackupCatalogWriter> Encoder<'a, W, C> { if pos != size { // Note:: casync format cannot handle that bail!( - "detected shrinked file {:?} ({} < {})", + "detected shrunk file {:?} ({} < {})", self.full_path(), pos, size diff --git a/src/pxar/format_definition.rs b/src/pxar/format_definition.rs index 02ea421f..206d3beb 100644 --- a/src/pxar/format_definition.rs +++ b/src/pxar/format_definition.rs @@ -29,7 +29,7 @@ pub const PXAR_QUOTA_PROJID: u64 = 0x161baf2d8772a72b; /// Marks item as hardlink /// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__"); pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86; -/// Marks the beginnig of the payload (actual content) of regular files +/// Marks the beginning of the payload (actual content) of regular files pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9; /// Marks item as entry of goodbye table pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403; diff --git a/src/pxar/match_pattern.rs b/src/pxar/match_pattern.rs index 9b979669..6293344a 100644 --- a/src/pxar/match_pattern.rs +++ b/src/pxar/match_pattern.rs @@ -124,7 +124,7 @@ impl MatchPattern { Ok(Some((match_pattern, content_buffer, stat))) } - /// Interprete a byte buffer as a sinlge line containing a valid + /// Interpret a byte buffer as a sinlge line containing a valid /// `MatchPattern`. /// Pattern starting with `#` are interpreted as comments, returning `Ok(None)`. /// Pattern starting with '!' are interpreted as negative match pattern. diff --git a/src/pxar/sequential_decoder.rs b/src/pxar/sequential_decoder.rs index 8ae31cb7..cfddf8ab 100644 --- a/src/pxar/sequential_decoder.rs +++ b/src/pxar/sequential_decoder.rs @@ -84,7 +84,7 @@ impl SequentialDecoder { pub(crate) fn read_link(&mut self, size: u64) -> Result { if size < (HEADER_SIZE + 2) { - bail!("dectected short link target."); + bail!("detected short link target."); } let target_len = size - HEADER_SIZE; @@ -104,7 +104,7 @@ impl SequentialDecoder { pub(crate) fn read_hardlink(&mut self, size: u64) -> Result<(PathBuf, u64), Error> { if size < (HEADER_SIZE + 8 + 2) { - bail!("dectected short hardlink header."); + bail!("detected short hardlink header."); } let offset: u64 = self.read_item()?; let target = self.read_link(size - 8)?; @@ -121,7 +121,7 @@ impl SequentialDecoder { pub(crate) fn read_filename(&mut self, size: u64) -> Result { if size < (HEADER_SIZE + 2) { - bail!("dectected short filename"); + bail!("detected short filename"); } let name_len = size - HEADER_SIZE; diff --git a/src/server/worker_task.rs b/src/server/worker_task.rs index eb478906..f0ee9b64 100644 --- a/src/server/worker_task.rs +++ b/src/server/worker_task.rs @@ -277,7 +277,7 @@ fn update_active_workers(new_upid: Option<&UPID>) -> Result, E } else { match state { None => { - println!("Detected stoped UPID {}", upid_str); + println!("Detected stopped UPID {}", upid_str); let status = upid_read_status(&upid) .unwrap_or_else(|_| String::from("unknown")); finish_list.push(TaskListInfo { diff --git a/src/tools.rs b/src/tools.rs index 2fedd848..59fee2a7 100644 --- a/src/tools.rs +++ b/src/tools.rs @@ -127,7 +127,7 @@ pub fn lock_file( } /// Open or create a lock file (append mode). Then try to -/// aquire a lock using `lock_file()`. +/// acquire a lock using `lock_file()`. 
pub fn open_file_locked>(path: P, timeout: Duration) -> Result { let path = path.as_ref(); let mut file = match OpenOptions::new().create(true).append(true).open(path) { @@ -136,7 +136,7 @@ pub fn open_file_locked>(path: P, timeout: Duration) -> Result Ok(file), - Err(err) => bail!("Unable to aquire lock {:?} - {}", path, err), + Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err), } } @@ -441,7 +441,7 @@ pub fn join(data: &Vec, sep: char) -> String { /// Detect modified configuration files /// -/// This function fails with a resonable error message if checksums do not match. +/// This function fails with a reasonable error message if checksums do not match. pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> { if digest1 != digest2 { bail!("detected modified configuration - file changed by other user? Try again."); diff --git a/src/tools/broadcast_future.rs b/src/tools/broadcast_future.rs index da627d01..1c82bdc3 100644 --- a/src/tools/broadcast_future.rs +++ b/src/tools/broadcast_future.rs @@ -149,14 +149,14 @@ fn test_broadcast_future() { .map_ok(|res| { CHECKSUM.fetch_add(res, Ordering::SeqCst); }) - .map_err(|err| { panic!("got errror {}", err); }) + .map_err(|err| { panic!("got error {}", err); }) .map(|_| ()); let receiver2 = sender.listen() .map_ok(|res| { CHECKSUM.fetch_add(res*2, Ordering::SeqCst); }) - .map_err(|err| { panic!("got errror {}", err); }) + .map_err(|err| { panic!("got error {}", err); }) .map(|_| ()); let mut rt = tokio::runtime::Runtime::new().unwrap(); diff --git a/src/tools/file_logger.rs b/src/tools/file_logger.rs index c2c5aab7..426e7b8d 100644 --- a/src/tools/file_logger.rs +++ b/src/tools/file_logger.rs @@ -4,7 +4,7 @@ use std::io::Write; /// Log messages with timestamps into files /// -/// Logs messages to file, and optionaly to standart output. +/// Logs messages to file, and optionally to standard output. /// /// /// #### Example: diff --git a/src/tools/fs.rs b/src/tools/fs.rs index 0f7e35ed..894fdf3e 100644 --- a/src/tools/fs.rs +++ b/src/tools/fs.rs @@ -107,7 +107,7 @@ pub fn read_subdir(dirfd: RawFd, path: &P) -> nix::Res } /// Scan through a directory with a regular expression. This is simply a shortcut filtering the -/// results of `read_subdir`. Non-UTF8 comaptible file names are silently ignored. +/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored. pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>( dirfd: RawFd, path: &P, diff --git a/src/tools/process_locker.rs b/src/tools/process_locker.rs index 7a73cebe..56f8c514 100644 --- a/src/tools/process_locker.rs +++ b/src/tools/process_locker.rs @@ -1,6 +1,6 @@ //! Inter-process reader-writer lock builder. //! -//! This implemenation uses fcntl record locks with non-blocking +//! This implementation uses fcntl record locks with non-blocking //! F_SETLK command (never blocks). //! //! We maintain a map of shared locks with time stamps, so you can get @@ -127,9 +127,9 @@ impl ProcessLocker { Ok(()) } - /// Try to aquire a shared lock + /// Try to acquire a shared lock /// - /// On sucess, this makes sure that no other process can get an exclusive lock for the file. + /// On success, this makes sure that no other process can get an exclusive lock for the file. 
pub fn try_shared_lock(locker: Arc>) -> Result { let mut data = locker.lock().unwrap(); @@ -168,7 +168,7 @@ impl ProcessLocker { result } - /// Try to aquire a exclusive lock + /// Try to acquire a exclusive lock /// /// Make sure the we are the only process which has locks for this file (shared or exclusive). pub fn try_exclusive_lock(locker: Arc>) -> Result { diff --git a/src/tools/ticket.rs b/src/tools/ticket.rs index b3658496..17476e98 100644 --- a/src/tools/ticket.rs +++ b/src/tools/ticket.rs @@ -1,4 +1,4 @@ -//! Generate and verify Authentification tickets +//! Generate and verify Authentication tickets use anyhow::{bail, Error}; use base64; diff --git a/www/DataStoreContent.js b/www/DataStoreContent.js index 8099c99c..2be03842 100644 --- a/www/DataStoreContent.js +++ b/www/DataStoreContent.js @@ -77,7 +77,7 @@ Ext.define('PBS.DataStoreContent', { } else if (btype === 'host') { cls = 'fa-building'; } else { - console.warn(`got unkown backup-type '${btype}'`); + console.warn(`got unknown backup-type '${btype}'`); continue; // FIXME: auto render? what do? }
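
The `DataBlob::verify_unencrypted` doc comment touched by this patch notes that digest and data length can only be checked after decompressing, which is not possible for encrypted chunks. Once the plain data is at hand, the core of such a check could look like the following sketch; it assumes SHA-256 chunk digests via the openssl crate, and `verify_chunk` is illustrative rather than the actual method body:

```
use anyhow::{bail, Error};

// Sketch: verify an uncompressed chunk against its expected
// size and SHA-256 digest.
fn verify_chunk(
    data: &[u8],
    expected_size: usize,
    expected_digest: &[u8; 32],
) -> Result<(), Error> {
    if data.len() != expected_size {
        bail!("detected chunk with wrong length ({} != {})", data.len(), expected_size);
    }
    let digest = openssl::sha::sha256(data);
    if &digest != expected_digest {
        bail!("detected chunk with wrong digest");
    }
    Ok(())
}
```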
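The `BackupReader::download_*` doc comments in this patch all mention creating a temporary file in /tmp using O_TMPFILE. A minimal sketch of that pattern with the nix crate; the `tmpfile` helper is hypothetical and not part of the patched code:

```
use std::fs::File;
use std::os::unix::io::FromRawFd;

use nix::fcntl::{open, OFlag};
use nix::sys::stat::Mode;

// Hypothetical helper: open an anonymous temporary file in /tmp.
// O_TMPFILE creates an unnamed inode that disappears once the last
// descriptor is closed, so failed downloads need no cleanup.
fn tmpfile() -> Result<File, nix::Error> {
    let fd = open(
        "/tmp",
        OFlag::O_TMPFILE | OFlag::O_RDWR,
        Mode::from_bits_truncate(0o600),
    )?;
    // Safety: we own the freshly opened descriptor.
    Ok(unsafe { File::from_raw_fd(fd) })
}
```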
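The `copy_binary_search_tree` doc comment in this patch walks through the permutation produced for n = 5. A minimal sketch of how a caller collects that permutation from the `(source, destination)` callback pairs; the import path is an assumption, not confirmed by this diff:

```
use proxmox_backup::pxar::copy_binary_search_tree; // assumed path

fn main() {
    // Record each "Copy src to dest" step the helper emits.
    let mut permutation = vec![0usize; 5];
    copy_binary_search_tree(5, |src, dest| {
        permutation[dest] = src;
    });
    // Matches the permutation from the doc comment: [3,1,4,0,2]
    assert_eq!(permutation, vec![3, 1, 4, 0, 2]);
}
```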
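The `process_locker` module doc in this patch describes inter-process reader-writer locks built on fcntl record locks taken with the non-blocking F_SETLK command. A rough sketch of a single non-blocking exclusive lock attempt; `try_write_lock` is illustrative only and assumes the `libc` and `nix` crates on Linux:

```
use std::os::unix::io::RawFd;

use nix::fcntl::{fcntl, FcntlArg};

// Try to take an exclusive (write) record lock over the whole file.
// With F_SETLK the call fails immediately instead of blocking when
// another process already holds a conflicting lock.
fn try_write_lock(fd: RawFd) -> nix::Result<()> {
    let lock = libc::flock {
        l_type: libc::F_WRLCK as libc::c_short,
        l_whence: libc::SEEK_SET as libc::c_short,
        l_start: 0,
        l_len: 0, // a length of 0 locks to the end of the file
        l_pid: 0,
    };
    fcntl(fd, FcntlArg::F_SETLK(&lock))?;
    Ok(())
}
```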