}
let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
- if !is_new { bail!("backup directorty already exists."); }
+ if !is_new { bail!("backup directory already exists."); }
WorkerTask::spawn("backup", Some(worker_id), &username.clone(), true, move |worker| {
let mut env = BackupEnvironment::new(
match (res, env.ensure_finished()) {
(Ok(_), Ok(())) => {
- env.log("backup finished sucessfully");
+ env.log("backup finished successfully");
Ok(())
},
(Err(err), Ok(())) => {
env.dynamic_writer_append_chunk(wid, offset, size, &digest)?;
- env.debug(format!("sucessfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
+ env.debug(format!("successfully added chunk {} to dynamic index {} (offset {}, size {})", digest_str, wid, offset, size));
}
Ok(Value::Null)
env.fixed_writer_append_chunk(wid, offset, size, &digest)?;
- env.debug(format!("sucessfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
+ env.debug(format!("successfully added chunk {} to fixed index {} (offset {}, size {})", digest_str, wid, offset, size));
}
Ok(Value::Null)
env.dynamic_writer_close(wid, chunk_count, size, csum)?;
- env.log(format!("sucessfully closed dynamic index {}", wid));
+ env.log(format!("successfully closed dynamic index {}", wid));
Ok(Value::Null)
}
env.fixed_writer_close(wid, chunk_count, size, csum)?;
- env.log(format!("sucessfully closed fixed index {}", wid));
+ env.log(format!("successfully closed fixed index {}", wid));
Ok(Value::Null)
}
let env: &BackupEnvironment = rpcenv.as_ref();
env.finish_backup()?;
- env.log("sucessfully finished backup");
+ env.log("successfully finished backup");
Ok(Value::Null)
}
struct SharedBackupState {
finished: bool,
uid_counter: usize,
- file_counter: usize, // sucessfully uploaded files
+ file_counter: usize, // successfully uploaded files
dynamic_writers: HashMap<usize, DynamicWriterState>,
fixed_writers: HashMap<usize, FixedWriterState>,
known_chunks: HashMap<[u8;32], u32>,
autostart,
/// Delete bridge ports (set to 'none')
bridge_ports,
- /// Delet bridge-vlan-aware flag
+ /// Delete bridge-vlan-aware flag
bridge_vlan_aware,
/// Delete bond-slaves (set to 'none')
slaves,
_param: Value,
) -> Result<Value, Error> {
- log::info!("stoping service {}", service);
+ log::info!("stopping service {}", service);
run_service_command(&service, "stop")
}
Either::Right((Ok(res), _)) => Ok(res),
Either::Right((Err(err), _)) => Err(err),
})
- .map_ok(move |_| env.log("reader finished sucessfully"))
+ .map_ok(move |_| env.log("reader finished successfully"))
})?;
let response = Response::builder()
for fingerprint in invalid_fingerprints.iter() {
if let Ok(_) = parse_simple_value(fingerprint, &schema) {
- bail!("test fingerprint '{}' failed - got Ok() while expection an error.", fingerprint);
+ bail!("test fingerprint '{}' failed - got Ok() while expecting an error.", fingerprint);
}
}
for name in invalid_user_ids.iter() {
if let Ok(_) = parse_simple_value(name, &schema) {
- bail!("test userid '{}' failed - got Ok() while expection an error.", name);
+ bail!("test userid '{}' failed - got Ok() while expecting an error.", name);
}
}
/// Verify digest and data length for unencrypted chunks.
///
/// To do that, we need to decompress data first. Please note that
- /// this is noth possible for encrypted chunks.
+ /// this is not possible for encrypted chunks.
pub fn verify_unencrypted(
&self,
expected_chunk_size: usize,
}
#[api(input: { properties: {} })]
-/// Quit command. Exit the programm.
+/// Quit command. Exit the program.
///
/// Returns: nothing
fn quit_command() -> Result<(), Error> {
// tar: dyntest1/testfile7.dat: File shrank by 2833252864 bytes; padding with zeros
// # pxar create test.pxar ./dyntest1/
-// Error: detected shrinked file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
+// Error: detected shrunk file "./dyntest1/testfile0.dat" (22020096 < 12679380992)
fn create_large_file(path: PathBuf) {
if let Some(pipe) = pipe {
nix::unistd::chdir(Path::new("/")).unwrap();
- // Finish creation of deamon by redirecting filedescriptors.
+ // Finish creation of daemon by redirecting filedescriptors.
let nullfd = nix::fcntl::open(
"/dev/null",
nix::fcntl::OFlag::O_RDWR,
}
#[api]
-/// Diplay node certificate information.
+/// Display node certificate information.
fn cert_info() -> Result<(), Error> {
let cert_path = PathBuf::from(configdir!("/proxy.pem"));
/// Download a .blob file
///
- /// This creates a temorary file in /tmp (using O_TMPFILE). The data is verified using
+ /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
/// the provided manifest.
pub async fn download_blob(
&self,
/// Download dynamic index file
///
- /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+ /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_dynamic_index(
&self,
/// Download fixed index file
///
- /// This creates a temorary file in /tmp (using O_TMPFILE). The index is verified using
+ /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_fixed_index(
&self,
/// Login
///
- /// Login is done on demand, so this is onyl required if you need
+ /// Login is done on demand, so this is only required if you need
/// access to authentication data in 'AuthInfo'.
pub async fn login(&self) -> Result<AuthInfo, Error> {
self.auth.listen().await
.read(true)
.open(&tmp_path)?;
- // Note: be silent if there is no log - only log sucessful download
+ // Note: be silent if there is no log - only log successful download
if let Ok(_) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
if let Err(err) = std::fs::rename(&tmp_path, &path) {
bail!("Atomic rename file {:?} failed - {}", path, err);
}
- worker.log(format!("got bakup log file {:?}", CLIENT_LOG_BLOB_NAME));
+ worker.log(format!("got backup log file {:?}", CLIENT_LOG_BLOB_NAME));
}
Ok(())
Ok(())
}
- /// Write attributes not dependening on address family
+ /// Write attributes not depending on address family
fn write_iface_attributes(&self, w: &mut dyn Write) -> Result<(), Error> {
static EMPTY_LIST: Vec<String> = Vec::new();
Ok(())
}
- /// Write attributes dependening on address family inet (IPv4)
+ /// Write attributes depending on address family inet (IPv4)
fn write_iface_attributes_v4(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
if method == NetworkConfigMethod::Static {
if let Some(address) = &self.cidr {
Ok(())
}
- /// Write attributes dependening on address family inet6 (IPv6)
+ /// Write attributes depending on address family inet6 (IPv6)
fn write_iface_attributes_v6(&self, w: &mut dyn Write, method: NetworkConfigMethod) -> Result<(), Error> {
if method == NetworkConfigMethod::Static {
if let Some(address) = &self.cidr6 {
//! format used in the [casync](https://github.com/systemd/casync)
//! toolkit (we are not 100\% binary compatible). It is a file archive
//! format defined by 'Lennart Poettering', specially defined for
-//! efficent deduplication.
+//! efficient deduplication.
//! Every archive contains items in the following order:
//! * `ENTRY` -- containing general stat() data and related bits
}
}
-/// This function calls the provided `copy_func()` with the permutaion
+/// This function calls the provided `copy_func()` with the permutation
/// info.
///
/// ```
/// });
/// ```
///
-/// This will produce the folowing output:
+/// This will produce the following output:
///
/// ```no-compile
/// Copy 3 to 0
/// Copy 4 to 2
/// ```
///
-/// So this generates the following permuation: `[3,1,4,0,2]`.
+/// So this generates the following permutation: `[3,1,4,0,2]`.
pub fn copy_binary_search_tree<F: FnMut(usize, usize)>(
n: usize,
if pos != size {
// Note:: casync format cannot handle that
bail!(
- "detected shrinked file {:?} ({} < {})",
+ "detected shrunk file {:?} ({} < {})",
self.full_path(),
pos,
size
/// Marks item as hardlink
/// compute_goodbye_hash(b"__PROXMOX_FORMAT_HARDLINK__");
pub const PXAR_FORMAT_HARDLINK: u64 = 0x2c5e06f634f65b86;
-/// Marks the beginnig of the payload (actual content) of regular files
+/// Marks the beginning of the payload (actual content) of regular files
pub const PXAR_PAYLOAD: u64 = 0x8b9e1d93d6dcffc9;
/// Marks item as entry of goodbye table
pub const PXAR_GOODBYE: u64 = 0xdfd35c5e8327c403;
Ok(Some((match_pattern, content_buffer, stat)))
}
- /// Interprete a byte buffer as a sinlge line containing a valid
+ /// Interpret a byte buffer as a single line containing a valid
/// `MatchPattern`.
/// Pattern starting with `#` are interpreted as comments, returning `Ok(None)`.
/// Pattern starting with '!' are interpreted as negative match pattern.
pub(crate) fn read_link(&mut self, size: u64) -> Result<PathBuf, Error> {
if size < (HEADER_SIZE + 2) {
- bail!("dectected short link target.");
+ bail!("detected short link target.");
}
let target_len = size - HEADER_SIZE;
pub(crate) fn read_hardlink(&mut self, size: u64) -> Result<(PathBuf, u64), Error> {
if size < (HEADER_SIZE + 8 + 2) {
- bail!("dectected short hardlink header.");
+ bail!("detected short hardlink header.");
}
let offset: u64 = self.read_item()?;
let target = self.read_link(size - 8)?;
pub(crate) fn read_filename(&mut self, size: u64) -> Result<OsString, Error> {
if size < (HEADER_SIZE + 2) {
- bail!("dectected short filename");
+ bail!("detected short filename");
}
let name_len = size - HEADER_SIZE;
} else {
match state {
None => {
- println!("Detected stoped UPID {}", upid_str);
+ println!("Detected stopped UPID {}", upid_str);
let status = upid_read_status(&upid)
.unwrap_or_else(|_| String::from("unknown"));
finish_list.push(TaskListInfo {
}
/// Open or create a lock file (append mode). Then try to
-/// aquire a lock using `lock_file()`.
+/// acquire a lock using `lock_file()`.
pub fn open_file_locked<P: AsRef<Path>>(path: P, timeout: Duration) -> Result<File, Error> {
let path = path.as_ref();
let mut file = match OpenOptions::new().create(true).append(true).open(path) {
};
match lock_file(&mut file, true, Some(timeout)) {
Ok(_) => Ok(file),
- Err(err) => bail!("Unable to aquire lock {:?} - {}", path, err),
+ Err(err) => bail!("Unable to acquire lock {:?} - {}", path, err),
}
}
/// Detect modified configuration files
///
-/// This function fails with a resonable error message if checksums do not match.
+/// This function fails with a reasonable error message if checksums do not match.
pub fn detect_modified_configuration_file(digest1: &[u8;32], digest2: &[u8;32]) -> Result<(), Error> {
if digest1 != digest2 {
bail!("detected modified configuration - file changed by other user? Try again.");
.map_ok(|res| {
CHECKSUM.fetch_add(res, Ordering::SeqCst);
})
- .map_err(|err| { panic!("got errror {}", err); })
+ .map_err(|err| { panic!("got error {}", err); })
.map(|_| ());
let receiver2 = sender.listen()
.map_ok(|res| {
CHECKSUM.fetch_add(res*2, Ordering::SeqCst);
})
- .map_err(|err| { panic!("got errror {}", err); })
+ .map_err(|err| { panic!("got error {}", err); })
.map(|_| ());
let mut rt = tokio::runtime::Runtime::new().unwrap();
/// Log messages with timestamps into files
///
-/// Logs messages to file, and optionaly to standart output.
+/// Logs messages to file, and optionally to standard output.
///
///
/// #### Example:
}
/// Scan through a directory with a regular expression. This is simply a shortcut filtering the
-/// results of `read_subdir`. Non-UTF8 comaptible file names are silently ignored.
+/// results of `read_subdir`. Non-UTF8 compatible file names are silently ignored.
pub fn scan_subdir<'a, P: ?Sized + nix::NixPath>(
dirfd: RawFd,
path: &P,
//! Inter-process reader-writer lock builder.
//!
-//! This implemenation uses fcntl record locks with non-blocking
+//! This implementation uses fcntl record locks with non-blocking
//! F_SETLK command (never blocks).
//!
//! We maintain a map of shared locks with time stamps, so you can get
Ok(())
}
- /// Try to aquire a shared lock
+ /// Try to acquire a shared lock
///
- /// On sucess, this makes sure that no other process can get an exclusive lock for the file.
+ /// On success, this makes sure that no other process can get an exclusive lock for the file.
pub fn try_shared_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockSharedGuard, Error> {
let mut data = locker.lock().unwrap();
result
}
- /// Try to aquire a exclusive lock
+ /// Try to acquire an exclusive lock
///
/// Make sure the we are the only process which has locks for this file (shared or exclusive).
pub fn try_exclusive_lock(locker: Arc<Mutex<Self>>) -> Result<ProcessLockExclusiveGuard, Error> {
-//! Generate and verify Authentification tickets
+//! Generate and verify Authentication tickets
use anyhow::{bail, Error};
use base64;
} else if (btype === 'host') {
cls = 'fa-building';
} else {
- console.warn(`got unkown backup-type '${btype}'`);
+ console.warn(`got unknown backup-type '${btype}'`);
continue; // FIXME: auto render? what do?
}