//!
//! Acquire shared lock for ChunkStore (process wide).
//!
-//! Note: When creating .idx files, we create temporary (.tmp) file,
+//! Note: When creating .idx files, we create a temporary (.tmp) file,
//! then do an atomic rename ...
//!
//!
//! * Garbage Collect:
//!
//! Acquire exclusive lock for ChunkStore (process wide). If we have
-//! already an shared lock for ChunkStore, try to updraged that
+//! already a shared lock for the ChunkStore, try to upgrade that
//! lock.
//!
//!
//! * Server Restart
//!
-//! Try to abort running garbage collection to release exclusive
-//! ChunkStore lock asap. Start new service with existing listening
+//! Try to abort the running garbage collection to release exclusive
+//! ChunkStore locks ASAP. Start the new service with the existing listening
//! socket.
//!
//!
//!
//! Deleting backups is as easy as deleting the corresponding .idx
//! files. Unfortunately, this does not free up any storage, because
-//! those files just contains references to chunks.
+//! those files just contain references to chunks.
//!
//! To free up some storage, we run a garbage collection process at
-//! regular intervals. The collector uses an mark and sweep
+//! regular intervals. The collector uses a mark and sweep
//! approach. In the first phase, it scans all .idx files to mark used
//! chunks. The second phase then removes all unmarked chunks from the
//! store.
//! The filesystem is expected to be mounted with the `relatime`
//! option, so `atime` is only updated if the previous update was more
//! than a certain amount of time ago (by default 24h). So we may only
//! delete chunks with `atime` older than 24 hours.
//!
-//! Another problem arise from running backups. The mark phase does
+//! Another problem arises from running backups. The mark phase does
//! not find any chunks from those backups, because there is no .idx
//! file for them (created after the backup). Chunks created or
//! touched by those backups may have an `atime` as old as the start
-//! time of those backup. Please not that the backup start time may
-//! predate the GC start time. Se we may only delete chunk older than
+//! time of those backups. Please note that the backup start time may
+//! predate the GC start time. So we may only delete chunks older than
//! the start time of those running backup jobs.
//!
//!
//!
//! Not sure if this is better. TODO
-pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
-pub const CATALOG_BLOB_NAME: &str = "catalog.blob";
+use anyhow::{bail, Error};
+
+// Note: .pcat1 => Proxmox Catalog Format version 1
+/// File name of the backup catalog, stored as a dynamic index (.didx) file.
+pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
/// Protocol identifier (version 1) announced when upgrading an HTTP
/// connection to the backup reader protocol.
///
/// NOTE(review): the macro is named PROXMOX_BACKUP_PROTOCOL_ID_V1 but
/// expands to the *reader* protocol id ("...-reader-protocol-v1") —
/// confirm whether the name or the value is the intended one.
#[macro_export]
macro_rules! PROXMOX_BACKUP_PROTOCOL_ID_V1 {
    () => { "proxmox-backup-reader-protocol-v1" }
}
+/// Name of the Unix system user used by proxmox-backup-proxy
+pub const BACKUP_USER_NAME: &str = "backup";
+/// Name of the Unix system group used by proxmox-backup-proxy
+pub const BACKUP_GROUP_NAME: &str = "backup";
+
+/// Look up the system user record for the 'backup' user (``getpwnam_r(3)``)
+pub fn backup_user() -> Result<nix::unistd::User, Error> {
+    // from_name returns Ok(None) when the user simply does not exist.
+    if let Some(user) = nix::unistd::User::from_name(BACKUP_USER_NAME)? {
+        Ok(user)
+    } else {
+        bail!("Unable to lookup backup user.");
+    }
+}
+
+/// Return Group info for the 'backup' group (``getgrnam(3)``)
+///
+/// Resolves `BACKUP_GROUP_NAME` via the system group database and
+/// fails with an error if the group does not exist.
+pub fn backup_group() -> Result<nix::unistd::Group, Error> {
+    match nix::unistd::Group::from_name(BACKUP_GROUP_NAME)? {
+        Some(group) => Ok(group),
+        // Bug fix: message previously said "backup user." (copy-paste
+        // from backup_user) even though this is the group lookup.
+        None => bail!("Unable to lookup backup group."),
+    }
+}
+
mod file_formats;
pub use file_formats::*;
+mod manifest;
+pub use manifest::*;
+
mod crypt_config;
pub use crypt_config::*;
mod data_blob_writer;
pub use data_blob_writer::*;
-mod catalog_blob;
-pub use catalog_blob::*;
+mod catalog;
+pub use catalog::*;
mod chunk_stream;
pub use chunk_stream::*;
mod backup_info;
pub use backup_info::*;
+mod prune;
+pub use prune::*;
+
mod datastore;
pub use datastore::*;
+
+mod verify;
+pub use verify::*;
+
+mod catalog_shell;
+pub use catalog_shell::*;
+
+mod async_index_reader;
+pub use async_index_reader::*;