use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::convert::TryFrom;
+use std::str::FromStr;
+use std::time::Duration;
+use std::fs::File;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
-use chrono::{DateTime, Utc};
+
+use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};
+
+use pbs_tools::format::HumanByte;
+use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
-use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
+use super::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::index::*;
use super::{DataBlob, ArchiveType, archive_type};
-use crate::backup::CryptMode;
-use crate::config::datastore;
-use crate::server::WorkerTask;
+use crate::config::datastore::{self, DataStoreConfig};
+use crate::task::TaskState;
use crate::tools;
-use crate::api2::types::GarbageCollectionStatus;
+use crate::api2::types::{Authid, GarbageCollectionStatus};
+use crate::server::UPID;
lazy_static! {
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
/// management interface for backup.
pub struct DataStore {
chunk_store: Arc<ChunkStore>,
- gc_mutex: Mutex<bool>,
+ gc_mutex: Mutex<()>,
last_gc_status: Mutex<GarbageCollectionStatus>,
+ verify_new: bool,
}
impl DataStore {
let (config, _digest) = datastore::config()?;
let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;
+ let path = PathBuf::from(&config.path);
let mut map = DATASTORE_MAP.lock().unwrap();
if let Some(datastore) = map.get(name) {
// Compare Config - if changed, create new Datastore object!
- if datastore.chunk_store.base == PathBuf::from(&config.path) {
+ if datastore.chunk_store.base == path &&
+ datastore.verify_new == config.verify_new.unwrap_or(false)
+ {
return Ok(datastore.clone());
}
}
- let datastore = DataStore::open(name)?;
+ let datastore = DataStore::open_with_path(name, &path, config)?;
let datastore = Arc::new(datastore);
map.insert(name.to_string(), datastore.clone());
Ok(datastore)
}
- pub fn open(store_name: &str) -> Result<Self, Error> {
-
+ /// Removes all datastores that are no longer configured
+ pub fn remove_unused_datastores() -> Result<(), Error> {
let (config, _digest) = datastore::config()?;
- let (_, store_config) = config.sections.get(store_name)
- .ok_or(format_err!("no such datastore '{}'", store_name))?;
- let path = store_config["path"].as_str().unwrap();
+ let mut map = DATASTORE_MAP.lock().unwrap();
+ // removes all elements that are not in the config
+ map.retain(|key, _| {
+ config.sections.contains_key(key)
+ });
+ Ok(())
+ }
+ fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
let chunk_store = ChunkStore::open(store_name, path)?;
- let gc_status = GarbageCollectionStatus::default();
+ let mut gc_status_path = chunk_store.base_path();
+ gc_status_path.push(".gc-status");
+
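+ // the status of the last GC run is persisted in the datastore root, so it
+ // survives process restarts; missing or unparseable state falls back to the default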
+ let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
+ match serde_json::from_str(&state) {
+ Ok(state) => state,
+ Err(err) => {
+ eprintln!("error reading gc-status: {}", err);
+ GarbageCollectionStatus::default()
+ }
+ }
+ } else {
+ GarbageCollectionStatus::default()
+ };
Ok(Self {
chunk_store: Arc::new(chunk_store),
- gc_mutex: Mutex::new(false),
+ gc_mutex: Mutex::new(()),
last_gc_status: Mutex::new(gc_status),
+ verify_new: config.verify_new.unwrap_or(false),
})
}
pub fn get_chunk_iterator(
&self,
) -> Result<
- impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)>,
+ impl Iterator<Item = (Result<pbs_tools::fs::ReadDirEntry, Error>, usize, bool)>,
Error
> {
self.chunk_store.get_chunk_iterator()
Ok(out)
}
+ /// Fast index verification - only checks if chunks exist
+ pub fn fast_index_verification(
+ &self,
+ index: &dyn IndexFile,
+ checked: &mut HashSet<[u8;32]>,
+ ) -> Result<(), Error> {
+
+ for pos in 0..index.index_count() {
+ let info = index.chunk_info(pos).unwrap();
+ if checked.contains(&info.digest) {
+ continue;
+ }
+
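+ // a stat() is much cheaper than reading and re-hashing the chunk; this
+ // only proves the chunk file exists, not that its content is intact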
+ self.stat_chunk(&info.digest)
+ .map_err(|err| {
+ format_err!(
+ "fast_index_verification error, stat_chunk {} failed - {}",
+ proxmox::tools::digest_to_hex(&info.digest),
+ err,
+ )
+ })?;
+
+ checked.insert(info.digest);
+ }
+
+ Ok(())
+ }
+
pub fn name(&self) -> &str {
self.chunk_store.name()
}
wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
- for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
+ for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
if let Ok(item) = item {
if let Some(file_type) = item.file_type() {
if file_type != nix::dir::Type::File { continue; }
let full_path = self.group_path(backup_group);
+ let _guard = pbs_tools::fs::lock_dir_noblock(&full_path, "backup group", "possibly running backup")?;
+
log::info!("removing backup group {:?}", full_path);
+
+ // remove all individual backup dirs first to ensure nothing is using them
+ for snap in backup_group.list_backups(&self.base_path())? {
+ self.remove_backup_dir(&snap.backup_dir, false)?;
+ }
+
+ // no snapshots left, we can now safely remove the empty folder
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
format_err!(
- "removing backup group {:?} failed - {}",
+ "removing backup group directory {:?} failed - {}",
full_path,
err,
)
}
/// Remove a backup directory including all content
- pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), Error> {
+ pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {
let full_path = self.snapshot_path(backup_dir);
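+ // declared up front so the guards, once acquired, stay alive until the
+ // end of this function (dropping them releases the locks)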
+ let (_guard, _manifest_guard);
+ if !force {
+ _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
+ _manifest_guard = self.lock_manifest(backup_dir)?;
+ }
+
log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path)
.map_err(|err| {
)
})?;
+ // the manifest does not exist anymore, so we do not need to keep the lock file
+ if let Ok(path) = self.manifest_lock_path(backup_dir) {
+ // ignore errors
+ let _ = std::fs::remove_file(path);
+ }
+
Ok(())
}
/// Returns the time of the last successful backup
///
/// Or None if there is no backup in the group (or the group dir does not exist).
- pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<DateTime<Utc>>, Error> {
+ pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
let base_path = self.base_path();
let mut group_path = base_path.clone();
group_path.push(backup_group.group_path());
/// Returns the backup owner.
///
- /// The backup owner is the user who first created the backup group.
- pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<String, Error> {
+ /// The backup owner is the entity who first created the backup group.
+ pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
let mut full_path = self.base_path();
full_path.push(backup_group.group_path());
full_path.push("owner");
let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
- Ok(owner.trim_end().to_string()) // remove trailing newline
+ Ok(owner.trim_end().parse()?) // remove trailing newline
}
/// Set the backup owner.
- pub fn set_owner(&self, backup_group: &BackupGroup, userid: &str, force: bool) -> Result<(), Error> {
+ pub fn set_owner(
+ &self,
+ backup_group: &BackupGroup,
+ auth_id: &Authid,
+ force: bool,
+ ) -> Result<(), Error> {
let mut path = self.base_path();
path.push(backup_group.group_path());
path.push("owner");
let mut file = open_options.open(&path)
.map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
- write!(file, "{}\n", userid)
+ writeln!(file, "{}", auth_id)
.map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;
Ok(())
}
- /// Create a backup group if it does not already exists.
+ /// Create (if it does not already exist) and lock a backup group
///
/// And set the owner to 'userid'. If the group already exists, it returns the
/// current owner (instead of setting the owner).
- pub fn create_backup_group(&self, backup_group: &BackupGroup, userid: &str) -> Result<String, Error> {
-
+ ///
+ /// This also acquires an exclusive lock on the directory and returns the lock guard.
+ pub fn create_locked_backup_group(
+ &self,
+ backup_group: &BackupGroup,
+ auth_id: &Authid,
+ ) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
- let base_path = self.base_path();
-
- let mut full_path = base_path.clone();
+ let mut full_path = self.base_path();
full_path.push(backup_group.backup_type());
std::fs::create_dir_all(&full_path)?;
// create the last component now
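+ // create_dir (unlike create_dir_all) fails with AlreadyExists when the
+ // group exists, letting us distinguish creating a group from re-using one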
match std::fs::create_dir(&full_path) {
Ok(_) => {
- self.set_owner(backup_group, userid, false)?;
+ let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
+ self.set_owner(backup_group, auth_id, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure
- Ok(owner)
+ Ok((owner, guard))
}
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
+ let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
let owner = self.get_owner(backup_group)?; // just to be sure
- Ok(owner)
+ Ok((owner, guard))
}
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
}
/// Creates a new backup snapshot inside a BackupGroup
///
/// The BackupGroup directory needs to exist.
- pub fn create_backup_dir(&self, backup_dir: &BackupDir) -> Result<(PathBuf, bool), io::Error> {
+ pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
+ -> Result<(PathBuf, bool, DirLockGuard), Error>
+ {
let relative_path = backup_dir.relative_path();
let mut full_path = self.base_path();
full_path.push(&relative_path);
+ let lock = ||
+ lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");
+
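+ // the lock is acquired in both match arms, so the returned guard always
+ // protects the (new or pre-existing) snapshot directory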
match std::fs::create_dir(&full_path) {
- Ok(_) => Ok((relative_path, true)),
- Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false)),
- Err(e) => Err(e)
+ Ok(_) => Ok((relative_path, true, lock()?)),
+ Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
+ Err(e) => Err(e.into())
}
}
use walkdir::WalkDir;
- let walker = WalkDir::new(&base).same_file_system(true).into_iter();
+ let walker = WalkDir::new(&base).into_iter();
// make sure we skip .chunks (and other hidden files to keep it simple)
fn is_hidden(entry: &walkdir::DirEntry) -> bool {
entry.file_name()
.to_str()
- .map(|s| s.starts_with("."))
+ .map(|s| s.starts_with('.'))
.unwrap_or(false)
}
let handle_entry_err = |err: walkdir::Error| {
if let Some(inner) = err.io_error() {
- let path = err.path().unwrap_or(Path::new(""));
- match inner.kind() {
- io::ErrorKind::PermissionDenied => {
+ if let Some(path) = err.path() {
+ if inner.kind() == io::ErrorKind::PermissionDenied {
// only allow to skip ext4 fsck directory, avoid GC if, for example,
// a user got file permissions wrong on datastore rsync to new server
if err.depth() > 1 || !path.ends_with("lost+found") {
- bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
+ bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
}
- },
- _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
+ } else {
+ bail!("unexpected error on datastore traversal: {} - {:?}", inner, path)
+ }
+ } else {
+ bail!("unexpected error on datastore traversal: {}", inner)
}
}
Ok(())
index: I,
file_name: &Path, // only used for error reporting
status: &mut GarbageCollectionStatus,
- worker: &WorkerTask,
+ worker: &dyn TaskState,
) -> Result<(), Error> {
status.index_file_count += 1;
status.index_data_bytes += index.index_bytes();
for pos in 0..index.index_count() {
- worker.fail_on_abort()?;
+ worker.check_abort()?;
tools::fail_on_shutdown()?;
let digest = index.index_digest(pos).unwrap();
- if let Err(err) = self.chunk_store.touch_chunk(digest) {
- bail!("unable to access chunk {}, required by {:?} - {}",
- proxmox::tools::digest_to_hex(digest), file_name, err);
+ if !self.chunk_store.cond_touch_chunk(digest, false)? {
+ crate::task_warn!(
+ worker,
+ "warning: unable to access non-existent chunk {}, required by {:?}",
+ proxmox::tools::digest_to_hex(digest),
+ file_name,
+ );
+
+ // touch any corresponding .bad files to keep them around; this way they
+ // are removed automatically once the chunk is rewritten correctly, or once
+ // no index file references the chunk anymore (we never reach this loop then)
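+ // verification may have renamed a corrupt chunk to one of up to ten
+ // numbered copies (<digest>.0.bad .. <digest>.9.bad), so probe all of them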
+ for i in 0..=9 {
+ let bad_ext = format!("{}.bad", i);
+ let mut bad_path = PathBuf::new();
+ bad_path.push(self.chunk_path(digest).0);
+ bad_path.set_extension(bad_ext);
+ self.chunk_store.cond_touch_path(&bad_path, false)?;
+ }
}
}
Ok(())
}
- fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {
+ fn mark_used_chunks(
+ &self,
+ status: &mut GarbageCollectionStatus,
+ worker: &dyn TaskState,
+ ) -> Result<(), Error> {
let image_list = self.list_images()?;
+ let image_count = image_list.len();
+
+ let mut last_percentage: usize = 0;
+
+ let mut strange_paths_count: u64 = 0;
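+ // counts index files found outside the expected <type>/<id>/<timestamp>/
+ // directory scheme; their chunks are still marked as used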
- for path in image_list {
+ for (i, img) in image_list.into_iter().enumerate() {
- worker.fail_on_abort()?;
+ worker.check_abort()?;
tools::fail_on_shutdown()?;
- if let Ok(archive_type) = archive_type(&path) {
- if archive_type == ArchiveType::FixedIndex {
- let index = self.open_fixed_reader(&path)?;
- self.index_mark_used_chunks(index, &path, status, worker)?;
- } else if archive_type == ArchiveType::DynamicIndex {
- let index = self.open_dynamic_reader(&path)?;
- self.index_mark_used_chunks(index, &path, status, worker)?;
+ if let Some(backup_dir_path) = img.parent() {
+ let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
+ if let Some(backup_dir_str) = backup_dir_path.to_str() {
+ if BackupDir::from_str(backup_dir_str).is_err() {
+ strange_paths_count += 1;
+ }
+ }
+ }
+
+ match std::fs::File::open(&img) {
+ Ok(file) => {
+ if let Ok(archive_type) = archive_type(&img) {
+ if archive_type == ArchiveType::FixedIndex {
+ let index = FixedIndexReader::new(file).map_err(|e| {
+ format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
+ })?;
+ self.index_mark_used_chunks(index, &img, status, worker)?;
+ } else if archive_type == ArchiveType::DynamicIndex {
+ let index = DynamicIndexReader::new(file).map_err(|e| {
+ format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
+ })?;
+ self.index_mark_used_chunks(index, &img, status, worker)?;
+ }
+ }
}
+ Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
+ Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
+ }
+
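+ // emit at most one progress message per percent step to keep the task log short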
+ let percentage = (i + 1) * 100 / image_count;
+ if percentage > last_percentage {
+ crate::task_log!(
+ worker,
+ "marked {}% ({} of {} index files)",
+ percentage,
+ i + 1,
+ image_count,
+ );
+ last_percentage = percentage;
}
}
+ if strange_paths_count > 0 {
+ crate::task_log!(
+ worker,
+ "found (and marked) {} index files outside of expected directory scheme",
+ strange_paths_count,
+ );
+ }
+
Ok(())
}
}
pub fn garbage_collection_running(&self) -> bool {
- if let Ok(_) = self.gc_mutex.try_lock() { false } else { true }
+ !matches!(self.gc_mutex.try_lock(), Ok(_))
}
- pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {
+ pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
+ // avoid running GC when an old daemon process still has a running
+ // backup writer, which is not safe as we have no "oldest writer"
+ // information and thus no safe atime cutoff
let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
- let now = unsafe { libc::time(std::ptr::null_mut()) };
-
- let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(now);
+ let phase1_start_time = proxmox::tools::time::epoch_i64();
+ let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
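+ // phase2 may only sweep chunks whose atime predates both the start of
+ // phase1 and the oldest still-running backup writer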
let mut gc_status = GarbageCollectionStatus::default();
- gc_status.upid = Some(worker.to_string());
-
- worker.log("Start GC phase1 (mark used chunks)");
-
- self.mark_used_chunks(&mut gc_status, &worker)?;
-
- worker.log("Start GC phase2 (sweep unused chunks)");
- self.chunk_store.sweep_unused_chunks(oldest_writer, &mut gc_status, &worker)?;
-
- worker.log(&format!("Removed bytes: {}", gc_status.removed_bytes));
- worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
+ gc_status.upid = Some(upid.to_string());
+
+ crate::task_log!(worker, "Start GC phase1 (mark used chunks)");
+
+ self.mark_used_chunks(&mut gc_status, worker)?;
+
+ crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
+ self.chunk_store.sweep_unused_chunks(
+ oldest_writer,
+ phase1_start_time,
+ &mut gc_status,
+ worker,
+ )?;
+
+ crate::task_log!(
+ worker,
+ "Removed garbage: {}",
+ HumanByte::from(gc_status.removed_bytes),
+ );
+ crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
if gc_status.pending_bytes > 0 {
- worker.log(&format!("Pending removals: {} bytes ({} chunks)", gc_status.pending_bytes, gc_status.pending_chunks));
+ crate::task_log!(
+ worker,
+ "Pending removals: {} (in {} chunks)",
+ HumanByte::from(gc_status.pending_bytes),
+ gc_status.pending_chunks,
+ );
+ }
+ if gc_status.removed_bad > 0 {
+ crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
+ }
+
+ if gc_status.still_bad > 0 {
+ crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
}
- worker.log(&format!("Original data bytes: {}", gc_status.index_data_bytes));
+ crate::task_log!(
+ worker,
+ "Original data usage: {}",
+ HumanByte::from(gc_status.index_data_bytes),
+ );
if gc_status.index_data_bytes > 0 {
- let comp_per = (gc_status.disk_bytes*100)/gc_status.index_data_bytes;
- worker.log(&format!("Disk bytes: {} ({} %)", gc_status.disk_bytes, comp_per));
+ let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
+ crate::task_log!(
+ worker,
+ "On-Disk usage: {} ({:.2}%)",
+ HumanByte::from(gc_status.disk_bytes),
+ comp_per,
+ );
}
- worker.log(&format!("Disk chunks: {}", gc_status.disk_chunks));
+ crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
+
+ let deduplication_factor = if gc_status.disk_bytes > 0 {
+ (gc_status.index_data_bytes as f64)/(gc_status.disk_bytes as f64)
+ } else {
+ 1.0
+ };
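+ // logical bytes referenced by indexes per byte actually stored on disk;
+ // a factor of 1.0 means deduplication saved nothing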
+
+ crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
if gc_status.disk_chunks > 0 {
let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
- worker.log(&format!("Average chunk size: {}", avg_chunk));
+ crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
+ }
+
+ if let Ok(serialized) = serde_json::to_string(&gc_status) {
+ let mut path = self.base_path();
+ path.push(".gc-status");
+
+ let backup_user = crate::backup::backup_user()?;
+ let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
+ // set the correct owner/group/permissions while saving file
+ // owner(rw) = backup, group(r)= backup
+ let options = CreateOptions::new()
+ .perm(mode)
+ .owner(backup_user.uid)
+ .group(backup_user.gid);
+
+ // ignore errors
+ let _ = replace_file(path, serialized.as_bytes(), options);
}
*self.last_gc_status.lock().unwrap() = gc_status;
self.chunk_store.insert_chunk(chunk, digest)
}
- pub fn verify_stored_chunk(&self, digest: &[u8; 32], expected_chunk_size: u64) -> Result<(), Error> {
- let blob = self.load_chunk(digest)?;
- blob.verify_unencrypted(expected_chunk_size as usize, digest)?;
- Ok(())
- }
-
pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
let mut path = self.base_path();
path.push(backup_dir.relative_path());
DataBlob::load_from_reader(&mut file)
}).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
}
-
+
+ pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
+ let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
+ std::fs::metadata(chunk_path).map_err(Error::from)
+ }
+
pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
digest_str,
err,
))
- }
-
+ }
+
+ /// Returns the filename to lock a manifest
+ ///
+ /// Also creates the basedir. The lockfile is located in
+ /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
+ fn manifest_lock_path(
+ &self,
+ backup_dir: &BackupDir,
+ ) -> Result<String, Error> {
+ let mut path = format!(
+ "/run/proxmox-backup/locks/{}/{}/{}",
+ self.name(),
+ backup_dir.group().backup_type(),
+ backup_dir.group().backup_id(),
+ );
+ std::fs::create_dir_all(&path)?;
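+ // the lock file lives below /run (tmpfs), so stale locks vanish
+ // automatically on reboot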
+ use std::fmt::Write;
+ write!(path, "/{}{}", backup_dir.backup_time_string(), &MANIFEST_LOCK_NAME)?;
+
+ Ok(path)
+ }
+
+ fn lock_manifest(
+ &self,
+ backup_dir: &BackupDir,
+ ) -> Result<File, Error> {
+ let path = self.manifest_lock_path(backup_dir)?;
+
+ // update_manifest should never take a long time, so if someone else has
+ // the lock we can simply block a bit and should get it soon
+ open_file_locked(&path, Duration::from_secs(5), true)
+ .map_err(|err| {
+ format_err!(
+ "unable to acquire manifest lock {:?} - {}", &path, err
+ )
+ })
+ }
+
+ /// Load the manifest without a lock. Must not be written back.
pub fn load_manifest(
&self,
backup_dir: &BackupDir,
- ) -> Result<(BackupManifest, CryptMode, u64), Error> {
+ ) -> Result<(BackupManifest, u64), Error> {
let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
let raw_size = blob.raw_size();
- let crypt_mode = blob.crypt_mode()?;
let manifest = BackupManifest::try_from(blob)?;
- Ok((manifest, crypt_mode, raw_size))
+ Ok((manifest, raw_size))
+ }
+
+ /// Update the manifest of the specified snapshot. Never write a manifest directly,
+ /// only use this method - anything else may break locking guarantees.
+ pub fn update_manifest(
+ &self,
+ backup_dir: &BackupDir,
+ update_fn: impl FnOnce(&mut BackupManifest),
+ ) -> Result<(), Error> {
+
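+ // read-modify-write under the manifest lock: load the manifest, let the
+ // caller mutate it, then atomically replace the blob on disk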
+ let _guard = self.lock_manifest(backup_dir)?;
+ let (mut manifest, _) = self.load_manifest(backup_dir)?;
+
+ update_fn(&mut manifest);
+
+ let manifest = serde_json::to_value(manifest)?;
+ let manifest = serde_json::to_string_pretty(&manifest)?;
+ let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
+ let raw_data = blob.raw_data();
+
+ let mut path = self.base_path();
+ path.push(backup_dir.relative_path());
+ path.push(MANIFEST_BLOB_NAME);
+
+ // atomic replace invalidates flock - no other writes past this point!
+ replace_file(&path, raw_data, CreateOptions::new())?;
+
+ Ok(())
+ }
+
+ pub fn verify_new(&self) -> bool {
+ self.verify_new
+ }
+
+ /// Returns a list of chunks sorted by their inode number on disk.
+ /// Chunks that could not be stat'ed are placed at the end of the list.
+ pub fn get_chunks_in_order<F, A>(
+ &self,
+ index: &Box<dyn IndexFile + Send>,
+ skip_chunk: F,
+ check_abort: A,
+ ) -> Result<Vec<(usize, u64)>, Error>
+ where
+ F: Fn(&[u8; 32]) -> bool,
+ A: Fn(usize) -> Result<(), Error>,
+ {
+ let index_count = index.index_count();
+ let mut chunk_list = Vec::with_capacity(index_count);
+ use std::os::unix::fs::MetadataExt;
+ for pos in 0..index_count {
+ check_abort(pos)?;
+
+ let info = index.chunk_info(pos).unwrap();
+
+ if skip_chunk(&info.digest) {
+ continue;
+ }
+
+ let ino = match self.stat_chunk(&info.digest) {
+ Err(_) => u64::MAX, // could not stat, move to end of list
+ Ok(metadata) => metadata.ino(),
+ };
+
+ chunk_list.push((pos, ino));
+ }
+
+ // sorting by inode improves data locality, which makes reading much faster on spinning disks
+ chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
+
+ Ok(chunk_list)
}
}