git.proxmox.com Git - proxmox-backup.git/blobdiff - src/backup/datastore.rs
more clippy fixups
index d2ef1e3da5136e95ce9bc1478337489a970a2ed8..63b07f303ff62a0c11f5ed70b4557a97606d3e5c 100644 (file)
-use failure::*;
+use std::collections::{HashSet, HashMap};
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+use std::convert::TryFrom;
 
-use chrono::prelude::*;
-
-use std::path::{PathBuf, Path};
-use std::collections::HashMap;
+use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
-use std::sync::{Mutex, Arc};
+use serde_json::Value;
 
-use std::os::unix::io::AsRawFd;
+use proxmox::tools::fs::{replace_file, CreateOptions};
 
-use crate::tools;
+use super::backup_info::{BackupGroup, BackupDir};
+use super::chunk_store::ChunkStore;
+use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
+use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
+use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
+use super::index::*;
+use super::{DataBlob, ArchiveType, archive_type};
 use crate::config::datastore;
-use super::chunk_store::*;
-use super::image_index::*;
-use super::archive_index::*;
+use crate::task::TaskState;
+use crate::tools;
+use crate::tools::format::HumanByte;
+use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
+use crate::api2::types::{GarbageCollectionStatus, Userid};
+use crate::server::UPID;
 
-use chrono::{Utc, TimeZone};
+lazy_static! {
+    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
+}
 
+/// Datastore Management
+///
+/// A Datastore can store several backups and provides the
+/// management interface for them.
 pub struct DataStore {
     chunk_store: Arc<ChunkStore>,
     gc_mutex: Mutex<bool>,
-}
-
-#[derive(Debug)]
-pub struct BackupInfo {
-    pub backup_type: String,
-    pub backup_id: String,
-    pub backup_time: DateTime<Utc>,
-}
-
-lazy_static!{
-    static ref datastore_map: Mutex<HashMap<String, Arc<DataStore>>> =  Mutex::new(HashMap::new());
+    last_gc_status: Mutex<GarbageCollectionStatus>,
 }
 
 impl DataStore {
 
     pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {
 
-        let config = datastore::config()?;
-        let (_, store_config) = config.sections.get(name)
-            .ok_or(format_err!("no such datastore '{}'", name))?;
+        let (config, _digest) = datastore::config()?;
+        let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;
 
-        let path = store_config["path"].as_str().unwrap();
-
-        let mut map = datastore_map.lock().unwrap();
+        let mut map = DATASTORE_MAP.lock().unwrap();
 
         if let Some(datastore) = map.get(name) {
             // Compare Config - if changed, create new Datastore object!
-            if datastore.chunk_store.base == PathBuf::from(path) {
+            if datastore.chunk_store.base == PathBuf::from(&config.path) {
                 return Ok(datastore.clone());
             }
         }
 
-        if let Ok(datastore) = DataStore::open(name)  {
-            let datastore = Arc::new(datastore);
-            map.insert(name.to_string(), datastore.clone());
-            return Ok(datastore);
-        }
+        let datastore = DataStore::open(name)?;
+
+        let datastore = Arc::new(datastore);
+        map.insert(name.to_string(), datastore.clone());
 
-        bail!("store not found");
+        Ok(datastore)
     }
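
A minimal caller sketch (the helper name and the "store1" datastore name are made up; it relies on the module's existing imports): lookup_datastore caches instances in DATASTORE_MAP, so repeated lookups return the same Arc unless the configured path changed.

fn example_lookup() -> Result<(), Error> {
    // the first call opens the chunk store; later calls reuse the cached Arc<DataStore>
    let store = DataStore::lookup_datastore("store1")?;
    println!("datastore '{}' at {:?}", store.name(), store.base_path());
    Ok(())
}
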
 
     pub fn open(store_name: &str) -> Result<Self, Error> {
 
-        let config = datastore::config()?;
+        let (config, _digest) = datastore::config()?;
         let (_, store_config) = config.sections.get(store_name)
             .ok_or(format_err!("no such datastore '{}'", store_name))?;
 
         let path = store_config["path"].as_str().unwrap();
 
+        Self::open_with_path(store_name, Path::new(path))
+    }
+
+    pub fn open_with_path(store_name: &str, path: &Path) -> Result<Self, Error> {
         let chunk_store = ChunkStore::open(store_name, path)?;
 
+        let gc_status = GarbageCollectionStatus::default();
+
         Ok(Self {
             chunk_store: Arc::new(chunk_store),
             gc_mutex: Mutex::new(false),
+            last_gc_status: Mutex::new(gc_status),
         })
     }
 
-    pub fn create_image_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<ImageIndexWriter, Error> {
+    pub fn get_chunk_iterator(
+        &self,
+    ) -> Result<
+        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)>,
+        Error
+    > {
+        self.chunk_store.get_chunk_iterator()
+    }
+
+    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {
 
-        let index = ImageIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;
+        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;
 
         Ok(index)
     }
 
-    pub fn open_image_reader<P: AsRef<Path>>(&self, filename: P) -> Result<ImageIndexReader, Error> {
+    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {
 
-        let index = ImageIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;
+        let full_path = self.chunk_store.relative_path(filename.as_ref());
+
+        let index = FixedIndexReader::open(&full_path)?;
 
         Ok(index)
     }
 
-    pub fn create_archive_writer<P: AsRef<Path>>(
+    pub fn create_dynamic_writer<P: AsRef<Path>>(
         &self, filename: P,
-        chunk_size: usize
-    ) -> Result<ArchiveIndexWriter, Error> {
+    ) -> Result<DynamicIndexWriter, Error> {
 
-        let index = ArchiveIndexWriter::create(
-            self.chunk_store.clone(), filename.as_ref(), chunk_size)?;
+        let index = DynamicIndexWriter::create(
+            self.chunk_store.clone(), filename.as_ref())?;
 
         Ok(index)
     }
 
-    pub fn open_archive_reader<P: AsRef<Path>>(&self, filename: P) -> Result<ArchiveIndexReader, Error> {
+    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {
+
+        let full_path = self.chunk_store.relative_path(filename.as_ref());
 
-        let index = ArchiveIndexReader::open(self.chunk_store.clone(), filename.as_ref())?;
+        let index = DynamicIndexReader::open(&full_path)?;
 
         Ok(index)
     }
 
+    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
+    where
+        P: AsRef<Path>,
+    {
+        let filename = filename.as_ref();
+        let out: Box<dyn IndexFile + Send> =
+            match archive_type(filename)? {
+                ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
+                ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
+                _ => bail!("cannot open index file of unknown type: {:?}", filename),
+            };
+        Ok(out)
+    }
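
A small usage sketch (hypothetical helper name, module imports assumed): open_index dispatches on archive_type(), so fixed and dynamic index files can be handled uniformly through the IndexFile trait object, while other archive types are rejected with an error.

fn example_index_chunks(store: &DataStore, filename: &Path) -> Result<(), Error> {
    // works for both fixed (.fidx-style) and dynamic (.didx-style) index files
    let index = store.open_index(filename)?;
    println!("{:?} references {} chunks", filename, index.index_count());
    Ok(())
}
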
+
+    pub fn name(&self) -> &str {
+        self.chunk_store.name()
+    }
+
     pub fn base_path(&self) -> PathBuf {
         self.chunk_store.base_path()
     }
 
-    pub fn get_backup_dir(
-        &self,
-        backup_type: &str,
-        backup_id: &str,
-        backup_time: DateTime<Utc>,
-    ) ->  PathBuf  {
+    /// Cleanup a backup directory
+    ///
+    /// Removes all files not mentioned in the manifest.
+    pub fn cleanup_backup_dir(
+        &self,
+        backup_dir: &BackupDir,
+        manifest: &BackupManifest,
+    ) -> Result<(), Error> {
+
+        let mut full_path = self.base_path();
+        full_path.push(backup_dir.relative_path());
 
-        let mut relative_path = PathBuf::new();
+        let mut wanted_files = HashSet::new();
+        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
+        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
+        manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });
 
-        relative_path.push(backup_type);
+        for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
+            if let Ok(item) = item {
+                if let Some(file_type) = item.file_type() {
+                    if file_type != nix::dir::Type::File { continue; }
+                }
+                let file_name = item.file_name().to_bytes();
+                if file_name == b"." || file_name == b".." { continue; };
 
-        relative_path.push(backup_id);
+                if let Ok(name) = std::str::from_utf8(file_name) {
+                    if wanted_files.contains(name) { continue; }
+                }
+                println!("remove unused file {:?}", item.file_name());
+                let dirfd = item.parent_fd();
+                let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
+            }
+        }
 
-        let date_str = backup_time.format("%Y-%m-%dT%H:%M:%S").to_string();
+        Ok(())
+    }
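
A hedged sketch of the intended call pattern (the helper name is invented): load the snapshot's manifest first, then let cleanup_backup_dir remove any regular file the manifest does not mention.

fn example_cleanup(store: &DataStore, snapshot: &BackupDir) -> Result<(), Error> {
    let (manifest, _raw_size) = store.load_manifest(snapshot)?;
    // removes stray files in the snapshot dir; the manifest, client log and
    // any non-file entries are kept
    store.cleanup_backup_dir(snapshot, &manifest)?;
    Ok(())
}
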
 
-        relative_path.push(&date_str);
+    /// Returns the absolute path for a backup_group
+    pub fn group_path(&self, backup_group: &BackupGroup) -> PathBuf {
+        let mut full_path = self.base_path();
+        full_path.push(backup_group.group_path());
+        full_path
+    }
 
-        relative_path
+    /// Returns the absolute path for backup_dir
+    pub fn snapshot_path(&self, backup_dir: &BackupDir) -> PathBuf {
+        let mut full_path = self.base_path();
+        full_path.push(backup_dir.relative_path());
+        full_path
     }
 
-    pub fn create_backup_dir(
-        &self,
-        backup_type: &str,
-        backup_id: &str,
-        backup_time: i64,
-    ) ->  Result<PathBuf, Error> {
-        let mut relative_path = PathBuf::new();
+    /// Remove a complete backup group including all snapshots
+    pub fn remove_backup_group(&self, backup_group: &BackupGroup) ->  Result<(), Error> {
 
-        relative_path.push(backup_type);
+        let full_path = self.group_path(backup_group);
 
-        relative_path.push(backup_id);
+        let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;
 
-        let dt = Utc.timestamp(backup_time, 0);
-        let date_str = dt.format("%Y-%m-%dT%H:%M:%S").to_string();
+        log::info!("removing backup group {:?}", full_path);
+        std::fs::remove_dir_all(&full_path)
+            .map_err(|err| {
+                format_err!(
+                    "removing backup group {:?} failed - {}",
+                    full_path,
+                    err,
+                )
+            })?;
 
-        println!("date: {}", date_str);
+        Ok(())
+    }
 
-        relative_path.push(&date_str);
+    /// Remove a backup directory including all content
+    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) ->  Result<(), Error> {
 
+        let full_path = self.snapshot_path(backup_dir);
 
-        let mut full_path = self.base_path();
-        full_path.push(&relative_path);
+        let _guard;
+        if !force {
+            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
+        }
 
-        std::fs::create_dir_all(&full_path)?;
+        log::info!("removing backup snapshot {:?}", full_path);
+        std::fs::remove_dir_all(&full_path)
+            .map_err(|err| {
+                format_err!(
+                    "removing backup snapshot {:?} failed - {}",
+                    full_path,
+                    err,
+                )
+            })?;
 
-        Ok(relative_path)
+        Ok(())
     }
 
-    pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
-        let path = self.base_path();
+    /// Returns the time of the last successful backup
+    ///
+    /// Or None if there is no backup in the group (or the group dir does not exist).
+    pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
+        let base_path = self.base_path();
+        let mut group_path = base_path.clone();
+        group_path.push(backup_group.group_path());
 
-        let mut list = vec![];
+        if group_path.exists() {
+            backup_group.last_successful_backup(&base_path)
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Returns the backup owner.
+    ///
+    /// The backup owner is the user who first created the backup group.
+    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
+        let mut full_path = self.base_path();
+        full_path.push(backup_group.group_path());
+        full_path.push("owner");
+        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
+        Ok(owner.trim_end().parse()?) // remove trailing newline
+    }
 
-        lazy_static! {
-            static ref BACKUP_TYPE_REGEX: regex::Regex = regex::Regex::new(r"^(host|vm|ct)$").unwrap();
-            static ref BACKUP_ID_REGEX: regex::Regex = regex::Regex::new(r"^[A-Za-z][A-Za-z0-9_-]+$").unwrap();
-            static ref BACKUP_DATE_REGEX: regex::Regex = regex::Regex::new(
-                r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}$").unwrap();
+    /// Set the backup owner.
+    pub fn set_owner(
+        &self,
+        backup_group: &BackupGroup,
+        userid: &Userid,
+        force: bool,
+    ) -> Result<(), Error> {
+        let mut path = self.base_path();
+        path.push(backup_group.group_path());
+        path.push("owner");
+
+        let mut open_options = std::fs::OpenOptions::new();
+        open_options.write(true);
+        open_options.truncate(true);
+
+        if force {
+            open_options.create(true);
+        } else {
+            open_options.create_new(true);
         }
 
-        tools::scandir(libc::AT_FDCWD, &path, &BACKUP_TYPE_REGEX, |l0_fd, backup_type, file_type| {
-            if file_type != nix::dir::Type::Directory { return Ok(()); }
-            tools::scandir(l0_fd, backup_type, &BACKUP_ID_REGEX, |l1_fd, backup_id, file_type| {
-                if file_type != nix::dir::Type::Directory { return Ok(()); }
-                tools::scandir(l1_fd, backup_id, &BACKUP_DATE_REGEX, |_, backup_time, file_type| {
-                    if file_type != nix::dir::Type::Directory { return Ok(()); }
+        let mut file = open_options.open(&path)
+            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;
 
-                    let dt = Utc.datetime_from_str(backup_time, "%Y-%m-%dT%H:%M:%S")?;
+        writeln!(file, "{}", userid)
+            .map_err(|err| format_err!("unable to write owner file  {:?} - {}", path, err))?;
 
-                    list.push(BackupInfo {
-                        backup_type: backup_type.to_owned(),
-                        backup_id: backup_id.to_owned(),
-                        backup_time: dt,
-                    });
+        Ok(())
+    }
 
-                    Ok(())
-                })
-            })
-        })?;
+    /// Create (if it does not already exist) and lock a backup group
+    ///
+    /// Also sets the owner to 'userid'. If the group already exists, it returns the
+    /// current owner (instead of setting the owner).
+    ///
+    /// This also acquires an exclusive lock on the directory and returns the lock guard.
+    pub fn create_locked_backup_group(
+        &self,
+        backup_group: &BackupGroup,
+        userid: &Userid,
+    ) -> Result<(Userid, DirLockGuard), Error> {
+        // create intermediate path first:
+        let base_path = self.base_path();
+
+        let mut full_path = base_path.clone();
+        full_path.push(backup_group.backup_type());
+        std::fs::create_dir_all(&full_path)?;
 
-        Ok(list)
+        full_path.push(backup_group.backup_id());
+
+        // create the last component now
+        match std::fs::create_dir(&full_path) {
+            Ok(_) => {
+                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
+                self.set_owner(backup_group, userid, false)?;
+                let owner = self.get_owner(backup_group)?; // just to be sure
+                Ok((owner, guard))
+            }
+            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
+                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
+                let owner = self.get_owner(backup_group)?; // just to be sure
+                Ok((owner, guard))
+            }
+            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
+        }
+    }
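
A sketch of how a backup writer might claim a group (hypothetical function name; assumes the caller already holds a BackupGroup and Userid): the first writer becomes the owner, later writers get the existing owner back, and in both cases the group directory stays locked via the returned guard.

fn example_claim_group(store: &DataStore, group: &BackupGroup, user: &Userid) -> Result<(), Error> {
    // _guard keeps the group locked for the duration of the backup
    let (owner, _guard) = store.create_locked_backup_group(group, user)?;
    println!("group {:?} is owned by {}", store.group_path(group), owner);
    Ok(())
}
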
+
+    /// Creates a new backup snapshot inside a BackupGroup
+    ///
+    /// The BackupGroup directory needs to exist.
+    pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
+        -> Result<(PathBuf, bool, DirLockGuard), Error>
+    {
+        let relative_path = backup_dir.relative_path();
+        let mut full_path = self.base_path();
+        full_path.push(&relative_path);
+
+        let lock = ||
+            lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");
+
+        match std::fs::create_dir(&full_path) {
+            Ok(_) => Ok((relative_path, true, lock()?)),
+            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
+            Err(e) => Err(e.into())
+        }
     }
 
     pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
@@ -212,13 +372,32 @@ impl DataStore {
                 .map(|s| s.starts_with("."))
                 .unwrap_or(false)
         }
-
+        let handle_entry_err = |err: walkdir::Error| {
+            if let Some(inner) = err.io_error() {
+                let path = err.path().unwrap_or(Path::new(""));
+                match inner.kind() {
+                    io::ErrorKind::PermissionDenied => {
+                        // only allow skipping the ext4 "lost+found" directory; abort GC if,
+                        // for example, a user got file permissions wrong when rsyncing the
+                        // datastore to a new server
+                        if err.depth() > 1 || !path.ends_with("lost+found") {
+                            bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
+                        }
+                    },
+                    _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
+                }
+            }
+            Ok(())
+        };
         for entry in walker.filter_entry(|e| !is_hidden(e)) {
-            let path = entry?.into_path();
-            if let Some(ext) = path.extension() {
-                if ext == "iidx" {
-                    list.push(path);
-                } else if ext == "aidx" {
+            let path = match entry {
+                Ok(entry) => entry.into_path(),
+                Err(err) => {
+                    handle_entry_err(err)?;
+                    continue
+                },
+            };
+            if let Ok(archive_type) = archive_type(&path) {
+                if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                     list.push(path);
                 }
             }
@@ -227,48 +406,249 @@ impl DataStore {
         Ok(list)
     }
 
-    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
+    // mark chunks used by ``index`` as in use
+    fn index_mark_used_chunks<I: IndexFile>(
+        &self,
+        index: I,
+        file_name: &Path, // only used for error reporting
+        status: &mut GarbageCollectionStatus,
+        worker: &dyn TaskState,
+    ) -> Result<(), Error> {
+
+        status.index_file_count += 1;
+        status.index_data_bytes += index.index_bytes();
+
+        for pos in 0..index.index_count() {
+            worker.check_abort()?;
+            tools::fail_on_shutdown()?;
+            let digest = index.index_digest(pos).unwrap();
+            if let Err(err) = self.chunk_store.touch_chunk(digest) {
+                crate::task_warn!(
+                    worker,
+                    "warning: unable to access chunk {}, required by {:?} - {}",
+                    proxmox::tools::digest_to_hex(digest),
+                    file_name,
+                    err,
+                );
+            }
+        }
+        Ok(())
+    }
+
+    fn mark_used_chunks(
+        &self,
+        status: &mut GarbageCollectionStatus,
+        worker: &dyn TaskState,
+    ) -> Result<(), Error> {
 
         let image_list = self.list_images()?;
 
+        let image_count = image_list.len();
+
+        let mut done = 0;
+
+        let mut last_percentage: usize = 0;
+
         for path in image_list {
-            if let Some(ext) = path.extension() {
-                if ext == "iidx" {
-                    let index = self.open_image_reader(&path)?;
-                    index.mark_used_chunks(status)?;
-                } else if ext == "aidx" {
-                    let index = self.open_archive_reader(&path)?;
-                    index.mark_used_chunks(status)?;
+
+            worker.check_abort()?;
+            tools::fail_on_shutdown()?;
+
+            if let Ok(archive_type) = archive_type(&path) {
+                if archive_type == ArchiveType::FixedIndex {
+                    let index = self.open_fixed_reader(&path)?;
+                    self.index_mark_used_chunks(index, &path, status, worker)?;
+                } else if archive_type == ArchiveType::DynamicIndex {
+                    let index = self.open_dynamic_reader(&path)?;
+                    self.index_mark_used_chunks(index, &path, status, worker)?;
                 }
             }
+            done += 1;
+
+            let percentage = done*100/image_count;
+            if percentage > last_percentage {
+                crate::task_log!(
+                    worker,
+                    "percentage done: phase1 {}% ({} of {} index files)",
+                    percentage,
+                    done,
+                    image_count,
+                );
+                last_percentage = percentage;
+            }
         }
 
         Ok(())
-   }
+    }
+
+    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
+        self.last_gc_status.lock().unwrap().clone()
+    }
 
-    pub fn garbage_collection(&self) -> Result<(), Error> {
+    pub fn garbage_collection_running(&self) -> bool {
+        self.gc_mutex.try_lock().is_err()
+    }
+
+    pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
 
         if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {
 
+            // avoid running GC while an old daemon process still has a running
+            // backup writer, which is not safe as we have no "oldest writer"
+            // information and thus no safe atime cutoff
+            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;
+
+            let phase1_start_time = proxmox::tools::time::epoch_i64();
+            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);
+
             let mut gc_status = GarbageCollectionStatus::default();
-            gc_status.used_bytes = 0;
+            gc_status.upid = Some(upid.to_string());
+
+            crate::task_log!(worker, "Start GC phase1 (mark used chunks)");
+
+            self.mark_used_chunks(&mut gc_status, worker)?;
+
+            crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
+            self.chunk_store.sweep_unused_chunks(
+                oldest_writer,
+                phase1_start_time,
+                &mut gc_status,
+                worker,
+            )?;
+
+            crate::task_log!(
+                worker,
+                "Removed garbage: {}",
+                HumanByte::from(gc_status.removed_bytes),
+            );
+            crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
+            if gc_status.pending_bytes > 0 {
+                crate::task_log!(
+                    worker,
+                    "Pending removals: {} (in {} chunks)",
+                    HumanByte::from(gc_status.pending_bytes),
+                    gc_status.pending_chunks,
+                );
+            }
+            if gc_status.removed_bad > 0 {
+                crate::task_log!(worker, "Removed bad files: {}", gc_status.removed_bad);
+            }
 
-            println!("Start GC phase1 (mark chunks)");
+            crate::task_log!(
+                worker,
+                "Original data usage: {}",
+                HumanByte::from(gc_status.index_data_bytes),
+            );
+
+            if gc_status.index_data_bytes > 0 {
+                let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
+                crate::task_log!(
+                    worker,
+                    "On-Disk usage: {} ({:.2}%)",
+                    HumanByte::from(gc_status.disk_bytes),
+                    comp_per,
+                );
+            }
 
-            self.mark_used_chunks(&mut gc_status)?;
+            crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
 
-            println!("Start GC phase2 (sweep unused chunks)");
-            self.chunk_store.sweep_unused_chunks(&mut gc_status)?;
+            if gc_status.disk_chunks > 0 {
+                let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
+                crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
+            }
 
-            println!("Used bytes: {}", gc_status.used_bytes);
-            println!("Used chunks: {}", gc_status.used_chunks);
-            println!("Disk bytes: {}", gc_status.disk_bytes);
-            println!("Disk chunks: {}", gc_status.disk_chunks);
+            *self.last_gc_status.lock().unwrap() = gc_status;
 
         } else {
-            println!("Start GC failed - (already running/locked)");
+            bail!("Start GC failed - (already running/locked)");
         }
 
         Ok(())
     }
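
A hedged invocation sketch (helper name invented; worker and upid come from the calling task): GC marks every chunk referenced by the datastore's index files in phase 1, then sweeps unused chunks, with the oldest running writer serving as an additional atime cutoff.

fn example_run_gc(store: &DataStore, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {
    // garbage_collection() takes gc_mutex itself; this check only gives a nicer error
    if store.garbage_collection_running() {
        bail!("garbage collection is already running");
    }
    store.garbage_collection(worker, upid)?;
    let status = store.last_gc_status();
    println!("removed {} chunks, {} pending removal", status.removed_chunks, status.pending_chunks);
    Ok(())
}
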
+
+    pub fn try_shared_chunk_store_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
+        self.chunk_store.try_shared_lock()
+    }
+
+    pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) {
+        self.chunk_store.chunk_path(digest)
+    }
+
+    pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
+        self.chunk_store.cond_touch_chunk(digest, fail_if_not_exist)
+    }
+
+    pub fn insert_chunk(
+        &self,
+        chunk: &DataBlob,
+        digest: &[u8; 32],
+    ) -> Result<(bool, u64), Error> {
+        self.chunk_store.insert_chunk(chunk, digest)
+    }
+
+    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
+        let mut path = self.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(filename);
+
+        proxmox::try_block!({
+            let mut file = std::fs::File::open(&path)?;
+            DataBlob::load_from_reader(&mut file)
+        }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
+    }
+
+
+    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+
+        let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
+
+        proxmox::try_block!({
+            let mut file = std::fs::File::open(&chunk_path)?;
+            DataBlob::load_from_reader(&mut file)
+        }).map_err(|err| format_err!(
+            "store '{}', unable to load chunk '{}' - {}",
+            self.name(),
+            digest_str,
+            err,
+        ))
+    }
+
+    pub fn load_manifest(
+        &self,
+        backup_dir: &BackupDir,
+    ) -> Result<(BackupManifest, u64), Error> {
+        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        let raw_size = blob.raw_size();
+        let manifest = BackupManifest::try_from(blob)?;
+        Ok((manifest, raw_size))
+    }
+
+    pub fn load_manifest_json(
+        &self,
+        backup_dir: &BackupDir,
+    ) -> Result<Value, Error> {
+        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
+        // no expected digest available
+        let manifest_data = blob.decode(None, None)?;
+        let manifest: Value = serde_json::from_slice(&manifest_data[..])?;
+        Ok(manifest)
+    }
+
+    pub fn store_manifest(
+        &self,
+        backup_dir: &BackupDir,
+        manifest: Value,
+    ) -> Result<(), Error> {
+        let manifest = serde_json::to_string_pretty(&manifest)?;
+        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
+        let raw_data = blob.raw_data();
+
+        let mut path = self.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(MANIFEST_BLOB_NAME);
+
+        replace_file(&path, raw_data, CreateOptions::new())?;
+
+        Ok(())
+    }
 }