git.proxmox.com Git - proxmox-backup.git/blobdiff - src/backup/chunk_store.rs
backup: touch all chunks, even if they exist
index c166bbb5a9803e2fb8680e83aaac46565ada770c..1d9de70a8387b1650f54415ec26904120730817d 100644
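
With this change, ChunkStore::insert_chunk() updates the atime of a chunk that already exists on disk instead of returning early, so chunks re-used by a running backup are not removed by the atime-based garbage collection. The standalone sketch below is an editorial illustration only (the helper name gc_min_atime and the timestamps are invented); it mirrors the cutoff computed in sweep_unused_chunks() further down: any chunk file whose atime falls below that cutoff is unlinked in phase 2, which is why even pre-existing chunks get touched during backup.

    // Editorial sketch, not part of this diff: the atime cutoff that
    // sweep_unused_chunks() computes in phase 2 of garbage collection.
    // The helper name gc_min_atime is invented for illustration.
    fn gc_min_atime(phase1_start_time: i64, oldest_writer: i64) -> i64 {
        // go back at least 24h (see the relatime mount option)
        let mut min_atime = phase1_start_time - 3600 * 24;
        // never sweep chunks an even older, still-running writer may reference
        if oldest_writer < min_atime {
            min_atime = oldest_writer;
        }
        // 5 minute safety gap
        min_atime - 300
    }

    fn main() {
        let phase1_start = 1_600_000_000i64;         // example unix timestamp
        let oldest_writer = phase1_start - 2 * 3600; // a backup started 2h ago
        let cutoff = gc_min_atime(phase1_start, oldest_writer);
        // a chunk touched during the backup survives, a stale one is swept
        println!("recently touched swept: {}", (phase1_start - 60) < cutoff);          // false
        println!("stale (3 days old) swept: {}", (phase1_start - 3 * 24 * 3600) < cutoff); // true
    }
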
@@ -1,49 +1,44 @@
-use failure::*;
+use anyhow::{bail, format_err, Error};
+
 use std::path::{Path, PathBuf};
 use std::io::Write;
-use std::time::Duration;
-
-use openssl::sha;
-use std::sync::Mutex;
-
-use std::fs::File;
+use std::sync::{Arc, Mutex};
 use std::os::unix::io::AsRawFd;
 
+use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
+
 use crate::tools;
+use crate::api2::types::GarbageCollectionStatus;
 
-pub struct GarbageCollectionStatus {
-    pub used_bytes: usize,
-    pub used_chunks: usize,
-    pub disk_bytes: usize,
-    pub disk_chunks: usize,
-}
-
-impl Default for GarbageCollectionStatus {
-    fn default() -> Self {
-        GarbageCollectionStatus {
-            used_bytes: 0,
-            used_chunks: 0,
-            disk_bytes: 0,
-            disk_chunks: 0,
-        }
-    }
-}
+use super::DataBlob;
+use crate::server::WorkerTask;
 
+/// File system based chunk store
 pub struct ChunkStore {
     name: String, // used for error reporting
     pub (crate) base: PathBuf,
     chunk_dir: PathBuf,
     mutex: Mutex<bool>,
-    _lockfile: File,
+    locker: Arc<Mutex<tools::ProcessLocker>>,
 }
 
 // TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
 
+pub fn verify_chunk_size(size: usize) -> Result<(), Error> {
+
+    static SIZES: [usize; 7] = [64*1024, 128*1024, 256*1024, 512*1024, 1024*1024, 2048*1024, 4096*1024];
+
+    if !SIZES.contains(&size) {
+        bail!("Got unsupported chunk size '{}'", size);
+    }
+    Ok(())
+}
+
 fn digest_to_prefix(digest: &[u8]) -> PathBuf {
 
     let mut buf = Vec::<u8>::with_capacity(2+1+2+1);
 
-    const HEX_CHARS: &'static [u8; 16] = b"0123456789abcdef";
+    const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
 
     buf.push(HEX_CHARS[(digest[0] as usize) >> 4]);
     buf.push(HEX_CHARS[(digest[0] as usize) &0xf]);
@@ -56,7 +51,6 @@ fn digest_to_prefix(digest: &[u8]) -> PathBuf {
     path.into()
 }
 
-
 impl ChunkStore {
 
     fn chunk_dir<P: AsRef<Path>>(path: P) -> PathBuf {
@@ -67,7 +61,10 @@ impl ChunkStore {
         chunk_dir
     }
 
-    pub fn create<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {
+    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
+    where
+        P: Into<PathBuf>,
+    {
 
         let base: PathBuf = path.into();
 
@@ -77,36 +74,57 @@ impl ChunkStore {
 
         let chunk_dir = Self::chunk_dir(&base);
 
-        if let Err(err) = std::fs::create_dir(&base) {
-            bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
+        let options = CreateOptions::new()
+            .owner(uid)
+            .group(gid);
+
+        let default_options = CreateOptions::new();
+
+        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+            Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
+            Ok(res) => if ! res  { nix::unistd::chown(&base, Some(uid), Some(gid))? },
         }
 
-        if let Err(err) = std::fs::create_dir(&chunk_dir) {
+        if let Err(err) = create_dir(&chunk_dir, options.clone()) {
             bail!("unable to create chunk store '{}' subdir {:?} - {}", name, chunk_dir, err);
         }
 
+        // create lock file with correct owner/group
+        let lockfile_path = Self::lockfile_path(&base);
+        proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone())?;
+
         // create 64*1024 subdirs
         let mut last_percentage = 0;
 
         for i in 0..64*1024 {
             let mut l1path = chunk_dir.clone();
             l1path.push(format!("{:04x}", i));
-            if let Err(err) = std::fs::create_dir(&l1path) {
+            if let Err(err) = create_dir(&l1path, options.clone()) {
                 bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l1path, err);
             }
             let percentage = (i*100)/(64*1024);
             if percentage != last_percentage {
-                eprintln!("Percentage done: {}", percentage);
+                eprintln!("{}%", percentage);
                 last_percentage = percentage;
             }
         }
 
+
         Self::open(name, base)
     }
 
-    pub fn open<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {
+    fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
+        let base: PathBuf = base.into();
 
-        let base: PathBuf = path.into();
+        let mut lockfile_path = base.clone();
+        lockfile_path.push(".lock");
+
+        lockfile_path
+    }
+
+    pub fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> {
+
+        let base: PathBuf = base.into();
 
         if !base.is_absolute() {
             bail!("expected absolute path - got {:?}", base);
@@ -118,32 +136,30 @@ impl ChunkStore {
             bail!("unable to open chunk store '{}' at {:?} - {}", name, chunk_dir, err);
         }
 
-        let mut lockfile_path = base.clone();
-        lockfile_path.push(".lock");
+        let lockfile_path = Self::lockfile_path(&base);
 
-        // make sure only one process/thread/task can use it
-        let lockfile = tools::open_file_locked(
-            lockfile_path, Duration::from_secs(10))?;
+        let locker = tools::ProcessLocker::new(&lockfile_path)?;
 
         Ok(ChunkStore {
             name: name.to_owned(),
             base,
             chunk_dir,
-            _lockfile: lockfile,
+            locker,
             mutex: Mutex::new(false)
         })
     }
 
-    pub fn touch_chunk(&self, digest:&[u8]) -> Result<(), Error> {
+    pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> {
+        self.cond_touch_chunk(digest, true)?;
+        Ok(())
+    }
+
+    pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
 
-        let mut chunk_path = self.chunk_dir.clone();
-        let prefix = digest_to_prefix(&digest);
-        chunk_path.push(&prefix);
-        let digest_str = tools::digest_to_hex(&digest);
-        chunk_path.push(&digest_str);
+        let (chunk_path, _digest_str) = self.chunk_path(digest);
 
-        const UTIME_NOW: i64 = ((1 << 30) - 1);
-        const UTIME_OMIT: i64 = ((1 << 30) - 2);
+        const UTIME_NOW: i64 = (1 << 30) - 1;
+        const UTIME_OMIT: i64 = (1 << 30) - 2;
 
         let times: [libc::timespec; 2] = [
             libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
@@ -153,141 +169,253 @@ impl ChunkStore {
         use nix::NixPath;
 
         let res = chunk_path.with_nix_path(|cstr| unsafe {
-            libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW)
+            let tmp = libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW);
+            nix::errno::Errno::result(tmp)
         })?;
 
-        if let Err(err) = nix::errno::Errno::result(res) {
-            bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
+        if let Err(err) = res {
+            if !fail_if_not_exist && err.as_errno() == Some(nix::errno::Errno::ENOENT) {
+                return Ok(false);
+            }
+
+            bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
         }
 
-        Ok(())
+        Ok(true)
     }
 
-    pub fn read_chunk(&self, digest:&[u8], buffer: &mut Vec<u8>) -> Result<(), Error> {
-
-        let mut chunk_path = self.chunk_dir.clone();
-        let prefix = digest_to_prefix(&digest);
-        chunk_path.push(&prefix);
-        let digest_str = tools::digest_to_hex(&digest);
-        chunk_path.push(&digest_str);
+    pub fn get_chunk_iterator(
+        &self,
+    ) -> Result<
+        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
+        Error
+    > {
+        use nix::dir::Dir;
+        use nix::fcntl::OFlag;
+        use nix::sys::stat::Mode;
 
-        let mut f = std::fs::File::open(&chunk_path)?;
+        let base_handle = Dir::open(&self.chunk_dir, OFlag::O_RDONLY, Mode::empty())
+            .map_err(|err| {
+                format_err!(
+                    "unable to open store '{}' chunk dir {:?} - {}",
+                    self.name,
+                    self.chunk_dir,
+                    err,
+                )
+            })?;
+
+        let mut done = false;
+        let mut inner: Option<tools::fs::ReadDir> = None;
+        let mut at = 0;
+        let mut percentage = 0;
+        Ok(std::iter::from_fn(move || {
+            if done {
+                return None;
+            }
 
-        let stat = nix::sys::stat::fstat(f.as_raw_fd())?;
-        let size = stat.st_size as usize;
+            loop {
+                if let Some(ref mut inner) = inner {
+                    match inner.next() {
+                        Some(Ok(entry)) => {
+                            // skip files if they're not a hash
+                            let bytes = entry.file_name().to_bytes();
+                            if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
+                                continue;
+                            }
+                            if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
+                                continue;
+                            }
+
+                            let bad = bytes.ends_with(".bad".as_bytes());
+                            return Some((Ok(entry), percentage, bad));
+                        }
+                        Some(Err(err)) => {
+                            // stop after first error
+                            done = true;
+                            // and pass the error through:
+                            return Some((Err(err), percentage, false));
+                        }
+                        None => (), // open next directory
+                    }
+                }
 
-        if buffer.capacity() < size {
-            let mut newsize =  buffer.capacity();
-            while newsize < size { newsize = newsize << 1; }
-            let additional = newsize - buffer.len();
-            buffer.reserve_exact(additional);
-        }
-        unsafe { buffer.set_len(size); }
+                inner = None;
 
-        use std::io::Read;
+                if at == 0x10000 {
+                    done = true;
+                    return None;
+                }
 
-        f.read_exact(buffer.as_mut_slice())?;
+                let subdir: &str = &format!("{:04x}", at);
+                percentage = (at * 100) / 0x10000;
+                at += 1;
+                match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
+                    Ok(dir) => {
+                        inner = Some(dir);
+                        // start reading:
+                        continue;
+                    }
+                    Err(ref err) if err.as_errno() == Some(nix::errno::Errno::ENOENT) => {
+                        // non-existing directories are okay, just keep going:
+                        continue;
+                    }
+                    Err(err) => {
+                        // other errors are fatal, so end our iteration
+                        done = true;
+                        // and pass the error through:
+                        return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
+                    }
+                }
+            }
+        }).fuse())
+    }
 
-        Ok(())
+    pub fn oldest_writer(&self) -> Option<i64> {
+        tools::ProcessLocker::oldest_shared_lock(self.locker.clone())
     }
 
-    fn sweep_old_files(&self, handle: &mut nix::dir::Dir, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
+    pub fn sweep_unused_chunks(
+        &self,
+        oldest_writer: i64,
+        phase1_start_time: i64,
+        status: &mut GarbageCollectionStatus,
+        worker: &WorkerTask,
+    ) -> Result<(), Error> {
+        use nix::sys::stat::fstatat;
+        use nix::unistd::{unlinkat, UnlinkatFlags};
+
+        let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)
 
-        let rawfd = handle.as_raw_fd();
+        if oldest_writer < min_atime {
+            min_atime = oldest_writer;
+        }
 
-        let now = unsafe { libc::time(std::ptr::null_mut()) };
+        min_atime -= 300; // add 5 mins gap for safety
 
-        for entry in handle.iter() {
-            let entry = match entry {
-                Ok(entry) => entry,
-                Err(_) => continue /* ignore */,
+        let mut last_percentage = 0;
+        let mut chunk_count = 0;
+
+        for (entry, percentage, bad) in self.get_chunk_iterator()? {
+            if last_percentage != percentage {
+                last_percentage = percentage;
+                worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
+            }
+
+            worker.fail_on_abort()?;
+            tools::fail_on_shutdown()?;
+
+            let (dirfd, entry) = match entry {
+                Ok(entry) => (entry.parent_fd(), entry),
+                Err(err) => bail!("chunk iterator on chunk store '{}' failed - {}", self.name, err),
             };
+
             let file_type = match entry.file_type() {
                 Some(file_type) => file_type,
                 None => bail!("unsupported file system type on chunk store '{}'", self.name),
             };
-            if file_type != nix::dir::Type::File { continue; }
+            if file_type != nix::dir::Type::File {
+                continue;
+            }
+
+            chunk_count += 1;
 
             let filename = entry.file_name();
-            if let Ok(stat) = nix::sys::stat::fstatat(rawfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
-                let age = now - stat.st_atime;
-                //println!("FOUND {}  {:?}", age/(3600*24), filename);
-                if age/(3600*24) >= 2 {
-                    println!("UNLINK {}  {:?}", age/(3600*24), filename);
-                    let res = unsafe { libc::unlinkat(rawfd, filename.as_ptr(), 0) };
-                    if res != 0 {
-                        let err = nix::Error::last();
-                        bail!("unlink chunk {:?} failed on store '{}' - {}", filename, self.name, err);
+
+            let lock = self.mutex.lock();
+
+            if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
+                if bad {
+                    match std::ffi::CString::new(&filename.to_bytes()[..64]) {
+                        Ok(orig_filename) => {
+                            match fstatat(
+                                dirfd,
+                                orig_filename.as_c_str(),
+                                nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
+                            {
+                                Ok(_) => { /* do nothing */ },
+                                Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
+                                    // chunk hasn't been rewritten yet, keep
+                                    // .bad file around for manual recovery
+                                    continue;
+                                },
+                                Err(err) => {
+                                    // some other error, warn user and keep
+                                    // .bad file around too
+                                    worker.warn(format!(
+                                        "error during stat on '{:?}' - {}",
+                                        orig_filename,
+                                        err,
+                                    ));
+                                    continue;
+                                }
+                            }
+                        },
+                        Err(err) => {
+                            worker.warn(format!(
+                                "could not get original filename from .bad file '{:?}' - {}",
+                                filename,
+                                err,
+                            ));
+                            continue;
+                        }
                     }
-                } else {
-                    status.disk_chunks += 1;
-                    status.disk_bytes += stat.st_size as usize;
 
+                    if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
+                        worker.warn(format!(
+                            "unlinking corrupt chunk {:?} failed on store '{}' - {}",
+                            filename,
+                            self.name,
+                            err,
+                        ));
+                    } else {
+                        status.removed_bad += 1;
+                        status.removed_bytes += stat.st_size as u64;
+                    }
+                } else if stat.st_atime < min_atime {
+                    //let age = now - stat.st_atime;
+                    //println!("UNLINK {}  {:?}", age/(3600*24), filename);
+                    if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
+                        bail!(
+                            "unlinking chunk {:?} failed on store '{}' - {}",
+                            filename,
+                            self.name,
+                            err,
+                        );
+                    }
+                    status.removed_chunks += 1;
+                    status.removed_bytes += stat.st_size as u64;
+                } else {
+                    if stat.st_atime < oldest_writer {
+                        status.pending_chunks += 1;
+                        status.pending_bytes += stat.st_size as u64;
+                    } else {
+                        status.disk_chunks += 1;
+                        status.disk_bytes += stat.st_size as u64;
+                     }
                 }
             }
+            drop(lock);
         }
-        Ok(())
-    }
-
-    pub fn sweep_unused_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
-
-        use nix::fcntl::OFlag;
-        use nix::sys::stat::Mode;
-        use nix::dir::Dir;
-
-        let base_handle = match Dir::open(
-            &self.chunk_dir, OFlag::O_RDONLY, Mode::empty()) {
-            Ok(h) => h,
-            Err(err) => bail!("unable to open store '{}' chunk dir {:?} - {}",
-                              self.name, self.chunk_dir, err),
-        };
 
-        let base_fd = base_handle.as_raw_fd();
-
-        let mut last_percentage = 0;
-
-        for i in 0..64*1024 {
-
-            let percentage = (i*100)/(64*1024);
-            if percentage != last_percentage {
-                eprintln!("Percentage done: {}", percentage);
-                last_percentage = percentage;
-            }
-
-            let l1name = PathBuf::from(format!("{:04x}", i));
-            match nix::dir::Dir::openat(base_fd, &l1name, OFlag::O_RDONLY, Mode::empty()) {
-                Ok(mut h) => {
-                    //println!("SCAN {:?} {:?}", l1name);
-                   self.sweep_old_files(&mut h, status)?;
-                }
-                Err(err) => bail!("unable to open store '{}' dir {:?}/{:?} - {}",
-                                  self.name, self.chunk_dir, l1name, err),
-            };
-        }
         Ok(())
     }
 
-    pub fn insert_chunk(&self, chunk: &[u8]) -> Result<(bool, [u8; 32]), Error> {
-
-        // fixme: use Sha512/256 when available
-        let mut hasher = sha::Sha256::new();
-        hasher.update(chunk);
+    pub fn insert_chunk(
+        &self,
+        chunk: &DataBlob,
+        digest: &[u8; 32],
+    ) -> Result<(bool, u64), Error> {
 
-        let digest = hasher.finish();
+        //println!("DIGEST {}", proxmox::tools::digest_to_hex(digest));
 
-        //println!("DIGEST {}", tools::digest_to_hex(&digest));
-
-        let mut chunk_path = self.chunk_dir.clone();
-        let prefix = digest_to_prefix(&digest);
-        chunk_path.push(&prefix);
-        let digest_str = tools::digest_to_hex(&digest);
-        chunk_path.push(&digest_str);
+        let (chunk_path, digest_str) = self.chunk_path(digest);
 
         let lock = self.mutex.lock();
 
         if let Ok(metadata) = std::fs::metadata(&chunk_path) {
             if metadata.is_file() {
-                 return Ok((true, digest));
+                self.touch_chunk(digest)?;
+                return Ok((true, metadata.len()));
             } else {
                 bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
             }
@@ -295,19 +423,36 @@ impl ChunkStore {
 
         let mut tmp_path = chunk_path.clone();
         tmp_path.set_extension("tmp");
-        let mut f = std::fs::File::create(&tmp_path)?;
-        f.write_all(chunk)?;
+
+        let mut file = std::fs::File::create(&tmp_path)?;
+
+        let raw_data = chunk.raw_data();
+        let encoded_size = raw_data.len() as u64;
+
+        file.write_all(raw_data)?;
 
         if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
             if let Err(_) = std::fs::remove_file(&tmp_path)  { /* ignore */ }
-            bail!("Atomic rename on store '{}' failed for chunk {} - {}", self.name, digest_str, err);
+            bail!(
+                "Atomic rename on store '{}' failed for chunk {} - {}",
+                self.name,
+                digest_str,
+                err,
+            );
         }
 
-        //println!("PATH {:?}", chunk_path);
-
         drop(lock);
 
-        Ok((false, digest))
+        Ok((false, encoded_size))
+    }
+
+    pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) {
+        let mut chunk_path = self.chunk_dir.clone();
+        let prefix = digest_to_prefix(digest);
+        chunk_path.push(&prefix);
+        let digest_str = proxmox::tools::digest_to_hex(digest);
+        chunk_path.push(&digest_str);
+        (chunk_path, digest_str)
     }
 
     pub fn relative_path(&self, path: &Path) -> PathBuf {
@@ -317,9 +462,21 @@ impl ChunkStore {
         full_path
     }
 
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+
     pub fn base_path(&self) -> PathBuf {
         self.base.clone()
     }
+
+    pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
+        tools::ProcessLocker::try_shared_lock(self.locker.clone())
+    }
+
+    pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> {
+        tools::ProcessLocker::try_exclusive_lock(self.locker.clone())
+    }
 }
 
 
@@ -334,15 +491,19 @@ fn test_chunk_store1() {
     let chunk_store = ChunkStore::open("test", &path);
     assert!(chunk_store.is_err());
 
-    let chunk_store = ChunkStore::create("test", &path).unwrap();
-    let (exists, _) = chunk_store.insert_chunk(&[0u8, 1u8]).unwrap();
+    let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
+    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();
+
+    let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
+
+    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(!exists);
 
-    let (exists, _) = chunk_store.insert_chunk(&[0u8, 1u8]).unwrap();
+    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(exists);
 
 
-    let chunk_store = ChunkStore::create("test", &path);
+    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
     assert!(chunk_store.is_err());
 
     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
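
For reference, a standalone sketch of the on-disk layout produced by digest_to_prefix() and chunk_path(): the first two digest bytes select one of the 64*1024 pre-created subdirectories, and the full 64-character hex digest is the file name within it (the digest value below is illustrative only).

    // Standalone illustration (not part of the crate): how a chunk digest maps
    // to its path below the store's chunk directory.
    fn digest_to_prefix(digest: &[u8]) -> String {
        // first two bytes -> one of the 64*1024 "{:04x}" subdirectories
        format!("{:02x}{:02x}/", digest[0], digest[1])
    }

    fn main() {
        // illustrative digest; a real one is the 32-byte SHA-256 of the chunk
        let digest: [u8; 4] = [0xab, 0xcd, 0x01, 0x02];
        let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
        // prints "abcd/abcd0102", i.e. <chunk_dir>/abcd/<full hex digest>
        println!("{}{}", digest_to_prefix(&digest), hex);
    }

get_chunk_iterator() relies on exactly this layout when it walks the 0x0000..0xffff subdirectories and filters entries down to names of 64 hex characters, with an optional ".<N>.bad" suffix for corrupt chunks.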