-use failure::*;
+use anyhow::{bail, format_err, Error};
+
use std::path::{Path, PathBuf};
use std::io::Write;
-use std::time::Duration;
-
-use openssl::sha;
-use std::sync::Mutex;
-
-use std::fs::File;
+use std::sync::{Arc, Mutex};
use std::os::unix::io::AsRawFd;
+use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
+
use crate::tools;
+use crate::api2::types::GarbageCollectionStatus;
-pub struct GarbageCollectionStatus {
- pub used_bytes: usize,
- pub used_chunks: usize,
- pub disk_bytes: usize,
- pub disk_chunks: usize,
-}
-
-impl Default for GarbageCollectionStatus {
- fn default() -> Self {
- GarbageCollectionStatus {
- used_bytes: 0,
- used_chunks: 0,
- disk_bytes: 0,
- disk_chunks: 0,
- }
- }
-}
+use super::DataBlob;
+use crate::server::WorkerTask;
+/// File system based chunk store
pub struct ChunkStore {
name: String, // used for error reporting
pub (crate) base: PathBuf,
chunk_dir: PathBuf,
mutex: Mutex<bool>,
- _lockfile: File,
+ locker: Arc<Mutex<tools::ProcessLocker>>,
}
-const HEX_CHARS: &'static [u8; 16] = b"0123456789abcdef";
-
// TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
-pub fn digest_to_hex(digest: &[u8]) -> String {
+pub fn verify_chunk_size(size: usize) -> Result<(), Error> {
- let mut buf = Vec::<u8>::with_capacity(digest.len()*2);
+ static SIZES: [usize; 7] = [64*1024, 128*1024, 256*1024, 512*1024, 1024*1024, 2048*1024, 4096*1024];
- for i in 0..digest.len() {
- buf.push(HEX_CHARS[(digest[i] >> 4) as usize]);
- buf.push(HEX_CHARS[(digest[i] & 0xf) as usize]);
+ if !SIZES.contains(&size) {
+ bail!("Got unsupported chunk size '{}'", size);
}
-
- unsafe { String::from_utf8_unchecked(buf) }
+ Ok(())
}
fn digest_to_prefix(digest: &[u8]) -> PathBuf {
let mut buf = Vec::<u8>::with_capacity(2+1+2+1);
+ const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
+
buf.push(HEX_CHARS[(digest[0] as usize) >> 4]);
buf.push(HEX_CHARS[(digest[0] as usize) &0xf]);
- buf.push('/' as u8);
-
buf.push(HEX_CHARS[(digest[1] as usize) >> 4]);
buf.push(HEX_CHARS[(digest[1] as usize) & 0xf]);
buf.push('/' as u8);
path.into()
}
-
impl ChunkStore {
fn chunk_dir<P: AsRef<Path>>(path: P) -> PathBuf {
chunk_dir
}
- pub fn create<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {
+ pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
+ where
+ P: Into<PathBuf>,
+ {
let base: PathBuf = path.into();
+
+ if !base.is_absolute() {
+ bail!("expected absolute path - got {:?}", base);
+ }
+
let chunk_dir = Self::chunk_dir(&base);
- if let Err(err) = std::fs::create_dir(&base) {
- bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
+ let options = CreateOptions::new()
+ .owner(uid)
+ .group(gid);
+
+ let default_options = CreateOptions::new();
+
+ match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+ Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
+ Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
}
- if let Err(err) = std::fs::create_dir(&chunk_dir) {
+ if let Err(err) = create_dir(&chunk_dir, options.clone()) {
bail!("unable to create chunk store '{}' subdir {:?} - {}", name, chunk_dir, err);
}
- // create 256*256 subdirs
+ // create lock file with correct owner/group
+ let lockfile_path = Self::lockfile_path(&base);
+ proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone())?;
+
+ // create 64*1024 subdirs
let mut last_percentage = 0;
- for i in 0..256 {
+ for i in 0..64*1024 {
let mut l1path = chunk_dir.clone();
- l1path.push(format!("{:02x}",i));
- if let Err(err) = std::fs::create_dir(&l1path) {
+ l1path.push(format!("{:04x}", i));
+ if let Err(err) = create_dir(&l1path, options.clone()) {
bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l1path, err);
}
- for j in 0..256 {
- let mut l2path = l1path.clone();
- l2path.push(format!("{:02x}",j));
- if let Err(err) = std::fs::create_dir(&l2path) {
- bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l2path, err);
- }
- let percentage = ((i*256+j)*100)/(256*256);
- if percentage != last_percentage {
- eprintln!("Percentage done: {}", percentage);
- last_percentage = percentage;
- }
+ let percentage = (i*100)/(64*1024);
+ if percentage != last_percentage {
+ eprintln!("{}%", percentage);
+ last_percentage = percentage;
}
}
+
Self::open(name, base)
}
- pub fn open<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {
+ fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
+ let base: PathBuf = base.into();
+
+ let mut lockfile_path = base.clone();
+ lockfile_path.push(".lock");
+
+ lockfile_path
+ }
+
+ pub fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> {
+
+ let base: PathBuf = base.into();
+
+ if !base.is_absolute() {
+ bail!("expected absolute path - got {:?}", base);
+ }
- let base: PathBuf = path.into();
let chunk_dir = Self::chunk_dir(&base);
if let Err(err) = std::fs::metadata(&chunk_dir) {
bail!("unable to open chunk store '{}' at {:?} - {}", name, chunk_dir, err);
}
- let mut lockfile_path = base.clone();
- lockfile_path.push(".lock");
+ let lockfile_path = Self::lockfile_path(&base);
- // make sure only one process/thread/task can use it
- let lockfile = tools::open_file_locked(
- lockfile_path, Duration::from_secs(10))?;
+ let locker = tools::ProcessLocker::new(&lockfile_path)?;
Ok(ChunkStore {
name: name.to_owned(),
base,
chunk_dir,
- _lockfile: lockfile,
+ locker,
mutex: Mutex::new(false)
})
}
- pub fn touch_chunk(&self, digest:&[u8]) -> Result<(), Error> {
-
- // fixme: nix::sys::stat::utimensat
- let mut chunk_path = self.chunk_dir.clone();
- let prefix = digest_to_prefix(&digest);
- chunk_path.push(&prefix);
- let digest_str = digest_to_hex(&digest);
- chunk_path.push(&digest_str);
-
- std::fs::metadata(&chunk_path)?;
+ pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> {
+ self.cond_touch_chunk(digest, true)?;
Ok(())
}
- fn sweep_old_files(&self, handle: &mut nix::dir::Dir, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
+ pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
- let rawfd = handle.as_raw_fd();
+ let (chunk_path, _digest_str) = self.chunk_path(digest);
- let now = unsafe { libc::time(std::ptr::null_mut()) };
+ const UTIME_NOW: i64 = (1 << 30) - 1;
+ const UTIME_OMIT: i64 = (1 << 30) - 2;
- for entry in handle.iter() {
- let entry = match entry {
- Ok(entry) => entry,
- Err(_) => continue /* ignore */,
- };
- let file_type = match entry.file_type() {
- Some(file_type) => file_type,
- None => bail!("unsupported file system type on chunk store '{}'", self.name),
- };
- if file_type != nix::dir::Type::File { continue; }
+ let times: [libc::timespec; 2] = [
+ libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
+ libc::timespec { tv_sec: 0, tv_nsec: UTIME_OMIT }
+ ];
- let filename = entry.file_name();
- if let Ok(stat) = nix::sys::stat::fstatat(rawfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
- let age = now - stat.st_atime;
- //println!("FOUND {} {:?}", age/(3600*24), filename);
- if age/(3600*24) >= 2 {
- println!("UNLINK {} {:?}", age/(3600*24), filename);
- let res = unsafe { libc::unlinkat(rawfd, filename.as_ptr(), 0) };
- if res != 0 {
- let err = nix::Error::last();
- bail!("unlink chunk {:?} failed on store '{}' - {}", filename, self.name, err);
- }
- } else {
- status.disk_chunks += 1;
- status.disk_bytes += stat.st_size as usize;
+ use nix::NixPath;
- }
+ let res = chunk_path.with_nix_path(|cstr| unsafe {
+ let tmp = libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW);
+ nix::errno::Errno::result(tmp)
+ })?;
+
+ if let Err(err) = res {
+ if !fail_if_not_exist && err.as_errno() == Some(nix::errno::Errno::ENOENT) {
+ return Ok(false);
}
+
+ bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
}
- Ok(())
- }
- pub fn sweep_used_chunks(&self, status: &mut GarbageCollectionStatus) -> Result<(), Error> {
+ Ok(true)
+ }
+ pub fn get_chunk_iterator(
+ &self,
+ ) -> Result<
+ impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
+ Error
+ > {
+ use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
- use nix::dir::Dir;
- let base_handle = match Dir::open(
- &self.chunk_dir, OFlag::O_RDONLY, Mode::empty()) {
- Ok(h) => h,
- Err(err) => bail!("unable to open store '{}' chunk dir {:?} - {}",
- self.name, self.chunk_dir, err),
- };
+ let base_handle = Dir::open(&self.chunk_dir, OFlag::O_RDONLY, Mode::empty())
+ .map_err(|err| {
+ format_err!(
+ "unable to open store '{}' chunk dir {:?} - {}",
+ self.name,
+ self.chunk_dir,
+ err,
+ )
+ })?;
+
+ let mut done = false;
+ let mut inner: Option<tools::fs::ReadDir> = None;
+ let mut at = 0;
+ let mut percentage = 0;
+ Ok(std::iter::from_fn(move || {
+ if done {
+ return None;
+ }
+
+ loop {
+ if let Some(ref mut inner) = inner {
+ match inner.next() {
+ Some(Ok(entry)) => {
+ // skip files if they're not a hash
+ let bytes = entry.file_name().to_bytes();
+ if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
+ continue;
+ }
+ if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
+ continue;
+ }
+
+ let bad = bytes.ends_with(".bad".as_bytes());
+ return Some((Ok(entry), percentage, bad));
+ }
+ Some(Err(err)) => {
+ // stop after first error
+ done = true;
+ // and pass the error through:
+ return Some((Err(err), percentage, false));
+ }
+ None => (), // open next directory
+ }
+ }
+
+ inner = None;
+
+ if at == 0x10000 {
+ done = true;
+ return None;
+ }
+
+ let subdir: &str = &format!("{:04x}", at);
+ percentage = (at * 100) / 0x10000;
+ at += 1;
+ match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
+ Ok(dir) => {
+ inner = Some(dir);
+ // start reading:
+ continue;
+ }
+ Err(ref err) if err.as_errno() == Some(nix::errno::Errno::ENOENT) => {
+ // non-existing directories are okay, just keep going:
+ continue;
+ }
+ Err(err) => {
+ // other errors are fatal, so end our iteration
+ done = true;
+ // and pass the error through:
+ return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
+ }
+ }
+ }
+ }).fuse())
+ }
+
+ pub fn oldest_writer(&self) -> Option<i64> {
+ tools::ProcessLocker::oldest_shared_lock(self.locker.clone())
+ }
+
+ pub fn sweep_unused_chunks(
+ &self,
+ oldest_writer: i64,
+ phase1_start_time: i64,
+ status: &mut GarbageCollectionStatus,
+ worker: &WorkerTask,
+ ) -> Result<(), Error> {
+ use nix::sys::stat::fstatat;
+ use nix::unistd::{unlinkat, UnlinkatFlags};
- let base_fd = base_handle.as_raw_fd();
+ let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)
+
+ if oldest_writer < min_atime {
+ min_atime = oldest_writer;
+ }
+
+ min_atime -= 300; // add 5 mins gap for safety
let mut last_percentage = 0;
+ let mut chunk_count = 0;
+
+ for (entry, percentage, bad) in self.get_chunk_iterator()? {
+ if last_percentage != percentage {
+ last_percentage = percentage;
+ worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
+ }
+
+ worker.fail_on_abort()?;
+ tools::fail_on_shutdown()?;
- for i in 0..256 {
- let l1name = PathBuf::from(format!("{:02x}", i));
- let mut l1_handle = match nix::dir::Dir::openat(
- base_fd, &l1name, OFlag::O_RDONLY, Mode::empty()) {
- Ok(h) => h,
- Err(err) => bail!("unable to open store '{}' dir {:?}/{:?} - {}",
- self.name, self.chunk_dir, l1name, err),
+ let (dirfd, entry) = match entry {
+ Ok(entry) => (entry.parent_fd(), entry),
+ Err(err) => bail!("chunk iterator on chunk store '{}' failed - {}", self.name, err),
};
- let l1_fd = l1_handle.as_raw_fd();
+ let file_type = match entry.file_type() {
+ Some(file_type) => file_type,
+ None => bail!("unsupported file system type on chunk store '{}'", self.name),
+ };
+ if file_type != nix::dir::Type::File {
+ continue;
+ }
+
+ chunk_count += 1;
- for j in 0..256 {
- let l2name = PathBuf::from(format!("{:02x}", j));
+ let filename = entry.file_name();
+
+ let lock = self.mutex.lock();
+
+ if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
+ if bad {
+ match std::ffi::CString::new(&filename.to_bytes()[..64]) {
+ Ok(orig_filename) => {
+ match fstatat(
+ dirfd,
+ orig_filename.as_c_str(),
+ nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
+ {
+ Ok(_) => { /* do nothing */ },
+ Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
+ // chunk hasn't been rewritten yet, keep
+ // .bad file around for manual recovery
+ continue;
+ },
+ Err(err) => {
+ // some other error, warn user and keep
+ // .bad file around too
+ worker.warn(format!(
+ "error during stat on '{:?}' - {}",
+ orig_filename,
+ err,
+ ));
+ continue;
+ }
+ }
+ },
+ Err(err) => {
+ worker.warn(format!(
+ "could not get original filename from .bad file '{:?}' - {}",
+ filename,
+ err,
+ ));
+ continue;
+ }
+ }
- let percentage = ((i*256+j)*100)/(256*256);
- if percentage != last_percentage {
- eprintln!("Percentage done: {}", percentage);
- last_percentage = percentage;
+ if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
+ worker.warn(format!(
+ "unlinking corrupt chunk {:?} failed on store '{}' - {}",
+ filename,
+ self.name,
+ err,
+ ));
+ } else {
+ status.removed_bad += 1;
+ status.removed_bytes += stat.st_size as u64;
+ }
+ } else if stat.st_atime < min_atime {
+ //let age = now - stat.st_atime;
+ //println!("UNLINK {} {:?}", age/(3600*24), filename);
+ if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
+ bail!(
+ "unlinking chunk {:?} failed on store '{}' - {}",
+ filename,
+ self.name,
+ err,
+ );
+ }
+ status.removed_chunks += 1;
+ status.removed_bytes += stat.st_size as u64;
+ } else {
+ if stat.st_atime < oldest_writer {
+ status.pending_chunks += 1;
+ status.pending_bytes += stat.st_size as u64;
+ } else {
+ status.disk_chunks += 1;
+ status.disk_bytes += stat.st_size as u64;
+ }
}
- //println!("SCAN {:?} {:?}", l1name, l2name);
-
- let mut l2_handle = match Dir::openat(
- l1_fd, &l2name, OFlag::O_RDONLY, Mode::empty()) {
- Ok(h) => h,
- Err(err) => bail!(
- "unable to open store '{}' dir {:?}/{:?}/{:?} - {}",
- self.name, self.chunk_dir, l1name, l2name, err),
- };
- self.sweep_old_files(&mut l2_handle, status)?;
}
+ drop(lock);
}
+
Ok(())
}
- pub fn insert_chunk(&self, chunk: &[u8]) -> Result<(bool, [u8; 32]), Error> {
+ pub fn insert_chunk(
+ &self,
+ chunk: &DataBlob,
+ digest: &[u8; 32],
+ ) -> Result<(bool, u64), Error> {
- // fixme: use Sha512/256 when available
- let mut hasher = sha::Sha256::new();
- hasher.update(chunk);
+ //println!("DIGEST {}", proxmox::tools::digest_to_hex(digest));
- let digest = hasher.finish();
-
- //println!("DIGEST {}", digest_to_hex(&digest));
-
- let mut chunk_path = self.chunk_dir.clone();
- let prefix = digest_to_prefix(&digest);
- chunk_path.push(&prefix);
- let digest_str = digest_to_hex(&digest);
- chunk_path.push(&digest_str);
+ let (chunk_path, digest_str) = self.chunk_path(digest);
let lock = self.mutex.lock();
if let Ok(metadata) = std::fs::metadata(&chunk_path) {
if metadata.is_file() {
- return Ok((true, digest));
+ self.touch_chunk(digest)?;
+ return Ok((true, metadata.len()));
} else {
bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
}
let mut tmp_path = chunk_path.clone();
tmp_path.set_extension("tmp");
- let mut f = std::fs::File::create(&tmp_path)?;
- f.write_all(chunk)?;
+
+ let mut file = std::fs::File::create(&tmp_path)?;
+
+ let raw_data = chunk.raw_data();
+ let encoded_size = raw_data.len() as u64;
+
+ file.write_all(raw_data)?;
if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
- bail!("Atomic rename on store '{}' failed for chunk {} - {}", self.name, digest_str, err);
+ bail!(
+ "Atomic rename on store '{}' failed for chunk {} - {}",
+ self.name,
+ digest_str,
+ err,
+ );
}
- println!("PATH {:?}", chunk_path);
-
drop(lock);
- Ok((false, digest))
+ Ok((false, encoded_size))
+ }
+
+ pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) {
+ let mut chunk_path = self.chunk_dir.clone();
+ let prefix = digest_to_prefix(digest);
+ chunk_path.push(&prefix);
+ let digest_str = proxmox::tools::digest_to_hex(digest);
+ chunk_path.push(&digest_str);
+ (chunk_path, digest_str)
}
pub fn relative_path(&self, path: &Path) -> PathBuf {
full_path
}
+ pub fn name(&self) -> &str {
+ &self.name
+ }
+
pub fn base_path(&self) -> PathBuf {
self.base.clone()
}
+
+ pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
+ tools::ProcessLocker::try_shared_lock(self.locker.clone())
+ }
+
+ pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> {
+ tools::ProcessLocker::try_exclusive_lock(self.locker.clone())
+ }
}
#[test]
fn test_chunk_store1() {
+ let mut path = std::fs::canonicalize(".").unwrap(); // we need absolute path
+ path.push(".testdir");
+
if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
- let chunk_store = ChunkStore::open("test", ".testdir");
+ let chunk_store = ChunkStore::open("test", &path);
assert!(chunk_store.is_err());
- let chunk_store = ChunkStore::create("test", ".testdir").unwrap();
- let (exists, _) = chunk_store.insert_chunk(&[0u8, 1u8]).unwrap();
+ let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
+ let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();
+
+ let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
+
+ let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
assert!(!exists);
- let (exists, _) = chunk_store.insert_chunk(&[0u8, 1u8]).unwrap();
+ let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
assert!(exists);
- let chunk_store = ChunkStore::create("test", ".testdir");
+ let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
assert!(chunk_store.is_err());
-
+ if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
}