-use failure::*;
+use anyhow::{bail, format_err, Error};
use std::path::{Path, PathBuf};
use std::io::Write;
use std::sync::{Arc, Mutex};
use std::os::unix::io::AsRawFd;
-use serde::Serialize;
+
+use proxmox::tools::fs::{CreateOptions, create_path, create_dir};
use crate::tools;
+use crate::api2::types::GarbageCollectionStatus;
+
use super::DataBlob;
use crate::server::WorkerTask;
-#[derive(Clone, Serialize)]
-pub struct GarbageCollectionStatus {
- pub upid: Option<String>,
- pub index_file_count: usize,
- pub index_data_bytes: u64,
- pub disk_bytes: u64,
- pub disk_chunks: usize,
- pub removed_bytes: u64,
- pub removed_chunks: usize,
-}
-
-impl Default for GarbageCollectionStatus {
- fn default() -> Self {
- GarbageCollectionStatus {
- upid: None,
- index_file_count: 0,
- index_data_bytes: 0,
- disk_bytes: 0,
- disk_chunks: 0,
- removed_bytes: 0,
- removed_chunks: 0,
- }
- }
-}
-
/// File system based chunk store
pub struct ChunkStore {
name: String, // used for error reporting
chunk_dir
}
- pub fn create<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {
+ pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
+ where
+ P: Into<PathBuf>,
+ {
let base: PathBuf = path.into();
let chunk_dir = Self::chunk_dir(&base);
- if let Err(err) = std::fs::create_dir_all(&base) {
- bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
+ let options = CreateOptions::new()
+ .owner(uid)
+ .group(gid);
+
+ let default_options = CreateOptions::new();
+
+ match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
+ Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
+ Ok(res) => if ! res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
}
- if let Err(err) = std::fs::create_dir_all(&chunk_dir) {
+ if let Err(err) = create_dir(&chunk_dir, options.clone()) {
bail!("unable to create chunk store '{}' subdir {:?} - {}", name, chunk_dir, err);
}
+ // create lock file with correct owner/group
+ let lockfile_path = Self::lockfile_path(&base);
+ proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone())?;
+
// create 64*1024 subdirs
let mut last_percentage = 0;
for i in 0..64*1024 {
let mut l1path = chunk_dir.clone();
l1path.push(format!("{:04x}", i));
- if let Err(err) = std::fs::create_dir(&l1path) {
+ if let Err(err) = create_dir(&l1path, options.clone()) {
bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l1path, err);
}
let percentage = (i*100)/(64*1024);
if percentage != last_percentage {
- eprintln!("Percentage done: {}", percentage);
+ eprintln!("{}%", percentage);
last_percentage = percentage;
}
}
+
Self::open(name, base)
}
- pub fn open<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {
+ fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
+ let base: PathBuf = base.into();
- let base: PathBuf = path.into();
+ let mut lockfile_path = base.clone();
+ lockfile_path.push(".lock");
+
+ lockfile_path
+ }
+
+ pub fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> {
+
+ let base: PathBuf = base.into();
if !base.is_absolute() {
bail!("expected absolute path - got {:?}", base);
bail!("unable to open chunk store '{}' at {:?} - {}", name, chunk_dir, err);
}
- let mut lockfile_path = base.clone();
- lockfile_path.push(".lock");
+ let lockfile_path = Self::lockfile_path(&base);
let locker = tools::ProcessLocker::new(&lockfile_path)?;
}
pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> {
+ self.cond_touch_chunk(digest, true)?;
+ Ok(())
+ }
+
+ pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
let (chunk_path, _digest_str) = self.chunk_path(digest);
- const UTIME_NOW: i64 = ((1 << 30) - 1);
- const UTIME_OMIT: i64 = ((1 << 30) - 2);
+ const UTIME_NOW: i64 = (1 << 30) - 1;
+ const UTIME_OMIT: i64 = (1 << 30) - 2;
let times: [libc::timespec; 2] = [
libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
use nix::NixPath;
let res = chunk_path.with_nix_path(|cstr| unsafe {
- libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW)
+ let tmp = libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW);
+ nix::errno::Errno::result(tmp)
})?;
- if let Err(err) = nix::errno::Errno::result(res) {
- bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
- }
-
- Ok(())
- }
-
- pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
+ if let Err(err) = res {
+ if !fail_if_not_exist && err.as_errno() == Some(nix::errno::Errno::ENOENT) {
+ return Ok(false);
+ }
- let (chunk_path, digest_str) = self.chunk_path(digest);
- let mut file = std::fs::File::open(&chunk_path)
- .map_err(|err| {
- format_err!(
- "store '{}', unable to read chunk '{}' - {}",
- self.name,
- digest_str,
- err,
- )
- })?;
+ bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
+ }
- DataBlob::load(&mut file)
+ Ok(true)
}
pub fn get_chunk_iterator(
&self,
) -> Result<
- impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)> + std::iter::FusedIterator,
+ impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
Error
> {
use nix::dir::Dir;
Some(Ok(entry)) => {
// skip files if they're not a hash
let bytes = entry.file_name().to_bytes();
- if bytes.len() != 64 {
+ if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
continue;
}
- if !bytes.iter().all(u8::is_ascii_hexdigit) {
+ if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
continue;
}
- return Some((Ok(entry), percentage));
+
+ let bad = bytes.ends_with(".bad".as_bytes());
+ return Some((Ok(entry), percentage, bad));
}
Some(Err(err)) => {
// stop after first error
done = true;
// and pass the error through:
- return Some((Err(err), percentage));
+ return Some((Err(err), percentage, false));
}
None => (), // open next directory
}
// other errors are fatal, so end our iteration
done = true;
// and pass the error through:
- return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage));
+ return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
}
}
}
pub fn sweep_unused_chunks(
&self,
- oldest_writer: Option<i64>,
+ oldest_writer: i64,
+ phase1_start_time: i64,
status: &mut GarbageCollectionStatus,
- worker: Arc<WorkerTask>,
+ worker: &WorkerTask,
) -> Result<(), Error> {
use nix::sys::stat::fstatat;
+ use nix::unistd::{unlinkat, UnlinkatFlags};
- let now = unsafe { libc::time(std::ptr::null_mut()) };
+ let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)
- let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)
-
- if let Some(stamp) = oldest_writer {
- if stamp < min_atime {
- min_atime = stamp;
- }
+ if oldest_writer < min_atime {
+ min_atime = oldest_writer;
}
min_atime -= 300; // add 5 mins gap for safety
let mut last_percentage = 0;
let mut chunk_count = 0;
- for (entry, percentage) in self.get_chunk_iterator()? {
+ for (entry, percentage, bad) in self.get_chunk_iterator()? {
if last_percentage != percentage {
last_percentage = percentage;
- worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
+ worker.log(format!("percentage done: phase2 {}% (processed {} chunks)", percentage, chunk_count));
}
+ worker.fail_on_abort()?;
tools::fail_on_shutdown()?;
let (dirfd, entry) = match entry {
let lock = self.mutex.lock();
if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
- let age = now - stat.st_atime;
- //println!("FOUND {} {:?}", age/(3600*24), filename);
- if stat.st_atime < min_atime {
- println!("UNLINK {} {:?}", age/(3600*24), filename);
- let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
- if res != 0 {
- let err = nix::Error::last();
+ if bad {
+ match std::ffi::CString::new(&filename.to_bytes()[..64]) {
+ Ok(orig_filename) => {
+ match fstatat(
+ dirfd,
+ orig_filename.as_c_str(),
+ nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
+ {
+ Ok(_) => { /* do nothing */ },
+ Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
+ // chunk hasn't been rewritten yet, keep
+ // .bad file around for manual recovery
+ continue;
+ },
+ Err(err) => {
+ // some other error, warn user and keep
+ // .bad file around too
+ worker.warn(format!(
+ "error during stat on '{:?}' - {}",
+ orig_filename,
+ err,
+ ));
+ continue;
+ }
+ }
+ },
+ Err(err) => {
+ worker.warn(format!(
+ "could not get original filename from .bad file '{:?}' - {}",
+ filename,
+ err,
+ ));
+ continue;
+ }
+ }
+
+ if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
+ worker.warn(format!(
+ "unlinking corrupt chunk {:?} failed on store '{}' - {}",
+ filename,
+ self.name,
+ err,
+ ));
+ } else {
+ status.removed_bad += 1;
+ status.removed_bytes += stat.st_size as u64;
+ }
+ } else if stat.st_atime < min_atime {
+ //let age = now - stat.st_atime;
+ //println!("UNLINK {} {:?}", age/(3600*24), filename);
+ if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
bail!(
- "unlink chunk {:?} failed on store '{}' - {}",
+ "unlinking chunk {:?} failed on store '{}' - {}",
filename,
self.name,
err,
}
status.removed_chunks += 1;
status.removed_bytes += stat.st_size as u64;
- } else {
- status.disk_chunks += 1;
- status.disk_bytes += stat.st_size as u64;
+ } else {
+ if stat.st_atime < oldest_writer {
+ status.pending_chunks += 1;
+ status.pending_bytes += stat.st_size as u64;
+ } else {
+ status.disk_chunks += 1;
+ status.disk_bytes += stat.st_size as u64;
+ }
}
}
drop(lock);
if let Ok(metadata) = std::fs::metadata(&chunk_path) {
if metadata.is_file() {
+ self.touch_chunk(digest)?;
return Ok((true, metadata.len()));
} else {
bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
full_path
}
+ pub fn name(&self) -> &str {
+ &self.name
+ }
+
pub fn base_path(&self) -> PathBuf {
self.base.clone()
}
let chunk_store = ChunkStore::open("test", &path);
assert!(chunk_store.is_err());
- let chunk_store = ChunkStore::create("test", &path).unwrap();
+ let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
+ let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();
let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();
assert!(exists);
- let chunk_store = ChunkStore::create("test", &path);
+ let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
assert!(chunk_store.is_err());
if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }