// src/backup/chunk_store.rs
use anyhow::{bail, format_err, Error};

use std::path::{Path, PathBuf};
use std::io::Write;
use std::sync::{Arc, Mutex};
use std::os::unix::io::AsRawFd;

use proxmox::tools::fs::{CreateOptions, create_path, create_dir};

use crate::tools;
use crate::api2::types::GarbageCollectionStatus;

use super::DataBlob;
use crate::task::TaskState;

/// File system based chunk store
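///
/// Chunks are kept below `<base>/.chunks/<prefix>/<digest>`, where the prefix
/// consists of the first four hex digits of the chunk digest.
///
/// Illustrative usage sketch (not compiled; the path and the `data` buffer are
/// placeholders, error handling omitted):
///
/// ```ignore
/// // open a store that was previously set up with ChunkStore::create()
/// let store = ChunkStore::open("store1", "/path/to/datastore")?;
/// let (chunk, digest) = DataChunkBuilder::new(&data).build()?;
/// // (true, _) means the chunk was already present and only got touched
/// let (was_duplicate, encoded_size) = store.insert_chunk(&chunk, &digest)?;
/// ```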
pub struct ChunkStore {
    name: String, // used for error reporting
    pub(crate) base: PathBuf,
    chunk_dir: PathBuf,
    mutex: Mutex<bool>,
    locker: Arc<Mutex<tools::ProcessLocker>>,
}

// TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?

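/// Check that `size` is one of the supported fixed chunk sizes (64 KiB up to 4 MiB).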
pub fn verify_chunk_size(size: usize) -> Result<(), Error> {

    static SIZES: [usize; 7] = [64*1024, 128*1024, 256*1024, 512*1024, 1024*1024, 2048*1024, 4096*1024];

    if !SIZES.contains(&size) {
        bail!("Got unsupported chunk size '{}'", size);
    }
    Ok(())
}

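/// Map a chunk digest to its sub-directory prefix: the first four hex digits
/// of the digest, followed by `/`.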
fn digest_to_prefix(digest: &[u8]) -> PathBuf {

    let mut buf = Vec::<u8>::with_capacity(2+1+2+1);

    const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";

    buf.push(HEX_CHARS[(digest[0] as usize) >> 4]);
    buf.push(HEX_CHARS[(digest[0] as usize) & 0xf]);
    buf.push(HEX_CHARS[(digest[1] as usize) >> 4]);
    buf.push(HEX_CHARS[(digest[1] as usize) & 0xf]);
    buf.push(b'/');

    let path = unsafe { String::from_utf8_unchecked(buf) };

    path.into()
}

impl ChunkStore {

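    /// Return the chunk directory (`<base>/.chunks`) below the given datastore base path.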
    fn chunk_dir<P: AsRef<Path>>(path: P) -> PathBuf {

        let mut chunk_dir: PathBuf = PathBuf::from(path.as_ref());
        chunk_dir.push(".chunks");

        chunk_dir
    }

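    /// Create a new chunk store at `path`, owned by `uid`/`gid`.
    ///
    /// This creates the base directory, the `.chunks` directory with its
    /// 64*1024 (`0000` .. `ffff`) sub-directories and the process lock file,
    /// then opens the store.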
    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
    where
        P: Into<PathBuf>,
    {

        let base: PathBuf = path.into();

        if !base.is_absolute() {
            bail!("expected absolute path - got {:?}", base);
        }

        let chunk_dir = Self::chunk_dir(&base);

        let options = CreateOptions::new()
            .owner(uid)
            .group(gid);

        let default_options = CreateOptions::new();

        match create_path(&base, Some(default_options.clone()), Some(options.clone())) {
            Err(err) => bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err),
            Ok(res) => if !res { nix::unistd::chown(&base, Some(uid), Some(gid))? },
        }

        if let Err(err) = create_dir(&chunk_dir, options.clone()) {
            bail!("unable to create chunk store '{}' subdir {:?} - {}", name, chunk_dir, err);
        }

        // create lock file with correct owner/group
        let lockfile_path = Self::lockfile_path(&base);
        proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone())?;

        // create 64*1024 subdirs
        let mut last_percentage = 0;

        for i in 0..64*1024 {
            let mut l1path = chunk_dir.clone();
            l1path.push(format!("{:04x}", i));
            if let Err(err) = create_dir(&l1path, options.clone()) {
                bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l1path, err);
            }
            let percentage = (i*100)/(64*1024);
            if percentage != last_percentage {
                // eprintln!("ChunkStore::create {}%", percentage);
                last_percentage = percentage;
            }
        }

        Self::open(name, base)
    }

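    /// Return the path of the process lock file (`<base>/.lock`) used by the [`tools::ProcessLocker`].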
    fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf {
        let base: PathBuf = base.into();

        let mut lockfile_path = base.clone();
        lockfile_path.push(".lock");

        lockfile_path
    }

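    /// Open an existing chunk store at `base`.
    ///
    /// Fails if `base` is not an absolute path or if the `.chunks` directory
    /// does not exist.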
    pub fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> {

        let base: PathBuf = base.into();

        if !base.is_absolute() {
            bail!("expected absolute path - got {:?}", base);
        }

        let chunk_dir = Self::chunk_dir(&base);

        if let Err(err) = std::fs::metadata(&chunk_dir) {
            bail!("unable to open chunk store '{}' at {:?} - {}", name, chunk_dir, err);
        }

        let lockfile_path = Self::lockfile_path(&base);

        let locker = tools::ProcessLocker::new(&lockfile_path)?;

        Ok(ChunkStore {
            name: name.to_owned(),
            base,
            chunk_dir,
            locker,
            mutex: Mutex::new(false)
        })
    }

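    /// Update the access time of a chunk, failing if the chunk file does not exist.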
    pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> {
        self.cond_touch_chunk(digest, true)?;
        Ok(())
    }

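    /// Update the access time of a chunk via `utimensat` (the mtime is left untouched).
    ///
    /// Returns `Ok(false)` if the chunk does not exist and `fail_if_not_exist`
    /// is `false`; all other errors are fatal.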
    pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {

        let (chunk_path, _digest_str) = self.chunk_path(digest);

        const UTIME_NOW: i64 = (1 << 30) - 1;
        const UTIME_OMIT: i64 = (1 << 30) - 2;

        let times: [libc::timespec; 2] = [
            libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
            libc::timespec { tv_sec: 0, tv_nsec: UTIME_OMIT }
        ];

        use nix::NixPath;

        let res = chunk_path.with_nix_path(|cstr| unsafe {
            let tmp = libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW);
            nix::errno::Errno::result(tmp)
        })?;

        if let Err(err) = res {
            if !fail_if_not_exist && err.as_errno() == Some(nix::errno::Errno::ENOENT) {
                return Ok(false);
            }

            bail!("update atime failed for chunk {:?} - {}", chunk_path, err);
        }

        Ok(true)
    }

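    /// Return a fused iterator over all chunk files in the store.
    ///
    /// Each item is `(entry, percentage, bad)`, where `percentage` is the scan
    /// progress over the `0000` .. `ffff` sub-directories and `bad` is true for
    /// `*.bad` files (corrupt chunks). File names that are not 64 hex digits
    /// (optionally followed by a `.N.bad` style suffix) are skipped.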
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)> + std::iter::FusedIterator,
        Error
    > {
        use nix::dir::Dir;
        use nix::fcntl::OFlag;
        use nix::sys::stat::Mode;

        let base_handle = Dir::open(&self.chunk_dir, OFlag::O_RDONLY, Mode::empty())
            .map_err(|err| {
                format_err!(
                    "unable to open store '{}' chunk dir {:?} - {}",
                    self.name,
                    self.chunk_dir,
                    err,
                )
            })?;

        let mut done = false;
        let mut inner: Option<tools::fs::ReadDir> = None;
        let mut at = 0;
        let mut percentage = 0;
        Ok(std::iter::from_fn(move || {
            if done {
                return None;
            }

            loop {
                if let Some(ref mut inner) = inner {
                    match inner.next() {
                        Some(Ok(entry)) => {
                            // skip files if they're not a hash
                            let bytes = entry.file_name().to_bytes();
                            if bytes.len() != 64 && bytes.len() != 64 + ".0.bad".len() {
                                continue;
                            }
                            if !bytes.iter().take(64).all(u8::is_ascii_hexdigit) {
                                continue;
                            }

                            let bad = bytes.ends_with(".bad".as_bytes());
                            return Some((Ok(entry), percentage, bad));
                        }
                        Some(Err(err)) => {
                            // stop after first error
                            done = true;
                            // and pass the error through:
                            return Some((Err(err), percentage, false));
                        }
                        None => (), // open next directory
                    }
                }

                inner = None;

                if at == 0x10000 {
                    done = true;
                    return None;
                }

                let subdir: &str = &format!("{:04x}", at);
                percentage = (at * 100) / 0x10000;
                at += 1;
                match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
                    Ok(dir) => {
                        inner = Some(dir);
                        // start reading:
                        continue;
                    }
                    Err(ref err) if err.as_errno() == Some(nix::errno::Errno::ENOENT) => {
                        // non-existing directories are okay, just keep going:
                        continue;
                    }
                    Err(err) => {
                        // other errors are fatal, so end our iteration
                        done = true;
                        // and pass the error through:
                        return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage, false));
                    }
                }
            }
        }).fuse())
    }

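    /// Return the timestamp of the oldest shared (writer) lock on this chunk store, if any.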
    pub fn oldest_writer(&self) -> Option<i64> {
        tools::ProcessLocker::oldest_shared_lock(self.locker.clone())
    }

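    /// Garbage collection phase 2: sweep unused chunks.
    ///
    /// Removes chunks whose access time is older than both `phase1_start_time`
    /// (minus a 24h + 5min grace period, see the `relatime` mount option) and
    /// `oldest_writer`. Corrupt `*.bad` files are removed once a good copy of
    /// the chunk exists again, otherwise they are counted as still bad. The
    /// results are accumulated in `status`.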
    pub fn sweep_unused_chunks(
        &self,
        oldest_writer: i64,
        phase1_start_time: i64,
        status: &mut GarbageCollectionStatus,
        worker: &dyn TaskState,
    ) -> Result<(), Error> {
        use nix::sys::stat::fstatat;
        use nix::unistd::{unlinkat, UnlinkatFlags};

        let mut min_atime = phase1_start_time - 3600*24; // at least 24h (see mount option relatime)

        if oldest_writer < min_atime {
            min_atime = oldest_writer;
        }

        min_atime -= 300; // add 5 mins gap for safety

        let mut last_percentage = 0;
        let mut chunk_count = 0;

        for (entry, percentage, bad) in self.get_chunk_iterator()? {
            if last_percentage != percentage {
                last_percentage = percentage;
                crate::task_log!(
                    worker,
                    "percentage done: phase2 {}% (processed {} chunks)",
                    percentage,
                    chunk_count,
                );
            }

            worker.check_abort()?;
            tools::fail_on_shutdown()?;

            let (dirfd, entry) = match entry {
                Ok(entry) => (entry.parent_fd(), entry),
                Err(err) => bail!("chunk iterator on chunk store '{}' failed - {}", self.name, err),
            };

            let file_type = match entry.file_type() {
                Some(file_type) => file_type,
                None => bail!("unsupported file system type on chunk store '{}'", self.name),
            };
            if file_type != nix::dir::Type::File {
                continue;
            }

            chunk_count += 1;

            let filename = entry.file_name();

            let lock = self.mutex.lock();

            if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
                if bad {
                    // filename validity checked in iterator
                    let orig_filename = std::ffi::CString::new(&filename.to_bytes()[..64])?;
                    match fstatat(
                        dirfd,
                        orig_filename.as_c_str(),
                        nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
                    {
                        Ok(_) => {
                            match unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
                                Err(err) =>
                                    crate::task_warn!(
                                        worker,
                                        "unlinking corrupt chunk {:?} failed on store '{}' - {}",
                                        filename,
                                        self.name,
                                        err,
                                    ),
                                Ok(_) => {
                                    status.removed_bad += 1;
                                    status.removed_bytes += stat.st_size as u64;
                                }
                            }
                        },
                        Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
                            // chunk hasn't been rewritten yet, keep .bad file
                            status.still_bad += 1;
                        },
                        Err(err) => {
                            // some other error, warn user and keep .bad file around too
                            status.still_bad += 1;
                            crate::task_warn!(
                                worker,
                                "error during stat on '{:?}' - {}",
                                orig_filename,
                                err,
                            );
                        }
                    }
                } else if stat.st_atime < min_atime {
                    //let age = now - stat.st_atime;
                    //println!("UNLINK  {}  {:?}", age/(3600*24), filename);
                    if let Err(err) = unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir) {
                        bail!(
                            "unlinking chunk {:?} failed on store '{}' - {}",
                            filename,
                            self.name,
                            err,
                        );
                    }
                    status.removed_chunks += 1;
                    status.removed_bytes += stat.st_size as u64;
                } else if stat.st_atime < oldest_writer {
                    status.pending_chunks += 1;
                    status.pending_bytes += stat.st_size as u64;
                } else {
                    status.disk_chunks += 1;
                    status.disk_bytes += stat.st_size as u64;
                }
            }
            drop(lock);
        }

        Ok(())
    }

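    /// Insert a chunk (its encoded blob data) under its digest.
    ///
    /// Returns `(true, size)` if a chunk with this digest already exists (only
    /// its atime is refreshed); otherwise the data is written to a temporary
    /// file, atomically renamed into place, and `(false, encoded_size)` is
    /// returned.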
    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {

        //println!("DIGEST {}", proxmox::tools::digest_to_hex(digest));

        let (chunk_path, digest_str) = self.chunk_path(digest);

        let lock = self.mutex.lock();

        if let Ok(metadata) = std::fs::metadata(&chunk_path) {
            if metadata.is_file() {
                self.touch_chunk(digest)?;
                return Ok((true, metadata.len()));
            } else {
                bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
            }
        }

        let mut tmp_path = chunk_path.clone();
        tmp_path.set_extension("tmp");

        let mut file = std::fs::File::create(&tmp_path)?;

        let raw_data = chunk.raw_data();
        let encoded_size = raw_data.len() as u64;

        file.write_all(raw_data)?;

        if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
            if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
            bail!(
                "Atomic rename on store '{}' failed for chunk {} - {}",
                self.name,
                digest_str,
                err,
            );
        }

        drop(lock);

        Ok((false, encoded_size))
    }

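    /// Return the on-disk path and the hex digest string for a chunk digest.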
    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        let mut chunk_path = self.chunk_dir.clone();
        let prefix = digest_to_prefix(digest);
        chunk_path.push(&prefix);
        let digest_str = proxmox::tools::digest_to_hex(digest);
        chunk_path.push(&digest_str);
        (chunk_path, digest_str)
    }

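    /// Resolve a path relative to the datastore base directory into a full path.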
    pub fn relative_path(&self, path: &Path) -> PathBuf {

        let mut full_path = self.base.clone();
        full_path.push(path);
        full_path
    }

    pub fn name(&self) -> &str {
        &self.name
    }

    pub fn base_path(&self) -> PathBuf {
        self.base.clone()
    }

    pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
        tools::ProcessLocker::try_shared_lock(self.locker.clone())
    }

    pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> {
        tools::ProcessLocker::try_exclusive_lock(self.locker.clone())
    }
}


#[test]
fn test_chunk_store1() {

    let mut path = std::fs::canonicalize(".").unwrap(); // we need an absolute path
    path.push(".testdir");

    if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }

    let chunk_store = ChunkStore::open("test", &path);
    assert!(chunk_store.is_err());

    let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();

    let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();

    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
    assert!(!exists);

    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
    assert!(exists);


    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
    assert!(chunk_store.is_err());

    if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
}