]> git.proxmox.com Git - proxmox-backup.git/blob - src/backup/chunk_store.rs
remove DataChunk file format - use DataBlob instead
[proxmox-backup.git] / src / backup / chunk_store.rs
1 use failure::*;
2
3 use std::path::{Path, PathBuf};
4 use std::io::Write;
5 use std::sync::{Arc, Mutex};
6 use std::os::unix::io::AsRawFd;
7 use serde::Serialize;
8
9 use crate::tools;
10 use super::DataBlob;
11 use crate::server::WorkerTask;
12
13 #[derive(Clone, Serialize)]
14 pub struct GarbageCollectionStatus {
15 pub upid: Option<String>,
16 pub index_file_count: usize,
17 pub index_data_bytes: u64,
18 pub disk_bytes: u64,
19 pub disk_chunks: usize,
20 pub removed_bytes: u64,
21 pub removed_chunks: usize,
22 }
23
24 impl Default for GarbageCollectionStatus {
25 fn default() -> Self {
26 GarbageCollectionStatus {
27 upid: None,
28 index_file_count: 0,
29 index_data_bytes: 0,
30 disk_bytes: 0,
31 disk_chunks: 0,
32 removed_bytes: 0,
33 removed_chunks: 0,
34 }
35 }
36 }
37
/// File system based chunk store
///
/// Chunks live below `<base>/.chunks/<4-hex-prefix>/<64-hex-digest>`;
/// see `chunk_path()` / `digest_to_prefix()`.
pub struct ChunkStore {
    name: String, // used for error reporting
    // Absolute base directory of the store (crate-visible so datastore
    // code can build paths relative to it).
    pub (crate) base: PathBuf,
    // `<base>/.chunks` - parent of the 64K hash-prefix subdirectories.
    chunk_dir: PathBuf,
    // In-process lock serializing insert_chunk() against
    // sweep_unused_chunks(); the bool payload itself is unused.
    mutex: Mutex<bool>,
    // Inter-process lock on `<base>/.lock`, used via
    // try_shared_lock()/try_exclusive_lock()/oldest_writer().
    locker: Arc<Mutex<tools::ProcessLocker>>,
}
46
47 // TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
48
49 pub fn verify_chunk_size(size: usize) -> Result<(), Error> {
50
51 static SIZES: [usize; 7] = [64*1024, 128*1024, 256*1024, 512*1024, 1024*1024, 2048*1024, 4096*1024];
52
53 if !SIZES.contains(&size) {
54 bail!("Got unsupported chunk size '{}'", size);
55 }
56 Ok(())
57 }
58
/// Map a chunk digest to its two-level directory prefix, e.g. a digest
/// starting with bytes `0xab, 0xcd` maps to `"abcd/"` (lowercase hex).
///
/// # Panics
///
/// Panics if `digest` has fewer than 2 bytes (same as the previous
/// manual indexing did).
fn digest_to_prefix(digest: &[u8]) -> PathBuf {
    // `{:02x}` produces the same lowercase two-digit hex as the old
    // hand-rolled HEX_CHARS table, without the unsafe
    // `String::from_utf8_unchecked`.
    format!("{:02x}{:02x}/", digest[0], digest[1]).into()
}
75
76 impl ChunkStore {
77
78 fn chunk_dir<P: AsRef<Path>>(path: P) -> PathBuf {
79
80 let mut chunk_dir: PathBuf = PathBuf::from(path.as_ref());
81 chunk_dir.push(".chunks");
82
83 chunk_dir
84 }
85
    /// Create a new chunk store at `path` and open it.
    ///
    /// Creates the base directory, the `.chunks` subdirectory and all
    /// 64K (0000..ffff) level-1 prefix subdirectories, then delegates to
    /// `open()`. Fails if `path` is not absolute or any directory
    /// already exists / cannot be created.
    pub fn create<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {

        let base: PathBuf = path.into();

        if !base.is_absolute() {
            bail!("expected absolute path - got {:?}", base);
        }

        let chunk_dir = Self::chunk_dir(&base);

        if let Err(err) = std::fs::create_dir(&base) {
            bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
        }

        if let Err(err) = std::fs::create_dir(&chunk_dir) {
            bail!("unable to create chunk store '{}' subdir {:?} - {}", name, chunk_dir, err);
        }

        // create 64*1024 subdirs
        let mut last_percentage = 0;

        for i in 0..64*1024 {
            let mut l1path = chunk_dir.clone();
            l1path.push(format!("{:04x}", i));
            if let Err(err) = std::fs::create_dir(&l1path) {
                bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l1path, err);
            }
            // creating 65536 directories can take a while - report
            // coarse progress to stderr
            let percentage = (i*100)/(64*1024);
            if percentage != last_percentage {
                eprintln!("Percentage done: {}", percentage);
                last_percentage = percentage;
            }
        }

        Self::open(name, base)
    }
122
    /// Open an existing chunk store at `path`.
    ///
    /// Verifies that `path` is absolute and that the `.chunks`
    /// subdirectory exists, then acquires the process locker on
    /// `<base>/.lock`. Does not scan or validate any chunk contents.
    pub fn open<P: Into<PathBuf>>(name: &str, path: P) -> Result<Self, Error> {

        let base: PathBuf = path.into();

        if !base.is_absolute() {
            bail!("expected absolute path - got {:?}", base);
        }

        let chunk_dir = Self::chunk_dir(&base);

        // cheap existence check - metadata() fails if `.chunks` is missing
        if let Err(err) = std::fs::metadata(&chunk_dir) {
            bail!("unable to open chunk store '{}' at {:?} - {}", name, chunk_dir, err);
        }

        let mut lockfile_path = base.clone();
        lockfile_path.push(".lock");

        let locker = tools::ProcessLocker::new(&lockfile_path)?;

        Ok(ChunkStore {
            name: name.to_owned(),
            base,
            chunk_dir,
            locker,
            mutex: Mutex::new(false)
        })
    }
150
151 pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> {
152
153 let (chunk_path, _digest_str) = self.chunk_path(digest);
154
155 const UTIME_NOW: i64 = ((1 << 30) - 1);
156 const UTIME_OMIT: i64 = ((1 << 30) - 2);
157
158 let times: [libc::timespec; 2] = [
159 libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW },
160 libc::timespec { tv_sec: 0, tv_nsec: UTIME_OMIT }
161 ];
162
163 use nix::NixPath;
164
165 let res = chunk_path.with_nix_path(|cstr| unsafe {
166 libc::utimensat(-1, cstr.as_ptr(), &times[0], libc::AT_SYMLINK_NOFOLLOW)
167 })?;
168
169 if let Err(err) = nix::errno::Errno::result(res) {
170 bail!("updata atime failed for chunk {:?} - {}", chunk_path, err);
171 }
172
173 Ok(())
174 }
175
176 pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
177
178 let (chunk_path, digest_str) = self.chunk_path(digest);
179 let mut file = std::fs::File::open(&chunk_path)
180 .map_err(|err| {
181 format_err!(
182 "store '{}', unable to read chunk '{}' - {}",
183 self.name,
184 digest_str,
185 err,
186 )
187 })?;
188
189 DataBlob::load(&mut file)
190 }
191
    /// Return a fused iterator over all chunk files in the store.
    ///
    /// Yields `(entry, percentage)` tuples, where `percentage` is the
    /// coarse scan progress over the 0x10000 level-1 subdirectories.
    /// File names that are not exactly 64 hex digits are skipped.
    /// A directory-read error is yielded once and then ends the
    /// iteration; a missing level-1 subdirectory (ENOENT) is skipped.
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)> + std::iter::FusedIterator,
        Error
    > {
        use nix::dir::Dir;
        use nix::fcntl::OFlag;
        use nix::sys::stat::Mode;

        let base_handle = Dir::open(&self.chunk_dir, OFlag::O_RDONLY, Mode::empty())
            .map_err(|err| {
                format_err!(
                    "unable to open store '{}' chunk dir {:?} - {}",
                    self.name,
                    self.chunk_dir,
                    err,
                )
            })?;

        // State captured by the from_fn closure:
        //   done       - iteration finished (fatal error or all subdirs read)
        //   inner      - currently open level-1 subdirectory, if any
        //   at         - next level-1 subdirectory index (0..=0x10000)
        //   percentage - progress value reported alongside each item
        let mut done = false;
        let mut inner: Option<tools::fs::ReadDir> = None;
        let mut at = 0;
        let mut percentage = 0;
        Ok(std::iter::from_fn(move || {
            if done {
                return None;
            }

            loop {
                // drain the currently open subdirectory first
                if let Some(ref mut inner) = inner {
                    match inner.next() {
                        Some(Ok(entry)) => {
                            // skip files if they're not a hash
                            let bytes = entry.file_name().to_bytes();
                            if bytes.len() != 64 {
                                continue;
                            }
                            if !bytes.iter().all(u8::is_ascii_hexdigit) {
                                continue;
                            }
                            return Some((Ok(entry), percentage));
                        }
                        Some(Err(err)) => {
                            // stop after first error
                            done = true;
                            // and pass the error through:
                            return Some((Err(err), percentage));
                        }
                        None => (), // open next directory
                    }
                }

                inner = None;

                if at == 0x10000 {
                    done = true;
                    return None;
                }

                let subdir: &str = &format!("{:04x}", at);
                percentage = (at * 100) / 0x10000;
                at += 1;
                match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
                    Ok(dir) => {
                        inner = Some(dir);
                        // start reading:
                        continue;
                    }
                    Err(ref err) if err.as_errno() == Some(nix::errno::Errno::ENOENT) => {
                        // non-existing directories are okay, just keep going:
                        continue;
                    }
                    Err(err) => {
                        // other errors are fatal, so end our iteration
                        done = true;
                        // and pass the error through:
                        return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage));
                    }
                }
            }
        }).fuse())
    }
275
276 pub fn oldest_writer(&self) -> Option<i64> {
277 tools::ProcessLocker::oldest_shared_lock(self.locker.clone())
278 }
279
    /// Remove chunk files whose atime is older than the GC cutoff.
    ///
    /// The cutoff is at least 24h before `now` (accounting for the
    /// `relatime` mount option), moved further back to before the
    /// oldest active writer (`oldest_writer`), minus a 5 minute safety
    /// gap. Kept/removed counts and byte totals are accumulated into
    /// `status`; progress is logged through `worker`.
    pub fn sweep_unused_chunks(
        &self,
        oldest_writer: Option<i64>,
        status: &mut GarbageCollectionStatus,
        worker: Arc<WorkerTask>,
    ) -> Result<(), Error> {
        use nix::sys::stat::fstatat;

        let now = unsafe { libc::time(std::ptr::null_mut()) };

        let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)

        if let Some(stamp) = oldest_writer {
            if stamp < min_atime {
                min_atime = stamp;
            }
        }

        min_atime -= 300; // add 5 mins gap for safety

        let mut last_percentage = 0;
        let mut chunk_count = 0;

        for (entry, percentage) in self.get_chunk_iterator()? {
            if last_percentage != percentage {
                last_percentage = percentage;
                worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
            }

            // allow clean abort between entries
            tools::fail_on_shutdown()?;

            let (dirfd, entry) = match entry {
                Ok(entry) => (entry.parent_fd(), entry),
                Err(err) => bail!("chunk iterator on chunk store '{}' failed - {}", self.name, err),
            };

            let file_type = match entry.file_type() {
                Some(file_type) => file_type,
                None => bail!("unsupported file system type on chunk store '{}'", self.name),
            };
            if file_type != nix::dir::Type::File {
                continue;
            }

            chunk_count += 1;

            let filename = entry.file_name();

            // hold the in-process mutex across stat+unlink so
            // insert_chunk() cannot race with the removal; the lock()
            // Result is kept as the guard (poisoning is not checked).
            let lock = self.mutex.lock();

            if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
                let age = now - stat.st_atime;
                //println!("FOUND {} {:?}", age/(3600*24), filename);
                if stat.st_atime < min_atime {
                    println!("UNLINK {} {:?}", age/(3600*24), filename);
                    let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
                    if res != 0 {
                        let err = nix::Error::last();
                        bail!(
                            "unlink chunk {:?} failed on store '{}' - {}",
                            filename,
                            self.name,
                            err,
                        );
                    }
                    status.removed_chunks += 1;
                    status.removed_bytes += stat.st_size as u64;
                } else {
                    status.disk_chunks += 1;
                    status.disk_bytes += stat.st_size as u64;
                }
            }
            drop(lock);
        }

        Ok(())
    }
357
    /// Store `chunk` under its `digest`, deduplicating against existing files.
    ///
    /// Returns `(true, size)` if a chunk file already exists (nothing is
    /// written), or `(false, encoded_size)` after writing a new one. The
    /// data is written to a `.tmp` sibling first and atomically renamed
    /// into place.
    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {

        //println!("DIGEST {}", proxmox::tools::digest_to_hex(digest));

        let (chunk_path, digest_str) = self.chunk_path(digest);

        // hold the in-process mutex for the whole check+write+rename so
        // we do not race sweep_unused_chunks(); the lock() Result is
        // kept as the guard (poisoning is not checked).
        let lock = self.mutex.lock();

        if let Ok(metadata) = std::fs::metadata(&chunk_path) {
            if metadata.is_file() {
                // deduplicated - chunk already on disk
                return Ok((true, metadata.len()));
            } else {
                bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
            }
        }

        // digest file names contain no '.', so set_extension appends ".tmp"
        let mut tmp_path = chunk_path.clone();
        tmp_path.set_extension("tmp");

        let mut file = std::fs::File::create(&tmp_path)?;

        let raw_data = chunk.raw_data();
        let encoded_size = raw_data.len() as u64;

        file.write_all(raw_data)?;

        if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
            // best effort cleanup of the temp file before failing
            if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
            bail!(
                "Atomic rename on store '{}' failed for chunk {} - {}",
                self.name,
                digest_str,
                err,
            );
        }

        drop(lock);

        Ok((false, encoded_size))
    }
402
403 pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) {
404 let mut chunk_path = self.chunk_dir.clone();
405 let prefix = digest_to_prefix(digest);
406 chunk_path.push(&prefix);
407 let digest_str = proxmox::tools::digest_to_hex(digest);
408 chunk_path.push(&digest_str);
409 (chunk_path, digest_str)
410 }
411
412 pub fn relative_path(&self, path: &Path) -> PathBuf {
413
414 let mut full_path = self.base.clone();
415 full_path.push(path);
416 full_path
417 }
418
419 pub fn base_path(&self) -> PathBuf {
420 self.base.clone()
421 }
422
423 pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
424 tools::ProcessLocker::try_shared_lock(self.locker.clone())
425 }
426
427 pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> {
428 tools::ProcessLocker::try_exclusive_lock(self.locker.clone())
429 }
430 }
431
432
// Smoke test: create a store, insert the same chunk twice (second
// insert must be detected as duplicate), and verify open/create fail
// for missing / already-existing directories.
#[test]
fn test_chunk_store1() {

    let mut path = std::fs::canonicalize(".").unwrap(); // we need absolute path
    path.push(".testdir");

    if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }

    // opening a non-existing store must fail
    let chunk_store = ChunkStore::open("test", &path);
    assert!(chunk_store.is_err());

    let chunk_store = ChunkStore::create("test", &path).unwrap();

    let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();

    // first insert writes the chunk
    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
    assert!(!exists);

    // second insert deduplicates
    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
    assert!(exists);


    // creating over an existing directory must fail
    let chunk_store = ChunkStore::create("test", &path);
    assert!(chunk_store.is_err());

    if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
}