]>
Commit | Line | Data |
---|---|---|
1 | use anyhow::{bail, format_err, Error}; | |
2 | ||
3 | use std::path::{Path, PathBuf}; | |
4 | use std::io::Write; | |
5 | use std::sync::{Arc, Mutex}; | |
6 | use std::os::unix::io::AsRawFd; | |
7 | ||
8 | use proxmox::tools::fs::{CreateOptions, create_path, create_dir}; | |
9 | ||
10 | use crate::tools; | |
11 | use crate::api2::types::GarbageCollectionStatus; | |
12 | ||
13 | use super::DataBlob; | |
14 | use crate::server::WorkerTask; | |
15 | ||
/// File system based chunk store
///
/// Chunks are stored as individual files below `<base>/.chunks/`,
/// spread over 64k subdirectories named after the first two digest
/// bytes (see `digest_to_prefix`).
pub struct ChunkStore {
    name: String, // used for error reporting
    pub (crate) base: PathBuf, // absolute base directory of the store
    chunk_dir: PathBuf, // `<base>/.chunks`
    mutex: Mutex<bool>, // serializes chunk insert against the GC sweep
    locker: Arc<Mutex<tools::ProcessLocker>>, // inter-process shared/exclusive locking
}
24 | ||
25 | // TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ? | |
26 | ||
27 | pub fn verify_chunk_size(size: usize) -> Result<(), Error> { | |
28 | ||
29 | static SIZES: [usize; 7] = [64*1024, 128*1024, 256*1024, 512*1024, 1024*1024, 2048*1024, 4096*1024]; | |
30 | ||
31 | if !SIZES.contains(&size) { | |
32 | bail!("Got unsupported chunk size '{}'", size); | |
33 | } | |
34 | Ok(()) | |
35 | } | |
36 | ||
/// Map a chunk digest to its two-level directory prefix.
///
/// The first two digest bytes are rendered as four lowercase hex digits
/// followed by a `/` separator, e.g. a digest starting with `0xab, 0xcd`
/// yields `"abcd/"`.
///
/// Panics if `digest` has fewer than 2 bytes.
fn digest_to_prefix(digest: &[u8]) -> PathBuf {
    // `format!` always produces valid UTF-8, so no `unsafe`
    // `String::from_utf8_unchecked` conversion is needed here.
    format!("{:02x}{:02x}/", digest[0], digest[1]).into()
}
53 | ||
54 | impl ChunkStore { | |
55 | ||
56 | fn chunk_dir<P: AsRef<Path>>(path: P) -> PathBuf { | |
57 | ||
58 | let mut chunk_dir: PathBuf = PathBuf::from(path.as_ref()); | |
59 | chunk_dir.push(".chunks"); | |
60 | ||
61 | chunk_dir | |
62 | } | |
63 | ||
    /// Create a new chunk store at `path`, owned by `uid`/`gid`.
    ///
    /// Creates the base directory, the `.chunks` subdirectory with all
    /// 64*1024 (`0000`..`ffff`) level-1 subdirectories and an empty
    /// `.lock` file, then opens the store via [`ChunkStore::open`].
    ///
    /// Fails if `path` is not absolute or any directory/file creation
    /// fails (e.g. when the store already exists).
    pub fn create<P>(name: &str, path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid) -> Result<Self, Error>
    where
        P: Into<PathBuf>,
    {

        let base: PathBuf = path.into();

        if !base.is_absolute() {
            bail!("expected absolute path - got {:?}", base);
        }

        let chunk_dir = Self::chunk_dir(&base);

        // store content is owned by the requested uid/gid ...
        let options = CreateOptions::new()
            .owner(uid)
            .group(gid);

        // ... while intermediate path components keep default ownership
        let default_options = CreateOptions::new();

        if let Err(err) = create_path(&base, Some(default_options.clone()), Some(options.clone())) {
            bail!("unable to create chunk store '{}' at {:?} - {}", name, base, err);
        }

        if let Err(err) = create_dir(&chunk_dir, options.clone()) {
            bail!("unable to create chunk store '{}' subdir {:?} - {}", name, chunk_dir, err);
        }

        // create lock file with correct owner/group
        let lockfile_path = Self::lockfile_path(&base);
        proxmox::tools::fs::replace_file(lockfile_path, b"", options.clone())?;

        // create 64*1024 subdirs
        let mut last_percentage = 0;

        for i in 0..64*1024 {
            let mut l1path = chunk_dir.clone();
            l1path.push(format!("{:04x}", i));
            if let Err(err) = create_dir(&l1path, options.clone()) {
                bail!("unable to create chunk store '{}' subdir {:?} - {}", name, l1path, err);
            }
            // creating 65536 directories can take a while, so report progress
            let percentage = (i*100)/(64*1024);
            if percentage != last_percentage {
                eprintln!("Percentage done: {}", percentage);
                last_percentage = percentage;
            }
        }


        Self::open(name, base)
    }
114 | ||
115 | fn lockfile_path<P: Into<PathBuf>>(base: P) -> PathBuf { | |
116 | let base: PathBuf = base.into(); | |
117 | ||
118 | let mut lockfile_path = base.clone(); | |
119 | lockfile_path.push(".lock"); | |
120 | ||
121 | lockfile_path | |
122 | } | |
123 | ||
124 | pub fn open<P: Into<PathBuf>>(name: &str, base: P) -> Result<Self, Error> { | |
125 | ||
126 | let base: PathBuf = base.into(); | |
127 | ||
128 | if !base.is_absolute() { | |
129 | bail!("expected absolute path - got {:?}", base); | |
130 | } | |
131 | ||
132 | let chunk_dir = Self::chunk_dir(&base); | |
133 | ||
134 | if let Err(err) = std::fs::metadata(&chunk_dir) { | |
135 | bail!("unable to open chunk store '{}' at {:?} - {}", name, chunk_dir, err); | |
136 | } | |
137 | ||
138 | let lockfile_path = Self::lockfile_path(&base); | |
139 | ||
140 | let locker = tools::ProcessLocker::new(&lockfile_path)?; | |
141 | ||
142 | Ok(ChunkStore { | |
143 | name: name.to_owned(), | |
144 | base, | |
145 | chunk_dir, | |
146 | locker, | |
147 | mutex: Mutex::new(false) | |
148 | }) | |
149 | } | |
150 | ||
151 | pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> { | |
152 | self.cond_touch_chunk(digest, true)?; | |
153 | Ok(()) | |
154 | } | |
155 | ||
156 | pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> { | |
157 | ||
158 | let (chunk_path, _digest_str) = self.chunk_path(digest); | |
159 | ||
160 | const UTIME_NOW: i64 = ((1 << 30) - 1); | |
161 | const UTIME_OMIT: i64 = ((1 << 30) - 2); | |
162 | ||
163 | let times: [libc::timespec; 2] = [ | |
164 | libc::timespec { tv_sec: 0, tv_nsec: UTIME_NOW }, | |
165 | libc::timespec { tv_sec: 0, tv_nsec: UTIME_OMIT } | |
166 | ]; | |
167 | ||
168 | use nix::NixPath; | |
169 | ||
170 | let res = chunk_path.with_nix_path(|cstr| unsafe { | |
171 | let tmp = libc::utimensat(-1, cstr.as_ptr(), ×[0], libc::AT_SYMLINK_NOFOLLOW); | |
172 | nix::errno::Errno::result(tmp) | |
173 | })?; | |
174 | ||
175 | if let Err(err) = res { | |
176 | if !fail_if_not_exist && err.as_errno() == Some(nix::errno::Errno::ENOENT) { | |
177 | return Ok(false); | |
178 | } | |
179 | ||
180 | bail!("updata atime failed for chunk {:?} - {}", chunk_path, err); | |
181 | } | |
182 | ||
183 | Ok(true) | |
184 | } | |
185 | ||
186 | pub fn read_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> { | |
187 | ||
188 | let (chunk_path, digest_str) = self.chunk_path(digest); | |
189 | let mut file = std::fs::File::open(&chunk_path) | |
190 | .map_err(|err| { | |
191 | format_err!( | |
192 | "store '{}', unable to read chunk '{}' - {}", | |
193 | self.name, | |
194 | digest_str, | |
195 | err, | |
196 | ) | |
197 | })?; | |
198 | ||
199 | DataBlob::load(&mut file) | |
200 | } | |
201 | ||
    /// Return an iterator over all chunk files in the store.
    ///
    /// Walks the `0000`..`ffff` level-1 subdirectories in order and yields
    /// `(entry, percentage)` pairs, where `percentage` is the share of
    /// level-1 directories processed so far. Entries whose file name is
    /// not a 64-character hex digest are skipped; missing subdirectories
    /// are silently ignored. Iteration ends after the first read error,
    /// which is passed through as the final item.
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)> + std::iter::FusedIterator,
        Error
    > {
        use nix::dir::Dir;
        use nix::fcntl::OFlag;
        use nix::sys::stat::Mode;

        let base_handle = Dir::open(&self.chunk_dir, OFlag::O_RDONLY, Mode::empty())
            .map_err(|err| {
                format_err!(
                    "unable to open store '{}' chunk dir {:?} - {}",
                    self.name,
                    self.chunk_dir,
                    err,
                )
            })?;

        // iterator state, moved into the closure below
        let mut done = false;
        let mut inner: Option<tools::fs::ReadDir> = None;
        let mut at = 0; // index of the next level-1 subdir to open (0..=0x10000)
        let mut percentage = 0;
        Ok(std::iter::from_fn(move || {
            if done {
                return None;
            }

            loop {
                // drain the currently open subdirectory first
                if let Some(ref mut inner) = inner {
                    match inner.next() {
                        Some(Ok(entry)) => {
                            // skip files if they're not a hash
                            let bytes = entry.file_name().to_bytes();
                            if bytes.len() != 64 {
                                continue;
                            }
                            if !bytes.iter().all(u8::is_ascii_hexdigit) {
                                continue;
                            }
                            return Some((Ok(entry), percentage));
                        }
                        Some(Err(err)) => {
                            // stop after first error
                            done = true;
                            // and pass the error through:
                            return Some((Err(err), percentage));
                        }
                        None => (), // open next directory
                    }
                }

                inner = None;

                if at == 0x10000 {
                    done = true;
                    return None;
                }

                let subdir: &str = &format!("{:04x}", at);
                percentage = (at * 100) / 0x10000;
                at += 1;
                match tools::fs::read_subdir(base_handle.as_raw_fd(), subdir) {
                    Ok(dir) => {
                        inner = Some(dir);
                        // start reading:
                        continue;
                    }
                    Err(ref err) if err.as_errno() == Some(nix::errno::Errno::ENOENT) => {
                        // non-existing directories are okay, just keep going:
                        continue;
                    }
                    Err(err) => {
                        // other errors are fatal, so end our iteration
                        done = true;
                        // and pass the error through:
                        return Some((Err(format_err!("unable to read subdir '{}' - {}", subdir, err)), percentage));
                    }
                }
            }
        }).fuse())
    }
285 | ||
286 | pub fn oldest_writer(&self) -> Option<i64> { | |
287 | tools::ProcessLocker::oldest_shared_lock(self.locker.clone()) | |
288 | } | |
289 | ||
    /// Garbage collection sweep: unlink chunk files with an old access time.
    ///
    /// A chunk is removed when its atime is older than the cutoff
    /// `min(now - 24h, oldest_writer) - 5min` (the 24h floor accounts for
    /// the `relatime` mount option). Chunks newer than the cutoff but
    /// older than `oldest_writer` are only counted as pending. Counters
    /// and byte totals are accumulated in `status`; progress is logged via
    /// `worker`, and the sweep aborts early on worker abort or shutdown.
    pub fn sweep_unused_chunks(
        &self,
        oldest_writer: i64,
        status: &mut GarbageCollectionStatus,
        worker: &WorkerTask,
    ) -> Result<(), Error> {
        use nix::sys::stat::fstatat;

        let now = unsafe { libc::time(std::ptr::null_mut()) };

        let mut min_atime = now - 3600*24; // at least 24h (see mount option relatime)

        if oldest_writer < min_atime {
            min_atime = oldest_writer;
        }

        min_atime -= 300; // add 5 mins gap for safety

        let mut last_percentage = 0;
        let mut chunk_count = 0;

        for (entry, percentage) in self.get_chunk_iterator()? {
            if last_percentage != percentage {
                last_percentage = percentage;
                worker.log(format!("percentage done: {}, chunk count: {}", percentage, chunk_count));
            }

            // honor abort/shutdown requests between chunks
            worker.fail_on_abort()?;
            tools::fail_on_shutdown()?;

            let (dirfd, entry) = match entry {
                Ok(entry) => (entry.parent_fd(), entry),
                Err(err) => bail!("chunk iterator on chunk store '{}' failed - {}", self.name, err),
            };

            // only regular files can be chunks; skip everything else
            let file_type = match entry.file_type() {
                Some(file_type) => file_type,
                None => bail!("unsupported file system type on chunk store '{}'", self.name),
            };
            if file_type != nix::dir::Type::File {
                continue;
            }

            chunk_count += 1;

            let filename = entry.file_name();

            // hold the store mutex while stat'ing/unlinking so we do not
            // race a concurrent insert_chunk()
            let lock = self.mutex.lock();

            if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
                if stat.st_atime < min_atime {
                    //let age = now - stat.st_atime;
                    //println!("UNLINK {} {:?}", age/(3600*24), filename);
                    let res = unsafe { libc::unlinkat(dirfd, filename.as_ptr(), 0) };
                    if res != 0 {
                        let err = nix::Error::last();
                        bail!(
                            "unlink chunk {:?} failed on store '{}' - {}",
                            filename,
                            self.name,
                            err,
                        );
                    }
                    status.removed_chunks += 1;
                    status.removed_bytes += stat.st_size as u64;
                } else {
                    if stat.st_atime < oldest_writer {
                        status.pending_chunks += 1;
                        status.pending_bytes += stat.st_size as u64;
                    } else {
                        status.disk_chunks += 1;
                        status.disk_bytes += stat.st_size as u64;
                    }
                }
            }
            drop(lock);
        }

        Ok(())
    }
370 | ||
    /// Insert a chunk blob under its content `digest`.
    ///
    /// Returns `(true, size)` when a chunk file with this digest already
    /// exists (the data is not written again — deduplication), otherwise
    /// writes the blob to a `.tmp` file, atomically renames it into place
    /// and returns `(false, encoded_size)`.
    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {

        //println!("DIGEST {}", proxmox::tools::digest_to_hex(digest));

        let (chunk_path, digest_str) = self.chunk_path(digest);

        // hold the store mutex so the existence check, write and rename do
        // not race other inserts or the GC sweep
        let lock = self.mutex.lock();

        if let Ok(metadata) = std::fs::metadata(&chunk_path) {
            if metadata.is_file() {
                // chunk already present - deduplicated
                return Ok((true, metadata.len()));
            } else {
                bail!("Got unexpected file type on store '{}' for chunk {}", self.name, digest_str);
            }
        }

        let mut tmp_path = chunk_path.clone();
        tmp_path.set_extension("tmp");

        let mut file = std::fs::File::create(&tmp_path)?;

        let raw_data = chunk.raw_data();
        let encoded_size = raw_data.len() as u64;

        file.write_all(raw_data)?;

        // atomic rename makes the chunk visible only when fully written;
        // on failure try to clean up the temporary file
        if let Err(err) = std::fs::rename(&tmp_path, &chunk_path) {
            if let Err(_) = std::fs::remove_file(&tmp_path) { /* ignore */ }
            bail!(
                "Atomic rename on store '{}' failed for chunk {} - {}",
                self.name,
                digest_str,
                err,
            );
        }

        drop(lock);

        Ok((false, encoded_size))
    }
415 | ||
416 | pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) { | |
417 | let mut chunk_path = self.chunk_dir.clone(); | |
418 | let prefix = digest_to_prefix(digest); | |
419 | chunk_path.push(&prefix); | |
420 | let digest_str = proxmox::tools::digest_to_hex(digest); | |
421 | chunk_path.push(&digest_str); | |
422 | (chunk_path, digest_str) | |
423 | } | |
424 | ||
425 | pub fn relative_path(&self, path: &Path) -> PathBuf { | |
426 | ||
427 | let mut full_path = self.base.clone(); | |
428 | full_path.push(path); | |
429 | full_path | |
430 | } | |
431 | ||
432 | pub fn base_path(&self) -> PathBuf { | |
433 | self.base.clone() | |
434 | } | |
435 | ||
436 | pub fn try_shared_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> { | |
437 | tools::ProcessLocker::try_shared_lock(self.locker.clone()) | |
438 | } | |
439 | ||
440 | pub fn try_exclusive_lock(&self) -> Result<tools::ProcessLockExclusiveGuard, Error> { | |
441 | tools::ProcessLocker::try_exclusive_lock(self.locker.clone()) | |
442 | } | |
443 | } | |
444 | ||
445 | ||
#[test]
fn test_chunk_store1() {

    let mut path = std::fs::canonicalize(".").unwrap(); // we need an absolute path
    path.push(".testdir");

    // start from a clean state; ignore errors if the dir does not exist
    if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }

    // opening a non-existing store must fail
    let chunk_store = ChunkStore::open("test", &path);
    assert!(chunk_store.is_err());

    let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()).unwrap().unwrap();
    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid).unwrap();

    let (chunk, digest) = super::DataChunkBuilder::new(&[0u8, 1u8]).build().unwrap();

    // first insert actually stores the chunk ...
    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
    assert!(!exists);

    // ... the second insert detects the duplicate
    let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
    assert!(exists);


    // creating a store on top of an existing one must fail
    let chunk_store = ChunkStore::create("test", &path, user.uid, user.gid);
    assert!(chunk_store.is_err());

    if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
}