]>
Commit | Line | Data |
---|---|---|
7759eef5 | 1 | use std::collections::{HashSet, HashMap}; |
54552dda | 2 | use std::io::{self, Write}; |
367f002e WB |
3 | use std::path::{Path, PathBuf}; |
4 | use std::sync::{Arc, Mutex}; | |
60f9a6ea | 5 | use std::convert::TryFrom; |
367f002e | 6 | |
f7d4e4b5 | 7 | use anyhow::{bail, format_err, Error}; |
2c32fdde | 8 | use lazy_static::lazy_static; |
41b373ec | 9 | use chrono::{DateTime, Utc}; |
e4439025 DM |
10 | use serde_json::Value; |
11 | ||
12 | use proxmox::tools::fs::{replace_file, CreateOptions}; | |
529de6c7 | 13 | |
6d6b4e72 | 14 | use super::backup_info::{BackupGroup, BackupDir}; |
a92830dc | 15 | use super::chunk_store::ChunkStore; |
367f002e WB |
16 | use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; |
17 | use super::fixed_index::{FixedIndexReader, FixedIndexWriter}; | |
1610c45a | 18 | use super::manifest::{MANIFEST_BLOB_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest}; |
5de2bced | 19 | use super::index::*; |
1e8da0a7 | 20 | use super::{DataBlob, ArchiveType, archive_type}; |
367f002e | 21 | use crate::config::datastore; |
d4b59ae0 | 22 | use crate::server::WorkerTask; |
367f002e | 23 | use crate::tools; |
49a92084 | 24 | use crate::tools::format::HumanByte; |
e4342585 | 25 | use crate::tools::fs::{lock_dir_noblock, DirLockGuard}; |
e7cb4dc5 | 26 | use crate::api2::types::{GarbageCollectionStatus, Userid}; |
529de6c7 | 27 | |
367f002e WB |
lazy_static! {
    // Process-wide cache of open datastores, keyed by datastore name.
    // `lookup_datastore` consults and refreshes this map.
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}
ff3d3100 | 31 | |
e5064ba6 DM |
/// Datastore Management
///
/// A Datastore can store severals backups, and provides the
/// management interface for backup.
pub struct DataStore {
    /// Underlying chunk store; shared (via `Arc`) with index readers/writers.
    chunk_store: Arc<ChunkStore>,
    /// Held for the duration of a garbage collection run; the `bool` value itself is unused.
    gc_mutex: Mutex<bool>,
    /// Status of the most recent garbage collection run on this store.
    last_gc_status: Mutex<GarbageCollectionStatus>,
}
41 | ||
42 | impl DataStore { | |
43 | ||
2c32fdde DM |
    /// Look up a datastore by name, reusing a cached instance when possible.
    ///
    /// Re-reads the datastore config on every call. A cached entry is only
    /// reused while its chunk store base still matches the configured path;
    /// otherwise a fresh `DataStore` is opened and the cache entry replaced.
    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let (config, _digest) = datastore::config()?;
        let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;

        // the map lock is held across `open` below, serializing concurrent lookups
        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base == PathBuf::from(&config.path) {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open(name)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }
65 | ||
af6f80d3 | 66 | pub fn open(store_name: &str) -> Result<Self, Error> { |
529de6c7 | 67 | |
d0187a51 | 68 | let (config, _digest) = datastore::config()?; |
529de6c7 DM |
69 | let (_, store_config) = config.sections.get(store_name) |
70 | .ok_or(format_err!("no such datastore '{}'", store_name))?; | |
71 | ||
72 | let path = store_config["path"].as_str().unwrap(); | |
73 | ||
277fc5a3 | 74 | let chunk_store = ChunkStore::open(store_name, path)?; |
529de6c7 | 75 | |
f2b99c34 DM |
76 | let gc_status = GarbageCollectionStatus::default(); |
77 | ||
529de6c7 | 78 | Ok(Self { |
1629d2ad | 79 | chunk_store: Arc::new(chunk_store), |
64e53b28 | 80 | gc_mutex: Mutex::new(false), |
f2b99c34 | 81 | last_gc_status: Mutex::new(gc_status), |
529de6c7 DM |
82 | }) |
83 | } | |
84 | ||
d59397e6 WB |
    /// Iterate over all chunk files in the underlying chunk store.
    ///
    /// Thin delegation to `ChunkStore::get_chunk_iterator`; see there for
    /// the meaning of the `usize` paired with each directory entry.
    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize)>,
        Error
    > {
        self.chunk_store.get_chunk_iterator()
    }
93 | ||
91a905b6 | 94 | pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> { |
529de6c7 | 95 | |
91a905b6 | 96 | let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?; |
529de6c7 DM |
97 | |
98 | Ok(index) | |
99 | } | |
100 | ||
91a905b6 | 101 | pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> { |
529de6c7 | 102 | |
a7c72ad9 DM |
103 | let full_path = self.chunk_store.relative_path(filename.as_ref()); |
104 | ||
105 | let index = FixedIndexReader::open(&full_path)?; | |
529de6c7 DM |
106 | |
107 | Ok(index) | |
108 | } | |
3d5c11e5 | 109 | |
93d5d779 | 110 | pub fn create_dynamic_writer<P: AsRef<Path>>( |
0433db19 | 111 | &self, filename: P, |
93d5d779 | 112 | ) -> Result<DynamicIndexWriter, Error> { |
0433db19 | 113 | |
93d5d779 | 114 | let index = DynamicIndexWriter::create( |
976595e1 | 115 | self.chunk_store.clone(), filename.as_ref())?; |
0433db19 DM |
116 | |
117 | Ok(index) | |
118 | } | |
ff3d3100 | 119 | |
93d5d779 | 120 | pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> { |
77703d95 | 121 | |
d48a9955 DM |
122 | let full_path = self.chunk_store.relative_path(filename.as_ref()); |
123 | ||
124 | let index = DynamicIndexReader::open(&full_path)?; | |
77703d95 DM |
125 | |
126 | Ok(index) | |
127 | } | |
128 | ||
5de2bced WB |
    /// Open an index file, selecting fixed vs. dynamic by archive type.
    ///
    /// Returns the reader behind a `dyn IndexFile` trait object so callers
    /// can treat both index flavors uniformly. Any other archive type
    /// (e.g. blobs) is rejected with an error.
    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> =
            match archive_type(filename)? {
                ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
                ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
                _ => bail!("cannot open index file of unknown type: {:?}", filename),
            };
        Ok(out)
    }
142 | ||
60f9a6ea DM |
    /// Name of this datastore (delegates to the chunk store name).
    pub fn name(&self) -> &str {
        self.chunk_store.name()
    }
146 | ||
ff3d3100 DM |
    /// Absolute base directory of this datastore on disk.
    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }
150 | ||
    /// Cleanup a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
    ) -> Result<(), Error> {

        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());

        // Files allowed to stay: the manifest itself, the client log,
        // and everything the manifest lists.
        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });

        for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
            if let Ok(item) = item {
                if let Some(file_type) = item.file_type() {
                    // only regular files are removal candidates; skip subdirs etc.
                    if file_type != nix::dir::Type::File { continue; }
                }
                let file_name = item.file_name().to_bytes();
                if file_name == b"." || file_name == b".." { continue; };

                // non-UTF8 names can never match the wanted set, so they get removed
                if let Ok(name) = std::str::from_utf8(file_name) {
                    if wanted_files.contains(name) { continue; }
                }
                println!("remove unused file {:?}", item.file_name());
                let dirfd = item.parent_fd();
                // best-effort removal - the unlink result is deliberately ignored
                let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
            }
        }

        Ok(())
    }
4b4eba0b | 184 | |
41b373ec DM |
185 | /// Returns the absolute path for a backup_group |
186 | pub fn group_path(&self, backup_group: &BackupGroup) -> PathBuf { | |
4b4eba0b DM |
187 | let mut full_path = self.base_path(); |
188 | full_path.push(backup_group.group_path()); | |
41b373ec DM |
189 | full_path |
190 | } | |
191 | ||
192 | /// Returns the absolute path for backup_dir | |
193 | pub fn snapshot_path(&self, backup_dir: &BackupDir) -> PathBuf { | |
194 | let mut full_path = self.base_path(); | |
195 | full_path.push(backup_dir.relative_path()); | |
196 | full_path | |
197 | } | |
198 | ||
    /// Remove a complete backup group including all snapshots
    ///
    /// Takes a non-blocking lock on the group directory first, so removal
    /// fails cleanly if a backup inside the group is currently running.
    pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<(), Error> {

        let full_path = self.group_path(backup_group);

        // guard stays alive until the end of the function, covering the removal
        let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;

        log::info!("removing backup group {:?}", full_path);
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| {
                format_err!(
                    "removing backup group {:?} failed - {}",
                    full_path,
                    err,
                )
            })?;

        Ok(())
    }
218 | ||
    /// Remove a backup directory including all content
    ///
    /// Unless `force` is set, a non-blocking lock on the snapshot directory
    /// is taken first, so removal fails if the snapshot is in use.
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {

        let full_path = self.snapshot_path(backup_dir);

        // declared here (not inside the `if`) so the guard - when taken -
        // lives until the end of the function, covering the removal below
        let _guard;
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or used as base")?;
        }

        log::info!("removing backup snapshot {:?}", full_path);
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| {
                format_err!(
                    "removing backup snapshot {:?} failed - {}",
                    full_path,
                    err,
                )
            })?;

        Ok(())
    }
241 | ||
41b373ec DM |
242 | /// Returns the time of the last successful backup |
243 | /// | |
244 | /// Or None if there is no backup in the group (or the group dir does not exist). | |
245 | pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<DateTime<Utc>>, Error> { | |
246 | let base_path = self.base_path(); | |
247 | let mut group_path = base_path.clone(); | |
248 | group_path.push(backup_group.group_path()); | |
249 | ||
250 | if group_path.exists() { | |
251 | backup_group.last_successful_backup(&base_path) | |
252 | } else { | |
253 | Ok(None) | |
254 | } | |
255 | } | |
256 | ||
54552dda DM |
    /// Returns the backup owner.
    ///
    /// The backup owner is the user who first created the backup group.
    /// Read from the `owner` file inside the group directory (written by
    /// `set_owner`).
    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Userid, Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path.push("owner");
        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
        Ok(owner.trim_end().parse()?) // remove trailing newline
    }
267 | ||
268 | /// Set the backup owner. | |
e7cb4dc5 WB |
269 | pub fn set_owner( |
270 | &self, | |
271 | backup_group: &BackupGroup, | |
272 | userid: &Userid, | |
273 | force: bool, | |
274 | ) -> Result<(), Error> { | |
54552dda DM |
275 | let mut path = self.base_path(); |
276 | path.push(backup_group.group_path()); | |
277 | path.push("owner"); | |
278 | ||
279 | let mut open_options = std::fs::OpenOptions::new(); | |
280 | open_options.write(true); | |
281 | open_options.truncate(true); | |
282 | ||
283 | if force { | |
284 | open_options.create(true); | |
285 | } else { | |
286 | open_options.create_new(true); | |
287 | } | |
288 | ||
289 | let mut file = open_options.open(&path) | |
290 | .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?; | |
291 | ||
292 | write!(file, "{}\n", userid) | |
293 | .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?; | |
294 | ||
295 | Ok(()) | |
296 | } | |
297 | ||
    /// Create (if it does not already exists) and lock a backup group
    ///
    /// And set the owner to 'userid'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &BackupGroup,
        userid: &Userid,
    ) -> Result<(Userid, DirLockGuard), Error> {
        // create intermediate path first:
        let base_path = self.base_path();

        let mut full_path = base_path.clone();
        full_path.push(backup_group.backup_type());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(backup_group.backup_id());

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                // we created the group: lock it, then record ourselves as owner
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                self.set_owner(backup_group, userid, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                // group exists: lock it and report the existing owner unchanged
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }
334 | ||
    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    ///
    /// Returns the snapshot's relative path, a flag telling whether the
    /// directory was newly created (`true`) or already existed (`false`),
    /// and the held snapshot lock guard.
    pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
        -> Result<(PathBuf, bool, DirLockGuard), Error>
    {
        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        // lock helper shared by both success arms below
        let lock = ||
            lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
            Err(e) => Err(e.into())
        }
    }
354 | ||
    /// List all index files (fixed and dynamic) below the datastore base.
    ///
    /// Walks the whole datastore on the same filesystem, skipping hidden
    /// entries (notably `.chunks`). Permission errors abort the listing,
    /// except for a top-level `lost+found` directory which is tolerated.
    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).same_file_system(true).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with("."))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                let path = err.path().unwrap_or(Path::new(""));
                match inner.kind() {
                    io::ErrorKind::PermissionDenied => {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
                        }
                    },
                    _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    // tolerated errors are skipped, everything else bubbles up
                    handle_entry_err(err)?;
                    continue
                },
            };
            // keep only index files; blobs and unknown extensions are ignored
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }
404 | ||
a660978c DM |
    // mark chunks used by ``index`` as used
    //
    // "Marking" means touching the chunk file (updating its timestamps) so
    // the GC sweep phase can recognize it as still referenced. Inaccessible
    // chunks are logged as warnings, not errors, so the scan continues.
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &WorkerTask,
    ) -> Result<(), Error> {

        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            // honor task abort and daemon shutdown between chunks
            worker.fail_on_abort()?;
            tools::fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if let Err(err) = self.chunk_store.touch_chunk(digest) {
                worker.warn(&format!("warning: unable to access chunk {}, required by {:?} - {}",
                    proxmox::tools::digest_to_hex(digest), file_name, err));
            }
        }
        Ok(())
    }
428 | ||
    /// GC phase 1: mark every chunk referenced by any index file as used.
    ///
    /// Iterates over all index files from `list_images`, touching their
    /// chunks via `index_mark_used_chunks`, and logs coarse progress
    /// (whole percentage steps) to the worker task.
    fn mark_used_chunks(&self, status: &mut GarbageCollectionStatus, worker: &WorkerTask) -> Result<(), Error> {

        let image_list = self.list_images()?;

        let image_count = image_list.len();

        let mut done = 0;

        let mut last_percentage: usize = 0;

        for path in image_list {

            // honor task abort and daemon shutdown between index files
            worker.fail_on_abort()?;
            tools::fail_on_shutdown()?;

            // unknown file types are silently skipped
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex {
                    let index = self.open_fixed_reader(&path)?;
                    self.index_mark_used_chunks(index, &path, status, worker)?;
                } else if archive_type == ArchiveType::DynamicIndex {
                    let index = self.open_dynamic_reader(&path)?;
                    self.index_mark_used_chunks(index, &path, status, worker)?;
                }
            }
            done += 1;

            // image_count > 0 here, since the loop only runs for non-empty lists
            let percentage = done*100/image_count;
            if percentage > last_percentage {
                worker.log(format!("percentage done: phase1 {}% ({} of {} index files)",
                    percentage, done, image_count));
                last_percentage = percentage;
            }
        }

        Ok(())
    }
465 | ||
    /// Returns a clone of the status recorded by the last garbage collection run.
    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.last_gc_status.lock().unwrap().clone()
    }
3d5c11e5 | 469 | |
8545480a DM |
470 | pub fn garbage_collection_running(&self) -> bool { |
471 | if let Ok(_) = self.gc_mutex.try_lock() { false } else { true } | |
472 | } | |
473 | ||
    /// Run a full garbage collection (mark + sweep) on this datastore.
    ///
    /// Fails immediately if another GC already holds the GC mutex or the
    /// chunk store's exclusive lock cannot be taken. Progress and final
    /// statistics are logged to `worker`; the resulting status is stored
    /// for later retrieval via `last_gc_status`.
    pub fn garbage_collection(&self, worker: &WorkerTask) -> Result<(), Error> {

        // non-blocking: a second concurrent GC attempt errors out below
        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

            // wall-clock start of phase 1; chunks touched after this remain safe
            let phase1_start_time = unsafe { libc::time(std::ptr::null_mut()) };
            // writers older than phase 1 push the sweep cutoff further back
            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(worker.to_string());

            worker.log("Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, &worker)?;

            worker.log("Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(oldest_writer, phase1_start_time, &mut gc_status, &worker)?;

            worker.log(&format!("Removed garbage: {}", HumanByte::from(gc_status.removed_bytes)));
            worker.log(&format!("Removed chunks: {}", gc_status.removed_chunks));
            if gc_status.pending_bytes > 0 {
                worker.log(&format!("Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks));
            }

            worker.log(&format!("Original data usage: {}", HumanByte::from(gc_status.index_data_bytes)));

            if gc_status.index_data_bytes > 0 {
                // deduplicated on-disk size as percentage of logical index data
                let comp_per = (gc_status.disk_bytes as f64 * 100.)/gc_status.index_data_bytes as f64;
                worker.log(&format!("On-Disk usage: {} ({:.2}%)", HumanByte::from(gc_status.disk_bytes), comp_per));
            }

            worker.log(&format!("On-Disk chunks: {}", gc_status.disk_chunks));

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes/(gc_status.disk_chunks as u64);
                worker.log(&format!("Average chunk size: {}", HumanByte::from(avg_chunk)));
            }

            // publish the stats for `last_gc_status`
            *self.last_gc_status.lock().unwrap() = gc_status;

        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
3b7ade9e | 521 | |
1cf5178a DM |
    /// Try to take a shared (non-exclusive) lock on the chunk store without blocking.
    pub fn try_shared_chunk_store_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
        self.chunk_store.try_shared_lock()
    }
525 | ||
d48a9955 DM |
    /// Absolute path and hex digest string for the chunk with the given digest.
    pub fn chunk_path(&self, digest:&[u8; 32]) -> (PathBuf, String) {
        self.chunk_store.chunk_path(digest)
    }
529 | ||
2585a8a4 DM |
    /// Touch the chunk with `digest` if it exists; returns whether it did.
    ///
    /// With `fail_if_not_exist` a missing chunk is an error instead of `Ok(false)`.
    pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
        self.chunk_store.cond_touch_chunk(digest, fail_if_not_exist)
    }
533 | ||
    /// Insert a chunk blob under its digest into the chunk store.
    ///
    /// Delegates to `ChunkStore::insert_chunk`; see there for the meaning of
    /// the returned `(bool, u64)` pair.
    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk(chunk, digest)
    }
60f9a6ea | 541 | |
    /// Load a blob file (`filename`) from a snapshot directory as a `DataBlob`.
    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        // try_block lets both open and load errors share one context message
        proxmox::try_block!({
            let mut file = std::fs::File::open(&path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
    }
e4439025 DM |
552 | |
553 | ||
39f18b30 DM |
    /// Load the chunk with the given digest from the chunk store as a `DataBlob`.
    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

        let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);

        // try_block lets both open and load errors share one context message
        proxmox::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!(
            "store '{}', unable to load chunk '{}' - {}",
            self.name(),
            digest_str,
            err,
        ))
    }
e4439025 | 568 | |
521a0acb WB |
569 | pub fn load_manifest( |
570 | &self, | |
571 | backup_dir: &BackupDir, | |
ff86ef00 | 572 | ) -> Result<(BackupManifest, u64), Error> { |
39f18b30 DM |
573 | let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?; |
574 | let raw_size = blob.raw_size(); | |
60f9a6ea | 575 | let manifest = BackupManifest::try_from(blob)?; |
ff86ef00 | 576 | Ok((manifest, raw_size)) |
60f9a6ea | 577 | } |
e4439025 DM |
578 | |
579 | pub fn load_manifest_json( | |
580 | &self, | |
581 | backup_dir: &BackupDir, | |
582 | ) -> Result<Value, Error> { | |
583 | let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?; | |
8819d1f2 FG |
584 | // no expected digest available |
585 | let manifest_data = blob.decode(None, None)?; | |
e4439025 DM |
586 | let manifest: Value = serde_json::from_slice(&manifest_data[..])?; |
587 | Ok(manifest) | |
588 | } | |
589 | ||
    /// Store (replace) the manifest blob of a snapshot.
    ///
    /// Serializes `manifest` as pretty-printed JSON, encodes it as an
    /// unsigned blob (third `encode` flag presumably enables compression -
    /// confirm against `DataBlob::encode`), and atomically replaces the
    /// manifest file via `replace_file`.
    pub fn store_manifest(
        &self,
        backup_dir: &BackupDir,
        manifest: Value,
    ) -> Result<(), Error> {
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();

        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(MANIFEST_BLOB_NAME);

        replace_file(&path, raw_data, CreateOptions::new())?;

        Ok(())
    }
529de6c7 | 607 | } |