1 use std
::collections
::{HashSet, HashMap}
;
2 use std
::io
::{self, Write}
;
3 use std
::path
::{Path, PathBuf}
;
4 use std
::sync
::{Arc, Mutex}
;
5 use std
::convert
::TryFrom
;
7 use std
::time
::Duration
;
9 use anyhow
::{bail, format_err, Error}
;
10 use lazy_static
::lazy_static
;
12 use proxmox_sys
::fs
::{replace_file, file_read_optional_string, CreateOptions}
;
13 use proxmox_sys
::process_locker
::ProcessLockSharedGuard
;
14 use proxmox_sys
::WorkerTaskContext
;
15 use proxmox_sys
::{task_log, task_warn}
;
16 use proxmox_sys
::fs
::{lock_dir_noblock, DirLockGuard}
;
18 use pbs_api_types
::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus, HumanByte}
;
19 use pbs_config
::{open_backup_lockfile, BackupLockGuard}
;
22 use crate::backup_info
::{BackupGroup, BackupDir}
;
23 use crate::chunk_store
::ChunkStore
;
24 use crate::dynamic_index
::{DynamicIndexReader, DynamicIndexWriter}
;
25 use crate::fixed_index
::{FixedIndexReader, FixedIndexWriter}
;
26 use crate::index
::IndexFile
;
27 use crate::manifest
::{
28 MANIFEST_BLOB_NAME
, MANIFEST_LOCK_NAME
, CLIENT_LOG_BLOB_NAME
,
29 ArchiveType
, BackupManifest
,
// NOTE(review): this file is a lossy extraction — original source line numbers
// are embedded as leading digits and many lines are absent (here, the
// `lazy_static! {` opener and closing brace). Do not expect it to compile
// as-is; comments document only what the visible code shows.
// Process-global cache of open datastores, keyed by datastore name and
// guarded by a Mutex; consulted and populated by lookup_datastore() below.
34 static ref DATASTORE_MAP
: Mutex
<HashMap
<String
, Arc
<DataStore
>>> = Mutex
::new(HashMap
::new());
37 /// Checks if `auth_id` is the owner, or, if the owner is a token,
38 /// whether `auth_id` is the user that token belongs to.
39 pub fn check_backup_owner(
// NOTE(review): the parameter list (original lines 40-41) is missing from
// this extraction — presumably `owner: &Authid, auth_id: &Authid`; confirm
// against the upstream source.
42 ) -> Result
<(), Error
> {
// Direct match, or the owner is a token whose underlying user equals auth_id.
43 let correct_owner
= owner
== auth_id
44 || (owner
.is_token() && &Authid
::from(owner
.user().clone()) == auth_id
);
// NOTE(review): the guard around this bail (original line 45, presumably
// `if !correct_owner {`) and the trailing `Ok(())` are missing here.
46 bail
!("backup owner check failed ({} != {})", auth_id
, owner
);
51 /// Datastore Management
53 /// A Datastore can store several backups, and provides the
54 /// management interface for backup.
55 pub struct DataStore
{
// Underlying content-addressed chunk store, shared via Arc.
56 chunk_store
: Arc
<ChunkStore
>,
// NOTE(review): at least one field is missing from this extraction
// (original line 57, presumably `gc_mutex` — open_with_path() below
// initializes `gc_mutex` and `verify_new` in addition to these fields).
// Result of the last garbage-collection run, behind a Mutex.
58 last_gc_status
: Mutex
<GarbageCollectionStatus
>,
// Looks up (or lazily opens and caches) the DataStore for `name`.
64 pub fn lookup_datastore(name
: &str) -> Result
<Arc
<DataStore
>, Error
> {
// Load the datastore configuration (config digest unused here).
66 let (config
, _digest
) = pbs_config
::datastore
::config()?
;
// Extract this datastore's section from the config file.
67 let config
: DataStoreConfig
= config
.lookup("datastore", name
)?
;
68 let path
= PathBuf
::from(&config
.path
);
// Consult the process-wide cache first (lock held for the whole lookup).
70 let mut map
= DATASTORE_MAP
.lock().unwrap();
72 if let Some(datastore
) = map
.get(name
) {
73 // Compare Config - if changed, create new Datastore object!
74 if datastore
.chunk_store
.base() == path
&&
75 datastore
.verify_new
== config
.verify_new
.unwrap_or(false)
// Cached entry still matches the on-disk config: reuse it.
77 return Ok(datastore
.clone());
// Cache miss or stale entry: open a fresh DataStore and cache it.
81 let datastore
= DataStore
::open_with_path(name
, &path
, config
)?
;
83 let datastore
= Arc
::new(datastore
);
84 map
.insert(name
.to_string(), datastore
.clone());
// NOTE(review): the final return (presumably `Ok(datastore)`) and closing
// braces (original lines ~85-87) are missing from this extraction.
89 /// Removes all cached datastores that are not configured anymore.
90 pub fn remove_unused_datastores() -> Result
<(), Error
>{
91 let (config
, _digest
) = pbs_config
::datastore
::config()?
;
93 let mut map
= DATASTORE_MAP
.lock().unwrap();
94 // removes all elements that are not in the config
// NOTE(review): the call wrapping this predicate (original line 95,
// presumably `map.retain(|key, _| ...)`) and the trailing `Ok(())` are
// missing from this extraction.
96 config
.sections
.contains_key(key
)
// Opens the chunk store at `path` and builds a DataStore, restoring the
// persisted GC status from the `.gc-status` file if present.
101 fn open_with_path(store_name
: &str, path
: &Path
, config
: DataStoreConfig
) -> Result
<Self, Error
> {
102 let chunk_store
= ChunkStore
::open(store_name
, path
)?
;
// GC status is persisted as JSON in `<chunk-store-base>/.gc-status`.
104 let mut gc_status_path
= chunk_store
.base_path();
105 gc_status_path
.push(".gc-status");
107 let gc_status
= if let Some(state
) = file_read_optional_string(gc_status_path
)?
{
108 match serde_json
::from_str(&state
) {
// NOTE(review): match arms are partially missing (original lines 109-110);
// the visible error arm logs and falls back to the default status.
111 eprintln
!("error reading gc-status: {}", err
);
112 GarbageCollectionStatus
::default()
// No status file on disk: start from a default status.
116 GarbageCollectionStatus
::default()
// NOTE(review): the `Ok(Self { ... })` constructor opener (original lines
// ~117-119) is missing; the visible lines are its field initializers.
120 chunk_store
: Arc
::new(chunk_store
),
121 gc_mutex
: Mutex
::new(()),
122 last_gc_status
: Mutex
::new(gc_status
),
// verify_new defaults to false when not set in the config.
123 verify_new
: config
.verify_new
.unwrap_or(false),
// Iterator over all chunks in the underlying chunk store
// (delegates to ChunkStore::get_chunk_iterator).
127 pub fn get_chunk_iterator(
// NOTE(review): part of this signature (original lines 128-132) is missing
// from the extraction; the visible return type is an iterator of
// (read-dir result, usize, bool) tuples.
130 impl Iterator
<Item
= (Result
<proxmox_sys
::fs
::ReadDirEntry
, Error
>, usize, bool
)>,
133 self.chunk_store
.get_chunk_iterator()
// Creates a fixed-size-chunk index writer backed by this datastore's
// chunk store.
136 pub fn create_fixed_writer
<P
: AsRef
<Path
>>(&self, filename
: P
, size
: usize, chunk_size
: usize) -> Result
<FixedIndexWriter
, Error
> {
138 let index
= FixedIndexWriter
::create(self.chunk_store
.clone(), filename
.as_ref(), size
, chunk_size
)?
;
// Opens a fixed index for reading; `filename` is resolved relative to the
// chunk store.
143 pub fn open_fixed_reader
<P
: AsRef
<Path
>>(&self, filename
: P
) -> Result
<FixedIndexReader
, Error
> {
145 let full_path
= self.chunk_store
.relative_path(filename
.as_ref());
147 let index
= FixedIndexReader
::open(&full_path
)?
;
// Creates a dynamic (variable-size-chunk) index writer.
152 pub fn create_dynamic_writer
<P
: AsRef
<Path
>>(
// NOTE(review): the parameter line (original line 153, presumably
// `&self, filename: P,`) is missing from this extraction.
154 ) -> Result
<DynamicIndexWriter
, Error
> {
156 let index
= DynamicIndexWriter
::create(
157 self.chunk_store
.clone(), filename
.as_ref())?
;
// Opens a dynamic index for reading; path resolved like open_fixed_reader.
162 pub fn open_dynamic_reader
<P
: AsRef
<Path
>>(&self, filename
: P
) -> Result
<DynamicIndexReader
, Error
> {
164 let full_path
= self.chunk_store
.relative_path(filename
.as_ref());
166 let index
= DynamicIndexReader
::open(&full_path
)?
;
// Opens either index flavor based on the file's archive type and returns it
// as a boxed trait object.
171 pub fn open_index
<P
>(&self, filename
: P
) -> Result
<Box
<dyn IndexFile
+ Send
>, Error
>
// NOTE(review): the `where` bound and opening brace (original lines
// ~172-174) are missing from this extraction.
175 let filename
= filename
.as_ref();
176 let out
: Box
<dyn IndexFile
+ Send
> =
// Dispatch on the archive type derived from the file name.
177 match archive_type(filename
)?
{
178 ArchiveType
::DynamicIndex
=> Box
::new(self.open_dynamic_reader(filename
)?
),
179 ArchiveType
::FixedIndex
=> Box
::new(self.open_fixed_reader(filename
)?
),
// Anything that is not an index (e.g. a blob) is an error here.
180 _
=> bail
!("cannot open index file of unknown type: {:?}", filename
),
185 /// Fast index verification - only check if chunks exist
186 pub fn fast_index_verification(
// NOTE(review): the `&self` parameter line (original line 187) is missing
// from this extraction.
188 index
: &dyn IndexFile
,
// Digests already verified by a previous call; used to skip re-stating and
// extended with every chunk checked here.
189 checked
: &mut HashSet
<[u8;32]>,
190 ) -> Result
<(), Error
> {
192 for pos
in 0..index
.index_count() {
193 let info
= index
.chunk_info(pos
).unwrap();
// Skip chunks already checked in an earlier pass.
194 if checked
.contains(&info
.digest
) {
// Existence check only: stat the chunk file, no content verification.
198 self.stat_chunk(&info
.digest
).
// NOTE(review): the error-mapping combinator around this message (original
// lines ~199-206) is partially missing from this extraction.
201 "fast_index_verification error, stat_chunk {} failed - {}",
202 hex
::encode(&info
.digest
),
207 checked
.insert(info
.digest
);
// Name of this datastore (delegates to the chunk store).
213 pub fn name(&self) -> &str {
214 self.chunk_store
.name()
// Absolute base directory of this datastore on disk.
217 pub fn base_path(&self) -> PathBuf
{
218 self.chunk_store
.base_path()
221 /// Cleanup a backup directory
223 /// Removes all files not mentioned in the manifest.
224 pub fn cleanup_backup_dir(&self, backup_dir
: &BackupDir
, manifest
: &BackupManifest
225 ) -> Result
<(), Error
> {
227 let mut full_path
= self.base_path();
228 full_path
.push(backup_dir
.relative_path());
// Build the keep-set: the manifest blob and client log are always kept,
// plus every file the manifest lists.
230 let mut wanted_files
= HashSet
::new();
231 wanted_files
.insert(MANIFEST_BLOB_NAME
.to_string());
232 wanted_files
.insert(CLIENT_LOG_BLOB_NAME
.to_string());
233 manifest
.files().iter().for_each(|item
| { wanted_files.insert(item.filename.clone()); }
);
235 for item
in proxmox_sys
::fs
::read_subdir(libc
::AT_FDCWD
, &full_path
)?
{
// Unreadable directory entries are silently skipped (best-effort cleanup).
236 if let Ok(item
) = item
{
237 if let Some(file_type
) = item
.file_type() {
// Only regular files are candidates for removal.
238 if file_type
!= nix
::dir
::Type
::File { continue; }
240 let file_name
= item
.file_name().to_bytes();
241 if file_name
== b
"." || file_name
== b
".." { continue; }
;
243 if let Ok(name
) = std
::str::from_utf8(file_name
) {
244 if wanted_files
.contains(name
) { continue; }
// Anything not in the keep-set gets unlinked; the unlinkat result is
// deliberately ignored (best-effort).
246 println
!("remove unused file {:?}", item
.file_name());
247 let dirfd
= item
.parent_fd();
248 let _res
= unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) }
;
255 /// Returns the absolute path for a backup_group
256 pub fn group_path(&self, backup_group
: &BackupGroup
) -> PathBuf
{
257 let mut full_path
= self.base_path();
258 full_path
.push(backup_group
.group_path());
262 /// Returns the absolute path for backup_dir
263 pub fn snapshot_path(&self, backup_dir
: &BackupDir
) -> PathBuf
{
264 let mut full_path
= self.base_path();
265 full_path
.push(backup_dir
.relative_path());
269 /// Remove a complete backup group including all snapshots, returns true
270 /// if all snapshots were removed, and false if some were protected
271 pub fn remove_backup_group(&self, backup_group
: &BackupGroup
) -> Result
<bool
, Error
> {
273 let full_path
= self.group_path(backup_group
);
// Exclusive non-blocking lock: fails fast if a backup may be running.
275 let _guard
= proxmox_sys
::fs
::lock_dir_noblock(&full_path
, "backup group", "possible running backup")?
;
277 log
::info
!("removing backup group {:?}", full_path
);
279 let mut removed_all
= true;
281 // remove all individual backup dirs first to ensure nothing is using them
282 for snap
in backup_group
.list_backups(&self.base_path())?
{
// Protected snapshots are skipped (removed_all presumably cleared in the
// missing lines 284-286 of this extraction).
283 if snap
.backup_dir
.is_protected(self.base_path()) {
287 self.remove_backup_dir(&snap
.backup_dir
, false)?
;
// NOTE(review): the `if removed_all {` guard (original lines ~289-290)
// around the directory removal is missing from this extraction.
291 // no snapshots left, we can now safely remove the empty folder
292 std
::fs
::remove_dir_all(&full_path
)
295 "removing backup group directory {:?} failed - {}",
305 /// Remove a backup directory including all content
306 pub fn remove_backup_dir(&self, backup_dir
: &BackupDir
, force
: bool
) -> Result
<(), Error
> {
308 let full_path
= self.snapshot_path(backup_dir
);
// Guards are declared first so both live to end of scope once acquired.
310 let (_guard
, _manifest_guard
);
// NOTE(review): the `if !force {` condition (original line 311) around
// these lock acquisitions is missing from this extraction.
312 _guard
= lock_dir_noblock(&full_path
, "snapshot", "possibly running or in use")?
;
313 _manifest_guard
= self.lock_manifest(backup_dir
)?
;
// Protected snapshots may never be removed, even with force.
316 if backup_dir
.is_protected(self.base_path()) {
317 bail
!("cannot remove protected snapshot");
320 log
::info
!("removing backup snapshot {:?}", full_path
);
321 std
::fs
::remove_dir_all(&full_path
)
// NOTE(review): the map_err/format_err wrapper around this message
// (original lines 322-323, 325-329) is partially missing here.
324 "removing backup snapshot {:?} failed - {}",
330 // the manifest does not exist anymore, we do not need to keep the lock
331 if let Ok(path
) = self.manifest_lock_path(backup_dir
) {
// Best-effort removal of the stale lockfile; errors ignored on purpose.
333 let _
= std
::fs
::remove_file(path
);
339 /// Returns the time of the last successful backup
341 /// Or None if there is no backup in the group (or the group dir does not exist).
342 pub fn last_successful_backup(&self, backup_group
: &BackupGroup
) -> Result
<Option
<i64>, Error
> {
343 let base_path
= self.base_path();
344 let mut group_path
= base_path
.clone();
345 group_path
.push(backup_group
.group_path());
// Delegate to the group if its directory exists; the None branch (original
// lines ~349-351) is missing from this extraction.
347 if group_path
.exists() {
348 backup_group
.last_successful_backup(&base_path
)
354 /// Returns the backup owner.
356 /// The backup owner is the entity who first created the backup group.
357 pub fn get_owner(&self, backup_group
: &BackupGroup
) -> Result
<Authid
, Error
> {
// Owner is stored as the first line of the `owner` file in the group dir.
358 let mut full_path
= self.base_path();
359 full_path
.push(backup_group
.group_path());
360 full_path
.push("owner");
361 let owner
= proxmox_sys
::fs
::file_read_firstline(full_path
)?
;
362 Ok(owner
.trim_end().parse()?
) // remove trailing newline
// True when `auth_id` passes the ownership check for this group.
365 pub fn owns_backup(&self, backup_group
: &BackupGroup
, auth_id
: &Authid
) -> Result
<bool
, Error
> {
366 let owner
= self.get_owner(backup_group
)?
;
368 Ok(check_backup_owner(&owner
, auth_id
).is_ok())
371 /// Set the backup owner.
// NOTE(review): the fn signature line and remaining parameters (original
// lines 372-373, 375-376 — presumably `&self`, `auth_id: &Authid`,
// `force: bool`) are missing from this extraction.
374 backup_group
: &BackupGroup
,
377 ) -> Result
<(), Error
> {
// Owner file lives at `<base>/<group>/owner` (the `push("owner")` line is
// among the missing lines ~380-381).
378 let mut path
= self.base_path();
379 path
.push(backup_group
.group_path());
382 let mut open_options
= std
::fs
::OpenOptions
::new();
383 open_options
.write(true);
384 open_options
.truncate(true);
// NOTE(review): the branch selecting between these (original lines
// 385-391, presumably `if force { create } else { create_new }`) is
// partially missing from this extraction.
387 open_options
.create(true);
389 open_options
.create_new(true);
392 let mut file
= open_options
.open(&path
)
393 .map_err(|err
| format_err
!("unable to create owner file {:?} - {}", path
, err
))?
;
// Write the owner id followed by a newline (get_owner trims it on read).
395 writeln
!(file
, "{}", auth_id
)
396 .map_err(|err
| format_err
!("unable to write owner file {:?} - {}", path
, err
))?
;
401 /// Create (if it does not already exist) and lock a backup group
403 /// And set the owner to 'userid'. If the group already exists, it returns the
404 /// current owner (instead of setting the owner).
406 /// This also acquires an exclusive lock on the directory and returns the lock guard.
407 pub fn create_locked_backup_group(
// NOTE(review): remaining parameters (original lines 408, 410 — presumably
// `&self` and `auth_id: &Authid`) are missing from this extraction.
409 backup_group
: &BackupGroup
,
411 ) -> Result
<(Authid
, DirLockGuard
), Error
> {
412 // create intermediate path first:
413 let mut full_path
= self.base_path();
414 full_path
.push(backup_group
.backup_type());
415 std
::fs
::create_dir_all(&full_path
)?
;
417 full_path
.push(backup_group
.backup_id());
419 // create the last component now
420 match std
::fs
::create_dir(&full_path
) {
// Directory freshly created: lock it, set the owner, and re-read it.
// NOTE(review): the `Ok(_) => {` arm opener (original line 421) is missing.
422 let guard
= lock_dir_noblock(&full_path
, "backup group", "another backup is already running")?
;
423 self.set_owner(backup_group
, auth_id
, false)?
;
424 let owner
= self.get_owner(backup_group
)?
; // just to be sure
// Directory already existed: lock it and return the existing owner.
427 Err(ref err
) if err
.kind() == io
::ErrorKind
::AlreadyExists
=> {
428 let guard
= lock_dir_noblock(&full_path
, "backup group", "another backup is already running")?
;
429 let owner
= self.get_owner(backup_group
)?
; // just to be sure
// Any other create_dir error is fatal.
432 Err(err
) => bail
!("unable to create backup group {:?} - {}", full_path
, err
),
436 /// Creates a new backup snapshot inside a BackupGroup
438 /// The BackupGroup directory needs to exist.
439 pub fn create_locked_backup_dir(&self, backup_dir
: &BackupDir
)
440 -> Result
<(PathBuf
, bool
, DirLockGuard
), Error
>
442 let relative_path
= backup_dir
.relative_path();
443 let mut full_path
= self.base_path();
444 full_path
.push(&relative_path
);
// NOTE(review): the surrounding `lock` closure definition (original lines
// ~446-448, judging by the `lock()?` calls below) is missing from this
// extraction; this line is its body.
447 lock_dir_noblock(&full_path
, "snapshot", "internal error - tried creating snapshot that's already in use");
449 match std
::fs
::create_dir(&full_path
) {
// Second tuple element flags whether the snapshot dir was newly created.
450 Ok(_
) => Ok((relative_path
, true, lock()?
)),
451 Err(ref e
) if e
.kind() == io
::ErrorKind
::AlreadyExists
=> Ok((relative_path
, false, lock()?
)),
452 Err(e
) => Err(e
.into())
// Walks the datastore and collects the paths of all index files
// (fixed and dynamic), skipping hidden entries like `.chunks`.
456 pub fn list_images(&self) -> Result
<Vec
<PathBuf
>, Error
> {
457 let base
= self.base_path();
459 let mut list
= vec
![];
461 use walkdir
::WalkDir
;
463 let walker
= WalkDir
::new(&base
).into_iter();
465 // make sure we skip .chunks (and other hidden files to keep it simple)
466 fn is_hidden(entry
: &walkdir
::DirEntry
) -> bool
{
// NOTE(review): the start of this predicate (original lines 467-468,
// presumably `entry.file_name().to_str()`) is missing from this extraction.
469 .map(|s
| s
.starts_with('
.'
))
// Classifies traversal errors: permission-denied on a top-level
// `lost+found` is tolerated, everything else aborts the walk.
472 let handle_entry_err
= |err
: walkdir
::Error
| {
473 if let Some(inner
) = err
.io_error() {
474 if let Some(path
) = err
.path() {
475 if inner
.kind() == io
::ErrorKind
::PermissionDenied
{
476 // only allow to skip ext4 fsck directory, avoid GC if, for example,
477 // a user got file permissions wrong on datastore rsync to new server
478 if err
.depth() > 1 || !path
.ends_with("lost+found") {
479 bail
!("cannot continue garbage-collection safely, permission denied on: {:?}", path
)
482 bail
!("unexpected error on datastore traversal: {} - {:?}", inner
, path
)
485 bail
!("unexpected error on datastore traversal: {}", inner
)
490 for entry
in walker
.filter_entry(|e
| !is_hidden(e
)) {
491 let path
= match entry
{
492 Ok(entry
) => entry
.into_path(),
// NOTE(review): the `Err(err) =>` arm opener (original line 493) and the
// rest of this arm are missing from this extraction.
494 handle_entry_err(err
)?
;
// Collect only index files; the `list.push(path)` / `Ok(list)` lines
// (original ~500-505) are missing from this extraction.
498 if let Ok(archive_type
) = archive_type(&path
) {
499 if archive_type
== ArchiveType
::FixedIndex
|| archive_type
== ArchiveType
::DynamicIndex
{
508 // mark chunks used by ``index`` as used
509 fn index_mark_used_chunks
<I
: IndexFile
>(
// NOTE(review): the `&self` and `index: I` parameter lines (original lines
// 510-511) are missing from this extraction.
512 file_name
: &Path
, // only used for error reporting
513 status
: &mut GarbageCollectionStatus
,
514 worker
: &dyn WorkerTaskContext
,
515 ) -> Result
<(), Error
> {
// Accumulate per-index statistics before walking the chunks.
517 status
.index_file_count
+= 1;
518 status
.index_data_bytes
+= index
.index_bytes();
520 for pos
in 0..index
.index_count() {
// Cooperate with task cancellation/shutdown on every chunk.
521 worker
.check_abort()?
;
522 worker
.fail_on_shutdown()?
;
523 let digest
= index
.index_digest(pos
).unwrap();
// cond_touch_chunk with fail_if_not_exist=false: marks the chunk as used
// (atime touch) and reports whether it exists.
524 if !self.chunk_store
.cond_touch_chunk(digest
, false)?
{
// NOTE(review): the warning emission wrapping this message (original
// lines 525-531) is partially missing from this extraction.
527 "warning: unable to access non-existent chunk {}, required by {:?}",
532 // touch any corresponding .bad files to keep them around, meaning if a chunk is
533 // rewritten correctly they will be removed automatically, as well as if no index
534 // file requires the chunk anymore (won't get to this loop then)
// NOTE(review): the loop over bad-file indices `i` (original line 535) is
// missing from this extraction.
536 let bad_ext
= format
!("{}.bad", i
);
537 let mut bad_path
= PathBuf
::new();
538 bad_path
.push(self.chunk_path(digest
).0);
539 bad_path
.set_extension(bad_ext
);
540 self.chunk_store
.cond_touch_path(&bad_path
, false)?
;
// NOTE(review): the fn signature (original line ~548, presumably
// `fn mark_used_chunks(&self, ...)` — GC phase 1) is missing from this
// extraction; these are its remaining parameters.
549 status
: &mut GarbageCollectionStatus
,
550 worker
: &dyn WorkerTaskContext
,
551 ) -> Result
<(), Error
> {
553 let image_list
= self.list_images()?
;
554 let image_count
= image_list
.len();
// For throttled progress reporting below.
556 let mut last_percentage
: usize = 0;
// Counts index files found outside the expected <type>/<id>/<time> layout.
558 let mut strange_paths_count
: u64 = 0;
560 for (i
, img
) in image_list
.into_iter().enumerate() {
562 worker
.check_abort()?
;
563 worker
.fail_on_shutdown()?
;
// Detect index files whose parent path does not parse as a BackupDir.
565 if let Some(backup_dir_path
) = img
.parent() {
566 let backup_dir_path
= backup_dir_path
.strip_prefix(self.base_path())?
;
567 if let Some(backup_dir_str
) = backup_dir_path
.to_str() {
568 if BackupDir
::from_str(backup_dir_str
).is_err() {
569 strange_paths_count
+= 1;
574 match std
::fs
::File
::open(&img
) {
// NOTE(review): the `Ok(file) => {` arm opener (original line 575) is
// missing from this extraction.
576 if let Ok(archive_type
) = archive_type(&img
) {
577 if archive_type
== ArchiveType
::FixedIndex
{
578 let index
= FixedIndexReader
::new(file
).map_err(|e
| {
579 format_err
!("can't read index '{}' - {}", img
.to_string_lossy(), e
)
581 self.index_mark_used_chunks(index
, &img
, status
, worker
)?
;
582 } else if archive_type
== ArchiveType
::DynamicIndex
{
583 let index
= DynamicIndexReader
::new(file
).map_err(|e
| {
584 format_err
!("can't read index '{}' - {}", img
.to_string_lossy(), e
)
586 self.index_mark_used_chunks(index
, &img
, status
, worker
)?
;
590 Err(err
) if err
.kind() == io
::ErrorKind
::NotFound
=> (), // ignore vanished files
591 Err(err
) => bail
!("can't open index {} - {}", img
.to_string_lossy(), err
),
// Log progress only when the integer percentage advances.
594 let percentage
= (i
+ 1) * 100 / image_count
;
595 if percentage
> last_percentage
{
598 "marked {}% ({} of {} index files)",
603 last_percentage
= percentage
;
607 if strange_paths_count
> 0 {
610 "found (and marked) {} index files outside of expected directory scheme",
// Snapshot (clone) of the most recent garbage-collection status.
619 pub fn last_gc_status(&self) -> GarbageCollectionStatus
{
620 self.last_gc_status
.lock().unwrap().clone()
// True while a GC run holds gc_mutex (try_lock fails).
623 pub fn garbage_collection_running(&self) -> bool
{
624 !matches
!(self.gc_mutex
.try_lock(), Ok(_
))
// Runs a full garbage collection (mark phase, sweep phase, stats reporting,
// persisting the status). Refuses to run concurrently via gc_mutex.
627 pub fn garbage_collection(&self, worker
: &dyn WorkerTaskContext
, upid
: &UPID
) -> Result
<(), Error
> {
629 if let Ok(ref mut _mutex
) = self.gc_mutex
.try_lock() {
631 // avoids that we run GC if an old daemon process has still a
632 // running backup writer, which is not safe as we have no "oldest
633 // writer" information and thus no safe atime cutoff
634 let _exclusive_lock
= self.chunk_store
.try_exclusive_lock()?
;
636 let phase1_start_time
= proxmox_time
::epoch_i64();
// atime cutoff: fall back to phase1 start if no writer is registered.
637 let oldest_writer
= self.chunk_store
.oldest_writer().unwrap_or(phase1_start_time
);
639 let mut gc_status
= GarbageCollectionStatus
::default();
640 gc_status
.upid
= Some(upid
.to_string());
// Phase 1: touch every chunk referenced by any index file.
642 task_log
!(worker
, "Start GC phase1 (mark used chunks)");
644 self.mark_used_chunks(&mut gc_status
, worker
)?
;
// Phase 2: remove chunks whose atime predates the cutoff.
646 task_log
!(worker
, "Start GC phase2 (sweep unused chunks)");
647 self.chunk_store
.sweep_unused_chunks(
// NOTE(review): sweep arguments and several task_log wrappers (original
// lines 648-655 and scattered below) are missing from this extraction;
// the remaining lines report statistics gathered in gc_status.
656 "Removed garbage: {}",
657 HumanByte
::from(gc_status
.removed_bytes
),
659 task_log
!(worker
, "Removed chunks: {}", gc_status
.removed_chunks
);
660 if gc_status
.pending_bytes
> 0 {
663 "Pending removals: {} (in {} chunks)",
664 HumanByte
::from(gc_status
.pending_bytes
),
665 gc_status
.pending_chunks
,
668 if gc_status
.removed_bad
> 0 {
669 task_log
!(worker
, "Removed bad chunks: {}", gc_status
.removed_bad
);
672 if gc_status
.still_bad
> 0 {
673 task_log
!(worker
, "Leftover bad chunks: {}", gc_status
.still_bad
);
678 "Original data usage: {}",
679 HumanByte
::from(gc_status
.index_data_bytes
),
// Compression ratio of on-disk bytes vs. logical index data.
682 if gc_status
.index_data_bytes
> 0 {
683 let comp_per
= (gc_status
.disk_bytes
as f64 * 100.)/gc_status
.index_data_bytes
as f64;
686 "On-Disk usage: {} ({:.2}%)",
687 HumanByte
::from(gc_status
.disk_bytes
),
692 task_log
!(worker
, "On-Disk chunks: {}", gc_status
.disk_chunks
);
// Guard against division by zero when the store is empty.
694 let deduplication_factor
= if gc_status
.disk_bytes
> 0 {
695 (gc_status
.index_data_bytes
as f64)/(gc_status
.disk_bytes
as f64)
700 task_log
!(worker
, "Deduplication factor: {:.2}", deduplication_factor
);
702 if gc_status
.disk_chunks
> 0 {
703 let avg_chunk
= gc_status
.disk_bytes
/(gc_status
.disk_chunks
as u64);
704 task_log
!(worker
, "Average chunk size: {}", HumanByte
::from(avg_chunk
));
// Persist the status to `.gc-status` (best-effort — serialize and write
// failures are deliberately ignored).
707 if let Ok(serialized
) = serde_json
::to_string(&gc_status
) {
708 let mut path
= self.base_path();
709 path
.push(".gc-status");
711 let backup_user
= pbs_config
::backup_user()?
;
712 let mode
= nix
::sys
::stat
::Mode
::from_bits_truncate(0o0644);
713 // set the correct owner/group/permissions while saving file
714 // owner(rw) = backup, group(r)= backup
715 let options
= CreateOptions
::new()
717 .owner(backup_user
.uid
)
718 .group(backup_user
.gid
);
721 let _
= replace_file(path
, serialized
.as_bytes(), options
, false);
// Publish the new status for last_gc_status() readers.
724 *self.last_gc_status
.lock().unwrap() = gc_status
;
// gc_mutex was already held: another GC is running.
727 bail
!("Start GC failed - (already running/locked)");
// Thin delegation methods forwarding to the underlying ChunkStore.
733 pub fn try_shared_chunk_store_lock(&self) -> Result
<ProcessLockSharedGuard
, Error
> {
734 self.chunk_store
.try_shared_lock()
// On-disk path (and digest string) for a chunk digest.
737 pub fn chunk_path(&self, digest
:&[u8; 32]) -> (PathBuf
, String
) {
738 self.chunk_store
.chunk_path(digest
)
// Touch a chunk if present; returns whether it exists.
741 pub fn cond_touch_chunk(&self, digest
: &[u8; 32], fail_if_not_exist
: bool
) -> Result
<bool
, Error
> {
742 self.chunk_store
.cond_touch_chunk(digest
, fail_if_not_exist
)
// NOTE(review): the insert_chunk signature (original lines ~745-748) is
// missing from this extraction; only its return type and body remain.
749 ) -> Result
<(bool
, u64), Error
> {
750 self.chunk_store
.insert_chunk(chunk
, digest
)
// Loads a named blob (e.g. the manifest) from a snapshot directory.
753 pub fn load_blob(&self, backup_dir
: &BackupDir
, filename
: &str) -> Result
<DataBlob
, Error
> {
754 let mut path
= self.base_path();
755 path
.push(backup_dir
.relative_path());
// NOTE(review): the `path.push(filename)` line (original ~756) is missing
// from this extraction.
758 proxmox_lang
::try_block
!({
759 let mut file
= std
::fs
::File
::open(&path
)?
;
760 DataBlob
::load_from_reader(&mut file
)
761 }).map_err(|err
| format_err
!("unable to load blob '{:?}' - {}", path
, err
))
// Stats a chunk file on disk (used for existence checks and inode order).
765 pub fn stat_chunk(&self, digest
: &[u8; 32]) -> Result
<std
::fs
::Metadata
, Error
> {
766 let (chunk_path
, _digest_str
) = self.chunk_store
.chunk_path(digest
);
767 std
::fs
::metadata(chunk_path
).map_err(Error
::from
)
// Reads a chunk's DataBlob from disk by digest.
770 pub fn load_chunk(&self, digest
: &[u8; 32]) -> Result
<DataBlob
, Error
> {
772 let (chunk_path
, digest_str
) = self.chunk_store
.chunk_path(digest
);
774 proxmox_lang
::try_block
!({
775 let mut file
= std
::fs
::File
::open(&chunk_path
)?
;
776 DataBlob
::load_from_reader(&mut file
)
777 }).map_err(|err
| format_err
!(
// NOTE(review): the format arguments following this string (original
// lines ~779-781) are missing from this extraction.
778 "store '{}', unable to load chunk '{}' - {}",
785 /// Returns the filename to lock a manifest
787 /// Also creates the basedir. The lockfile is located in
788 /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
789 fn manifest_lock_path(
// NOTE(review): the `&self` parameter line (original line 790) is missing
// from this extraction.
791 backup_dir
: &BackupDir
,
792 ) -> Result
<String
, Error
> {
793 let mut path
= format
!(
794 "/run/proxmox-backup/locks/{}/{}/{}",
// NOTE(review): the first format argument (original line 795, presumably
// the datastore name) is missing from this extraction.
796 backup_dir
.group().backup_type(),
797 backup_dir
.group().backup_id(),
// Ensure the lock basedir exists before appending the file name.
799 std
::fs
::create_dir_all(&path
)?
;
// Appends `/{timestamp}{MANIFEST_LOCK_NAME}` to the path string.
801 write
!(path
, "/{}{}", backup_dir
.backup_time_string(), &MANIFEST_LOCK_NAME
)?
;
// NOTE(review): the lock_manifest signature (original lines ~805-807) is
// missing from this extraction; only its parameters and body remain.
808 backup_dir
: &BackupDir
,
809 ) -> Result
<BackupLockGuard
, Error
> {
810 let path
= self.manifest_lock_path(backup_dir
)?
;
812 // update_manifest should never take a long time, so if someone else has
813 // the lock we can simply block a bit and should get it soon
814 open_backup_lockfile(&path
, Some(Duration
::from_secs(5)), true)
// NOTE(review): the map_err wrapper around this message (original lines
// ~815-818) is partially missing from this extraction.
817 "unable to acquire manifest lock {:?} - {}", &path
, err
822 /// Load the manifest without a lock. Must not be written back.
823 pub fn load_manifest(
// NOTE(review): the `&self` parameter line (original line 824) is missing
// from this extraction.
825 backup_dir
: &BackupDir
,
826 ) -> Result
<(BackupManifest
, u64), Error
> {
827 let blob
= self.load_blob(backup_dir
, MANIFEST_BLOB_NAME
)?
;
// Raw (on-disk) size is returned alongside the parsed manifest.
828 let raw_size
= blob
.raw_size();
829 let manifest
= BackupManifest
::try_from(blob
)?
;
830 Ok((manifest
, raw_size
))
833 /// Update the manifest of the specified snapshot. Never write a manifest directly,
834 /// only use this method - anything else may break locking guarantees.
835 pub fn update_manifest(
// NOTE(review): the `&self` parameter line (original line 836) is missing
// from this extraction.
837 backup_dir
: &BackupDir
,
838 update_fn
: impl FnOnce(&mut BackupManifest
),
839 ) -> Result
<(), Error
> {
// Take the manifest lock for the read-modify-write cycle.
841 let _guard
= self.lock_manifest(backup_dir
)?
;
842 let (mut manifest
, _
) = self.load_manifest(backup_dir
)?
;
// Caller mutates the manifest in place.
844 update_fn(&mut manifest
);
// Re-serialize (pretty JSON) and wrap in a compressed DataBlob.
846 let manifest
= serde_json
::to_value(manifest
)?
;
847 let manifest
= serde_json
::to_string_pretty(&manifest
)?
;
848 let blob
= DataBlob
::encode(manifest
.as_bytes(), None
, true)?
;
849 let raw_data
= blob
.raw_data();
851 let mut path
= self.base_path();
852 path
.push(backup_dir
.relative_path());
853 path
.push(MANIFEST_BLOB_NAME
);
855 // atomic replace invalidates flock - no other writes past this point!
856 replace_file(&path
, raw_data
, CreateOptions
::new(), false)?
;
861 /// Updates the protection status of the specified snapshot.
862 pub fn update_protection(
// NOTE(review): remaining parameters (original lines 863, 865 — presumably
// `&self` and `protection: bool`) are missing from this extraction.
864 backup_dir
: &BackupDir
,
866 ) -> Result
<(), Error
> {
867 let full_path
= self.snapshot_path(backup_dir
);
// Lock the snapshot dir while toggling the protection marker.
869 let _guard
= lock_dir_noblock(&full_path
, "snapshot", "possibly running or in use")?
;
// Marker file whose presence means "protected".
871 let protected_path
= backup_dir
.protected_file(self.base_path());
// NOTE(review): the `if protection {` branch opener (original line ~872)
// is missing; creating the file enables protection, removing it disables.
873 std
::fs
::File
::create(protected_path
)
874 .map_err(|err
| format_err
!("could not create protection file: {}", err
))?
;
875 } else if let Err(err
) = std
::fs
::remove_file(protected_path
) {
876 // ignore error for non-existing file
877 if err
.kind() != std
::io
::ErrorKind
::NotFound
{
878 bail
!("could not remove protection file: {}", err
);
// Whether newly written backups should be verified; the body (original
// line ~886, presumably `self.verify_new`) is missing from this extraction.
885 pub fn verify_new(&self) -> bool
{
889 /// returns a list of chunks sorted by their inode number on disk
890 /// chunks that could not be stat'ed are at the end of the list
891 pub fn get_chunks_in_order
<F
, A
>(
893 index
: &Box
<dyn IndexFile
+ Send
>,
896 ) -> Result
<Vec
<(usize, u64)>, Error
>
898 F
: Fn(&[u8; 32]) -> bool
,
899 A
: Fn(usize) -> Result
<(), Error
>,
901 let index_count
= index
.index_count();
902 let mut chunk_list
= Vec
::with_capacity(index_count
);
903 use std
::os
::unix
::fs
::MetadataExt
;
904 for pos
in 0..index_count
{
907 let info
= index
.chunk_info(pos
).unwrap();
909 if skip_chunk(&info
.digest
) {
913 let ino
= match self.stat_chunk(&info
.digest
) {
914 Err(_
) => u64::MAX
, // could not stat, move to end of list
915 Ok(metadata
) => metadata
.ino(),
918 chunk_list
.push((pos
, ino
));
921 // sorting by inode improves data locality, which makes it lots faster on spinners
922 chunk_list
.sort_unstable_by(|(_
, ino_a
), (_
, ino_b
)| ino_a
.cmp(ino_b
));