use std::collections::{HashSet, HashMap};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::convert::TryFrom;
use std::str::FromStr;
use std::time::Duration;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;

use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions};

use pbs_api_types::{UPID, DataStoreConfig, Authid, GarbageCollectionStatus};
use pbs_datastore::{task_log, task_warn};
use pbs_datastore::DataBlob;
use pbs_datastore::backup_info::{BackupGroup, BackupDir};
use pbs_datastore::chunk_store::ChunkStore;
use pbs_datastore::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use pbs_datastore::fixed_index::{FixedIndexReader, FixedIndexWriter};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
    MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME,
    ArchiveType, BackupManifest, archive_type,
};
use pbs_datastore::task::TaskState;
use pbs_tools::format::HumanByte;
use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
use pbs_tools::process_locker::ProcessLockSharedGuard;
use pbs_config::{open_backup_lockfile, BackupLockGuard};

use crate::tools::fail_on_shutdown;

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

/// Checks if auth_id is owner, or, if owner is a token, if
/// auth_id is the user of the token.
pub fn check_backup_owner(
    owner: &Authid,
    auth_id: &Authid,
) -> Result<(), Error> {
    let correct_owner = owner == auth_id
        || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
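
// Illustrative usage sketch (not part of the original source): an API handler that
// has already looked up the group owner via `DataStore::get_owner()` could gate
// access like this; `datastore`, `backup_group` and `auth_id` are assumed to be
// provided by the surrounding request code.
//
//     let owner = datastore.get_owner(&backup_group)?;
//     check_backup_owner(&owner, &auth_id)?;
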
/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backups.
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<()>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
}

impl DataStore {

    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let (config, _digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;
        let path = PathBuf::from(&config.path);

        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base() == path
                && datastore.verify_new == config.verify_new.unwrap_or(false)
            {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open_with_path(name, &path, config)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }
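
    // Illustrative usage sketch (not part of the original source): callers obtain a
    // shared, cached handle by configured datastore name; "store1" is a made-up name.
    //
    //     let datastore = DataStore::lookup_datastore("store1")?;
    //     let base = datastore.base_path();
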
    /// Removes all datastores that are not configured anymore.
    pub fn remove_unused_datastores() -> Result<(), Error> {
        let (config, _digest) = pbs_config::datastore::config()?;

        let mut map = DATASTORE_MAP.lock().unwrap();
        // removes all elements that are not in the config
        map.retain(|key, _| {
            config.sections.contains_key(key)
        });
        Ok(())
    }

    fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
        let chunk_store = ChunkStore::open(store_name, path)?;

        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    eprintln!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
        })
    }

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<pbs_tools::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let full_path = self.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self, filename: P,
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let full_path = self.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> =
            match archive_type(filename)? {
                ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
                ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
                _ => bail!("cannot open index file of unknown type: {:?}", filename),
            };
        Ok(out)
    }

    /// Fast index verification - only checks if chunks exist.
    pub fn fast_index_verification(
        &self,
        index: &dyn IndexFile,
        checked: &mut HashSet<[u8; 32]>,
    ) -> Result<(), Error> {

        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            if checked.contains(&info.digest) {
                continue;
            }

            self.stat_chunk(&info.digest)
                .map_err(|err| {
                    format_err!(
                        "fast_index_verification error, stat_chunk {} failed - {}",
                        proxmox::tools::digest_to_hex(&info.digest),
                        err,
                    )
                })?;

            checked.insert(info.digest);
        }

        Ok(())
    }
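
    // Illustrative usage sketch (not part of the original source): a verify task can
    // share one `checked` set across several indexes so chunks referenced by more than
    // one index are only stat'ed once; `datastore` and `indexes` are assumed to exist.
    //
    //     let mut checked = HashSet::new();
    //     for index in &indexes {
    //         datastore.fast_index_verification(index.as_ref(), &mut checked)?;
    //     }
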
    pub fn name(&self) -> &str {
        self.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

    /// Cleanup a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(&self, backup_dir: &BackupDir, manifest: &BackupManifest
    ) -> Result<(), Error> {

        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());

        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });

        for item in pbs_tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
            if let Ok(item) = item {
                if let Some(file_type) = item.file_type() {
                    if file_type != nix::dir::Type::File { continue; }
                }
                let file_name = item.file_name().to_bytes();
                if file_name == b"." || file_name == b".." { continue; };

                if let Ok(name) = std::str::from_utf8(file_name) {
                    if wanted_files.contains(name) { continue; }
                }
                println!("remove unused file {:?}", item.file_name());
                let dirfd = item.parent_fd();
                let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
            }
        }

        Ok(())
    }

    /// Returns the absolute path for a backup_group
    pub fn group_path(&self, backup_group: &BackupGroup) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path
    }

    /// Returns the absolute path for backup_dir
    pub fn snapshot_path(&self, backup_dir: &BackupDir) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());
        full_path
    }

    /// Remove a complete backup group including all snapshots
    pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<(), Error> {

        let full_path = self.group_path(backup_group);

        let _guard = pbs_tools::fs::lock_dir_noblock(&full_path, "backup group", "possible running backup")?;

        log::info!("removing backup group {:?}", full_path);

        // remove all individual backup dirs first to ensure nothing is using them
        for snap in backup_group.list_backups(&self.base_path())? {
            self.remove_backup_dir(&snap.backup_dir, false)?;
        }

        // no snapshots left, we can now safely remove the empty folder
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| format_err!(
                "removing backup group directory {:?} failed - {}",
                full_path,
                err,
            ))?;

        Ok(())
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {

        let full_path = self.snapshot_path(backup_dir);

        let (_guard, _manifest_guard);
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
            _manifest_guard = self.lock_manifest(backup_dir)?;
        }

        log::info!("removing backup snapshot {:?}", full_path);
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| format_err!(
                "removing backup snapshot {:?} failed - {}",
                full_path,
                err,
            ))?;

        // the manifest does not exist anymore, we do not need to keep the lock
        if let Ok(path) = self.manifest_lock_path(backup_dir) {
            // ignore errors
            let _ = std::fs::remove_file(path);
        }

        Ok(())
    }

    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
        let base_path = self.base_path();
        let mut group_path = base_path.clone();
        group_path.push(backup_group.group_path());

        if group_path.exists() {
            backup_group.last_successful_backup(&base_path)
        } else {
            Ok(None)
        }
    }

    /// Returns the backup owner.
    ///
    /// The backup owner is the entity who first created the backup group.
    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path.push("owner");
        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
        Ok(owner.trim_end().parse()?) // remove trailing newline
    }

    pub fn owns_backup(&self, backup_group: &BackupGroup, auth_id: &Authid) -> Result<bool, Error> {
        let owner = self.get_owner(backup_group)?;

        Ok(check_backup_owner(&owner, auth_id).is_ok())
    }

    /// Set the backup owner.
    pub fn set_owner(
        &self,
        backup_group: &BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let mut path = self.base_path();
        path.push(backup_group.group_path());
        path.push("owner");

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            open_options.create_new(true);
        }

        let mut file = open_options.open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }

    /// Create (if it does not already exist) and lock a backup group
    ///
    /// And set the owner to 'userid'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_group.backup_type());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(backup_group.backup_id());

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                self.set_owner(backup_group, auth_id, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }

    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
        -> Result<(PathBuf, bool, DirLockGuard), Error>
    {
        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        let lock = ||
            lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
            Err(e) => Err(e.into())
        }
    }

    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                if let Some(path) = err.path() {
                    if inner.kind() == io::ErrorKind::PermissionDenied {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
                        }
                    } else {
                        bail!("unexpected error on datastore traversal: {} - {:?}", inner, path)
                    }
                } else {
                    bail!("unexpected error on datastore traversal: {}", inner)
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue
                }
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    // mark chunks used by ``index`` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn TaskState,
    ) -> Result<(), Error> {

        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            worker.check_abort()?;
            fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if !self.chunk_store.cond_touch_chunk(digest, false)? {
                task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {}, required by {:?}",
                    proxmox::tools::digest_to_hex(digest),
                    file_name,
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }

    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn TaskState,
    ) -> Result<(), Error> {

        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {

            worker.check_abort()?;
            fail_on_shutdown()?;

            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if BackupDir::from_str(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    i + 1,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.last_gc_status.lock().unwrap().clone()
    }

    pub fn garbage_collection_running(&self) -> bool {
        !matches!(self.gc_mutex.try_lock(), Ok(_))
    }

    pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            // avoid running GC if an old daemon process still has a
            // running backup writer, which is not safe as we have no "oldest
            // writer" information and thus no safe atime cutoff
            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox::tools::time::epoch_i64();
            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(upid.to_string());

            task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per = (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options);
            }

            *self.last_gc_status.lock().unwrap() = gc_status;

        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
    pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
        self.chunk_store.try_shared_lock()
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.chunk_store.chunk_path(digest)
    }

    pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
        self.chunk_store.cond_touch_chunk(digest, fail_if_not_exist)
    }

    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        proxmox::try_block!({
            let mut file = std::fs::File::open(&path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
    }

    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
        let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
        std::fs::metadata(chunk_path).map_err(Error::from)
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

        let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);

        proxmox::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!(
            "store '{}', unable to load chunk '{}' - {}",
            self.name(),
            digest_str,
            err,
        ))
    }

    /// Returns the filename to lock a manifest
    ///
    /// Also creates the basedir. The lockfile is located in
    /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
    fn manifest_lock_path(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<String, Error> {
        let mut path = format!(
            "/run/proxmox-backup/locks/{}/{}/{}",
            self.name(),
            backup_dir.group().backup_type(),
            backup_dir.group().backup_id(),
        );

        std::fs::create_dir_all(&path)?;
        use std::fmt::Write;
        write!(path, "/{}{}", backup_dir.backup_time_string(), &MANIFEST_LOCK_NAME)?;

        Ok(path)
    }

    fn lock_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<BackupLockGuard, Error> {
        let path = self.manifest_lock_path(backup_dir)?;

        // update_manifest should never take a long time, so if someone else has
        // the lock we can simply block a bit and should get it soon
        open_backup_lockfile(&path, Some(Duration::from_secs(5)), true)
            .map_err(|err| format_err!(
                "unable to acquire manifest lock {:?} - {}", &path, err
            ))
    }

813 pub fn load_manifest(
815 backup_dir
: &BackupDir
,
816 ) -> Result
<(BackupManifest
, u64), Error
> {
817 let blob
= self.load_blob(backup_dir
, MANIFEST_BLOB_NAME
)?
;
818 let raw_size
= blob
.raw_size();
819 let manifest
= BackupManifest
::try_from(blob
)?
;
820 Ok((manifest
, raw_size
))
    /// Update the manifest of the specified snapshot. Never write a manifest directly,
    /// only use this method - anything else may break locking guarantees.
    pub fn update_manifest(
        &self,
        backup_dir: &BackupDir,
        update_fn: impl FnOnce(&mut BackupManifest),
    ) -> Result<(), Error> {

        let _guard = self.lock_manifest(backup_dir)?;
        let (mut manifest, _) = self.load_manifest(&backup_dir)?;

        update_fn(&mut manifest);

        let manifest = serde_json::to_value(manifest)?;
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();

        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(MANIFEST_BLOB_NAME);

        // atomic replace invalidates flock - no other writes past this point!
        replace_file(&path, raw_data, CreateOptions::new())?;

        Ok(())
    }
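
    // Illustrative usage sketch (not part of the original source): the closure mutates
    // the manifest in memory while this method holds the manifest lock and then writes
    // it back atomically; the "note" key in `unprotected` is a made-up example and
    // assumes `unprotected` is the manifest's free-form JSON section.
    //
    //     datastore.update_manifest(&backup_dir, |manifest| {
    //         manifest.unprotected["note"] = "verified".into();
    //     })?;
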
    pub fn verify_new(&self) -> bool {
        self.verify_new
    }

    /// Returns a list of chunks sorted by their inode number on disk.
    /// Chunks that could not be stat'ed are at the end of the list.
    pub fn get_chunks_in_order<F, A>(
        &self,
        index: &Box<dyn IndexFile + Send>,
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
    where
        F: Fn(&[u8; 32]) -> bool,
        A: Fn(usize) -> Result<(), Error>,
    {
        let index_count = index.index_count();
        let mut chunk_list = Vec::with_capacity(index_count);
        use std::os::unix::fs::MetadataExt;
        for pos in 0..index_count {
            check_abort(pos)?;

            let info = index.chunk_info(pos).unwrap();

            if skip_chunk(&info.digest) {
                continue;
            }

            let ino = match self.stat_chunk(&info.digest) {
                Err(_) => u64::MAX, // could not stat, move to end of list
                Ok(metadata) => metadata.ino(),
            };

            chunk_list.push((pos, ino));
        }

        // sorting by inode improves data locality, which makes it lots faster on spinners
        chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));

        Ok(chunk_list)
    }
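
    // Illustrative usage sketch (not part of the original source): reading chunks in
    // the returned (position, inode) order reduces seeking on rotating disks; `index`
    // is assumed to be a Box<dyn IndexFile + Send> and the closures are placeholders.
    //
    //     let order = datastore.get_chunks_in_order(&index, |_| false, |_| Ok(()))?;
    //     for (pos, _ino) in order {
    //         let info = index.chunk_info(pos).unwrap();
    //         let _chunk = datastore.load_chunk(&info.digest)?;
    //     }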