use std::collections::{HashSet, HashMap};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::convert::TryFrom;
use std::str::FromStr;
use std::time::Duration;
use std::fs::File;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;

use proxmox::tools::fs::{replace_file, file_read_optional_string, CreateOptions, open_file_locked};

use super::backup_info::{BackupGroup, BackupDir};
use super::chunk_store::ChunkStore;
use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
use super::index::IndexFile;
use super::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, CLIENT_LOG_BLOB_NAME, BackupManifest};
use super::{DataBlob, ArchiveType, archive_type};
use crate::config::datastore::{self, DataStoreConfig};
use crate::task::TaskState;
use crate::tools;
use crate::tools::format::HumanByte;
use crate::tools::fs::{lock_dir_noblock, DirLockGuard};
use crate::api2::types::{Authid, GarbageCollectionStatus};
use crate::server::UPID;

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
}

/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backups.
pub struct DataStore {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<bool>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
}

impl DataStore {

    pub fn lookup_datastore(name: &str) -> Result<Arc<DataStore>, Error> {

        let (config, _digest) = datastore::config()?;
        let config: datastore::DataStoreConfig = config.lookup("datastore", name)?;
        let path = PathBuf::from(&config.path);

        let mut map = DATASTORE_MAP.lock().unwrap();

        if let Some(datastore) = map.get(name) {
            // Compare Config - if changed, create new Datastore object!
            if datastore.chunk_store.base == path &&
                datastore.verify_new == config.verify_new.unwrap_or(false)
            {
                return Ok(datastore.clone());
            }
        }

        let datastore = DataStore::open_with_path(name, &path, config)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(datastore)
    }
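
    // A minimal usage sketch for `lookup_datastore` (illustrative, not part of the
    // original source; the store name "store1" is hypothetical):
    //
    //     let store = DataStore::lookup_datastore("store1")?;
    //     println!("datastore base: {:?}", store.base_path());
    //
    // Repeated lookups for the same name return the cached Arc<DataStore> as long
    // as the configured path and verify_new flag are unchanged.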

    fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
        let chunk_store = ChunkStore::open(store_name, path)?;

        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    eprintln!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        Ok(Self {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(false),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
        })
    }

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<tools::fs::ReadDirEntry, Error>, usize, bool)>,
        Error
    > {
        self.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(&self, filename: P, size: usize, chunk_size: usize) -> Result<FixedIndexWriter, Error> {

        let index = FixedIndexWriter::create(self.chunk_store.clone(), filename.as_ref(), size, chunk_size)?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(&self, filename: P) -> Result<FixedIndexReader, Error> {

        let full_path = self.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self, filename: P,
    ) -> Result<DynamicIndexWriter, Error> {

        let index = DynamicIndexWriter::create(
            self.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(&self, filename: P) -> Result<DynamicIndexReader, Error> {

        let full_path = self.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> =
            match archive_type(filename)? {
                ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
                ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
                _ => bail!("cannot open index file of unknown type: {:?}", filename),
            };
        Ok(out)
    }
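
    // A minimal sketch of how `open_index` dispatches on the archive type
    // (illustrative only; the file names are hypothetical):
    //
    //     let dynamic = store.open_index("catalog.pcat1.didx")?; // DynamicIndexReader
    //     let fixed = store.open_index("drive-scsi0.img.fidx")?; // FixedIndexReader
    //     println!("chunks referenced: {}", dynamic.index_count());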

    pub fn name(&self) -> &str {
        self.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.chunk_store.base_path()
    }

    /// Cleanup a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(
        &self,
        backup_dir: &BackupDir,
        manifest: &BackupManifest,
    ) -> Result<(), Error> {

        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());

        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); });

        for item in tools::fs::read_subdir(libc::AT_FDCWD, &full_path)? {
            if let Ok(item) = item {
                if let Some(file_type) = item.file_type() {
                    if file_type != nix::dir::Type::File { continue; }
                }
                let file_name = item.file_name().to_bytes();
                if file_name == b"." || file_name == b".." { continue; };

                if let Ok(name) = std::str::from_utf8(file_name) {
                    if wanted_files.contains(name) { continue; }
                }
                println!("remove unused file {:?}", item.file_name());
                let dirfd = item.parent_fd();
                let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
            }
        }

        Ok(())
    }
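
    // A minimal sketch of the intended call pattern (illustrative only): load the
    // manifest first, then let `cleanup_backup_dir` delete everything the manifest
    // does not mention:
    //
    //     let (manifest, _raw_size) = store.load_manifest(&backup_dir)?;
    //     store.cleanup_backup_dir(&backup_dir, &manifest)?;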

    /// Returns the absolute path for a backup_group
    pub fn group_path(&self, backup_group: &BackupGroup) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path
    }

    /// Returns the absolute path for backup_dir
    pub fn snapshot_path(&self, backup_dir: &BackupDir) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.relative_path());
        full_path
    }

    /// Remove a complete backup group including all snapshots
    pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<(), Error> {

        let full_path = self.group_path(backup_group);

        let _guard = tools::fs::lock_dir_noblock(&full_path, "backup group", "possibly running backup")?;

        log::info!("removing backup group {:?}", full_path);

        // remove all individual backup dirs first to ensure nothing is using them
        for snap in backup_group.list_backups(&self.base_path())? {
            self.remove_backup_dir(&snap.backup_dir, false)?;
        }

        // no snapshots left, we can now safely remove the empty folder
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| format_err!(
                "removing backup group directory {:?} failed - {}",
                full_path,
                err,
            ))?;

        Ok(())
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {

        let full_path = self.snapshot_path(backup_dir);

        let (_guard, _manifest_guard);
        if !force {
            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
            _manifest_guard = self.lock_manifest(backup_dir)?;
        }

        log::info!("removing backup snapshot {:?}", full_path);
        std::fs::remove_dir_all(&full_path)
            .map_err(|err| format_err!(
                "removing backup snapshot {:?} failed - {}",
                full_path,
                err,
            ))?;

        // the manifest does not exist anymore, we do not need to keep the lock
        if let Ok(path) = self.manifest_lock_path(backup_dir) {
            // ignore errors
            let _ = std::fs::remove_file(path);
        }

        Ok(())
    }

    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
        let base_path = self.base_path();
        let mut group_path = base_path.clone();
        group_path.push(backup_group.group_path());

        if group_path.exists() {
            backup_group.last_successful_backup(&base_path)
        } else {
            Ok(None)
        }
    }

    /// Returns the backup owner.
    ///
    /// The backup owner is the entity who first created the backup group.
    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_group.group_path());
        full_path.push("owner");
        let owner = proxmox::tools::fs::file_read_firstline(full_path)?;
        Ok(owner.trim_end().parse()?) // remove trailing newline
    }

    /// Set the backup owner.
    pub fn set_owner(
        &self,
        backup_group: &BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let mut path = self.base_path();
        path.push(backup_group.group_path());
        path.push("owner");

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            open_options.create_new(true);
        }

        let mut file = open_options.open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }
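
    // A minimal sketch pairing `set_owner` and `get_owner` (illustrative only;
    // the Authid value is hypothetical):
    //
    //     let auth_id: Authid = "backup@pam".parse()?;
    //     store.set_owner(&backup_group, &auth_id, false)?; // fails if an owner file exists
    //     assert_eq!(store.get_owner(&backup_group)?, auth_id);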

    /// Create (if it does not already exist) and lock a backup group
    ///
    /// And set the owner to 'auth_id'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_group.backup_type());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(backup_group.backup_id());

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                self.set_owner(backup_group, auth_id, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(&full_path, "backup group", "another backup is already running")?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }
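
    // A minimal usage sketch (illustrative only): the returned owner is the
    // existing one when the group was already present, so callers should check
    // it against the requesting Authid before proceeding:
    //
    //     let (owner, _guard) = store.create_locked_backup_group(&backup_group, &auth_id)?;
    //     if owner != auth_id { bail!("backup group is owned by {}", owner); }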

    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_locked_backup_dir(&self, backup_dir: &BackupDir)
        -> Result<(PathBuf, bool, DirLockGuard), Error>
    {
        let relative_path = backup_dir.relative_path();
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        let lock = ||
            lock_dir_noblock(&full_path, "snapshot", "internal error - tried creating snapshot that's already in use");

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok((relative_path, false, lock()?)),
            Err(e) => Err(e.into())
        }
    }

    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry.file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                let path = err.path().unwrap_or(Path::new(""));
                match inner.kind() {
                    io::ErrorKind::PermissionDenied => {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {}", path.display())
                        }
                    }
                    _ => bail!("unexpected error on datastore traversal: {} - {}", inner, path.display()),
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue
                }
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    // mark chunks used by ``index`` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn TaskState,
    ) -> Result<(), Error> {

        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            worker.check_abort()?;
            tools::fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if let Err(err) = self.chunk_store.touch_chunk(digest) {
                crate::task_warn!(
                    worker,
                    "warning: unable to access chunk {}, required by {:?} - {}",
                    proxmox::tools::digest_to_hex(digest),
                    file_name,
                    err,
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }

    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn TaskState,
    ) -> Result<(), Error> {

        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut done = 0;
        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for img in image_list {

            worker.check_abort()?;
            tools::fail_on_shutdown()?;

            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if BackupDir::from_str(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }
            done += 1;

            let percentage = done * 100 / image_count;
            if percentage > last_percentage {
                crate::task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    done,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            crate::task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.last_gc_status.lock().unwrap().clone()
    }

    pub fn garbage_collection_running(&self) -> bool {
        self.gc_mutex.try_lock().is_err()
    }

    pub fn garbage_collection(&self, worker: &dyn TaskState, upid: &UPID) -> Result<(), Error> {

        if let Ok(ref mut _mutex) = self.gc_mutex.try_lock() {

            // avoids that we run GC if an old daemon process has still a
            // running backup writer, which is not safe as we have no "oldest
            // writer" information and thus no safe atime cutoff
            let _exclusive_lock = self.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox::tools::time::epoch_i64();
            let oldest_writer = self.chunk_store.oldest_writer().unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(upid.to_string());

            crate::task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            crate::task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            crate::task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            crate::task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                crate::task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                crate::task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                crate::task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            crate::task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per = (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                crate::task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            crate::task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            crate::task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                crate::task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = crate::backup::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options);
            }

            *self.last_gc_status.lock().unwrap() = gc_status;

        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }

    pub fn try_shared_chunk_store_lock(&self) -> Result<tools::ProcessLockSharedGuard, Error> {
        self.chunk_store.try_shared_lock()
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.chunk_store.chunk_path(digest)
    }

    pub fn cond_touch_chunk(&self, digest: &[u8; 32], fail_if_not_exist: bool) -> Result<bool, Error> {
        self.chunk_store.cond_touch_chunk(digest, fail_if_not_exist)
    }

    pub fn insert_chunk(
        &self,
        chunk: &DataBlob,
        digest: &[u8; 32],
    ) -> Result<(bool, u64), Error> {
        self.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(filename);

        proxmox::try_block!({
            let mut file = std::fs::File::open(&path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {

        let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);

        proxmox::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        }).map_err(|err| format_err!(
            "store '{}', unable to load chunk '{}' - {}",
            self.name(),
            digest_str,
            err,
        ))
    }

    /// Returns the filename to lock a manifest
    ///
    /// Also creates the basedir. The lockfile is located in
    /// '/run/proxmox-backup/locks/{datastore}/{type}/{id}/{timestamp}.index.json.lck'
    fn manifest_lock_path(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<String, Error> {
        let mut path = format!(
            "/run/proxmox-backup/locks/{}/{}/{}",
            self.name(),
            backup_dir.group().backup_type(),
            backup_dir.group().backup_id(),
        );
        std::fs::create_dir_all(&path)?;
        use std::fmt::Write;
        write!(path, "/{}{}", backup_dir.backup_time_string(), &MANIFEST_LOCK_NAME)?;

        Ok(path)
    }

    fn lock_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<File, Error> {
        let path = self.manifest_lock_path(backup_dir)?;

        // update_manifest should never take a long time, so if someone else has
        // the lock we can simply block a bit and should get it soon
        open_file_locked(&path, Duration::from_secs(5), true)
            .map_err(|err| format_err!(
                "unable to acquire manifest lock {:?} - {}", &path, err
            ))
    }

    /// Load the manifest without a lock. Must not be written back.
    pub fn load_manifest(
        &self,
        backup_dir: &BackupDir,
    ) -> Result<(BackupManifest, u64), Error> {
        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let raw_size = blob.raw_size();
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, raw_size))
    }

    /// Update the manifest of the specified snapshot. Never write a manifest directly,
    /// only use this method - anything else may break locking guarantees.
    pub fn update_manifest(
        &self,
        backup_dir: &BackupDir,
        update_fn: impl FnOnce(&mut BackupManifest),
    ) -> Result<(), Error> {

        let _guard = self.lock_manifest(backup_dir)?;
        let (mut manifest, _) = self.load_manifest(&backup_dir)?;

        update_fn(&mut manifest);

        let manifest = serde_json::to_value(manifest)?;
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();

        let mut path = self.base_path();
        path.push(backup_dir.relative_path());
        path.push(MANIFEST_BLOB_NAME);

        // atomic replace invalidates flock - no other writes past this point!
        replace_file(&path, raw_data, CreateOptions::new())?;

        Ok(())
    }
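
    // A minimal sketch of a manifest update (illustrative only): the closure runs
    // while the lock from `lock_manifest` is held, so the read-modify-write cycle
    // is atomic with respect to other updaters:
    //
    //     store.update_manifest(&backup_dir, |manifest| {
    //         manifest.unprotected["verify_state"] = serde_json::json!("ok");
    //     })?;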

    pub fn verify_new(&self) -> bool {
        self.verify_new
    }