use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Instant;

use anyhow::{bail, format_err, Error};

use crate::{
    // ...
    tools::fs::lock_dir_noblock_shared,
    tools::ParallelHandler,
};

/// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
/// already been verified or detected as corrupt.
pub struct VerifyWorker {
    worker: Arc<dyn TaskState + Send + Sync>,
    datastore: Arc<DataStore>,
    verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
}

impl VerifyWorker {
    /// Creates a new VerifyWorker for a given task worker and datastore.
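    ///
    /// A minimal construction sketch (not taken from this file's callers): it
    /// assumes the caller already holds a task handle implementing `TaskState`
    /// and an opened `DataStore`, both behind an `Arc`. The resulting
    /// `VerifyWorker` is what the free-standing `verify_*` functions below
    /// expect.
    ///
    /// ```ignore
    /// let verify_worker = VerifyWorker::new(Arc::clone(&worker), datastore);
    /// ```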
    pub fn new(worker: Arc<dyn TaskState + Send + Sync>, datastore: Arc<DataStore>) -> Self {
        Self {
            worker,
            datastore,
            // start with 16k chunks == up to 64G data
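            // (16 * 1024 chunks at the default 4 MiB chunk size = 64 GiB)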
            verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
            // start with 64 chunks since we assume there are few corrupt ones
            corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
        }
    }
}

fn verify_blob(
    datastore: Arc<DataStore>,
    backup_dir: &BackupDir,
    info: &FileInfo,
) -> Result<(), Error> {
    let blob = datastore.load_blob(backup_dir, &info.filename)?;

    let raw_size = blob.raw_size();
    if raw_size != info.size {
        bail!("wrong size ({} != {})", info.size, raw_size);
    }

    let csum = openssl::sha::sha256(blob.raw_data());
    if csum != info.csum {
        bail!("wrong index checksum");
    }

    match blob.crypt_mode()? {
        CryptMode::Encrypt => Ok(()),
        CryptMode::None => {
            // digest already verified above
            blob.decode(None, None)?;
            Ok(())
        }
        CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
    }
}
fn rename_corrupted_chunk(
    datastore: Arc<DataStore>,
    digest: &[u8; 32],
    worker: &dyn TaskState,
) {
    let (path, digest_str) = datastore.chunk_path(digest);

    let mut counter = 0;
    let mut new_path = path.clone();
    loop {
        new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
        if new_path.exists() && counter < 9 {
            counter += 1;
        } else {
            break;
        }
    }

    match std::fs::rename(&path, &new_path) {
        Ok(_) => {
            task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
        }
        Err(err) => match err.kind() {
            std::io::ErrorKind::NotFound => { /* ignored */ }
            _ => task_log!(worker, "could not rename corrupted chunk {:?} - {}", &path, err),
        },
    };
}

fn verify_index_chunks(
    verify_worker: &VerifyWorker,
    index: Box<dyn IndexFile + Send>,
    crypt_mode: CryptMode,
) -> Result<(), Error> {
    let errors = Arc::new(AtomicUsize::new(0));

    let start_time = Instant::now();

    let mut read_bytes = 0;
    let mut decoded_bytes = 0;
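
    // clones of the shared state below are moved into the parallel decoder
    // closure, while the originals stay usable on this thread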
    let worker2 = Arc::clone(&verify_worker.worker);
    let datastore2 = Arc::clone(&verify_worker.datastore);
    let corrupt_chunks2 = Arc::clone(&verify_worker.corrupt_chunks);
    let verified_chunks2 = Arc::clone(&verify_worker.verified_chunks);
    let errors2 = Arc::clone(&errors);

    let decoder_pool = ParallelHandler::new(
        "verify chunk decoder",
        4,
        move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
            let chunk_crypt_mode = match chunk.crypt_mode() {
                Err(err) => {
                    corrupt_chunks2.lock().unwrap().insert(digest);
                    task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
                    errors2.fetch_add(1, Ordering::SeqCst);
                    return Ok(());
                }
                Ok(mode) => mode,
            };

            if chunk_crypt_mode != crypt_mode {
                task_log!(
                    worker2,
                    "chunk CryptMode {:?} does not match index CryptMode {:?}",
                    chunk_crypt_mode,
                    crypt_mode
                );
                errors2.fetch_add(1, Ordering::SeqCst);
            }

            if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
                corrupt_chunks2.lock().unwrap().insert(digest);
                task_log!(worker2, "{}", err);
                errors2.fetch_add(1, Ordering::SeqCst);
                rename_corrupted_chunk(datastore2.clone(), &digest, &worker2);
            } else {
                verified_chunks2.lock().unwrap().insert(digest);
            }

            Ok(())
        },
    );

    let skip_chunk = |digest: &[u8; 32]| -> bool {
        if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
            true
        } else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
            let digest_str = proxmox::tools::digest_to_hex(digest);
            task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
            errors.fetch_add(1, Ordering::SeqCst);
            true
        } else {
            false
        }
    };

    let check_abort = |pos: usize| -> Result<(), Error> {
        if pos & 1023 == 0 {
            verify_worker.worker.check_abort()?;
            crate::tools::fail_on_shutdown()?;
        }
        Ok(())
    };

    let chunk_list = verify_worker
        .datastore
        .get_chunks_in_order(&index, skip_chunk, check_abort)?;

    for (pos, _) in chunk_list {
        verify_worker.worker.check_abort()?;
        crate::tools::fail_on_shutdown()?;

        let info = index.chunk_info(pos).unwrap();

        // we must always recheck this here, since the parallel workers below can alter it!
        if skip_chunk(&info.digest) {
            continue; // already verified or marked corrupt
        }

        match verify_worker.datastore.load_chunk(&info.digest) {
            Err(err) => {
                verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
                task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
                errors.fetch_add(1, Ordering::SeqCst);
                rename_corrupted_chunk(
                    verify_worker.datastore.clone(),
                    &info.digest,
                    &verify_worker.worker,
                );
            }
            Ok(chunk) => {
                let size = info.size();
                read_bytes += chunk.raw_size();
                decoder_pool.send((chunk, info.digest, size))?;
                decoded_bytes += size;
            }
        }
    }
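
    // wait for all decoder threads to finish and propagate any error they returned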
    decoder_pool.complete()?;

    let elapsed = start_time.elapsed().as_secs_f64();

    let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
    let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);

    let read_speed = read_bytes_mib / elapsed;
    let decode_speed = decoded_bytes_mib / elapsed;

    let error_count = errors.load(Ordering::SeqCst);

    task_log!(
        verify_worker.worker,
        "  verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
        read_bytes_mib,
        decoded_bytes_mib,
        elapsed,
        read_speed,
        decode_speed,
        error_count,
    );

    if errors.load(Ordering::SeqCst) > 0 {
        bail!("chunks could not be verified");
    }

    Ok(())
}

fn verify_fixed_index(
    verify_worker: &VerifyWorker,
    backup_dir: &BackupDir,
    info: &FileInfo,
) -> Result<(), Error> {
    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = verify_worker.datastore.open_fixed_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
}

fn verify_dynamic_index(
    verify_worker: &VerifyWorker,
    backup_dir: &BackupDir,
    info: &FileInfo,
) -> Result<(), Error> {
    let mut path = backup_dir.relative_path();
    path.push(&info.filename);

    let index = verify_worker.datastore.open_dynamic_reader(&path)?;

    let (csum, size) = index.compute_csum();
    if size != info.size {
        bail!("wrong size ({} != {})", info.size, size);
    }

    if csum != info.csum {
        bail!("wrong index checksum");
    }

    verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
}

/// Verify a single backup snapshot
///
/// This checks all archives inside a backup snapshot.
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(true) if verify is successful
/// - Ok(false) if there were verification errors
/// - Err(_) if task was aborted
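///
/// A hedged call sketch (assumes the snapshot's `BackupDir` and the task's
/// `UPID` are already at hand; passing `None` as filter skips no snapshots):
///
/// ```ignore
/// let ok = verify_backup_dir(&verify_worker, &backup_dir, upid.clone(), None)?;
/// ```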
pub fn verify_backup_dir(
    verify_worker: &VerifyWorker,
    backup_dir: &BackupDir,
    upid: UPID,
    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<bool, Error> {
    let snap_lock = lock_dir_noblock_shared(
        &verify_worker.datastore.snapshot_path(&backup_dir),
        "snapshot",
        "locked by another operation",
    );
    match snap_lock {
        Ok(snap_lock) => {
            verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
        }
        Err(err) => {
            task_log!(
                verify_worker.worker,
                "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
                verify_worker.datastore.name(),
                backup_dir,
                err,
            );
            Ok(true)
        }
    }
}

/// See verify_backup_dir
pub fn verify_backup_dir_with_lock(
    verify_worker: &VerifyWorker,
    backup_dir: &BackupDir,
    upid: UPID,
    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
    _snap_lock: Dir,
) -> Result<bool, Error> {
    let manifest = match verify_worker.datastore.load_manifest(&backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            task_log!(
                verify_worker.worker,
                "verify {}:{} - manifest load error: {}",
                verify_worker.datastore.name(),
                backup_dir,
                err,
            );
            return Ok(false);
        }
    };

    if let Some(filter) = filter {
        if !filter(&manifest) {
            task_log!(
                verify_worker.worker,
                "SKIPPED: verify {}:{} (recently verified)",
                verify_worker.datastore.name(),
                backup_dir,
            );
            return Ok(true);
        }
    }

    task_log!(verify_worker.worker, "verify {}:{}", verify_worker.datastore.name(), backup_dir);

    let mut error_count = 0;

    let mut verify_result = VerifyState::Ok;
    for info in manifest.files() {
        let result = proxmox::try_block!({
            task_log!(verify_worker.worker, "  check {}", info.filename);
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, &backup_dir, info),
                ArchiveType::Blob => {
                    verify_blob(verify_worker.datastore.clone(), &backup_dir, info)
                }
            }
        });

        verify_worker.worker.check_abort()?;
        crate::tools::fail_on_shutdown()?;

        if let Err(err) = result {
            task_log!(
                verify_worker.worker,
                "verify {}:{}/{} failed: {}",
                verify_worker.datastore.name(),
                backup_dir,
                info.filename,
                err,
            );
            error_count += 1;
            verify_result = VerifyState::Failed;
        }
    }
    let verify_state = SnapshotVerifyState {
        state: verify_result,
        upid,
    };
    let verify_state = serde_json::to_value(verify_state)?;
    verify_worker
        .datastore
        .update_manifest(&backup_dir, |manifest| {
            manifest.unprotected["verify_state"] = verify_state;
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(error_count == 0)
}

/// Verify all backups inside a backup group
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(failed_dirs) where failed_dirs had verification errors
/// - Err(_) if task was aborted
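///
/// A hedged usage sketch (the group and UPID come from the caller; the
/// one-group `StoreProgress` is assumed just for illustration):
///
/// ```ignore
/// let mut progress = StoreProgress::new(1);
/// let failed = verify_backup_group(&verify_worker, &group, &mut progress, &upid, None)?;
/// ```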
pub fn verify_backup_group(
    verify_worker: &VerifyWorker,
    group: &BackupGroup,
    progress: &mut StoreProgress,
    upid: &UPID,
    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<Vec<String>, Error> {
    let mut errors = Vec::new();
    let mut list = match group.list_backups(&verify_worker.datastore.base_path()) {
        Ok(list) => list,
        Err(err) => {
            task_log!(
                verify_worker.worker,
                "verify group {}:{} - unable to list backups: {}",
                verify_worker.datastore.name(),
                group,
                err,
            );
            return Ok(errors);
        }
    };

    let snapshot_count = list.len();
    task_log!(
        verify_worker.worker,
        "verify group {}:{} ({} snapshots)",
        verify_worker.datastore.name(),
        group,
        snapshot_count,
    );

    progress.group_snapshots = snapshot_count as u64;

    BackupInfo::sort_list(&mut list, false); // newest first
    for (pos, info) in list.into_iter().enumerate() {
        if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
            errors.push(info.backup_dir.to_string());
        }
        progress.done_snapshots = pos as u64 + 1;
        task_log!(verify_worker.worker, "percentage done: {}", progress);
    }

    Ok(errors)
}

/// Verify all (owned) backups inside a datastore
///
/// Errors are logged to the worker log.
///
/// Returns
/// - Ok(failed_dirs) where failed_dirs had verification errors
/// - Err(_) if task was aborted
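///
/// A hedged sketch, limiting verification to backups owned by one `Authid`
/// (the `auth_id` value is an assumed example):
///
/// ```ignore
/// let failed_dirs = verify_all_backups(&verify_worker, &upid, Some(auth_id), None)?;
/// ```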
pub fn verify_all_backups(
    verify_worker: &VerifyWorker,
    upid: &UPID,
    owner: Option<Authid>,
    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<Vec<String>, Error> {
    let mut errors = Vec::new();
    let worker = Arc::clone(&verify_worker.worker);

    task_log!(worker, "verify datastore {}", verify_worker.datastore.name());

    if let Some(owner) = &owner {
        task_log!(worker, "limiting to backups owned by {}", owner);
    }

    let filter_by_owner = |group: &BackupGroup| {
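        // a group owned by an API token also matches its owning user, but not
        // the other way around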
        match (verify_worker.datastore.get_owner(group), &owner) {
            (Ok(ref group_owner), Some(owner)) => {
                group_owner == owner
                    || (group_owner.is_token()
                        && !owner.is_token()
                        && group_owner.user() == owner.user())
            }
            (Ok(_), None) => true,
            (Err(err), Some(_)) => {
                // intentionally not in task log
                // the task user might not be allowed to see this group!
                println!("Failed to get owner of group '{}' - {}", group, err);
                false
            }
            (Err(err), None) => {
                // we don't filter by owner, but we want to log the error
                task_log!(
                    worker,
                    "Failed to get owner of group '{}' - {}",
                    group,
                    err,
                );
                errors.push(group.to_string());
                true
            }
        }
    };

    let mut list = match BackupInfo::list_backup_groups(&verify_worker.datastore.base_path()) {
        Ok(list) => list
            .into_iter()
            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
            .filter(filter_by_owner)
            .collect::<Vec<BackupGroup>>(),
        Err(err) => {
            task_log!(worker, "unable to list backups: {}", err);
            return Ok(errors);
        }
    };

    list.sort_unstable();

    let group_count = list.len();
    task_log!(worker, "found {} groups", group_count);

    let mut progress = StoreProgress::new(group_count as u64);

    for (pos, group) in list.into_iter().enumerate() {
        progress.done_groups = pos as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let mut group_errors =
            verify_backup_group(verify_worker, &group, &mut progress, upid, filter)?;
        errors.append(&mut group_errors);
    }

    Ok(errors)
}

/// Filter for the verification of snapshots
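///
/// Meant to be wrapped in a closure and passed as the `filter` argument of
/// the `verify_*` functions above, e.g. (the 30-day cutoff is an example
/// value):
///
/// ```ignore
/// let filter = |manifest: &BackupManifest| verify_filter(true, Some(30), manifest);
/// ```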
pub fn verify_filter(
    ignore_verified_snapshots: bool,
    outdated_after: Option<i64>,
    manifest: &BackupManifest,
) -> bool {
    if !ignore_verified_snapshots {
        return true;
    }

    let raw_verify_state = manifest.unprotected["verify_state"].clone();
    match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
        Err(_) => true, // no last verification, always include
        Ok(last_verify) => {
            match outdated_after {
                None => false, // never re-verify if ignored and no max age
                Some(max_age) => {
                    let now = proxmox::tools::time::epoch_i64();
                    let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;

                    days_since_last_verify > max_age
                }
            }
        }
    }
}