use anyhow::{bail, format_err, Error};
-use crate::{
- api2::types::*,
- backup::{
- DataStore,
- StoreProgress,
- DataBlob,
- BackupGroup,
- BackupDir,
- BackupInfo,
- BackupManifest,
- IndexFile,
- CryptMode,
- FileInfo,
- ArchiveType,
- archive_type,
- },
- server::UPID,
- task::TaskState,
- task_log,
- tools::fs::lock_dir_noblock_shared,
- tools::ParallelHandler,
-};
+use proxmox_sys::{task_log, WorkerTaskContext};
+
+use pbs_api_types::{Authid, CryptMode, VerifyState, UPID, SnapshotVerifyState};
+use pbs_datastore::{DataStore, DataBlob, StoreProgress};
+use pbs_datastore::backup_info::{BackupGroup, BackupDir, BackupInfo};
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
+use proxmox_sys::fs::lock_dir_noblock_shared;
+
+use crate::tools::ParallelHandler;
/// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
/// already been verified or detected as corrupt.
pub struct VerifyWorker {
- worker: Arc<dyn TaskState + Send + Sync>,
+ worker: Arc<dyn WorkerTaskContext>,
datastore: Arc<DataStore>,
verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
impl VerifyWorker {
/// Creates a new VerifyWorker for a given task worker and datastore.
- pub fn new(worker: Arc<dyn TaskState + Send + Sync>, datastore: Arc<DataStore>) -> Self {
+ pub fn new(worker: Arc<dyn WorkerTaskContext>, datastore: Arc<DataStore>) -> Self {
Self {
worker,
datastore,
fn rename_corrupted_chunk(
datastore: Arc<DataStore>,
digest: &[u8; 32],
- worker: &dyn TaskState,
+ worker: &dyn WorkerTaskContext,
) {
let (path, digest_str) = datastore.chunk_path(digest);
}
);
- let index_count = index.index_count();
- let mut chunk_list = Vec::with_capacity(index_count);
-
- use std::os::unix::fs::MetadataExt;
-
- for pos in 0..index_count {
- verify_worker.worker.check_abort()?;
- crate::tools::fail_on_shutdown()?;
-
- let info = index.chunk_info(pos).unwrap();
-
- if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
- continue; // already verified
- }
-
- if verify_worker.corrupt_chunks.lock().unwrap().contains(&info.digest) {
- let digest_str = proxmox::tools::digest_to_hex(&info.digest);
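+ // skip chunks this task has already verified; chunks already marked corrupt count as errors and are skipped without re-reading them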
+ let skip_chunk = |digest: &[u8; 32]| -> bool {
+ if verify_worker.verified_chunks.lock().unwrap().contains(digest) {
+ true
+ } else if verify_worker.corrupt_chunks.lock().unwrap().contains(digest) {
+ let digest_str = hex::encode(digest);
task_log!(verify_worker.worker, "chunk {} was marked as corrupt", digest_str);
errors.fetch_add(1, Ordering::SeqCst);
- continue;
+ true
+ } else {
+ false
}
+ };
- match verify_worker.datastore.stat_chunk(&info.digest) {
- Err(err) => {
- verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
- task_log!(verify_worker.worker, "can't verify chunk, stat failed - {}", err);
- errors.fetch_add(1, Ordering::SeqCst);
- rename_corrupted_chunk(
- verify_worker.datastore.clone(),
- &info.digest,
- &verify_worker.worker,
- );
- }
- Ok(metadata) => {
- chunk_list.push((pos, metadata.ino()));
- }
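+ // callback to poll for abort/shutdown requests; only checked on every 1024th chunk to keep the overhead low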
+ let check_abort = |pos: usize| -> Result<(), Error> {
+ if pos & 1023 == 0 {
+ verify_worker.worker.check_abort()?;
+ verify_worker.worker.fail_on_shutdown()?;
}
- }
+ Ok(())
+ };
- // sorting by inode improves data locality, which makes it lots faster on spinners
- chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(&ino_b));
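+ // get_chunks_in_order sorts the chunk list by inode, which improves data locality and makes verification a lot faster on spinning disks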
+ let chunk_list =
+ verify_worker
+ .datastore
+ .get_chunks_in_order(&index, skip_chunk, check_abort)?;
for (pos, _) in chunk_list {
verify_worker.worker.check_abort()?;
- crate::tools::fail_on_shutdown()?;
+ verify_worker.worker.fail_on_shutdown()?;
let info = index.chunk_info(pos).unwrap();
// we must always recheck this here, the parallel worker below alters it!
- // Else we miss skipping repeated chunks from the same index, and re-verify them all
- if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
- continue; // already verified
+ if skip_chunk(&info.digest) {
+ continue; // already verified or marked corrupt
}
match verify_worker.datastore.load_chunk(&info.digest) {
let mut verify_result = VerifyState::Ok;
for info in manifest.files() {
- let result = proxmox::try_block!({
+ let result = proxmox_lang::try_block!({
task_log!(verify_worker.worker, " check {}", info.filename);
match archive_type(&info.filename)? {
ArchiveType::FixedIndex => verify_fixed_index(verify_worker, &backup_dir, info),
});
verify_worker.worker.check_abort()?;
- crate::tools::fail_on_shutdown()?;
+ verify_worker.worker.fail_on_shutdown()?;
if let Err(err) = result {
task_log!(
Ok(errors)
}
+
+/// Filter for the verification of snapshots: returns true if the snapshot should (still) be verified.
+pub fn verify_filter(
+ ignore_verified_snapshots: bool,
+ outdated_after: Option<i64>,
+ manifest: &BackupManifest,
+) -> bool {
+ if !ignore_verified_snapshots {
+ return true;
+ }
+
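+ // the result of the last verification is stored in the unprotected part of the manifest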
+ let raw_verify_state = manifest.unprotected["verify_state"].clone();
+ match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
+ Err(_) => true, // no last verification, always include
+ Ok(last_verify) => {
+ match outdated_after {
+ None => false, // never re-verify if ignored and no max age
+ Some(max_age) => {
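+ // re-verify only if the last verification is older than the given maximum age (in days)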
+ let now = proxmox_time::epoch_i64();
+ let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;
+
+ days_since_last_verify > max_age
+ }
+ }
+ }
+ }
+}