From c2009e5309bd8d3d264ab12f6924a3b74f045546 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Wed, 24 Jun 2020 13:11:45 +0200
Subject: [PATCH] src/api2/admin/datastore.rs: add verify api

---
 src/api2/admin/datastore.rs |  89 ++++++++++++++++++++
 src/backup.rs               |   3 +
 src/backup/verify.rs        | 168 ++++++++++++++++++++++++++++++++++++
 3 files changed, 260 insertions(+)
 create mode 100644 src/backup/verify.rs

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 6b22ca06..460bf2ff 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -394,6 +394,90 @@ pub fn status(
     crate::tools::disks::disk_usage(&datastore.base_path())
 }
 
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+                optional: true,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+                optional: true,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+    },
+)]
+/// Verify backups.
+///
+/// This function can verify a single backup snapshot, all backups from a backup group,
+/// or all backups in the datastore.
+pub fn verify(
+    store: String,
+    backup_type: Option<String>,
+    backup_id: Option<String>,
+    backup_time: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let what;
+
+    let mut backup_dir = None;
+    let mut backup_group = None;
+
+    match (backup_type, backup_id, backup_time) {
+        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            what = format!("{}:{}", store, dir);
+            backup_dir = Some(dir);
+        }
+        (Some(backup_type), Some(backup_id), None) => {
+            let group = BackupGroup::new(backup_type, backup_id);
+            what = format!("{}:{}", store, group);
+            backup_group = Some(group);
+        }
+        (None, None, None) => {
+            what = store.clone();
+        }
+        _ => bail!("parameters do not specify a backup group or snapshot"),
+    }
+
+    let username = rpcenv.get_user().unwrap();
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let upid_str = WorkerTask::new_thread(
+        "verify", Some(what.clone()), &username, to_stdout, move |worker|
+        {
+            let success = if let Some(backup_dir) = backup_dir {
+                verify_backup_dir(&datastore, &backup_dir, &worker)
+            } else if let Some(backup_group) = backup_group {
+                verify_backup_group(&datastore, &backup_group, &worker)
+            } else {
+                verify_all_backups(&datastore, &worker)
+            };
+            if !success {
+                bail!("verification failed - please check the log for details");
+            }
+            Ok(())
+        })?;
+
+    Ok(json!(upid_str))
+}
+
 #[macro_export]
 macro_rules! add_common_prune_prameters {
     ( [ $( $list1:tt )* ] ) => {
@@ -1261,6 +1345,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
         &Router::new()
             .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
     ),
+    (
+        "verify",
+        &Router::new()
+            .post(&API_METHOD_VERIFY)
+    ),
 ];
 
 const DATASTORE_INFO_ROUTER: Router = Router::new()
diff --git a/src/backup.rs b/src/backup.rs
index 3a89bcb2..10a65d6c 100644
--- a/src/backup.rs
+++ b/src/backup.rs
@@ -198,6 +198,9 @@ pub use prune::*;
 mod datastore;
 pub use datastore::*;
 
+mod verify;
+pub use verify::*;
+
 mod catalog_shell;
 pub use catalog_shell::*;
 
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
new file mode 100644
index 00000000..1a554e28
--- /dev/null
+++ b/src/backup/verify.rs
@@ -0,0 +1,168 @@
+use anyhow::{bail, Error};
+
+use crate::server::WorkerTask;
+
+use super::{
+    DataStore, BackupGroup, BackupDir, BackupInfo, IndexFile,
+    ENCR_COMPR_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0,
+    FileInfo, ArchiveType, archive_type,
+};
+
+fn verify_blob(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+
+    let (blob, raw_size) = datastore.load_blob(backup_dir, &info.filename)?;
+
+    let csum = openssl::sha::sha256(blob.raw_data());
+    if raw_size != info.size {
+        bail!("wrong size ({} != {})", info.size, raw_size);
+    }
+
+    if csum != info.csum {
+        bail!("wrong index checksum");
+    }
+
+    blob.verify_crc()?;
+
+    let magic = blob.magic();
+
+    if magic == &ENCR_COMPR_BLOB_MAGIC_1_0 || magic == &ENCRYPTED_BLOB_MAGIC_1_0 {
+        return Ok(());
+    }
+
+    blob.decode(None)?;
+
+    Ok(())
+}
+
+fn verify_fixed_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+
+    let mut path = backup_dir.relative_path();
+    path.push(&info.filename);
+
+    let index = datastore.open_fixed_reader(&path)?;
+
+    let (csum, size) = index.compute_csum();
+    if size != info.size {
+        bail!("wrong size ({} != {})", info.size, size);
+    }
+
+    if csum != info.csum {
+        bail!("wrong index checksum");
+    }
+
+    for pos in 0..index.index_count() {
+        let (start, end, digest) = index.chunk_info(pos).unwrap();
+        let size = end - start;
+        datastore.verify_stored_chunk(&digest, size)?;
+    }
+
+    Ok(())
+}
+
+fn verify_dynamic_index(datastore: &DataStore, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
+    let mut path = backup_dir.relative_path();
+    path.push(&info.filename);
+
+    let index = datastore.open_dynamic_reader(&path)?;
+
+    let (csum, size) = index.compute_csum();
+    if size != info.size {
+        bail!("wrong size ({} != {})", info.size, size);
+    }
+
+    if csum != info.csum {
+        bail!("wrong index checksum");
+    }
+
+    for pos in 0..index.index_count() {
+        let chunk_info = index.chunk_info(pos).unwrap();
+        let size = chunk_info.range.end - chunk_info.range.start;
+        datastore.verify_stored_chunk(&chunk_info.digest, size)?;
+    }
+
+    Ok(())
+}
+
+/// Verify a single backup snapshot
+///
+/// This checks all archives inside a backup snapshot.
+/// Errors are logged to the worker log.
+///
+/// Returns true if the verification was successful.
+pub fn verify_backup_dir(datastore: &DataStore, backup_dir: &BackupDir, worker: &WorkerTask) -> bool {
+
+    let manifest = match datastore.load_manifest(&backup_dir) {
+        Ok((manifest, _)) => manifest,
+        Err(err) => {
+            worker.log(format!("verify {}:{} - manifest load error: {}", datastore.name(), backup_dir, err));
+            return false;
+        }
+    };
+
+    worker.log(format!("verify {}:{}", datastore.name(), backup_dir));
+
+    let mut error_count = 0;
+
+    for info in manifest.files() {
+        let result = proxmox::try_block!({
+            worker.log(format!("  check {}", info.filename));
+            match archive_type(&info.filename)? {
+                ArchiveType::FixedIndex => verify_fixed_index(&datastore, &backup_dir, info),
+                ArchiveType::DynamicIndex => verify_dynamic_index(&datastore, &backup_dir, info),
+                ArchiveType::Blob => verify_blob(&datastore, &backup_dir, info),
+            }
+        });
+        if let Err(err) = result {
+            worker.log(format!("verify {}:{}/{} failed: {}", datastore.name(), backup_dir, info.filename, err));
+            error_count += 1;
+        }
+    }
+
+    error_count == 0
+}
+
+pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> bool {
+
+    let mut list = match group.list_backups(&datastore.base_path()) {
+        Ok(list) => list,
+        Err(err) => {
+            worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
+            return false;
+        }
+    };
+
+    worker.log(format!("verify group {}:{}", datastore.name(), group));
+
+    let mut error_count = 0;
+
+    BackupInfo::sort_list(&mut list, false); // newest first
+    for info in list {
+        if !verify_backup_dir(datastore, &info.backup_dir, worker) {
+            error_count += 1;
+        }
+    }
+
+    error_count == 0
+}
+
+pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> bool {
+
+    let list = match BackupGroup::list_groups(&datastore.base_path()) {
+        Ok(list) => list,
+        Err(err) => {
+            worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
+            return false;
+        }
+    };
+
+    worker.log(format!("verify datastore {}", datastore.name()));
+
+    let mut error_count = 0;
+    for group in list {
+        if !verify_backup_group(datastore, &group, worker) {
+            error_count += 1;
+        }
+    }
+
+    error_count == 0
+}
-- 
2.39.5
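
The router change above exposes the new handler as a POST-only `verify` sub-route of each datastore, and the verification scope follows purely from which of the optional parameters are supplied. The following standalone Rust sketch is not part of the patch; it only assumes the serde_json crate, and the datastore name, backup group and timestamp are example values. It mirrors the match in the new `verify()` handler to show how the three accepted parameter combinations map to snapshot, group, and whole-datastore verification.

// Illustration only: which verification scope each parameter combination
// selects. Mirrors the match in the new verify() API handler.

use serde_json::{json, Value};

fn verify_scope(params: &Value) -> &'static str {
    match (
        params.get("backup-type"),
        params.get("backup-id"),
        params.get("backup-time"),
    ) {
        (Some(_), Some(_), Some(_)) => "verify a single snapshot",
        (Some(_), Some(_), None) => "verify all snapshots of one backup group",
        (None, None, None) => "verify every backup in the datastore",
        _ => "rejected: parameters do not specify a backup group or snapshot",
    }
}

fn main() {
    // Example request bodies one might POST to the datastore's verify route
    // (datastore "store1", group "vm/100", and the timestamp are made up):
    let requests = vec![
        json!({}),
        json!({ "backup-type": "vm", "backup-id": "100" }),
        json!({ "backup-type": "vm", "backup-id": "100", "backup-time": 1593000000 }),
    ];

    for params in &requests {
        println!("{} -> {}", params, verify_scope(params));
    }
}

In every case the handler returns immediately with the UPID of the spawned worker task; the per-archive results end up in that task's log, which is why the verify_* helpers return a plain bool and log errors instead of propagating them.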