X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=src%2Fapi2%2Fadmin%2Fdatastore.rs;h=462b8d9ce2f2846f9b05ff3e0ac3caf177ba9a61;hb=2d55beeca071a7507d5cdaf23814a58a7e7e2527;hp=0dce2b4fc3ecd04529d63711936b1862741cc7e0;hpb=ba694720fcc6163fd46dcb186f2d5fd9f1c515a2;p=proxmox-backup.git

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 0dce2b4f..462b8d9c 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -1,8 +1,8 @@
 use std::collections::{HashSet, HashMap};
-use std::convert::TryFrom;
+use std::ffi::OsStr;
+use std::os::unix::ffi::OsStrExt;

-use chrono::{TimeZone, Local};
-use anyhow::{bail, Error};
+use anyhow::{bail, format_err, Error};
 use futures::*;
 use hyper::http::request::Parts;
 use hyper::{header, Body, Response, StatusCode};
@@ -10,13 +10,17 @@ use serde_json::{json, Value};

 use proxmox::api::{
     api, ApiResponseFuture, ApiHandler, ApiMethod, Router,
-    RpcEnvironment, RpcEnvironmentType, Permission, UserInformation};
+    RpcEnvironment, RpcEnvironmentType, Permission
+};
 use proxmox::api::router::SubdirMap;
 use proxmox::api::schema::*;
-use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox::tools::fs::{replace_file, CreateOptions};
 use proxmox::try_block;
 use proxmox::{http_err, identity, list_subdirs_api_method, sortable};

+use pxar::accessor::aio::Accessor;
+use pxar::EntryKind;
+
 use crate::api2::types::*;
 use crate::api2::node::rrd::create_value_from_rrd;
 use crate::backup::*;
@@ -24,7 +28,7 @@ use crate::config::datastore;
 use crate::config::cached_user_info::CachedUserInfo;

 use crate::server::WorkerTask;
-use crate::tools;
+use crate::tools::{self, AsyncReaderStream, WrappedReaderStream};
 use crate::config::acl::{
     PRIV_DATASTORE_AUDIT,
     PRIV_DATASTORE_MODIFY,
@@ -33,7 +37,11 @@ use crate::config::acl::{
     PRIV_DATASTORE_BACKUP,
 };

-fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> Result<(), Error> {
+fn check_backup_owner(
+    store: &DataStore,
+    group: &BackupGroup,
+    userid: &Userid,
+) -> Result<(), Error> {
     let owner = store.get_owner(group)?;
     if &owner != userid {
         bail!("backup owner check failed ({} != {})", userid, owner);
@@ -41,32 +49,53 @@ fn check_backup_owner(store: &DataStore, group: &BackupGroup, userid: &str) -> R
     Ok(())
 }

-fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
-
-    let mut path = store.base_path();
-    path.push(backup_dir.relative_path());
-    path.push(MANIFEST_BLOB_NAME);
-
-    let raw_data = file_get_contents(&path)?;
-    let index_size = raw_data.len() as u64;
-    let blob = DataBlob::from_raw(raw_data)?;
+fn read_backup_index(
+    store: &DataStore,
+    backup_dir: &BackupDir,
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

-    let manifest = BackupManifest::try_from(blob)?;
+    let (manifest, index_size) = store.load_manifest(backup_dir)?;

     let mut result = Vec::new();
     for item in manifest.files() {
         result.push(BackupContent {
             filename: item.filename.clone(),
+            crypt_mode: Some(item.crypt_mode),
             size: Some(item.size),
         });
     }

     result.push(BackupContent {
         filename: MANIFEST_BLOB_NAME.to_string(),
+        crypt_mode: Some(CryptMode::None),
         size: Some(index_size),
     });

-    Ok(result)
+    Ok((manifest, result))
+}
+
+fn get_all_snapshot_files(
+    store: &DataStore,
+    info: &BackupInfo,
+) -> Result<(BackupManifest, Vec<BackupContent>), Error> {
+
+    let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?;
+
+    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
+        acc.insert(item.filename.clone());
+        acc
+    });
+
+    for file in &info.files {
+        if file_set.contains(file) { continue; }
+        files.push(BackupContent {
+            filename: file.to_string(),
+            size: None,
+            crypt_mode: None,
+        });
+    }
+
+    Ok((manifest, files))
 }

 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
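Note: the fold in get_all_snapshot_files is simply collecting the manifest's filenames into a set, so that files found on disk but not described by the manifest can be appended with size: None and crypt_mode: None. A standalone sketch of the same pattern (filenames made up):

    use std::collections::HashSet;

    fn main() {
        let manifest_files = vec!["index.json.blob".to_string(), "catalog.pcat1.didx".to_string()];
        let on_disk = vec!["index.json.blob".to_string(), "client.log.blob".to_string()];

        // Collect the names the manifest already knows about ...
        let file_set: HashSet<String> = manifest_files.iter().cloned().collect();

        // ... then keep only the on-disk files it does not mention.
        let extra: Vec<&String> = on_disk.iter().filter(|f| !file_set.contains(*f)).collect();
        assert_eq!(extra.len(), 1);
        assert_eq!(extra[0], "client.log.blob");
    }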
@@ -110,9 +139,9 @@ fn list_groups(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<GroupListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;
@@ -133,7 +162,7 @@ fn list_groups(
         let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
         let owner = datastore.get_owner(group)?;
         if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
         }

         let result_item = GroupListItem {
@@ -191,30 +220,20 @@ pub fn list_snapshot_files(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<BackupContent>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;

     let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
-    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
-
-    let mut files = read_backup_index(&datastore, &snapshot)?;
+    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

-    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
-        acc.insert(item.filename.clone());
-        acc
-    });
-
-    for file in info.files {
-        if file_set.contains(&file) { continue; }
-        files.push(BackupContent { filename: file, size: None });
-    }
+    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

     Ok(files)
 }
@@ -253,18 +272,18 @@ fn delete_snapshot(
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let snapshot = BackupDir::new(backup_type, backup_id, backup_time);

     let datastore = DataStore::lookup_datastore(&store)?;

     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-    if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
+    if !allowed { check_backup_owner(&datastore, snapshot.group(), &userid)?; }

-    datastore.remove_backup_dir(&snapshot)?;
+    datastore.remove_backup_dir(&snapshot, false)?;

     Ok(Value::Null)
 }
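Note: every handler in this file gates access the same way: a bitwise test of the caller's privileges, with a fallback ownership check when the privilege bit is absent. The flags are plain bits, so the whole mechanism fits in a few lines (constants hypothetical; the real PRIV_DATASTORE_* values live in config::acl):

    const AUDIT: u64 = 1 << 0;
    const READ: u64 = 1 << 1;
    const MODIFY: u64 = 1 << 2;

    fn main() {
        let user_privs = AUDIT | READ;
        // Mirrors `(user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0`.
        assert!((user_privs & (AUDIT | READ)) != 0);
        // MODIFY is unset, so delete_snapshot would fall back to check_backup_owner.
        assert_eq!(user_privs & MODIFY, 0);
    }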
@@ -309,9 +328,9 @@ pub fn list_snapshots (
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<SnapshotListItem>, Error> {

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let datastore = DataStore::lookup_datastore(&store)?;
@@ -334,28 +353,49 @@ pub fn list_snapshots (
         let owner = datastore.get_owner(group)?;

         if !list_all {
-            if owner != username { continue; }
+            if owner != userid { continue; }
         }

-        let mut result_item = SnapshotListItem {
+        let mut size = None;
+
+        let (comment, files) = match get_all_snapshot_files(&datastore, &info) {
+            Ok((manifest, files)) => {
+                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
+                // extract the first line from notes
+                let comment: Option<String> = manifest.unprotected["notes"]
+                    .as_str()
+                    .and_then(|notes| notes.lines().next())
+                    .map(String::from);
+
+                (comment, files)
+            },
+            Err(err) => {
+                eprintln!("error during snapshot file listing: '{}'", err);
+                (
+                    None,
+                    info
+                        .files
+                        .iter()
+                        .map(|x| BackupContent {
+                            filename: x.to_string(),
+                            size: None,
+                            crypt_mode: None,
+                        })
+                        .collect()
+                )
+            },
+        };
+
+        let result_item = SnapshotListItem {
             backup_type: group.backup_type().to_string(),
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
-            files: info.files,
-            size: None,
+            comment,
+            files,
+            size,
             owner: Some(owner),
         };

-        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
-            let mut backup_size = 0;
-            for item in index.iter() {
-                if let Some(item_size) = item.size {
-                    backup_size += item_size;
-                }
-            }
-            result_item.size = Some(backup_size);
-        }
-
         snapshots.push(result_item);
     }
@@ -387,6 +427,104 @@ pub fn status(
     crate::tools::disks::disk_usage(&datastore.base_path())
 }

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+                optional: true,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+                optional: true,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        schema: UPID_SCHEMA,
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+    },
+)]
+/// Verify backups.
+///
+/// This function can verify a single backup snapshot, all backups from a backup group,
+/// or all backups in the datastore.
+pub fn verify(
+    store: String,
+    backup_type: Option<String>,
+    backup_id: Option<String>,
+    backup_time: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let worker_id;
+
+    let mut backup_dir = None;
+    let mut backup_group = None;
+
+    match (backup_type, backup_id, backup_time) {
+        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
+            worker_id = format!("{}_{}_{}_{:08X}", store, backup_type, backup_id, backup_time);
+            let dir = BackupDir::new(backup_type, backup_id, backup_time);
+            backup_dir = Some(dir);
+        }
+        (Some(backup_type), Some(backup_id), None) => {
+            worker_id = format!("{}_{}_{}", store, backup_type, backup_id);
+            let group = BackupGroup::new(backup_type, backup_id);
+            backup_group = Some(group);
+        }
+        (None, None, None) => {
+            worker_id = store.clone();
+        }
+        _ => bail!("parameters do not specify a backup group or snapshot"),
+    }
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };
+
+    let upid_str = WorkerTask::new_thread(
+        "verify",
+        Some(worker_id.clone()),
+        userid,
+        to_stdout,
+        move |worker| {
+            let failed_dirs = if let Some(backup_dir) = backup_dir {
+                let mut verified_chunks = HashSet::with_capacity(1024*16);
+                let mut corrupt_chunks = HashSet::with_capacity(64);
+                let mut res = Vec::new();
+                if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
+                    res.push(backup_dir.to_string());
+                }
+                res
+            } else if let Some(backup_group) = backup_group {
+                verify_backup_group(&datastore, &backup_group, &worker)?
+            } else {
+                verify_all_backups(&datastore, &worker)?
+            };
+            if failed_dirs.len() > 0 {
+                worker.log("Failed to verify following snapshots:");
+                for dir in failed_dirs {
+                    worker.log(format!("\t{}", dir));
+                }
+                bail!("verification failed - please check the log for details");
+            }
+            Ok(())
+        },
+    )?;
+
+    Ok(json!(upid_str))
+}
+
 #[macro_export]
 macro_rules! add_common_prune_prameters {
     ( [ $( $list1:tt )* ] ) => {
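Note: verify accepts exactly three parameter shapes: snapshot (type + id + time), group (type + id), or the whole datastore (none of the three), and encodes the choice into the worker id, with the backup time rendered as eight uppercase hex digits. A quick check of the format (values made up):

    fn main() {
        let (store, ty, id, time) = ("store1", "vm", "100", 1589297854i64);
        assert_eq!(format!("{}_{}_{}_{:08X}", store, ty, id, time), "store1_vm_100_5EBAC2BE");
        assert_eq!(format!("{}_{}_{}", store, ty, id), "store1_vm_100");
    }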
@@ -432,7 +570,7 @@ macro_rules! add_common_prune_prameters {

 pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
     "Returns the list of snapshots and a flag indicating if they are kept or removed.",
-    PruneListItem::API_SCHEMA
+    &PruneListItem::API_SCHEMA
 ).schema();

 const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
@@ -467,9 +605,9 @@ fn prune(
     param: Value,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let backup_type = tools::required_string_param(&param, "backup-type")?;
     let backup_id = tools::required_string_param(&param, "backup-id")?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;
-    let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

     let dry_run = param["dry-run"].as_bool().unwrap_or(false);
@@ -478,7 +616,7 @@ fn prune(
     let group = BackupGroup::new(backup_type, backup_id);

     let datastore = DataStore::lookup_datastore(&store)?;

     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
-    if !allowed { check_backup_owner(&datastore, &group, &username)?; }
+    if !allowed { check_backup_owner(&datastore, &group, &userid)?; }

     let prune_options = PruneOptions {
         keep_last: param["keep-last"].as_u64(),
@@ -520,7 +658,7 @@ fn prune(

     // We use a WorkerTask just to have a task log, but run synchronously
-    let worker = WorkerTask::new("prune", Some(worker_id), "root@pam", true)?;
+    let worker = WorkerTask::new("prune", Some(worker_id), Userid::root_userid().clone(), true)?;

     let result = try_block! {
         if keep_all {
@@ -557,7 +695,7 @@ fn prune(
             }));

             if !(dry_run || keep) {
-                datastore.remove_backup_dir(&info.backup_dir)?;
+                datastore.remove_backup_dir(&info.backup_dir, true)?;
             }
         }
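Note: the keep options are read straight out of the JSON parameter object; as_u64 returns None for absent keys, so unset options simply do not constrain the prune selection. The access pattern, sketched with serde_json:

    use serde_json::json;

    fn main() {
        let param = json!({ "keep-last": 3, "dry-run": true });
        assert_eq!(param["keep-last"].as_u64(), Some(3));
        assert_eq!(param["keep-daily"].as_u64(), None); // absent key -> Null -> None
        assert!(param["dry-run"].as_bool().unwrap_or(false));
    }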
@@ -602,11 +740,15 @@ fn start_garbage_collection(

     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let upid_str = WorkerTask::new_thread(
-        "garbage_collection", Some(store.clone()), "root@pam", to_stdout, move |worker|
-        {
+        "garbage_collection",
+        Some(store.clone()),
+        Userid::root_userid().clone(),
+        to_stdout,
+        move |worker| {
             worker.log(format!("starting garbage collection on store {}", store));
             datastore.garbage_collection(&worker)
-        })?;
+        },
+    )?;

     Ok(json!(upid_str))
 }
@@ -670,13 +812,13 @@ fn get_datastore_list(

     let (config, _digest) = datastore::config()?;

-    let username = rpcenv.get_user().unwrap();
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
     let user_info = CachedUserInfo::new()?;

     let mut list = Vec::new();

     for (store, (_, data)) in &config.sections {
-        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
         if allowed {
             let mut entry = json!({ "store": store });
@@ -721,9 +863,9 @@ fn download_file(
         let store = tools::required_string_param(&param, "store")?;
         let datastore = DataStore::lookup_datastore(store)?;

-        let username = rpcenv.get_user().unwrap();
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
         let user_info = CachedUserInfo::new()?;
-        let user_privs = user_info.lookup_privs(&username, &["datastore", &store]);
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);

         let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
@@ -734,18 +876,17 @@ fn download_file(
         let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

         let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
-        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &username)?; }
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }

-        println!("Download {} from {} ({}/{}/{}/{})", file_name, store,
-                 backup_type, backup_id, Local.timestamp(backup_time, 0), file_name);
+        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
         path.push(&file_name);

         let file = tokio::fs::File::open(&path)
-            .map_err(|err| http_err!(BAD_REQUEST, format!("File open failed: {}", err)))
-            .await?;
+            .await
+            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

         let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
             .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
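Note: download_file streams the raw, still-encoded file: the tokio file is wrapped in a FramedRead with a BytesCodec, and each BytesMut chunk is frozen into an immutable Bytes for the hyper body. A minimal sketch of the same pipeline, assuming compatible tokio/tokio-util/futures versions (must run inside a tokio runtime; path made up):

    use futures::{StreamExt, TryStreamExt};

    async fn stream_len(path: &str) -> std::io::Result<usize> {
        let file = tokio::fs::File::open(path).await?;
        let mut stream =
            tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
                .map_ok(|b| b.freeze()); // BytesMut -> Bytes, as in download_file
        let mut total = 0;
        while let Some(chunk) = stream.next().await {
            total += chunk?.len();
        }
        Ok(total)
    }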
@@ -764,6 +905,125 @@ fn download_file(
     }.boxed()
 }

+#[sortable]
+pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&download_file_decoded),
+    &ObjectSchema::new(
+        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
+        &sorted!([
+            ("store", false, &DATASTORE_SCHEMA),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
+            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
+        ]),
+    )
+).access(None, &Permission::Privilege(
+    &["datastore", "{store}"],
+    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+    true)
+);
+
+fn download_file_decoded(
+    _parts: Parts,
+    _req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(store)?;
+
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+        let user_info = CachedUserInfo::new()?;
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();
+
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;
+
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+        for file in files {
+            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+                bail!("cannot decode '{}' - is encrypted", file_name);
+            }
+        }
+
+        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);
+
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(&file_name);
+
+        let extension = file_name.rsplitn(2, '.').next().unwrap();
+
+        let body = match extension {
+            "didx" => {
+                let index = DynamicIndexReader::open(&path)
+                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+                let (csum, size) = index.compute_csum();
+                manifest.verify_file(&file_name, &csum, size)?;
+
+                let chunk_reader = LocalChunkReader::new(datastore, None);
+                let reader = AsyncIndexReader::new(index, chunk_reader);
+                Body::wrap_stream(AsyncReaderStream::new(reader)
+                    .map_err(move |err| {
+                        eprintln!("error during streaming of '{:?}' - {}", path, err);
+                        err
+                    }))
+            },
+            "fidx" => {
+                let index = FixedIndexReader::open(&path)
+                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;
+
+                let (csum, size) = index.compute_csum();
+                manifest.verify_file(&file_name, &csum, size)?;
+
+                let chunk_reader = LocalChunkReader::new(datastore, None);
+                let reader = AsyncIndexReader::new(index, chunk_reader);
+                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
+                    .map_err(move |err| {
+                        eprintln!("error during streaming of '{:?}' - {}", path, err);
+                        err
+                    }))
+            },
+            "blob" => {
+                let file = std::fs::File::open(&path)
+                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
+
+                // FIXME: load full blob to verify index checksum?
+
+                Body::wrap_stream(
+                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
+                        .map_err(move |err| {
+                            eprintln!("error during streaming of '{:?}' - {}", path, err);
+                            err
+                        })
+                )
+            },
+            extension => {
+                bail!("cannot download '{}' files", extension);
+            },
+        };
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
+}
+
 #[sortable]
 pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
     &ApiHandler::AsyncHttp(&upload_backup_log),
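Note: the dispatch on the archive type in download_file_decoded relies on rsplitn(2, '.') iterating from the right, so the first item is everything after the last dot:

    fn main() {
        assert_eq!("catalog.pcat1.didx".rsplitn(2, '.').next(), Some("didx"));
        assert_eq!("disk.img.fidx".rsplitn(2, '.').next(), Some("fidx"));
        assert_eq!("index.json.blob".rsplitn(2, '.').next(), Some("blob"));
    }

A name without any dot yields the whole name, which then lands in the catch-all bail! arm.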
@@ -801,8 +1061,8 @@ fn upload_backup_log(

         let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);

-        let username = rpcenv.get_user().unwrap();
-        check_backup_owner(&datastore, backup_dir.group(), &username)?;
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+        check_backup_owner(&datastore, backup_dir.group(), &userid)?;

         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
@@ -823,17 +1083,242 @@ fn upload_backup_log(
             })
             .await?;

-        let blob = DataBlob::from_raw(data)?;
-        // always verify CRC at server side
-        blob.verify_crc()?;
-        let raw_data = blob.raw_data();
-        replace_file(&path, raw_data, CreateOptions::new())?;
+        // always verify blob/CRC at server side
+        let blob = DataBlob::load_from_reader(&mut &data[..])?;
+
+        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

         // fixme: use correct formatter
         Ok(crate::server::formatter::json_response(Ok(Value::Null)))
     }.boxed()
 }

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            "filepath": {
+                description: "Base64 encoded path.",
+                type: String,
+            }
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Get the entries of the given path of the catalog
+fn catalog(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    filepath: String,
+    _param: Value,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Value, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let file_name = CATALOG_NAME;
+
+    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+    for file in files {
+        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            bail!("cannot decode '{}' - is encrypted", file_name);
+        }
+    }
+
+    let mut path = datastore.base_path();
+    path.push(backup_dir.relative_path());
+    path.push(file_name);
+
+    let index = DynamicIndexReader::open(&path)
+        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+    let (csum, size) = index.compute_csum();
+    manifest.verify_file(&file_name, &csum, size)?;
+
+    let chunk_reader = LocalChunkReader::new(datastore, None);
+    let reader = BufferedDynamicReader::new(index, chunk_reader);
+
+    let mut catalog_reader = CatalogReader::new(reader);
+    let mut current = catalog_reader.root()?;
+    let mut components = vec![];
+
+
+    if filepath != "root" {
+        components = base64::decode(filepath)?;
+        if components.len() > 0 && components[0] == '/' as u8 {
+            components.remove(0);
+        }
+        for component in components.split(|c| *c == '/' as u8) {
+            if let Some(entry) = catalog_reader.lookup(&current, component)? {
+                current = entry;
+            } else {
+                bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components));
+            }
+        }
+    }
+
+    let mut res = Vec::new();
+
+    for direntry in catalog_reader.read_dir(&current)? {
+        let mut components = components.clone();
+        components.push('/' as u8);
+        components.extend(&direntry.name);
+        let path = base64::encode(components);
+        let text = String::from_utf8_lossy(&direntry.name);
+        let mut entry = json!({
+            "filepath": path,
+            "text": text,
+            "type": CatalogEntryType::from(&direntry.attr).to_string(),
+            "leaf": true,
+        });
+        match direntry.attr {
+            DirEntryAttribute::Directory { start: _ } => {
+                entry["leaf"] = false.into();
+            },
+            DirEntryAttribute::File { size, mtime } => {
+                entry["size"] = size.into();
+                entry["mtime"] = mtime.into();
+            },
+            _ => {},
+        }
+        res.push(entry);
+    }
+
+    Ok(res.into())
+}
+
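Note: the filepath parameter is the base64 encoding of the raw byte path ("root" is the special case for the catalog root); the handler strips a leading '/' and splits the remaining bytes on '/'. What a client has to build, sketched with the same base64 crate this file already uses (archive and path made up; for pxar-file-download the first component is the archive name, the rest is the path inside it):

    fn main() {
        let filepath = base64::encode(b"/root.pxar/etc/hostname");
        let decoded = base64::decode(&filepath).unwrap();
        assert_eq!(decoded, b"/root.pxar/etc/hostname".to_vec());
    }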
+#[sortable]
+pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
+    &ApiHandler::AsyncHttp(&pxar_file_download),
+    &ObjectSchema::new(
+        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
+        &sorted!([
+            ("store", false, &DATASTORE_SCHEMA),
+            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
+            ("backup-id", false, &BACKUP_ID_SCHEMA),
+            ("backup-time", false, &BACKUP_TIME_SCHEMA),
+            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
+        ]),
+    )
+).access(None, &Permission::Privilege(
+    &["datastore", "{store}"],
+    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
+    true)
+);
+
+fn pxar_file_download(
+    _parts: Parts,
+    _req_body: Body,
+    param: Value,
+    _info: &ApiMethod,
+    rpcenv: Box<dyn RpcEnvironment>,
+) -> ApiResponseFuture {
+
+    async move {
+        let store = tools::required_string_param(&param, "store")?;
+        let datastore = DataStore::lookup_datastore(&store)?;
+
+        let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+        let user_info = CachedUserInfo::new()?;
+        let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();
+
+        let backup_type = tools::required_string_param(&param, "backup-type")?;
+        let backup_id = tools::required_string_param(&param, "backup-id")?;
+        let backup_time = tools::required_integer_param(&param, "backup-time")?;
+
+        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+        let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+        if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+        let mut components = base64::decode(&filepath)?;
+        if components.len() > 0 && components[0] == '/' as u8 {
+            components.remove(0);
+        }
+
+        let mut split = components.splitn(2, |c| *c == '/' as u8);
+        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
+        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
+        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
+        for file in files {
+            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+                bail!("cannot decode '{}' - is encrypted", pxar_name);
+            }
+        }
+
+        let mut path = datastore.base_path();
+        path.push(backup_dir.relative_path());
+        path.push(pxar_name);
+
+        let index = DynamicIndexReader::open(&path)
+            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
+
+        let (csum, size) = index.compute_csum();
+        manifest.verify_file(&pxar_name, &csum, size)?;
+
+        let chunk_reader = LocalChunkReader::new(datastore, None);
+        let reader = BufferedDynamicReader::new(index, chunk_reader);
+        let archive_size = reader.archive_size();
+        let reader = LocalDynamicReadAt::new(reader);
+
+        let decoder = Accessor::new(reader, archive_size).await?;
+        let root = decoder.open_root().await?;
+        let file = root
+            .lookup(OsStr::from_bytes(file_path)).await?
+            .ok_or(format_err!("error opening '{:?}'", file_path))?;
+
+        let file = match file.kind() {
+            EntryKind::File { .. } => file,
+            EntryKind::Hardlink(_) => {
+                decoder.follow_hardlink(&file).await?
+            },
+            // TODO symlink
+            other => bail!("cannot download file of type {:?}", other),
+        };
+
+        let body = Body::wrap_stream(
+            AsyncReaderStream::new(file.contents().await?)
+                .map_err(move |err| {
+                    eprintln!("error during streaming of '{:?}' - {}", filepath, err);
+                    err
+                })
+        );
+
+        // fixme: set other headers ?
+        Ok(Response::builder()
+            .status(StatusCode::OK)
+            .header(header::CONTENT_TYPE, "application/octet-stream")
+            .body(body)
+            .unwrap())
+    }.boxed()
+}
+
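Note: pxar entry names are raw bytes with no UTF-8 guarantee, which is why this diff pulls in OsStr/OsStrExt: OsStr::from_bytes builds the lookup key without a lossy conversion. Unix-only sketch:

    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;

    fn main() {
        let name = OsStr::from_bytes(b"etc/hostname");
        assert_eq!(name.to_str(), Some("etc/hostname"));

        // Invalid UTF-8 still round-trips as bytes:
        let odd = OsStr::from_bytes(&[0x66, 0x6f, 0x80]);
        assert_eq!(odd.to_str(), None);
        assert_eq!(odd.as_bytes(), &[0x66, 0x6f, 0x80]);
    }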
 #[api(
     input: {
         properties: {
@@ -873,13 +1358,125 @@ fn get_rrd_stats(
     )
 }

+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
+    },
+)]
+/// Get "notes" for a specific backup
+fn get_notes(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let manifest = datastore.load_manifest_json(&backup_dir)?;
+
+    let notes = manifest["unprotected"]["notes"]
+        .as_str()
+        .unwrap_or("");
+
+    Ok(String::from(notes))
+}
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+            },
+            "backup-type": {
+                schema: BACKUP_TYPE_SCHEMA,
+            },
+            "backup-id": {
+                schema: BACKUP_ID_SCHEMA,
+            },
+            "backup-time": {
+                schema: BACKUP_TIME_SCHEMA,
+            },
+            notes: {
+                description: "A multiline text.",
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+    },
+)]
+/// Set "notes" for a specific backup
+fn set_notes(
+    store: String,
+    backup_type: String,
+    backup_id: String,
+    backup_time: i64,
+    notes: String,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let datastore = DataStore::lookup_datastore(&store)?;
+
+    let userid: Userid = rpcenv.get_user().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+    let user_privs = user_info.lookup_privs(&userid, &["datastore", &store]);
+
+    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time);
+
+    let allowed = (user_privs & PRIV_DATASTORE_READ) != 0;
+    if !allowed { check_backup_owner(&datastore, backup_dir.group(), &userid)?; }
+
+    let mut manifest = datastore.load_manifest_json(&backup_dir)?;
+
+    manifest["unprotected"]["notes"] = notes.into();
+
+    datastore.store_manifest(&backup_dir, manifest)?;
+
+    Ok(())
+}
+
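Note: both notes handlers go through the manifest's JSON form and only touch the "unprotected" object, so editing notes cannot invalidate the signed part of a manifest; list_snapshots' comment field is just the first line of the same value. The shape of the data, sketched with serde_json:

    use serde_json::json;

    fn main() {
        let mut manifest = json!({ "files": [], "unprotected": {} });
        manifest["unprotected"]["notes"] = "first line\nmore detail".into();

        let comment = manifest["unprotected"]["notes"]
            .as_str()
            .and_then(|n| n.lines().next())
            .map(String::from);
        assert_eq!(comment.as_deref(), Some("first line"));
    }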
"download", &Router::new() .download(&API_METHOD_DOWNLOAD_FILE) ), + ( + "download-decoded", + &Router::new() + .download(&API_METHOD_DOWNLOAD_FILE_DECODED) + ), ( "files", &Router::new() @@ -896,11 +1493,22 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ &Router::new() .get(&API_METHOD_LIST_GROUPS) ), + ( + "notes", + &Router::new() + .get(&API_METHOD_GET_NOTES) + .put(&API_METHOD_SET_NOTES) + ), ( "prune", &Router::new() .post(&API_METHOD_PRUNE) ), + ( + "pxar-file-download", + &Router::new() + .download(&API_METHOD_PXAR_FILE_DOWNLOAD) + ), ( "rrd", &Router::new() @@ -922,6 +1530,11 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ &Router::new() .upload(&API_METHOD_UPLOAD_BACKUP_LOG) ), + ( + "verify", + &Router::new() + .post(&API_METHOD_VERIFY) + ), ]; const DATASTORE_INFO_ROUTER: Router = Router::new()