Line | Data | |
---|---|---|
1 | //! Datastore Management | |
2 | ||
3 | use std::collections::HashSet; | |
4 | use std::ffi::OsStr; | |
5 | use std::os::unix::ffi::OsStrExt; | |
6 | use std::path::PathBuf; | |
7 | use std::sync::Arc; | |
8 | ||
9 | use anyhow::{bail, format_err, Error}; | |
10 | use futures::*; | |
11 | use hyper::http::request::Parts; | |
12 | use hyper::{header, Body, Response, StatusCode}; | |
13 | use serde_json::{json, Value}; | |
14 | use tokio_stream::wrappers::ReceiverStream; | |
15 | ||
16 | use proxmox_async::blocking::WrappedReaderStream; | |
17 | use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; | |
18 | use proxmox_compression::zstd::ZstdEncoder; | |
19 | use proxmox_router::{ | |
20 | http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission, | |
21 | Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, | |
22 | }; | |
23 | use proxmox_schema::*; | |
24 | use proxmox_sys::fs::{ | |
25 | file_read_firstline, file_read_optional_string, replace_file, CreateOptions, | |
26 | }; | |
27 | use proxmox_sys::sortable; | |
28 | use proxmox_sys::{task_log, task_warn}; | |
29 | ||
30 | use pxar::accessor::aio::Accessor; | |
31 | use pxar::EntryKind; | |
32 | ||
33 | use pbs_api_types::{ | |
34 | Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus, | |
35 | GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, | |
36 | SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, | |
37 | BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, | |
38 | PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, | |
39 | PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, | |
40 | }; | |
41 | use pbs_client::pxar::{create_tar, create_zip}; | |
42 | use pbs_config::CachedUserInfo; | |
43 | use pbs_datastore::backup_info::BackupInfo; | |
44 | use pbs_datastore::cached_chunk_reader::CachedChunkReader; | |
45 | use pbs_datastore::catalog::{ArchiveEntry, CatalogReader}; | |
46 | use pbs_datastore::data_blob::DataBlob; | |
47 | use pbs_datastore::data_blob_reader::DataBlobReader; | |
48 | use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; | |
49 | use pbs_datastore::fixed_index::FixedIndexReader; | |
50 | use pbs_datastore::index::IndexFile; | |
51 | use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; | |
52 | use pbs_datastore::prune::compute_prune_info; | |
53 | use pbs_datastore::{ | |
54 | check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, | |
55 | StoreProgress, CATALOG_NAME, | |
56 | }; | |
57 | use pbs_tools::json::{required_integer_param, required_string_param}; | |
58 | use proxmox_rest_server::{formatter, WorkerTask}; | |
59 | ||
60 | use crate::api2::node::rrd::create_value_from_rrd; | |
61 | use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter}; | |
62 | ||
63 | use crate::server::jobstate::Job; | |
64 | ||
65 | const GROUP_NOTES_FILE_NAME: &str = "notes"; | |
66 | ||
67 | fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf { | |
68 | let mut note_path = store.base_path(); | |
69 | note_path.push(group.to_string()); | |
70 | note_path.push(GROUP_NOTES_FILE_NAME); | |
71 | note_path | |
72 | } | |
73 | ||
74 | fn check_priv_or_backup_owner( | |
75 | store: &DataStore, | |
76 | group: &pbs_api_types::BackupGroup, | |
77 | auth_id: &Authid, | |
78 | required_privs: u64, | |
79 | ) -> Result<(), Error> { | |
80 | let user_info = CachedUserInfo::new()?; | |
81 | let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]); | |
82 | ||
83 | if privs & required_privs == 0 { | |
84 | let owner = store.get_owner(group)?; | |
85 | check_backup_owner(&owner, auth_id)?; | |
86 | } | |
87 | Ok(()) | |
88 | } | |
89 | ||
90 | fn read_backup_index( | |
91 | store: &DataStore, | |
92 | backup_dir: &BackupDir, | |
93 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { | |
94 | let (manifest, index_size) = store.load_manifest(backup_dir)?; | |
95 | ||
96 | let mut result = Vec::new(); | |
97 | for item in manifest.files() { | |
98 | result.push(BackupContent { | |
99 | filename: item.filename.clone(), | |
100 | crypt_mode: Some(item.crypt_mode), | |
101 | size: Some(item.size), | |
102 | }); | |
103 | } | |
104 | ||
105 | result.push(BackupContent { | |
106 | filename: MANIFEST_BLOB_NAME.to_string(), | |
107 | crypt_mode: match manifest.signature { | |
108 | Some(_) => Some(CryptMode::SignOnly), | |
109 | None => Some(CryptMode::None), | |
110 | }, | |
111 | size: Some(index_size), | |
112 | }); | |
113 | ||
114 | Ok((manifest, result)) | |
115 | } | |
116 | ||
117 | fn get_all_snapshot_files( | |
118 | store: &DataStore, | |
119 | info: &BackupInfo, | |
120 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { | |
121 | let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?; | |
122 | ||
123 | let file_set = files.iter().fold(HashSet::new(), |mut acc, item| { | |
124 | acc.insert(item.filename.clone()); | |
125 | acc | |
126 | }); | |
127 | ||
128 | for file in &info.files { | |
129 | if file_set.contains(file) { | |
130 | continue; | |
131 | } | |
132 | files.push(BackupContent { | |
133 | filename: file.to_string(), | |
134 | size: None, | |
135 | crypt_mode: None, | |
136 | }); | |
137 | } | |
138 | ||
139 | Ok((manifest, files)) | |
140 | } | |
141 | ||
142 | #[api( | |
143 | input: { | |
144 | properties: { | |
145 | store: { | |
146 | schema: DATASTORE_SCHEMA, | |
147 | }, | |
148 | }, | |
149 | }, | |
150 | returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE, | |
151 | access: { | |
152 | permission: &Permission::Privilege( | |
153 | &["datastore", "{store}"], | |
154 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, | |
155 | true), | |
156 | }, | |
157 | )] | |
158 | /// List backup groups. | |
159 | pub fn list_groups( | |
160 | store: String, | |
161 | rpcenv: &mut dyn RpcEnvironment, | |
162 | ) -> Result<Vec<GroupListItem>, Error> { | |
163 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
164 | let user_info = CachedUserInfo::new()?; | |
165 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); | |
166 | ||
167 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
168 | let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; | |
169 | ||
170 | datastore | |
171 | .iter_backup_groups()? | |
172 | .try_fold(Vec::new(), |mut group_info, group| { | |
173 | let group = group?; | |
174 | let owner = match datastore.get_owner(group.as_ref()) { | |
175 | Ok(auth_id) => auth_id, | |
176 | Err(err) => { | |
177 | let id = &store; | |
178 | eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err); | |
179 | return Ok(group_info); | |
180 | } | |
181 | }; | |
182 | if !list_all && check_backup_owner(&owner, &auth_id).is_err() { | |
183 | return Ok(group_info); | |
184 | } | |
185 | ||
186 | let snapshots = match group.list_backups() { | |
187 | Ok(snapshots) => snapshots, | |
188 | Err(_) => return Ok(group_info), | |
189 | }; | |
190 | ||
191 | let backup_count: u64 = snapshots.len() as u64; | |
192 | if backup_count == 0 { | |
193 | return Ok(group_info); | |
194 | } | |
195 | ||
196 | let last_backup = snapshots | |
197 | .iter() | |
198 | .fold(&snapshots[0], |a, b| { | |
199 | if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() { | |
200 | a | |
201 | } else { | |
202 | b | |
203 | } | |
204 | }) | |
205 | .to_owned(); | |
206 | ||
207 | let note_path = get_group_note_path(&datastore, group.as_ref()); | |
208 | let comment = file_read_firstline(¬e_path).ok(); | |
209 | ||
210 | group_info.push(GroupListItem { | |
211 | backup: group.into(), | |
212 | last_backup: last_backup.backup_dir.backup_time(), | |
213 | owner: Some(owner), | |
214 | backup_count, | |
215 | files: last_backup.files, | |
216 | comment, | |
217 | }); | |
218 | ||
219 | Ok(group_info) | |
220 | }) | |
221 | } | |
222 | ||
223 | #[api( | |
224 | input: { | |
225 | properties: { | |
226 | store: { schema: DATASTORE_SCHEMA }, | |
227 | "backup-type": { type: BackupType }, | |
228 | "backup-id": { schema: BACKUP_ID_SCHEMA }, | |
229 | }, | |
230 | }, | |
231 | access: { | |
232 | permission: &Permission::Privilege( | |
233 | &["datastore", "{store}"], | |
234 | PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE, | |
235 | true), | |
236 | }, | |
237 | )] | |
238 | /// Delete backup group including all snapshots. | |
239 | pub fn delete_group( | |
240 | store: String, | |
241 | backup_type: BackupType, | |
242 | backup_id: String, | |
243 | _info: &ApiMethod, | |
244 | rpcenv: &mut dyn RpcEnvironment, | |
245 | ) -> Result<Value, Error> { | |
246 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
247 | ||
248 | let group = pbs_api_types::BackupGroup::from((backup_type, backup_id)); | |
249 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; | |
250 | ||
251 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?; | |
252 | ||
253 | if !datastore.remove_backup_group(&group)? { | |
254 | bail!("group only partially deleted due to protected snapshots"); | |
255 | } | |
256 | ||
257 | Ok(Value::Null) | |
258 | } | |
259 | ||
260 | #[api( | |
261 | input: { | |
262 | properties: { | |
263 | store: { schema: DATASTORE_SCHEMA }, | |
264 | "backup-type": { type: BackupType }, | |
265 | "backup-id": { schema: BACKUP_ID_SCHEMA }, | |
266 | "backup-time": { schema: BACKUP_TIME_SCHEMA }, | |
267 | }, | |
268 | }, | |
269 | returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE, | |
270 | access: { | |
271 | permission: &Permission::Privilege( | |
272 | &["datastore", "{store}"], | |
273 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
274 | true), | |
275 | }, | |
276 | )] | |
277 | /// List snapshot files. | |
278 | pub fn list_snapshot_files( | |
279 | store: String, | |
280 | backup_type: BackupType, | |
281 | backup_id: String, | |
282 | backup_time: i64, | |
283 | _info: &ApiMethod, | |
284 | rpcenv: &mut dyn RpcEnvironment, | |
285 | ) -> Result<Vec<BackupContent>, Error> { | |
286 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
287 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
288 | ||
289 | let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
290 | ||
291 | check_priv_or_backup_owner( | |
292 | &datastore, | |
293 | snapshot.as_ref(), | |
294 | &auth_id, | |
295 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ, | |
296 | )?; | |
297 | ||
298 | let info = BackupInfo::new(snapshot)?; | |
299 | ||
300 | let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?; | |
301 | ||
302 | Ok(files) | |
303 | } | |
304 | ||
305 | #[api( | |
306 | input: { | |
307 | properties: { | |
308 | store: { schema: DATASTORE_SCHEMA }, | |
309 | "backup-type": { type: BackupType }, | |
310 | "backup-id": { schema: BACKUP_ID_SCHEMA }, | |
311 | "backup-time": { schema: BACKUP_TIME_SCHEMA }, | |
312 | }, | |
313 | }, | |
314 | access: { | |
315 | permission: &Permission::Privilege( | |
316 | &["datastore", "{store}"], | |
317 | PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE, | |
318 | true), | |
319 | }, | |
320 | )] | |
321 | /// Delete backup snapshot. | |
322 | pub fn delete_snapshot( | |
323 | store: String, | |
324 | backup_type: BackupType, | |
325 | backup_id: String, | |
326 | backup_time: i64, | |
327 | _info: &ApiMethod, | |
328 | rpcenv: &mut dyn RpcEnvironment, | |
329 | ) -> Result<Value, Error> { | |
330 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
331 | ||
332 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; | |
333 | let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
334 | ||
335 | check_priv_or_backup_owner( | |
336 | &datastore, | |
337 | snapshot.as_ref(), | |
338 | &auth_id, | |
339 | PRIV_DATASTORE_MODIFY, | |
340 | )?; | |
341 | ||
342 | datastore.remove_backup_dir(snapshot.as_ref(), false)?; | |
343 | ||
344 | Ok(Value::Null) | |
345 | } | |
346 | ||
347 | #[api( | |
348 | streaming: true, | |
349 | input: { | |
350 | properties: { | |
351 | store: { schema: DATASTORE_SCHEMA }, | |
352 | "backup-type": { | |
353 | optional: true, | |
354 | type: BackupType, | |
355 | }, | |
356 | "backup-id": { | |
357 | optional: true, | |
358 | schema: BACKUP_ID_SCHEMA, | |
359 | }, | |
360 | }, | |
361 | }, | |
362 | returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE, | |
363 | access: { | |
364 | permission: &Permission::Privilege( | |
365 | &["datastore", "{store}"], | |
366 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, | |
367 | true), | |
368 | }, | |
369 | )] | |
370 | /// List backup snapshots. | |
371 | pub fn list_snapshots( | |
372 | store: String, | |
373 | backup_type: Option<BackupType>, | |
374 | backup_id: Option<String>, | |
375 | _param: Value, | |
376 | _info: &ApiMethod, | |
377 | rpcenv: &mut dyn RpcEnvironment, | |
378 | ) -> Result<Vec<SnapshotListItem>, Error> { | |
379 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
380 | let user_info = CachedUserInfo::new()?; | |
381 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); | |
382 | ||
383 | let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; | |
384 | ||
385 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
386 | ||
387 | // FIXME: also filter by owner before collecting; to do that nicely, the owner should move into | |
388 | // the backup group and provide an error-free (Err -> None) accessor | |
389 | let groups = match (backup_type, backup_id) { | |
390 | (Some(backup_type), Some(backup_id)) => { | |
391 | vec![datastore.backup_group_from_parts(backup_type, backup_id)] | |
392 | } | |
393 | (Some(backup_type), None) => datastore | |
394 | .iter_backup_groups_ok()? | |
395 | .filter(|group| group.backup_type() == backup_type) | |
396 | .collect(), | |
397 | (None, Some(backup_id)) => datastore | |
398 | .iter_backup_groups_ok()? | |
399 | .filter(|group| group.backup_id() == backup_id) | |
400 | .collect(), | |
401 | _ => datastore.list_backup_groups()?, | |
402 | }; | |
403 | ||
404 | let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| { | |
405 | let backup = pbs_api_types::BackupDir { | |
406 | group: group.into(), | |
407 | time: info.backup_dir.backup_time(), | |
408 | }; | |
409 | let protected = info.backup_dir.is_protected(); | |
410 | ||
411 | match get_all_snapshot_files(&datastore, &info) { | |
412 | Ok((manifest, files)) => { | |
413 | // extract the first line from notes | |
414 | let comment: Option<String> = manifest.unprotected["notes"] | |
415 | .as_str() | |
416 | .and_then(|notes| notes.lines().next()) | |
417 | .map(String::from); | |
418 | ||
419 | let fingerprint = match manifest.fingerprint() { | |
420 | Ok(fp) => fp, | |
421 | Err(err) => { | |
422 | eprintln!("error parsing fingerprint: '{}'", err); | |
423 | None | |
424 | } | |
425 | }; | |
426 | ||
427 | let verification = manifest.unprotected["verify_state"].clone(); | |
428 | let verification: Option<SnapshotVerifyState> = | |
429 | match serde_json::from_value(verification) { | |
430 | Ok(verify) => verify, | |
431 | Err(err) => { | |
432 | eprintln!("error parsing verification state: '{}'", err); | |
433 | None | |
434 | } | |
435 | }; | |
436 | ||
437 | let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum()); | |
438 | ||
439 | SnapshotListItem { | |
440 | backup, | |
441 | comment, | |
442 | verification, | |
443 | fingerprint, | |
444 | files, | |
445 | size, | |
446 | owner, | |
447 | protected, | |
448 | } | |
449 | } | |
450 | Err(err) => { | |
451 | eprintln!("error during snapshot file listing: '{}'", err); | |
452 | let files = info | |
453 | .files | |
454 | .into_iter() | |
455 | .map(|filename| BackupContent { | |
456 | filename, | |
457 | size: None, | |
458 | crypt_mode: None, | |
459 | }) | |
460 | .collect(); | |
461 | ||
462 | SnapshotListItem { | |
463 | backup, | |
464 | comment: None, | |
465 | verification: None, | |
466 | fingerprint: None, | |
467 | files, | |
468 | size: None, | |
469 | owner, | |
470 | protected, | |
471 | } | |
472 | } | |
473 | } | |
474 | }; | |
475 | ||
476 | groups.iter().try_fold(Vec::new(), |mut snapshots, group| { | |
477 | let owner = match datastore.get_owner(group.as_ref()) { | |
478 | Ok(auth_id) => auth_id, | |
479 | Err(err) => { | |
480 | eprintln!( | |
481 | "Failed to get owner of group '{}/{}' - {}", | |
482 | &store, group, err | |
483 | ); | |
484 | return Ok(snapshots); | |
485 | } | |
486 | }; | |
487 | ||
488 | if !list_all && check_backup_owner(&owner, &auth_id).is_err() { | |
489 | return Ok(snapshots); | |
490 | } | |
491 | ||
492 | let group_backups = group.list_backups()?; | |
493 | ||
494 | snapshots.extend( | |
495 | group_backups | |
496 | .into_iter() | |
497 | .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info)), | |
498 | ); | |
499 | ||
500 | Ok(snapshots) | |
501 | }) | |
502 | } | |
503 | ||
504 | fn get_snapshots_count( | |
505 | store: &Arc<DataStore>, | |
506 | filter_owner: Option<&Authid>, | |
507 | ) -> Result<Counts, Error> { | |
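| // Count groups and snapshots per backup type, optionally restricted to a single owner. | |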
508 | store | |
509 | .iter_backup_groups_ok()? | |
510 | .filter(|group| { | |
511 | let owner = match store.get_owner(group.as_ref()) { | |
512 | Ok(owner) => owner, | |
513 | Err(err) => { | |
514 | let id = store.name(); | |
515 | eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err); | |
516 | return false; | |
517 | } | |
518 | }; | |
519 | ||
520 | match filter_owner { | |
521 | Some(filter) => check_backup_owner(&owner, filter).is_ok(), | |
522 | None => true, | |
523 | } | |
524 | }) | |
525 | .try_fold(Counts::default(), |mut counts, group| { | |
526 | let snapshot_count = group.list_backups()?.len() as u64; | |
527 | ||
528 | // only include groups with snapshots; counting/displaying empty groups can be confusing | |
529 | if snapshot_count > 0 { | |
530 | let type_count = match group.backup_type() { | |
531 | BackupType::Ct => counts.ct.get_or_insert(Default::default()), | |
532 | BackupType::Vm => counts.vm.get_or_insert(Default::default()), | |
533 | BackupType::Host => counts.host.get_or_insert(Default::default()), | |
534 | }; | |
535 | ||
536 | type_count.groups += 1; | |
537 | type_count.snapshots += snapshot_count; | |
538 | } | |
539 | ||
540 | Ok(counts) | |
541 | }) | |
542 | } | |
543 | ||
544 | #[api( | |
545 | input: { | |
546 | properties: { | |
547 | store: { | |
548 | schema: DATASTORE_SCHEMA, | |
549 | }, | |
550 | verbose: { | |
551 | type: bool, | |
552 | default: false, | |
553 | optional: true, | |
554 | description: "Include additional information like snapshot counts and GC status.", | |
555 | }, | |
556 | }, | |
557 | ||
558 | }, | |
559 | returns: { | |
560 | type: DataStoreStatus, | |
561 | }, | |
562 | access: { | |
563 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), | |
564 | }, | |
565 | )] | |
566 | /// Get datastore status. | |
567 | pub fn status( | |
568 | store: String, | |
569 | verbose: bool, | |
570 | _info: &ApiMethod, | |
571 | rpcenv: &mut dyn RpcEnvironment, | |
572 | ) -> Result<DataStoreStatus, Error> { | |
573 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
574 | let storage = crate::tools::disks::disk_usage(&datastore.base_path())?; | |
575 | let (counts, gc_status) = if verbose { | |
576 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
577 | let user_info = CachedUserInfo::new()?; | |
578 | ||
579 | let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); | |
580 | let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 { | |
581 | None | |
582 | } else { | |
583 | Some(&auth_id) | |
584 | }; | |
585 | ||
586 | let counts = Some(get_snapshots_count(&datastore, filter_owner)?); | |
587 | let gc_status = Some(datastore.last_gc_status()); | |
588 | ||
589 | (counts, gc_status) | |
590 | } else { | |
591 | (None, None) | |
592 | }; | |
593 | ||
594 | Ok(DataStoreStatus { | |
595 | total: storage.total, | |
596 | used: storage.used, | |
597 | avail: storage.avail, | |
598 | gc_status, | |
599 | counts, | |
600 | }) | |
601 | } | |
602 | ||
603 | #[api( | |
604 | input: { | |
605 | properties: { | |
606 | store: { | |
607 | schema: DATASTORE_SCHEMA, | |
608 | }, | |
609 | "backup-type": { | |
610 | type: BackupType, | |
611 | optional: true, | |
612 | }, | |
613 | "backup-id": { | |
614 | schema: BACKUP_ID_SCHEMA, | |
615 | optional: true, | |
616 | }, | |
617 | "ignore-verified": { | |
618 | schema: IGNORE_VERIFIED_BACKUPS_SCHEMA, | |
619 | optional: true, | |
620 | }, | |
621 | "outdated-after": { | |
622 | schema: VERIFICATION_OUTDATED_AFTER_SCHEMA, | |
623 | optional: true, | |
624 | }, | |
625 | "backup-time": { | |
626 | schema: BACKUP_TIME_SCHEMA, | |
627 | optional: true, | |
628 | }, | |
629 | }, | |
630 | }, | |
631 | returns: { | |
632 | schema: UPID_SCHEMA, | |
633 | }, | |
634 | access: { | |
635 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true), | |
636 | }, | |
637 | )] | |
638 | /// Verify backups. | |
639 | /// | |
640 | /// This function can verify a single backup snapshot, all backups of a backup group, | |
641 | /// or all backups in the datastore. | |
642 | pub fn verify( | |
643 | store: String, | |
644 | backup_type: Option<BackupType>, | |
645 | backup_id: Option<String>, | |
646 | backup_time: Option<i64>, | |
647 | ignore_verified: Option<bool>, | |
648 | outdated_after: Option<i64>, | |
649 | rpcenv: &mut dyn RpcEnvironment, | |
650 | ) -> Result<Value, Error> { | |
651 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
652 | let ignore_verified = ignore_verified.unwrap_or(true); | |
653 | ||
654 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
655 | let worker_id; | |
656 | ||
657 | let mut backup_dir = None; | |
658 | let mut backup_group = None; | |
659 | let mut worker_type = "verify"; | |
660 | ||
661 | match (backup_type, backup_id, backup_time) { | |
662 | (Some(backup_type), Some(backup_id), Some(backup_time)) => { | |
663 | worker_id = format!( | |
664 | "{}:{}/{}/{:08X}", | |
665 | store, backup_type, backup_id, backup_time | |
666 | ); | |
667 | let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
668 | ||
669 | check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?; | |
670 | ||
671 | backup_dir = Some(dir); | |
672 | worker_type = "verify_snapshot"; | |
673 | } | |
674 | (Some(backup_type), Some(backup_id), None) => { | |
675 | worker_id = format!("{}:{}/{}", store, backup_type, backup_id); | |
676 | let group = pbs_api_types::BackupGroup::from((backup_type, backup_id)); | |
677 | ||
678 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; | |
679 | ||
680 | backup_group = Some(datastore.backup_group(group)); | |
681 | worker_type = "verify_group"; | |
682 | } | |
683 | (None, None, None) => { | |
684 | worker_id = store.clone(); | |
685 | } | |
686 | _ => bail!("parameters do not specify a backup group or snapshot"), | |
687 | } | |
688 | ||
689 | let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; | |
690 | ||
691 | let upid_str = WorkerTask::new_thread( | |
692 | worker_type, | |
693 | Some(worker_id), | |
694 | auth_id.to_string(), | |
695 | to_stdout, | |
696 | move |worker| { | |
697 | let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore); | |
698 | let failed_dirs = if let Some(backup_dir) = backup_dir { | |
699 | let mut res = Vec::new(); | |
700 | if !verify_backup_dir( | |
701 | &verify_worker, | |
702 | &backup_dir, | |
703 | worker.upid().clone(), | |
704 | Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)), | |
705 | )? { | |
706 | res.push(backup_dir.to_string()); | |
707 | } | |
708 | res | |
709 | } else if let Some(backup_group) = backup_group { | |
710 | let failed_dirs = verify_backup_group( | |
711 | &verify_worker, | |
712 | &backup_group, | |
713 | &mut StoreProgress::new(1), | |
714 | worker.upid(), | |
715 | Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)), | |
716 | )?; | |
717 | failed_dirs | |
718 | } else { | |
719 | let privs = CachedUserInfo::new()?.lookup_privs(&auth_id, &["datastore", &store]); | |
720 | ||
721 | let owner = if privs & PRIV_DATASTORE_VERIFY == 0 { | |
722 | Some(auth_id) | |
723 | } else { | |
724 | None | |
725 | }; | |
726 | ||
727 | verify_all_backups( | |
728 | &verify_worker, | |
729 | worker.upid(), | |
730 | owner, | |
731 | Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)), | |
732 | )? | |
733 | }; | |
734 | if !failed_dirs.is_empty() { | |
735 | task_log!(worker, "Failed to verify the following snapshots/groups:"); | |
736 | for dir in failed_dirs { | |
737 | task_log!(worker, "\t{}", dir); | |
738 | } | |
739 | bail!("verification failed - please check the log for details"); | |
740 | } | |
741 | Ok(()) | |
742 | }, | |
743 | )?; | |
744 | ||
745 | Ok(json!(upid_str)) | |
746 | } | |
747 | ||
748 | #[api( | |
749 | input: { | |
750 | properties: { | |
751 | "backup-id": { schema: BACKUP_ID_SCHEMA }, | |
752 | "backup-type": { type: BackupType }, | |
753 | "dry-run": { | |
754 | optional: true, | |
755 | type: bool, | |
756 | default: false, | |
757 | description: "Just show what prune would do, but do not delete anything.", | |
758 | }, | |
759 | "prune-options": { | |
760 | type: PruneOptions, | |
761 | flatten: true, | |
762 | }, | |
763 | store: { | |
764 | schema: DATASTORE_SCHEMA, | |
765 | }, | |
766 | }, | |
767 | }, | |
768 | returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE, | |
769 | access: { | |
770 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true), | |
771 | }, | |
772 | )] | |
773 | /// Prune a group on the datastore | |
774 | pub fn prune( | |
775 | backup_id: String, | |
776 | backup_type: BackupType, | |
777 | dry_run: bool, | |
778 | prune_options: PruneOptions, | |
779 | store: String, | |
780 | _param: Value, | |
781 | rpcenv: &mut dyn RpcEnvironment, | |
782 | ) -> Result<Value, Error> { | |
783 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
784 | ||
785 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; | |
786 | ||
787 | let group = datastore.backup_group_from_parts(backup_type, &backup_id); | |
788 | ||
789 | check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?; | |
790 | ||
791 | let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id); | |
792 | ||
793 | let mut prune_result = Vec::new(); | |
794 | ||
795 | let list = group.list_backups()?; | |
796 | ||
797 | let mut prune_info = compute_prune_info(list, &prune_options)?; | |
798 | ||
799 | prune_info.reverse(); // delete older snapshots first | |
800 | ||
801 | let keep_all = !pbs_datastore::prune::keeps_something(&prune_options); | |
802 | ||
803 | if dry_run { | |
804 | for (info, mark) in prune_info { | |
805 | let keep = keep_all || mark.keep(); | |
806 | ||
807 | prune_result.push(json!({ | |
808 | "backup-type": info.backup_dir.backup_type(), | |
809 | "backup-id": info.backup_dir.backup_id(), | |
810 | "backup-time": info.backup_dir.backup_time(), | |
811 | "keep": keep, | |
812 | "protected": mark.protected(), | |
813 | })); | |
814 | } | |
815 | return Ok(json!(prune_result)); | |
816 | } | |
817 | ||
818 | // We use a WorkerTask just to have a task log, but run synchronously | |
819 | let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?; | |
820 | ||
821 | if keep_all { | |
822 | task_log!(worker, "No prune selection - keeping all files."); | |
823 | } else { | |
824 | task_log!( | |
825 | worker, | |
826 | "retention options: {}", | |
827 | pbs_datastore::prune::cli_options_string(&prune_options) | |
828 | ); | |
829 | task_log!( | |
830 | worker, | |
831 | "Starting prune on store \"{}\" group \"{}/{}\"", | |
832 | store, | |
833 | backup_type, | |
834 | backup_id | |
835 | ); | |
836 | } | |
837 | ||
838 | for (info, mark) in prune_info { | |
839 | let keep = keep_all || mark.keep(); | |
840 | ||
841 | let backup_time = info.backup_dir.backup_time(); | |
842 | let timestamp = info.backup_dir.backup_time_string(); | |
843 | let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref(); | |
844 | ||
845 | let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,); | |
846 | ||
847 | task_log!(worker, "{}", msg); | |
848 | ||
849 | prune_result.push(json!({ | |
850 | "backup-type": group.ty, | |
851 | "backup-id": group.id, | |
852 | "backup-time": backup_time, | |
853 | "keep": keep, | |
854 | "protected": mark.protected(), | |
855 | })); | |
856 | ||
857 | if !(dry_run || keep) { | |
858 | if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) { | |
859 | task_warn!( | |
860 | worker, | |
861 | "failed to remove dir {:?}: {}", | |
862 | info.backup_dir.relative_path(), | |
863 | err, | |
864 | ); | |
865 | } | |
866 | } | |
867 | } | |
868 | ||
869 | worker.log_result(&Ok(())); | |
870 | ||
871 | Ok(json!(prune_result)) | |
872 | } | |
873 | ||
874 | #[api( | |
875 | input: { | |
876 | properties: { | |
877 | "dry-run": { | |
878 | optional: true, | |
879 | type: bool, | |
880 | default: false, | |
881 | description: "Just show what prune would do, but do not delete anything.", | |
882 | }, | |
883 | "prune-options": { | |
884 | type: PruneOptions, | |
885 | flatten: true, | |
886 | }, | |
887 | store: { | |
888 | schema: DATASTORE_SCHEMA, | |
889 | }, | |
890 | }, | |
891 | }, | |
892 | returns: { | |
893 | schema: UPID_SCHEMA, | |
894 | }, | |
895 | access: { | |
896 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true), | |
897 | }, | |
898 | )] | |
899 | /// Prune the datastore | |
900 | pub fn prune_datastore( | |
901 | dry_run: bool, | |
902 | prune_options: PruneOptions, | |
903 | store: String, | |
904 | _param: Value, | |
905 | rpcenv: &mut dyn RpcEnvironment, | |
906 | ) -> Result<String, Error> { | |
907 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
908 | ||
909 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; | |
910 | ||
911 | let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; | |
912 | ||
913 | let upid_str = WorkerTask::new_thread( | |
914 | "prune", | |
915 | Some(store.clone()), | |
916 | auth_id.to_string(), | |
917 | to_stdout, | |
918 | move |worker| { | |
919 | crate::server::prune_datastore( | |
920 | worker, | |
921 | auth_id, | |
922 | prune_options, | |
923 | &store, | |
924 | datastore, | |
925 | dry_run, | |
926 | ) | |
927 | }, | |
928 | )?; | |
929 | ||
930 | Ok(upid_str) | |
931 | } | |
932 | ||
933 | #[api( | |
934 | input: { | |
935 | properties: { | |
936 | store: { | |
937 | schema: DATASTORE_SCHEMA, | |
938 | }, | |
939 | }, | |
940 | }, | |
941 | returns: { | |
942 | schema: UPID_SCHEMA, | |
943 | }, | |
944 | access: { | |
945 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false), | |
946 | }, | |
947 | )] | |
948 | /// Start garbage collection. | |
949 | pub fn start_garbage_collection( | |
950 | store: String, | |
951 | _info: &ApiMethod, | |
952 | rpcenv: &mut dyn RpcEnvironment, | |
953 | ) -> Result<Value, Error> { | |
954 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; | |
955 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
956 | ||
957 | let job = Job::new("garbage_collection", &store) | |
958 | .map_err(|_| format_err!("garbage collection already running"))?; | |
959 | ||
960 | let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; | |
961 | ||
962 | let upid_str = | |
963 | crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout) | |
964 | .map_err(|err| { | |
965 | format_err!( | |
966 | "unable to start garbage collection job on datastore {} - {}", | |
967 | store, | |
968 | err | |
969 | ) | |
970 | })?; | |
971 | ||
972 | Ok(json!(upid_str)) | |
973 | } | |
974 | ||
975 | #[api( | |
976 | input: { | |
977 | properties: { | |
978 | store: { | |
979 | schema: DATASTORE_SCHEMA, | |
980 | }, | |
981 | }, | |
982 | }, | |
983 | returns: { | |
984 | type: GarbageCollectionStatus, | |
985 | }, | |
986 | access: { | |
987 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), | |
988 | }, | |
989 | )] | |
990 | /// Garbage collection status. | |
991 | pub fn garbage_collection_status( | |
992 | store: String, | |
993 | _info: &ApiMethod, | |
994 | _rpcenv: &mut dyn RpcEnvironment, | |
995 | ) -> Result<GarbageCollectionStatus, Error> { | |
996 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
997 | ||
998 | let status = datastore.last_gc_status(); | |
999 | ||
1000 | Ok(status) | |
1001 | } | |
1002 | ||
1003 | #[api( | |
1004 | returns: { | |
1005 | description: "List the accessible datastores.", | |
1006 | type: Array, | |
1007 | items: { type: DataStoreListItem }, | |
1008 | }, | |
1009 | access: { | |
1010 | permission: &Permission::Anybody, | |
1011 | }, | |
1012 | )] | |
1013 | /// Datastore list | |
1014 | pub fn get_datastore_list( | |
1015 | _param: Value, | |
1016 | _info: &ApiMethod, | |
1017 | rpcenv: &mut dyn RpcEnvironment, | |
1018 | ) -> Result<Vec<DataStoreListItem>, Error> { | |
1019 | let (config, _digest) = pbs_config::datastore::config()?; | |
1020 | ||
1021 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
1022 | let user_info = CachedUserInfo::new()?; | |
1023 | ||
1024 | let mut list = Vec::new(); | |
1025 | ||
1026 | for (store, (_, data)) in &config.sections { | |
1027 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]); | |
1028 | let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; | |
1029 | if allowed { | |
1030 | list.push(DataStoreListItem { | |
1031 | store: store.clone(), | |
1032 | comment: data["comment"].as_str().map(String::from), | |
1033 | }); | |
1034 | } | |
1035 | } | |
1036 | ||
1037 | Ok(list) | |
1038 | } | |
1039 | ||
1040 | #[sortable] | |
1041 | pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new( | |
1042 | &ApiHandler::AsyncHttp(&download_file), | |
1043 | &ObjectSchema::new( | |
1044 | "Download single raw file from backup snapshot.", | |
1045 | &sorted!([ | |
1046 | ("store", false, &DATASTORE_SCHEMA), | |
1047 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
1048 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
1049 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
1050 | ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA), | |
1051 | ]), | |
1052 | ), | |
1053 | ) | |
1054 | .access( | |
1055 | None, | |
1056 | &Permission::Privilege( | |
1057 | &["datastore", "{store}"], | |
1058 | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
1059 | true, | |
1060 | ), | |
1061 | ); | |
1062 | ||
1063 | pub fn download_file( | |
1064 | _parts: Parts, | |
1065 | _req_body: Body, | |
1066 | param: Value, | |
1067 | _info: &ApiMethod, | |
1068 | rpcenv: Box<dyn RpcEnvironment>, | |
1069 | ) -> ApiResponseFuture { | |
1070 | async move { | |
1071 | let store = required_string_param(¶m, "store")?; | |
1072 | let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?; | |
1073 | ||
1074 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
1075 | ||
1076 | let file_name = required_string_param(¶m, "file-name")?.to_owned(); | |
1077 | ||
1078 | let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; | |
1079 | let backup_id = required_string_param(¶m, "backup-id")?.to_owned(); | |
1080 | let backup_time = required_integer_param(¶m, "backup-time")?; | |
1081 | ||
1082 | let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
1083 | ||
1084 | check_priv_or_backup_owner( | |
1085 | &datastore, | |
1086 | backup_dir.as_ref(), | |
1087 | &auth_id, | |
1088 | PRIV_DATASTORE_READ, | |
1089 | )?; | |
1090 | ||
1091 | println!( | |
1092 | "Download {} from {} ({}/{})", | |
1093 | file_name, store, backup_dir, file_name | |
1094 | ); | |
1095 | ||
1096 | let mut path = datastore.base_path(); | |
1097 | path.push(backup_dir.relative_path()); | |
1098 | path.push(&file_name); | |
1099 | ||
1100 | let file = tokio::fs::File::open(&path) | |
1101 | .await | |
1102 | .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?; | |
1103 | ||
1104 | let payload = | |
1105 | tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) | |
1106 | .map_ok(|bytes| bytes.freeze()) | |
1107 | .map_err(move |err| { | |
1108 | eprintln!("error during streaming of '{:?}' - {}", &path, err); | |
1109 | err | |
1110 | }); | |
1111 | let body = Body::wrap_stream(payload); | |
1112 | ||
1113 | // fixme: set other headers? | |
1114 | Ok(Response::builder() | |
1115 | .status(StatusCode::OK) | |
1116 | .header(header::CONTENT_TYPE, "application/octet-stream") | |
1117 | .body(body) | |
1118 | .unwrap()) | |
1119 | } | |
1120 | .boxed() | |
1121 | } | |
1122 | ||
1123 | #[sortable] | |
1124 | pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new( | |
1125 | &ApiHandler::AsyncHttp(&download_file_decoded), | |
1126 | &ObjectSchema::new( | |
1127 | "Download single decoded file from backup snapshot. Only works if it's not encrypted.", | |
1128 | &sorted!([ | |
1129 | ("store", false, &DATASTORE_SCHEMA), | |
1130 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
1131 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
1132 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
1133 | ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA), | |
1134 | ]), | |
1135 | ), | |
1136 | ) | |
1137 | .access( | |
1138 | None, | |
1139 | &Permission::Privilege( | |
1140 | &["datastore", "{store}"], | |
1141 | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
1142 | true, | |
1143 | ), | |
1144 | ); | |
1145 | ||
1146 | pub fn download_file_decoded( | |
1147 | _parts: Parts, | |
1148 | _req_body: Body, | |
1149 | param: Value, | |
1150 | _info: &ApiMethod, | |
1151 | rpcenv: Box<dyn RpcEnvironment>, | |
1152 | ) -> ApiResponseFuture { | |
1153 | async move { | |
1154 | let store = required_string_param(¶m, "store")?; | |
1155 | let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?; | |
1156 | ||
1157 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
1158 | ||
1159 | let file_name = required_string_param(¶m, "file-name")?.to_owned(); | |
1160 | ||
1161 | let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; | |
1162 | let backup_id = required_string_param(¶m, "backup-id")?.to_owned(); | |
1163 | let backup_time = required_integer_param(¶m, "backup-time")?; | |
1164 | ||
1165 | let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
1166 | ||
1167 | check_priv_or_backup_owner( | |
1168 | &datastore, | |
1169 | backup_dir.as_ref(), | |
1170 | &auth_id, | |
1171 | PRIV_DATASTORE_READ, | |
1172 | )?; | |
1173 | ||
1174 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; | |
1175 | for file in files { | |
1176 | if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1177 | bail!("cannot decode '{}' - is encrypted", file_name); | |
1178 | } | |
1179 | } | |
1180 | ||
1181 | println!( | |
1182 | "Download {} from {} ({}/{})", | |
1183 | file_name, store, backup_dir, file_name | |
1184 | ); | |
1185 | ||
1186 | let mut path = datastore.base_path(); | |
1187 | path.push(backup_dir.relative_path()); | |
1188 | path.push(&file_name); | |
1189 | ||
1190 | let extension = file_name.rsplitn(2, '.').next().unwrap(); | |
1191 | ||
1192 | let body = match extension { | |
1193 | "didx" => { | |
1194 | let index = DynamicIndexReader::open(&path).map_err(|err| { | |
1195 | format_err!("unable to read dynamic index '{:?}' - {}", &path, err) | |
1196 | })?; | |
1197 | let (csum, size) = index.compute_csum(); | |
1198 | manifest.verify_file(&file_name, &csum, size)?; | |
1199 | ||
1200 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); | |
1201 | let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable(); | |
1202 | Body::wrap_stream(AsyncReaderStream::new(reader).map_err(move |err| { | |
1203 | eprintln!("error during streaming of '{:?}' - {}", path, err); | |
1204 | err | |
1205 | })) | |
1206 | } | |
1207 | "fidx" => { | |
1208 | let index = FixedIndexReader::open(&path).map_err(|err| { | |
1209 | format_err!("unable to read fixed index '{:?}' - {}", &path, err) | |
1210 | })?; | |
1211 | ||
1212 | let (csum, size) = index.compute_csum(); | |
1213 | manifest.verify_file(&file_name, &csum, size)?; | |
1214 | ||
1215 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); | |
1216 | let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable(); | |
1217 | Body::wrap_stream( | |
1218 | AsyncReaderStream::with_buffer_size(reader, 4 * 1024 * 1024).map_err( | |
1219 | move |err| { | |
1220 | eprintln!("error during streaming of '{:?}' - {}", path, err); | |
1221 | err | |
1222 | }, | |
1223 | ), | |
1224 | ) | |
1225 | } | |
1226 | "blob" => { | |
1227 | let file = std::fs::File::open(&path) | |
1228 | .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?; | |
1229 | ||
1230 | // FIXME: load full blob to verify index checksum? | |
1231 | ||
1232 | Body::wrap_stream( | |
1233 | WrappedReaderStream::new(DataBlobReader::new(file, None)?).map_err( | |
1234 | move |err| { | |
1235 | eprintln!("error during streaming of '{:?}' - {}", path, err); | |
1236 | err | |
1237 | }, | |
1238 | ), | |
1239 | ) | |
1240 | } | |
1241 | extension => { | |
1242 | bail!("cannot download '{}' files", extension); | |
1243 | } | |
1244 | }; | |
1245 | ||
1246 | // fixme: set other headers? | |
1247 | Ok(Response::builder() | |
1248 | .status(StatusCode::OK) | |
1249 | .header(header::CONTENT_TYPE, "application/octet-stream") | |
1250 | .body(body) | |
1251 | .unwrap()) | |
1252 | } | |
1253 | .boxed() | |
1254 | } | |
1255 | ||
1256 | #[sortable] | |
1257 | pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new( | |
1258 | &ApiHandler::AsyncHttp(&upload_backup_log), | |
1259 | &ObjectSchema::new( | |
1260 | "Upload the client backup log file into a backup snapshot ('client.log.blob').", | |
1261 | &sorted!([ | |
1262 | ("store", false, &DATASTORE_SCHEMA), | |
1263 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
1264 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
1265 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
1266 | ]), | |
1267 | ), | |
1268 | ) | |
1269 | .access( | |
1270 | Some("Only the backup creator/owner is allowed to do this."), | |
1271 | &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false), | |
1272 | ); | |
1273 | ||
1274 | pub fn upload_backup_log( | |
1275 | _parts: Parts, | |
1276 | req_body: Body, | |
1277 | param: Value, | |
1278 | _info: &ApiMethod, | |
1279 | rpcenv: Box<dyn RpcEnvironment>, | |
1280 | ) -> ApiResponseFuture { | |
1281 | async move { | |
1282 | let store = required_string_param(¶m, "store")?; | |
1283 | let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?; | |
1284 | ||
1285 | let file_name = CLIENT_LOG_BLOB_NAME; | |
1286 | ||
1287 | let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; | |
1288 | let backup_id = required_string_param(¶m, "backup-id")?; | |
1289 | let backup_time = required_integer_param(¶m, "backup-time")?; | |
1290 | ||
1291 | let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
1292 | ||
1293 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
1294 | let owner = datastore.get_owner(backup_dir.as_ref())?; | |
1295 | check_backup_owner(&owner, &auth_id)?; | |
1296 | ||
1297 | let mut path = datastore.base_path(); | |
1298 | path.push(backup_dir.relative_path()); | |
1299 | path.push(&file_name); | |
1300 | ||
1301 | if path.exists() { | |
1302 | bail!("backup already contains a log."); | |
1303 | } | |
1304 | ||
1305 | println!( | |
1306 | "Upload backup log to {}/{}/{}/{}/{}", | |
1307 | store, | |
1308 | backup_type, | |
1309 | backup_id, | |
1310 | backup_dir.backup_time_string(), | |
1311 | file_name | |
1312 | ); | |
1313 | ||
1314 | let data = req_body | |
1315 | .map_err(Error::from) | |
1316 | .try_fold(Vec::new(), |mut acc, chunk| { | |
1317 | acc.extend_from_slice(&*chunk); | |
1318 | future::ok::<_, Error>(acc) | |
1319 | }) | |
1320 | .await?; | |
1321 | ||
1322 | // always verify blob/CRC at server side | |
1323 | let blob = DataBlob::load_from_reader(&mut &data[..])?; | |
1324 | ||
1325 | replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?; | |
1326 | ||
1327 | // fixme: use correct formatter | |
1328 | Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv)) | |
1329 | } | |
1330 | .boxed() | |
1331 | } | |
1332 | ||
1333 | #[api( | |
1334 | input: { | |
1335 | properties: { | |
1336 | store: { schema: DATASTORE_SCHEMA }, | |
1337 | "backup-type": { type: BackupType }, | |
1338 | "backup-id": { schema: BACKUP_ID_SCHEMA }, | |
1339 | "backup-time": { schema: BACKUP_TIME_SCHEMA }, | |
1340 | "filepath": { | |
1341 | description: "Base64 encoded path.", | |
1342 | type: String, | |
1343 | } | |
1344 | }, | |
1345 | }, | |
1346 | access: { | |
1347 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), | |
1348 | }, | |
1349 | )] | |
1350 | /// Get the entries of the given path of the catalog | |
1351 | pub fn catalog( | |
1352 | store: String, | |
1353 | backup_type: BackupType, | |
1354 | backup_id: String, | |
1355 | backup_time: i64, | |
1356 | filepath: String, | |
1357 | rpcenv: &mut dyn RpcEnvironment, | |
1358 | ) -> Result<Vec<ArchiveEntry>, Error> { | |
1359 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
1360 | ||
1361 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
1362 | ||
1363 | let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
1364 | ||
1365 | check_priv_or_backup_owner( | |
1366 | &datastore, | |
1367 | backup_dir.as_ref(), | |
1368 | &auth_id, | |
1369 | PRIV_DATASTORE_READ, | |
1370 | )?; | |
1371 | ||
1372 | let file_name = CATALOG_NAME; | |
1373 | ||
1374 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; | |
1375 | for file in files { | |
1376 | if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1377 | bail!("cannot decode '{}' - is encrypted", file_name); | |
1378 | } | |
1379 | } | |
1380 | ||
1381 | let mut path = datastore.base_path(); | |
1382 | path.push(backup_dir.relative_path()); | |
1383 | path.push(file_name); | |
1384 | ||
1385 | let index = DynamicIndexReader::open(&path) | |
1386 | .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; | |
1387 | ||
1388 | let (csum, size) = index.compute_csum(); | |
1389 | manifest.verify_file(file_name, &csum, size)?; | |
1390 | ||
1391 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); | |
1392 | let reader = BufferedDynamicReader::new(index, chunk_reader); | |
1393 | ||
1394 | let mut catalog_reader = CatalogReader::new(reader); | |
1395 | ||
1396 | let path = if filepath != "root" && filepath != "/" { | |
1397 | base64::decode(filepath)? | |
1398 | } else { | |
1399 | vec![b'/'] | |
1400 | }; | |
1401 | ||
1402 | catalog_reader.list_dir_contents(&path) | |
1403 | } | |
1404 | ||
1405 | #[sortable] | |
1406 | pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( | |
1407 | &ApiHandler::AsyncHttp(&pxar_file_download), | |
1408 | &ObjectSchema::new( | |
1409 | "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.", | |
1410 | &sorted!([ | |
1411 | ("store", false, &DATASTORE_SCHEMA), | |
1412 | ("backup-type", false, &BACKUP_TYPE_SCHEMA), | |
1413 | ("backup-id", false, &BACKUP_ID_SCHEMA), | |
1414 | ("backup-time", false, &BACKUP_TIME_SCHEMA), | |
1415 | ("filepath", false, &StringSchema::new("Base64 encoded path").schema()), | |
1416 | ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()), | |
1417 | ]), | |
1418 | ) | |
1419 | ).access(None, &Permission::Privilege( | |
1420 | &["datastore", "{store}"], | |
1421 | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
1422 | true) | |
1423 | ); | |
1424 | ||
1425 | pub fn pxar_file_download( | |
1426 | _parts: Parts, | |
1427 | _req_body: Body, | |
1428 | param: Value, | |
1429 | _info: &ApiMethod, | |
1430 | rpcenv: Box<dyn RpcEnvironment>, | |
1431 | ) -> ApiResponseFuture { | |
1432 | async move { | |
1433 | let store = required_string_param(¶m, "store")?; | |
1434 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
1435 | ||
1436 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; | |
1437 | ||
1438 | let filepath = required_string_param(¶m, "filepath")?.to_owned(); | |
1439 | ||
1440 | let backup_type: BackupType = required_string_param(¶m, "backup-type")?.parse()?; | |
1441 | let backup_id = required_string_param(¶m, "backup-id")?; | |
1442 | let backup_time = required_integer_param(¶m, "backup-time")?; | |
1443 | ||
1444 | let tar = param["tar"].as_bool().unwrap_or(false); | |
1445 | ||
1446 | let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; | |
1447 | ||
1448 | check_priv_or_backup_owner( | |
1449 | &datastore, | |
1450 | backup_dir.as_ref(), | |
1451 | &auth_id, | |
1452 | PRIV_DATASTORE_READ, | |
1453 | )?; | |
1454 | ||
1455 | let mut components = base64::decode(&filepath)?; | |
1456 | if !components.is_empty() && components[0] == b'/' { | |
1457 | components.remove(0); | |
1458 | } | |
1459 | ||
1460 | let mut split = components.splitn(2, |c| *c == b'/'); | |
1461 | let pxar_name = std::str::from_utf8(split.next().unwrap())?; | |
1462 | let file_path = split.next().unwrap_or(b"/"); | |
1463 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; | |
1464 | for file in files { | |
1465 | if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1466 | bail!("cannot decode '{}' - is encrypted", pxar_name); | |
1467 | } | |
1468 | } | |
1469 | ||
1470 | let mut path = datastore.base_path(); | |
1471 | path.push(backup_dir.relative_path()); | |
1472 | path.push(pxar_name); | |
1473 | ||
1474 | let index = DynamicIndexReader::open(&path) | |
1475 | .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; | |
1476 | ||
1477 | let (csum, size) = index.compute_csum(); | |
1478 | manifest.verify_file(pxar_name, &csum, size)?; | |
1479 | ||
1480 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); | |
1481 | let reader = BufferedDynamicReader::new(index, chunk_reader); | |
1482 | let archive_size = reader.archive_size(); | |
1483 | let reader = LocalDynamicReadAt::new(reader); | |
1484 | ||
1485 | let decoder = Accessor::new(reader, archive_size).await?; | |
1486 | let root = decoder.open_root().await?; | |
1487 | let path = OsStr::from_bytes(file_path).to_os_string(); | |
1488 | let file = root | |
1489 | .lookup(&path) | |
1490 | .await? | |
1491 | .ok_or_else(|| format_err!("error opening '{:?}'", path))?; | |
1492 | ||
1493 | let body = match file.kind() { | |
1494 | EntryKind::File { .. } => Body::wrap_stream( | |
1495 | AsyncReaderStream::new(file.contents().await?).map_err(move |err| { | |
1496 | eprintln!("error during streaming of file '{:?}' - {}", filepath, err); | |
1497 | err | |
1498 | }), | |
1499 | ), | |
1500 | EntryKind::Hardlink(_) => Body::wrap_stream( | |
1501 | AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?) | |
1502 | .map_err(move |err| { | |
1503 | eprintln!("error during streaming of hardlink '{:?}' - {}", path, err); | |
1504 | err | |
1505 | }), | |
1506 | ), | |
1507 | EntryKind::Directory => { | |
1508 | let (sender, receiver) = tokio::sync::mpsc::channel::<Result<_, Error>>(100); | |
1509 | let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024); | |
1510 | if tar { | |
1511 | proxmox_rest_server::spawn_internal_task(create_tar( | |
1512 | channelwriter, | |
1513 | decoder, | |
1514 | path.clone(), | |
1515 | false, | |
1516 | )); | |
1517 | let zstdstream = ZstdEncoder::new(ReceiverStream::new(receiver))?; | |
1518 | Body::wrap_stream(zstdstream.map_err(move |err| { | |
1519 | eprintln!("error during streaming of tar.zst '{:?}' - {}", path, err); | |
1520 | err | |
1521 | })) | |
1522 | } else { | |
1523 | proxmox_rest_server::spawn_internal_task(create_zip( | |
1524 | channelwriter, | |
1525 | decoder, | |
1526 | path.clone(), | |
1527 | false, | |
1528 | )); | |
1529 | Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| { | |
1530 | eprintln!("error during streaming of zip '{:?}' - {}", path, err); | |
1531 | err | |
1532 | })) | |
1533 | } | |
1534 | } | |
1535 | other => bail!("cannot download file of type {:?}", other), | |
1536 | }; | |
1537 | ||
1538 | // fixme: set other headers? | |
1539 | Ok(Response::builder() | |
1540 | .status(StatusCode::OK) | |
1541 | .header(header::CONTENT_TYPE, "application/octet-stream") | |
1542 | .body(body) | |
1543 | .unwrap()) | |
1544 | } | |
1545 | .boxed() | |
1546 | } | |
1547 | ||
1548 | #[api( | |
1549 | input: { | |
1550 | properties: { | |
1551 | store: { | |
1552 | schema: DATASTORE_SCHEMA, | |
1553 | }, | |
1554 | timeframe: { | |
1555 | type: RRDTimeFrame, | |
1556 | }, | |
1557 | cf: { | |
1558 | type: RRDMode, | |
1559 | }, | |
1560 | }, | |
1561 | }, | |
1562 | access: { | |
1563 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), | |
1564 | }, | |
1565 | )] | |
1566 | /// Read datastore stats | |
1567 | pub fn get_rrd_stats( | |
1568 | store: String, | |
1569 | timeframe: RRDTimeFrame, | |
1570 | cf: RRDMode, | |
1571 | _param: Value, | |
1572 | ) -> Result<Value, Error> { | |
1573 | let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; | |
1574 | let disk_manager = crate::tools::disks::DiskManage::new(); | |
1575 | ||
1576 | let mut rrd_fields = vec![ | |
1577 | "total", | |
1578 | "used", | |
1579 | "read_ios", | |
1580 | "read_bytes", | |
1581 | "write_ios", | |
1582 | "write_bytes", | |
1583 | ]; | |
1584 | ||
1585 | // we do not have io_ticks for zpools, so don't include them | |
1586 | match disk_manager.find_mounted_device(&datastore.base_path()) { | |
1587 | Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {} | |
1588 | _ => rrd_fields.push("io_ticks"), | |
1589 | }; | |
1590 | ||
1591 | create_value_from_rrd(&format!("datastore/{}", store), &rrd_fields, timeframe, cf) | |
1592 | } | |
1593 | ||
1594 | #[api( | |
1595 | input: { | |
1596 | properties: { | |
1597 | store: { | |
1598 | schema: DATASTORE_SCHEMA, | |
1599 | }, | |
1600 | }, | |
1601 | }, | |
1602 | access: { | |
1603 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true), | |
1604 | }, | |
1605 | )] | |
1606 | /// Read the active operations (read/write) on the datastore | |
1607 | pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Error> { | |
1608 | let active_operations = task_tracking::get_active_operations(&store)?; | |
1609 | Ok(json!({ | |
1610 | "read": active_operations.read, | |
1611 | "write": active_operations.write, | |
1612 | })) | |
1613 | } | |

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
}
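
// Group notes are kept as a plain file resolved via `get_group_note_path`; reading a
// group that has no note simply returns an empty string. Assuming the usual
// `admin/datastore` mount point, updating a note could look like this
// (illustrative values only):
//
//     PUT /api2/json/admin/datastore/{store}/group-notes
//     { "backup-type": "vm", "backup-id": "100", "notes": "nightly backups of VM 100" }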

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

    Ok(String::from(notes))
}
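
// Per-snapshot notes live in the `unprotected` section of the snapshot's manifest
// blob, so the relevant manifest fragment looks roughly like this (illustrative only):
//
//     { "unprotected": { "notes": "before kernel upgrade" }, ... }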

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore
        .update_manifest(&backup_dir, |manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
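
// Because the note is stored in the manifest's `unprotected` section and written via
// `update_manifest`, only the manifest blob is rewritten; the backup archives stay
// untouched, and the `unprotected` data is, as the name suggests, not covered by the
// manifest signature.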

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    Ok(backup_dir.is_protected())
}
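
// The returned boolean reflects whether the snapshot currently carries the protection
// marker; per the usual PBS semantics, protected snapshots are skipped by prune and
// cannot be removed until protection is cleared again.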

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "backup-time": { schema: BACKUP_TIME_SCHEMA },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    backup_time: i64,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.update_protection(&backup_dir, protected)
}
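
// Toggling protection goes through `DataStore::update_protection`. Assuming the usual
// `admin/datastore` mount point, a request could look like this (illustrative only):
//
//     PUT /api2/json/admin/datastore/{store}/protected
//     { "backup-type": "vm", "backup-id": "100", "backup-time": 1672531200, "protected": true }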

#[api(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-type": { type: BackupType },
            "backup-id": { schema: BACKUP_ID_SCHEMA },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: BackupType,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(backup_group.as_ref())?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            }
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id && new_owner == auth_id
            }
            (false, true) => {
                // API token owner to API token
                owner == auth_id && Authid::from(new_owner.user().clone()) == auth_id
            }
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            }
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(
            UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!(
            "{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner
        );
    }

    datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;

    Ok(())
}
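
// Summary of the ownership-change rules implemented above for an auth id that only has
// Datastore.Backup on the datastore (Datastore.Modify may always change owners):
//
//     current owner -> new owner    allowed if
//     token         -> token        both tokens belong to the calling user
//     token         -> user         the token belongs to the caller and the new owner is the caller
//     user          -> token        the caller is the current owner and the token belongs to the caller
//     user          -> user         never (requires Datastore.Modify)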

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new().get(&API_METHOD_GET_ACTIVE_OPERATIONS),
    ),
    ("catalog", &Router::new().get(&API_METHOD_CATALOG)),
    (
        "change-owner",
        &Router::new().post(&API_METHOD_SET_BACKUP_OWNER),
    ),
    (
        "download",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE),
    ),
    (
        "download-decoded",
        &Router::new().download(&API_METHOD_DOWNLOAD_FILE_DECODED),
    ),
    ("files", &Router::new().get(&API_METHOD_LIST_SNAPSHOT_FILES)),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION),
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES),
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP),
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES),
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION),
    ),
    ("prune", &Router::new().post(&API_METHOD_PRUNE)),
    (
        "prune-datastore",
        &Router::new().post(&API_METHOD_PRUNE_DATASTORE),
    ),
    (
        "pxar-file-download",
        &Router::new().download(&API_METHOD_PXAR_FILE_DOWNLOAD),
    ),
    ("rrd", &Router::new().get(&API_METHOD_GET_RRD_STATS)),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT),
    ),
    ("status", &Router::new().get(&API_METHOD_STATUS)),
    (
        "upload-backup-log",
        &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG),
    ),
    ("verify", &Router::new().post(&API_METHOD_VERIFY)),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
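
// Router layout: `ROUTER` serves the datastore list at the collection level and
// delegates everything below `{store}` to `DATASTORE_INFO_ROUTER`, whose GET handler
// enumerates the subdirectories defined above. Assuming the module is mounted at the
// usual `admin/datastore` path, the resulting endpoints look like (illustrative):
//
//     GET  /api2/json/admin/datastore                    -> datastore list
//     GET  /api2/json/admin/datastore/{store}            -> available subdirs
//     GET  /api2/json/admin/datastore/{store}/snapshots  -> list snapshots
//     POST /api2/json/admin/datastore/{store}/gc         -> start garbage collection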