//! Datastore Management

use std::collections::HashSet;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::PathBuf;

use anyhow::{bail, format_err, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream;

use proxmox_sys::sortable;
use proxmox_sys::fs::{
    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
};
use proxmox_router::{
    list_subdirs_api_method, http_err, ApiResponseFuture, ApiHandler, ApiMethod, Router,
    RpcEnvironment, RpcEnvironmentType, SubdirMap, Permission,
};
use proxmox_schema::*;
use proxmox_sys::{task_log, task_warn};
use proxmox_async::blocking::WrappedReaderStream;
use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};

use pxar::accessor::aio::Accessor;
use pxar::EntryKind;

use pbs_api_types::{ Authid, BackupContent, Counts, CryptMode,
    DataStoreListItem, GarbageCollectionStatus, GroupListItem,
    Operation, SnapshotListItem, SnapshotVerifyState, PruneOptions,
    DataStoreStatus, RRDMode, RRDTimeFrame,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
    IGNORE_VERIFIED_BACKUPS_SCHEMA, UPID_SCHEMA,
    VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_AUDIT,
    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ, PRIV_DATASTORE_PRUNE,
    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY,

};
use pbs_client::pxar::create_zip;
use pbs_datastore::{
    check_backup_owner, DataStore, BackupDir, BackupGroup, StoreProgress, LocalChunkReader,
    CATALOG_NAME, task_tracking
};
use pbs_datastore::backup_info::BackupInfo;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::catalog::{ArchiveEntry, CatalogReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
use pbs_datastore::fixed_index::{FixedIndexReader};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use pbs_datastore::prune::compute_prune_info;
use pbs_tools::json::{required_integer_param, required_string_param};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::{WorkerTask, formatter};

use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{
    verify_all_backups, verify_backup_group, verify_backup_dir, verify_filter,
};

use crate::server::jobstate::Job;


const GROUP_NOTES_FILE_NAME: &str = "notes";

fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
    let mut note_path = store.base_path();
    note_path.push(group.group_path());
    note_path.push(GROUP_NOTES_FILE_NAME);
    note_path
}

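/// Check that `auth_id` has one of `required_privs` on the datastore, or,
/// failing that, that `auth_id` is the owner of the given backup group.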
fn check_priv_or_backup_owner(
    store: &DataStore,
    group: &BackupGroup,
    auth_id: &Authid,
    required_privs: u64,
) -> Result<(), Error> {
    let user_info = CachedUserInfo::new()?;
    let privs = user_info.lookup_privs(auth_id, &["datastore", store.name()]);

    if privs & required_privs == 0 {
        let owner = store.get_owner(group)?;
        check_backup_owner(&owner, auth_id)?;
    }
    Ok(())
}

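/// Load the manifest of a snapshot and return it together with the list of
/// files it references (plus an entry for the manifest blob itself).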
fn read_backup_index(
    store: &DataStore,
    backup_dir: &BackupDir,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, index_size) = store.load_manifest(backup_dir)?;

    let mut result = Vec::new();
    for item in manifest.files() {
        result.push(BackupContent {
            filename: item.filename.clone(),
            crypt_mode: Some(item.crypt_mode),
            size: Some(item.size),
        });
    }

    result.push(BackupContent {
        filename: MANIFEST_BLOB_NAME.to_string(),
        crypt_mode: match manifest.signature {
            Some(_) => Some(CryptMode::SignOnly),
            None => Some(CryptMode::None),
        },
        size: Some(index_size),
    });

    Ok((manifest, result))
}

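/// Like `read_backup_index`, but also include files that exist in the snapshot
/// directory without being listed in the manifest.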
fn get_all_snapshot_files(
    store: &DataStore,
    info: &BackupInfo,
) -> Result<(BackupManifest, Vec<BackupContent>), Error> {

    let (manifest, mut files) = read_backup_index(store, &info.backup_dir)?;

    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
        acc.insert(item.filename.clone());
        acc
    });

    for file in &info.files {
        if file_set.contains(file) { continue; }
        files.push(BackupContent {
            filename: file.to_string(),
            size: None,
            crypt_mode: None,
        });
    }

    Ok((manifest, files))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_GROUPS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
pub fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, &group);
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
                comment,
            });

            group_info
        });

    Ok(group_info)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_type: String,
    backup_id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(backup_type, backup_id);
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    if !datastore.remove_backup_group(&group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

    Ok(Value::Null)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?;

    let info = BackupInfo::new(&datastore.base_path(), snapshot)?;

    let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?;

    Ok(files)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE,
            true),
    },
)]
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.remove_backup_dir(&snapshot, false)?;

    Ok(Value::Null)
}

#[api(
    streaming: true,
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let base_path = datastore.base_path();

    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

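    // Helper closure: turn a single BackupInfo into a SnapshotListItem, falling
    // back to a reduced entry when the manifest cannot be read.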
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();
        let protected = info.backup_dir.is_protected(base_path.clone());

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state: '{}'", err);
                        None
                    }
                };

                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                    protected,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                let files = info
                    .files
                    .into_iter()
                    .map(|filename| BackupContent {
                        filename,
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                    protected,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}

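/// Count backup groups and snapshots per backup type, optionally restricted to
/// groups owned by `filter_owner`.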
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            let owner = match store.get_owner(group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            // only include groups with snapshots (avoid confusing users
            // by counting/displaying empty groups)
            if snapshot_count > 0 {
                let type_count = match group.backup_type() {
                    "ct" => counts.ct.get_or_insert(Default::default()),
                    "vm" => counts.vm.get_or_insert(Default::default()),
                    "host" => counts.host.get_or_insert(Default::default()),
                    _ => counts.other.get_or_insert(Default::default()),
                };

                type_count.groups += 1;
                type_count.snapshots += snapshot_count;
            }

            Ok(counts)
        })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
                optional: true,
            },
            "ignore-verified": {
                schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
                optional: true,
            },
            "outdated-after": {
                schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
                optional: true,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Verify backups.
///
/// This function can verify a single backup snapshot, all backups from a backup group,
/// or all backups in the datastore.
pub fn verify(
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    backup_time: Option<i64>,
    ignore_verified: Option<bool>,
    outdated_after: Option<i64>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let ignore_verified = ignore_verified.unwrap_or(true);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let worker_id;

    let mut backup_dir = None;
    let mut backup_group = None;
    let mut worker_type = "verify";

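    // Determine the verification scope from the given parameters: a single
    // snapshot, a whole backup group, or the complete datastore.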
    match (backup_type, backup_id, backup_time) {
        (Some(backup_type), Some(backup_id), Some(backup_time)) => {
            worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
        }
        (Some(backup_type), Some(backup_id), None) => {
            worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
            let group = BackupGroup::new(backup_type, backup_id);

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

            backup_group = Some(group);
            worker_type = "verify_group";
        }
        (None, None, None) => {
            worker_id = store.clone();
        }
        _ => bail!("parameters do not specify a backup group or snapshot"),
    }

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        worker_type,
        Some(worker_id),
        auth_id.to_string(),
        to_stdout,
        move |worker| {
            let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
            let failed_dirs = if let Some(backup_dir) = backup_dir {
                let mut res = Vec::new();
                if !verify_backup_dir(
                    &verify_worker,
                    &backup_dir,
                    worker.upid().clone(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )? {
                    res.push(backup_dir.to_string());
                }
                res
            } else if let Some(backup_group) = backup_group {
                let failed_dirs = verify_backup_group(
                    &verify_worker,
                    &backup_group,
                    &mut StoreProgress::new(1),
                    worker.upid(),
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?;
                failed_dirs
            } else {
                let privs = CachedUserInfo::new()?
                    .lookup_privs(&auth_id, &["datastore", &store]);

                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
                    Some(auth_id)
                } else {
                    None
                };

                verify_all_backups(
                    &verify_worker,
                    worker.upid(),
                    owner,
                    Some(&move |manifest| {
                        verify_filter(ignore_verified, outdated_after, manifest)
                    }),
                )?
            };
            if !failed_dirs.is_empty() {
                task_log!(worker, "Failed to verify the following snapshots/groups:");
                for dir in failed_dirs {
                    task_log!(worker, "\t{}", dir);
                }
                bail!("verification failed - please check the log for details");
            }
            Ok(())
        },
    )?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune a group on the datastore
pub fn prune(
    backup_id: String,
    backup_type: String,
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let group = BackupGroup::new(&backup_type, &backup_id);

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);

    if dry_run {
        for (info, mark) in prune_info {
            let keep = keep_all || mark.keep();

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
                "protected": mark.protected(),
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchronously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(worker, "retention options: {}", pbs_datastore::prune::cli_options_string(&prune_options));
        task_log!(worker, "Starting prune on store \"{}\" group \"{}/{}\"",
                  store, backup_type, backup_id);
    }

    for (info, mark) in prune_info {
        let keep = keep_all || mark.keep();

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            mark,
        );

        task_log!(worker, "{}", msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
                    info.backup_dir.relative_path(),
                    err,
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}

#[api(
    input: {
        properties: {
            "dry-run": {
                optional: true,
                type: bool,
                default: false,
                description: "Just show what prune would do, but do not delete anything.",
            },
            "prune-options": {
                type: PruneOptions,
                flatten: true,
            },
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
    },
)]
/// Prune the datastore
pub fn prune_datastore(
    dry_run: bool,
    prune_options: PruneOptions,
    store: String,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = WorkerTask::new_thread(
        "prune",
        Some(store.clone()),
        auth_id.to_string(),
        to_stdout,
        move |worker| crate::server::prune_datastore(
            worker,
            auth_id,
            prune_options,
            &store,
            datastore,
            dry_run
        ),
    )?;

    Ok(upid_str)
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
    },
)]
/// Start garbage collection.
pub fn start_garbage_collection(
    store: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let job = Job::new("garbage_collection", &store)
        .map_err(|_| format_err!("garbage collection already running"))?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
        .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?;

    Ok(json!(upid_str))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: GarbageCollectionStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
    },
)]
/// Garbage collection status.
pub fn garbage_collection_status(
    store: String,
    _info: &ApiMethod,
    _rpcenv: &mut dyn RpcEnvironment,
) -> Result<GarbageCollectionStatus, Error> {

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let status = datastore.last_gc_status();

    Ok(status)
}

#[api(
    returns: {
        description: "List the accessible datastores.",
        type: Array,
        items: { type: DataStoreListItem },
    },
    access: {
        permission: &Permission::Anybody,
    },
)]
/// Datastore list
pub fn get_datastore_list(
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<DataStoreListItem>, Error> {

    let (config, _digest) = pbs_config::datastore::config()?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let mut list = Vec::new();

    for (store, (_, data)) in &config.sections {
        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
        let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0;
        if allowed {
            list.push(
                DataStoreListItem {
                    store: store.clone(),
                    comment: data["comment"].as_str().map(String::from),
                }
            );
        }
    }

    Ok(list)
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| bytes.freeze())
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = CachedChunkReader::new(chunk_reader, index, 1).seekable();
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
           .status(StatusCode::OK)
           .header(header::CONTENT_TYPE, "application/octet-stream")
           .body(body)
           .unwrap())
    }.boxed()
}

#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);

pub fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;

        // fixme: use correct formatter
        Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
    }.boxed()
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            "filepath": {
                description: "Base64 encoded path.",
                type: String,
            }
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

    let file_name = CATALOG_NAME;

    let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
    for file in files {
        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
            bail!("cannot decode '{}' - is encrypted", file_name);
        }
    }

    let mut path = datastore.base_path();
    path.push(backup_dir.relative_path());
    path.push(file_name);

    let index = DynamicIndexReader::open(&path)
        .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

    let (csum, size) = index.compute_csum();
    manifest.verify_file(file_name, &csum, size)?;

    let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
    let reader = BufferedDynamicReader::new(index, chunk_reader);

    let mut catalog_reader = CatalogReader::new(reader);

    let path = if filepath != "root" && filepath != "/" {
        base64::decode(filepath)?
    } else {
        vec![b'/']
    };

    catalog_reader.list_dir_contents(&path)
}

#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);

pub fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

        let backup_type = required_string_param(&param, "backup-type")?;
        let backup_id = required_string_param(&param, "backup-id")?;
        let backup_time = required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        let mut components = base64::decode(&filepath)?;
        if !components.is_empty() && components[0] == b'/' {
            components.remove(0);
        }

        let mut split = components.splitn(2, |c| *c == b'/');
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().unwrap_or(b"/");
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        let (csum, size) = index.compute_csum();
        manifest.verify_file(pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let path = OsStr::from_bytes(file_path).to_os_string();
        let file = root
            .lookup(&path).await?
            .ok_or_else(|| format_err!("error opening '{:?}'", path))?;

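        // Stream the looked-up entry according to its type: regular files and
        // hardlink targets are streamed directly, directories are zipped on the fly.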
804f6143
DC
1505 let body = match file.kind() {
1506 EntryKind::File { .. } => Body::wrap_stream(
1507 AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
1508 eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
1509 err
1510 }),
1511 ),
1512 EntryKind::Hardlink(_) => Body::wrap_stream(
1513 AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
1514 .map_err(move |err| {
1515 eprintln!(
1516 "error during streaming of hardlink '{:?}' - {}",
2e219481 1517 path, err
804f6143
DC
1518 );
1519 err
1520 }),
1521 ),
1522 EntryKind::Directory => {
1523 let (sender, receiver) = tokio::sync::mpsc::channel(100);
804f6143 1524 let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
fd6d2438 1525 proxmox_rest_server::spawn_internal_task(
2e219481
DC
1526 create_zip(channelwriter, decoder, path.clone(), false)
1527 );
7c667013 1528 Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
2e219481 1529 eprintln!("error during streaming of zip '{:?}' - {}", path, err);
d33d8f4e 1530 err
804f6143
DC
1531 }))
1532 }
1533 other => bail!("cannot download file of type {:?}", other),
1534 };
d33d8f4e
DC
1535
1536 // fixme: set other headers ?
1537 Ok(Response::builder()
1538 .status(StatusCode::OK)
1539 .header(header::CONTENT_TYPE, "application/octet-stream")
1540 .body(body)
1541 .unwrap())
1542 }.boxed()
1543}
1544
1a0d3d11
DM
1545#[api(
1546 input: {
1547 properties: {
1548 store: {
1549 schema: DATASTORE_SCHEMA,
1550 },
1551 timeframe: {
c68fa58a 1552 type: RRDTimeFrame,
1a0d3d11
DM
1553 },
1554 cf: {
1555 type: RRDMode,
1556 },
1557 },
1558 },
1559 access: {
1560 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1561 },
1562)]
1563/// Read datastore stats
bf78f708 1564pub fn get_rrd_stats(
1a0d3d11 1565 store: String,
c68fa58a 1566 timeframe: RRDTimeFrame,
1a0d3d11
DM
1567 cf: RRDMode,
1568 _param: Value,
1569) -> Result<Value, Error> {
1570
e9d2fc93 1571 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
f27b6086
DC
1572 let disk_manager = crate::tools::disks::DiskManage::new();
1573
1574 let mut rrd_fields = vec![
1575 "total", "used",
1576 "read_ios", "read_bytes",
1577 "write_ios", "write_bytes",
1578 ];
1579
1580 // we do not have io_ticks for zpools, so don't include them
1581 match disk_manager.find_mounted_device(&datastore.base_path()) {
1582 Ok(Some((fs_type, _, _))) if fs_type.as_str() == "zfs" => {},
1583 _ => rrd_fields.push("io_ticks"),
1584 };
1585
431cc7b1
DC
1586 create_value_from_rrd(
1587 &format!("datastore/{}", store),
f27b6086 1588 &rrd_fields,
1a0d3d11
DM
1589 timeframe,
1590 cf,
1591 )
1592}
1593
5fd823c3
HL
1594#[api(
1595 input: {
1596 properties: {
1597 store: {
1598 schema: DATASTORE_SCHEMA,
1599 },
1600 },
1601 },
1602 access: {
1603 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, true),
1604 },
1605)]
1606/// Read datastore stats
1607pub fn get_active_operations(
1608 store: String,
1609 _param: Value,
1610) -> Result<Value, Error> {
1611 let active_operations = task_tracking::get_active_operations(&store)?;
1612 Ok(json!({
1613 "read": active_operations.read,
1614 "write": active_operations.write,
1615 }))
1616}
1617
d6688884
SR
1618#[api(
1619 input: {
1620 properties: {
1621 store: {
1622 schema: DATASTORE_SCHEMA,
1623 },
1624 "backup-type": {
1625 schema: BACKUP_TYPE_SCHEMA,
1626 },
1627 "backup-id": {
1628 schema: BACKUP_ID_SCHEMA,
1629 },
1630 },
1631 },
1632 access: {
1633 permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
1634 },
1635)]
1636/// Get "notes" for a backup group
1637pub fn get_group_notes(
1638 store: String,
1639 backup_type: String,
1640 backup_id: String,
1641 rpcenv: &mut dyn RpcEnvironment,
1642) -> Result<String, Error> {
e9d2fc93 1643 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
d6688884
SR
1644
1645 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1646 let backup_group = BackupGroup::new(backup_type, backup_id);
1647
1648 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
1649
1650 let note_path = get_group_note_path(&datastore, &backup_group);
1651 Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
1652}
1653
1654#[api(
1655 input: {
1656 properties: {
1657 store: {
1658 schema: DATASTORE_SCHEMA,
1659 },
1660 "backup-type": {
1661 schema: BACKUP_TYPE_SCHEMA,
1662 },
1663 "backup-id": {
1664 schema: BACKUP_ID_SCHEMA,
1665 },
1666 notes: {
1667 description: "A multiline text.",
1668 },
1669 },
1670 },
1671 access: {
1672 permission: &Permission::Privilege(&["datastore", "{store}"],
1673 PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
1674 true),
1675 },
1676)]
1677/// Set "notes" for a backup group
1678pub fn set_group_notes(
1679 store: String,
1680 backup_type: String,
1681 backup_id: String,
1682 notes: String,
1683 rpcenv: &mut dyn RpcEnvironment,
1684) -> Result<(), Error> {
e9d2fc93 1685 let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
d6688884
SR
1686
1687 let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
1688 let backup_group = BackupGroup::new(backup_type, backup_id);
1689
1690 check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
1691
1692 let note_path = get_group_note_path(&datastore, &backup_group);
e0a19d33 1693 replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
d6688884
SR
1694
1695 Ok(())
1696}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;

    let notes = manifest.unprotected["notes"]
        .as_str()
        .unwrap_or("");

    Ok(String::from(notes))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            notes: {
                description: "A multiline text.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_manifest(&backup_dir, |manifest| {
        manifest.unprotected["notes"] = notes.into();
    }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(())
}
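
// Note: per-snapshot notes live in the "unprotected" section of the backup
// manifest, so they can be edited without touching the signed part of the
// manifest. A minimal sketch of the same update pattern, reusing the
// update_manifest() helper seen above with a hypothetical key:
//
//   datastore.update_manifest(&backup_dir, |manifest| {
//       manifest.unprotected["example-key"] = "example value".into();
//   })?;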

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?;

    Ok(backup_dir.is_protected(datastore.base_path()))
}

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "backup-time": {
                schema: BACKUP_TIME_SCHEMA,
            },
            protected: {
                description: "Enable/disable protection.",
            },
        },
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"],
                                           PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
                                           true),
    },
)]
/// Enable or disable protection for a specific backup
pub fn set_protection(
    store: String,
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

    check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?;

    datastore.update_protection(&backup_dir, protected)
}
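
// Note: the protection flag is read via BackupDir::is_protected() and toggled via
// DataStore::update_protection(); elsewhere in the code base, protected snapshots
// are exempt from pruning and manual removal. Illustrative request shape only,
// assuming the usual /api2/json prefix:
//
//   GET /api2/json/admin/datastore/{store}/protected?backup-type=vm&backup-id=100&backup-time=1650000000
//   PUT /api2/json/admin/datastore/{store}/protected  (JSON body additionally carries "protected": true|false)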

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                schema: BACKUP_ID_SCHEMA,
            },
            "new-owner": {
                type: Authid,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup"
    },
)]
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_type: String,
    backup_id: String,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_group = BackupGroup::new(backup_type, backup_id);

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    // Without Datastore.Modify, ownership may only move between a user and that
    // user's own API tokens (see the match below); user-to-user transfers always
    // require Datastore.Modify.
    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        // High-privilege user/token
        true
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(&backup_group)?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
                // API token to API token, owned by same user
                let owner = owner.user();
                let new_owner = new_owner.user();
                owner == new_owner && Authid::from(owner.clone()) == auth_id
            },
            (true, false) => {
                // API token to API token owner
                Authid::from(owner.user().clone()) == auth_id
                    && new_owner == auth_id
            },
            (false, true) => {
                // API token owner to API token
                owner == auth_id
                    && Authid::from(new_owner.user().clone()) == auth_id
            },
            (false, false) => {
                // User to User, not allowed for unprivileged users
                false
            },
        }
    } else {
        false
    };

    if !allowed {
        return Err(http_err!(UNAUTHORIZED,
            "{} does not have permission to change owner of backup group '{}' to {}",
            auth_id,
            backup_group,
            new_owner,
        ));
    }

    if !user_info.is_active_auth_id(&new_owner) {
        bail!("{} '{}' is inactive or non-existent",
            if new_owner.is_token() {
                "API token".to_string()
            } else {
                "user".to_string()
            },
            new_owner);
    }

    datastore.set_owner(&backup_group, &new_owner, true)?;

    Ok(())
}
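
// Illustrative request shape only (parameter names taken from the schema above,
// URL prefix assumed, example Authid values are hypothetical):
//
//   POST /api2/json/admin/datastore/{store}/change-owner
//   { "backup-type": "vm", "backup-id": "100", "new-owner": "alice@pbs!token1" }
//
// With only Datastore.Backup, this succeeds when a user moves one of their own
// groups between themselves and one of their own API tokens; every other
// combination requires Datastore.Modify on the datastore.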

#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "active-operations",
        &Router::new()
            .get(&API_METHOD_GET_ACTIVE_OPERATIONS)
    ),
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "group-notes",
        &Router::new()
            .get(&API_METHOD_GET_GROUP_NOTES)
            .put(&API_METHOD_SET_GROUP_NOTES)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
            .delete(&API_METHOD_DELETE_GROUP)
    ),
    (
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "protected",
        &Router::new()
            .get(&API_METHOD_GET_PROTECTION)
            .put(&API_METHOD_SET_PROTECTION)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "prune-datastore",
        &Router::new()
            .post(&API_METHOD_PRUNE_DATASTORE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];

const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);


pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);
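
// The top-level ROUTER lists datastores on GET and binds the matched path segment
// to the "store" parameter, so every entry in DATASTORE_INFO_SUBDIRS ends up below
// a concrete datastore, e.g. (prefix assumed)
// /api2/json/admin/datastore/<store>/snapshots or .../gc.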