]>
Commit | Line | Data |
---|---|---|
0d08fcee | 1 | use std::collections::HashSet; |
d33d8f4e DC |
2 | use std::ffi::OsStr; |
3 | use std::os::unix::ffi::OsStrExt; | |
6b809ff5 | 4 | use std::sync::{Arc, Mutex}; |
53a561a2 | 5 | use std::path::{Path, PathBuf}; |
804f6143 | 6 | use std::pin::Pin; |
cad540e9 | 7 | |
6ef9bb59 | 8 | use anyhow::{bail, format_err, Error}; |
9e47c0a5 | 9 | use futures::*; |
cad540e9 WB |
10 | use hyper::http::request::Parts; |
11 | use hyper::{header, Body, Response, StatusCode}; | |
15e9b4ed DM |
12 | use serde_json::{json, Value}; |
13 | ||
bb34b589 DM |
14 | use proxmox::api::{ |
15 | api, ApiResponseFuture, ApiHandler, ApiMethod, Router, | |
e7cb4dc5 WB |
16 | RpcEnvironment, RpcEnvironmentType, Permission |
17 | }; | |
cad540e9 WB |
18 | use proxmox::api::router::SubdirMap; |
19 | use proxmox::api::schema::*; | |
60f9a6ea | 20 | use proxmox::tools::fs::{replace_file, CreateOptions}; |
9ea4bce4 | 21 | use proxmox::{http_err, identity, list_subdirs_api_method, sortable}; |
e18a6c9e | 22 | |
804f6143 | 23 | use pxar::accessor::aio::{Accessor, FileContents, FileEntry}; |
d33d8f4e DC |
24 | use pxar::EntryKind; |
25 | ||
cad540e9 | 26 | use crate::api2::types::*; |
431cc7b1 | 27 | use crate::api2::node::rrd::create_value_from_rrd; |
e5064ba6 | 28 | use crate::backup::*; |
cad540e9 | 29 | use crate::config::datastore; |
54552dda DM |
30 | use crate::config::cached_user_info::CachedUserInfo; |
31 | ||
4fdf5ddf | 32 | use crate::server::{jobstate::Job, WorkerTask}; |
804f6143 DC |
33 | use crate::tools::{ |
34 | self, | |
35 | zip::{ZipEncoder, ZipEntry}, | |
36 | AsyncChannelWriter, AsyncReaderStream, WrappedReaderStream, | |
37 | }; | |
38 | ||
d00e1a21 DM |
39 | use crate::config::acl::{ |
40 | PRIV_DATASTORE_AUDIT, | |
54552dda | 41 | PRIV_DATASTORE_MODIFY, |
d00e1a21 DM |
42 | PRIV_DATASTORE_READ, |
43 | PRIV_DATASTORE_PRUNE, | |
54552dda | 44 | PRIV_DATASTORE_BACKUP, |
09f6a240 | 45 | PRIV_DATASTORE_VERIFY, |
d00e1a21 | 46 | }; |
1629d2ad | 47 | |
bff85572 | 48 | fn check_priv_or_backup_owner( |
e7cb4dc5 WB |
49 | store: &DataStore, |
50 | group: &BackupGroup, | |
e6dc35ac | 51 | auth_id: &Authid, |
bff85572 FG |
52 | required_privs: u64, |
53 | ) -> Result<(), Error> { | |
54 | let user_info = CachedUserInfo::new()?; | |
55 | let privs = user_info.lookup_privs(&auth_id, &["datastore", store.name()]); | |
56 | ||
57 | if privs & required_privs == 0 { | |
58 | let owner = store.get_owner(group)?; | |
59 | check_backup_owner(&owner, auth_id)?; | |
60 | } | |
61 | Ok(()) | |
62 | } | |
63 | ||
64 | fn check_backup_owner( | |
65 | owner: &Authid, | |
66 | auth_id: &Authid, | |
e7cb4dc5 | 67 | ) -> Result<(), Error> { |
bff85572 FG |
68 | let correct_owner = owner == auth_id |
69 | || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id); | |
70 | if !correct_owner { | |
e6dc35ac | 71 | bail!("backup owner check failed ({} != {})", auth_id, owner); |
54552dda DM |
72 | } |
73 | Ok(()) | |
74 | } | |
75 | ||
e7cb4dc5 WB |
76 | fn read_backup_index( |
77 | store: &DataStore, | |
78 | backup_dir: &BackupDir, | |
79 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { | |
8c70e3eb | 80 | |
ff86ef00 | 81 | let (manifest, index_size) = store.load_manifest(backup_dir)?; |
8c70e3eb | 82 | |
09b1f7b2 DM |
83 | let mut result = Vec::new(); |
84 | for item in manifest.files() { | |
85 | result.push(BackupContent { | |
86 | filename: item.filename.clone(), | |
f28d9088 | 87 | crypt_mode: Some(item.crypt_mode), |
09b1f7b2 DM |
88 | size: Some(item.size), |
89 | }); | |
8c70e3eb DM |
90 | } |
91 | ||
09b1f7b2 | 92 | result.push(BackupContent { |
96d65fbc | 93 | filename: MANIFEST_BLOB_NAME.to_string(), |
882c0823 FG |
94 | crypt_mode: match manifest.signature { |
95 | Some(_) => Some(CryptMode::SignOnly), | |
96 | None => Some(CryptMode::None), | |
97 | }, | |
09b1f7b2 DM |
98 | size: Some(index_size), |
99 | }); | |
4f1e40a2 | 100 | |
70030b43 | 101 | Ok((manifest, result)) |
8c70e3eb DM |
102 | } |
103 | ||
1c090810 DC |
104 | fn get_all_snapshot_files( |
105 | store: &DataStore, | |
106 | info: &BackupInfo, | |
70030b43 DM |
107 | ) -> Result<(BackupManifest, Vec<BackupContent>), Error> { |
108 | ||
109 | let (manifest, mut files) = read_backup_index(&store, &info.backup_dir)?; | |
1c090810 DC |
110 | |
111 | let file_set = files.iter().fold(HashSet::new(), |mut acc, item| { | |
112 | acc.insert(item.filename.clone()); | |
113 | acc | |
114 | }); | |
115 | ||
116 | for file in &info.files { | |
117 | if file_set.contains(file) { continue; } | |
f28d9088 WB |
118 | files.push(BackupContent { |
119 | filename: file.to_string(), | |
120 | size: None, | |
121 | crypt_mode: None, | |
122 | }); | |
1c090810 DC |
123 | } |
124 | ||
70030b43 | 125 | Ok((manifest, files)) |
1c090810 DC |
126 | } |
127 | ||
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of backup groups.",
        items: {
            type: GroupListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup groups.
///
/// Callers with DATASTORE_AUDIT see all groups; otherwise only groups
/// owned by the calling auth id are returned. Groups whose owner cannot
/// be determined, whose snapshots cannot be listed, or which contain no
/// snapshots are silently skipped.
fn list_groups(
    store: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    let datastore = DataStore::lookup_datastore(&store)?;
    // AUDIT privilege allows listing groups owned by others
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let backup_groups = BackupInfo::list_backup_groups(&datastore.base_path())?;

    let group_info = backup_groups
        .into_iter()
        .fold(Vec::new(), |mut group_info, group| {
            // unreadable owner: log and skip the group instead of failing
            let owner = match datastore.get_owner(&group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return group_info;
                },
            };
            // ownership filter for unprivileged callers
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return group_info;
            }

            // listing error: skip group (error intentionally discarded)
            let snapshots = match group.list_backups(&datastore.base_path()) {
                Ok(snapshots) => snapshots,
                Err(_) => {
                    return group_info;
                },
            };

            let backup_count: u64 = snapshots.len() as u64;
            if backup_count == 0 {
                return group_info;
            }

            // pick the newest *finished* snapshot as the group's last backup
            // (clone via to_owned so the Vec can be moved from below)
            let last_backup = snapshots
                .iter()
                .fold(&snapshots[0], |last, curr| {
                    if curr.is_finished()
                        && curr.backup_dir.backup_time() > last.backup_dir.backup_time() {
                        curr
                    } else {
                        last
                    }
                })
                .to_owned();

            group_info.push(GroupListItem {
                backup_type: group.backup_type().to_string(),
                backup_id: group.backup_id().to_string(),
                last_backup: last_backup.backup_dir.backup_time(),
                owner: Some(owner),
                backup_count,
                files: last_backup.files,
            });

            group_info
        });

    Ok(group_info)
}
8f579717 | 220 | |
09b1f7b2 DM |
221 | #[api( |
222 | input: { | |
223 | properties: { | |
224 | store: { | |
225 | schema: DATASTORE_SCHEMA, | |
226 | }, | |
227 | "backup-type": { | |
228 | schema: BACKUP_TYPE_SCHEMA, | |
229 | }, | |
230 | "backup-id": { | |
231 | schema: BACKUP_ID_SCHEMA, | |
232 | }, | |
233 | "backup-time": { | |
234 | schema: BACKUP_TIME_SCHEMA, | |
235 | }, | |
236 | }, | |
237 | }, | |
238 | returns: { | |
239 | type: Array, | |
240 | description: "Returns the list of archive files inside a backup snapshots.", | |
241 | items: { | |
242 | type: BackupContent, | |
243 | } | |
244 | }, | |
bb34b589 | 245 | access: { |
54552dda DM |
246 | permission: &Permission::Privilege( |
247 | &["datastore", "{store}"], | |
248 | PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, | |
249 | true), | |
bb34b589 | 250 | }, |
09b1f7b2 DM |
251 | )] |
252 | /// List snapshot files. | |
ea5f547f | 253 | pub fn list_snapshot_files( |
09b1f7b2 DM |
254 | store: String, |
255 | backup_type: String, | |
256 | backup_id: String, | |
257 | backup_time: i64, | |
01a13423 | 258 | _info: &ApiMethod, |
54552dda | 259 | rpcenv: &mut dyn RpcEnvironment, |
09b1f7b2 | 260 | ) -> Result<Vec<BackupContent>, Error> { |
01a13423 | 261 | |
e6dc35ac | 262 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
09b1f7b2 | 263 | let datastore = DataStore::lookup_datastore(&store)?; |
54552dda | 264 | |
e0e5b442 | 265 | let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?; |
01a13423 | 266 | |
bff85572 | 267 | check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)?; |
54552dda | 268 | |
d7c24397 | 269 | let info = BackupInfo::new(&datastore.base_path(), snapshot)?; |
01a13423 | 270 | |
70030b43 DM |
271 | let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?; |
272 | ||
273 | Ok(files) | |
01a13423 DM |
274 | } |
275 | ||
68a6a0ee DM |
276 | #[api( |
277 | input: { | |
278 | properties: { | |
279 | store: { | |
280 | schema: DATASTORE_SCHEMA, | |
281 | }, | |
282 | "backup-type": { | |
283 | schema: BACKUP_TYPE_SCHEMA, | |
284 | }, | |
285 | "backup-id": { | |
286 | schema: BACKUP_ID_SCHEMA, | |
287 | }, | |
288 | "backup-time": { | |
289 | schema: BACKUP_TIME_SCHEMA, | |
290 | }, | |
291 | }, | |
292 | }, | |
bb34b589 | 293 | access: { |
54552dda DM |
294 | permission: &Permission::Privilege( |
295 | &["datastore", "{store}"], | |
296 | PRIV_DATASTORE_MODIFY| PRIV_DATASTORE_PRUNE, | |
297 | true), | |
bb34b589 | 298 | }, |
68a6a0ee DM |
299 | )] |
300 | /// Delete backup snapshot. | |
301 | fn delete_snapshot( | |
302 | store: String, | |
303 | backup_type: String, | |
304 | backup_id: String, | |
305 | backup_time: i64, | |
6f62c924 | 306 | _info: &ApiMethod, |
54552dda | 307 | rpcenv: &mut dyn RpcEnvironment, |
6f62c924 DM |
308 | ) -> Result<Value, Error> { |
309 | ||
e6dc35ac | 310 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda | 311 | |
e0e5b442 | 312 | let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?; |
68a6a0ee | 313 | let datastore = DataStore::lookup_datastore(&store)?; |
6f62c924 | 314 | |
bff85572 | 315 | check_priv_or_backup_owner(&datastore, snapshot.group(), &auth_id, PRIV_DATASTORE_MODIFY)?; |
54552dda | 316 | |
c9756b40 | 317 | datastore.remove_backup_dir(&snapshot, false)?; |
6f62c924 DM |
318 | |
319 | Ok(Value::Null) | |
320 | } | |
321 | ||
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            "backup-type": {
                optional: true,
                schema: BACKUP_TYPE_SCHEMA,
            },
            "backup-id": {
                optional: true,
                schema: BACKUP_ID_SCHEMA,
            },
        },
    },
    returns: {
        type: Array,
        description: "Returns the list of snapshots.",
        items: {
            type: SnapshotListItem,
        }
    },
    access: {
        permission: &Permission::Privilege(
            &["datastore", "{store}"],
            PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
            true),
    },
)]
/// List backup snapshots.
///
/// Optionally filtered by backup type and/or backup id. Callers with
/// DATASTORE_AUDIT see all snapshots, others only those of groups they
/// own. Groups whose owner cannot be determined are skipped with a log
/// message.
pub fn list_snapshots (
    store: String,
    backup_type: Option<String>,
    backup_id: Option<String>,
    _param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SnapshotListItem>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;
    let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);

    // AUDIT privilege allows listing snapshots owned by others
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    let datastore = DataStore::lookup_datastore(&store)?;

    let base_path = datastore.base_path();

    // select the candidate groups according to the optional filters
    let groups = match (backup_type, backup_id) {
        (Some(backup_type), Some(backup_id)) => {
            // both given: exactly one group
            let mut groups = Vec::with_capacity(1);
            groups.push(BackupGroup::new(backup_type, backup_id));
            groups
        },
        (Some(backup_type), None) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_type() == backup_type)
                .collect()
        },
        (None, Some(backup_id)) => {
            BackupInfo::list_backup_groups(&base_path)?
                .into_iter()
                .filter(|group| group.backup_id() == backup_id)
                .collect()
        },
        _ => BackupInfo::list_backup_groups(&base_path)?,
    };

    // convert one BackupInfo into the API result item; falls back to a
    // reduced item (no sizes/comment/verification) if reading the
    // snapshot's manifest/files fails
    let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
        let backup_type = group.backup_type().to_string();
        let backup_id = group.backup_id().to_string();
        let backup_time = info.backup_dir.backup_time();

        match get_all_snapshot_files(&datastore, &info) {
            Ok((manifest, files)) => {
                // extract the first line from notes
                let comment: Option<String> = manifest.unprotected["notes"]
                    .as_str()
                    .and_then(|notes| notes.lines().next())
                    .map(String::from);

                // unparsable fingerprint is logged and reported as None
                let fingerprint = match manifest.fingerprint() {
                    Ok(fp) => fp,
                    Err(err) => {
                        eprintln!("error parsing fingerprint: '{}'", err);
                        None
                    },
                };

                // unparsable verify state is logged and reported as None
                let verification = manifest.unprotected["verify_state"].clone();
                let verification: Option<SnapshotVerifyState> = match serde_json::from_value(verification) {
                    Ok(verify) => verify,
                    Err(err) => {
                        eprintln!("error parsing verification state : '{}'", err);
                        None
                    }
                };

                // total snapshot size = sum of known file sizes
                let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment,
                    verification,
                    fingerprint,
                    files,
                    size,
                    owner,
                }
            },
            Err(err) => {
                eprintln!("error during snapshot file listing: '{}'", err);
                // manifest unreadable: report the plain on-disk file names
                let files = info
                    .files
                    .into_iter()
                    .map(|x| BackupContent {
                        filename: x.to_string(),
                        size: None,
                        crypt_mode: None,
                    })
                    .collect();

                SnapshotListItem {
                    backup_type,
                    backup_id,
                    backup_time,
                    comment: None,
                    verification: None,
                    fingerprint: None,
                    files,
                    size: None,
                    owner,
                }
            },
        }
    };

    groups
        .iter()
        .try_fold(Vec::new(), |mut snapshots, group| {
            // unreadable owner: log and skip the whole group
            let owner = match datastore.get_owner(group) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              &store,
                              group,
                              err);
                    return Ok(snapshots);
                },
            };

            // ownership filter for unprivileged callers
            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
                return Ok(snapshots);
            }

            let group_backups = group.list_backups(&datastore.base_path())?;

            snapshots.extend(
                group_backups
                    .into_iter()
                    .map(|info| info_to_snapshot_list_item(&group, Some(owner.clone()), info))
            );

            Ok(snapshots)
        })
}
493 | ||
/// Count backup groups and snapshots per backup type ("ct"/"vm"/"host"/other)
/// for a datastore. When `filter_owner` is set, only groups owned by that
/// auth id (per `check_backup_owner`) are counted. Groups whose owner cannot
/// be read are logged and excluded.
fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
    let base_path = store.base_path();
    let groups = BackupInfo::list_backup_groups(&base_path)?;

    groups.iter()
        .filter(|group| {
            // drop groups with unreadable owner (logged, not fatal)
            let owner = match store.get_owner(&group) {
                Ok(owner) => owner,
                Err(err) => {
                    eprintln!("Failed to get owner of group '{}/{}' - {}",
                              store.name(),
                              group,
                              err);
                    return false;
                },
            };

            // apply the optional ownership filter
            match filter_owner {
                Some(filter) => check_backup_owner(&owner, filter).is_ok(),
                None => true,
            }
        })
        .try_fold(Counts::default(), |mut counts, group| {
            let snapshot_count = group.list_backups(&base_path)?.len() as u64;

            // pick the per-type counter bucket, creating it on first use
            let type_count = match group.backup_type() {
                "ct" => counts.ct.get_or_insert(Default::default()),
                "vm" => counts.vm.get_or_insert(Default::default()),
                "host" => counts.host.get_or_insert(Default::default()),
                _ => counts.other.get_or_insert(Default::default()),
            };

            type_count.groups += 1;
            type_count.snapshots += snapshot_count;

            Ok(counts)
        })
}
532 | ||
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            verbose: {
                type: bool,
                default: false,
                optional: true,
                description: "Include additional information like snapshot counts and GC status.",
            },
        },

    },
    returns: {
        type: DataStoreStatus,
    },
    access: {
        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true),
    },
)]
/// Get datastore status.
///
/// Always reports disk usage; snapshot counts and the last garbage
/// collection status are only computed when `verbose` is set.
pub fn status(
    store: String,
    verbose: bool,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<DataStoreStatus, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
    let storage = crate::tools::disks::disk_usage(&datastore.base_path())?;
    let (counts, gc_status) = if verbose {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let user_info = CachedUserInfo::new()?;

        // without AUDIT, restrict the counts to the caller's own groups
        let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]);
        let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 {
            None
        } else {
            Some(&auth_id)
        };

        let counts = Some(get_snapshots_count(&datastore, filter_owner)?);
        let gc_status = Some(datastore.last_gc_status());

        (counts, gc_status)
    } else {
        // non-verbose: skip the (potentially expensive) counting
        (None, None)
    };

    Ok(DataStoreStatus {
        total: storage.total,
        used: storage.used,
        avail: storage.avail,
        gc_status,
        counts,
    })
}
591 | ||
c2009e53 DM |
592 | #[api( |
593 | input: { | |
594 | properties: { | |
595 | store: { | |
596 | schema: DATASTORE_SCHEMA, | |
597 | }, | |
598 | "backup-type": { | |
599 | schema: BACKUP_TYPE_SCHEMA, | |
600 | optional: true, | |
601 | }, | |
602 | "backup-id": { | |
603 | schema: BACKUP_ID_SCHEMA, | |
604 | optional: true, | |
605 | }, | |
606 | "backup-time": { | |
607 | schema: BACKUP_TIME_SCHEMA, | |
608 | optional: true, | |
609 | }, | |
610 | }, | |
611 | }, | |
612 | returns: { | |
613 | schema: UPID_SCHEMA, | |
614 | }, | |
615 | access: { | |
09f6a240 | 616 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true), |
c2009e53 DM |
617 | }, |
618 | )] | |
619 | /// Verify backups. | |
620 | /// | |
621 | /// This function can verify a single backup snapshot, all backup from a backup group, | |
622 | /// or all backups in the datastore. | |
623 | pub fn verify( | |
624 | store: String, | |
625 | backup_type: Option<String>, | |
626 | backup_id: Option<String>, | |
627 | backup_time: Option<i64>, | |
628 | rpcenv: &mut dyn RpcEnvironment, | |
629 | ) -> Result<Value, Error> { | |
630 | let datastore = DataStore::lookup_datastore(&store)?; | |
631 | ||
09f6a240 | 632 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
8ea00f6e | 633 | let worker_id; |
c2009e53 DM |
634 | |
635 | let mut backup_dir = None; | |
636 | let mut backup_group = None; | |
133042b5 | 637 | let mut worker_type = "verify"; |
c2009e53 DM |
638 | |
639 | match (backup_type, backup_id, backup_time) { | |
640 | (Some(backup_type), Some(backup_id), Some(backup_time)) => { | |
4ebda996 | 641 | worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time); |
e0e5b442 | 642 | let dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
09f6a240 FG |
643 | |
644 | check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?; | |
645 | ||
c2009e53 | 646 | backup_dir = Some(dir); |
133042b5 | 647 | worker_type = "verify_snapshot"; |
c2009e53 DM |
648 | } |
649 | (Some(backup_type), Some(backup_id), None) => { | |
4ebda996 | 650 | worker_id = format!("{}:{}/{}", store, backup_type, backup_id); |
c2009e53 | 651 | let group = BackupGroup::new(backup_type, backup_id); |
09f6a240 FG |
652 | |
653 | check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; | |
654 | ||
c2009e53 | 655 | backup_group = Some(group); |
133042b5 | 656 | worker_type = "verify_group"; |
c2009e53 DM |
657 | } |
658 | (None, None, None) => { | |
8ea00f6e | 659 | worker_id = store.clone(); |
c2009e53 | 660 | } |
5a718dce | 661 | _ => bail!("parameters do not specify a backup group or snapshot"), |
c2009e53 DM |
662 | } |
663 | ||
c2009e53 DM |
664 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; |
665 | ||
666 | let upid_str = WorkerTask::new_thread( | |
133042b5 | 667 | worker_type, |
e7cb4dc5 | 668 | Some(worker_id.clone()), |
09f6a240 | 669 | auth_id.clone(), |
e7cb4dc5 WB |
670 | to_stdout, |
671 | move |worker| { | |
4f09d310 DM |
672 | let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16))); |
673 | let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64))); | |
674 | ||
adfdc369 | 675 | let failed_dirs = if let Some(backup_dir) = backup_dir { |
adfdc369 | 676 | let mut res = Vec::new(); |
f6b1d1cc WB |
677 | if !verify_backup_dir( |
678 | datastore, | |
679 | &backup_dir, | |
680 | verified_chunks, | |
681 | corrupt_chunks, | |
682 | worker.clone(), | |
683 | worker.upid().clone(), | |
d771a608 | 684 | None, |
f6b1d1cc | 685 | )? { |
adfdc369 DC |
686 | res.push(backup_dir.to_string()); |
687 | } | |
688 | res | |
c2009e53 | 689 | } else if let Some(backup_group) = backup_group { |
7e25b9aa | 690 | let failed_dirs = verify_backup_group( |
63d9aca9 DM |
691 | datastore, |
692 | &backup_group, | |
693 | verified_chunks, | |
694 | corrupt_chunks, | |
7e25b9aa | 695 | &mut StoreProgress::new(1), |
63d9aca9 | 696 | worker.clone(), |
f6b1d1cc | 697 | worker.upid(), |
d771a608 | 698 | None, |
63d9aca9 DM |
699 | )?; |
700 | failed_dirs | |
c2009e53 | 701 | } else { |
09f6a240 FG |
702 | let privs = CachedUserInfo::new()? |
703 | .lookup_privs(&auth_id, &["datastore", &store]); | |
704 | ||
705 | let owner = if privs & PRIV_DATASTORE_VERIFY == 0 { | |
706 | Some(auth_id) | |
707 | } else { | |
708 | None | |
709 | }; | |
710 | ||
711 | verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)? | |
c2009e53 | 712 | }; |
adfdc369 | 713 | if failed_dirs.len() > 0 { |
a4fa3fc2 | 714 | worker.log("Failed to verify the following snapshots/groups:"); |
adfdc369 DC |
715 | for dir in failed_dirs { |
716 | worker.log(format!("\t{}", dir)); | |
717 | } | |
1ffe0301 | 718 | bail!("verification failed - please check the log for details"); |
c2009e53 DM |
719 | } |
720 | Ok(()) | |
e7cb4dc5 WB |
721 | }, |
722 | )?; | |
c2009e53 DM |
723 | |
724 | Ok(json!(upid_str)) | |
725 | } | |
726 | ||
// Expands to a fixed-size array of `(name, optional, schema)` parameter
// tuples: `$list1` entries first, then the six common keep-* prune options,
// then optional `$list2` entries. Used to build prune-related API schemas
// (see API_METHOD_PRUNE).
//
// NOTE(review): the macro name misspells "parameters" as "prameters"; it is
// `#[macro_export]`ed, so renaming would break external callers.
#[macro_export]
macro_rules! add_common_prune_prameters {
    // single-list form delegates to the two-list form with an empty tail
    ( [ $( $list1:tt )* ] ) => {
        add_common_prune_prameters!([$( $list1 )* ] , [])
    };
    ( [ $( $list1:tt )* ] , [ $( $list2:tt )* ] ) => {
        [
            $( $list1 )*
            (
                "keep-daily",
                true,
                &PRUNE_SCHEMA_KEEP_DAILY,
            ),
            (
                "keep-hourly",
                true,
                &PRUNE_SCHEMA_KEEP_HOURLY,
            ),
            (
                "keep-last",
                true,
                &PRUNE_SCHEMA_KEEP_LAST,
            ),
            (
                "keep-monthly",
                true,
                &PRUNE_SCHEMA_KEEP_MONTHLY,
            ),
            (
                "keep-weekly",
                true,
                &PRUNE_SCHEMA_KEEP_WEEKLY,
            ),
            (
                "keep-yearly",
                true,
                &PRUNE_SCHEMA_KEEP_YEARLY,
            ),
            $( $list2 )*
        ]
    }
}
769 | ||
/// Return schema of the prune API call: an array of [`PruneListItem`]s,
/// one per affected snapshot with its keep/remove decision.
pub const API_RETURN_SCHEMA_PRUNE: Schema = ArraySchema::new(
    "Returns the list of snapshots and a flag indicating if there are kept or removed.",
    &PruneListItem::API_SCHEMA
).schema();
774 | ||
// Hand-written API method descriptor for `prune` (not via `#[api]`, because
// the parameter list is assembled with the add_common_prune_prameters!
// macro). Requires DATASTORE_MODIFY or DATASTORE_PRUNE on the store path.
const API_METHOD_PRUNE: ApiMethod = ApiMethod::new(
    &ApiHandler::Sync(&prune),
    &ObjectSchema::new(
        "Prune the datastore.",
        &add_common_prune_prameters!([
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("dry-run", true, &BooleanSchema::new(
                "Just show what prune would do, but do not delete anything.")
                .schema()
            ),
        ],[
            ("store", false, &DATASTORE_SCHEMA),
        ])
    ))
    .returns(&API_RETURN_SCHEMA_PRUNE)
    .access(None, &Permission::Privilege(
        &["datastore", "{store}"],
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true)
    );
255f378a | 796 | |
/// Prune a backup group (API handler behind API_METHOD_PRUNE).
///
/// Computes which snapshots to keep from the keep-* options in `param`;
/// with `dry-run` only the keep/remove decisions are returned, otherwise
/// the snapshots marked for removal are deleted. Returns one JSON object
/// per snapshot with its keep flag.
fn prune(
    param: Value,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let store = tools::required_string_param(&param, "store")?;
    let backup_type = tools::required_string_param(&param, "backup-type")?;
    let backup_id = tools::required_string_param(&param, "backup-id")?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let dry_run = param["dry-run"].as_bool().unwrap_or(false);

    let group = BackupGroup::new(backup_type, backup_id);

    let datastore = DataStore::lookup_datastore(&store)?;

    // needs MODIFY on the datastore, or ownership of the group
    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

    // missing keep-* options yield None (i.e. no limit for that rule)
    let prune_options = PruneOptions {
        keep_last: param["keep-last"].as_u64(),
        keep_hourly: param["keep-hourly"].as_u64(),
        keep_daily: param["keep-daily"].as_u64(),
        keep_weekly: param["keep-weekly"].as_u64(),
        keep_monthly: param["keep-monthly"].as_u64(),
        keep_yearly: param["keep-yearly"].as_u64(),
    };

    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);

    let mut prune_result = Vec::new();

    let list = group.list_backups(&datastore.base_path())?;

    let mut prune_info = compute_prune_info(list, &prune_options)?;

    prune_info.reverse(); // delete older snapshots first

    // no keep-* rule set at all => keep everything
    let keep_all = !prune_options.keeps_something();

    if dry_run {
        // report the decisions without deleting anything
        for (info, mut keep) in prune_info {
            if keep_all { keep = true; }

            let backup_time = info.backup_dir.backup_time();
            let group = info.backup_dir.group();

            prune_result.push(json!({
                "backup-type": group.backup_type(),
                "backup-id": group.backup_id(),
                "backup-time": backup_time,
                "keep": keep,
            }));
        }
        return Ok(json!(prune_result));
    }


    // We use a WorkerTask just to have a task log, but run synchrounously
    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.clone(), true)?;

    if keep_all {
        worker.log("No prune selection - keeping all files.");
    } else {
        worker.log(format!("retention options: {}", prune_options.cli_options_string()));
        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
            store, backup_type, backup_id));
    }

    for (info, mut keep) in prune_info {
        if keep_all { keep = true; }

        let backup_time = info.backup_dir.backup_time();
        let timestamp = info.backup_dir.backup_time_string();
        let group = info.backup_dir.group();


        let msg = format!(
            "{}/{}/{} {}",
            group.backup_type(),
            group.backup_id(),
            timestamp,
            if keep { "keep" } else { "remove" },
        );

        worker.log(msg);

        prune_result.push(json!({
            "backup-type": group.backup_type(),
            "backup-id": group.backup_id(),
            "backup-time": backup_time,
            "keep": keep,
        }));

        // NOTE(review): dry_run is always false here (early return above),
        // so this condition reduces to `!keep`
        if !(dry_run || keep) {
            // removal failures are logged as warnings but do not abort the
            // prune run
            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                worker.warn(
                    format!(
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(), err
                    )
                );
            }
        }
    }

    worker.log_result(&Ok(()));

    Ok(json!(prune_result))
}
908 | ||
dfc58d47 DM |
909 | #[api( |
910 | input: { | |
911 | properties: { | |
912 | store: { | |
913 | schema: DATASTORE_SCHEMA, | |
914 | }, | |
915 | }, | |
916 | }, | |
917 | returns: { | |
918 | schema: UPID_SCHEMA, | |
919 | }, | |
bb34b589 | 920 | access: { |
54552dda | 921 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false), |
bb34b589 | 922 | }, |
dfc58d47 DM |
923 | )] |
924 | /// Start garbage collection. | |
6049b71f | 925 | fn start_garbage_collection( |
dfc58d47 | 926 | store: String, |
6049b71f | 927 | _info: &ApiMethod, |
dd5495d6 | 928 | rpcenv: &mut dyn RpcEnvironment, |
6049b71f | 929 | ) -> Result<Value, Error> { |
15e9b4ed | 930 | |
3e6a7dee | 931 | let datastore = DataStore::lookup_datastore(&store)?; |
e6dc35ac | 932 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
15e9b4ed | 933 | |
4fdf5ddf DC |
934 | let job = Job::new("garbage_collection", &store) |
935 | .map_err(|_| format_err!("garbage collection already running"))?; | |
15e9b4ed | 936 | |
0f778e06 | 937 | let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false }; |
15e9b4ed | 938 | |
4fdf5ddf DC |
939 | let upid_str = crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout) |
940 | .map_err(|err| format_err!("unable to start garbage collection job on datastore {} - {}", store, err))?; | |
0f778e06 DM |
941 | |
942 | Ok(json!(upid_str)) | |
15e9b4ed DM |
943 | } |
944 | ||
a92830dc DM |
945 | #[api( |
946 | input: { | |
947 | properties: { | |
948 | store: { | |
949 | schema: DATASTORE_SCHEMA, | |
950 | }, | |
951 | }, | |
952 | }, | |
953 | returns: { | |
954 | type: GarbageCollectionStatus, | |
bb34b589 DM |
955 | }, |
956 | access: { | |
957 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), | |
958 | }, | |
a92830dc DM |
959 | )] |
960 | /// Garbage collection status. | |
5eeea607 | 961 | pub fn garbage_collection_status( |
a92830dc | 962 | store: String, |
6049b71f | 963 | _info: &ApiMethod, |
dd5495d6 | 964 | _rpcenv: &mut dyn RpcEnvironment, |
a92830dc | 965 | ) -> Result<GarbageCollectionStatus, Error> { |
691c89a0 | 966 | |
f2b99c34 DM |
967 | let datastore = DataStore::lookup_datastore(&store)?; |
968 | ||
f2b99c34 | 969 | let status = datastore.last_gc_status(); |
691c89a0 | 970 | |
a92830dc | 971 | Ok(status) |
691c89a0 DM |
972 | } |
973 | ||
bb34b589 | 974 | #[api( |
30fb6025 DM |
975 | returns: { |
976 | description: "List the accessible datastores.", | |
977 | type: Array, | |
978 | items: { | |
979 | description: "Datastore name and description.", | |
455e5f71 | 980 | type: DataStoreListItem, |
30fb6025 DM |
981 | }, |
982 | }, | |
bb34b589 | 983 | access: { |
54552dda | 984 | permission: &Permission::Anybody, |
bb34b589 DM |
985 | }, |
986 | )] | |
987 | /// Datastore list | |
6049b71f DM |
988 | fn get_datastore_list( |
989 | _param: Value, | |
990 | _info: &ApiMethod, | |
54552dda | 991 | rpcenv: &mut dyn RpcEnvironment, |
455e5f71 | 992 | ) -> Result<Vec<DataStoreListItem>, Error> { |
15e9b4ed | 993 | |
d0187a51 | 994 | let (config, _digest) = datastore::config()?; |
15e9b4ed | 995 | |
e6dc35ac | 996 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
54552dda DM |
997 | let user_info = CachedUserInfo::new()?; |
998 | ||
30fb6025 | 999 | let mut list = Vec::new(); |
54552dda | 1000 | |
30fb6025 | 1001 | for (store, (_, data)) in &config.sections { |
e6dc35ac | 1002 | let user_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
54552dda | 1003 | let allowed = (user_privs & (PRIV_DATASTORE_AUDIT| PRIV_DATASTORE_BACKUP)) != 0; |
30fb6025 | 1004 | if allowed { |
455e5f71 FG |
1005 | list.push( |
1006 | DataStoreListItem { | |
1007 | store: store.clone(), | |
1008 | comment: data["comment"].as_str().map(String::from), | |
1009 | } | |
1010 | ); | |
30fb6025 | 1011 | } |
54552dda DM |
1012 | } |
1013 | ||
30fb6025 | 1014 | Ok(list.into()) |
15e9b4ed DM |
1015 | } |
1016 | ||
0ab08ac9 DM |
// API method descriptor for `download_file`: streams one raw (still
// possibly encrypted) file out of a backup snapshot. Declared manually
// (instead of via #[api]) because the handler is an AsyncHttp endpoint.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file),
    &ObjectSchema::new(
        "Download single raw file from backup snapshot.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
// No description for the permission; the final `true` enables the
// "or backup owner" partial-match semantics checked in the handler.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
691c89a0 | 1035 | |
9e47c0a5 DM |
/// Stream a single raw file of a backup snapshot as an HTTP response.
///
/// Parameters are read from `param` (store, backup-type/-id/-time,
/// file-name); the file is sent verbatim, without decoding or
/// decryption, as `application/octet-stream`.
fn download_file(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Caller needs Datastore.Read, or must own the backup group.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        // Build the on-disk path: <base>/<snapshot dir>/<file>.
        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        let file = tokio::fs::File::open(&path)
            .await
            .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

        // Stream the file in chunks; errors are logged server-side and
        // propagated into the body stream.
        let payload = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
            .map_ok(|bytes| hyper::body::Bytes::from(bytes.freeze()))
            .map_err(move |err| {
                eprintln!("error during streaming of '{:?}' - {}", &path, err);
                err
            });
        let body = Body::wrap_stream(payload);

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1086 | ||
6ef9bb59 DC |
// API method descriptor for `download_file_decoded`: like DOWNLOAD_FILE,
// but decodes index/blob formats on the fly; refuses encrypted files.
#[sortable]
pub const API_METHOD_DOWNLOAD_FILE_DECODED: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&download_file_decoded),
    &ObjectSchema::new(
        "Download single decoded file from backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("file-name", false, &BACKUP_ARCHIVE_NAME_SCHEMA),
        ]),
    )
// The `true` enables partial (owner-based) permission matching.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1105 | ||
/// Stream a single file of a snapshot in decoded form.
///
/// Dispatches on the file-name extension: `.didx` and `.fidx` are read
/// through their index readers (reassembling chunk data), `.blob` is
/// unwrapped via `DataBlobReader`. Encrypted files are rejected because
/// the server has no key to decode them.
fn download_file_decoded(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let file_name = tools::required_string_param(&param, "file-name")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Caller needs Datastore.Read, or must own the backup group.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Bail out early for encrypted files — we cannot decode those.
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", file_name);
            }
        }

        println!("Download {} from {} ({}/{})", file_name, store, backup_dir, file_name);

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // rsplitn(2, '.') yields the part after the last dot first.
        let extension = file_name.rsplitn(2, '.').next().unwrap();

        let body = match extension {
            "didx" => {
                let index = DynamicIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
                // Verify the index checksum against the manifest before serving.
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                Body::wrap_stream(AsyncReaderStream::new(reader)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "fidx" => {
                let index = FixedIndexReader::open(&path)
                    .map_err(|err| format_err!("unable to read fixed index '{:?}' - {}", &path, err))?;

                let (csum, size) = index.compute_csum();
                manifest.verify_file(&file_name, &csum, size)?;

                let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
                let reader = AsyncIndexReader::new(index, chunk_reader);
                // Fixed indexes use larger (4 MiB) read buffers.
                Body::wrap_stream(AsyncReaderStream::with_buffer_size(reader, 4*1024*1024)
                    .map_err(move |err| {
                        eprintln!("error during streaming of '{:?}' - {}", path, err);
                        err
                    }))
            },
            "blob" => {
                let file = std::fs::File::open(&path)
                    .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;

                // FIXME: load full blob to verify index checksum?

                Body::wrap_stream(
                    WrappedReaderStream::new(DataBlobReader::new(file, None)?)
                        .map_err(move |err| {
                            eprintln!("error during streaming of '{:?}' - {}", path, err);
                            err
                        })
                )
            },
            extension => {
                bail!("cannot download '{}' files", extension);
            },
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1202 | ||
// API method descriptor for `upload_backup_log`: lets the backup client
// attach its log ('client.log.blob') to an already finished snapshot.
#[sortable]
pub const API_METHOD_UPLOAD_BACKUP_LOG: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&upload_backup_log),
    &ObjectSchema::new(
        "Upload the client backup log file into a backup snapshot ('client.log.blob').",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
        ]),
    )
// Backup privilege is required; the handler additionally enforces that
// the caller is the owner of the backup group.
).access(
    Some("Only the backup creator/owner is allowed to do this."),
    &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_BACKUP, false)
);
9e47c0a5 | 1219 | |
07ee2235 DM |
/// Accept an uploaded client log blob and store it in the snapshot dir.
///
/// The request body is the raw `DataBlob`; it is buffered completely,
/// validated (blob format/CRC), and written as 'client.log.blob'.
/// Fails if the snapshot already has a log, and only the owner of the
/// backup group may upload.
fn upload_backup_log(
    _parts: Parts,
    req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(store)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Strict ownership check — privileges alone are not enough here.
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let owner = datastore.get_owner(backup_dir.group())?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(&file_name);

        // A log may only be uploaded once per snapshot.
        if path.exists() {
            bail!("backup already contains a log.");
        }

        println!("Upload backup log to {}/{}/{}/{}/{}", store,
                 backup_type, backup_id, backup_dir.backup_time_string(), file_name);

        // Collect the whole request body into memory before validating.
        let data = req_body
            .map_err(Error::from)
            .try_fold(Vec::new(), |mut acc, chunk| {
                acc.extend_from_slice(&*chunk);
                future::ok::<_, Error>(acc)
            })
            .await?;

        // always verify blob/CRC at server side
        let blob = DataBlob::load_from_reader(&mut &data[..])?;

        // Atomic write (replace_file writes to a temp file and renames).
        replace_file(&path, blob.raw_data(), CreateOptions::new())?;

        // fixme: use correct formatter
        Ok(crate::server::formatter::json_response(Ok(Value::Null)))
    }.boxed()
}
1272 | ||
5b1cfa01 DC |
1273 | #[api( |
1274 | input: { | |
1275 | properties: { | |
1276 | store: { | |
1277 | schema: DATASTORE_SCHEMA, | |
1278 | }, | |
1279 | "backup-type": { | |
1280 | schema: BACKUP_TYPE_SCHEMA, | |
1281 | }, | |
1282 | "backup-id": { | |
1283 | schema: BACKUP_ID_SCHEMA, | |
1284 | }, | |
1285 | "backup-time": { | |
1286 | schema: BACKUP_TIME_SCHEMA, | |
1287 | }, | |
1288 | "filepath": { | |
1289 | description: "Base64 encoded path.", | |
1290 | type: String, | |
1291 | } | |
1292 | }, | |
1293 | }, | |
1294 | access: { | |
1295 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), | |
1296 | }, | |
1297 | )] | |
1298 | /// Get the entries of the given path of the catalog | |
1299 | fn catalog( | |
1300 | store: String, | |
1301 | backup_type: String, | |
1302 | backup_id: String, | |
1303 | backup_time: i64, | |
1304 | filepath: String, | |
1305 | _param: Value, | |
1306 | _info: &ApiMethod, | |
1307 | rpcenv: &mut dyn RpcEnvironment, | |
1308 | ) -> Result<Value, Error> { | |
1309 | let datastore = DataStore::lookup_datastore(&store)?; | |
1310 | ||
e6dc35ac | 1311 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
5b1cfa01 | 1312 | |
e0e5b442 | 1313 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
5b1cfa01 | 1314 | |
bff85572 | 1315 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?; |
5b1cfa01 | 1316 | |
9238cdf5 FG |
1317 | let file_name = CATALOG_NAME; |
1318 | ||
2d55beec | 1319 | let (manifest, files) = read_backup_index(&datastore, &backup_dir)?; |
9238cdf5 FG |
1320 | for file in files { |
1321 | if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { | |
1322 | bail!("cannot decode '{}' - is encrypted", file_name); | |
1323 | } | |
1324 | } | |
1325 | ||
5b1cfa01 DC |
1326 | let mut path = datastore.base_path(); |
1327 | path.push(backup_dir.relative_path()); | |
9238cdf5 | 1328 | path.push(file_name); |
5b1cfa01 DC |
1329 | |
1330 | let index = DynamicIndexReader::open(&path) | |
1331 | .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; | |
1332 | ||
2d55beec FG |
1333 | let (csum, size) = index.compute_csum(); |
1334 | manifest.verify_file(&file_name, &csum, size)?; | |
1335 | ||
14f6c9cb | 1336 | let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); |
5b1cfa01 DC |
1337 | let reader = BufferedDynamicReader::new(index, chunk_reader); |
1338 | ||
1339 | let mut catalog_reader = CatalogReader::new(reader); | |
1340 | let mut current = catalog_reader.root()?; | |
1341 | let mut components = vec![]; | |
1342 | ||
1343 | ||
1344 | if filepath != "root" { | |
1345 | components = base64::decode(filepath)?; | |
1346 | if components.len() > 0 && components[0] == '/' as u8 { | |
1347 | components.remove(0); | |
1348 | } | |
1349 | for component in components.split(|c| *c == '/' as u8) { | |
1350 | if let Some(entry) = catalog_reader.lookup(¤t, component)? { | |
1351 | current = entry; | |
1352 | } else { | |
1353 | bail!("path {:?} not found in catalog", &String::from_utf8_lossy(&components)); | |
1354 | } | |
1355 | } | |
1356 | } | |
1357 | ||
1358 | let mut res = Vec::new(); | |
1359 | ||
1360 | for direntry in catalog_reader.read_dir(¤t)? { | |
1361 | let mut components = components.clone(); | |
1362 | components.push('/' as u8); | |
1363 | components.extend(&direntry.name); | |
1364 | let path = base64::encode(components); | |
1365 | let text = String::from_utf8_lossy(&direntry.name); | |
1366 | let mut entry = json!({ | |
1367 | "filepath": path, | |
1368 | "text": text, | |
1369 | "type": CatalogEntryType::from(&direntry.attr).to_string(), | |
1370 | "leaf": true, | |
1371 | }); | |
1372 | match direntry.attr { | |
1373 | DirEntryAttribute::Directory { start: _ } => { | |
1374 | entry["leaf"] = false.into(); | |
1375 | }, | |
1376 | DirEntryAttribute::File { size, mtime } => { | |
1377 | entry["size"] = size.into(); | |
1378 | entry["mtime"] = mtime.into(); | |
1379 | }, | |
1380 | _ => {}, | |
1381 | } | |
1382 | res.push(entry); | |
1383 | } | |
1384 | ||
1385 | Ok(res.into()) | |
1386 | } | |
1387 | ||
53a561a2 WB |
/// Recursively add a pxar entry (and, for directories, its children)
/// to a ZIP stream.
///
/// Entry paths are stored relative to `prefix`. Hardlinks are resolved
/// through `decoder` and added as regular files. Recursion returns a
/// boxed future because async fns cannot directly recurse.
fn recurse_files<'a, T, W>(
    zip: &'a mut ZipEncoder<W>,
    decoder: &'a mut Accessor<T>,
    prefix: &'a Path,
    file: FileEntry<T>,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>>
where
    T: Clone + pxar::accessor::ReadAt + Unpin + Send + Sync + 'static,
    W: tokio::io::AsyncWrite + Unpin + Send + 'static,
{
    Box::pin(async move {
        let metadata = file.entry().metadata();
        // Strip the archive-internal prefix so ZIP paths are relative.
        let path = file.entry().path().strip_prefix(&prefix)?.to_path_buf();

        match file.kind() {
            EntryKind::File { .. } => {
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(file.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Hardlink(_) => {
                // Resolve the link target and store its contents as a file.
                let realfile = decoder.follow_hardlink(&file).await?;
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, Some(realfile.contents().await?))
                    .await
                    .map_err(|err| format_err!("could not send file entry: {}", err))?;
            }
            EntryKind::Directory => {
                let dir = file.enter_directory().await?;
                let mut readdir = dir.read_dir();
                // Directory entry itself (no contents), then recurse into
                // each decoded child.
                let entry = ZipEntry::new(
                    path,
                    metadata.stat.mtime.secs,
                    metadata.stat.mode as u16,
                    false,
                );
                zip.add_entry::<FileContents<T>>(entry, None).await?;
                while let Some(entry) = readdir.next().await {
                    let entry = entry?.decode_entry().await?;
                    recurse_files(zip, decoder, prefix, entry).await?;
                }
            }
            _ => {} // ignore all else (symlinks, devices, sockets, ...)
        };

        Ok(())
    })
}
1447 | ||
d33d8f4e DC |
// API method descriptor for `pxar_file_download`: extracts a single
// file (or a directory as a ZIP) out of an unencrypted pxar archive
// inside a snapshot.
#[sortable]
pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
    &ApiHandler::AsyncHttp(&pxar_file_download),
    &ObjectSchema::new(
        "Download single file from pxar file of a backup snapshot. Only works if it's not encrypted.",
        &sorted!([
            ("store", false, &DATASTORE_SCHEMA),
            ("backup-type", false, &BACKUP_TYPE_SCHEMA),
            ("backup-id", false, &BACKUP_ID_SCHEMA),
            ("backup-time", false, &BACKUP_TIME_SCHEMA),
            ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
        ]),
    )
// The `true` enables partial (owner-based) permission matching.
).access(None, &Permission::Privilege(
    &["datastore", "{store}"],
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true)
);
1466 | ||
/// Stream one entry out of a pxar archive inside a snapshot.
///
/// `filepath` is base64 and has the form "<archive-name>/<path-in-archive>".
/// Regular files and hardlink targets are streamed directly; directories
/// are packed on the fly into a ZIP stream by an internal worker task.
/// Encrypted archives are rejected.
fn pxar_file_download(
    _parts: Parts,
    _req_body: Body,
    param: Value,
    _info: &ApiMethod,
    rpcenv: Box<dyn RpcEnvironment>,
) -> ApiResponseFuture {

    async move {
        let store = tools::required_string_param(&param, "store")?;
        let datastore = DataStore::lookup_datastore(&store)?;

        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

        let filepath = tools::required_string_param(&param, "filepath")?.to_owned();

        let backup_type = tools::required_string_param(&param, "backup-type")?;
        let backup_id = tools::required_string_param(&param, "backup-id")?;
        let backup_time = tools::required_integer_param(&param, "backup-time")?;

        let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;

        // Caller needs Datastore.Read, or must own the backup group.
        check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_READ)?;

        // Decode the path and drop a leading '/' if present.
        let mut components = base64::decode(&filepath)?;
        if components.len() > 0 && components[0] == '/' as u8 {
            components.remove(0);
        }

        // First component is the pxar archive name, the rest is the path
        // inside that archive.
        let mut split = components.splitn(2, |c| *c == '/' as u8);
        let pxar_name = std::str::from_utf8(split.next().unwrap())?;
        let file_path = split.next().ok_or(format_err!("filepath looks strange '{}'", filepath))?;
        let (manifest, files) = read_backup_index(&datastore, &backup_dir)?;
        for file in files {
            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
                bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
        }

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        path.push(pxar_name);

        let index = DynamicIndexReader::open(&path)
            .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;

        // Verify the archive index against the manifest before serving.
        let (csum, size) = index.compute_csum();
        manifest.verify_file(&pxar_name, &csum, size)?;

        let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None);
        let reader = BufferedDynamicReader::new(index, chunk_reader);
        let archive_size = reader.archive_size();
        let reader = LocalDynamicReadAt::new(reader);

        // Random-access pxar accessor over the reassembled archive.
        let decoder = Accessor::new(reader, archive_size).await?;
        let root = decoder.open_root().await?;
        let file = root
            .lookup(OsStr::from_bytes(file_path)).await?
            .ok_or(format_err!("error opening '{:?}'", file_path))?;

        let body = match file.kind() {
            EntryKind::File { .. } => Body::wrap_stream(
                AsyncReaderStream::new(file.contents().await?).map_err(move |err| {
                    eprintln!("error during streaming of file '{:?}' - {}", filepath, err);
                    err
                }),
            ),
            EntryKind::Hardlink(_) => Body::wrap_stream(
                AsyncReaderStream::new(decoder.follow_hardlink(&file).await?.contents().await?)
                    .map_err(move |err| {
                        eprintln!(
                            "error during streaming of hardlink '{:?}' - {}",
                            filepath, err
                        );
                        err
                    }),
            ),
            EntryKind::Directory => {
                // Directories get zipped on the fly: a spawned task feeds
                // the ZIP encoder into a channel, whose receiver becomes
                // the response body stream.
                let (sender, receiver) = tokio::sync::mpsc::channel(100);
                let mut prefix = PathBuf::new();
                let mut components = file.entry().path().components();
                components.next_back(); // discard last component, so ZIP paths are relative to the dir
                for comp in components {
                    prefix.push(comp);
                }

                let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);

                crate::server::spawn_internal_task(async move {
                    let mut zipencoder = ZipEncoder::new(channelwriter);
                    let mut decoder = decoder;
                    recurse_files(&mut zipencoder, &mut decoder, &prefix, file)
                        .await
                        .map_err(|err| eprintln!("error during creating of zip: {}", err))?;

                    zipencoder
                        .finish()
                        .await
                        .map_err(|err| eprintln!("error during finishing of zip: {}", err))
                });

                Body::wrap_stream(receiver.map_err(move |err| {
                    eprintln!("error during streaming of zip '{:?}' - {}", filepath, err);
                    err
                }))
            }
            other => bail!("cannot download file of type {:?}", other),
        };

        // fixme: set other headers ?
        Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/octet-stream")
            .body(body)
            .unwrap())
    }.boxed()
}
1584 | ||
1a0d3d11 DM |
1585 | #[api( |
1586 | input: { | |
1587 | properties: { | |
1588 | store: { | |
1589 | schema: DATASTORE_SCHEMA, | |
1590 | }, | |
1591 | timeframe: { | |
1592 | type: RRDTimeFrameResolution, | |
1593 | }, | |
1594 | cf: { | |
1595 | type: RRDMode, | |
1596 | }, | |
1597 | }, | |
1598 | }, | |
1599 | access: { | |
1600 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), | |
1601 | }, | |
1602 | )] | |
1603 | /// Read datastore stats | |
1604 | fn get_rrd_stats( | |
1605 | store: String, | |
1606 | timeframe: RRDTimeFrameResolution, | |
1607 | cf: RRDMode, | |
1608 | _param: Value, | |
1609 | ) -> Result<Value, Error> { | |
1610 | ||
431cc7b1 DC |
1611 | create_value_from_rrd( |
1612 | &format!("datastore/{}", store), | |
1a0d3d11 DM |
1613 | &[ |
1614 | "total", "used", | |
c94e1f65 DM |
1615 | "read_ios", "read_bytes", |
1616 | "write_ios", "write_bytes", | |
1617 | "io_ticks", | |
1a0d3d11 DM |
1618 | ], |
1619 | timeframe, | |
1620 | cf, | |
1621 | ) | |
1622 | } | |
1623 | ||
912b3f5b DM |
1624 | #[api( |
1625 | input: { | |
1626 | properties: { | |
1627 | store: { | |
1628 | schema: DATASTORE_SCHEMA, | |
1629 | }, | |
1630 | "backup-type": { | |
1631 | schema: BACKUP_TYPE_SCHEMA, | |
1632 | }, | |
1633 | "backup-id": { | |
1634 | schema: BACKUP_ID_SCHEMA, | |
1635 | }, | |
1636 | "backup-time": { | |
1637 | schema: BACKUP_TIME_SCHEMA, | |
1638 | }, | |
1639 | }, | |
1640 | }, | |
1641 | access: { | |
1401f4be | 1642 | permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, true), |
912b3f5b DM |
1643 | }, |
1644 | )] | |
1645 | /// Get "notes" for a specific backup | |
1646 | fn get_notes( | |
1647 | store: String, | |
1648 | backup_type: String, | |
1649 | backup_id: String, | |
1650 | backup_time: i64, | |
1651 | rpcenv: &mut dyn RpcEnvironment, | |
1652 | ) -> Result<String, Error> { | |
1653 | let datastore = DataStore::lookup_datastore(&store)?; | |
1654 | ||
e6dc35ac | 1655 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
e0e5b442 | 1656 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
912b3f5b | 1657 | |
1401f4be | 1658 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_AUDIT)?; |
912b3f5b | 1659 | |
883aa6d5 | 1660 | let (manifest, _) = datastore.load_manifest(&backup_dir)?; |
912b3f5b | 1661 | |
883aa6d5 | 1662 | let notes = manifest.unprotected["notes"] |
912b3f5b DM |
1663 | .as_str() |
1664 | .unwrap_or(""); | |
1665 | ||
1666 | Ok(String::from(notes)) | |
1667 | } | |
1668 | ||
1669 | #[api( | |
1670 | input: { | |
1671 | properties: { | |
1672 | store: { | |
1673 | schema: DATASTORE_SCHEMA, | |
1674 | }, | |
1675 | "backup-type": { | |
1676 | schema: BACKUP_TYPE_SCHEMA, | |
1677 | }, | |
1678 | "backup-id": { | |
1679 | schema: BACKUP_ID_SCHEMA, | |
1680 | }, | |
1681 | "backup-time": { | |
1682 | schema: BACKUP_TIME_SCHEMA, | |
1683 | }, | |
1684 | notes: { | |
1685 | description: "A multiline text.", | |
1686 | }, | |
1687 | }, | |
1688 | }, | |
1689 | access: { | |
b728a69e FG |
1690 | permission: &Permission::Privilege(&["datastore", "{store}"], |
1691 | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP, | |
1692 | true), | |
912b3f5b DM |
1693 | }, |
1694 | )] | |
1695 | /// Set "notes" for a specific backup | |
1696 | fn set_notes( | |
1697 | store: String, | |
1698 | backup_type: String, | |
1699 | backup_id: String, | |
1700 | backup_time: i64, | |
1701 | notes: String, | |
1702 | rpcenv: &mut dyn RpcEnvironment, | |
1703 | ) -> Result<(), Error> { | |
1704 | let datastore = DataStore::lookup_datastore(&store)?; | |
1705 | ||
e6dc35ac | 1706 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
e0e5b442 | 1707 | let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?; |
912b3f5b | 1708 | |
b728a69e | 1709 | check_priv_or_backup_owner(&datastore, backup_dir.group(), &auth_id, PRIV_DATASTORE_MODIFY)?; |
912b3f5b | 1710 | |
1a374fcf SR |
1711 | datastore.update_manifest(&backup_dir,|manifest| { |
1712 | manifest.unprotected["notes"] = notes.into(); | |
1713 | }).map_err(|err| format_err!("unable to update manifest blob - {}", err))?; | |
912b3f5b DM |
1714 | |
1715 | Ok(()) | |
1716 | } | |
1717 | ||
72be0eb1 | 1718 | #[api( |
4940012d | 1719 | input: { |
72be0eb1 DW |
1720 | properties: { |
1721 | store: { | |
1722 | schema: DATASTORE_SCHEMA, | |
1723 | }, | |
1724 | "backup-type": { | |
1725 | schema: BACKUP_TYPE_SCHEMA, | |
1726 | }, | |
1727 | "backup-id": { | |
1728 | schema: BACKUP_ID_SCHEMA, | |
1729 | }, | |
1730 | "new-owner": { | |
e6dc35ac | 1731 | type: Authid, |
72be0eb1 DW |
1732 | }, |
1733 | }, | |
4940012d FG |
1734 | }, |
1735 | access: { | |
bff85572 FG |
1736 | permission: &Permission::Anybody, |
1737 | description: "Datastore.Modify on whole datastore, or changing ownership between user and a user's token for owned backups with Datastore.Backup" | |
4940012d | 1738 | }, |
72be0eb1 DW |
1739 | )] |
1740 | /// Change owner of a backup group | |
1741 | fn set_backup_owner( | |
1742 | store: String, | |
1743 | backup_type: String, | |
1744 | backup_id: String, | |
e6dc35ac | 1745 | new_owner: Authid, |
bff85572 | 1746 | rpcenv: &mut dyn RpcEnvironment, |
72be0eb1 DW |
1747 | ) -> Result<(), Error> { |
1748 | ||
1749 | let datastore = DataStore::lookup_datastore(&store)?; | |
1750 | ||
1751 | let backup_group = BackupGroup::new(backup_type, backup_id); | |
1752 | ||
bff85572 FG |
1753 | let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; |
1754 | ||
72be0eb1 DW |
1755 | let user_info = CachedUserInfo::new()?; |
1756 | ||
bff85572 FG |
1757 | let privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); |
1758 | ||
1759 | let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 { | |
1760 | // High-privilege user/token | |
1761 | true | |
1762 | } else if (privs & PRIV_DATASTORE_BACKUP) != 0 { | |
1763 | let owner = datastore.get_owner(&backup_group)?; | |
1764 | ||
1765 | match (owner.is_token(), new_owner.is_token()) { | |
1766 | (true, true) => { | |
1767 | // API token to API token, owned by same user | |
1768 | let owner = owner.user(); | |
1769 | let new_owner = new_owner.user(); | |
1770 | owner == new_owner && Authid::from(owner.clone()) == auth_id | |
1771 | }, | |
1772 | (true, false) => { | |
1773 | // API token to API token owner | |
1774 | Authid::from(owner.user().clone()) == auth_id | |
1775 | && new_owner == auth_id | |
1776 | }, | |
1777 | (false, true) => { | |
1778 | // API token owner to API token | |
1779 | owner == auth_id | |
1780 | && Authid::from(new_owner.user().clone()) == auth_id | |
1781 | }, | |
1782 | (false, false) => { | |
1783 | // User to User, not allowed for unprivileged users | |
1784 | false | |
1785 | }, | |
1786 | } | |
1787 | } else { | |
1788 | false | |
1789 | }; | |
1790 | ||
1791 | if !allowed { | |
1792 | return Err(http_err!(UNAUTHORIZED, | |
1793 | "{} does not have permission to change owner of backup group '{}' to {}", | |
1794 | auth_id, | |
1795 | backup_group, | |
1796 | new_owner, | |
1797 | )); | |
1798 | } | |
1799 | ||
e6dc35ac FG |
1800 | if !user_info.is_active_auth_id(&new_owner) { |
1801 | bail!("{} '{}' is inactive or non-existent", | |
1802 | if new_owner.is_token() { | |
1803 | "API token".to_string() | |
1804 | } else { | |
1805 | "user".to_string() | |
1806 | }, | |
1807 | new_owner); | |
72be0eb1 DW |
1808 | } |
1809 | ||
1810 | datastore.set_owner(&backup_group, &new_owner, true)?; | |
1811 | ||
1812 | Ok(()) | |
1813 | } | |
1814 | ||
// Sub-routes available below `/admin/datastore/{store}/`.
// NOTE(review): entries appear to be kept alphabetically sorted by path name;
// presumably the SubdirMap lookup relies on that order — confirm before
// inserting new entries out of sequence.
#[sortable]
const DATASTORE_INFO_SUBDIRS: SubdirMap = &[
    (
        "catalog",
        &Router::new()
            .get(&API_METHOD_CATALOG)
    ),
    (
        "change-owner",
        &Router::new()
            .post(&API_METHOD_SET_BACKUP_OWNER)
    ),
    (
        "download",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE)
    ),
    (
        "download-decoded",
        &Router::new()
            .download(&API_METHOD_DOWNLOAD_FILE_DECODED)
    ),
    (
        "files",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOT_FILES)
    ),
    (
        // garbage collection: status query + manual trigger
        "gc",
        &Router::new()
            .get(&API_METHOD_GARBAGE_COLLECTION_STATUS)
            .post(&API_METHOD_START_GARBAGE_COLLECTION)
    ),
    (
        "groups",
        &Router::new()
            .get(&API_METHOD_LIST_GROUPS)
    ),
    (
        // per-snapshot notes, stored in the manifest's unprotected section
        "notes",
        &Router::new()
            .get(&API_METHOD_GET_NOTES)
            .put(&API_METHOD_SET_NOTES)
    ),
    (
        "prune",
        &Router::new()
            .post(&API_METHOD_PRUNE)
    ),
    (
        "pxar-file-download",
        &Router::new()
            .download(&API_METHOD_PXAR_FILE_DOWNLOAD)
    ),
    (
        "rrd",
        &Router::new()
            .get(&API_METHOD_GET_RRD_STATS)
    ),
    (
        "snapshots",
        &Router::new()
            .get(&API_METHOD_LIST_SNAPSHOTS)
            .delete(&API_METHOD_DELETE_SNAPSHOT)
    ),
    (
        "status",
        &Router::new()
            .get(&API_METHOD_STATUS)
    ),
    (
        "upload-backup-log",
        &Router::new()
            .upload(&API_METHOD_UPLOAD_BACKUP_LOG)
    ),
    (
        "verify",
        &Router::new()
            .post(&API_METHOD_VERIFY)
    ),
];
1896 | ||
// Router for a single datastore: GET lists the available sub-routes,
// everything else is dispatched through DATASTORE_INFO_SUBDIRS.
const DATASTORE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(DATASTORE_INFO_SUBDIRS))
    .subdirs(DATASTORE_INFO_SUBDIRS);
1900 | ||
1901 | ||
// Top-level datastore router: GET enumerates all datastores; the `{store}`
// path component is captured and routed to the per-datastore router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_GET_DATASTORE_LIST)
    .match_all("store", &DATASTORE_INFO_ROUTER);